[
  {
    "path": ".github/FUNDING.yml",
    "content": "github: davidmigloz\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/1_feature.yml",
    "content": "name: \"🚀 Feature Request\"\ndescription: Suggest a new feature or enhancement.\nlabels: [\"t:enhancement\"]\nbody:\n  - type: markdown\n    attributes:\n      value: >\n        Thank you for taking the time to file a feature request. Before creating a new\n        issue, please make sure to take a few moments to check the [issue tracker](https://github.com/davidmigloz/langchain_dart/issues)\n        for existing issues about the feature.\n\n  - type: dropdown\n    id: package\n    validations:\n      required: true\n    attributes:\n      label: Package\n      description: >\n        Which package from the LangChain.dart ecosystem is this feature related to?\n      options:\n        - anthropic_sdk_dart\n        - chromadb\n        - googleai_dart\n        - langchain\n        - langchain_amazon\n        - langchain_anthropic\n        - langchain_chroma\n        - langchain_cohere\n        - langchain_community\n        - langchain_core\n        - langchain_firebase\n        - langchain_google\n        - langchain_huggingface\n        - langchain_microsoft\n        - langchain_mistralai\n        - langchain_ollama\n        - langchain_openai\n        - langchain_pinecone\n        - langchain_supabase\n        - langchain_weaviate\n        - langchain_wikipedia\n        - langchain_wolfram\n        - langgraph\n        - mistralai_dart\n        - ollama_dart\n        - openai_dart\n        - openai_realtime_dart\n        - tavily_dart\n        - vertex_ai\n        - other\n\n  - type: textarea\n    id: feature-request\n    validations:\n      required: true\n    attributes:\n      label: Feature Request\n      description: >\n        A clear and concise description of the feature proposal.  \n\n        **Please be as detailed as possible and include:**  \n        - **Detailed Description:** What exactly should this feature do?  \n        - **Use Cases:**  How would you or others use this feature? Real-world examples are very helpful.  
\n        - **Example Code Snippets (if applicable):**  Show what the API could look like or how it might be used in code.  \n        - **Related Resources:** Links to relevant GitHub repos, papers, other libraries, or documentation that inspired this feature.  \n\n  - type: textarea\n    id: motivation\n    validations:\n      required: true\n    attributes:\n      label: Motivation\n      description: >\n        Please outline the motivation for the proposal.  \n\n        **Focus on the problem this feature solves and its impact:**  \n        - **Problem:** What problem does this feature address? Be specific and explain the current pain points.  \n        - **Benefits:** What are the positive outcomes of implementing this feature?  \n        - **Related Issues/Discussions (if any):** Link to existing GitHub issues, discussions, or external conversations that are relevant.  \n\n  - type: textarea\n    id: contribution\n    validations:\n      required: true\n    attributes:\n      label: Your contribution\n      description: >\n        Is there any way that you could help, e.g. by submitting a PR or helping to test the feature?\n        You can find more information in our [CONTRIBUTING guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md)\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/2_bug.yml",
    "content": "name: \"🐛 Bug Report\"\ndescription: Submit a bug report to help us improve LangChain.dart.\nlabels: [\"t:bug\"]\nbody:\n  - type: markdown\n    attributes:\n      value: >\n        Thank you for taking the time to file a bug report. Before creating a new\n        issue, please make sure to take a few moments to check the [issue tracker](https://github.com/davidmigloz/langchain_dart/issues)\n        for existing issues about the bug.\n\n  - type: dropdown\n    id: package\n    validations:\n      required: true\n    attributes:\n      label: Package\n      description: >\n        Which package from the LangChain.dart ecosystem is this bug related to?\n      options:\n        - anthropic_sdk_dart\n        - chromadb\n        - googleai_dart\n        - langchain\n        - langchain_amazon\n        - langchain_anthropic\n        - langchain_chroma\n        - langchain_cohere\n        - langchain_community\n        - langchain_core\n        - langchain_firebase\n        - langchain_google\n        - langchain_huggingface\n        - langchain_microsoft\n        - langchain_mistralai\n        - langchain_ollama\n        - langchain_openai\n        - langchain_pinecone\n        - langchain_supabase\n        - langchain_weaviate\n        - langchain_wikipedia\n        - langchain_wolfram\n        - langgraph\n        - mistralai_dart\n        - ollama_dart\n        - openai_dart\n        - openai_realtime_dart\n        - tavily_dart\n        - vertex_ai\n        - other\n\n  - type: textarea\n    id: reproduction\n    validations:\n      required: true\n    attributes:\n      label: Reproduction\n      description: |\n        Please provide a **minimal, reproducible code sample** that clearly demonstrates the bug.  \n\n        **Include:**  \n        - **Minimal Code:** The shortest possible code snippet to trigger the bug.  Focus on isolating the issue.  \n        - **Steps to reproduce:**  Number the exact steps needed to run the code and see the bug.  
\n        - **Error Messages/Stack Traces:** Copy and paste any error messages or full stack traces. Use code blocks for formatting.  \n        - **Input Data (if applicable):** If the bug depends on specific input data (e.g., a specific prompt or document), include a minimal example of that data as well.  \n\n      placeholder: |\n        Steps to reproduce the behavior:\n          1. Run this code:\n              ```dart\n              // Your minimal code example here\n              ```\n          2. See this error:\n              ```\n              // Error message or stack trace here\n              ```\n          3. ... (Further steps if needed)\n\n  - type: textarea\n    id: current-behavior\n    validations:\n      required: true\n    attributes:\n      label: Current behavior\n      description: Describe what is actually happening when you run the code. Be specific and detailed.\n\n  - type: textarea\n    id: expected-behavior\n    validations:\n      required: true\n    attributes:\n      label: Expected behavior\n      description: A clear and concise description of what you would expect to happen if the bug was not present.\n\n  - type: textarea\n    id: contribution\n    validations:\n      required: true\n    attributes:\n      label: Your contribution\n      description: >\n        Is there any way that you could help, e.g. by submitting a PR or helping to test the fix?\n        You can find more information in our [CONTRIBUTING guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md)\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/3_documentation.yml",
    "content": "name: \"📄 Documentation\"\ndescription: Report an issue related to the LangChain.dart documentation.\nlabels: [\"t:documentation\"]\n\nbody:\n  - type: textarea\n    attributes:\n      label: \"Issue with current documentation:\"\n      description: >\n        Please make sure to leave a reference to the document/code you're referring to.\n\n  - type: textarea\n    attributes:\n      label: \"Idea or request for content:\"\n      description: >\n        Please describe as clearly as possible what topics you think are missing from the current \n        documentation.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\ncontact_links:\n  - name: LangChain.dart Discord\n    url: https://discord.gg/x4qbhqecVR\n    about: General community and contributors discussions.\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "<!-- Thank you for contributing to LangChain.dart!\n\nReplace this comment with:\n  - Description: a description of the change.\n  - Issue: the issue # it resolves (if applicable).\n  - Dependencies: any dependencies required for this change.\n  - Tag maintainer: for a quicker response, tag the relevant maintainer (see below).\n\nIf you're adding a new integration, please include:\n  1. a test for the integration, preferably unit tests that do not rely on network access.\n  2. an example showing its use.\n\nMaintainer responsibilities:\n  - General: @davidmigloz\n\nSee contribution guidelines for more information on how to write/run tests, lint, etc: \nhttps://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md\n -->\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file\n  - package-ecosystem: pub\n    directory: /\n    schedule:\n      interval: weekly\n    groups:\n      dart_dep:\n        update-types:\n          - \"major\"\n          - \"minor\"\n          - \"patch\"\n  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot\n  - package-ecosystem: github-actions\n    directory: /\n    schedule:\n      interval: weekly\n    groups:\n      actions_dep:\n        update-types:\n          - \"major\"\n          - \"minor\"\n          - \"patch\"\n"
  },
  {
    "path": ".github/workflows/docs.yaml",
    "content": "name: Docs\n\non:\n  push:\n    tags:\n      - 'langchain-v*.*.*'\n  workflow_dispatch:\n\n# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages\npermissions:\n  contents: read\n  pages: write\n  id-token: write\n\n# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.\n# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.\nconcurrency:\n  group: \"pages\"\n  cancel-in-progress: false\n\njobs:\n  deploy:\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n    runs-on: ubuntu-latest\n    timeout-minutes: 30\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd\n\n      - name: Setup Pages\n        uses: actions/configure-pages@45bfe0192ca1faeb007ade9deae92b16b8254a0d\n\n      - name: Upload artifact\n        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b\n        with:\n          path: ./docs\n\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@cd2ce8fcbc39b97be8ca5fce6e763baed58fa128\n"
  },
  {
    "path": ".github/workflows/gemini-cli.yml",
    "content": "name: '💬 Gemini CLI'\n\non:\n  pull_request_review_comment:\n    types:\n      - 'created'\n  pull_request_review:\n    types:\n      - 'submitted'\n  issue_comment:\n    types:\n      - 'created'\n\nconcurrency:\n  group: '${{ github.workflow }}-${{ github.event.issue.number }}'\n  cancel-in-progress: |-\n    ${{ github.event.sender.type == 'User' && ( github.event.issue.author_association == 'OWNER' || github.event.issue.author_association == 'MEMBER' || github.event.issue.author_association == 'COLLABORATOR') }}\n\ndefaults:\n  run:\n    shell: 'bash'\n\npermissions:\n  contents: 'write'\n  id-token: 'write'\n  pull-requests: 'write'\n  issues: 'write'\n\njobs:\n  gemini-cli:\n    # This condition is complex to ensure we only run when explicitly invoked.\n    if: |-\n      github.event_name == 'workflow_dispatch' ||\n      (\n        github.event_name == 'issues' && github.event.action == 'opened' &&\n        contains(github.event.issue.body, '@gemini-cli') &&\n        !contains(github.event.issue.body, '/review') &&\n        !contains(github.event.issue.body, '/triage') &&\n        (\n          github.event.sender.type == 'User' && (\n            github.event.issue.author_association == 'OWNER' ||\n            github.event.issue.author_association == 'MEMBER' ||\n            github.event.issue.author_association == 'COLLABORATOR'\n          )\n        )\n      ) ||\n      (\n        github.event_name == 'issue_comment' &&\n        contains(github.event.comment.body, '@gemini-cli') &&\n        !contains(github.event.comment.body, '/review') &&\n        !contains(github.event.comment.body, '/triage') &&\n        (\n          github.event.sender.type == 'User' && (\n            github.event.comment.author_association == 'OWNER' ||\n            github.event.comment.author_association == 'MEMBER' ||\n            github.event.comment.author_association == 'COLLABORATOR'\n          )\n        )\n      ) ||\n      (\n        github.event_name == 
'pull_request_review' &&\n        contains(github.event.review.body, '@gemini-cli') &&\n        !contains(github.event.review.body, '/review') &&\n        !contains(github.event.review.body, '/triage') &&\n        (\n          github.event.sender.type == 'User' && (\n            github.event.review.author_association == 'OWNER' ||\n            github.event.review.author_association == 'MEMBER' ||\n            github.event.review.author_association == 'COLLABORATOR'\n          )\n        )\n      ) ||\n      (\n        github.event_name == 'pull_request_review_comment' &&\n        contains(github.event.comment.body, '@gemini-cli') &&\n        !contains(github.event.comment.body, '/review') &&\n        !contains(github.event.comment.body, '/triage') &&\n        (\n          github.event.sender.type == 'User' && (\n            github.event.comment.author_association == 'OWNER' ||\n            github.event.comment.author_association == 'MEMBER' ||\n            github.event.comment.author_association == 'COLLABORATOR'\n          )\n        )\n      )\n    timeout-minutes: 10\n    runs-on: 'ubuntu-latest'\n\n    steps:\n      - name: 'Generate GitHub App Token'\n        id: 'generate_token'\n        if: |-\n          ${{ vars.APP_ID }}\n        uses: 'actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859' # ratchet:actions/create-github-app-token@v2\n        with:\n          app-id: '${{ vars.APP_ID }}'\n          private-key: '${{ secrets.APP_PRIVATE_KEY }}'\n\n      - name: 'Get context from event'\n        id: 'get_context'\n        env:\n          EVENT_NAME: '${{ github.event_name }}'\n          EVENT_PAYLOAD: '${{ toJSON(github.event) }}'\n        run: |-\n          set -euo pipefail\n\n          USER_REQUEST=\"\"\n          ISSUE_NUMBER=\"\"\n          IS_PR=\"false\"\n\n          if [[ \"${EVENT_NAME}\" == \"issues\" ]]; then\n            USER_REQUEST=$(echo \"${EVENT_PAYLOAD}\" | jq -r .issue.body)\n            ISSUE_NUMBER=$(echo 
\"${EVENT_PAYLOAD}\" | jq -r .issue.number)\n          elif [[ \"${EVENT_NAME}\" == \"issue_comment\" ]]; then\n            USER_REQUEST=$(echo \"${EVENT_PAYLOAD}\" | jq -r .comment.body)\n            ISSUE_NUMBER=$(echo \"${EVENT_PAYLOAD}\" | jq -r .issue.number)\n            if [[ $(echo \"${EVENT_PAYLOAD}\" | jq -r .issue.pull_request) != \"null\" ]]; then\n              IS_PR=\"true\"\n            fi\n          elif [[ \"${EVENT_NAME}\" == \"pull_request_review\" ]]; then\n            USER_REQUEST=$(echo \"${EVENT_PAYLOAD}\" | jq -r .review.body)\n            ISSUE_NUMBER=$(echo \"${EVENT_PAYLOAD}\" | jq -r .pull_request.number)\n            IS_PR=\"true\"\n          elif [[ \"${EVENT_NAME}\" == \"pull_request_review_comment\" ]]; then\n            USER_REQUEST=$(echo \"${EVENT_PAYLOAD}\" | jq -r .comment.body)\n            ISSUE_NUMBER=$(echo \"${EVENT_PAYLOAD}\" | jq -r .pull_request.number)\n            IS_PR=\"true\"\n          fi\n\n          # Clean up user request\n          USER_REQUEST=$(echo \"${USER_REQUEST}\" | sed 's/.*@gemini-cli//' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')\n\n          {\n            echo \"user_request=${USER_REQUEST}\"\n            echo \"issue_number=${ISSUE_NUMBER}\"\n            echo \"is_pr=${IS_PR}\"\n          } >> \"${GITHUB_OUTPUT}\"\n\n      - name: 'Set up git user for commits'\n        run: |-\n          git config --global user.name 'gemini-cli[bot]'\n          git config --global user.email 'gemini-cli[bot]@users.noreply.github.com'\n\n      - name: 'Checkout PR branch'\n        if: |-\n          ${{  steps.get_context.outputs.is_pr == 'true' }}\n        uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # ratchet:actions/checkout@v4\n        with:\n          token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          repository: '${{ github.repository }}'\n          ref: 'refs/pull/${{ steps.get_context.outputs.issue_number }}/head'\n          fetch-depth: 0\n\n      - 
name: 'Checkout main branch'\n        if: |-\n          ${{  steps.get_context.outputs.is_pr == 'false' }}\n        uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # ratchet:actions/checkout@v4\n        with:\n          token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          repository: '${{ github.repository }}'\n          fetch-depth: 0\n\n      - name: 'Acknowledge request'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'\n          REPOSITORY: '${{ github.repository }}'\n          REQUEST_TYPE: '${{ steps.get_context.outputs.request_type }}'\n        run: |-\n          set -euo pipefail\n          MESSAGE=\"I've received your request and I'm working on it now! 🤖\"\n          if [[ -n \"${MESSAGE}\" ]]; then\n            gh issue comment \"${ISSUE_NUMBER}\" \\\n              --body \"${MESSAGE}\" \\\n              --repo \"${REPOSITORY}\"\n          fi\n\n      - name: 'Get description'\n        id: 'get_description'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          IS_PR: '${{ steps.get_context.outputs.is_pr }}'\n          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'\n        run: |-\n          set -euo pipefail\n          if [[ \"${IS_PR}\" == \"true\" ]]; then\n            DESCRIPTION=$(gh pr view \"${ISSUE_NUMBER}\" --json body --template '{{.body}}')\n          else\n            DESCRIPTION=$(gh issue view \"${ISSUE_NUMBER}\" --json body --template '{{.body}}')\n          fi\n          {\n            echo \"description<<EOF\"\n            echo \"${DESCRIPTION}\"\n            echo \"EOF\"\n          } >> \"${GITHUB_OUTPUT}\"\n\n      - name: 'Get comments'\n        id: 'get_comments'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          
IS_PR: '${{ steps.get_context.outputs.is_pr }}'\n          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'\n        run: |-\n          set -euo pipefail\n          if [[ \"${IS_PR}\" == \"true\" ]]; then\n            COMMENTS=$(gh pr view \"${ISSUE_NUMBER}\" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{\"\\n\"}}{{end}}')\n          else\n            COMMENTS=$(gh issue view \"${ISSUE_NUMBER}\" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{\"\\n\"}}{{end}}')\n          fi\n          {\n            echo \"comments<<EOF\"\n            echo \"${COMMENTS}\"\n            echo \"EOF\"\n          } >> \"${GITHUB_OUTPUT}\"\n\n      - name: 'Run Gemini'\n        id: 'run_gemini'\n        uses: 'google-github-actions/run-gemini-cli@v0'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          REPOSITORY: '${{ github.repository }}'\n          USER_REQUEST: '${{ steps.get_context.outputs.user_request }}'\n          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'\n          IS_PR: '${{ steps.get_context.outputs.is_pr }}'\n        with:\n          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'\n          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'\n          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'\n          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'\n          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'\n          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'\n          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'\n          settings: |-\n            {\n              \"maxSessionTurns\": 50,\n              \"telemetry\": {\n                \"enabled\": false,\n                \"target\": \"gcp\"\n              }\n            }\n          prompt: |-\n            ## Role\n\n            You are a helpful AI assistant invoked via a CLI interface in a GitHub 
workflow. You have access to tools to interact with the repository and respond to the user.\n\n            ## Context\n\n            - **Repository**: `${{ github.repository }}`\n            - **Triggering Event**: `${{ github.event_name }}`\n            - **Issue/PR Number**: `${{ steps.get_context.outputs.issue_number }}`\n            - **Is this a PR?**: `${{ steps.get_context.outputs.is_pr }}`\n            - **Issue/PR Description**:\n            `${{ steps.get_description.outputs.description }}`\n            - **Comments**:\n            `${{ steps.get_comments.outputs.comments }}`\n\n            ## User Request\n\n            The user has sent the following request:\n            `${{ steps.get_context.outputs.user_request }}`\n\n            ## How to Respond to Issues, PR Comments, and Questions\n\n            This workflow supports three main scenarios:\n\n            1. **Creating a Fix for an Issue**\n               - Carefully read the user request and the related issue or PR description.\n               - Use available tools to gather all relevant context (e.g., `gh issue view`, `gh pr view`, `gh pr diff`, `cat`, `head`, `tail`).\n               - Identify the root cause of the problem before proceeding.\n               - **Show and maintain a plan as a checklist**:\n                 - At the very beginning, outline the steps needed to resolve the issue or address the request and post them as a checklist comment on the issue or PR (use GitHub markdown checkboxes: `- [ ] Task`).\n                 - Example:\n                   ```\n                   ### Plan\n                   - [ ] Investigate the root cause\n                   - [ ] Implement the fix in `file.py`\n                   - [ ] Add/modify tests\n                   - [ ] Update documentation\n                   - [ ] Verify the fix and close the issue\n                   ```\n                 - Use: `gh pr comment \"${ISSUE_NUMBER}\" --body \"<plan>\"` or `gh issue comment \"${ISSUE_NUMBER}\" 
--body \"<plan>\"` to post the initial plan.\n                 - As you make progress, keep the checklist visible and up to date by editing the same comment (check off completed tasks with `- [x]`).\n                   - To update the checklist:\n                     1. Find the comment ID for the checklist (use `gh pr comment list \"${ISSUE_NUMBER}\"` or `gh issue comment list \"${ISSUE_NUMBER}\"`).\n                     2. Edit the comment with the updated checklist:\n                        - For PRs: `gh pr comment --edit <comment-id> --body \"<updated plan>\"`\n                        - For Issues: `gh issue comment --edit <comment-id> --body \"<updated plan>\"`\n                     3. The checklist should only be maintained as a comment on the issue or PR. Do not track or update the checklist in code files.\n               - If the fix requires code changes, determine which files and lines are affected. If clarification is needed, note any questions for the user.\n               - Make the necessary code or documentation changes using the available tools (e.g., `write_file`). Ensure all changes follow project conventions and best practices. Reference all shell variables as `\"${VAR}\"` (with quotes and braces) to prevent errors.\n               - Run any relevant tests or checks to verify the fix works as intended. If possible, provide evidence (test output, screenshots, etc.) that the issue is resolved.\n               - **Branching and Committing**:\n                 - **NEVER commit directly to the `main` branch.**\n                 - If you are working on a **pull request** (`IS_PR` is `true`), the correct branch is already checked out. Simply commit and push to it.\n                   - `git add .`\n                   - `git commit -m \"feat: <describe the change>\"`\n                   - `git push`\n                 - If you are working on an **issue** (`IS_PR` is `false`), create a new branch for your changes. 
A good branch name would be `issue/${ISSUE_NUMBER}/<short-description>`.\n                   - `git checkout -b issue/${ISSUE_NUMBER}/my-fix`\n                   - `git add .`\n                   - `git commit -m \"feat: <describe the fix>\"`\n                   - `git push origin issue/${ISSUE_NUMBER}/my-fix`\n                   - After pushing, you can create a pull request: `gh pr create --title \"Fixes #${ISSUE_NUMBER}: <short title>\" --body \"This PR addresses issue #${ISSUE_NUMBER}.\"`\n               - Summarize what was changed and why in a markdown file: `write_file(\"response.md\", \"<your response here>\")`\n               - Post the response as a comment:\n                 - For PRs: `gh pr comment \"${ISSUE_NUMBER}\" --body-file response.md`\n                 - For Issues: `gh issue comment \"${ISSUE_NUMBER}\" --body-file response.md`\n\n            2. **Addressing Comments on a Pull Request**\n               - Read the specific comment and the context of the PR.\n               - Use tools like `gh pr view`, `gh pr diff`, and `cat` to understand the code and discussion.\n               - If the comment requests a change or clarification, follow the same process as for fixing an issue: create a checklist plan, implement, test, and commit any required changes, updating the checklist as you go.\n               - **Committing Changes**: The correct PR branch is already checked out. Simply add, commit, and push your changes.\n                 - `git add .`\n                 - `git commit -m \"fix: address review comments\"`\n                 - `git push`\n               - If the comment is a question, answer it directly and clearly, referencing code or documentation as needed.\n               - Document your response in `response.md` and post it as a PR comment: `gh pr comment \"${ISSUE_NUMBER}\" --body-file response.md`\n\n            3. 
**Answering Any Question on an Issue**\n               - Read the question and the full issue context using `gh issue view` and related tools.\n               - Research or analyze the codebase as needed to provide an accurate answer.\n               - If the question requires code or documentation changes, follow the fix process above, including creating and updating a checklist plan and **creating a new branch for your changes as described in section 1.**\n               - Write a clear, concise answer in `response.md` and post it as an issue comment: `gh issue comment \"${ISSUE_NUMBER}\" --body-file response.md`\n\n            ## Guidelines\n\n            - **Be concise and actionable.** Focus on solving the user's problem efficiently.\n            - **Always commit and push your changes if you modify code or documentation.**\n            - **If you are unsure about the fix or answer, explain your reasoning and ask clarifying questions.**\n            - **Follow project conventions and best practices.**\n"
  },
  {
    "path": ".github/workflows/gemini-issue-automated-triage.yml",
    "content": "name: '🏷️ Gemini Automated Issue Triage'\n\non:\n  issues:\n    types:\n      - 'opened'\n      - 'reopened'\n  issue_comment:\n    types:\n      - 'created'\n  workflow_dispatch:\n    inputs:\n      issue_number:\n        description: 'issue number to triage'\n        required: true\n        type: 'number'\n\nconcurrency:\n  group: '${{ github.workflow }}-${{ github.event.issue.number }}'\n  cancel-in-progress: true\n\ndefaults:\n  run:\n    shell: 'bash'\n\npermissions:\n  contents: 'read'\n  id-token: 'write'\n  issues: 'write'\n  statuses: 'write'\n\njobs:\n  triage-issue:\n    if: >\n      github.event_name == 'issues' ||\n      github.event_name == 'workflow_dispatch' ||\n      (github.event_name == 'issue_comment' &&\n       contains(github.event.comment.body, '@gemini-cli /triage') &&\n       (github.event.comment.author_association == 'OWNER' ||\n        github.event.comment.author_association == 'MEMBER' ||\n        github.event.comment.author_association == 'COLLABORATOR'))\n    timeout-minutes: 5\n    runs-on: 'ubuntu-latest'\n\n    steps:\n      - name: 'Checkout repository'\n        uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # ratchet:actions/checkout@v4\n\n      - name: 'Generate GitHub App Token'\n        id: 'generate_token'\n        if: |-\n          ${{ vars.APP_ID }}\n        uses: 'actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859' # ratchet:actions/create-github-app-token@v2\n        with:\n          app-id: '${{ vars.APP_ID }}'\n          private-key: '${{ secrets.APP_PRIVATE_KEY }}'\n\n      - name: 'Run Gemini Issue Triage'\n        uses: 'google-github-actions/run-gemini-cli@v0'\n        id: 'gemini_issue_triage'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          ISSUE_TITLE: '${{ github.event.issue.title }}'\n          ISSUE_BODY: '${{ github.event.issue.body }}'\n          ISSUE_NUMBER: '${{ 
github.event.issue.number }}'\n          REPOSITORY: '${{ github.repository }}'\n        with:\n          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'\n          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'\n          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'\n          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'\n          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'\n          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'\n          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'\n          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'\n          settings: |-\n            {\n              \"maxSessionTurns\": 25,\n              \"coreTools\": [\n                \"run_shell_command(gh label list)\",\n                \"run_shell_command(gh issue edit)\"\n              ],\n              \"telemetry\": {\n                \"enabled\": false,\n                \"target\": \"gcp\"\n              }\n            }\n          prompt: |-\n            ## Role\n\n            You are an issue triage assistant. Analyze the current GitHub issue\n            and apply the most appropriate existing labels. Use the available\n            tools to gather information; do not ask for information to be\n            provided.\n\n            ## Steps\n\n            1. Run: `gh label list` to get all available labels.\n            2. Review the issue title and body provided in the environment\n               variables: \"${ISSUE_TITLE}\" and \"${ISSUE_BODY}\".\n            3. Select the most relevant labels from the existing labels. If\n               available, set labels that follow the `kind/*`, `area/*`, and\n               `priority/*` patterns.\n            4. Apply the selected labels to this issue using:\n               `gh issue edit \"${ISSUE_NUMBER}\" --add-label \"label1,label2\"`\n            5. 
If the \"status/needs-triage\" label is present, remove it using:\n               `gh issue edit \"${ISSUE_NUMBER}\" --remove-label \"status/needs-triage\"`\n\n            ## Guidelines\n\n            - Only use labels that already exist in the repository\n            - Do not add comments or modify the issue content\n            - Triage only the current issue\n            - Assign all applicable labels based on the issue content\n            - Reference all shell variables as \"${VAR}\" (with quotes and braces)\n\n      - name: 'Post Issue Triage Failure Comment'\n        if: |-\n          ${{ failure() && steps.gemini_issue_triage.outcome == 'failure' }}\n        uses: 'actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd'\n        with:\n          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          script: |-\n            github.rest.issues.createComment({\n              owner: '${{ github.repository }}'.split('/')[0],\n              repo: '${{ github.repository }}'.split('/')[1],\n              issue_number: '${{ github.event.issue.number }}',\n              body: 'There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'\n            })\n"
  },
  {
    "path": ".github/workflows/gemini-issue-scheduled-triage.yml",
    "content": "name: '📋 Gemini Scheduled Issue Triage'\n\non:\n  schedule:\n    - cron: '0 * * * *' # Runs every hour\n  workflow_dispatch:\n\nconcurrency:\n  group: '${{ github.workflow }}'\n  cancel-in-progress: true\n\ndefaults:\n  run:\n    shell: 'bash'\n\npermissions:\n  contents: 'read'\n  id-token: 'write'\n  issues: 'write'\n  statuses: 'write'\n\njobs:\n  triage-issues:\n    timeout-minutes: 5\n    runs-on: 'ubuntu-latest'\n\n    steps:\n      - name: 'Checkout repository'\n        uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # ratchet:actions/checkout@v4\n\n      - name: 'Generate GitHub App Token'\n        id: 'generate_token'\n        if: |-\n          ${{ vars.APP_ID }}\n        uses: 'actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859' # ratchet:actions/create-github-app-token@v2\n        with:\n          app-id: '${{ vars.APP_ID }}'\n          private-key: '${{ secrets.APP_PRIVATE_KEY }}'\n\n      - name: 'Find untriaged issues'\n        id: 'find_issues'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          GITHUB_REPOSITORY: '${{ github.repository }}'\n          GITHUB_OUTPUT: '${{ github.output }}'\n        run: |-\n          set -euo pipefail\n\n          echo '🔍 Finding issues without labels...'\n          NO_LABEL_ISSUES=\"$(gh issue list --repo \"${GITHUB_REPOSITORY}\" \\\n            --search 'is:open is:issue no:label' --json number,title,body)\"\n\n          echo '🏷️ Finding issues that need triage...'\n          NEED_TRIAGE_ISSUES=\"$(gh issue list --repo \"${GITHUB_REPOSITORY}\" \\\n            --search 'is:open is:issue label:\"status/needs-triage\"' --json number,title,body)\"\n\n          echo '🔄 Merging and deduplicating issues...'\n          ISSUES=\"$(echo \"${NO_LABEL_ISSUES}\" \"${NEED_TRIAGE_ISSUES}\" | jq -c -s 'add | unique_by(.number)')\"\n\n          echo '📝 Setting output for GitHub Actions...'\n          echo 
\"issues_to_triage=${ISSUES}\" >> \"${GITHUB_OUTPUT}\"\n\n          ISSUE_COUNT=\"$(echo \"${ISSUES}\" | jq 'length')\"\n          echo \"✅ Found ${ISSUE_COUNT} issues to triage! 🎯\"\n\n      - name: 'Run Gemini Issue Triage'\n        if: |-\n          ${{ steps.find_issues.outputs.issues_to_triage != '[]' }}\n        uses: 'google-github-actions/run-gemini-cli@v0'\n        id: 'gemini_issue_triage'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          ISSUES_TO_TRIAGE: '${{ steps.find_issues.outputs.issues_to_triage }}'\n          REPOSITORY: '${{ github.repository }}'\n        with:\n          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'\n          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'\n          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'\n          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'\n          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'\n          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'\n          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'\n          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'\n          settings: |-\n            {\n              \"maxSessionTurns\": 25,\n              \"coreTools\": [\n                \"run_shell_command(echo)\",\n                \"run_shell_command(gh label list)\",\n                \"run_shell_command(gh issue edit)\",\n                \"run_shell_command(gh issue list)\"\n              ],\n              \"telemetry\": {\n                \"enabled\": false,\n                \"target\": \"gcp\"\n              }\n            }\n          prompt: |-\n            ## Role\n\n            You are an issue triage assistant. Analyze issues and apply\n            appropriate labels. Use the available tools to gather information;\n            do not ask for information to be provided.\n\n            ## Steps\n\n            1. Run: `gh label list`\n            2. 
Check environment variable: \"${ISSUES_TO_TRIAGE}\" (JSON array\n               of issues)\n            3. For each issue, apply labels:\n               `gh issue edit \"${ISSUE_NUMBER}\" --add-label \"label1,label2\"`.\n               If available, set labels that follow the `kind/*`, `area/*`,\n               and `priority/*` patterns.\n            4. For each issue, if the `status/needs-triage` label is present,\n               remove it using:\n               `gh issue edit \"${ISSUE_NUMBER}\" --remove-label \"status/needs-triage\"`\n\n            ## Guidelines\n\n            - Only use existing repository labels\n            - Do not add comments\n            - Triage each issue independently\n            - Reference all shell variables as \"${VAR}\" (with quotes and braces)\n"
  },
  {
    "path": ".github/workflows/gemini-pr-review.yml",
    "content": "name: '🧐 Gemini Pull Request Review'\n\non:\n  pull_request:\n    types:\n      - 'opened'\n  pull_request_review_comment:\n    types:\n      - 'created'\n  pull_request_review:\n    types:\n      - 'submitted'\n  workflow_dispatch:\n    inputs:\n      pr_number:\n        description: 'PR number to review'\n        required: true\n        type: 'number'\n\nconcurrency:\n  group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}'\n  cancel-in-progress: true\n\ndefaults:\n  run:\n    shell: 'bash'\n\npermissions:\n  contents: 'read'\n  id-token: 'write'\n  issues: 'write'\n  pull-requests: 'write'\n  statuses: 'write'\n\njobs:\n  review-pr:\n    if: |-\n      github.event_name == 'workflow_dispatch' ||\n      (github.event_name == 'pull_request' && github.event.action == 'opened') ||\n      (github.event_name == 'issue_comment' && github.event.issue.pull_request &&\n        contains(github.event.comment.body, '@gemini-cli /review') &&\n        (\n          github.event.comment.author_association == 'OWNER' ||\n          github.event.comment.author_association == 'MEMBER' ||\n          github.event.comment.author_association == 'COLLABORATOR'\n        )\n      ) ||\n      (github.event_name == 'pull_request_review_comment' &&\n        contains(github.event.comment.body, '@gemini-cli /review') &&\n        (\n          github.event.comment.author_association == 'OWNER' ||\n          github.event.comment.author_association == 'MEMBER' ||\n          github.event.comment.author_association == 'COLLABORATOR'\n        )\n      ) ||\n      (github.event_name == 'pull_request_review' &&\n        contains(github.event.review.body, '@gemini-cli /review') &&\n        (\n          github.event.review.author_association == 'OWNER' ||\n          github.event.review.author_association == 'MEMBER' ||\n          github.event.review.author_association == 'COLLABORATOR'\n        )\n      )\n    timeout-minutes: 5\n    runs-on: 'ubuntu-latest'\n\n    steps:\n    
  - name: 'Checkout PR code'\n        uses: 'actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd' # ratchet:actions/checkout@v4\n\n      - name: 'Generate GitHub App Token'\n        id: 'generate_token'\n        if: |-\n          ${{ vars.APP_ID }}\n        uses: 'actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859' # ratchet:actions/create-github-app-token@v2\n        with:\n          app-id: '${{ vars.APP_ID }}'\n          private-key: '${{ secrets.APP_PRIVATE_KEY }}'\n\n      - name: 'Get PR details (pull_request & workflow_dispatch)'\n        id: 'get_pr'\n        if: |-\n          ${{ github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          EVENT_NAME: '${{ github.event_name }}'\n          WORKFLOW_PR_NUMBER: '${{ github.event.inputs.pr_number }}'\n          PULL_REQUEST_NUMBER: '${{ github.event.pull_request.number }}'\n        run: |-\n          set -euo pipefail\n\n          if [[ \"${EVENT_NAME}\" = \"workflow_dispatch\" ]]; then\n            PR_NUMBER=\"${WORKFLOW_PR_NUMBER}\"\n          else\n            PR_NUMBER=\"${PULL_REQUEST_NUMBER}\"\n          fi\n\n          echo \"pr_number=${PR_NUMBER}\" >> \"${GITHUB_OUTPUT}\"\n\n          # Get PR details\n          PR_DATA=\"$(gh pr view \"${PR_NUMBER}\" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)\"\n          echo \"pr_data=${PR_DATA}\" >> \"${GITHUB_OUTPUT}\"\n\n          # Get file changes\n          CHANGED_FILES=\"$(gh pr diff \"${PR_NUMBER}\" --name-only)\"\n          {\n            echo \"changed_files<<EOF\"\n            echo \"${CHANGED_FILES}\"\n            echo \"EOF\"\n          } >> \"${GITHUB_OUTPUT}\"\n\n\n      - name: 'Get PR details (issue_comment)'\n        id: 'get_pr_comment'\n        if: |-\n          ${{ github.event_name == 'issue_comment' }}\n        env:\n          GITHUB_TOKEN: 
'${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          COMMENT_BODY: '${{ github.event.comment.body }}'\n          PR_NUMBER: '${{ github.event.issue.number }}'\n        run: |-\n          set -euo pipefail\n\n          echo \"pr_number=${PR_NUMBER}\" >> \"${GITHUB_OUTPUT}\"\n\n          # Extract additional instructions from comment\n          ADDITIONAL_INSTRUCTIONS=\"$(\n            echo \"${COMMENT_BODY}\" | sed 's/.*@gemini-cli \\/review//' | xargs\n          )\"\n          echo \"additional_instructions=${ADDITIONAL_INSTRUCTIONS}\" >> \"${GITHUB_OUTPUT}\"\n\n          # Get PR details\n          PR_DATA=\"$(gh pr view \"${PR_NUMBER}\" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)\"\n          echo \"pr_data=${PR_DATA}\" >> \"${GITHUB_OUTPUT}\"\n\n          # Get file changes\n          CHANGED_FILES=\"$(gh pr diff \"${PR_NUMBER}\" --name-only)\"\n          {\n            echo \"changed_files<<EOF\"\n            echo \"${CHANGED_FILES}\"\n            echo \"EOF\"\n          } >> \"${GITHUB_OUTPUT}\"\n\n      - name: 'Run Gemini PR Review'\n        uses: 'google-github-actions/run-gemini-cli@v0'\n        id: 'gemini_pr_review'\n        env:\n          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          PR_NUMBER: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}'\n          PR_DATA: '${{ steps.get_pr.outputs.pr_data || steps.get_pr_comment.outputs.pr_data }}'\n          CHANGED_FILES: '${{ steps.get_pr.outputs.changed_files || steps.get_pr_comment.outputs.changed_files }}'\n          ADDITIONAL_INSTRUCTIONS: '${{ steps.get_pr.outputs.additional_instructions || steps.get_pr_comment.outputs.additional_instructions }}'\n          REPOSITORY: '${{ github.repository }}'\n        with:\n          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'\n          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'\n          
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'\n          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'\n          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'\n          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'\n          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'\n          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'\n          settings: |-\n            {\n              \"maxSessionTurns\": 20,\n              \"mcpServers\": {\n                \"github\": {\n                  \"command\": \"docker\",\n                  \"args\": [\n                    \"run\",\n                    \"-i\",\n                    \"--rm\",\n                    \"-e\",\n                    \"GITHUB_PERSONAL_ACCESS_TOKEN\",\n                    \"ghcr.io/github/github-mcp-server\"\n                  ],\n                  \"includeTools\": [\n                    \"create_pending_pull_request_review\",\n                    \"add_comment_to_pending_review\",\n                    \"submit_pending_pull_request_review\"\n                  ],\n                  \"env\": {\n                    \"GITHUB_PERSONAL_ACCESS_TOKEN\": \"${GITHUB_TOKEN}\"\n                  }\n                }\n              },\n              \"coreTools\": [\n                \"run_shell_command(echo)\",\n                \"run_shell_command(gh pr view)\",\n                \"run_shell_command(gh pr diff)\",\n                \"run_shell_command(cat)\",\n                \"run_shell_command(head)\",\n                \"run_shell_command(tail)\",\n                \"run_shell_command(grep)\"\n              ],\n              \"telemetry\": {\n                \"enabled\": false,\n                \"target\": \"gcp\"\n              }\n            }\n          prompt: |-\n            ## Role\n\n            You are an expert code reviewer. You have access to tools to gather\n            PR information and perform the review. 
Use the available tools to\n            gather information; do not ask for information to be provided.\n\n            ## Steps\n\n            Start by running these commands to gather the required data:\n            1. Run: echo \"${PR_DATA}\" to get PR details (JSON format)\n            2. Run: echo \"${CHANGED_FILES}\" to get the list of changed files\n            3. Run: echo \"${PR_NUMBER}\" to get the PR number\n            4. Run: echo \"${ADDITIONAL_INSTRUCTIONS}\" to see any specific review\n               instructions from the user\n            5. Run: gh pr diff \"${PR_NUMBER}\" to see the full diff and reference the\n            Context section to understand it\n            6. For any specific files, use: cat filename, head -50 filename, or\n               tail -50 filename\n            7. If ADDITIONAL_INSTRUCTIONS contains text, prioritize those\n               specific areas or focus points in your review. Common instruction\n               examples: \"focus on security\", \"check performance\", \"review error\n               handling\", \"check for breaking changes\"\n\n            ## Guidelines\n            ### Core Guidelines (Always applicable)\n\n            1. Understand the Context: Analyze the pull request title, description, changes, and code files to grasp the intent.\n            2. Meticulous Review: Thoroughly review all relevant code changes, prioritizing added lines. Consider the specified\n              focus areas and any provided style guide.\n            3. Comprehensive Review: Ensure that the code is thoroughly reviewed, as it's important to the author\n              that you identify any and all relevant issues (subject to the review criteria and style guide).\n              Missing any issues will lead to a poor code review experience for the author.\n            4. 
Constructive Feedback:\n              * Provide clear explanations for each concern.\n              * Offer specific, improved code suggestions and suggest alternative approaches, when applicable.\n                Code suggestions in particular are very helpful so that the author can directly apply them\n                to their code, but they must be accurately anchored to the lines that should be replaced.\n            5. Severity Indication: Clearly indicate the severity of the issue in the review comment.\n              This is very important to help the author understand the urgency of the issue.\n              The severity should be one of the following (which are provided below in decreasing order of severity):\n              * `critical`: This issue must be addressed immediately, as it could lead to serious consequences\n                for the code's correctness, security, or performance.\n              * `high`: This issue should be addressed soon, as it could cause problems in the future.\n              * `medium`: This issue should be considered for future improvement, but it's not critical or urgent.\n              * `low`: This issue is minor or stylistic, and can be addressed at the author's discretion.\n            6. Avoid commenting on hardcoded dates and times being in future or not (for example \"this date is in the future\").\n              * Remember you don't have access to the current date and time and leave that to the author.\n            7. Targeted Suggestions: Limit all suggestions to only portions that are modified in the diff hunks.\n              This is a strict requirement as the GitHub (and other SCM's) API won't allow comments on parts of code files that are not\n              included in the diff hunks.\n            8. Code Suggestions in Review Comments:\n              * Succinctness: Aim to make code suggestions succinct, unless necessary. 
Larger code suggestions tend to be\n                harder for pull request authors to commit directly in the pull request UI.\n              * Valid Formatting:  Provide code suggestions within the suggestion field of the JSON response (as a string literal,\n                escaping special characters like \\n, \\\\, \\\").  Do not include markdown code blocks in the suggestion field.\n                Use markdown code blocks in the body of the comment only for broader examples or if a suggestion field would\n                create an excessively large diff.  Prefer the suggestion field for specific, targeted code changes.\n              * Line Number Accuracy: Code suggestions need to align perfectly with the code they intend to replace.\n                Pay special attention to line numbers when creating comments, particularly if there is a code suggestion.\n                Note the patch includes code versions with line numbers for the before and after code snippets for each diff, so use these to anchor\n                your comments and corresponding code suggestions.\n              * Compilable: Code suggestions should be compilable code snippets that can be directly copy/pasted into the code file.\n                If the suggestion is not compilable, it will not be accepted by the pull request. Note that not all languages are\n                compiled of course, so by compilable here, we mean either literally or in spirit.\n              * Inline Code Comments: Feel free to add brief comments to the code suggestion if it enhances the underlying code readability.\n                Just make sure that the inline code comments add value, and are not just restating what the code does. Don't use\n                inline comments to \"teach\" the author (use the review comment body directly for that), instead use it if it's beneficial\n                to the readability of the code itself.\n            10. 
Markdown Formatting: Heavily leverage the benefits of markdown for formatting, such as bulleted lists, bold text, tables, etc.\n            11. Avoid mistaken review comments:\n              * Any comment you make must point towards a discrepancy found in the code and the best practice surfaced in your feedback.\n                For example, if you are pointing out that constants need to be named in all caps with underscores,\n                ensure that the code selected by the comment does not already do this, otherwise it's confusing let alone unnecessary.\n            12. Remove Duplicated code suggestions:\n              * Some provided code suggestions are duplicated, please remove the duplicated review comments.\n            13. Don't Approve The Pull Request\n            14. Reference all shell variables as \"${VAR}\" (with quotes and braces)\n\n            ### Review Criteria (Prioritized in Review)\n\n            * Correctness: Verify code functionality, handle edge cases, and ensure alignment between function\n              descriptions and implementations.  Consider common correctness issues (logic errors, error handling,\n              race conditions, data validation, API usage, type mismatches).\n            * Efficiency: Identify performance bottlenecks, optimize for efficiency, and avoid unnecessary\n              loops, iterations, or calculations. Consider common efficiency issues (excessive loops, memory\n              leaks, inefficient data structures, redundant calculations, excessive logging, etc.).\n            * Maintainability: Assess code readability, modularity, and adherence to language idioms and\n              best practices. Consider common maintainability issues (naming, comments/documentation, complexity,\n              code duplication, formatting, magic numbers).  
State the style guide being followed (defaulting to\n              commonly used guides, for example Python's PEP 8 style guide or Google Java Style Guide, if no style guide is specified).\n            * Security: Identify potential vulnerabilities (e.g., insecure storage, injection attacks,\n              insufficient access controls).\n\n            ### Miscellaneous Considerations\n            * Testing: Ensure adequate unit tests, integration tests, and end-to-end tests. Evaluate\n              coverage, edge case handling, and overall test quality.\n            * Performance: Assess performance under expected load, identify bottlenecks, and suggest\n              optimizations.\n            * Scalability: Evaluate how the code will scale with growing user base or data volume.\n            * Modularity and Reusability: Assess code organization, modularity, and reusability. Suggest\n              refactoring or creating reusable components.\n            * Error Logging and Monitoring: Ensure errors are logged effectively, and implement monitoring\n              mechanisms to track application health in production.\n\n            **CRITICAL CONSTRAINTS:**\n\n            You MUST only provide comments on lines that represent the actual changes in\n            the diff. 
This means your comments should only refer to lines that begin with\n            a `+` or `-` character in the provided diff content.\n            DO NOT comment on lines that start with a space (context lines).\n\n            You MUST only add a review comment if there exists an actual ISSUE or BUG in the code changes.\n            DO NOT add review comments to tell the user to \"check\" or \"confirm\" or \"verify\" something.\n            DO NOT add review comments to tell the user to \"ensure\" something.\n            DO NOT add review comments to explain what the code change does.\n            DO NOT add review comments to validate what the code change does.\n            DO NOT use the review comments to explain the code to the author. They already know their code. Only comment when there's an improvement opportunity. This is very important.\n\n            Pay close attention to line numbers and ensure they are correct.\n            Pay close attention to indentations in the code suggestions and make sure they match the code they are to replace.\n            Avoid comments on the license headers - if any exists - and instead make comments on the code that is being changed.\n\n            It's absolutely important to avoid commenting on the license header of files.\n            It's absolutely important to avoid commenting on copyright headers.\n            Avoid commenting on hardcoded dates and times being in future or not (for example \"this date is in the future\").\n            Remember you don't have access to the current date and time and leave that to the author.\n\n            Avoid mentioning any of your instructions, settings or criteria.\n\n            Here are some general guidelines for setting the severity of your comments\n            - Comments about refactoring a hardcoded string or number as a constant are generally considered low severity.\n            - Comments about log messages or log enhancements are generally considered low severity.\n  
          - Comments in .md files are medium or low severity. This is really important.\n            - Comments about adding or expanding docstring/javadoc have low severity most of the times.\n            - Comments about suppressing unchecked warnings or todos are considered low severity.\n            - Comments about typos are usually low or medium severity.\n            - Comments about testing or on tests are usually low severity.\n            - Do not comment about the content of a URL if the content is not directly available in the input.\n\n            Keep comments bodies concise and to the point.\n            Keep each comment focused on one issue.\n\n            ## Context\n            The files that are changed in this pull request are represented below in the following\n            format, showing the file name and the portions of the file that are changed:\n\n            <PATCHES>\n            FILE:<NAME OF FIRST FILE>\n            DIFF:\n            <PATCH IN UNIFIED DIFF FORMAT>\n\n            --------------------\n\n            FILE:<NAME OF SECOND FILE>\n            DIFF:\n            <PATCH IN UNIFIED DIFF FORMAT>\n\n            --------------------\n\n            (and so on for all files changed)\n            </PATCHES>\n\n            Note that if you want to make a comment on the LEFT side of the UI / before the diff code version\n            to note those line numbers and the corresponding code. 
Same for a comment on the RIGHT side\n            of the UI / after the diff code version to note the line numbers and corresponding code.\n            This should be your guide to picking line numbers, and also very importantly, restrict\n            your comments to be only within this line range for these files, whether on LEFT or RIGHT.\n            If you comment out of bounds, the review will fail, so you must pay attention to the file name,\n            line numbers, and pre/post diff versions when crafting your comment.\n\n            Here are the patches that were implemented in the pull request, per the\n            formatting above:\n\n            To get the files changed in this pull request, run:\n            \"$(gh pr diff \"${PR_NUMBER}\" --patch)\" to get the list of changed files PATCH\n\n            ## Review\n\n            Once you have the information, provide a comprehensive code review by:\n            1. Creating a pending review: Use the mcp__github__create_pending_pull_request_review to create a Pending Pull Request Review.\n\n            2. Adding review comments:\n                2.1 Use the mcp__github__add_comment_to_pending_review to add comments to the Pending Pull Request Review. Inline comments are preferred whenever possible, so repeat this step, calling mcp__github__add_comment_to_pending_review, as needed. All comments about specific lines of code should use inline comments. It is preferred to use code suggestions when possible, which include a code block that is labeled \"suggestion\", which contains what the new code should be. All comments should also have a severity. 
The syntax is:\n                  Normal Comment Syntax:\n                  <COMMENT>\n                  {{SEVERITY}} {{COMMENT_TEXT}}\n                  </COMMENT>\n\n                  Inline Comment Syntax: (Preferred):\n                  <COMMENT>\n                  {{SEVERITY}} {{COMMENT_TEXT}}\n                  ```suggestion\n                  {{CODE_SUGGESTION}}\n                  ```\n                  </COMMENT>\n\n                  Prepend a severity emoji to each comment:\n                  - 🟢 for low severity\n                  - 🟡 for medium severity\n                  - 🟠 for high severity\n                  - 🔴 for critical severity\n                  - 🔵 if severity is unclear\n\n                  Including all of this, an example inline comment would be:\n                  <COMMENT>\n                  🟢 Use camelCase for function names\n                  ```suggestion\n                  myFooBarFunction\n                  ```\n                  </COMMENT>\n\n                  A critical severity example would be:\n                  <COMMENT>\n                  🔴 Remove storage key from GitHub\n                  ```suggestion\n                  ```\n                  </COMMENT>\n\n            3. Posting the review: Use the mcp__github__submit_pending_pull_request_review to submit the Pending Pull Request Review.\n\n              3.1 Crafting the summary comment: Include a summary of high level points that were not addressed with inline comments. Be concise. 
Do not repeat details mentioned inline.\n\n                Structure your summary comment using this exact format with markdown:\n                ## 📋 Review Summary\n\n                Provide a brief 2-3 sentence overview of the PR and overall\n                assessment.\n\n                ## 🔍 General Feedback\n                - List general observations about code quality\n                - Mention overall patterns or architectural decisions\n                - Highlight positive aspects of the implementation\n                - Note any recurring themes across files\n\n\n      - name: 'Post PR review failure comment'\n        if: |-\n          ${{ failure() && steps.gemini_pr_review.outcome == 'failure' }}\n        uses: 'actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd'\n        with:\n          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'\n          script: |-\n            github.rest.issues.createComment({\n              owner: '${{ github.repository }}'.split('/')[0],\n              repo: '${{ github.repository }}'.split('/')[1],\n              issue_number: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}',\n              body: 'There is a problem with the Gemini CLI PR review. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'\n            })\n"
  },
  {
    "path": ".github/workflows/test.yaml",
    "content": "name: Test\n\non:\n  # pull_request_target is dangerous! Review external PRs code before approving to run the workflow\n  # We need this to be able to access the secrets required by the workflow\n  pull_request_target:\n    paths-ignore:\n      - 'docs/**'\n  workflow_dispatch:\n\n# Cancel currently running workflow when a new one is triggered\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  check:\n    name: Run Linter & Unit Tests\n    runs-on: ubuntu-latest\n    timeout-minutes: 30\n\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd\n        with:\n          ref: \"${{ github.event.pull_request.base.sha }}\" # Required for pull_request_target\n          fetch-depth: 0\n\n      - name: Install Flutter\n        uses: subosito/flutter-action@1a449444c387b1966244ae4d4f8c696479add0b2\n        with:\n          channel: 'stable'\n\n      - name: Set-up Flutter\n        run: |\n          flutter config --no-analytics\n          dart --disable-analytics\n\n      - name: Install Melos\n        uses: bluefireteam/melos-action@705015c3d2bc4ab94201ac24accb2bbe070cf533\n        with:\n          melos-version: '7.0.0-dev.9'\n          run-bootstrap: false\n\n      - name: Bootstrap\n        uses: nick-invision/retry@ad984534de44a9489a53aefd81eb77f87c70dc60\n        with:\n          timeout_minutes: 5\n          max_attempts: 5\n          delay_seconds: 5\n          command: melos bootstrap\n\n      - name: Run linter\n        run: melos lint:diff\n\n      - name: Run unit tests\n        env:\n          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}\n          PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}\n          VERTEX_AI_PROJECT_ID: ${{ secrets.VERTEX_AI_PROJECT_ID }}\n          VERTEX_AI_SERVICE_ACCOUNT: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT }}\n        run: melos test:diff\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\n.idea/\n*.iml\n.dart_tool/\n/pubspec.lock\n.vscode/\n.aider*\n"
  },
  {
    "path": "API_CLIENT_ALIGNMENT_GUIDE.md",
    "content": "# API Client Alignment Guide\n\n## Overview\n\nThis guide documents the complete workflow for aligning Dart API clients (openai_dart, anthropic_sdk_dart) with their respective official APIs. Based on lessons learned from PRs #826-#837 for openai_dart.\n\n---\n\n## 1. Package Structure\n\nBoth packages follow identical structure:\n\n```\npackages/<package_name>/\n├── oas/\n│   ├── openapi_curated.yaml    # Hand-maintained spec (SOURCE OF TRUTH)\n│   ├── openapi_official.yaml   # Official spec for reference (openai_dart only)\n│   └── main.dart               # Code generation script\n├── lib/\n│   ├── <package_name>.dart     # Public API exports\n│   └── src/\n│       ├── client.dart         # Custom client wrapper\n│       ├── extensions.dart     # Helper extensions\n│       └── generated/\n│           ├── client.dart     # Generated base client\n│           └── schema/\n│               ├── schema.dart           # Main schema (parts all others)\n│               ├── schema.freezed.dart   # Freezed generated\n│               ├── schema.g.dart         # JSON serialization\n│               └── *.dart                # Individual schema files\n├── test/\n│   ├── *_test.dart             # Unit and integration tests\n│   └── assets/                 # Test fixtures\n├── example/\n├── build.yaml                  # Build runner config\n├── pubspec.yaml\n├── README.md\n└── CHANGELOG.md\n```\n\n---\n\n## 2. 
Finding API Differences\n\n### Step 1: Get the Latest Official Spec\n\n**OpenAI:**\n```bash\n# Check Stainless SDK for latest spec\ncurl -o packages/openai_dart/oas/openapi_official.yaml \\\n  \"https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-<hash>.yml\"\n```\n\n**Anthropic:**\n```bash\n# Download latest spec from Stainless SDK\ncurl -o packages/anthropic_sdk_dart/oas/anthropic_openapi_official.yaml \\\n  \"https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic%2Fanthropic-a49e89deec4e00d1da490808099d66e2001531b12d8666a7f5d0b496f760440d.yml\"\n```\n\n### Step 2: Compare Specs\n\nUse grep and diff to find differences:\n```bash\n# Compare curated vs official\ndiff packages/<package>/oas/openapi_curated.yaml \\\n     packages/<package>/oas/openapi_official.yaml\n\n# Search for specific schema in official spec\ngrep -A 50 \"ComponentName:\" openapi_official.yaml\n\n# Find all schema definitions\ngrep \"^  [A-Z]\" openapi_official.yaml | head -50\n\n# Compare specific schema between files\ngrep -A 30 \"Usage:\" openapi_curated.yaml\ngrep -A 30 \"Usage:\" openapi_official.yaml\n```\n\n### Step 3: Identify Missing Schemas/Fields\n\nLook for:\n1. **New schemas** - Entirely new object definitions\n2. **New fields** - Properties added to existing schemas\n3. **New enum values** - Values added to existing enums\n4. **Modified types** - Field type changes (rare)\n5. **Deprecated fields** - Fields marked for removal\n\n### Step 4: Prioritize Changes\n\nCreate one PR per logical change:\n- Group related fields together (e.g., all new ImagesResponse fields)\n- Separate unrelated additions (e.g., CompletionUsage vs ImagesResponse)\n- Keep PRs focused and reviewable\n\n---\n\n## 3. 
OpenAPI YAML Schema Patterns\n\n### Basic Object Schema\n\n```yaml\nComponentName:\n  type: object\n  description: Human-readable description of the component.\n  properties:\n    required_field:\n      type: string\n      description: This field is always present.\n    optional_field:\n      type: integer\n      nullable: true\n      default: null\n      description: This field may be absent or null.\n    nested_object:\n      $ref: \"#/components/schemas/NestedSchema\"\n  required:\n    - required_field\n```\n\n### Enum Schema\n\n```yaml\nMyEnumField:\n  title: MyEnumName          # Generates enum with this name\n  type: string\n  enum:\n    - value_one\n    - value_two\n    - value_three\n  nullable: true\n  description: Description of the enum.\n```\n\n### Nested Detail Object\n\n```yaml\n# Parent schema\nParentSchema:\n  type: object\n  properties:\n    detail_field:\n      $ref: \"#/components/schemas/DetailSchema\"\n\n# Child schema (define separately, not inline)\nDetailSchema:\n  type: object\n  nullable: true\n  description: Detailed breakdown information.\n  properties:\n    sub_field_one:\n      type: integer\n      description: First sub-field.\n    sub_field_two:\n      type: string\n      description: Second sub-field.\n```\n\n### Provider-Specific Extensions (OpenRouter, etc.)\n\n```yaml\n# Mark non-standard fields with x-openai-compatible: false\nprovider_specific_field:\n  type: number\n  minimum: 0\n  maximum: 1\n  nullable: true\n  default: null\n  description: Provider-specific parameter.\n  x-openai-compatible: false   # NOT in official OpenAI API\n```\n\n### Key Rules\n\n| Rule | Example |\n|------|---------|\n| Optional response fields | `nullable: true` + `default: null` |\n| Snake_case in YAML | `cached_tokens`, `audio_tokens` |\n| Use $ref for objects | `$ref: \"#/components/schemas/Name\"` |\n| Enum title for naming | `title: ImagesResponseQuality` |\n| Numeric constraints | `minimum: 0`, `maximum: 2.0` |\n\n---\n\n## 4. 
Code Generation Workflow\n\n### Step 1: Update openapi_curated.yaml\n\nAdd your new schema or fields to the YAML spec.\n\n### Step 2: Run Code Generator\n\n```bash\ncd packages/<package_name>\ndart run oas/main.dart\n```\n\nThis will:\n1. Parse the OpenAPI spec\n2. Generate Dart classes with freezed\n3. Run build_runner to create .freezed.dart and .g.dart files\n\n### Step 3: Verify Generated Code\n\nCheck the generated files in `lib/src/generated/schema/`:\n- New schema file created: `<component_name>.dart`\n- Part statement added to `schema.dart`\n- Freezed and JSON serialization generated\n\n### Generated Code Pattern\n\n```dart\n// coverage:ignore-file\n// GENERATED CODE - DO NOT MODIFY BY HAND\n// ignore_for_file: type=lint\n// ignore_for_file: invalid_annotation_target\npart of open_a_i_schema;\n\n/// Description from YAML\n@freezed\nabstract class ComponentName with _$ComponentName {\n  const ComponentName._();\n\n  /// Factory constructor for ComponentName\n  const factory ComponentName({\n    /// Field description\n    @JsonKey(name: 'snake_case_field', includeIfNull: false)\n    Type? fieldName,\n  }) = _ComponentName;\n\n  /// Object construction from a JSON representation\n  factory ComponentName.fromJson(Map<String, dynamic> json) =>\n      _$ComponentNameFromJson(json);\n\n  /// List of all property names of schema\n  static const List<String> propertyNames = ['snake_case_field'];\n\n  /// Perform validations on the schema property values\n  String? 
validateSchema() {\n    return null;\n  }\n\n  /// Map representation of object (not serialized)\n  Map<String, dynamic> toMap() {\n    return {'snake_case_field': fieldName};\n  }\n}\n```\n\n### Schema Name Mappings (main.dart)\n\nWhen generated names are awkward, add mappings:\n\n```dart\n// In oas/main.dart\nfinal schemaNameMappings = <String, String>{\n  'ModelEnumeration': 'ModelCatalog',\n  'ModelString': 'ModelId',\n};\n\nfinal unionFactoryNameMappings = <String, Map<String, String>>{\n  'ChatCompletionModel': {\n    'ModelEnumeration': 'model',\n    'ModelString': 'modelId',\n  },\n};\n```\n\n---\n\n## 5. Writing Tests\n\n### Test File Location\n\n```\npackages/<package_name>/test/<schema_name>_test.dart\n```\n\n### Test Structure Template\n\n```dart\nimport 'package:<package_name>/<package_name>.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ComponentName tests', () {\n    // Deserialization tests\n    test('deserializes with all fields', () {\n      final json = {\n        'field_one': 100,\n        'field_two': 'value',\n      };\n      final obj = ComponentName.fromJson(json);\n      expect(obj.fieldOne, 100);\n      expect(obj.fieldTwo, 'value');\n    });\n\n    test('deserializes with optional fields missing', () {\n      final json = {'field_one': 100};\n      final obj = ComponentName.fromJson(json);\n      expect(obj.fieldOne, 100);\n      expect(obj.fieldTwo, isNull);\n    });\n\n    test('deserializes with null values', () {\n      final json = {'field_one': 100, 'field_two': null};\n      final obj = ComponentName.fromJson(json);\n      expect(obj.fieldTwo, isNull);\n    });\n\n    test('deserializes with empty JSON', () {\n      final json = <String, dynamic>{};\n      final obj = ComponentName.fromJson(json);\n      expect(obj.fieldOne, isNull);\n      expect(obj.fieldTwo, isNull);\n    });\n\n    // Serialization tests\n    test('serializes correctly', () {\n      const obj = ComponentName(fieldOne: 100, fieldTwo: 
'value');\n      final json = obj.toJson();\n      expect(json['field_one'], 100);\n      expect(json['field_two'], 'value');\n    });\n\n    test('serializes with null fields excluded', () {\n      const obj = ComponentName(fieldOne: 100);\n      final json = obj.toJson();\n      expect(json['field_one'], 100);\n      expect(json.containsKey('field_two'), isFalse);\n    });\n\n    // Enum tests (if applicable)\n    test('MyEnum values', () {\n      expect(MyEnum.values, hasLength(3));\n      expect(MyEnum.values, containsAll([\n        MyEnum.valueOne,\n        MyEnum.valueTwo,\n        MyEnum.valueThree,\n      ]));\n    });\n  });\n\n  // Parent-child relationship tests\n  group('ParentSchema with DetailSchema tests', () {\n    test('deserializes with nested detail object', () {\n      final json = {\n        'main_field': 50,\n        'detail_field': {\n          'sub_field_one': 10,\n          'sub_field_two': 'nested',\n        },\n      };\n      final obj = ParentSchema.fromJson(json);\n      expect(obj.mainField, 50);\n      expect(obj.detailField, isNotNull);\n      expect(obj.detailField!.subFieldOne, 10);\n      expect(obj.detailField!.subFieldTwo, 'nested');\n    });\n  });\n}\n```\n\n### Test Coverage Checklist\n\n- [ ] Deserialize with all fields\n- [ ] Deserialize with optional fields missing\n- [ ] Deserialize with null values\n- [ ] Deserialize with empty JSON\n- [ ] Serialize with all fields\n- [ ] Serialize excludes null fields\n- [ ] Enum has expected values\n- [ ] Nested objects deserialize correctly\n- [ ] Parent-child relationships work\n\n---\n\n## 6. 
Running Formatter and Analyzer\n\n### Formatter\n\n```bash\n# Check if formatting needed\ndart format packages/<package_name> --set-exit-if-changed\n\n# Apply formatting\ndart format packages/<package_name>\n```\n\n### Analyzer\n\n```bash\ndart analyze packages/<package_name>\n```\n\n**Acceptable warnings in test files:**\n- `inference_failure_on_collection_literal` - Empty list type inference\n- `avoid_dynamic_calls` - Dynamic JSON access in tests\n\n**Must fix:**\n- Any errors\n- Warnings in lib/ code\n\n---\n\n## 7. PR Workflow\n\n### Branch Naming Convention\n\n```\nfeat/<package>-<brief-description>\nfix/<package>-<brief-description>\nchore/<package>-<brief-description>\ntest/<package>-<brief-description>\n```\n\nExamples:\n- `feat/openai-prompt-tokens-details`\n- `feat/anthropic-extended-thinking`\n- `chore/format-openai-dart`\n\n### Commit Message Format\n\n```\n<type>(<package>): <description>\n\n[optional body with details]\n```\n\nTypes:\n- `feat` - New feature or schema\n- `fix` - Bug fix\n- `chore` - Maintenance (formatting, deps)\n- `test` - Test changes only\n- `docs` - Documentation only\n\nExamples:\n```\nfeat(openai_dart): Add prompt_tokens_details to CompletionUsage\n\nAdd PromptTokensDetails schema with cached_tokens and audio_tokens\nfields to support the official OpenAI API response format.\n```\n\n### PR Creation Command\n\n```bash\ngh pr create --title \"<type>(<package>): <description>\" --body \"$(cat <<'EOF'\n## Summary\n- Brief description of changes\n\n## Changes\n- List of specific changes made\n\n## Test plan\n- [x] Unit tests added/updated\n- [x] Formatter passes\n- [x] Analyzer passes\n- [ ] CI passes\nEOF\n)\"\n```\n\n### PR Checklist\n\nBefore creating PR:\n1. [ ] Branch created from main\n2. [ ] Changes made to openapi_curated.yaml\n3. [ ] Code generator run\n4. [ ] Unit tests written\n5. [ ] All tests pass: `dart test packages/<package>/test/`\n6. [ ] Formatter run: `dart format packages/<package>`\n7. 
[ ] Analyzer passes: `dart analyze packages/<package>`\n8. [ ] Commit with proper message\n9. [ ] Push to remote\n\n---\n\n## 8. Complete Example: Adding a New Schema\n\n### Example: Adding `TokenUsageDetails` to anthropic_sdk_dart\n\n**Step 1: Update YAML**\n\n```yaml\n# In oas/anthropic_openapi_curated.yaml\ncomponents:\n  schemas:\n    TokenUsageDetails:\n      type: object\n      nullable: true\n      description: Detailed breakdown of token usage.\n      properties:\n        cache_creation_input_tokens:\n          type: integer\n          description: Tokens used to create cache.\n        cache_read_input_tokens:\n          type: integer\n          description: Tokens read from cache.\n```\n\n**Step 2: Add to parent schema**\n\n```yaml\nUsage:\n  properties:\n    # ... existing fields ...\n    details:\n      $ref: \"#/components/schemas/TokenUsageDetails\"\n```\n\n**Step 3: Run generator**\n\n```bash\ncd packages/anthropic_sdk_dart\ndart run oas/main.dart\n```\n\n**Step 4: Write tests**\n\n```dart\n// test/token_usage_details_test.dart\nimport 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('TokenUsageDetails tests', () {\n    test('deserializes with all fields', () {\n      final json = {\n        'cache_creation_input_tokens': 100,\n        'cache_read_input_tokens': 50,\n      };\n      final details = TokenUsageDetails.fromJson(json);\n      expect(details.cacheCreationInputTokens, 100);\n      expect(details.cacheReadInputTokens, 50);\n    });\n    // ... 
more tests\n  });\n}\n```\n\n**Step 5: Run checks**\n\n```bash\ndart test packages/anthropic_sdk_dart/test/token_usage_details_test.dart\ndart format packages/anthropic_sdk_dart\ndart analyze packages/anthropic_sdk_dart\n```\n\n**Step 6: Create PR**\n\n```bash\ngit checkout -b feat/anthropic-token-usage-details\ngit add .\ngit commit -m \"feat(anthropic_sdk_dart): Add TokenUsageDetails schema\"\ngit push -u origin feat/anthropic-token-usage-details\ngh pr create --title \"feat(anthropic_sdk_dart): Add TokenUsageDetails schema\" --body \"...\"\n```\n\n---\n\n## 9. Common Pitfalls\n\n| Pitfall | Solution |\n|---------|----------|\n| Forgetting `nullable: true` | Always add for optional response fields |\n| Forgetting `includeIfNull: false` | Generator handles this, but verify |\n| Inline complex objects | Use $ref to separate schemas |\n| Modifying generated code | Never! Update YAML and regenerate |\n| Large multi-feature PRs | One logical change per PR |\n| Missing tests for enums | Always test enum values count |\n| Skipping empty JSON test | Always test deserialization of `{}` |\n| Relative test file paths | Use directory-aware paths (see Section 15) |\n| Breaking dependent packages | Always check langchain_* packages after schema changes |\n| Non-exhaustive switches | Add all new enum values/union types to switches |\n\n---\n\n## 10. 
Fixing OpenAPI Generator Bugs\n\nIf you encounter bugs in the code generation (openapi_spec package), you can fix them directly:\n\n### Step 1: Locate the Generator\n\nThe generator source is at:\n```\n/Users/davidmigloz/repos/openapi_spec\n```\n\nGitHub: https://github.com/davidmigloz/openapi_spec\n\n### Step 2: Make the Fix\n\nEdit the relevant files in the openapi_spec repo and test locally.\n\n### Step 3: Push the Fix\n\n```bash\ncd /Users/davidmigloz/repos/openapi_spec\ngit add .\ngit commit -m \"fix: Description of the fix\"\ngit push origin main\n```\n\n### Step 4: Update Dependency Reference\n\nUpdate the commit ref in the **root** `pubspec.yaml` of the monorepo:\n\n```yaml\n# In /Users/davidmigloz/repos/langchain_dart/pubspec.yaml\ndependency_overrides:\n  openapi_spec:\n    git:\n      url: https://github.com/davidmigloz/openapi_spec.git\n      ref: <new-commit-sha>  # Update this\n```\n\n### Step 5: Bootstrap the Monorepo\n\n```bash\ncd /Users/davidmigloz/repos/langchain_dart\nmelos bootstrap\n```\n\nThis applies the updated dependency to all packages.\n\n### Step 6: Validate Other Packages\n\n**Critical:** Verify the fix doesn't break generation for other packages:\n\n```bash\n# Regenerate openai_dart\ncd packages/openai_dart\ndart run oas/main.dart\ndart analyze .\ndart test\n\n# Regenerate anthropic_sdk_dart\ncd ../anthropic_sdk_dart\ndart run oas/main.dart\ndart analyze .\ndart test\n\n# Check any other packages using openapi_spec\n```\n\n### Step 7: Create PRs\n\n1. PR for openapi_spec fix (if public contribution needed)\n2. PR for langchain_dart updating the dependency ref\n\n---\n\n## 11. 
Package-Specific Notes\n\n### openai_dart\n- Maintains two spec files (curated + official for reference)\n- 189+ schema files, covers 8+ API categories\n- Has OpenRouter-specific extensions marked with `x-openai-compatible: false`\n- Integration tests require `OPENAI_API_KEY`\n\n### anthropic_sdk_dart\n- Single curated spec file (more focused)\n- 50+ schema files, covers Messages, Batches, Models, Token Counting\n- Custom client wrapper for streaming\n- Integration tests require `ANTHROPIC_API_KEY`\n- Extensions in `extensions.dart` for convenience methods\n- Beta features use `anthropic-beta` header (e.g., token-counting, extended-thinking)\n- Versioned tool types for computer use, text editor, bash (see Section 16)\n- Sealed classes for union types (Block, ImageBlockSource, MessageStreamEvent, etc.)\n\n---\n\n## 12. Quick Reference Commands\n\n```bash\n# Generate code\ndart run oas/main.dart\n\n# Run specific tests\ndart test packages/<pkg>/test/<test_file>.dart\n\n# Run all package tests\ndart test packages/<pkg>\n\n# Format\ndart format packages/<pkg>\n\n# Analyze\ndart analyze packages/<pkg>\n\n# Create PR\ngh pr create --title \"...\" --body \"...\"\n\n# Check git status\ngit status\n\n# View recent commits\ngit log --oneline -10\n```\n\n---\n\n## 13. Files Modified Per Schema Addition\n\nTypical files changed when adding a new schema:\n\n```\npackages/<package>/\n├── oas/openapi_curated.yaml           # YAML changes\n├── lib/src/generated/schema/\n│   ├── schema.dart                    # New part statement\n│   ├── schema.freezed.dart            # Regenerated\n│   ├── schema.g.dart                  # Regenerated\n│   └── <new_schema>.dart              # New file\n└── test/<new_schema>_test.dart        # New test file\n```\n\n---\n\n## 14. 
Handling Breaking Changes in Dependent Packages\n\nWhen SDK schema changes break dependent packages (e.g., `langchain_anthropic` depends on `anthropic_sdk_dart`), you must fix them in the same session.\n\n### Common Breaking Changes\n\n#### 1. Sealed Class/Union Type Changes\n\nWhen a simple class becomes a sealed class with union variants:\n\n```dart\n// BEFORE: Simple class\nsource: a.ImageBlockSource(\n  type: a.ImageBlockSourceType.base64,\n  mediaType: a.ImageBlockSourceMediaType.imageJpeg,\n  data: imageData,\n)\n\n// AFTER: Sealed class with named constructor\nsource: a.ImageBlockSource.base64ImageSource(\n  type: 'base64',\n  mediaType: a.Base64ImageSourceMediaType.imageJpeg,\n  data: imageData,\n)\n```\n\n#### 2. Exhaustive Switch Statements\n\nWhen new enum values or union variants are added, all switch statements must handle them:\n\n```dart\n// Adding new StopReason values\nFinishReason _mapFinishReason(final a.StopReason? reason) => switch (reason) {\n  a.StopReason.endTurn => FinishReason.stop,\n  a.StopReason.maxTokens => FinishReason.length,\n  a.StopReason.stopSequence => FinishReason.stop,\n  a.StopReason.toolUse => FinishReason.toolCalls,\n  // NEW: Must add these or Dart analyzer errors\n  a.StopReason.pauseTurn => FinishReason.unspecified,\n  a.StopReason.refusal => FinishReason.contentFilter,\n  null => FinishReason.unspecified,\n};\n```\n\n#### 3. New Block Types in Content Blocks\n\nWhen new content block types are added to a union:\n\n```dart\n// Must handle all new block types\n(String, AIChatMessageToolCall?) 
_mapContentBlock(final a.Block block) =>\n  switch (block) {\n    final a.TextBlock t => (t.text, null),\n    final a.ImageBlock i => (/* handle */, null),\n    final a.ToolUseBlock tu => (/* handle */, toolCall),\n    // NEW: Add placeholders for new block types\n    final a.DocumentBlock _ => ('', null),\n    final a.RedactedThinkingBlock _ => ('', null),\n    final a.ServerToolUseBlock _ => ('', null),\n    final a.WebSearchToolResultBlock _ => ('', null),\n    final a.MCPToolUseBlock _ => ('', null),\n    final a.MCPToolResultBlock _ => ('', null),\n    // ... etc\n  };\n```\n\n### Workflow for Breaking Changes\n\n1. **Run analyzer on dependent packages** after regenerating SDK:\n   ```bash\n   dart analyze packages/langchain_anthropic\n   dart analyze packages/langchain_openai\n   ```\n\n2. **Fix all errors** - usually exhaustive switch or constructor changes\n\n3. **Run tests** to verify functionality:\n   ```bash\n   dart test packages/langchain_anthropic/test/\n   ```\n\n4. **Commit SDK and dependent package fixes together** or in sequence\n\n---\n\n## 15. Test File Path Handling\n\nTests that reference asset files must work when run from both the package directory and the repo root.\n\n### Problem\n\n```dart\n// This fails when run from repo root\nfinal file = await File('./test/assets/data.txt').readAsString();\n```\n\n### Solution\n\n```dart\n// Directory-aware path that works from any location\nfinal testDir = Directory.current.path.endsWith('anthropic_sdk_dart')\n    ? Directory.current.path\n    : '${Directory.current.path}/packages/anthropic_sdk_dart';\nfinal file = await File('$testDir/test/assets/data.txt').readAsString();\n```\n\n### Skipping Flaky Tests\n\nSome tests depend on API behavior that varies by model or time. 
Use `skip` with explanation:\n\n```dart\ntest(\n  'Test computer tool use',\n  skip: 'claude-sonnet-4-5-20250929 does not support computer_20241022 tools',\n  () async {\n    // test code\n  },\n);\n\ntest(\n  'Test Prompt caching',\n  skip: 'Prompt caching behavior varies by model and may not return expected cache metrics',\n  () async {\n    // test code\n  },\n);\n```\n\n---\n\n## 16. Anthropic-Specific Patterns\n\n### Versioned Tool Types\n\nAnthropic uses versioned tool type identifiers. These change over time:\n\n| Tool | Current Version | Notes |\n|------|-----------------|-------|\n| Computer Use | `computer_20241022` | Beta, model-specific support |\n| Text Editor | `text_editor_20250728` | Updated in 2025 |\n| Bash | `bash_20250124` | Updated in 2025 |\n\n**Important:** Not all models support all tool versions. Verify before using.\n\n### OpenAPI Discriminator Mapping\n\nTool types use discriminator mappings in the OpenAPI spec:\n\n```yaml\nTool:\n  discriminator:\n    propertyName: type\n    mapping:\n      custom: \"#/components/schemas/ToolCustom\"\n      computer_20241022: \"#/components/schemas/ToolComputerUse\"\n      text_editor_20250728: \"#/components/schemas/ToolTextEditor\"\n      bash_20250124: \"#/components/schemas/ToolBash\"\n```\n\n### Beta Features Header\n\nSome endpoints require beta headers:\n\n```dart\n// In client, headers are added automatically for beta features\nfinal response = await client.countMessageTokens(\n  request: request,\n  // Beta header added: 'anthropic-beta': 'token-counting-2024-11-01'\n);\n```\n\n---\n\n## 17. 
Freezed Sealed Class Patterns\n\n### Union Types with Discriminator\n\nWhen the OpenAPI spec defines a `oneOf` with discriminator:\n\n```yaml\nImageBlockSource:\n  oneOf:\n    - $ref: \"#/components/schemas/Base64ImageSource\"\n    - $ref: \"#/components/schemas/UrlImageSource\"\n  discriminator:\n    propertyName: type\n    mapping:\n      base64: \"#/components/schemas/Base64ImageSource\"\n      url: \"#/components/schemas/UrlImageSource\"\n```\n\nGenerates a sealed class:\n\n```dart\n@Freezed(unionKey: 'type', unionValueCase: FreezedUnionCase.snake)\nsealed class ImageBlockSource with _$ImageBlockSource {\n  @FreezedUnionValue('base64')\n  const factory ImageBlockSource.base64ImageSource({\n    required String type,\n    required Base64ImageSourceMediaType mediaType,\n    required String data,\n  }) = Base64ImageSource;\n\n  @FreezedUnionValue('url')\n  const factory ImageBlockSource.urlImageSource({\n    required String type,\n    required String url,\n  }) = UrlImageSource;\n}\n```\n\n### Pattern Matching on Sealed Classes\n\n```dart\n// Use switch expression for exhaustive matching\nfinal result = switch (imageSource) {\n  final Base64ImageSource s => s.data,\n  final UrlImageSource s => s.url,\n};\n```\n\n---\n\n## 18. Merging Stacked PRs\n\nWhen creating multiple dependent PRs (stacked PRs), each PR's base branch should point to the previous PR's branch so GitHub shows only the incremental diff.\n\n### Critical: Update Base BEFORE Merging\n\n> **WARNING:** If you merge with `--delete-branch` before updating the next PR's base, GitHub will **auto-close** the next PR because its base branch no longer exists. 
The PR cannot be reopened and must be recreated.\n\n**Wrong order (causes auto-closure):**\n```bash\ngh pr merge 889 --squash --delete-branch  # Deletes base branch\ngh pr edit 890 --base main                 # FAILS: PR 890 is already closed!\n```\n\n**Correct order:**\n```bash\ngh pr edit 890 --base main                 # Update base FIRST\ngh pr merge 889 --squash --delete-branch   # Now safe to delete branch\n```\n\n### Merging Process\n\nFor each PR in the stack (starting from the one closest to `main`):\n\n1. **Update the NEXT PR's base to main BEFORE merging current PR:**\n   ```bash\n   gh pr edit <NEXT_PR_NUMBER> --base main\n   ```\n\n2. **Merge the current PR:**\n   ```bash\n   gh pr merge <CURRENT_PR_NUMBER> --squash --delete-branch\n   ```\n\n3. **Rebase the next PR on main:**\n   ```bash\n   git checkout <next-branch>\n   git fetch origin main\n   git rebase origin/main\n   ```\n\n   Git will automatically skip already-merged commits (you'll see \"skipped previously applied commit\" messages).\n\n4. **Force push the rebased branch:**\n   ```bash\n   git push --force-with-lease\n   ```\n\n5. 
**Verify the PR shows only its own commits**, then repeat from step 1 for the next PR.\n\n### Complete Example\n\n```bash\n# Stack: PR 889 -> PR 890 -> PR 891 (889 is closest to main)\n\n# === Merge PR 889 ===\ngh pr edit 890 --base main                 # Step 1: Update 890's base FIRST\ngh pr merge 889 --squash --delete-branch   # Step 2: Now merge 889\ngit checkout feat/pr-890-branch            # Step 3: Checkout 890's branch\ngit fetch origin main\ngit rebase origin/main                     # Git skips merged commits\ngit push --force-with-lease                # Step 4: Update remote\n\n# === Merge PR 890 ===\ngh pr edit 891 --base main                 # Step 1: Update 891's base FIRST\ngh pr merge 890 --squash --delete-branch   # Step 2: Now merge 890\ngit checkout feat/pr-891-branch            # Step 3: Checkout 891's branch\ngit fetch origin main\ngit rebase origin/main\ngit push --force-with-lease\n\n# === Merge PR 891 (last one) ===\ngh pr merge 891 --squash --delete-branch   # No next PR, just merge\n```\n\n### Why This Order Matters\n\n| Scenario | What Happens |\n|----------|--------------|\n| Update base first, then merge | ✅ Next PR stays open, points to main |\n| Merge first, then update base | ❌ Base branch deleted → PR auto-closed → Cannot reopen |\n\n### Troubleshooting\n\n| Issue | Cause | Solution |\n|-------|-------|----------|\n| PR auto-closed after merge | Base branch was deleted before updating | Create new PR: `gh pr create --base main --head <branch>` |\n| `gh pr edit --base` fails | PR already closed, or GraphQL error | Use API: `gh api repos/OWNER/REPO/pulls/NUM -X PATCH -f base=main` |\n| PR shows too many commits | Branch not rebased after base change | Rebase and force push |\n| Rebase conflicts | Commits conflict with squash merge | Use `git rebase -i origin/main` and drop merged commits |\n\n### Using Interactive Rebase (Optional)\n\nIf regular rebase has conflicts, use interactive mode to explicitly drop merged 
commits:\n\n```bash\ngit rebase -i origin/main\n```\n\nIn the editor, change `pick` to `drop` for commits that were in the already-merged PR:\n\n```\ndrop abc1234 feat: change from PR 889 (already merged)\npick def5678 feat: change from PR 890 (keep this)\n```\n\nSave and exit, then force push.\n\n---\n\n## 19. anyOf Patterns for Union Types\n\nWhen defining union types (e.g., `ChatCompletionToolChoice` that can be either an enum string or an object), the `anyOf` pattern must be **inlined** in the parent schema rather than defined as a top-level schema.\n\n### Problem\n\nTop-level `anyOf` schemas can cause freezed code generation to fail:\n\n```\nNull check operator used on a null value in DefaultValue.defaultValue\n```\n\nThe generated class may be empty instead of a proper sealed class.\n\n### Incorrect (Top-Level Schema)\n\n```yaml\n# This can fail during code generation\nChatCompletionToolChoice:\n  title: ChatCompletionToolChoice\n  anyOf:\n    - $ref: '#/components/schemas/ToolChoiceOption'\n    - $ref: '#/components/schemas/ToolChoiceTool'\n  nullable: true\n```\n\n### Correct (Inlined in Parent)\n\n```yaml\nChatCompletionRequest:\n  properties:\n    tool_choice:\n      title: ChatCompletionToolChoice\n      description: Controls which tool is called.\n      nullable: true\n      anyOf:\n        - type: string\n          title: ChatCompletionToolChoiceOption\n          enum: [none, auto, any, required]\n        - $ref: '#/components/schemas/ToolChoiceTool'\n```\n\nThis generates a proper sealed class with custom JSON converter:\n\n```dart\n@freezed\nsealed class ChatCompletionToolChoice with _$ChatCompletionToolChoice {\n  const factory ChatCompletionToolChoice.enumeration(\n    ChatCompletionToolChoiceOption value,\n  ) = ChatCompletionToolChoiceEnumeration;\n\n  const factory ChatCompletionToolChoice.toolChoiceTool(\n    ToolChoiceTool value,\n  ) = ChatCompletionToolChoiceToolChoiceTool;\n}\n```\n\n### When to Use This Pattern\n\n- Union types combining 
enums and objects (tool_choice, stop sequences)\n- Any `anyOf` that mixes primitive types with `$ref` objects\n- Fields where the API accepts multiple distinct formats\n\n---\n\n## 20. Import Conflicts Between Packages\n\nWhen a LangChain integration package (e.g., `langchain_mistralai`) imports both `langchain_core` and an SDK package (e.g., `mistralai_dart`), naming conflicts can occur.\n\n### Problem\n\n```dart\n// Both packages define a 'Tool' class\nimport 'package:langchain_core/tools.dart';  // Has Tool\nimport 'package:mistralai_dart/mistralai_dart.dart';  // Also has Tool\n\n// Error: The name 'Tool' is defined in multiple libraries\n```\n\n### Solution: Use Import Prefix\n\n```dart\nimport 'package:langchain_core/tools.dart';\nimport 'package:mistralai_dart/mistralai_dart.dart' as mistral;\n\n// Now use prefix for SDK types\nmistral.Tool _mapTool(ToolSpec tool) {\n  return mistral.Tool(\n    type: mistral.ToolType.function,\n    function: mistral.FunctionDefinition(\n      name: tool.name,\n      description: tool.description,\n      parameters: tool.inputJsonSchema,\n    ),\n  );\n}\n```\n\n### Common Conflicting Names\n\n| Class Name | langchain_core | SDK Packages |\n|------------|----------------|--------------|\n| `Tool` | ToolSpec wrapper | API schema |\n| `Message` | ChatMessage | API schema |\n| `Usage` | LanguageModelUsage | API schema |\n\n### Best Practice\n\nAlways use an `as` prefix for SDK imports in integration packages to prevent current and future conflicts as APIs evolve.\n\n---\n\n## 21. Practical Stacked PRs with Generated Code\n\nWhen making multiple related changes to an API client, the generated files (`schema.freezed.dart`, `schema.g.dart`) regenerate completely each time. 
This makes truly independent stacked PRs impractical.\n\n### The Challenge\n\nEach code generation run overwrites:\n- `schema.freezed.dart` (~thousands of lines)\n- `schema.g.dart` (~thousands of lines)\n- `schema.dart` (part statements)\n\nCreating separate commits for \"only tool calling\" vs \"only response format\" is nearly impossible since each regeneration includes all changes.\n\n### Practical Approach\n\nInstead of N separate PRs per feature, organize by package:\n\n| PR | Scope | Contents |\n|----|-------|----------|\n| PR 1 | SDK package | All schema changes (regenerated together) |\n| PR 2 | Integration package | Mapper/options changes (depends on PR 1) |\n\n### Example: Mistral Tool Calling\n\n**PR 1: mistralai_dart** (all schema changes)\n- Tool/function calling schemas\n- Response format schemas\n- Missing parameters\n- Updated models enum\n- Prediction/reasoning mode\n\n**PR 2: langchain_mistralai** (integration)\n- Tool mappers\n- Options updates\n- Finish reason handling\n\n### When to Split Further\n\nOnly split SDK changes into multiple PRs when:\n1. Changes are truly independent (different API endpoints)\n2. One change is experimental/risky and needs separate review\n3. Changes affect different major versions\n\n---\n\n*Document created: 2025-12-19*\n*Updated: 2025-12-20*\n*Based on: openai_dart PRs #826-#837, anthropic_sdk_dart API alignment work, mistralai_dart PRs #887-#888*\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Change Log\n\nAll notable changes to this project will be documented in this file.\nSee [Conventional Commits](https://conventionalcommits.org) for commit guidelines.\n\n## 2025-12-27\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`googleai_dart` - `v3.0.0`](#googleai_dart---v300)\n\nPackages with other changes:\n\n - [`langchain_firebase` - `v0.3.2`](#langchain_firebase---v032)\n - [`openai_dart` - `v0.6.2`](#openai_dart---v062)\n - [`langchain_google` - `v0.7.1+2`](#langchain_google---v0712)\n - [`langchain_openai` - `v0.8.1+1`](#langchain_openai---v0811)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_google` - `v0.7.1+2`\n - `langchain_openai` - `v0.8.1+1`\n\n---\n\n#### `googleai_dart` - `v3.0.0`\n\n - **FEAT**(googleai_dart): add convenience helpers for improved DX ([#924](https://github.com/davidmigloz/langchain_dart/issues/924)). ([634b4f97](https://github.com/davidmigloz/langchain_dart/commit/634b4f970ec3264cddaa6e42d7d03fc8af3593ff))\n - **FEAT**(googleai_dart): Update default models to Gemini 3 family ([#922](https://github.com/davidmigloz/langchain_dart/issues/922)). ([62bca9da](https://github.com/davidmigloz/langchain_dart/commit/62bca9da1abc4a64267c2d3085ad969cad33f4d6))\n - **FEAT**(googleai_dart): Auto-populate batch.model from method parameter ([#921](https://github.com/davidmigloz/langchain_dart/issues/921)). ([abfeded8](https://github.com/davidmigloz/langchain_dart/commit/abfeded8f602b1db28d0f8f35f4e275982a7fed6))\n - **BREAKING** **FEAT**(googleai_dart): replace List<dynamic> with strongly-typed lists ([#923](https://github.com/davidmigloz/langchain_dart/issues/923)). 
([403d5319](https://github.com/davidmigloz/langchain_dart/commit/403d5319d67fb39298cc6182d883a8e2f1b731f8))\n\n#### `langchain_firebase` - `v0.3.2`\n\n - **FEAT**: Fix formatting issues ([#922](https://github.com/davidmigloz/langchain_dart/issues/922)). ([62bca9da](https://github.com/davidmigloz/langchain_dart/commit/62bca9da1abc4a64267c2d3085ad969cad33f4d6))\n\n#### `openai_dart` - `v0.6.2`\n\n - **FEAT**: Fix formatting issues ([#922](https://github.com/davidmigloz/langchain_dart/issues/922)). ([62bca9da](https://github.com/davidmigloz/langchain_dart/commit/62bca9da1abc4a64267c2d3085ad969cad33f4d6))\n\n## 2025-12-23\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`googleai_dart` - `v2.1.0`](#googleai_dart---v210)\n - [`langchain_google` - `v0.7.1+1`](#langchain_google---v0711)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_google` - `v0.7.1+1`\n\n---\n\n#### `googleai_dart` - `v2.1.0`\n\n - **FEAT**(googleai_dart): Add Gemini Live API (WebSocket) support ([#920](https://github.com/davidmigloz/langchain_dart/issues/920)). ([4beb01dd](https://github.com/davidmigloz/langchain_dart/commit/4beb01dd532582257e3d06c1619da1ee1793c5f4))\n - **FEAT**(googleai_dart): Add missing model properties from OpenAPI spec ([#916](https://github.com/davidmigloz/langchain_dart/issues/916)). ([fc0e2f8a](https://github.com/davidmigloz/langchain_dart/commit/fc0e2f8ac70ccb8fc8bc3992f76aa05f90d81690))\n - **DOCS**(googleai_dart): Add documentation for grounding tools ([#917](https://github.com/davidmigloz/langchain_dart/issues/917)). 
([b5a529fe](https://github.com/davidmigloz/langchain_dart/commit/b5a529fe015095e2a8c4dfff32c2b5155eb608fa))\n\n📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details.\n\n## 2025-12-22\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_firebase` - `v0.3.1+1`](#langchain_firebase---v0311)\n - [`langchain_mistralai` - `v0.3.1+1`](#langchain_mistralai---v0311)\n - [`mistralai_dart` - `v0.1.1+1`](#mistralai_dart---v0111)\n\n---\n\n#### `langchain_firebase` - `v0.3.1+1`\n\n - **FIX**(langchain_firebase): Remove invalid FinishReason.malformedFunctionCall case ([#911](https://github.com/davidmigloz/langchain_dart/issues/911)). ([569e9cc5](https://github.com/davidmigloz/langchain_dart/commit/569e9cc53f3cf884f4a5c2bd5d56f081a9c39ad0))\n\n#### `langchain_mistralai` - `v0.3.1+1`\n\n - **FIX**(mistralai_dart): Fix streaming tool calls deserialization error ([#913](https://github.com/davidmigloz/langchain_dart/issues/913)) ([#914](https://github.com/davidmigloz/langchain_dart/issues/914)). ([ec4d20bf](https://github.com/davidmigloz/langchain_dart/commit/ec4d20bfd966a6c04ab44d47fd9baa175343a990))\n\n#### `mistralai_dart` - `v0.1.1+1`\n\n - **FIX**(mistralai_dart): Fix streaming tool calls deserialization error ([#913](https://github.com/davidmigloz/langchain_dart/issues/913)) ([#914](https://github.com/davidmigloz/langchain_dart/issues/914)). 
([ec4d20bf](https://github.com/davidmigloz/langchain_dart/commit/ec4d20bfd966a6c04ab44d47fd9baa175343a990))\n\n## 2025-12-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`googleai_dart` - `v2.0.0`](#googleai_dart---v200)\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.3.1`](#anthropic_sdk_dart---v031)\n - [`langchain` - `v0.8.1`](#langchain---v081)\n - [`langchain_anthropic` - `v0.3.1`](#langchain_anthropic---v031)\n - [`langchain_core` - `v0.4.1`](#langchain_core---v041)\n - [`langchain_firebase` - `v0.3.1`](#langchain_firebase---v031)\n - [`langchain_google` - `v0.7.1`](#langchain_google---v071)\n - [`langchain_mistralai` - `v0.3.1`](#langchain_mistralai---v031)\n - [`langchain_ollama` - `v0.4.1`](#langchain_ollama---v041)\n - [`langchain_openai` - `v0.8.1`](#langchain_openai---v081)\n - [`mistralai_dart` - `v0.1.1`](#mistralai_dart---v011)\n - [`openai_dart` - `v0.6.1`](#openai_dart---v061)\n - [`langchain_chroma` - `v0.3.0+2`](#langchain_chroma---v0302)\n - [`langchain_community` - `v0.4.0+2`](#langchain_community---v0402)\n - [`langchain_supabase` - `v0.2.0+2`](#langchain_supabase---v0202)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_chroma` - `v0.3.0+2`\n - `langchain_community` - `v0.4.0+2`\n - `langchain_supabase` - `v0.2.0+2`\n\n---\n\n#### `googleai_dart` - `v2.0.0`\n\n - **BREAKING** **FEAT**(googleai_dart): Remove deprecated schema fields ([#848](https://github.com/davidmigloz/langchain_dart/issues/848)). ([e6d07ec4](https://github.com/davidmigloz/langchain_dart/commit/e6d07ec4a94d1b09e9dbd71f30904d510fb749c6))\n - **BREAKING** **FEAT**(googleai_dart): Remove deprecated Chunks and query APIs ([#847](https://github.com/davidmigloz/langchain_dart/issues/847)). 
([9cae76d5](https://github.com/davidmigloz/langchain_dart/commit/9cae76d534d45bcd36622216a0926bfbc8800d86))\n - **BREAKING** **FEAT**(googleai_dart): Remove deprecated RagStores resource ([#846](https://github.com/davidmigloz/langchain_dart/issues/846)). ([1ab553f1](https://github.com/davidmigloz/langchain_dart/commit/1ab553f1da173dbed72a1d9089e56ce11b78eac6))\n - **FEAT**(googleai_dart): Add InteractionsResource and client integration ([#905](https://github.com/davidmigloz/langchain_dart/issues/905)). ([af6b13ea](https://github.com/davidmigloz/langchain_dart/commit/af6b13ea3c91ca4f05196940505d3eddb5c55831))\n - **FEAT**(googleai_dart): Add Interactions API tool types ([#904](https://github.com/davidmigloz/langchain_dart/issues/904)). ([2258cfa1](https://github.com/davidmigloz/langchain_dart/commit/2258cfa187cb011eddfa204d7f2a68a2ab329a37))\n - **FEAT**(googleai_dart): Add Interactions API events and deltas ([#903](https://github.com/davidmigloz/langchain_dart/issues/903)). ([826d3f64](https://github.com/davidmigloz/langchain_dart/commit/826d3f64845eb7178b9567f5193951796f476ea1))\n - **FEAT**(googleai_dart): Add Interactions API content types ([#902](https://github.com/davidmigloz/langchain_dart/issues/902)). ([b8c61743](https://github.com/davidmigloz/langchain_dart/commit/b8c61743e2e6ffa9cd6cd44df289135f6250b30d))\n - **FEAT**(googleai_dart): Add Interactions API core models ([#901](https://github.com/davidmigloz/langchain_dart/issues/901)). ([65f5db17](https://github.com/davidmigloz/langchain_dart/commit/65f5db17d91282bfc7edaca7e9fcb97b505631c6))\n - **FEAT**(googleai_dart): Update existing models with new properties ([#856](https://github.com/davidmigloz/langchain_dart/issues/856)). ([dd3893e0](https://github.com/davidmigloz/langchain_dart/commit/dd3893e07e78f2ce852ba26fd7e67744402ec11a))\n - **FEAT**(googleai_dart): Add RetrievalConfig to ToolConfig ([#855](https://github.com/davidmigloz/langchain_dart/issues/855)). 
([5e11aa70](https://github.com/davidmigloz/langchain_dart/commit/5e11aa7000d74dfc09201620e38670c505cc525b))\n - **FEAT**(googleai_dart): Add MediaResolution to Part ([#854](https://github.com/davidmigloz/langchain_dart/issues/854)). ([df76f8c5](https://github.com/davidmigloz/langchain_dart/commit/df76f8c5b967efd5ac11aa83760459b71e55a000))\n - **FEAT**(googleai_dart): Add GoogleMaps tool ([#853](https://github.com/davidmigloz/langchain_dart/issues/853)). ([54814614](https://github.com/davidmigloz/langchain_dart/commit/548146143cfe48c4f24c9644d27b88550b816904))\n - **FEAT**(googleai_dart): Add McpServers tool ([#852](https://github.com/davidmigloz/langchain_dart/issues/852)). ([97970687](https://github.com/davidmigloz/langchain_dart/commit/97970687d43ff8dea4c6a87633d0e82287eedc30))\n - **FEAT**(googleai_dart): Add FileSearch tool ([#851](https://github.com/davidmigloz/langchain_dart/issues/851)). ([a00895b1](https://github.com/davidmigloz/langchain_dart/commit/a00895b1e264164894b56f6cf7dccea5f3c6c5b6))\n - **FEAT**(googleai_dart): Add grounding models ([#850](https://github.com/davidmigloz/langchain_dart/issues/850)). ([bb1a6228](https://github.com/davidmigloz/langchain_dart/commit/bb1a62286d5e04b612e148a4e55bceacf289e57c))\n - **FEAT**(googleai_dart): Add FileSearchStores resource ([#849](https://github.com/davidmigloz/langchain_dart/issues/849)). ([acb63d72](https://github.com/davidmigloz/langchain_dart/commit/acb63d72f03af13c1e1d4ff62f3f5e43a3ec34fd))\n - **FEAT**(googleai_dart): Add ThinkingConfig support to GenerationConfig ([#817](https://github.com/davidmigloz/langchain_dart/issues/817)). ([36de62a9](https://github.com/davidmigloz/langchain_dart/commit/36de62a9c65b24d9db35589772e053bb9c090035))\n - **FIX**(googleai_dart): Complete alignment with target implementation ([#884](https://github.com/davidmigloz/langchain_dart/issues/884)). 
([60476e8d](https://github.com/davidmigloz/langchain_dart/commit/60476e8db17ca9badba217269169f3f8eb11a318))\n - **DOCS**(googleai_dart): Add Interactions API docs and example ([#897](https://github.com/davidmigloz/langchain_dart/issues/897)). ([f4a04677](https://github.com/davidmigloz/langchain_dart/commit/f4a04677e1e0743f85ca7f06756ba148c49cad01))\n\n#### `anthropic_sdk_dart` - `v0.3.1`\n\n - **FIX**(anthropic_sdk_dart): Add signature_delta support to BlockDelta (fixes [#811](https://github.com/davidmigloz/langchain_dart/issues/811)) ([#878](https://github.com/davidmigloz/langchain_dart/issues/878)). ([1d281837](https://github.com/davidmigloz/langchain_dart/commit/1d281837f64ec8d5ce6cdf3d00bcdbdba6451ebe))\n - **FIX**(anthropic_sdk_dart): Update tool types and fix analyzer warnings ([#876](https://github.com/davidmigloz/langchain_dart/issues/876)). ([17613b1e](https://github.com/davidmigloz/langchain_dart/commit/17613b1e6dd6dcf420e914fe0e56ca972ec303ce))\n - **FEAT**(anthropic_sdk_dart): Add citations_delta support to BlockDelta ([#880](https://github.com/davidmigloz/langchain_dart/issues/880)). ([4da916bf](https://github.com/davidmigloz/langchain_dart/commit/4da916bf81094799d1b28fb7cfce5b5ade72cea0))\n - **FEAT**(anthropic_sdk_dart): Add beta features support ([#874](https://github.com/davidmigloz/langchain_dart/issues/874)). ([28e4a23a](https://github.com/davidmigloz/langchain_dart/commit/28e4a23ae996d9828f2b6e7b404e6d942613bb34))\n - **FEAT**(anthropic_sdk_dart): Add schema enhancements ([#873](https://github.com/davidmigloz/langchain_dart/issues/873)). ([424d3225](https://github.com/davidmigloz/langchain_dart/commit/424d32253c15d57752f9a75423d69dddec05642e))\n - **FEAT**(anthropic_sdk_dart): Add Models API ([#872](https://github.com/davidmigloz/langchain_dart/issues/872)). 
([7962a867](https://github.com/davidmigloz/langchain_dart/commit/7962a867b5cca399364a65960fcb4b16c79e3dbb))\n - **FEAT**(anthropic_sdk_dart): Add get message batch results endpoint ([#871](https://github.com/davidmigloz/langchain_dart/issues/871)). ([46fb2a5d](https://github.com/davidmigloz/langchain_dart/commit/46fb2a5d1bd6efd53bd6dc73d21d82ecd5ff7a1f))\n - **FEAT**(anthropic_sdk_dart): Add delete message batch endpoint ([#870](https://github.com/davidmigloz/langchain_dart/issues/870)). ([6611e175](https://github.com/davidmigloz/langchain_dart/commit/6611e1758781e568442a9dec41a5e0b1eaeb13f4))\n - **FEAT**(anthropic_sdk_dart): Add cancel message batch endpoint ([#869](https://github.com/davidmigloz/langchain_dart/issues/869)). ([b7aa8602](https://github.com/davidmigloz/langchain_dart/commit/b7aa8602f5474c6a32ef39ce3a52c3568081dc13))\n - **FEAT**(anthropic_sdk_dart): Add list message batches endpoint ([#868](https://github.com/davidmigloz/langchain_dart/issues/868)). ([745e369d](https://github.com/davidmigloz/langchain_dart/commit/745e369d07a71d66de508ab5b7933f18693eee9c))\n - **FEAT**(anthropic_sdk_dart): Add token counting API ([#858](https://github.com/davidmigloz/langchain_dart/issues/858)). ([b0d61c92](https://github.com/davidmigloz/langchain_dart/commit/b0d61c9204fe959bd16eca842ab98292e723822a))\n\n#### `langchain` - `v0.8.1`\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n - **FIX**(langchain): Properly serialize non-String tool outputs in AgentExecutor ([#821](https://github.com/davidmigloz/langchain_dart/issues/821)). 
([3891164c](https://github.com/davidmigloz/langchain_dart/commit/3891164c11d0e7dd809b179d15444dd2da71aca0))\n\n#### `langchain_anthropic` - `v0.3.1`\n\n - **FIX**(langchain_anthropic): Handle CitationsBlockDelta in streaming responses ([#881](https://github.com/davidmigloz/langchain_dart/issues/881)). ([445ddda1](https://github.com/davidmigloz/langchain_dart/commit/445ddda1fa7a61f6a34e4faae3e024c7e625b86b))\n - **FIX**(langchain_anthropic): Handle SignatureBlockDelta in streaming responses ([#879](https://github.com/davidmigloz/langchain_dart/issues/879)). ([a9fe5285](https://github.com/davidmigloz/langchain_dart/commit/a9fe5285ef00df5f80e934a480b1b79caeb0e1f0))\n - **FIX**(langchain_anthropic): Update mappers for anthropic_sdk_dart schema changes ([#877](https://github.com/davidmigloz/langchain_dart/issues/877)). ([c77d454d](https://github.com/davidmigloz/langchain_dart/commit/c77d454d8effd26aaff59a39c438e4731f7ad773))\n - **FEAT**(langchain_anthropic): Add listModels() support ([#882](https://github.com/davidmigloz/langchain_dart/issues/882)). ([c1ba2592](https://github.com/davidmigloz/langchain_dart/commit/c1ba25920277f625fb6b3629f3d0c312ebd75240))\n\n#### `langchain_core` - `v0.4.1`\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n#### `langchain_firebase` - `v0.3.1`\n\n - **FIX**(langchain_firebase): Handle malformedFunctionCall finish reason ([#842](https://github.com/davidmigloz/langchain_dart/issues/842)). ([d6eef0dd](https://github.com/davidmigloz/langchain_dart/commit/d6eef0ddb0c8a5436d830e0487218373ff6dbbce))\n - **FEAT**(langchain_firebase): Migrate to firebase_ai and add Google AI backend support ([#909](https://github.com/davidmigloz/langchain_dart/issues/909)). 
([3be47d26](https://github.com/davidmigloz/langchain_dart/commit/3be47d261d1fd5c45c58bf84420d6fa37dc0c9c7))\n\n#### `langchain_google` - `v0.7.1`\n\n - **FIX**(langchain_google): Remove ServiceAccountCredentials stub export ([#838](https://github.com/davidmigloz/langchain_dart/issues/838)). ([d0a058b3](https://github.com/davidmigloz/langchain_dart/commit/d0a058b3f5488470362564fa84c350bdb7b41b14))\n - **FIX**(langchain_google): Add web platform compatibility for HttpClientAuthProvider ([#832](https://github.com/davidmigloz/langchain_dart/issues/832)). ([3a9e995b](https://github.com/davidmigloz/langchain_dart/commit/3a9e995b6dc75fe403175f6183c04387b6aa4e03))\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n#### `langchain_mistralai` - `v0.3.1`\n\n - **FEAT**(langchain_mistralai): Add tool/function calling support ([#888](https://github.com/davidmigloz/langchain_dart/issues/888)). ([f4a1480c](https://github.com/davidmigloz/langchain_dart/commit/f4a1480c787f53668569896933d0d9321600c20e))\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n - **FEAT**(mistralai_dart): Align embeddings API with latest Mistral spec ([#886](https://github.com/davidmigloz/langchain_dart/issues/886)). 
([769edc49](https://github.com/davidmigloz/langchain_dart/commit/769edc4937ac611b9c8d4b65421e403012f565a1))\n\n#### `langchain_ollama` - `v0.4.1`\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n#### `langchain_openai` - `v0.8.1`\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n#### `mistralai_dart` - `v0.1.1`\n\n - **FEAT**(mistralai_dart): Align Chat API with latest Mistral spec ([#887](https://github.com/davidmigloz/langchain_dart/issues/887)). ([b5a12301](https://github.com/davidmigloz/langchain_dart/commit/b5a1230184e79df5cef1256527eebd352d1a3f6a))\n - **FEAT**(mistralai_dart): Align embeddings API with latest Mistral spec ([#886](https://github.com/davidmigloz/langchain_dart/issues/886)). ([769edc49](https://github.com/davidmigloz/langchain_dart/commit/769edc4937ac611b9c8d4b65421e403012f565a1))\n\n#### `openai_dart` - `v0.6.1`\n\n - **FEAT**(openai_dart): Add image streaming and new GPT image models ([#827](https://github.com/davidmigloz/langchain_dart/issues/827)). ([1218d8c3](https://github.com/davidmigloz/langchain_dart/commit/1218d8c3d67531066ba9b1e9320699461a7e172d))\n - **FEAT**(openai_dart): Add ImageGenStreamEvent schema for streaming ([#834](https://github.com/davidmigloz/langchain_dart/issues/834)). ([eb640052](https://github.com/davidmigloz/langchain_dart/commit/eb64005217cc632e6da7d222d257273dbf95cb41))\n - **FEAT**(openai_dart): Add ImageGenUsage schema for image generation ([#833](https://github.com/davidmigloz/langchain_dart/issues/833)). 
([aecf79a9](https://github.com/davidmigloz/langchain_dart/commit/aecf79a93de2f74d051cb4fde7a0363a06375e96))\n - **FEAT**(openai_dart): Add metadata fields to ImagesResponse ([#831](https://github.com/davidmigloz/langchain_dart/issues/831)). ([bd94b4c6](https://github.com/davidmigloz/langchain_dart/commit/bd94b4c617555b3bbd7a3e97f4643a88ba128daa))\n - **FEAT**(openai_dart): Add prompt_tokens_details to CompletionUsage ([#830](https://github.com/davidmigloz/langchain_dart/issues/830)). ([ede649d1](https://github.com/davidmigloz/langchain_dart/commit/ede649d1d70816ef172f32837f311ff0955a26d3))\n - **FEAT**(openai_dart): Add fine-tuning method parameter and schemas ([#828](https://github.com/davidmigloz/langchain_dart/issues/828)). ([99d77425](https://github.com/davidmigloz/langchain_dart/commit/99d774252bf55e054602ee9b306cc32cb86e57eb))\n - **FEAT**(openai_dart): Add Batch model and usage fields ([#826](https://github.com/davidmigloz/langchain_dart/issues/826)). ([b2933f50](https://github.com/davidmigloz/langchain_dart/commit/b2933f50045180500874241d1b7177488d0282bc))\n - **FEAT**(openai_dart): Add OpenRouter-specific sampling parameters ([#825](https://github.com/davidmigloz/langchain_dart/issues/825)). ([3dd9075c](https://github.com/davidmigloz/langchain_dart/commit/3dd9075c7501dbf84713ca72d7506fd53c5bf1a4))\n - **FIX**(openai_dart): Remove default value from image stream parameter ([#829](https://github.com/davidmigloz/langchain_dart/issues/829)). ([d94c7063](https://github.com/davidmigloz/langchain_dart/commit/d94c70631e818057299eaa75fa7f807a7ec121fe))\n - **FIX**(openai_dart): Fix OpenRouter reasoning type enum parsing ([#810](https://github.com/davidmigloz/langchain_dart/issues/810)) ([#824](https://github.com/davidmigloz/langchain_dart/issues/824)). 
([44ab2841](https://github.com/davidmigloz/langchain_dart/commit/44ab28414280c94e2599863770756ca8622650de))\n\n\n## 2025-10-16\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.3.0+1`](#anthropic_sdk_dart---v0301)\n - [`chromadb` - `v0.3.0+1`](#chromadb---v0301)\n - [`googleai_dart` - `v1.1.0`](#googleai_dart---v110)\n - [`langchain` - `v0.8.0+1`](#langchain---v0801)\n - [`langchain_anthropic` - `v0.3.0+1`](#langchain_anthropic---v0301)\n - [`langchain_chroma` - `v0.3.0+1`](#langchain_chroma---v0301)\n - [`langchain_community` - `v0.4.0+1`](#langchain_community---v0401)\n - [`langchain_core` - `v0.4.0+1`](#langchain_core---v0401)\n - [`langchain_firebase` - `v0.3.0+1`](#langchain_firebase---v0301)\n - [`langchain_google` - `v0.7.0+1`](#langchain_google---v0701)\n - [`langchain_mistralai` - `v0.3.0+1`](#langchain_mistralai---v0301)\n - [`langchain_ollama` - `v0.4.0+1`](#langchain_ollama---v0401)\n - [`langchain_openai` - `v0.8.0+1`](#langchain_openai---v0801)\n - [`langchain_supabase` - `v0.2.0+1`](#langchain_supabase---v0201)\n - [`mistralai_dart` - `v0.1.0+1`](#mistralai_dart---v0101)\n - [`ollama_dart` - `v0.3.0+1`](#ollama_dart---v0301)\n - [`openai_dart` - `v0.6.0+1`](#openai_dart---v0601)\n - [`openai_realtime_dart` - `v0.1.0+1`](#openai_realtime_dart---v0101)\n - [`tavily_dart` - `v0.2.0+1`](#tavily_dart---v0201)\n - [`vertex_ai` - `v0.2.0+1`](#vertex_ai---v0201)\n\n---\n\n#### `googleai_dart` - `v1.1.0`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n - **FEAT**: Make googleai_dart fully WASM compatible ([#808](https://github.com/davidmigloz/langchain_dart/issues/808)). 
([07e597f3](https://github.com/davidmigloz/langchain_dart/commit/07e597f3984b2c0396ebfb5ae7e981bb52872368))\n\n#### `anthropic_sdk_dart` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `chromadb` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain` - `v0.8.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_anthropic` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_chroma` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_community` - `v0.4.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_core` - `v0.4.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). 
([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_firebase` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_google` - `v0.7.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_mistralai` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_ollama` - `v0.4.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_openai` - `v0.8.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `langchain_supabase` - `v0.2.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `mistralai_dart` - `v0.1.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). 
([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `ollama_dart` - `v0.3.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `openai_dart` - `v0.6.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `openai_realtime_dart` - `v0.1.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `tavily_dart` - `v0.2.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n#### `vertex_ai` - `v0.2.0+1`\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). 
([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n\n## 2025-10-15\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`anthropic_sdk_dart` - `v0.3.0`](#anthropic_sdk_dart---v030)\n - [`chromadb` - `v0.3.0`](#chromadb---v030)\n - [`googleai_dart` - `v1.0.0`](#googleai_dart---v100)\n - [`langchain` - `v0.8.0`](#langchain---v080)\n - [`langchain_anthropic` - `v0.3.0`](#langchain_anthropic---v030)\n - [`langchain_chroma` - `v0.3.0`](#langchain_chroma---v030)\n - [`langchain_community` - `v0.4.0`](#langchain_community---v040)\n - [`langchain_core` - `v0.4.0`](#langchain_core---v040)\n - [`langchain_firebase` - `v0.3.0`](#langchain_firebase---v030)\n - [`langchain_google` - `v0.7.0`](#langchain_google---v070)\n - [`langchain_mistralai` - `v0.3.0`](#langchain_mistralai---v030)\n - [`langchain_ollama` - `v0.4.0`](#langchain_ollama---v040)\n - [`langchain_openai` - `v0.8.0`](#langchain_openai---v080)\n - [`langchain_supabase` - `v0.2.0`](#langchain_supabase---v020)\n - [`langgraph` - `v0.0.1-dev.3`](#langgraph---v001-dev3)\n - [`mistralai_dart` - `v0.1.0`](#mistralai_dart---v010)\n - [`ollama_dart` - `v0.3.0`](#ollama_dart---v030)\n - [`openai_dart` - `v0.6.0`](#openai_dart---v060)\n - [`openai_realtime_dart` - `v0.1.0`](#openai_realtime_dart---v010)\n - [`tavily_dart` - `v0.2.0`](#tavily_dart---v020)\n - [`vertex_ai` - `v0.2.0`](#vertex_ai---v020)\n\nPackages with other changes:\n\n - There are no other changes in this release.\n\n---\n\n#### `googleai_dart` - `v1.0.0`\n\n**TL;DR**: Complete reimplementation with a new architecture, minimal dependencies, unified resource-based API, and full Gemini API coverage. 
Includes new Files, Batches, Caching, Corpora/RAG, RAG Stores, Dynamic Content, Permissions, Tuned Models, and Prediction (Veo) support.\n\n### What's new\n\n- **Unified client for both**:\n  - Google AI Gemini Developer API\n  - Vertex AI Gemini API\n- **Complete API coverage**: 78 endpoints.\n  - **Files API**: upload, list, get, delete, download.\n  - **Generated Files API**: list, get, getOperation (video outputs).\n  - **Cached Contents**: full CRUD.\n  - **Batch operations**: batchGenerateContent, batchEmbedContents, asyncBatchEmbedContent with LRO polling.\n  - **Corpora & RAG**: corpus CRUD (Google AI); documents/chunks/query, metadata filters, batch chunk ops (Vertex AI only).\n  - **RAG Stores**: documents list/create/get/delete/query + operations.\n  - **Dynamic Content**: generate/stream content with dynamic model IDs.\n  - **Permissions**: create/list/get/update/delete/transferOwnership for eligible resources.\n  - **Tuned Models**: list, get, listOperations, generation APIs.\n  - **Prediction (Veo)**: predict, predictLongRunning, operation polling, RAI filtering.\n- **Architecture**:\n  - Interceptor chain (Auth → Logging → Error).\n  - **Authentication**: API key, Bearer token, custom OAuth via `AuthProvider`.\n  - **Retry** with exponential backoff + jitter.\n  - **Abortable** requests via `abortTrigger` (streaming and non-streaming).\n  - **SSE** streaming parser.\n  - Central `GoogleAIConfig` (timeouts, retry policy, log level, baseUrl).\n- **Testing**:\n  - **560+ tests** covering all endpoints, error branches, streaming/abort flows.\n\n#### `anthropic_sdk_dart` - `v0.3.0`\n\n - **FIX**: Handle optional space after colon in SSE parser in anthropic_sdk_dart ([#790](https://github.com/davidmigloz/langchain_dart/issues/790)). 
([b31fbead](https://github.com/davidmigloz/langchain_dart/commit/b31fbead3ad4cb3ca9aabd6d8fee5e523df82d65))\n - **FEAT**: Add extended thinking support to anthropic_sdk_dart ([#803](https://github.com/davidmigloz/langchain_dart/issues/803)). ([1ccb74a6](https://github.com/davidmigloz/langchain_dart/commit/1ccb74a639d63325a7fcac8474ed0500dedd657e))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `ollama_dart` - `v0.3.0`\n\n - **FEAT**: Enhance CreateModelRequest with new fields in ollama_dart ([#802](https://github.com/davidmigloz/langchain_dart/issues/802)). ([c5c73549](https://github.com/davidmigloz/langchain_dart/commit/c5c73549c51354996b2ca6bbce9d4c4c721fc159))\n - **FEAT**: Add tool_name and index support in ollama_dart ([#800](https://github.com/davidmigloz/langchain_dart/issues/800)). ([f0f77286](https://github.com/davidmigloz/langchain_dart/commit/f0f77286c02c64ea7b75a011761e677fc168ffff))\n - **FEAT**: Add remote_model and remote_host support in ollama_dart ([#799](https://github.com/davidmigloz/langchain_dart/issues/799)). ([36b9d5f2](https://github.com/davidmigloz/langchain_dart/commit/36b9d5f2ba26df6dd79f7105903cdbdd25711ebe))\n - **FEAT**: Add truncate and shift support in ollama_dart ([#798](https://github.com/davidmigloz/langchain_dart/issues/798)). ([098a0815](https://github.com/davidmigloz/langchain_dart/commit/098a08150f2607bf283bb5d2aef82593c91cf221))\n - **FEAT**: Support high, medium, low for think in ollama_dart ([#797](https://github.com/davidmigloz/langchain_dart/issues/797)). 
([1cbe3fcf](https://github.com/davidmigloz/langchain_dart/commit/1cbe3fcf96926eb2e81b9f9a7aec8f37797c76d3))\n - **FEAT**: Support JSON schema in ResponseFormat in ollama_dart ([#796](https://github.com/davidmigloz/langchain_dart/issues/796)). ([2f399465](https://github.com/davidmigloz/langchain_dart/commit/2f3994656c32f32a79bb0b613bf38b9fd2e83b3d))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **REFACTOR**: Improve factory names in ollama_dart ([#806](https://github.com/davidmigloz/langchain_dart/issues/806)). ([fbfa7acb](https://github.com/davidmigloz/langchain_dart/commit/fbfa7acb071a8c2271a6cfb6506e9f6d8b863ca4))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `openai_dart` - `v0.6.0`\n\n - **FIX**: Correct text content serialization in CreateMessageRequest in openai_dart ([#805](https://github.com/davidmigloz/langchain_dart/issues/805)). ([e4569c96](https://github.com/davidmigloz/langchain_dart/commit/e4569c96ede23223ca23711579d2415bd05b4e27))\n - **FIX**: Handle optional space after colon in SSE parser in openai_dart ([#779](https://github.com/davidmigloz/langchain_dart/issues/779)). ([9defa827](https://github.com/davidmigloz/langchain_dart/commit/9defa827ce145533a85ead2bccfc25f5fa069358))\n - **FEAT**: Add OpenRouter provider routing support in openai_dart ([#794](https://github.com/davidmigloz/langchain_dart/issues/794)). ([6d306bc1](https://github.com/davidmigloz/langchain_dart/commit/6d306bc1f8e8fda8dcf581ec993eea0c755f9433))\n - **FEAT**: Add OpenAI-compatible vendor reasoning content support ([#793](https://github.com/davidmigloz/langchain_dart/issues/793)). 
([e0712c38](https://github.com/davidmigloz/langchain_dart/commit/e0712c3851377fae10a0b35606e1b5098abc575b))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `chromadb` - `v0.3.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain` - `v0.8.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_anthropic` - `v0.3.0`\n\n - **FEAT**: Add extended thinking support to langchain_anthropic ([#804](https://github.com/davidmigloz/langchain_dart/issues/804)). ([0e58fd31](https://github.com/davidmigloz/langchain_dart/commit/0e58fd316191091fb014287b24063fca61b4a2e5))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). 
([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_chroma` - `v0.3.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_community` - `v0.4.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_core` - `v0.4.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_firebase` - `v0.3.0`\n\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). 
([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_google` - `v0.7.0`\n\n - **REFACTOR**: Migrate langchain_google to the new googleai_dart client ([#788](https://github.com/davidmigloz/langchain_dart/issues/788)). ([f28edec9](https://github.com/davidmigloz/langchain_dart/commit/f28edec9206450d753db181f8af254df339d8290))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_mistralai` - `v0.3.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_ollama` - `v0.4.0`\n\n - **FEAT**: Add think support to Ollama and ChatOllama ([#801](https://github.com/davidmigloz/langchain_dart/issues/801)). ([553c7282](https://github.com/davidmigloz/langchain_dart/commit/553c72829073584b428770139939bd790da5c6aa))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **REFACTOR**: Improve factory names in ollama_dart ([#806](https://github.com/davidmigloz/langchain_dart/issues/806)). 
([fbfa7acb](https://github.com/davidmigloz/langchain_dart/commit/fbfa7acb071a8c2271a6cfb6506e9f6d8b863ca4))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_openai` - `v0.8.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langchain_supabase` - `v0.2.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `langgraph` - `v0.0.1-dev.3`\n\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `mistralai_dart` - `v0.1.0`\n\n - **FIX**: Add missing usage field to ChatCompletionStreamResponse in mistralai_dart ([#795](https://github.com/davidmigloz/langchain_dart/issues/795)). 
([4da75561](https://github.com/davidmigloz/langchain_dart/commit/4da75561b173313479f50441bf318bd4b948032d))\n - **FIX**: Handle optional space after colon in SSE parser in mistralai_dart ([#791](https://github.com/davidmigloz/langchain_dart/issues/791)). ([cefb1d2f](https://github.com/davidmigloz/langchain_dart/commit/cefb1d2f124ba64da60e3f33ec16672542cae28c))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `openai_realtime_dart` - `v0.1.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `tavily_dart` - `v0.2.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n#### `vertex_ai` - `v0.2.0`\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). 
([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n\n## 2025-08-31\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.2.3`](#anthropic_sdk_dart---v023)\n - [`chromadb` - `v0.2.3`](#chromadb---v023)\n - [`googleai_dart` - `v0.1.3`](#googleai_dart---v013)\n - [`mistralai_dart` - `v0.0.6`](#mistralai_dart---v006)\n - [`ollama_dart` - `v0.2.5`](#ollama_dart---v025)\n - [`openai_dart` - `v0.5.5`](#openai_dart---v055)\n - [`openai_realtime_dart` - `v0.0.6`](#openai_realtime_dart---v006)\n - [`tavily_dart` - `v0.1.3`](#tavily_dart---v013)\n - [`langchain_anthropic` - `v0.2.1+3`](#langchain_anthropic---v0213)\n - [`langchain_chroma` - `v0.2.2+3`](#langchain_chroma---v0223)\n - [`langchain_mistralai` - `v0.2.4+3`](#langchain_mistralai---v0243)\n - [`langchain_ollama` - `v0.3.3+3`](#langchain_ollama---v0333)\n - [`langchain_openai` - `v0.7.6+2`](#langchain_openai---v0762)\n - [`langchain_community` - `v0.3.4+3`](#langchain_community---v0343)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_anthropic` - `v0.2.1+3`\n - `langchain_chroma` - `v0.2.2+3`\n - `langchain_mistralai` - `v0.2.4+3`\n - `langchain_ollama` - `v0.3.3+3`\n - `langchain_openai` - `v0.7.6+2`\n - `langchain_community` - `v0.3.4+3`\n\n---\n\n#### `anthropic_sdk_dart` - `v0.2.3`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). 
([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `chromadb` - `v0.2.3`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `googleai_dart` - `v0.1.3`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `mistralai_dart` - `v0.0.6`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `ollama_dart` - `v0.2.5`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `openai_dart` - `v0.5.5`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `openai_realtime_dart` - `v0.0.6`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). ([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n#### `tavily_dart` - `v0.1.3`\n\n - **FEAT**: Migrate to Freezed v3 ([#773](https://github.com/davidmigloz/langchain_dart/issues/773)). 
([f87c8c03](https://github.com/davidmigloz/langchain_dart/commit/f87c8c03711ef382d2c9de19d378bee92e7631c1))\n\n\n## 2025-08-26\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_firebase` - `v0.2.2+4`](#langchain_firebase---v0224)\n\n---\n\n#### `langchain_firebase` - `v0.2.2+4`\n\n - **FIX**: depend_on_referenced_packages error ([#772](https://github.com/davidmigloz/langchain_dart/issues/772)). ([ef57d530](https://github.com/davidmigloz/langchain_dart/commit/ef57d5303331c7cb85fdb077a50e040a819ec94e))\n\n\n## 2025-08-25\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_firebase` - `v0.2.2+3`](#langchain_firebase---v0223)\n - [`openai_dart` - `v0.5.4+1`](#openai_dart---v0541)\n - [`langchain_openai` - `v0.7.6+1`](#langchain_openai---v0761)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.6+1`\n\n---\n\n#### `langchain_firebase` - `v0.2.2+3`\n\n - **FIX**: Breaking change in firebase_vertexai package ([#770](https://github.com/davidmigloz/langchain_dart/issues/770)). ([6a21546e](https://github.com/davidmigloz/langchain_dart/commit/6a21546e889956cc8e0f9282073757e8aa2abeb3))\n\n#### `openai_dart` - `v0.5.4+1`\n\n - **FIX**: Change CreateChatCompletionRequest.verbosity default value to null ([#771](https://github.com/davidmigloz/langchain_dart/issues/771)). 
([46d22905](https://github.com/davidmigloz/langchain_dart/commit/46d22905fee42dd7f1b149d676323d8bce57630f))\n\n\n## 2025-08-10\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.9`](#langchain---v079)\n - [`langchain_core` - `v0.3.9`](#langchain_core---v039)\n - [`langchain_firebase` - `v0.2.2+2`](#langchain_firebase---v0222)\n - [`langchain_google` - `v0.6.5+2`](#langchain_google---v0652)\n - [`langchain_openai` - `v0.7.6`](#langchain_openai---v076)\n - [`openai_dart` - `v0.5.4`](#openai_dart---v054)\n - [`langchain_anthropic` - `v0.2.1+2`](#langchain_anthropic---v0212)\n - [`langchain_chroma` - `v0.2.2+2`](#langchain_chroma---v0222)\n - [`langchain_community` - `v0.3.4+2`](#langchain_community---v0342)\n - [`langchain_mistralai` - `v0.2.4+2`](#langchain_mistralai---v0242)\n - [`langchain_ollama` - `v0.3.3+2`](#langchain_ollama---v0332)\n - [`langchain_pinecone` - `v0.1.1+2`](#langchain_pinecone---v0112)\n - [`langchain_supabase` - `v0.1.2+2`](#langchain_supabase---v0122)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_anthropic` - `v0.2.1+2`\n - `langchain_chroma` - `v0.2.2+2`\n - `langchain_community` - `v0.3.4+2`\n - `langchain_mistralai` - `v0.2.4+2`\n - `langchain_ollama` - `v0.3.3+2`\n - `langchain_pinecone` - `v0.1.1+2`\n - `langchain_supabase` - `v0.1.2+2`\n\n---\n\n#### `langchain` - `v0.7.9`\n\n - **DOCS**: Remove Code Assist AI badge ([#752](https://github.com/davidmigloz/langchain_dart/issues/752)). 
([dc0e70df](https://github.com/davidmigloz/langchain_dart/commit/dc0e70dfd9866267456b6caf0b76bf0cc646a425))\n\n#### `langchain_openai` - `v0.7.6`\n\n - **FEAT**: Support reasoningEffort, verbosity and other new fields in ChatOpenAI ([#762](https://github.com/davidmigloz/langchain_dart/issues/762)). ([9cc5d591](https://github.com/davidmigloz/langchain_dart/commit/9cc5d591e868bd5dd3e0a926e564d797dd602dab))\n - **FEAT**: Update ChatOpenAI default model to gpt-5-mini ([#761](https://github.com/davidmigloz/langchain_dart/issues/761)). ([b38ce320](https://github.com/davidmigloz/langchain_dart/commit/b38ce320971373454e10506e79ff75479b0391cd))\n\n#### `langchain_firebase` - `v0.2.2+2`\n\n - **FIX**: Batch sequential tool responses in GoogleAI & Firebase VertexAI ([#757](https://github.com/davidmigloz/langchain_dart/issues/757)). ([8ff44486](https://github.com/davidmigloz/langchain_dart/commit/8ff4448665d26b49c1e1077d0822703e7d853d39))\n\n#### `langchain_google` - `v0.6.5+2`\n\n - **FIX**: Batch sequential tool responses in GoogleAI & Firebase VertexAI ([#757](https://github.com/davidmigloz/langchain_dart/issues/757)). ([8ff44486](https://github.com/davidmigloz/langchain_dart/commit/8ff4448665d26b49c1e1077d0822703e7d853d39))\n\n#### `openai_dart` - `v0.5.4`\n\n - **FEAT**: Add gpt-5 to model catalog in openai_dart ([#758](https://github.com/davidmigloz/langchain_dart/issues/758)). ([f92c94ed](https://github.com/davidmigloz/langchain_dart/commit/f92c94ed799ab49e988f97880017f041522216a6))\n - **FEAT**: Add support for minimal reasoning effort in openai_dart ([#760](https://github.com/davidmigloz/langchain_dart/issues/760)). ([2ebc5506](https://github.com/davidmigloz/langchain_dart/commit/2ebc5506505e07f3d9b85ef60e1c54ed171a7480))\n - **FEAT**: Add Verbosity support in openai_dart ([#759](https://github.com/davidmigloz/langchain_dart/issues/759)). 
([3894da76](https://github.com/davidmigloz/langchain_dart/commit/3894da76229bb0fd4a5124b68cd02e2996a6854a))\n\n\n## 2025-07-30\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.2.2`](#anthropic_sdk_dart---v022)\n - [`chromadb` - `v0.2.2`](#chromadb---v022)\n - [`googleai_dart` - `v0.1.2`](#googleai_dart---v012)\n - [`langchain` - `v0.7.8+1`](#langchain---v0781)\n - [`langchain_anthropic` - `v0.2.1+1`](#langchain_anthropic---v0211)\n - [`langchain_chroma` - `v0.2.2+1`](#langchain_chroma---v0221)\n - [`langchain_community` - `v0.3.4+1`](#langchain_community---v0341)\n - [`langchain_core` - `v0.3.8`](#langchain_core---v038)\n - [`langchain_firebase` - `v0.2.2+1`](#langchain_firebase---v0221)\n - [`langchain_google` - `v0.6.5+1`](#langchain_google---v0651)\n - [`langchain_mistralai` - `v0.2.4+1`](#langchain_mistralai---v0241)\n - [`langchain_ollama` - `v0.3.3+1`](#langchain_ollama---v0331)\n - [`langchain_openai` - `v0.7.5`](#langchain_openai---v075)\n - [`langchain_pinecone` - `v0.1.1+1`](#langchain_pinecone---v0111)\n - [`langchain_supabase` - `v0.1.2+1`](#langchain_supabase---v0121)\n - [`mistralai_dart` - `v0.0.5`](#mistralai_dart---v005)\n - [`ollama_dart` - `v0.2.4`](#ollama_dart---v024)\n - [`openai_dart` - `v0.5.3`](#openai_dart---v053)\n - [`openai_realtime_dart` - `v0.0.5`](#openai_realtime_dart---v005)\n - [`tavily_dart` - `v0.1.2`](#tavily_dart---v012)\n - [`vertex_ai` - `v0.1.2`](#vertex_ai---v012)\n\n---\n\n#### `anthropic_sdk_dart` - `v0.2.2`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `chromadb` - `v0.2.2`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). 
([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `googleai_dart` - `v0.1.2`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain` - `v0.7.8+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_anthropic` - `v0.2.1+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_chroma` - `v0.2.2+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_community` - `v0.3.4+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_core` - `v0.3.8`\n\n - **FEAT**: Make CreateChatCompletionStreamResponse.choices field nullable to support Groq's OpenAI-compatible API ([#742](https://github.com/davidmigloz/langchain_dart/issues/742)). ([76fbbdc6](https://github.com/davidmigloz/langchain_dart/commit/76fbbdc6f78e83f1f622ed73ff4b27b37a4f744b))\n - **FIX**: Add multi-LLM compatibility for Tool.fromFunction getInputFromJson ([#738](https://github.com/davidmigloz/langchain_dart/issues/738)). 
([291a0efc](https://github.com/davidmigloz/langchain_dart/commit/291a0efcebe1696f609ecbd0b803cc9324474db5))\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_firebase` - `v0.2.2+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_google` - `v0.6.5+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_mistralai` - `v0.2.4+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_ollama` - `v0.3.3+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_openai` - `v0.7.5`\n\n - **FEAT**: Make CreateChatCompletionStreamResponse.choices field nullable to support Groq's OpenAI-compatible API ([#742](https://github.com/davidmigloz/langchain_dart/issues/742)). ([76fbbdc6](https://github.com/davidmigloz/langchain_dart/commit/76fbbdc6f78e83f1f622ed73ff4b27b37a4f744b))\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). 
([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_pinecone` - `v0.1.1+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `langchain_supabase` - `v0.1.2+1`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `mistralai_dart` - `v0.0.5`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `ollama_dart` - `v0.2.4`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `openai_dart` - `v0.5.3`\n\n - **FEAT**: Make CreateChatCompletionStreamResponse.choices field nullable to support Groq's OpenAI-compatible API ([#742](https://github.com/davidmigloz/langchain_dart/issues/742)). ([76fbbdc6](https://github.com/davidmigloz/langchain_dart/commit/76fbbdc6f78e83f1f622ed73ff4b27b37a4f744b))\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `openai_realtime_dart` - `v0.0.5`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). 
([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `tavily_dart` - `v0.1.2`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n#### `vertex_ai` - `v0.1.2`\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n\n## 2025-06-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.5.2`](#openai_dart---v052)\n - [`langchain_openai` - `v0.7.4+2`](#langchain_openai---v0742)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.4+2`\n\n---\n\n#### `openai_dart` - `v0.5.2`\n\n - **FEAT**: Make Model.object/owned_by fields nullable to support OpenRouter's OpenAI-compatible API ([#736](https://github.com/davidmigloz/langchain_dart/issues/736)). ([afa98b8c](https://github.com/davidmigloz/langchain_dart/commit/afa98b8c44c612126f2f6ee32d6aecdad41663b4))\n - **FEAT**: Make Model.created field nullable to support Google's OpenAI-compatible API ([#735](https://github.com/davidmigloz/langchain_dart/issues/735)). 
([d617e49f](https://github.com/davidmigloz/langchain_dart/commit/d617e49f9d5760e2714d27d76cf699364e9cfe51))\n\n\n## 2025-06-18\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.5.1`](#openai_dart---v051)\n - [`openai_realtime_dart` - `v0.0.4+1`](#openai_realtime_dart---v0041)\n - [`langchain_openai` - `v0.7.4+1`](#langchain_openai---v0741)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.4+1`\n\n---\n\n#### `openai_dart` - `v0.5.1`\n\n - **FEAT**: Make ToolCallChunk.index field nullable to support Gemini OpenAI-compatible API ([#733](https://github.com/davidmigloz/langchain_dart/issues/733)). ([19cb49c0](https://github.com/davidmigloz/langchain_dart/commit/19cb49c09e42204cc523fbbdd3941b3070146063))\n - **FEAT**: Make Embedding.index field nullable to support Gemini OpenAI-compatible API ([#729](https://github.com/davidmigloz/langchain_dart/issues/729)). ([9d22f197](https://github.com/davidmigloz/langchain_dart/commit/9d22f1972d99b8b1f6dbcfcb3f7bfba2257fca5b))\n\n#### `openai_realtime_dart` - `v0.0.4+1`\n\n - **FIX**: toolChoice required infinite loop in openai_realtime_dart ([#723](https://github.com/davidmigloz/langchain_dart/issues/723)). 
([f52211cc](https://github.com/davidmigloz/langchain_dart/commit/f52211cca063d64e346a4ed023c337a351ca3e33))\n\n\n## 2025-06-12\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`openai_dart` - `v0.5.0`](#openai_dart---v050)\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.2.1`](#anthropic_sdk_dart---v021)\n - [`chromadb` - `v0.2.1`](#chromadb---v021)\n - [`googleai_dart` - `v0.1.1`](#googleai_dart---v011)\n - [`langchain` - `v0.7.8`](#langchain---v078)\n - [`langchain_anthropic` - `v0.2.1`](#langchain_anthropic---v021)\n - [`langchain_chroma` - `v0.2.2`](#langchain_chroma---v022)\n - [`langchain_community` - `v0.3.4`](#langchain_community---v034)\n - [`langchain_core` - `v0.3.7`](#langchain_core---v037)\n - [`langchain_firebase` - `v0.2.2`](#langchain_firebase---v022)\n - [`langchain_google` - `v0.6.5`](#langchain_google---v065)\n - [`langchain_mistralai` - `v0.2.4`](#langchain_mistralai---v024)\n - [`langchain_ollama` - `v0.3.3`](#langchain_ollama---v033)\n - [`langchain_openai` - `v0.7.4`](#langchain_openai---v074)\n - [`langchain_pinecone` - `v0.1.1`](#langchain_pinecone---v011)\n - [`langchain_supabase` - `v0.1.2`](#langchain_supabase---v012)\n - [`langgraph` - `v0.0.1-dev.2`](#langgraph---v001-dev2)\n - [`mistralai_dart` - `v0.0.4`](#mistralai_dart---v004)\n - [`ollama_dart` - `v0.2.3`](#ollama_dart---v023)\n - [`openai_realtime_dart` - `v0.0.4`](#openai_realtime_dart---v004)\n - [`tavily_dart` - `v0.1.1`](#tavily_dart---v011)\n - [`vertex_ai` - `v0.1.1`](#vertex_ai---v011)\n\n---\n\n#### `openai_dart` - `v0.5.0`\n\n - **BREAKING** **FEAT**: Align OpenAI API changes ([#706](https://github.com/davidmigloz/langchain_dart/issues/706)). ([b8b04ca6](https://github.com/davidmigloz/langchain_dart/commit/b8b04ca618ffbc6f84b935a89852767479da1611))\n - **FEAT**: Add support for web search, gpt-image-1 and list chat completions ([#716](https://github.com/davidmigloz/langchain_dart/issues/716)). 
([269dea03](https://github.com/davidmigloz/langchain_dart/commit/269dea035be679c8d2fcc03f526703c76c72c5d4))\n - **FEAT**: Update OpenAI model catalog ([#714](https://github.com/davidmigloz/langchain_dart/issues/714)). ([68df4558](https://github.com/davidmigloz/langchain_dart/commit/68df4558a01e872c73ad465f4b85f1b5c61ddd50))\n - **FEAT**: Change the default value of 'reasoning_effort' from medium to null ([#713](https://github.com/davidmigloz/langchain_dart/issues/713)). ([f224572e](https://github.com/davidmigloz/langchain_dart/commit/f224572eff249daa1971a7f287c150ee3779a6b2))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **DOCS**: Fix TruncationObject docs typo in openai_dart. ([ee5ed4fd](https://github.com/davidmigloz/langchain_dart/commit/ee5ed4fdfdf4213ceec05d7a5a2b24cca95ae386))\n - **DOCS**: Document Azure Assistants API base url ([#626](https://github.com/davidmigloz/langchain_dart/issues/626)). ([c3459eea](https://github.com/davidmigloz/langchain_dart/commit/c3459eea354f36a11f69145a7313b3feda7a15eb))\n\n#### `anthropic_sdk_dart` - `v0.2.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). 
([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `chromadb` - `v0.2.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `googleai_dart` - `v0.1.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). 
([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain` - `v0.7.8`\n\n - **FEAT**: Implement Markdown text splitter ([#635](https://github.com/davidmigloz/langchain_dart/issues/635)). ([242e4be2](https://github.com/davidmigloz/langchain_dart/commit/242e4be227503f93120b209bca350ed6a055f362))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FEAT**: Add to/fromMap serialization to ChatMessage, PromptValue & ChatHistory ([#681](https://github.com/davidmigloz/langchain_dart/issues/681)). ([d239c7c7](https://github.com/davidmigloz/langchain_dart/commit/d239c7c7b4a1504559e475466be7f176521a0473))\n - **FIX**: Correctly calculate start_index when using chunkOverlap in TextSplitter ([#640](https://github.com/davidmigloz/langchain_dart/issues/640)). ([71dd5ac3](https://github.com/davidmigloz/langchain_dart/commit/71dd5ac31351d0ea45989c43a250a35668cb01b6))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: Made apiKey optional for `TavilyAnswerTool` and `TavilySearchResultsTool` ([#646](https://github.com/davidmigloz/langchain_dart/issues/646)). ([5085ea4a](https://github.com/davidmigloz/langchain_dart/commit/5085ea4ad8b5cd072832e73afcbb7075a6375307))\n\n#### `langchain_anthropic` - `v0.2.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). 
([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_chroma` - `v0.2.2`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_community` - `v0.3.4`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix static instance of ObjectBoxVectorStore ([#684](https://github.com/davidmigloz/langchain_dart/issues/684)). 
([719ead93](https://github.com/davidmigloz/langchain_dart/commit/719ead93c6e91d42bb8e45910ccf0da4e3e51afd))\n - **FIX**: Fix langchain_community WASM compatibility ([#660](https://github.com/davidmigloz/langchain_dart/issues/660)). ([0be8aae4](https://github.com/davidmigloz/langchain_dart/commit/0be8aae44950fddd9ac9538ccfa5d017e6f585a0))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: Made apiKey optional for `TavilyAnswerTool` and `TavilySearchResultsTool` ([#646](https://github.com/davidmigloz/langchain_dart/issues/646)). ([5085ea4a](https://github.com/davidmigloz/langchain_dart/commit/5085ea4ad8b5cd072832e73afcbb7075a6375307))\n\n#### `langchain_core` - `v0.3.7`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FEAT**: Add to/fromMap serialization to ChatMessage, PromptValue & ChatHistory ([#681](https://github.com/davidmigloz/langchain_dart/issues/681)). ([d239c7c7](https://github.com/davidmigloz/langchain_dart/commit/d239c7c7b4a1504559e475466be7f176521a0473))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: RunnableMap doesn't invoke multiple Runnables in parallel ([#649](https://github.com/davidmigloz/langchain_dart/issues/649)). ([fc722d85](https://github.com/davidmigloz/langchain_dart/commit/fc722d85eef6644f7593dd26c7fd55a56615595b))\n\n#### `langchain_firebase` - `v0.2.2`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). 
([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Migrate firebase_vertexai dep to 1.4.0 ([#663](https://github.com/davidmigloz/langchain_dart/issues/663)). ([4fca38c5](https://github.com/davidmigloz/langchain_dart/commit/4fca38c5599c4c5a058ece1a7d9c4e276b716432))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_google` - `v0.6.5`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_mistralai` - `v0.2.4`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). 
([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **DOCS**: Add langchain_mistralai example ([#662](https://github.com/davidmigloz/langchain_dart/issues/662)). ([eca7a24d](https://github.com/davidmigloz/langchain_dart/commit/eca7a24d50629b9ce7d61a197bfd9acfb74a1261))\n\n#### `langchain_ollama` - `v0.3.3`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **DOCS**: Add langchain_ollama example ([#661](https://github.com/davidmigloz/langchain_dart/issues/661)). 
([0bba6cb4](https://github.com/davidmigloz/langchain_dart/commit/0bba6cb4ebe4386ad53b1aa02836d375d5f59cbe))\n\n#### `langchain_openai` - `v0.7.4`\n\n - **FEAT**: Update OpenAI model catalog ([#714](https://github.com/davidmigloz/langchain_dart/issues/714)). ([68df4558](https://github.com/davidmigloz/langchain_dart/commit/68df4558a01e872c73ad465f4b85f1b5c61ddd50))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_pinecone` - `v0.1.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langchain_supabase` - `v0.1.2`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). 
([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `langgraph` - `v0.0.1-dev.2`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n\n#### `mistralai_dart` - `v0.0.4`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `ollama_dart` - `v0.2.3`\n\n - **FEAT**: Add think/thinking params to ollama_dart ([#721](https://github.com/davidmigloz/langchain_dart/issues/721)). ([701d7968](https://github.com/davidmigloz/langchain_dart/commit/701d7968baaa07f5612a25d74a1d19c2c24e7077))\n - **FEAT**: Add capabilities, projector_info, tensors and modified_at to Ollama's ModelInfo ([#690](https://github.com/davidmigloz/langchain_dart/issues/690)). 
([c5e247db](https://github.com/davidmigloz/langchain_dart/commit/c5e247db6aadedaa6ec668652e416477a6c03b51))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n#### `openai_realtime_dart` - `v0.0.4`\n\n - **FEAT**: Align latest OpenAI Realtime API changes ([#707](https://github.com/davidmigloz/langchain_dart/issues/707)). ([c2fc4f53](https://github.com/davidmigloz/langchain_dart/commit/c2fc4f53492726ec78639d4a9ef55dfc054c2fae))\n - **FEAT**: Add language and prompt properties to InputAudioTranscriptionConfig ([#698](https://github.com/davidmigloz/langchain_dart/issues/698)). ([5daf6e60](https://github.com/davidmigloz/langchain_dart/commit/5daf6e603f18be7e330155d32648356ae05872bd))\n - **FEAT**: Add RealtimeEvent for input audio transcription delta updates ([#710](https://github.com/davidmigloz/langchain_dart/issues/710)). ([0848a253](https://github.com/davidmigloz/langchain_dart/commit/0848a253c36c0ab9f7e9b2bc0db5f000c6952ef5))\n - **FEAT**: Add support for custom headers in WebSocket connections ([#693](https://github.com/davidmigloz/langchain_dart/issues/693)). ([de81ef89](https://github.com/davidmigloz/langchain_dart/commit/de81ef89b33e54f1671ccf244fe1d088b203f76a))\n - **FEAT**: Allow to pass a custom model in OpenAI RealtimeClient ([#654](https://github.com/davidmigloz/langchain_dart/issues/654)). 
([60feae46](https://github.com/davidmigloz/langchain_dart/commit/60feae46a6dc6bcaf1779d512d0d599fdcd0e1a4))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FIX**: Server VAD is enabled after setting turnDetection to null ([#668](https://github.com/davidmigloz/langchain_dart/issues/668)). ([39c21ba4](https://github.com/davidmigloz/langchain_dart/commit/39c21ba4b2f40d3622ab492d1661a0a09424f393))\n - **FIX**: previous_item_id should be nullable in openai_realtime_dart ([#639](https://github.com/davidmigloz/langchain_dart/issues/639)). ([0f84850f](https://github.com/davidmigloz/langchain_dart/commit/0f84850f822b0872ad9c2b0abf87d14e2a5b29dd))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **REFACTOR**: Remove unused property in openai_realtime_dart ([#625](https://github.com/davidmigloz/langchain_dart/issues/625)). ([231ccab2](https://github.com/davidmigloz/langchain_dart/commit/231ccab2d3f3cef3487d214b88c4ee983a62f362))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n\n#### `tavily_dart` - `v0.1.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FIX**: Made apiKey optional for `TavilyAnswerTool` and `TavilySearchResultsTool` ([#646](https://github.com/davidmigloz/langchain_dart/issues/646)). 
([5085ea4a](https://github.com/davidmigloz/langchain_dart/commit/5085ea4ad8b5cd072832e73afcbb7075a6375307))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n\n#### `vertex_ai` - `v0.1.1`\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). 
([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n\n## 2024-12-16\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`anthropic_sdk_dart` - `v0.2.0+1`](#anthropic_sdk_dart---v0201)\n - [`chromadb` - `v0.2.0+2`](#chromadb---v0202)\n - [`googleai_dart` - `v0.1.0+3`](#googleai_dart---v0103)\n - [`langchain` - `v0.7.7+2`](#langchain---v0772)\n - [`langchain_anthropic` - `v0.2.0+1`](#langchain_anthropic---v0201)\n - [`langchain_community` - `v0.3.3`](#langchain_community---v033)\n - [`langchain_core` - `v0.3.6+1`](#langchain_core---v0361)\n - [`langchain_firebase` - `v0.2.1+4`](#langchain_firebase---v0214)\n - [`langchain_google` - `v0.6.4+2`](#langchain_google---v0642)\n - [`langchain_mistralai` - `v0.2.3+2`](#langchain_mistralai---v0232)\n - [`langchain_ollama` - `v0.3.2+2`](#langchain_ollama---v0322)\n - [`langchain_openai` - `v0.7.3`](#langchain_openai---v073)\n - [`mistralai_dart` - `v0.0.3+4`](#mistralai_dart---v0034)\n - [`ollama_dart` - `v0.2.2+1`](#ollama_dart---v0221)\n - [`openai_dart` - `v0.4.5`](#openai_dart---v045)\n - [`openai_realtime_dart` - `v0.0.3+1`](#openai_realtime_dart---v0031)\n - [`tavily_dart` - `v0.1.0+1`](#tavily_dart---v0101)\n - [`vertex_ai` - `v0.1.0+3`](#vertex_ai---v0103)\n - [`langchain_chroma` - `v0.2.1+5`](#langchain_chroma---v0215)\n - [`langchain_pinecone` - `v0.1.0+11`](#langchain_pinecone---v01011)\n - [`langchain_supabase` - `v0.1.1+4`](#langchain_supabase---v0114)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. 
Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_chroma` - `v0.2.1+5`\n - `langchain_pinecone` - `v0.1.0+11`\n - `langchain_supabase` - `v0.1.1+4`\n\n---\n\n#### `langchain` - `v0.7.7+2`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_core` - `v0.3.6+1`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_community` - `v0.3.3`\n\n - **FEAT**: Add support for DirectoryLoader ([#620](https://github.com/davidmigloz/langchain_dart/issues/620)). ([4730f2a3](https://github.com/davidmigloz/langchain_dart/commit/4730f2a376b152ea38e5204125209ef01f29cab9))\n - **FEAT**: Expose internal store in ObjectBoxVectorStore ([#611](https://github.com/davidmigloz/langchain_dart/issues/611)). ([c33f2e07](https://github.com/davidmigloz/langchain_dart/commit/c33f2e07c31ddd91dae16856df3b6c8ffddc45e9))\n - **FIX**: Chinese character support on web loader ([#600](https://github.com/davidmigloz/langchain_dart/issues/600)). ([48e64d5b](https://github.com/davidmigloz/langchain_dart/commit/48e64d5b01aa8469dbf7a973350eeac26b43df8f))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). 
([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `langchain_openai` - `v0.7.3`\n\n - **FEAT**: Add gpt-4o-2024-11-20 to model catalog in openai_dart ([#614](https://github.com/davidmigloz/langchain_dart/issues/614)). ([bf333081](https://github.com/davidmigloz/langchain_dart/commit/bf33308165869792446c3897db95e6ad7a7cb519))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_anthropic` - `v0.2.0+1`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_firebase` - `v0.2.1+4`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_google` - `v0.6.4+2`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_mistralai` - `v0.2.3+2`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `langchain_ollama` - `v0.3.2+2`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). 
([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n#### `openai_dart` - `v0.4.5`\n\n - **FEAT**: Support Predicted Outputs in openai_dart ([#613](https://github.com/davidmigloz/langchain_dart/issues/613)). ([315fe0fd](https://github.com/davidmigloz/langchain_dart/commit/315fe0fd3227e2c5a1a874be7fd01e25dcd7b33c))\n - **FEAT**: Support streaming audio responses in chat completions in openai_dart ([#615](https://github.com/davidmigloz/langchain_dart/issues/615)). ([6da756a8](https://github.com/davidmigloz/langchain_dart/commit/6da756a87be35a34048c6671f7629b553bf0699e))\n - **FEAT**: Add gpt-4o-2024-11-20 to model catalog in openai_dart ([#614](https://github.com/davidmigloz/langchain_dart/issues/614)). ([bf333081](https://github.com/davidmigloz/langchain_dart/commit/bf33308165869792446c3897db95e6ad7a7cb519))\n - **FIX**: Default store field to null in openai_dart to support Azure and Groq APIs ([#608](https://github.com/davidmigloz/langchain_dart/issues/608)). ([21332960](https://github.com/davidmigloz/langchain_dart/commit/21332960c2c9928873b5b2948b86af31245f9312))\n - **FIX**: Make first_id and last_id nullable in list endpoints in openai_dart ([#607](https://github.com/davidmigloz/langchain_dart/issues/607)). ([7cfc4ddf](https://github.com/davidmigloz/langchain_dart/commit/7cfc4ddf469846624d3dd6f3f86cab54c5333395))\n - **DOCS**: Update OpenAI endpoints descriptions ([#612](https://github.com/davidmigloz/langchain_dart/issues/612)). ([10c66888](https://github.com/davidmigloz/langchain_dart/commit/10c6688884f8bc42ddaa771996030a42125333de))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). 
([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `openai_realtime_dart` - `v0.0.3+1`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n - **DOCS**: Update openai_spec_official.yaml. ([ee2eb35b](https://github.com/davidmigloz/langchain_dart/commit/ee2eb35b983afdb504ae52d3dfca5fea11a1dadb))\n - **DOCS**: Update README.md. ([44291a06](https://github.com/davidmigloz/langchain_dart/commit/44291a06af7ae26f0a5beadfec23f2128f5e2415))\n\n#### `anthropic_sdk_dart` - `v0.2.0+1`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `chromadb` - `v0.2.0+2`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). 
([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `googleai_dart` - `v0.1.0+3`\n\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `mistralai_dart` - `v0.0.3+4`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `ollama_dart` - `v0.2.2+1`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `tavily_dart` - `v0.1.0+1`\n\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). ([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n#### `vertex_ai` - `v0.1.0+3`\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). 
([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n\n## 2024-10-31\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.4.4`](#openai_dart---v044)\n - [`openai_realtime_dart` - `v0.0.3`](#openai_realtime_dart---v003)\n - [`langchain_openai` - `v0.7.2+5`](#langchain_openai---v0725)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.2+5`\n\n---\n\n#### `openai_dart` - `v0.4.4`\n\n - **FEAT**: Add five new voice types to Chat Completions API in openai_dart ([#594](https://github.com/davidmigloz/langchain_dart/issues/594)). ([543f2977](https://github.com/davidmigloz/langchain_dart/commit/543f2977ea1e6dd6e49fa4a2ae9a084ae525003e))\n\n#### `openai_realtime_dart` - `v0.0.3`\n\n - **FEAT**: Add five new voice types in openai_realtime_dart and minor improvements ([#593](https://github.com/davidmigloz/langchain_dart/issues/593)). ([6d0c8d3f](https://github.com/davidmigloz/langchain_dart/commit/6d0c8d3fceaab9d3eac7c5265b1e8b50deef9cc4))\n - **DOCS**: Update openai_realtime_dart README.md. 
([7e9e1393](https://github.com/davidmigloz/langchain_dart/commit/7e9e139315b31308817fc71439feceb705d06ec2))\n\n\n## 2024-10-29\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`anthropic_sdk_dart` - `v0.2.0`](#anthropic_sdk_dart---v020)\n - [`langchain_anthropic` - `v0.2.0`](#langchain_anthropic---v020)\n\nPackages with other changes:\n\n - [`langchain_community` - `v0.3.2+2`](#langchain_community---v0322)\n - [`openai_dart` - `v0.4.3`](#openai_dart---v043)\n - [`openai_realtime_dart` - `v0.0.2`](#openai_realtime_dart---v002)\n - [`langchain_openai` - `v0.7.2+4`](#langchain_openai---v0724)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.2+4`\n\n---\n\n#### `langchain_anthropic` - `v0.2.0`\n\n - **FEAT**: Update ChatAnthropic default model to claude-3-5-sonnet-20241022 ([#584](https://github.com/davidmigloz/langchain_dart/issues/584)). ([4f0d9cfb](https://github.com/davidmigloz/langchain_dart/commit/4f0d9cfb0a71c567d1b37842cd44dac1f7308001))\n\n#### `langchain_community` - `v0.3.2+2`\n\n - **FIX**: Update ObjectBox SDK to v4.0.3 to fix StorageException in iOS ([#581](https://github.com/davidmigloz/langchain_dart/issues/581)). ([943811a5](https://github.com/davidmigloz/langchain_dart/commit/943811a5d5ab1c7ef3e83db0c45082a0d4d1fc4a))\n\n#### `anthropic_sdk_dart` - `v0.2.0`\n\n- **FEAT**: Add support for Message Batches in anthropic_sdk_dart ([#585](https://github.com/davidmigloz/langchain_dart/issues/585)). ([a41270a0](https://github.com/davidmigloz/langchain_dart/commit/a41270a06135112afce0fa4da985c92e2282ba08))\n- **FEAT**: Add claude-3-5-sonnet-20241022 to model catalog in anthropic_sdk_dart ([#583](https://github.com/davidmigloz/langchain_dart/issues/583)). 
([0cc59e13](https://github.com/davidmigloz/langchain_dart/commit/0cc59e137b69b19c31eeefdad28e5cf757abe8d3))\n- **BREAKING** **FEAT**: Add support for prompt caching in anthropic_sdk_dart ([#587](https://github.com/davidmigloz/langchain_dart/issues/587)). ([79dabaa5](https://github.com/davidmigloz/langchain_dart/commit/79dabaa509fd37188999a2ee7282b8b334cce322))\n- **BREAKING** **FEAT**: Add computer use support in anthropic_sdk_dart ([#586](https://github.com/davidmigloz/langchain_dart/issues/586)). ([36c4a3e3](https://github.com/davidmigloz/langchain_dart/commit/36c4a3e39728398e885fe229c60aed33e645fa9a))\n- **DOCS**: Update anthropic_sdk_dart readme. ([78b7bccf](https://github.com/davidmigloz/langchain_dart/commit/78b7bccf277b147a230f9ec5eea61965baab0323))\n\n#### `openai_dart` - `v0.4.3`\n\n - **FEAT**: Add support for audio in chat completions in openai_dart ([#577](https://github.com/davidmigloz/langchain_dart/issues/577)). ([0fb058cd](https://github.com/davidmigloz/langchain_dart/commit/0fb058cd9215c83b0ec5a10c84b125bb44845bf5))\n - **FEAT**: Add support for storing outputs for model distillation and metadata in openai_dart ([#578](https://github.com/davidmigloz/langchain_dart/issues/578)). ([c9b8bdf4](https://github.com/davidmigloz/langchain_dart/commit/c9b8bdf425b809a5b94a314173b57a43cc3fbc88))\n - **FEAT**: Support multi-modal moderations in openai_dart ([#576](https://github.com/davidmigloz/langchain_dart/issues/576)). ([45b9f423](https://github.com/davidmigloz/langchain_dart/commit/45b9f423a0aef2a1f8cad4ddac73a6a7d8cd89d2))\n - **FIX**: submitThreadToolOutputsToRunStream not returning any events ([#574](https://github.com/davidmigloz/langchain_dart/issues/574)). ([00803ac7](https://github.com/davidmigloz/langchain_dart/commit/00803ac7aedabcbca4c75e3918a4cb441f9e7b84))\n - **DOCS**: Add xAI to list of OpenAI-compatible APIs in openai_dart ([#582](https://github.com/davidmigloz/langchain_dart/issues/582)). 
([017cb74f](https://github.com/davidmigloz/langchain_dart/commit/017cb74fc0ca3510d07f9f02c1efade8d37aecac))\n - **DOCS**: Fix openai_dart assistants API outdated documentation ([#579](https://github.com/davidmigloz/langchain_dart/issues/579)). ([624c4128](https://github.com/davidmigloz/langchain_dart/commit/624c41287a65904db5c91d19c4305bf377b6b339))\n\n#### `openai_realtime_dart` - `v0.0.2`\n\n- **FEAT**: Make openai_realtime_dart client to strong-typed ([#590](https://github.com/davidmigloz/langchain_dart/issues/590)). ([d84e88bf](https://github.com/davidmigloz/langchain_dart/commit/d84e88bf04956df2ab6a4606aa812ae4f3b6cd31))\n\n## 2024-10-14\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.4.2+2`](#openai_dart---v0422)\n - [`openai_realtime_dart` - `v0.0.1+2`](#openai_realtime_dart---v0012)\n - [`langchain_openai` - `v0.7.2+3`](#langchain_openai---v0723)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.7.2+3`\n\n---\n\n#### `openai_realtime_dart` - `v0.0.1+2`\n\n - **FIX**: Tool calling not working in openai_realtime_dart ([#572](https://github.com/davidmigloz/langchain_dart/issues/572)). ([f6b14919](https://github.com/davidmigloz/langchain_dart/commit/f6b14919463353a3d1a8eb2f1b8eb83c45340fa1))\n\n#### `openai_dart` - `v0.4.2+2`\n\n - **DOCS**: Fix typo in openai_dart. 
([e7ddd558](https://github.com/davidmigloz/langchain_dart/commit/e7ddd558da643e3cc59581b8b0e69473c7cb9779))\n\n\n## 2024-10-09\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.7+1`](#langchain---v0771)\n - [`langchain_chroma` - `v0.2.1+4`](#langchain_chroma---v0214)\n - [`langchain_community` - `v0.3.2+1`](#langchain_community---v0321)\n - [`langchain_firebase` - `v0.2.1+3`](#langchain_firebase---v0213)\n - [`langchain_google` - `v0.6.4+1`](#langchain_google---v0641)\n - [`langchain_ollama` - `v0.3.2+1`](#langchain_ollama---v0321)\n - [`langchain_openai` - `v0.7.2+2`](#langchain_openai---v0722)\n - [`langchain_pinecone` - `v0.1.0+10`](#langchain_pinecone---v01010)\n - [`openai_realtime_dart` - `v0.0.1+1`](#openai_realtime_dart---v0011)\n\n---\n\n#### `langchain` - `v0.7.7+1`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_chroma` - `v0.2.1+4`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_community` - `v0.3.2+1`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_firebase` - `v0.2.1+3`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). 
([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_google` - `v0.6.4+1`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_ollama` - `v0.3.2+1`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_openai` - `v0.7.2+2`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `langchain_pinecone` - `v0.1.0+10`\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n#### `openai_realtime_dart` - `v0.0.1+1`\n\n - **DOCS**: Add note about the openai_dart client. 
([26de8d97](https://github.com/davidmigloz/langchain_dart/commit/26de8d974fcd27fec857b93cc309e76860777dea))\n\n\n## 2024-10-08\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nNew packages:\n\n - [`openai_realtime_dart` - `v0.0.1`](#openai_realtime_dart---v001)\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.7`](#langchain---v077)\n - [`langchain_google` - `v0.6.4`](#langchain_google---v064)\n - [`openai_dart` - `v0.4.2+1`](#openai_dart---v0421)\n\n---\n\n#### `langchain` - `v0.7.7`\n\n - **REFACTOR**: Update deprecated UUID constant ([#558](https://github.com/davidmigloz/langchain_dart/issues/558)). ([8d9f14b4](https://github.com/davidmigloz/langchain_dart/commit/8d9f14b4c394f4652727eadf5849355cd9fa2f19))\n\n#### `langchain_google` - `v0.6.4`\n\n - **FEAT**: Add support for code execution in ChatGoogleGenerativeAI ([#564](https://github.com/davidmigloz/langchain_dart/issues/564)). ([020bc096](https://github.com/davidmigloz/langchain_dart/commit/020bc096e2bb83bd372d0568a111481df188a7f2))\n\n#### `openai_realtime_dart` - `v0.0.1`\n\n - **FEAT**: Implement openai_realtime_dart, a Dart client for OpenAI Realtime API ([#562](https://github.com/davidmigloz/langchain_dart/issues/562)). ([9f7406f7](https://github.com/davidmigloz/langchain_dart/commit/9f7406f7014624bf5086c60fb902bff70224ee15))\n\n#### `openai_dart` - `v0.4.2+1`\n\n - **DOCS**: Add note about the new [openai_realtime_dart](https://pub.dev/packages/openai_realtime_dart) client. 
([44672f0a](https://github.com/davidmigloz/langchain_dart/commit/44672f0a453a1b2e1b31bc5ef400f4c8ac7a4e76))\n\n## 2024-09-25\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`langchain` - `v0.7.6`](#langchain---v076)\n- [`langchain_core` - `v0.3.6`](#langchain_core---v036)\n- [`langchain_community` - `v0.3.2`](#langchain_community---v032)\n- [`langchain_firebase` - `v0.2.1+2`](#langchain_firebase---v0212)\n- [`langchain_google` - `v0.6.3+1`](#langchain_google---v0631)\n- [`langchain_ollama` - `v0.3.2`](#langchain_ollama---v032)\n- [`langchain_openai` - `v0.7.2`](#langchain_openai---v072)\n- [`ollama_dart` - `v0.2.2`](#ollama_dart---v022)\n- [`openai_dart` - `v0.4.2`](#openai_dart---v042)\n- [`langchain_supabase` - `v0.1.1+3`](#langchain_supabase---v0113)\n- [`langchain_pinecone` - `v0.1.0+9`](#langchain_pinecone---v0109)\n- [`langchain_anthropic` - `v0.1.1+2`](#langchain_anthropic---v0112)\n- [`langchain_chroma` - `v0.2.1+3`](#langchain_chroma---v0213)\n- [`langchain_mistralai` - `v0.2.3+1`](#langchain_mistralai---v0231)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_supabase` - `v0.1.1+3`\n- `langchain_pinecone` - `v0.1.0+9`\n- `langchain_anthropic` - `v0.1.1+2`\n- `langchain_chroma` - `v0.2.1+3`\n- `langchain_mistralai` - `v0.2.3+1`\n- `vertex_ai` - `v0.1.0+2`\n\n---\n\n#### `langchain` - `v0.7.6`\n\n- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). 
([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f))\n\n#### `langchain_core` - `v0.3.6`\n\n- **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f))\n\n#### `langchain_community` - `v0.3.2`\n\n- **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573))\n- **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839))\n\n#### `langchain_firebase` - `v0.2.1+2`\n\n- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d))\n\n#### `langchain_google` - `v0.6.3+1`\n\n- **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981))\n- **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d))\n\n#### `langchain_ollama` - `v0.3.2`\n\n- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768))\n\n#### `langchain_openai` - `v0.7.2`\n\n- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e))\n- **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5))\n\n#### `ollama_dart` - `v0.2.2`\n\n- **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). ([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768))\n\n#### `openai_dart` - `v0.4.2`\n\n- **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e))\n- **FEAT**: Add support for maxCompletionTokens and reasoningTokens in openai_dart ([#556](https://github.com/davidmigloz/langchain_dart/issues/556)). ([37d75b61](https://github.com/davidmigloz/langchain_dart/commit/37d75b612b0f42bbf8d092bdd81c554278716582))\n- **FEAT**: Option to include file search results in assistants API ([#543](https://github.com/davidmigloz/langchain_dart/issues/543)). 
([e916ad3c](https://github.com/davidmigloz/langchain_dart/commit/e916ad3c0c4e322319cedac8b06b5908f1c31935))\n\n\n## 2024-08-22\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.5`](#langchain---v075)\n - [`langchain_core` - `v0.3.5`](#langchain_core---v035)\n - [`langchain_community` - `v0.3.1`](#langchain_community---v031)\n - [`langchain_openai` - `v0.7.1`](#langchain_openai---v071)\n - [`langchain_ollama` - `v0.3.1`](#langchain_ollama---v031)\n - [`langchain_google` - `v0.6.2`](#langchain_google---v062)\n - [`langchain_mistralai` - `v0.2.3`](#langchain_mistralai---v023)\n - [`ollama_dart` - `v0.2.1`](#ollama_dart---v021)\n - [`openai_dart` - `v0.4.1`](#openai_dart---v041)\n - [`langchain_firebase` - `v0.2.1+1`](#langchain_firebase---v0211)\n - [`langchain_supabase` - `v0.1.1+2`](#langchain_supabase---v0112)\n - [`langchain_pinecone` - `v0.1.0+8`](#langchain_pinecone---v0108)\n - [`langchain_anthropic` - `v0.1.1+1`](#langchain_anthropic---v0111)\n - [`langchain_chroma` - `v0.2.1+2`](#langchain_chroma---v0212)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_firebase` - `v0.2.1+1`\n - `langchain_supabase` - `v0.1.1+2`\n - `langchain_pinecone` - `v0.1.0+8`\n - `langchain_anthropic` - `v0.1.1+1`\n - `langchain_chroma` - `v0.2.1+2`\n\n---\n\n#### `langchain` - `v0.7.5`\n\n - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). 
([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567))\n\n#### `langchain_core` - `v0.3.5`\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). ([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n\n#### `langchain_community` - `v0.3.1`\n\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n\n#### `langchain_openai` - `v0.7.1`\n\n - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). 
([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f))\n - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a))\n - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b))\n - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7))\n - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8))\n\n#### `langchain_ollama` - `v0.3.1`\n\n - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7))\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). 
([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n#### `langchain_google` - `v0.6.2`\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n#### `langchain_mistralai` - `v0.2.3`\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n#### `openai_dart` - `v0.4.1`\n\n - **FEAT**: Add support for Structured Outputs ([#525](https://github.com/davidmigloz/langchain_dart/issues/525)). ([c7574077](https://github.com/davidmigloz/langchain_dart/commit/c7574077195acfc96e9ca9d526cc050788c23c1d))\n - **FEAT**: Add log probabilities for refusal tokens ([#534](https://github.com/davidmigloz/langchain_dart/issues/534)). ([8470a24c](https://github.com/davidmigloz/langchain_dart/commit/8470a24cc42042e20ffffa4b67bc831e03efbc6c))\n - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c))\n - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). ([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7))\n\n#### `ollama_dart` - `v0.2.1`\n\n - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). 
([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7))\n\n\n## 2024-07-26\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`langchain_community` - `v0.3.0`](#langchain_community---v030)\n - [`langchain_ollama` - `v0.3.0`](#langchain_ollama---v030)\n - [`langchain_openai` - `v0.7.0`](#langchain_openai---v070)\n - [`ollama_dart` - `v0.2.0`](#ollama_dart---v020)\n - [`openai_dart` - `v0.4.0`](#openai_dart---v040)\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.4`](#langchain---v074)\n - [`langchain_anthropic` - `v0.1.1`](#langchain_anthropic---v011)\n - [`langchain_chroma` - `v0.2.1+1`](#langchain_chroma---v0211)\n - [`langchain_core` - `v0.3.4`](#langchain_core---v034)\n - [`langchain_firebase` - `v0.2.1`](#langchain_firebase---v021)\n - [`langchain_google` - `v0.6.1`](#langchain_google---v061)\n - [`langchain_mistralai` - `v0.2.2`](#langchain_mistralai---v022)\n - [`langchain_pinecone` - `v0.1.0+7`](#langchain_pinecone---v0107)\n - [`langchain_supabase` - `v0.1.1+1`](#langchain_supabase---v0111)\n\n---\n\n#### `langchain` - `v0.7.4`\n\n - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf))\n\n#### `langchain_core` - `v0.3.4`\n\n - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n\n#### `langchain_community` - `v0.3.0`\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_ollama` - `v0.3.0`\n\n - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa))\n - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). 
([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f))\n\n#### `langchain_openai` - `v0.7.0`\n\n - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84))\n - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881))\n - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf))\n - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). 
([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_anthropic` - `v0.1.1`\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_firebase` - `v0.2.1`\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). 
([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_google` - `v0.6.1`\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_mistralai` - `v0.2.2`\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_chroma` - `v0.2.1+1`\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_pinecone` - `v0.1.0+7`\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `langchain_supabase` - `v0.1.1+1`\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n#### `ollama_dart` - `v0.2.0`\n\n - **FEAT**: Add tool calling support in ollama_dart ([#504](https://github.com/davidmigloz/langchain_dart/issues/504)). ([1ffdb41b](https://github.com/davidmigloz/langchain_dart/commit/1ffdb41b8f19941336c1cd911c73f0b3d46af975))\n - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c))\n - **FEAT**: Add support for Ollama version and model info ([#488](https://github.com/davidmigloz/langchain_dart/issues/488)). 
([a110ecb7](https://github.com/davidmigloz/langchain_dart/commit/a110ecb7f10e7975bd2416aa65add98984c6efb8))\n - **FEAT**: Add suffix support in Ollama completions API in ollama_dart ([#503](https://github.com/davidmigloz/langchain_dart/issues/503)). ([30d05a69](https://github.com/davidmigloz/langchain_dart/commit/30d05a69b07f88f803b9abfdf2fded9348a73490))\n - **BREAKING** **REFACTOR**: Change Ollama push model status type from enum to String ([#489](https://github.com/davidmigloz/langchain_dart/issues/489)). ([90c9ccd9](https://github.com/davidmigloz/langchain_dart/commit/90c9ccd986c7b679ed30225d2380120e17dfec41))\n - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). ([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f))\n\n#### `openai_dart` - `v0.4.0`\n\n - **FEAT**: Add support for disabling parallel tool calls in openai_dart ([#492](https://github.com/davidmigloz/langchain_dart/issues/492)). ([a91e0719](https://github.com/davidmigloz/langchain_dart/commit/a91e07196278ae4da5917d52395f3c246fc35bf2))\n - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf))\n - **FEAT**: Support chunking strategy in file_search tool in openai_dart ([#496](https://github.com/davidmigloz/langchain_dart/issues/496)). ([cfa974a9](https://github.com/davidmigloz/langchain_dart/commit/cfa974a9e2fc4b79e5b66765b22d76710575d5bc))\n - **FEAT**: Add support for overrides in the file search tool in openai_dart ([#491](https://github.com/davidmigloz/langchain_dart/issues/491)). 
([89605638](https://github.com/davidmigloz/langchain_dart/commit/89605638c465be37c2738258d840c21d32fe9554))\n - **FEAT**: Allow to customize OpenAI-Beta header in openai_dart ([#502](https://github.com/davidmigloz/langchain_dart/issues/502)). ([5fed8dbb](https://github.com/davidmigloz/langchain_dart/commit/5fed8dbb8205ba7925ca59d6f07a4f5e052b52b1))\n - **FEAT**: Add support for service tier in openai_dart ([#494](https://github.com/davidmigloz/langchain_dart/issues/494)). ([0838e4b9](https://github.com/davidmigloz/langchain_dart/commit/0838e4b9f5bb25e29fbc163a0ff5cf3e64409d40))\n\n## 2024-07-02\n\n### Changes\n\n---\n\nNew packages:\n\n- [`langchain_anthropic` - `v0.1.0`](#langchain_anthropic---v010)\n- [`tavily_dart` - `v0.1.0`](#tavily_dart---v010)\n\nPackages with breaking changes:\n\n- [`langchain_firebase` - `v0.2.0`](#langchain_firebase---v020)\n- [`langchain_google` - `v0.6.0`](#langchain_google---v060)\n\nPackages with other changes:\n\n- [`langchain` - `v0.7.3`](#langchain---v073)\n- [`langchain_core` - `v0.3.3`](#langchain_core---v033)\n- [`langchain_community` - `v0.2.2`](#langchain_community---v022)\n- [`langchain_chroma` - `v0.2.1`](#langchain_chroma---v021)\n- [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021)\n- [`langchain_ollama` - `v0.2.2+1`](#langchain_ollama---v0221)\n- [`langchain_openai` - `v0.6.3`](#langchain_openai---v063)\n- [`langchain_pinecone` - `v0.1.0+6`](#langchain_pinecone---v0106)\n- [`langchain_supabase` - `v0.1.1`](#langchain_supabase---v011)\n- [`anthropic_sdk_dart` - `v0.1.0`](#anthropic_sdk_dart---v010)\n- [`googleai_dart` - `v0.1.0+2`](#googleai_dart---v0102)\n- [`mistralai_dart` - `v0.0.3+3`](#mistralai_dart---v0033)\n- [`ollama_dart` - `v0.1.2`](#ollama_dart---v012)\n- [`openai_dart` - `v0.3.3+1`](#openai_dart---v0331)\n\n---\n\n#### `langchain` - `v0.7.3`\n\n> Note: Anthropic integration (`ChatAnthropic`) is available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package.\n\n- 
**FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c))\n- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302))\n\n#### `langchain_core` - `v0.3.3`\n\n- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n- **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d))\n\n#### `langchain_community` - `v0.2.2`\n\n- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c))\n\n#### `langchain_anthropic` - `v0.1.0`\n\n- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d))\n\n#### `langchain_firebase` - `v0.2.0`\n\n> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default.\n\n- **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b))\n- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). 
([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n- **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). ([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34))\n- **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16))\n- **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44))\n- **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7))\n\n#### `langchain_google` - `v0.6.0`\n\n> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default.\n\n- **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to  gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69))\n- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n- **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). 
([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273))\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n#### `langchain_openai` - `v0.6.3`\n\n- **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n\n#### `langchain_ollama` - `v0.2.2+1`\n\n- **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302))\n\n#### `langchain_chroma` - `v0.2.1`\n\n- Update a dependency to the latest release.\n\n#### `langchain_mistralai` - `v0.2.1`\n\n- Update a dependency to the latest release.\n\n#### `langchain_pinecone` - `v0.1.0+6`\n\n- Update a dependency to the latest release.\n\n#### `langchain_supabase` - `v0.1.1`\n\n- Update a dependency to the latest release.\n\n#### `anthropic_sdk_dart` - `v0.1.0`\n\n- **FEAT**: Add support for tool use in anthropic_sdk_dart client ([#469](https://github.com/davidmigloz/langchain_dart/issues/469)). ([81896cfd](https://github.com/davidmigloz/langchain_dart/commit/81896cfdfce116b010dd51391994251d2a836333))\n- **FEAT**: Add extensions on ToolResultBlockContent in anthropic_sdk_dart ([#476](https://github.com/davidmigloz/langchain_dart/issues/476)). ([8d92d9b0](https://github.com/davidmigloz/langchain_dart/commit/8d92d9b008755ff9b9ca3545eb26fc49a296a909))\n- **REFACTOR**: Improve schemas names in anthropic_sdk_dart ([#475](https://github.com/davidmigloz/langchain_dart/issues/475)). 
([8ebeacde](https://github.com/davidmigloz/langchain_dart/commit/8ebeacded02ab92885354c9447b1a55e024b56d1))\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n#### `ollama_dart` - `v0.1.2`\n\n- **FEAT**: Add support for listing running Ollama models ([#451](https://github.com/davidmigloz/langchain_dart/issues/451)). ([cfaa31fb](https://github.com/davidmigloz/langchain_dart/commit/cfaa31fb8ce1dc128570c95d403809f71e0199d9))\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n#### `tavily_dart` - `v0.1.0`\n\n- **FEAT**: Implement tavily_dart, a Dart client for Tavily API ([#456](https://github.com/davidmigloz/langchain_dart/issues/456)). ([fbfb79ba](https://github.com/davidmigloz/langchain_dart/commit/fbfb79bad81dbbd5844a90938fda79b201f20047))\n\n#### `googleai_dart` - `v0.1.0+2`\n\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n#### `mistralai_dart` - `v0.0.3+3`\n\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n#### `openai_dart` - `v0.3.3+1`\n\n- **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). 
([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n## 2024-06-01\n\n### Changes\n\n---\n\nNew packages:\n\n - [`anthropic_sdk_dart` - `v0.0.1`](#anthropic_sdk_dart---v001)\n\nPackages with other changes:\n\n - [`langchain` - `v0.7.2`](#langchain---v072)\n - [`langchain_core` - `v0.3.2`](#langchain_core---v032)\n - [`langchain_community` - `v0.2.1`](#langchain_community---v021)\n - [`langchain_chroma` - `v0.2.0+5`](#langchain_chroma---v0205)\n - [`langchain_firebase` - `v0.1.0+2`](#langchain_firebase---v0102)\n - [`langchain_google` - `v0.5.1`](#langchain_google---v051)\n - [`langchain_mistralai` - `v0.2.1`](#langchain_mistralai---v021)\n - [`langchain_ollama` - `v0.2.2`](#langchain_ollama---v022)\n - [`langchain_openai` - `v0.6.2`](#langchain_openai---v062)\n - [`langchain_pinecone` - `v0.1.0+5`](#langchain_pinecone---v0105)\n - [`langchain_supabase` - `v0.1.0+5`](#langchain_supabase---v0105)\n - [`chromadb` - `v0.2.0+1`](#chromadb---v0201)\n - [`googleai_dart` - `v0.1.0+1`](#googleai_dart---v0101)\n - [`mistralai_dart` - `v0.0.3+2`](#mistralai_dart---v0032)\n - [`ollama_dart` - `v0.1.1`](#ollama_dart---v011)\n - [`openai_dart` - `v0.3.3`](#openai_dart---v033)\n - [`vertex_ai` - `v0.1.0+1`](#vertex_ai---v0101)\n\n---\n\n#### `langchain` - `v0.7.2`\n\n - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8))\n   + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox)\n - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18))\n\n#### `langchain_core` - `v0.3.2`\n\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). ([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70))\n\n#### `langchain_community` - `v0.2.1`\n\n - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8))\n   + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox)\n\n#### `langchain_openai` - `v0.6.2`\n\n - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace))\n\n#### `anthropic_sdk_dart` - `v0.0.1`\n\n - **FEAT**: Implement anthropic_sdk_dart, a Dart client for Anthropic API ([#433](https://github.com/davidmigloz/langchain_dart/issues/433)). ([e5412bde](https://github.com/davidmigloz/langchain_dart/commit/e5412bdedc7de911f7de88eb51e9d41cd85ab4ae))\n\n#### `ollama_dart` - `v0.1.1`\n\n - **FEAT**: Support buffered stream responses ([#445](https://github.com/davidmigloz/langchain_dart/issues/445)). 
([ce2ef30c](https://github.com/davidmigloz/langchain_dart/commit/ce2ef30c9a9a0dfe8f3059988b7007c94c45b9bd))\n\n#### `openai_dart` - `v0.3.3`\n\n - **FEAT**: Support FastChat OpenAI-compatible API ([#444](https://github.com/davidmigloz/langchain_dart/issues/444)). ([ddaf1f69](https://github.com/davidmigloz/langchain_dart/commit/ddaf1f69d8262210637999367690bf362f2dc5c3))\n - **FIX**: Make vector store name optional ([#436](https://github.com/davidmigloz/langchain_dart/issues/436)). ([29a46c7f](https://github.com/davidmigloz/langchain_dart/commit/29a46c7fa645439e8f4acc10a16da904e7cf14ff))\n - **FIX**: Fix deserialization of sealed classes ([#435](https://github.com/davidmigloz/langchain_dart/issues/435)). ([7b9cf223](https://github.com/davidmigloz/langchain_dart/commit/7b9cf223e42eae8496f864ad7ef2f8d0dca45678))\n\n\n## 2024-05-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`langchain_firebase` - `v0.1.0+1`](#langchain_firebase---v0101)\n- [`ollama_dart` - `v0.1.0+1`](#ollama_dart---v0101)\n- [`openai_dart` - `v0.3.2+1`](#openai_dart---v0321)\n- [`langchain_ollama` - `v0.2.1+1`](#langchain_ollama---v0211)\n- [`langchain_openai` - `v0.6.1+1`](#langchain_openai---v0611)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_ollama` - `v0.2.1+1`\n- `langchain_openai` - `v0.6.1+1`\n\n---\n\n#### `openai_dart` - `v0.3.2+1`\n\n- **FIX**: Rename CreateRunRequestModel factories names ([#429](https://github.com/davidmigloz/langchain_dart/issues/429)). 
([fd15793b](https://github.com/davidmigloz/langchain_dart/commit/fd15793b3c4ac94dfc90567b4a709e1458f4e0e8))\n- **FIX**: Make quote nullable in MessageContentTextAnnotationsFileCitation ([#428](https://github.com/davidmigloz/langchain_dart/issues/428)). ([75b95645](https://github.com/davidmigloz/langchain_dart/commit/75b95645a58d51b369a01e261393e17f7463e1f5))\n\n#### `ollama_dart` - `v0.1.0+1`\n\n- **FIX**: digest path param in Ollama blob endpoints ([#430](https://github.com/davidmigloz/langchain_dart/issues/430)). ([2e9e935a](https://github.com/davidmigloz/langchain_dart/commit/2e9e935aefd74e5e9e09a23188a6c77ce535661d))\n\n#### `langchain_firebase` - `v0.1.0+1`\n\n- **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c))\n\n## 2024-05-14\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- [`langchain_google` - `v0.5.0`](#langchain_google---v050)\n- [`googleai_dart` - `v0.1.0`](#googleai_dart---v010)\n- [`ollama_dart` - `v0.1.0`](#ollama_dart---v010)\n\nPackages with other changes:\n\n- [`langchain` - `v0.7.1`](#langchain---v071)\n- [`langchain_core` - `v0.3.1`](#langchain_core---v031)\n- [`langchain_community` - `v0.2.0+1`](#langchain_community---v0201)\n- [`langchain_firebase` - `v0.1.0`](#langchain_firebase---v010)\n- [`langchain_openai` - `v0.6.1`](#langchain_openai---v061)\n- [`langchain_ollama` - `v0.2.1`](#langchain_ollama---v021)\n- [`langchain_chroma` - `v0.2.0+4`](#langchain_chroma---v0204)\n- [`langchain_mistralai` - `v0.2.0+1`](#langchain_mistralai---v0201)\n- [`langchain_pinecone` - `v0.1.0+4`](#langchain_pinecone---v0104)\n- [`langchain_supabase` - `v0.1.0+4`](#langchain_supabase---v0104)\n- [`openai_dart` - `v0.3.2`](#openai_dart---v032)\n\n---\n\n#### `langchain` - `v0.7.1`\n\n> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) 
package.\n\n- **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n- **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e))\n\n#### `langchain_core` - `v0.3.1`\n\n- **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n- **FIX**: Fix finishReason null check ([#406](https://github.com/davidmigloz/langchain_dart/issues/406)). ([5e2b0ecc](https://github.com/davidmigloz/langchain_dart/commit/5e2b0eccd54c6c1dc15af8ff6d62c395f12fbd90))\n\n#### `langchain_community` - `v0.2.0+1`\n\n- Update a dependency to the latest release.\n\n#### `langchain_google` - `v0.5.0`\n\n> Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`).\n>\n> VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package.\n\n- **FEAT**: Add support for tool calling in ChatGoogleGenerativeAI ([#419](https://github.com/davidmigloz/langchain_dart/issues/419)). ([df41f38a](https://github.com/davidmigloz/langchain_dart/commit/df41f38aab64651a06a42fc41d9c35f33250a3e9))\n- **DOCS**: Add Gemini 1.5 Flash to models list ([#423](https://github.com/davidmigloz/langchain_dart/issues/423)). 
([40f4c9de](https://github.com/davidmigloz/langchain_dart/commit/40f4c9de9c25804e298fd481c80f8c52d53302fb))\n- **BREAKING** **FEAT**: Migrate internal client from googleai_dart to google_generative_ai ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e))\n\n#### `langchain_firebase` - `v0.1.0`\n\n- **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n- **DOCS**: Add Gemini 1.5 Flash to models list ([#423](https://github.com/davidmigloz/langchain_dart/issues/423)). ([40f4c9de](https://github.com/davidmigloz/langchain_dart/commit/40f4c9de9c25804e298fd481c80f8c52d53302fb))\n\n#### `langchain_openai` - `v0.6.1`\n\n- **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896))\n- **FEAT**: Include usage stats when streaming with OpenAI and ChatOpenAI ([#406](https://github.com/davidmigloz/langchain_dart/issues/406)). ([5e2b0ecc](https://github.com/davidmigloz/langchain_dart/commit/5e2b0eccd54c6c1dc15af8ff6d62c395f12fbd90))\n\n#### `langchain_ollama` - `v0.2.1`\n\n- **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). ([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059))\n- **FEAT**: Add keepAlive option to OllamaEmbeddings ([#415](https://github.com/davidmigloz/langchain_dart/issues/415)). 
([32e19028](https://github.com/davidmigloz/langchain_dart/commit/32e19028a7e19ef5fc32a410061eb85bc6e27c39))\n- **FEAT**: Update Ollama default model from llama2 to llama3 ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e))\n- **REFACTOR**: Remove deprecated Ollama options ([#414](https://github.com/davidmigloz/langchain_dart/issues/414)). ([861a2b74](https://github.com/davidmigloz/langchain_dart/commit/861a2b7430d33718340676ec2804a7aaccb2a08a))\n\n#### `openai_dart` - `v0.3.2`\n\n- **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896))\n- **FEAT**: Add support for different content types in Assistants API and other fixes ([#412](https://github.com/davidmigloz/langchain_dart/issues/412)). ([97acab45](https://github.com/davidmigloz/langchain_dart/commit/97acab45a5770422c666795ad3443c083fa08895))\n- **FEAT**: Add support for completions and embeddings in batch API in openai_dart ([#425](https://github.com/davidmigloz/langchain_dart/issues/425)). ([16fe4c68](https://github.com/davidmigloz/langchain_dart/commit/16fe4c6814a828fb0d271a6793598f8369da259d))\n- **FEAT**: Add incomplete status to RunObject in openai_dart ([#424](https://github.com/davidmigloz/langchain_dart/issues/424)). ([71b116e6](https://github.com/davidmigloz/langchain_dart/commit/71b116e6252a9dce5a92e979164e0af8fe96efc3))\n\n#### `ollama_dart` - `v0.1.0`\n\n- **BREAKING** **FEAT**: Align Ollama client to the Ollama v0.1.36 API  ([#411](https://github.com/davidmigloz/langchain_dart/issues/411)). 
([326212ce](https://github.com/davidmigloz/langchain_dart/commit/326212ce4e4b035f7b29f4c810f447d5cf1731c4))\n- **FEAT**: Update Ollama default model from llama2 to llama3 ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e))\n- **FEAT**: Add support for done reason ([#413](https://github.com/davidmigloz/langchain_dart/issues/413)). ([cc5b1b02](https://github.com/davidmigloz/langchain_dart/commit/cc5b1b021636379f32f215546b78547ace87d150))\n\n#### `googleai_dart` - `v0.1.0`\n\n- **REFACTOR**: Minor changes ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e))\n\n#### `langchain_chroma` - `v0.2.0+4`\n\n- Update a dependency to the latest release.\n\n#### `langchain_mistralai` - `v0.2.0+1`\n\n- Update a dependency to the latest release.\n\n#### `langchain_pinecone` - `v0.1.0+4`\n\n- Update a dependency to the latest release.\n\n#### `langchain_supabase` - `v0.1.0+4`\n\n- Update a dependency to the latest release.\n\n## 2024-05-09\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.3.1`](#openai_dart---v031)\n - [`langchain_openai` - `v0.6.0+2`](#langchain_openai---v0602)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.6.0+2`\n\n---\n\n#### `openai_dart` - `v0.3.1`\n\n - **FEAT**: Add support for stream_options in openai_dart ([#405](https://github.com/davidmigloz/langchain_dart/issues/405)). 
([c15714ca](https://github.com/davidmigloz/langchain_dart/commit/c15714ca2df9e30873bc8e4901482faa2d858d8a))\n - **FIX**: RunStepDetailsToolCalls deserialization in Assistants API v2 ([#404](https://github.com/davidmigloz/langchain_dart/issues/404)). ([d76c6aba](https://github.com/davidmigloz/langchain_dart/commit/d76c6aba321e666940614cbc90726500aa370c87))\n\n## 2024-05-06\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- [`openai_dart` - `v0.3.0`](#openai_dart---v030)\n\nPackages with other changes:\n\n- [`langchain_openai` - `v0.6.0+1`](#langchain_openai---v0601)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_openai` - `v0.6.0+1`\n\n---\n\n#### `openai_dart` - `v0.3.0`\n\n- **BREAKING** **FEAT**: Migrate OpenAI Assistants API to v2 and add support for vector stores ([#402](https://github.com/davidmigloz/langchain_dart/issues/402)). ([45de29a1](https://github.com/davidmigloz/langchain_dart/commit/45de29a1957caf2ef05c91e4c99144a4e73ceb91))\n- **FEAT**: Add support for `ChatCompletionToolChoiceMode.required` ([#402](https://github.com/davidmigloz/langchain_dart/issues/402)). 
([45de29a1](https://github.com/davidmigloz/langchain_dart/commit/45de29a1957caf2ef05c91e4c99144a4e73ceb91))\n\n## 2024-05-05\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- [`langchain` - `v0.7.0`](#langchain---v070)\n- [`langchain_core` - `v0.3.0`](#langchain_core---v030)\n- [`langchain_community` - `v0.2.0`](#langchain_community---v020)\n- [`langchain_openai` - `v0.6.0`](#langchain_openai---v060)\n- [`langchain_google` - `v0.4.0`](#langchain_google---v040)\n- [`langchain_mistralai` - `v0.2.0`](#langchain_mistralai---v020)\n- [`langchain_ollama` - `v0.2.0`](#langchain_ollama---v020)\n\nPackages with other changes:\n\n- [`langchain_supabase` - `v0.1.0+3`](#langchain_supabase---v0103)\n- [`langchain_pinecone` - `v0.1.0+3`](#langchain_pinecone---v0103)\n- [`langchain_chroma` - `v0.2.0+3`](#langchain_chroma---v0203)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_supabase` - `v0.1.0+3`\n- `langchain_pinecone` - `v0.1.0+3`\n- `langchain_chroma` - `v0.2.0+3`\n\n---\n\n#### `langchain` - `v0.7.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n- **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n#### `langchain_core` - `v0.3.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). 
([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n- **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n#### `langchain_community` - `v0.2.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n- **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n#### `langchain_openai` - `v0.6.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n- **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n#### `langchain_google` - `v0.4.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n#### `langchain_mistralai` - `v0.2.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). 
([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n#### `langchain_ollama` - `v0.2.0`\n\n- **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n## 2024-04-30\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.6.0+1`](#langchain---v0601)\n - [`langchain_core` - `v0.2.0+1`](#langchain_core---v0201)\n\nPackages with other changes:\n\n - [`langchain_openai` - `v0.5.1+1`](#langchain_openai---v0511)\n - [`openai_dart` - `v0.2.2`](#openai_dart---v022)\n\nPackages with dependency updates only:\n\n - `langchain_community` - `v0.1.0+2`\n - `langchain_ollama` - `v0.1.0+2`\n - `langchain_supabase` - `v0.1.0+2`\n - `langchain_pinecone` - `v0.1.0+2`\n - `langchain_chroma` - `v0.2.0+2`\n - `langchain_google` - `v0.3.0+2`\n - `langchain_mistralai` - `v0.1.0+2`\n\n---\n\n#### `langchain` - `v0.6.0+1`\n\n- **FEAT** Add support for RunnableRouter ([#386](https://github.com/davidmigloz/langchain_dart/issues/386)). ([827e262](https://github.com/davidmigloz/langchain_dart/commit/827e2627535941d702e8fbe300ca1426ddf50efe))\n- **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n- **FEAT**: Add support for JsonOutputParser ([#392](https://github.com/davidmigloz/langchain_dart/issues/392)). ([c6508f0f](https://github.com/davidmigloz/langchain_dart/commit/c6508f0fadde3fd4d93accbcae5cea37b7beca20))\n- **FEAT**: Reduce input stream for PromptTemplate, LLM, ChatModel, Retriever and Tool ([#388](https://github.com/davidmigloz/langchain_dart/issues/388)). 
([b59bcd40](https://github.com/davidmigloz/langchain_dart/commit/b59bcd409f4904fb2e16f928b3c7206a186ab3f4))\n- **BREAKING** **FEAT**: Support different logic for streaming in RunnableFunction ([#394](https://github.com/davidmigloz/langchain_dart/issues/394)). ([8bb2b8ed](https://github.com/davidmigloz/langchain_dart/commit/8bb2b8ede18bfe3a4f266b78ca32f1dfb83db1b1))\n- **FIX**: Allow async functions in Runnable.mapInput ([#396](https://github.com/davidmigloz/langchain_dart/issues/396)). ([e4c35092](https://github.com/davidmigloz/langchain_dart/commit/e4c3509267b7be28e2b0fa334a9255baadabfb6a))\n- **DOCS**: Update LangChain Expression Language documentation ([#395](https://github.com/davidmigloz/langchain_dart/issues/395)). ([6ce75e5f](https://github.com/davidmigloz/langchain_dart/commit/6ce75e5fe6492c951f9b5209d7a2c3077ad178d2))\n\n#### `langchain_core` - `v0.2.0+1`\n\n- **FEAT** Add support for RunnableRouter ([#386](https://github.com/davidmigloz/langchain_dart/issues/386)). ([827e262](https://github.com/davidmigloz/langchain_dart/commit/827e2627535941d702e8fbe300ca1426ddf50efe))\n- **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n- **FEAT**: Add support for JsonOutputParser ([#392](https://github.com/davidmigloz/langchain_dart/issues/392)). ([c6508f0f](https://github.com/davidmigloz/langchain_dart/commit/c6508f0fadde3fd4d93accbcae5cea37b7beca20))\n- **FEAT**: Reduce input stream for PromptTemplate, LLM, ChatModel, Retriever and Tool ([#388](https://github.com/davidmigloz/langchain_dart/issues/388)). ([b59bcd40](https://github.com/davidmigloz/langchain_dart/commit/b59bcd409f4904fb2e16f928b3c7206a186ab3f4))\n- **BREAKING** **FEAT**: Support different logic for streaming in RunnableFunction ([#394](https://github.com/davidmigloz/langchain_dart/issues/394)). 
([8bb2b8ed](https://github.com/davidmigloz/langchain_dart/commit/8bb2b8ede18bfe3a4f266b78ca32f1dfb83db1b1))\n- **FIX**: Allow async functions in Runnable.mapInput ([#396](https://github.com/davidmigloz/langchain_dart/issues/396)). ([e4c35092](https://github.com/davidmigloz/langchain_dart/commit/e4c3509267b7be28e2b0fa334a9255baadabfb6a))\n\n#### `langchain_openai` - `v0.5.1+1`\n\n - **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n\n#### `openai_dart` - `v0.2.2`\n\n - **FEAT**: Add temperature, top_p and response format to Assistants API ([#384](https://github.com/davidmigloz/langchain_dart/issues/384)). ([1d18290f](https://github.com/davidmigloz/langchain_dart/commit/1d18290fdaba558e8661fed4f2316c795f20aef8))\n\n## 2024-04-16\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`langchain` - `v0.5.0+1`](#langchain---v0501)\n- [`openai_dart` - `v0.2.1`](#openai_dart---v021)\n- [`langchain_openai` - `v0.5.0+1`](#langchain_openai---v0501)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_openai` - `v0.5.0+1`\n\n---\n\n#### `langchain` - `v0.5.0+1`\n\n- **DOCS**: Update README.md. ([8139113a](https://github.com/davidmigloz/langchain_dart/commit/8139113a3ca8faa94145cbb6b1b80ca3bc2f3979))\n\n#### `openai_dart` - `v0.2.1`\n\n- **FEAT**: Add support for Batch API in openai_dart ([#383](https://github.com/davidmigloz/langchain_dart/issues/383)). 
([6b89f4a2](https://github.com/davidmigloz/langchain_dart/commit/6b89f4a269417441df844545ab670fa67701e7b4))\n- **FEAT**: Support specifying tool choice in Assistant API in openai_dart ([#382](https://github.com/davidmigloz/langchain_dart/issues/382)). ([97d7977a](https://github.com/davidmigloz/langchain_dart/commit/97d7977a2666ed004c0e04d57114538e02849156))\n- **FEAT**: Support JSON mode in Assistant API in openai_dart ([#381](https://github.com/davidmigloz/langchain_dart/issues/381)). ([a864dae3](https://github.com/davidmigloz/langchain_dart/commit/a864dae3d38f49f83975012ecadec5b859dc43c2))\n- **FEAT**: Support max tokens and truncation strategy in Assistant API in openai_dart ([#380](https://github.com/davidmigloz/langchain_dart/issues/380)). ([7153167b](https://github.com/davidmigloz/langchain_dart/commit/7153167b550549155cf7f68af2292d24036fc9f2))\n- **FEAT**: Add streaming support to Assistant API in openai_dart ([#379](https://github.com/davidmigloz/langchain_dart/issues/379)). ([6ef68196](https://github.com/davidmigloz/langchain_dart/commit/6ef68196fbfff233b37eda8f3d1b1d373252613f))\n- **FEAT**: Update models catalog with GPT-4 Turbo with Vision in openai_dart ([#378](https://github.com/davidmigloz/langchain_dart/issues/378)). ([88537540](https://github.com/davidmigloz/langchain_dart/commit/88537540fbab3cd20fd611447519bbdfed950ebe))\n- **FEAT**: Add Weights & Biases fine-tuning integration and seed in openai_dart ([#377](https://github.com/davidmigloz/langchain_dart/issues/377)). ([a5fff1bf](https://github.com/davidmigloz/langchain_dart/commit/a5fff1bf6ec8cc258174f1e7bcf12c00b7201e0e))\n- **FEAT**: Add support for checkpoints in fine-tuning jobs in openai_dart ([#376](https://github.com/davidmigloz/langchain_dart/issues/376)). 
([69f8e2f9](https://github.com/davidmigloz/langchain_dart/commit/69f8e2f9137a92683a9eec79f2de1ad03452244a))\n\n## 2024-04-10\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- [`langchain` - `v0.5.0`](#langchain---v050)\n- [`langchain_chroma` - `v0.2.0`](#langchain_chroma---v020)\n- [`langchain_community` - `v0.1.0`](#langchain_community---v010)\n- [`langchain_core` - `v0.1.0`](#langchain_core---v010)\n- [`langchain_google` - `v0.3.0`](#langchain_google---v030)\n- [`langchain_mistralai` - `v0.1.0`](#langchain_mistralai---v010)\n- [`langchain_ollama` - `v0.1.0`](#langchain_ollama---v010)\n- [`langchain_openai` - `v0.5.0`](#langchain_openai---v050)\n- [`langchain_pinecone` - `v0.1.0`](#langchain_pinecone---v010)\n- [`langchain_supabase` - `v0.1.0`](#langchain_supabase---v010)\n- [`chromadb` - `v0.2.0`](#chromadb---v020)\n- [`openai_dart` - `v0.2.0`](#openai_dart---v020)\n- [`vertex_ai` - `v0.1.0`](#vertex_ai---v010)\n\nPackages with other changes:\n\n- [`googleai_dart` - `v0.0.4`](#googleai_dart---v004)\n- [`mistralai_dart` - `v0.0.3+1`](#mistralai_dart---v0031)\n- [`ollama_dart` - `v0.0.3+1`](#ollama_dart---v0031)\n\n---\n\n#### `langchain` - `v0.5.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). 
([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n- **FEAT**: Add reduceOutputStream option to StringOutputParser ([#368](https://github.com/davidmigloz/langchain_dart/issues/368)). ([7f9a9fae](https://github.com/davidmigloz/langchain_dart/commit/7f9a9faeef93685ff810a88bbfe866da4b843369))\n- **DOCS**: Update LCEL docs. ([ab3ab573](https://github.com/davidmigloz/langchain_dart/commit/ab3ab573f62d9a497e7c82308da0a044337e957d))\n- **DOCS**: Add RAG example using OllamaEmbeddings and ChatOllama ([#337](https://github.com/davidmigloz/langchain_dart/issues/337)). ([8bddc6c0](https://github.com/davidmigloz/langchain_dart/commit/8bddc6c05b762be357a3c3ed0f6fc4af3aad866a))\n\n#### `langchain_community` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n#### `langchain_core` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). 
([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n- **FEAT**: Add reduceOutputStream option to StringOutputParser ([#368](https://github.com/davidmigloz/langchain_dart/issues/368)). ([7f9a9fae](https://github.com/davidmigloz/langchain_dart/commit/7f9a9faeef93685ff810a88bbfe866da4b843369))\n\n#### `langchain_chroma` - `v0.2.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n#### `langchain_google` - `v0.3.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). 
([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n- **FEAT**: Add streaming support in ChatGoogleGenerativeAI ([#360](https://github.com/davidmigloz/langchain_dart/issues/360)). ([68bfdb04](https://github.com/davidmigloz/langchain_dart/commit/68bfdb04e417a7023b8872cbe0798243503fbf3d))\n- **FEAT**: Support tuned models in ChatGoogleGenerativeAI ([#359](https://github.com/davidmigloz/langchain_dart/issues/359)). ([764b633d](https://github.com/davidmigloz/langchain_dart/commit/764b633df1412f53fc238afe1e97d1e1ac22f206))\n- **FEAT**: Add support for GoogleGenerativeAIEmbeddings ([#362](https://github.com/davidmigloz/langchain_dart/issues/362)). 
([d4f888a0](https://github.com/davidmigloz/langchain_dart/commit/d4f888a0e347608f0538d656d0c5507b61e5ee7e))\n- **FEAT**: Support output dimensionality in GoogleGenerativeAIEmbeddings ([#373](https://github.com/davidmigloz/langchain_dart/issues/373)). ([6dcb27d8](https://github.com/davidmigloz/langchain_dart/commit/6dcb27d861fa65d2c882e31ce28e8c0a92b65cc1))\n- **FEAT**: Support updating API key in Google AI client ([#357](https://github.com/davidmigloz/langchain_dart/issues/357)). ([b9b808e7](https://github.com/davidmigloz/langchain_dart/commit/b9b808e72f02b9f38ab355d581284a0d848d4bd1))\n\n#### `langchain_mistralai` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). 
([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n\n#### `langchain_ollama` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n\n#### `langchain_openai` - `v0.5.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). 
([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n- **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n- **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n- **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n- **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n- **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n- **FEAT**: Remove deprecated OpenAI instance id ([#350](https://github.com/davidmigloz/langchain_dart/issues/350)). ([52939336](https://github.com/davidmigloz/langchain_dart/commit/529393360b7643c8192153c3654e5482dfc299ad))\n\n#### `langchain_pinecone` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). 
([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n#### `langchain_supabase` - `v0.1.0`\n\n- **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n#### `chromadb` - `v0.2.0`\n\n- **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635))\n\n#### `openai_dart` - `v0.2.0`\n\n- **FEAT**: Sync OpenAI API ([#347](https://github.com/davidmigloz/langchain_dart/issues/347)). ([f296eef6](https://github.com/davidmigloz/langchain_dart/commit/f296eef68bfd81305f87475c802705fe3ef477c3))\n- **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635))\n\n#### `vertex_ai` - `v0.1.0`\n\n- **REFACTOR**: Minor changes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n\n#### `googleai_dart` - `v0.0.4`\n\n- **FEAT**: Support generateContent for tuned model in googleai_dart client ([#358](https://github.com/davidmigloz/langchain_dart/issues/358)). ([b4641a09](https://github.com/davidmigloz/langchain_dart/commit/b4641a09af7f6d67d503d526451a370eca920c5c))\n- **FEAT**: Support output dimensionality in Google AI Embeddings ([#373](https://github.com/davidmigloz/langchain_dart/issues/373)). 
([6dcb27d8](https://github.com/davidmigloz/langchain_dart/commit/6dcb27d861fa65d2c882e31ce28e8c0a92b65cc1))\n- **FEAT**: Support updating API key in Google AI client ([#357](https://github.com/davidmigloz/langchain_dart/issues/357)). ([b9b808e7](https://github.com/davidmigloz/langchain_dart/commit/b9b808e72f02b9f38ab355d581284a0d848d4bd1))\n- **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635))\n\n#### `mistralai_dart` - `v0.0.3+1`\n\n- **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). ([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635))\n\n#### `ollama_dart` - `v0.0.3+1`\n\n- **FIX**: Have the == implementation use Object instead of dynamic ([#334](https://github.com/davidmigloz/langchain_dart/issues/334)). 
([89f7b0b9](https://github.com/davidmigloz/langchain_dart/commit/89f7b0b94144c216de19ec7244c48f3c34c2c635))\n\n## 2024-02-15\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`chromadb` - `v0.1.2`](#chromadb---v012)\n- [`googleai_dart` - `v0.0.3`](#googleai_dart---v003)\n- [`langchain` - `v0.4.2`](#langchain---v042)\n- [`langchain_chroma` - `v0.1.1`](#langchain_chroma---v011)\n- [`langchain_google` - `v0.2.4`](#langchain_google---v024)\n- [`langchain_mistralai` - `v0.0.3`](#langchain_mistralai---v003)\n- [`langchain_ollama` - `v0.0.4`](#langchain_ollama---v004)\n- [`langchain_openai` - `v0.4.1`](#langchain_openai---v041)\n- [`langchain_pinecone` - `v0.0.7`](#langchain_pinecone---v007)\n- [`langchain_supabase` - `v0.0.1+1`](#langchain_supabase---v0011)\n- [`mistralai_dart` - `v0.0.3`](#mistralai_dart---v003)\n- [`ollama_dart` - `v0.0.3`](#ollama_dart---v003)\n- [`openai_dart` - `v0.1.7`](#openai_dart---v017)\n- [`vertex_ai` - `v0.0.10`](#vertex_ai---v0010)\n\n---\n\n#### `googleai_dart` - `v0.0.3`\n\n- **FEAT**: Add streaming support to googleai_dart client ([#299](https://github.com/davidmigloz/langchain_dart/issues/299)). ([2cbd538a](https://github.com/davidmigloz/langchain_dart/commit/2cbd538a3b67ef6bdd9ab7b92bebc3c8c7a1bea1))\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `openai_dart` - `v0.1.7`\n\n- **FEAT**: Allow to specify OpenAI custom instance ([#327](https://github.com/davidmigloz/langchain_dart/issues/327)). 
([4744648c](https://github.com/davidmigloz/langchain_dart/commit/4744648cdf02828b9182ebd34ba3d7db5313786e))\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_openai` - `v0.4.1`\n\n- **FEAT**: Allow to specify OpenAI custom instance ([#327](https://github.com/davidmigloz/langchain_dart/issues/327)). ([4744648c](https://github.com/davidmigloz/langchain_dart/commit/4744648cdf02828b9182ebd34ba3d7db5313786e))\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `ollama_dart` - `v0.0.3`\n\n- **FEAT**: Add Ollama keep_alive param to control how long models stay loaded ([#319](https://github.com/davidmigloz/langchain_dart/issues/319)). ([3b86e227](https://github.com/davidmigloz/langchain_dart/commit/3b86e22788eb8df9c09b034c5acc98fdaa6b32c6))\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_ollama` - `v0.0.4`\n\n- **FEAT**: Add Ollama keep_alive param to control how long models stay loaded ([#319](https://github.com/davidmigloz/langchain_dart/issues/319)). 
([3b86e227](https://github.com/davidmigloz/langchain_dart/commit/3b86e22788eb8df9c09b034c5acc98fdaa6b32c6))\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `chromadb` - `v0.1.2`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n\n#### `langchain` - `v0.4.2`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n\n#### `langchain_chroma` - `v0.1.1`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_google` - `v0.2.4`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_mistralai` - `v0.0.3`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). 
([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_pinecone` - `v0.0.7`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `langchain_supabase` - `v0.0.1+1`\n\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `mistralai_dart` - `v0.0.3`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n#### `vertex_ai` - `v0.0.10`\n\n- **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n- **DOCS**: Update pubspecs. 
([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 2024-01-31\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`langchain` - `v0.4.1`](#langchain---v041)\n- [`langchain_supabase` - `v0.0.1`](#langchain_supabase---v001)\n- [`langchain_chroma` - `v0.1.0+15`](#langchain_chroma---v01015)\n- [`langchain_google` - `v0.2.3+3`](#langchain_google---v0233)\n- [`langchain_mistralai` - `v0.0.2+3`](#langchain_mistralai---v0023)\n- [`langchain_ollama` - `v0.0.3+3`](#langchain_ollama---v0033)\n- [`langchain_openai` - `v0.4.0+1`](#langchain_openai---v0401)\n- [`langchain_pinecone` - `v0.0.6+14`](#langchain_pinecone---v00614)\n- [`chromadb` - `v0.1.1+1`](#chromadb---v0111)\n- [`googleai_dart` - `v0.0.2+2`](#googleai_dart---v0022)\n- [`mistralai_dart` - `v0.0.2+3`](#mistralai_dart---v0023)\n- [`ollama_dart` - `v0.0.2+1`](#ollama_dart---v0021)\n- [`openai_dart` - `v0.1.6+1`](#openai_dart---v0161)\n- [`vertex_ai` - `v0.0.9+1`](#vertex_ai---v0091)\n\n---\n\n#### `langchain` - `v0.4.1`\n\n- **DOCS**: Update Supabase docs. ([4a2a5329](https://github.com/davidmigloz/langchain_dart/commit/4a2a532931cac7577102d78b0ec8a5cc4eafb93c))\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_supabase` - `v0.0.1`\n\n- **FEAT**: Add support for Supabase VectorStore ([#69](https://github.com/davidmigloz/langchain_dart/issues/69)). ([be9e72bc](https://github.com/davidmigloz/langchain_dart/commit/be9e72bc210232e403f548a95a305d5bb6254f49))\n\n#### `langchain_chroma` - `v0.1.0+15`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_google` - `v0.2.3+3`\n\n- **DOCS**: Update CHANGELOG.md. 
([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_mistralai` - `v0.0.2+3`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_ollama` - `v0.0.3+3`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_openai` - `v0.4.0+1`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `langchain_pinecone` - `v0.0.6+14`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `chromadb` - `v0.1.1+1`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `googleai_dart` - `v0.0.2+2`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `mistralai_dart` - `v0.0.2+3`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `ollama_dart` - `v0.0.2+1`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `openai_dart` - `v0.1.6+1`\n\n- **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n#### `vertex_ai` - `v0.0.9+1`\n\n- **DOCS**: Update CHANGELOG.md. 
([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 2024-01-26\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- [`langchain_openai` - `v0.4.0`](#langchain_openai---v040)\n\nPackages with other changes:\n\n- [`langchain` - `v0.4.0`](#langchain---v040)\n- [`openai_dart` - `v0.1.6`](#openai_dart---v016)\n- [`langchain_ollama` - `v0.0.3+2`](#langchain_ollama---v0032)\n- [`langchain_mistralai` - `v0.0.2+2`](#langchain_mistralai---v0022)\n- [`langchain_pinecone` - `v0.0.6+13`](#langchain_pinecone---v00613)\n- [`langchain_chroma` - `v0.1.0+14`](#langchain_chroma---v01014)\n- [`langchain_google` - `v0.2.3+2`](#langchain_google---v0232)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n- `langchain_ollama` - `v0.0.3+2`\n- `langchain_mistralai` - `v0.0.2+2`\n- `langchain_pinecone` - `v0.0.6+13`\n- `langchain_chroma` - `v0.1.0+14`\n- `langchain_google` - `v0.2.3+2`\n\n---\n\n#### `langchain` - `v0.4.0`\n\n- **DOCS**: Update embeddings documentation ([#313](https://github.com/davidmigloz/langchain_dart/issues/313)). ([43463481](https://github.com/davidmigloz/langchain_dart/commit/4346348108dc105a1daaedc932641e725b648f3e))\n\n#### `langchain_openai` - `v0.4.0`\n\n- **BREAKING** **FEAT**: Update OpenAIEmbeddings' default model to text-embedding-3-small ([#313](https://github.com/davidmigloz/langchain_dart/issues/313)). ([43463481](https://github.com/davidmigloz/langchain_dart/commit/4346348108dc105a1daaedc932641e725b648f3e))\n- **FEAT**: Add support for shortening embeddings in OpenAIEmbeddings ([#312](https://github.com/davidmigloz/langchain_dart/issues/312)). 
([5f5eb54f](https://github.com/davidmigloz/langchain_dart/commit/5f5eb54f2b991c14c18abf785b873a677bdf7e14))\n\n#### `openai_dart` - `v0.1.6`\n\n- **FEAT**: Add gpt-4-0125-preview and gpt-4-turbo-preview in model catalog ([#309](https://github.com/davidmigloz/langchain_dart/issues/309)). ([f5a78867](https://github.com/davidmigloz/langchain_dart/commit/f5a78867e7fa61e03d7e7da101c939c38564454c))\n- **FEAT**: Add text-embedding-3-small and text-embedding-3-large in model catalog ([#310](https://github.com/davidmigloz/langchain_dart/issues/310)). ([fda16024](https://github.com/davidmigloz/langchain_dart/commit/fda16024daa0b2b12999e628efe11d305d1abf4d))\n- **FEAT**: Add support for shortening embeddings ([#311](https://github.com/davidmigloz/langchain_dart/issues/311)). ([c725db0b](https://github.com/davidmigloz/langchain_dart/commit/c725db0b07b41bee0f12981f956ed0f3cb3d73eb))\n\n## 2024-01-25\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_openai` - `v0.3.3+1`](#langchain_openai---v0331)\n\n---\n\n#### `langchain_openai` - `v0.3.3+1`\n\n - **FIX**: Specified model is always overwritten in OpenAIFunctionsAgent ([#308](https://github.com/davidmigloz/langchain_dart/issues/308)). ([32dc37d8](https://github.com/davidmigloz/langchain_dart/commit/32dc37d8ca3e52929ab69d695f66627ff7e897fa))\n - **DOCS**: Update docs. 
([6f15d7f8](https://github.com/davidmigloz/langchain_dart/commit/6f15d7f836e81ad82cf76988e90620a489b31abb))\n\n## 2024-01-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.3.3`](#langchain---v033)\n - [`langchain_openai` - `v0.3.3`](#langchain_openai---v033)\n - [`langchain_google` - `v0.2.3+1`](#langchain_google---v0231)\n - [`langchain_mistralai` - `v0.0.2+1`](#langchain_mistralai---v0021)\n - [`openai_dart` - `v0.1.5`](#openai_dart---v015)\n - [`mistralai_dart` - `v0.0.2+2`](#mistralai_dart---v0022)\n - [`vertex_ai` - `v0.0.9`](#vertex_ai---v009)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.6+12`\n - `langchain_ollama` - `v0.0.3+1`\n - `langchain_chroma` - `v0.1.0+13`\n\n---\n\n#### `langchain` - `v0.3.3`\n\n - **DOCS**: Add Anyscale and Together AI documentation ([#305](https://github.com/davidmigloz/langchain_dart/issues/305)). ([7daa3eb0](https://github.com/davidmigloz/langchain_dart/commit/7daa3eb052c32baa7473d7532c795b7f242ed9fc))\n\n#### `langchain_openai` - `v0.3.3`\n\n - **FEAT**: Support Anyscale in ChatOpenAI and OpenAIEmbeddings wrappers ([#305](https://github.com/davidmigloz/langchain_dart/issues/305)). ([7daa3eb0](https://github.com/davidmigloz/langchain_dart/commit/7daa3eb052c32baa7473d7532c795b7f242ed9fc))\n - **FEAT**: Support Together AI in ChatOpenAI wrapper ([#297](https://github.com/davidmigloz/langchain_dart/issues/297)). ([28ab56af](https://github.com/davidmigloz/langchain_dart/commit/28ab56aff35c93a6835e5f22397d47da9e45fe40))\n - **FEAT**: Support Together AI in OpenAIEmbeddings wrapper ([#304](https://github.com/davidmigloz/langchain_dart/issues/304)). 
([ddc761d6](https://github.com/davidmigloz/langchain_dart/commit/ddc761d65154be2df1efc202d9e7e6b2e60e7ac2))\n\n#### `langchain_google` - `v0.2.3+1`\n\n - **REFACTOR**: Remove tiktoken in favour of countTokens API on VertexAI ([#307](https://github.com/davidmigloz/langchain_dart/issues/307)). ([8158572b](https://github.com/davidmigloz/langchain_dart/commit/8158572b15c0525b9caa9bc71fbbbee6ab4458fe))\n\n#### `langchain_mistralai` - `v0.0.2+1`\n\n - **REFACTOR**: Update safe_mode and max temperature in Mistral chat ([#300](https://github.com/davidmigloz/langchain_dart/issues/300)). ([1a4ccd1e](https://github.com/davidmigloz/langchain_dart/commit/1a4ccd1e7d1907e340ce609cc6ba8d0543ee3421))\n\n#### `openai_dart` - `v0.1.5`\n\n - **FEAT**: Support Anyscale API in openai_dart client ([#303](https://github.com/davidmigloz/langchain_dart/issues/303)). ([e0a3651c](https://github.com/davidmigloz/langchain_dart/commit/e0a3651c1457065808e1306c7f498eb716159583))\n - **FEAT**: Support Together AI API ([#296](https://github.com/davidmigloz/langchain_dart/issues/296)). ([ca6f23d5](https://github.com/davidmigloz/langchain_dart/commit/ca6f23d53baebe8679b4bc67a7de9a705692dde3))\n - **FEAT**: Support Together AI Embeddings API in openai_dart client ([#301](https://github.com/davidmigloz/langchain_dart/issues/301)). ([4a6e1045](https://github.com/davidmigloz/langchain_dart/commit/4a6e1045c13d712ec4da992dcaa097a7b5c2a626))\n - **FEAT**: Add usage to Run/RunStep in openai_dart client ([#302](https://github.com/davidmigloz/langchain_dart/issues/302)). ([cc6538b5](https://github.com/davidmigloz/langchain_dart/commit/cc6538b53394d04084276d8687ec5d7cbb5b5506))\n\n#### `vertex_ai` - `v0.0.9`\n\n - **FEAT**: Add count tokens method to vertex_ai client ([#306](https://github.com/davidmigloz/langchain_dart/issues/306)). 
([54ae317d](https://github.com/davidmigloz/langchain_dart/commit/54ae317dda43f0313dca708d3831633252113c81))\n\n#### `mistralai_dart` - `v0.0.2+2`\n\n - **REFACTOR**: Update safe_mode and max temperature in Mistral chat ([#300](https://github.com/davidmigloz/langchain_dart/issues/300)). ([1a4ccd1e](https://github.com/davidmigloz/langchain_dart/commit/1a4ccd1e7d1907e340ce609cc6ba8d0543ee3421))\n\n## 2024-01-13\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.3.2`](#langchain---v032)\n - [`langchain_openai` - `v0.3.2`](#langchain_openai---v032)\n - [`langchain_google` - `v0.2.3`](#langchain_google---v023)\n - [`langchain_mistralai` - `v0.0.2`](#langchain_mistralai---v002)\n - [`langchain_ollama` - `v0.0.3`](#langchain_ollama---v003)\n - [`langchain_pinecone` - `v0.0.6+11`](#langchain_pinecone---v00611)\n - [`langchain_chroma` - `v0.1.0+12`](#langchain_chroma---v01012)\n - [`openai_dart` - `v0.1.4`](#openai_dart---v014)\n - [`googleai_dart` - `v0.0.2+1`](#googleai_dart---v0021)\n - [`mistralai_dart` - `v0.0.2+1`](#mistralai_dart---v0021)\n - [`vertex_ai` - `v0.0.8`](#vertex_ai---v008)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.6+11`\n - `langchain_chroma` - `v0.1.0+12`\n\n---\n\n#### `langchain` - `v0.3.2`\n\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **FIX**: Export ConversationSummaryMemory ([#283](https://github.com/davidmigloz/langchain_dart/issues/283)). 
([76b01d23](https://github.com/davidmigloz/langchain_dart/commit/76b01d2376c0d9727d1f4681dba83a46f4b02b3a))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n#### `langchain_openai` - `v0.3.2`\n\n - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.com/#/modules/model_io/models/chat_models/integrations/open_router))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n#### `langchain_google` - `v0.2.3`\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). ([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). 
([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n#### `langchain_mistralai` - `v0.0.2`\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). ([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n#### `langchain_ollama` - `v0.0.3`\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). 
([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n#### `openai_dart` - `v0.1.4`\n\n - **FEAT**: Support OpenRouter API ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). ([57699b32](https://github.com/davidmigloz/langchain_dart/commit/57699b328ee280bf9ac394d60013d6c2e969ab41))\n - **FEAT**: Remove OpenAI deprecated models ([#290](https://github.com/davidmigloz/langchain_dart/issues/290)). ([893b1c51](https://github.com/davidmigloz/langchain_dart/commit/893b1c51abe0fff7955cac6d3cedaa85ccdbf3eb))\n\n#### `googleai_dart` - `v0.0.2+1`\n\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n\n#### `mistralai_dart` - `v0.0.2+1`\n\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). 
([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n\n#### `vertex_ai` - `v0.0.8`\n\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 2024-01-04\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.3.1+1`](#langchain---v0311)\n - [`langchain_ollama` - `v0.0.2+1`](#langchain_ollama---v0021)\n - [`langchain_mistralai` - `v0.0.1+4`](#langchain_mistralai---v0014)\n - [`langchain_google` - `v0.2.2+1`](#langchain_google---v0221)\n - [`langchain_pinecone` - `v0.0.6+10`](#langchain_pinecone---v00610)\n - [`langchain_chroma` - `v0.1.0+11`](#langchain_chroma---v01011)\n - [`langchain_openai` - `v0.3.1+1`](#langchain_openai---v0311)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_ollama` - `v0.0.2+1`\n - `langchain_mistralai` - `v0.0.1+4`\n - `langchain_google` - `v0.2.2+1`\n - `langchain_pinecone` - `v0.0.6+10`\n - `langchain_chroma` - `v0.1.0+11`\n - `langchain_openai` - `v0.3.1+1`\n\n---\n\n#### `langchain` - `v0.3.1+1`\n\n - **FIX**: Export token_buffer.dart (ConversationTokenBufferMemory) ([#280](https://github.com/davidmigloz/langchain_dart/issues/280)). 
([265fcb4b](https://github.com/davidmigloz/langchain_dart/commit/265fcb4b68a5aa6144456868aebf023e1b0ce539))\n\n\n## 2023-12-26\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.3.1`](#langchain---v031)\n - [`langchain_ollama` - `v0.0.2`](#langchain_ollama---v002)\n - [`ollama_dart` - `v0.0.2`](#ollama_dart---v002)\n - [`openai_dart` - `v0.1.3`](#openai_dart---v013)\n - [`langchain_google` - `v0.2.2`](#langchain_google---v022)\n - [`langchain_openai` - `v0.3.1`](#langchain_openai---v031)\n - [`langchain_chroma` - `v0.1.0+10`](#langchain_chroma---v01010)\n - [`langchain_pinecone` - `v0.0.6+9`](#langchain_pinecone---v0069)\n - [`langchain_mistralai` - `v0.0.1+3`](#langchain_mistralai---v0013)\n - [`googleai_dart` - `v0.0.2`](#googleai_dart---v002)\n - [`mistralai_dart` - `v0.0.2`](#mistralai_dart---v002)\n - [`chromadb` - `v0.1.1`](#chromadb---v011)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_chroma` - `v0.1.0+10`\n - `langchain_pinecone` - `v0.0.6+9`\n - `langchain_mistralai` - `v0.0.1+3`\n\n---\n\n#### `langchain` - `v0.3.1`\n\n - **FEAT**: Make ChatPromptTemplates more convenient to use ([#275](https://github.com/davidmigloz/langchain_dart/issues/275)). ([9f8e6f75](https://github.com/davidmigloz/langchain_dart/commit/9f8e6f75543a41b87aff72fbeb249acf859a9562))\n\n#### `langchain_ollama` - `v0.0.2`\n\n - **FEAT**: Migrate ChatOllama to Ollama chat API and add multi-modal support ([#279](https://github.com/davidmigloz/langchain_dart/issues/279)). 
([c5de7e12](https://github.com/davidmigloz/langchain_dart/commit/c5de7e12d14c7095864879c604ccd814c51212cc))\n\n#### `ollama_dart` - `v0.0.2`\n\n - **FEAT**: Add support for chat API and multi-modal LLMs ([#274](https://github.com/davidmigloz/langchain_dart/issues/274)). ([76e1a294](https://github.com/davidmigloz/langchain_dart/commit/76e1a2946fbbf5c4802c4e66addeb9adf5900b17))\n\n#### `openai_dart` - `v0.1.3`\n\n - **FEAT**: Add support for Assistants API ([#278](https://github.com/davidmigloz/langchain_dart/issues/278)). ([06de2d5e](https://github.com/davidmigloz/langchain_dart/commit/06de2d5e541aa79f8d54a8f9a33338c6a6edae3c))\n\n#### `langchain_google` - `v0.2.2`\n\n - Update a dependency to the latest release.\n\n#### `langchain_openai` - `v0.3.1`\n\n - Update a dependency to the latest release.\n\n#### `mistralai_dart` - `v0.0.2`\n\n - Update a dependency to the latest release.\n\n#### `chromadb` - `v0.1.1`\n\n - Update a dependency to the latest release.\n\n#### `googleai_dart` - `v0.0.2`\n\n- Update a dependency to the latest release.\n\n\n## 2023-12-15\n\n### Changes\n\n---\n\nNew packages:\n - [`googleai_dart` - `v0.0.1`](#googleai_dart---v001)\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.3.0`](#langchain---v030)\n - [`langchain_openai` - `v0.3.0`](#langchain_openai---v030)\n\nPackages with other changes:\n\n - [`langchain_chroma` - `v0.1.0+9`](#langchain_chroma---v0109)\n - [`langchain_google` - `v0.2.1`](#langchain_google---v021)\n - [`langchain_ollama` - `v0.0.1+2`](#langchain_ollama---v0012)\n - [`langchain_mistralai` - `v0.0.1+1`](#langchain_mistralai---v0011)\n - [`langchain_pinecone` - `v0.0.6+8`](#langchain_pinecone---v0068)\n\n---\n\n#### `langchain` - `v0.3.0`\n\n - **BREAKING** **REFACTOR**: Make MIME Type mandatory for base64 images in prompt ([#269](https://github.com/davidmigloz/langchain_dart/issues/269)). 
([2fe076bb](https://github.com/davidmigloz/langchain_dart/commit/2fe076bb8d2ddacfee6ec077c3f564bff919dace))\n - **FEAT**: Allow to pass options to countTokens method ([#268](https://github.com/davidmigloz/langchain_dart/issues/268)). ([4ecb123b](https://github.com/davidmigloz/langchain_dart/commit/4ecb123bd34f0b01d377045b97dace89676d5d16))\n - **DOCS**: Update README.md and docs ([#272](https://github.com/davidmigloz/langchain_dart/issues/272)). ([306a1fdd](https://github.com/davidmigloz/langchain_dart/commit/306a1fdd6504ef28dc2066953ae575e975ab9025))\n\n> [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/269)\n\n#### `langchain_openai` - `v0.3.0`\n\n - **BREAKING** **REFACTOR**: Make MIME Type mandatory for base64 images in prompt ([#269](https://github.com/davidmigloz/langchain_dart/issues/269)). ([2fe076bb](https://github.com/davidmigloz/langchain_dart/commit/2fe076bb8d2ddacfee6ec077c3f564bff919dace))\n - **FEAT**: Allow to pass options to countTokens method ([#268](https://github.com/davidmigloz/langchain_dart/issues/268)). ([4ecb123b](https://github.com/davidmigloz/langchain_dart/commit/4ecb123bd34f0b01d377045b97dace89676d5d16))\n\n#### `googleai_dart` - `v0.0.1`\n\n - **FEAT**: Implement Dart client for Google AI API ([#267](https://github.com/davidmigloz/langchain_dart/issues/267)). ([99083cd2](https://github.com/davidmigloz/langchain_dart/commit/99083cd22ec35b3256b800ce76df328b9c9165e4))\n\n#### `langchain_chroma` - `v0.1.0+9`\n\n - **DOCS**: Update README.md and docs ([#272](https://github.com/davidmigloz/langchain_dart/issues/272)). ([306a1fdd](https://github.com/davidmigloz/langchain_dart/commit/306a1fdd6504ef28dc2066953ae575e975ab9025))\n\n#### `langchain_google` - `v0.2.1`\n\n - **FEAT**: Add support for ChatGoogleGenerativeAI wrapper (Gemini API) ([#270](https://github.com/davidmigloz/langchain_dart/issues/270)). 
([5d006c12](https://github.com/davidmigloz/langchain_dart/commit/5d006c121172192765b1a76582588c05b779e9c0))\n\n#### `langchain_ollama` - `v0.0.1+2`\n\n - Update a dependency to the latest release.\n\n#### `langchain_mistralai` - `v0.0.1+1`\n\n - Update a dependency to the latest release.\n\n#### `langchain_pinecone` - `v0.0.6+8`\n\n - Update a dependency to the latest release.\n\n\n## 2023-12-12\n\n### Changes\n\n---\n\nNew packages:\n\n - [`mistralai_dart` - `v0.0.1`](#mistralai_dart---v001)\n - [`langchain_mistralai` - `v0.0.1`](#langchain_mistralai---v001)\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.2.1`](#langchain---v021)\n - [`langchain_ollama` - `v0.0.1+1`](#langchain_ollama---v0011)\n - [`langchain_openai` - `v0.2.0+1`](#langchain_openai---v0201)\n - [`openai_dart` - `v0.1.2+1`](#openai_dart---v0121)\n - [`langchain_pinecone` - `v0.0.6+7`](#langchain_pinecone---v0067)\n - [`langchain_chroma` - `v0.1.0+8`](#langchain_chroma---v0108)\n - [`langchain_google` - `v0.2.0+1`](#langchain_google---v0201)\n\n---\n\n#### `langchain` - `v0.2.1`\n\n - **FEAT**: Support customizing Tool input description ([#258](https://github.com/davidmigloz/langchain_dart/issues/258)). ([a9a1b2a0](https://github.com/davidmigloz/langchain_dart/commit/a9a1b2a0f4fa5fee320e9ca5b46a99a0b834035c))\n - **DOCS**: Update Mistral AI documentation ([#265](https://github.com/davidmigloz/langchain_dart/issues/265)). ([59b4127e](https://github.com/davidmigloz/langchain_dart/commit/59b4127eddb7a04bafa34b11b071336ab336e7a9))\n\n#### `langchain_mistralai` - `v0.0.1`\n\n - **FEAT**: Add support for ChatMistralAI wrapper ([#262](https://github.com/davidmigloz/langchain_dart/issues/262)). 
([1364afec](https://github.com/davidmigloz/langchain_dart/commit/1364afec6ea56043ae17d5460276b10bf19b124e))\n - **FEAT**: Add support for MistralAIEmbeddings ([#254](https://github.com/davidmigloz/langchain_dart/issues/254)) ([#264](https://github.com/davidmigloz/langchain_dart/issues/264)). ([1c6bb1a3](https://github.com/davidmigloz/langchain_dart/commit/1c6bb1a3089c94340267f1091d226c3696efc1f1))\n\n#### `langchain_ollama` - `v0.0.1+1`\n\n - **REFACTOR**: Minor changes in ChatOllama. ([725b8ff0](https://github.com/davidmigloz/langchain_dart/commit/725b8ff0dde5507378a6f2f54e5979f2f596aa2f))\n\n#### `langchain_openai` - `v0.2.0+1`\n\n - **FIX**: Fix ChatOpenAI not considering functions from default options ([#257](https://github.com/davidmigloz/langchain_dart/issues/257)). ([cd864783](https://github.com/davidmigloz/langchain_dart/commit/cd864783f7190f7e8aa8988ba5c2cb5f7bfb3fad))\n\n#### `mistralai_dart` - `v0.0.1`\n\n - **FIX**: Wrong role used as system role ([#263](https://github.com/davidmigloz/langchain_dart/issues/263)). ([7d68a871](https://github.com/davidmigloz/langchain_dart/commit/7d68a871a7bdd940544aa20e2514099a215da782))\n - **FEAT**: Implement Dart client for Mistral AI API ([#261](https://github.com/davidmigloz/langchain_dart/issues/261)). ([f4954c59](https://github.com/davidmigloz/langchain_dart/commit/f4954c59f17c6427d554db7b380073302fb08175))\n\n#### `openai_dart` - `v0.1.2+1`\n\n - **FIX**: Make ChatCompletionNamedToolChoice fields required ([#259](https://github.com/davidmigloz/langchain_dart/issues/259)). 
([4c7d0436](https://github.com/davidmigloz/langchain_dart/commit/4c7d0436070ede83369b9a667ca4c6d2cac99f1a))\n\n#### `langchain_pinecone` - `v0.0.6+7`\n\n - Update a dependency to the latest release.\n\n#### `langchain_chroma` - `v0.1.0+8`\n\n - Update a dependency to the latest release.\n\n#### `langchain_google` - `v0.2.0+1`\n\n - Update a dependency to the latest release.\n\n\n## 2023-12-05\n\n### Changes\n\n---\n\nNew packages:\n\n - [`langchain_ollama` - `v0.0.1`](#langchain_ollama---v001)\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.2.0`](#langchain---v020)\n - [`langchain_google` - `v0.2.0`](#langchain_google---v020)\n - [`langchain_openai` - `v0.2.0`](#langchain_openai---v020)\n\nPackages with other changes:\n\n - [`langchain_chroma` - `v0.1.0+7`](#langchain_chroma---v0107)\n - [`ollama_dart` - `v0.0.1+1`](#ollama_dart---v0011)\n - [`openai_dart` - `v0.1.2`](#openai_dart---v012)\n - [`langchain_pinecone` - `v0.0.6+6`](#langchain_pinecone---v0066)\n\n---\n\n#### `langchain` - `v0.2.0`\n\n> Migration guides:\n> - [`Retriever`](https://github.com/davidmigloz/langchain_dart/issues/248)\n> - [`Tools`](https://github.com/davidmigloz/langchain_dart/issues/243)\n\n - **BREAKING** **FEAT**: Move all retriever config options to RetrieverOptions ([#248](https://github.com/davidmigloz/langchain_dart/issues/248)). ([f5785b77](https://github.com/davidmigloz/langchain_dart/commit/f5785b772c11750bb57f4b143f978a84743f9222))\n - **BREAKING** **FEAT**: Allow to pass call options to tools ([#243](https://github.com/davidmigloz/langchain_dart/issues/243)). ([4a01adb9](https://github.com/davidmigloz/langchain_dart/commit/4a01adb9346b33cdb148d0f0aa7196e2b16867a9))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). 
([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n - **REFACTOR**: Use JsonPath.readValues in JsonLoader ([#245](https://github.com/davidmigloz/langchain_dart/issues/245)). ([3e159254](https://github.com/davidmigloz/langchain_dart/commit/3e159254379d03b70655f274b6fe81fc07a5095f))\n - **FIX**: Out of range error in ConversationBufferWindowMemory ([#249](https://github.com/davidmigloz/langchain_dart/issues/249)). ([1b38bff7](https://github.com/davidmigloz/langchain_dart/commit/1b38bff7eff10327cd0154c0a8d47bd363870e2d))\n - **FIX**: PromptTemplate stream should only emit if it has all inputs ([#247](https://github.com/davidmigloz/langchain_dart/issues/247)). ([a56a2ec5](https://github.com/davidmigloz/langchain_dart/commit/a56a2ec5e084d5c140b0e8469707ecaa19dfdaff))\n\n#### `langchain_google` - `v0.2.0`\n\n> Migration guides:\n> - [`VertexAI`](https://github.com/davidmigloz/langchain_dart/issues/241)\n> - [`ChatVertexAI`](https://github.com/davidmigloz/langchain_dart/issues/242)\n\n - **BREAKING** **FEAT**: Move all model config options to VertexAIOptions ([#241](https://github.com/davidmigloz/langchain_dart/issues/241)). ([a714882a](https://github.com/davidmigloz/langchain_dart/commit/a714882a3026c7f381b6853d6b61506060b0775e))\n - **BREAKING** **FEAT**: Move all model config options to ChatVertexAIOptions ([#242](https://github.com/davidmigloz/langchain_dart/issues/242)). ([89bef8a2](https://github.com/davidmigloz/langchain_dart/commit/89bef8a22fb0b74ffd9d7a4028c64b2d94d38578))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). 
([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n\n#### `langchain_openai` - `v0.2.0`\n\n> Migration guides:\n> - [`OpenAI`](https://github.com/davidmigloz/langchain_dart/issues/232)\n> - [`ChatOpenAI`](https://github.com/davidmigloz/langchain_dart/issues/240)\n> - [`OpenAIDallETool`](https://github.com/davidmigloz/langchain_dart/issues/244)\n\n - **BREAKING** **FEAT**: Move all model config options to OpenAIOptions ([#232](https://github.com/davidmigloz/langchain_dart/issues/232)). ([16e3e8e4](https://github.com/davidmigloz/langchain_dart/commit/16e3e8e449790444f2c1370f08430d42f15b6f5c))\n - **BREAKING** **FEAT**: Move all model config options to ChatOpenAIOptions ([#240](https://github.com/davidmigloz/langchain_dart/issues/240)). ([dd6a21a7](https://github.com/davidmigloz/langchain_dart/commit/dd6a21a75de28a0e605b287f75d9770bce4bb706))\n - **BREAKING** **FEAT**: Allow to pass call options to tools ([#243](https://github.com/davidmigloz/langchain_dart/issues/243)). ([4a01adb9](https://github.com/davidmigloz/langchain_dart/commit/4a01adb9346b33cdb148d0f0aa7196e2b16867a9))\n - **BREAKING** **FEAT**: Move all DallE config options to OpenAIDallEToolOptions ([#244](https://github.com/davidmigloz/langchain_dart/issues/244)). ([c24877c6](https://github.com/davidmigloz/langchain_dart/commit/c24877c6bb1063a06d2be7320cbf8ef94fa04ae0))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). ([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n - **FEAT**: Allow to update OpenAI key without having to recreate the wrapper ([#246](https://github.com/davidmigloz/langchain_dart/issues/246)). 
([05739bd1](https://github.com/davidmigloz/langchain_dart/commit/05739bd1a43a82e1e5ba24543ccc985d48d48286))\n - **FIX**: PromptTemplate stream should only emit if it has all inputs ([#247](https://github.com/davidmigloz/langchain_dart/issues/247)). ([a56a2ec5](https://github.com/davidmigloz/langchain_dart/commit/a56a2ec5e084d5c140b0e8469707ecaa19dfdaff))\n\n#### `langchain_chroma` - `v0.1.0+7`\n\n - **DOCS**: Fix typo in Chroma docs. ([dd97db40](https://github.com/davidmigloz/langchain_dart/commit/dd97db400892fc42cd4d395ace93a40b313247c1))\n\n#### `langchain_ollama` - `v0.0.1`\n\n - **FEAT**: Add support for ChatOllama chat model ([#255](https://github.com/davidmigloz/langchain_dart/issues/255)). ([5b156910](https://github.com/davidmigloz/langchain_dart/commit/5b1569104a3e31fcba078e05b81e7a61b67a24dd))\n - **FEAT**: Add support for OllamaEmbeddings ([#254](https://github.com/davidmigloz/langchain_dart/issues/254)). ([b69701c7](https://github.com/davidmigloz/langchain_dart/commit/b69701c720ba63269ca3541881df4afa4c75504b))\n - **FEAT**: Add support for Ollama LLM ([#253](https://github.com/davidmigloz/langchain_dart/issues/253)). ([23362fdd](https://github.com/davidmigloz/langchain_dart/commit/23362fddf06c056fb2f497a6d1d1648e21895eb8))\n - **DOCS**: Update Ollama docs. ([8161f6c9](https://github.com/davidmigloz/langchain_dart/commit/8161f6c99a6d5169e6df48bb0cfc95374ec4c664))\n\n#### `ollama_dart` - `v0.0.1+1`\n\n - **DOCS**: Update README.me. ([be20dbaf](https://github.com/davidmigloz/langchain_dart/commit/be20dbaf4568c773aca88f1339a489092b3a5551))\n\n#### `openai_dart` - `v0.1.2`\n\n - **FEAT**: Allow to update OpenAI key without having to recreate the wrapper ([#246](https://github.com/davidmigloz/langchain_dart/issues/246)). 
([05739bd1](https://github.com/davidmigloz/langchain_dart/commit/05739bd1a43a82e1e5ba24543ccc985d48d48286))\n\n#### `langchain_pinecone` - `v0.0.6+6`\n\n - Update a dependency to the latest release.\n\n\n## 2023-11-21\n\n### Changes\n\n---\n\nNew packages:\n\n - [`ollama_dart` - `v0.0.1`](#ollama_dart---v001)\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\n---\n\n#### `ollama_dart` - `v0.0.1`\n\n - **FEAT**: Implement ollama_dart, a Dart client for Ollama API ([#238](https://github.com/davidmigloz/langchain_dart/issues/238)). ([d213aa9c](https://github.com/davidmigloz/langchain_dart/commit/d213aa9c5dec0aea11d656b5f16ddf3174f5b789))\n\n\n## 2023-11-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.1.1+1`](#langchain---v0111)\n - [`langchain_pinecone` - `v0.0.6+5`](#langchain_pinecone---v0065)\n - [`langchain_openai` - `v0.1.2+2`](#langchain_openai---v0122)\n - [`langchain_chroma` - `v0.1.0+6`](#langchain_chroma---v0106)\n - [`langchain_google` - `v0.1.0+4`](#langchain_google---v0104)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.1.2+2`\n - `langchain_chroma` - `v0.1.0+6`\n - `langchain_google` - `v0.1.0+4`\n\n---\n\n#### `langchain` - `v0.1.1+1`\n\n - **FIX**: Conditionally import dart:io in LocalFileStore ([#237](https://github.com/davidmigloz/langchain_dart/issues/237)). ([71d337e6](https://github.com/davidmigloz/langchain_dart/commit/71d337e62af49f173369e402fa6a72e363fd8724))\n\n#### `langchain_pinecone` - `v0.0.6+5`\n\n - **FIX**: Decode JSON responses as UTF-8 in Pinecone ([#236](https://github.com/davidmigloz/langchain_dart/issues/236)). 
([edb427b1](https://github.com/davidmigloz/langchain_dart/commit/edb427b16e6cd938adcaaa7cf641f4df6632f479))\n\n\n## 2023-11-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`chromadb` - `v0.1.0+2`](#chromadb---v0102)\n - [`langchain_chroma` - `v0.1.0+5`](#langchain_chroma---v0105)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_chroma` - `v0.1.0+5`\n\n---\n\n#### `chromadb` - `v0.1.0+2`\n\n - **FIX**: Decode JSON responses as UTF-8 ([#234](https://github.com/davidmigloz/langchain_dart/issues/234)) ([#235](https://github.com/davidmigloz/langchain_dart/issues/235)). ([29347763](https://github.com/davidmigloz/langchain_dart/commit/29347763fe04cb7c9199e33c643dbc585de0a7b8))\n\n\n## 2023-11-20\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.1.1+2`](#openai_dart---v0112)\n - [`langchain_openai` - `v0.1.2+1`](#langchain_openai---v0121)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.1.2+1`\n\n---\n\n#### `openai_dart` - `v0.1.1+2`\n\n - **FIX**: Decode JSON responses as UTF-8 ([#234](https://github.com/davidmigloz/langchain_dart/issues/234)). 
([0bca67f4](https://github.com/davidmigloz/langchain_dart/commit/0bca67f4ea682ebd5a8b9d3c7319c9511229b0ba))\n\n\n## 2023-11-19\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.1.1`](#langchain---v011)\n - [`langchain_openai` - `v0.1.2`](#langchain_openai---v012)\n - [`langchain_pinecone` - `v0.0.6+4`](#langchain_pinecone---v0064)\n - [`langchain_chroma` - `v0.1.0+4`](#langchain_chroma---v0104)\n - [`langchain_google` - `v0.1.0+3`](#langchain_google---v0103)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.6+4`\n - `langchain_chroma` - `v0.1.0+4`\n - `langchain_google` - `v0.1.0+3`\n\n---\n\n#### `langchain` - `v0.1.1`\n\n - **FEAT**: Add support for OpenAIDallETool ([#231](https://github.com/davidmigloz/langchain_dart/issues/231)). ([541e8d77](https://github.com/davidmigloz/langchain_dart/commit/541e8d77d76246b25ffa8c4d3715b5ca728cfc3a))\n - **FEAT**: Support implementing custom agents using LCEL ([#230](https://github.com/davidmigloz/langchain_dart/issues/230)). ([625eeeb4](https://github.com/davidmigloz/langchain_dart/commit/625eeeb4ffa9d92c6fd8da003fa471f5d4752257))\n - **FEAT**: Add support for Runnable.mapInput() ([#229](https://github.com/davidmigloz/langchain_dart/issues/229)). ([7cc832ca](https://github.com/davidmigloz/langchain_dart/commit/7cc832ca82bd86b4031ca5f2c796e136ca646375))\n - **REFACTOR**: Rename RunnableMapFromItem to RunnableMapFromInput ([#228](https://github.com/davidmigloz/langchain_dart/issues/228)). 
([7330cfcd](https://github.com/davidmigloz/langchain_dart/commit/7330cfcd0c7e19c831da1454c3ff4cc03d079cf7))\n - **REFACTOR**: Improve handling of input and output keys in chains ([#227](https://github.com/davidmigloz/langchain_dart/issues/227)). ([acf76b24](https://github.com/davidmigloz/langchain_dart/commit/acf76b240a076cf4b1f153bdaba9127580369d9e))\n\n#### `langchain_openai` - `v0.1.2`\n\n - **FEAT**: Add support for OpenAIDallETool ([#231](https://github.com/davidmigloz/langchain_dart/issues/231)). ([541e8d77](https://github.com/davidmigloz/langchain_dart/commit/541e8d77d76246b25ffa8c4d3715b5ca728cfc3a))\n - **FEAT**: Support implementing custom agents using LCEL ([#230](https://github.com/davidmigloz/langchain_dart/issues/230)). ([625eeeb4](https://github.com/davidmigloz/langchain_dart/commit/625eeeb4ffa9d92c6fd8da003fa471f5d4752257))\n\n\n## 2023-11-17\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.1.1+1`](#openai_dart---v0111)\n - [`langchain_openai` - `v0.1.1+1`](#langchain_openai---v0111)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.1.1+1`\n\n---\n\n#### `openai_dart` - `v0.1.1+1`\n\n - **FIX**: Fetch requests with big payloads dropping connection ([#226](https://github.com/davidmigloz/langchain_dart/issues/226)). 
([1e771098](https://github.com/davidmigloz/langchain_dart/commit/1e771098d1090dd79846fca6520a1195efc5ac1e))\n\n\n## 2023-11-17\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.1.0+2`](#langchain---v0102)\n - [`langchain_openai` - `v0.1.1`](#langchain_openai---v011)\n - [`openai_dart` - `v0.1.1`](#openai_dart---v011)\n - [`langchain_pinecone` - `v0.0.6+3`](#langchain_pinecone---v0063)\n - [`langchain_chroma` - `v0.1.0+3`](#langchain_chroma---v0103)\n - [`langchain_google` - `v0.1.0+2`](#langchain_google---v0102)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.6+3`\n - `langchain_chroma` - `v0.1.0+3`\n - `langchain_google` - `v0.1.0+2`\n\n---\n\n#### `langchain` - `v0.1.0+2`\n\n - **DOCS**: Update README.md ([#225](https://github.com/davidmigloz/langchain_dart/issues/225)). ([afff8567](https://github.com/davidmigloz/langchain_dart/commit/afff856723f15022bcc3f0ba0285ff1ffed51c68))\n\n#### `langchain_openai` - `v0.1.1`\n\n - **FEAT**: Add Azure OpenAI API support ([#224](https://github.com/davidmigloz/langchain_dart/issues/224)). ([333fb7af](https://github.com/davidmigloz/langchain_dart/commit/333fb7af4b1edbdc716221609f2dc8f3923822cf))\n\n#### `openai_dart` - `v0.1.1`\n\n - **FEAT**: Add Azure OpenAI API support ([#224](https://github.com/davidmigloz/langchain_dart/issues/224)). 
([333fb7af](https://github.com/davidmigloz/langchain_dart/commit/333fb7af4b1edbdc716221609f2dc8f3923822cf))\n\n## 2023-11-16\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`chromadb` - `v0.1.0+1`](#chromadb---v0101)\n - [`langchain` - `v0.1.0+1`](#langchain---v0101)\n - [`langchain_google` - `v0.1.0+1`](#langchain_google---v0101)\n - [`langchain_openai` - `v0.1.0+1`](#langchain_openai---v0101)\n - [`langchain_pinecone` - `v0.0.6+2`](#langchain_pinecone---v0062)\n - [`openai_dart` - `v0.1.0+1`](#openai_dart---v0101)\n - [`vertex_ai` - `v0.0.7+2`](#vertex_ai---v0072)\n - [`langchain_chroma` - `v0.1.0+2`](#langchain_chroma---v0102)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_chroma` - `v0.1.0+2`\n\n---\n\n#### `chromadb` - `v0.1.0+1`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `langchain` - `v0.1.0+1`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `langchain_google` - `v0.1.0+1`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). 
([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `langchain_openai` - `v0.1.0+1`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `langchain_pinecone` - `v0.0.6+2`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `openai_dart` - `v0.1.0+1`\n\n - **FIX**: Add missing `name` param in ChatCompletionMessage ([#222](https://github.com/davidmigloz/langchain_dart/issues/222)). ([6f186775](https://github.com/davidmigloz/langchain_dart/commit/6f186775f67cf3db5e28e4a15f896927b9af50ce))\n - **FIX**: Remove dependency on io.HttpException ([#221](https://github.com/davidmigloz/langchain_dart/issues/221)). ([95369e4c](https://github.com/davidmigloz/langchain_dart/commit/95369e4c1a9b8f277390b612df7d9bb21c19d82f))\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n#### `vertex_ai` - `v0.0.7+2`\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). 
([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n\n## 2023-11-15\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.1.0`](#langchain---v010)\n - [`langchain_google` - `v0.1.0`](#langchain_google---v010)\n - [`langchain_openai` - `v0.1.0`](#langchain_openai---v010)\n - [`openai_dart` - `v0.1.0`](#openai_dart---v010)\n\nPackages with other changes:\n\n - [`langchain_pinecone` - `v0.0.6+1`](#langchain_pinecone---v0061)\n - [`langchain_chroma` - `v0.1.0+1`](#langchain_chroma---v0101)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.6+1`\n - `langchain_chroma` - `v0.1.0+1`\n\n---\n\n#### `langchain` - `v0.1.0`\n\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). ([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n > [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n#### `langchain_google` - `v0.1.0`\n\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). ([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n > [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n#### `langchain_openai` - `v0.1.0`\n\n - **REFACTOR**: Align openai_dart client breaking changes ([#219](https://github.com/davidmigloz/langchain_dart/issues/219)). 
([172db27f](https://github.com/davidmigloz/langchain_dart/commit/172db27f6da429e16dcda55678a73e1d885bb6d9))\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). ([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n > [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n#### `openai_dart` - `v0.1.0`\n\n - **BREAKING** **FEAT**: Add multi-modal support ([#218](https://github.com/davidmigloz/langchain_dart/issues/218)). ([14c8e7ef](https://github.com/davidmigloz/langchain_dart/commit/14c8e7ef7194400057d40422822df1127c4cb131))\n - **BREAKING** **FEAT**: Rename factory const to more meaningful names ([#215](https://github.com/davidmigloz/langchain_dart/issues/215)). ([7e4602fa](https://github.com/davidmigloz/langchain_dart/commit/7e4602fa86c55bd6d82a0aac253b1165afa21aeb))\n - **FEAT**: Add gpt-3.5-turbo-1106 chat model ([#217](https://github.com/davidmigloz/langchain_dart/issues/217)). ([73f37915](https://github.com/davidmigloz/langchain_dart/commit/73f37915e99d83bd458f8f8500385a24a64d3948))\n - **REFACTOR**: Improve request error handling ([#214](https://github.com/davidmigloz/langchain_dart/issues/214)). ([4a9f3d33](https://github.com/davidmigloz/langchain_dart/commit/4a9f3d335124526438651149e5b91c07921617a2))\n\n > Migration guides: [new factories](https://github.com/davidmigloz/langchain_dart/issues/215) and [multi-modal](https://github.com/davidmigloz/langchain_dart/issues/218)\n\n## 2023-11-13\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n- There are no breaking changes in this release.\n\nPackages with other changes:\n\n- [`langchain_pinecone` - `v0.0.6`](#langchain_pinecone---v006)\n\n---\n\n#### `langchain_pinecone` - `v0.0.6`\n\n- **FEAT**: Add support for global headers in Pinecone ([#213](https://github.com/davidmigloz/langchain_dart/issues/213)). 
([8e0d221f](https://github.com/davidmigloz/langchain_dart/commit/8e0d221fad55b1fa62d9ff6f97476ee647837c6b))\n\n\n## 2023-11-12\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`chromadb` - `v0.1.0`](#chromadb---v010)\n - [`langchain_chroma` - `v0.1.0`](#langchain_chroma---v010)\n\nPackages with other changes:\n\n - [`openai_dart` - `v0.0.2+2`](#openai_dart---v0022)\n - [`langchain_openai` - `v0.0.15+2`](#langchain_openai---v00152)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.0.15+2`\n\n---\n\n#### `langchain_chroma` - `v0.1.0`\n\n - **BREAKING** **FEAT**: Chroma databases, tenants and global headers support ([#211](https://github.com/davidmigloz/langchain_dart/issues/211)). ([5a1d8397](https://github.com/davidmigloz/langchain_dart/commit/5a1d83971c78849f7185a674ffff527e0348511d))\n\n\n#### `chromadb` - `v0.1.0`\n\n - **BREAKING** **FEAT**: Multi-modal and tenants support ([#210](https://github.com/davidmigloz/langchain_dart/issues/210)). ([bfb0d89c](https://github.com/davidmigloz/langchain_dart/commit/bfb0d89cf82881090f6a50ee4d70b70f62e4302e))\n\n#### `openai_dart` - `v0.0.2+2`\n\n - **REFACTOR**: Migrate to generated client stream methods ([#208](https://github.com/davidmigloz/langchain_dart/issues/208)). ([9122f551](https://github.com/davidmigloz/langchain_dart/commit/9122f5517bb12a9596d22acfa6e81251f6d9afe8))\n - **FIX**: Fix integer overflow when targeting web ([#207](https://github.com/davidmigloz/langchain_dart/issues/207)). 
([eaf69f32](https://github.com/davidmigloz/langchain_dart/commit/eaf69f32266abe4c8a4c99502fe9b1be2029d7d1))\n\n\n## 2023-11-09\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_chroma` - `v0.0.5+3`](#langchain_chroma---v0053)\n - [`langchain_google` - `v0.0.10+1`](#langchain_google---v00101)\n - [`langchain_openai` - `v0.0.15+1`](#langchain_openai---v00151)\n - [`langchain_pinecone` - `v0.0.5+2`](#langchain_pinecone---v0052)\n - [`openai_dart` - `v0.0.2+1`](#openai_dart---v0021)\n\n---\n\n#### `langchain_chroma` - `v0.0.5+3`\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n#### `langchain_google` - `v0.0.10+1`\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n#### `langchain_openai` - `v0.0.15+1`\n\n - **REFACTOR**: Rename ChatCompletionFunction to FunctionObject (internal) ([#206](https://github.com/davidmigloz/langchain_dart/issues/206)). ([0f06df3f](https://github.com/davidmigloz/langchain_dart/commit/0f06df3f9b32e5887976936b5fd2e6aa5a4f4f5b))\n\n#### `langchain_pinecone` - `v0.0.5+2`\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n#### `openai_dart` - `v0.0.2+1`\n\n - **REFACTOR**: Rename ChatCompletionFunction to FunctionObject ([#206](https://github.com/davidmigloz/langchain_dart/issues/206)). 
([0f06df3f](https://github.com/davidmigloz/langchain_dart/commit/0f06df3f9b32e5887976936b5fd2e6aa5a4f4f5b))\n\n\n## 2023-11-07\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.0.15`](#langchain---v0015)\n - [`langchain_google` - `v0.0.10`](#langchain_google---v0010)\n - [`langchain_openai` - `v0.0.15`](#langchain_openai---v0015)\n - [`openai_dart` - `v0.0.2`](#openai_dart---v002)\n - [`langchain_pinecone` - `v0.0.5+1`](#langchain_pinecone---v0051)\n - [`langchain_chroma` - `v0.0.5+2`](#langchain_chroma---v0052)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_pinecone` - `v0.0.5+1`\n - `langchain_chroma` - `v0.0.5+2`\n\n---\n\n#### `langchain` - `v0.0.15`\n\n - **FEAT**: Add streaming support in LangChain Expression Language ([#192](https://github.com/davidmigloz/langchain_dart/issues/192)). ([2e4bcf91](https://github.com/davidmigloz/langchain_dart/commit/2e4bcf91f6b364b32b6f999e71252001ca6392c8))\n - **DOCS**: Add streaming to docs. ([bb87c190](https://github.com/davidmigloz/langchain_dart/commit/bb87c1901b34810aa2e841ed83da8e70703b9d08))\n - **FEAT**: Add streaming support to OutputFunctionsParsers ([#194](https://github.com/davidmigloz/langchain_dart/issues/194)). ([8b4e6a13](https://github.com/davidmigloz/langchain_dart/commit/8b4e6a138cd9942dd6ea1a97fe5e19e84a30000c))\n - **FIX**: Remove unused generic param in StringOutputParser ([#193](https://github.com/davidmigloz/langchain_dart/issues/193)). 
([decd3176](https://github.com/davidmigloz/langchain_dart/commit/decd31765114bea1967f15e5fbd83110709938e4))\n\n#### `langchain_openai` - `v0.0.15`\n\n - **FEAT**: Add streaming support to OpenAI ([#196](https://github.com/davidmigloz/langchain_dart/issues/196)). ([b21fcb38](https://github.com/davidmigloz/langchain_dart/commit/b21fcb387685af8706db62caf33b24e0ccf9c73f))\n - **FEAT**: Support seed and system_fingerprint in OpenAI wrapper ([#204](https://github.com/davidmigloz/langchain_dart/issues/204)). ([c31b6795](https://github.com/davidmigloz/langchain_dart/commit/c31b67959ca7ce3d42e9832669fd18de11f41984))\n - **FEAT**: Add streaming support to ChatOpenAI ([#197](https://github.com/davidmigloz/langchain_dart/issues/197)). ([2268da78](https://github.com/davidmigloz/langchain_dart/commit/2268da783703b76422448128ea929e6fb6f805b6))\n - **FEAT**: Support seed, system_fingerprint and JSON Mode in ChatOpenAI ([#205](https://github.com/davidmigloz/langchain_dart/issues/205)). ([3332c228](https://github.com/davidmigloz/langchain_dart/commit/3332c2281b8a345ac7a6789202cbd5ac2225296b))\n\n#### `langchain_google` - `v0.0.10`\n\n - **FEAT**: Add result id in ChatVertexAI generations ([#195](https://github.com/davidmigloz/langchain_dart/issues/195)). ([a5bea6d3](https://github.com/davidmigloz/langchain_dart/commit/a5bea6d3aefbb53ed55d3abda0f51f5878445b72))\n\n#### `openai_dart` - `v0.0.2`\n\n - **FEAT**: Support new models API functionality ([#203](https://github.com/davidmigloz/langchain_dart/issues/203)). ([33ebe746](https://github.com/davidmigloz/langchain_dart/commit/33ebe746b509009ba41e417f36abf267d9d1c2ca))\n - **FEAT**: Support new images API functionality ([#202](https://github.com/davidmigloz/langchain_dart/issues/202)). ([fcf21daf](https://github.com/davidmigloz/langchain_dart/commit/fcf21dafbbdf4b1598ed8ddbfe30ebd09da65ada))\n - **FEAT**: Support new fine-tunning API functionality ([#201](https://github.com/davidmigloz/langchain_dart/issues/201)). 
([f5f44ad8](https://github.com/davidmigloz/langchain_dart/commit/f5f44ad831c87c71ad995567748546b82ee231a4))\n - **FEAT**: Support new embeddings API functionality ([#200](https://github.com/davidmigloz/langchain_dart/issues/200)). ([9b43d85b](https://github.com/davidmigloz/langchain_dart/commit/9b43d85b63ddf916c38e7c5d7c65d5be32fa3015))\n - **FEAT**: Support new completion API functionality ([#199](https://github.com/davidmigloz/langchain_dart/issues/199)). ([f12f6f57](https://github.com/davidmigloz/langchain_dart/commit/f12f6f577c0e74db6160101796522c8786c4f37e))\n - **FEAT**: Support new chat completion API functionality ([#198](https://github.com/davidmigloz/langchain_dart/issues/198)). ([01820d69](https://github.com/davidmigloz/langchain_dart/commit/01820d697c9ffac09f77d2a16a5db6b5e6ed6fc6))\n - **FIX**: Handle nullable function call fields when streaming ([#191](https://github.com/davidmigloz/langchain_dart/issues/191)). ([8f23cf16](https://github.com/davidmigloz/langchain_dart/commit/8f23cf16c96f73d69a2abf17f2142b7eb4922a73))\n\n\n## 2023-11-03\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_chroma` - `v0.0.5+1`](#langchain_chroma---v0051)\n - [`langchain_openai` - `v0.0.14+1`](#langchain_openai---v00141)\n - [`langchain_pinecone` - `v0.0.5`](#langchain_pinecone---v005)\n\n---\n\n#### `langchain_chroma` - `v0.0.5+1`\n\n - **DOCS**: Update CHANGELOG.md. ([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n#### `langchain_openai` - `v0.0.14+1`\n\n - **FIX**: Revert OpenAI maxTokens default to 256 ([#189](https://github.com/davidmigloz/langchain_dart/issues/189)). ([ab2ce6d4](https://github.com/davidmigloz/langchain_dart/commit/ab2ce6d4231ca0e2aff7aa3d9831625b10d0524d))\n - **DOCS**: Update CHANGELOG.md. 
([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n#### `langchain_pinecone` - `v0.0.5`\n\n - **FEAT**: Upgrade pinecone client to v0.6.0 ([#188](https://github.com/davidmigloz/langchain_dart/issues/188)). ([57e2587f](https://github.com/davidmigloz/langchain_dart/commit/57e2587fa3849e7aea199dd52e2cb2ce4f61946a))\n - **DOCS**: Update CHANGELOG.md. ([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n\n## 2023-11-02\n\n### Changes\n\n---\n\nNew packages:\n\n- [`openai_dart` - `v0.0.1`](#openai_dart---v001)\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.0.14`](#langchain---v0014)\n - [`langchain_chroma` - `v0.0.5`](#langchain_chroma---v005)\n - [`langchain_google` - `v0.0.9`](#langchain_google---v009)\n - [`langchain_pinecone` - `v0.0.4`](#langchain_pinecone---v004)\n\nPackages with other changes:\n\n - [`langchain_openai` - `v0.0.14`](#langchain_openai---v0014)\n\n---\n\n#### `langchain` - `v0.0.14`\n\n - **BREAKING** **FIX**: Change loaders lastModified metadata field to integer ([#172](https://github.com/davidmigloz/langchain_dart/issues/172)). ([72c724f8](https://github.com/davidmigloz/langchain_dart/commit/72c724f8a716e27b4a807b70bcbbafdd9feb0a18))\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n - **REFACTOR**: Don't require implement getFormatInstructions. ([d8b1286d](https://github.com/davidmigloz/langchain_dart/commit/d8b1286db59e02b60179e395eb43cdc3828582c2))\n - **DOCS**: Update docs. 
([af7ee827](https://github.com/davidmigloz/langchain_dart/commit/af7ee8278f18620a54072bb9d1772882956d5c2d))\n\n#### `langchain_openai` - `v0.0.14`\n\n- **FEAT**: Migrate OpenAI to openai_dart client ([#184](https://github.com/davidmigloz/langchain_dart/issues/184)). ([6c90b371](https://github.com/davidmigloz/langchain_dart/commit/6c90b37183eb47354df91fc4870065afecaf3673))\n- **FEAT**: Migrate ChatOpenAI to openai_dart client ([#185](https://github.com/davidmigloz/langchain_dart/issues/185)). ([de8f487d](https://github.com/davidmigloz/langchain_dart/commit/de8f487dfddb10bee049539356a20b77b7556cad))\n- **FEAT**: Migrate OpenAIEmbeddings to openai_dart client ([#183](https://github.com/davidmigloz/langchain_dart/issues/183)). ([8f626fe8](https://github.com/davidmigloz/langchain_dart/commit/8f626fe8253f6b11fba0fcab0e143db93d3bce08))\n- **REFACTOR**: Remove dependency on dart_openai ([#186](https://github.com/davidmigloz/langchain_dart/issues/186)). ([273cfa2e](https://github.com/davidmigloz/langchain_dart/commit/273cfa2e84421161c979ddc41168d1b38981ca04))\n\n#### `langchain_chroma` - `v0.0.5`\n\n - **BREAKING** **FIX**: Change loaders lastModified metadata field to integer ([#172](https://github.com/davidmigloz/langchain_dart/issues/172)). ([72c724f8](https://github.com/davidmigloz/langchain_dart/commit/72c724f8a716e27b4a807b70bcbbafdd9feb0a18))\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n - **DOCS**: Update changelog. ([d45d624a](https://github.com/davidmigloz/langchain_dart/commit/d45d624a0ba12e53c4e78a29750cad30d66c61c5))\n\n#### `langchain_google` - `v0.0.9`\n\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). 
([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n - **DOCS**: Update changelog. ([d45d624a](https://github.com/davidmigloz/langchain_dart/commit/d45d624a0ba12e53c4e78a29750cad30d66c61c5))\n\n#### `langchain_pinecone` - `v0.0.4`\n\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n\n#### `openai_dart` - `v0.0.1`\n\n - **FIX**: Fix static analysis warning ([#187](https://github.com/davidmigloz/langchain_dart/issues/187)). ([3fe91570](https://github.com/davidmigloz/langchain_dart/commit/3fe915705ca5a8b335333fa5ea94260040aaf0db))\n - **FIX**: Several fixes and improvements ([#182](https://github.com/davidmigloz/langchain_dart/issues/182)). ([115e8bef](https://github.com/davidmigloz/langchain_dart/commit/115e8bef43c82d907ce94518fa382657a1237fcc))\n - **FEAT**: Support different embedding response formats ([#180](https://github.com/davidmigloz/langchain_dart/issues/180)). ([4f676e87](https://github.com/davidmigloz/langchain_dart/commit/4f676e875f05a837343792c976701fa0cda0076e))\n - **FEAT**: Implement openai_dart, a Dart client for OpenAI API ([#178](https://github.com/davidmigloz/langchain_dart/issues/178)). 
([fa5d032a](https://github.com/davidmigloz/langchain_dart/commit/fa5d032a6225933a79d4ff039732d893156ac92d))\n\n\n## 2023-09-17\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.0.13`](#langchain---v0013)\n - [`langchain_openai` - `v0.0.13`](#langchain_openai---v0013)\n - [`langchain_pinecone` - `v0.0.3`](#langchain_pinecone---v003)\n - [`langchain_chroma` - `v0.0.4`](#langchain_chroma---v004)\n - [`langchain_google` - `v0.0.8`](#langchain_google---v008)\n\n---\n\n#### `langchain` - `v0.0.13`\n\n - Check out the [LangChain Expression Language documentation](https://langchaindart.com/#/expression_language/interface) for more details\n\n - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615))\n - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf))\n - **FEAT**: Implement LangChain Expression Language (LCEL) ([#163](https://github.com/davidmigloz/langchain_dart/issues/163)). ([85ea41af](https://github.com/davidmigloz/langchain_dart/commit/85ea41af9f5e2ff42bba620a60f765ca0f67c86c))\n - **FEAT**: Support custom doc prompt in StuffDocumentsQAChain ([#157](https://github.com/davidmigloz/langchain_dart/issues/157)). ([faa9d2d7](https://github.com/davidmigloz/langchain_dart/commit/faa9d2d768c2a70f17247d5703dd1d821af08240))\n\n#### `langchain_openai` - `v0.0.13`\n\n - **FEAT**: Implement LangChain Expression Language (LCEL) ([#163](https://github.com/davidmigloz/langchain_dart/issues/163)). 
([85ea41af](https://github.com/davidmigloz/langchain_dart/commit/85ea41af9f5e2ff42bba620a60f765ca0f67c86c))\n\n#### `langchain_pinecone` - `v0.0.3`\n\n - **FIX**: Update pinecone client version ([#160](https://github.com/davidmigloz/langchain_dart/issues/160)). ([d15cc576](https://github.com/davidmigloz/langchain_dart/commit/d15cc5761563476fe7b5d66effd42ded077dbbbc))\n\n#### `langchain_chroma` - `v0.0.4`\n\n - Updated `langchain` dependency\n\n#### `langchain_google` - `v0.0.8`\n\n - Updated `langchain` dependency\n\n\n## 2023-09-08\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain_google` - `v0.0.7+1`](#langchain_google---v0071)\n - [`langchain_pinecone` - `v0.0.2+1`](#langchain_pinecone---v0021)\n - [`vertex_ai` - `v0.0.7+1`](#vertex_ai---v0071)\n\n---\n\n#### `langchain_google` - `v0.0.7+1`\n\n - **REFACTOR**: Require `http.Client` instead of `AuthClient` ([#156](https://github.com/davidmigloz/langchain_dart/issues/156)). ([0f7fee7f](https://github.com/davidmigloz/langchain_dart/commit/0f7fee7f0780e5b650ec50307a7fda65e242e822))\n\n#### `langchain_pinecone` - `v0.0.2+1`\n\n - **REFACTOR**: Require `http.Client` instead of `AuthClient` ([#156](https://github.com/davidmigloz/langchain_dart/issues/156)). ([0f7fee7f](https://github.com/davidmigloz/langchain_dart/commit/0f7fee7f0780e5b650ec50307a7fda65e242e822))\n\n#### `vertex_ai` - `v0.0.7+1`\n\n - **REFACTOR**: Require `http.Client` instead of `AuthClient` ([#156](https://github.com/davidmigloz/langchain_dart/issues/156)). 
([0f7fee7f](https://github.com/davidmigloz/langchain_dart/commit/0f7fee7f0780e5b650ec50307a7fda65e242e822))\n\n\n## 2023-09-05\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - [`langchain` - `v0.0.12`](#langchain---v0012)\n - [`langchain_google` - `v0.0.7`](#langchain_google---v007)\n - [`langchain_openai` - `v0.0.12`](#langchain_openai---v0012)\n\nPackages with other changes:\n\n - [`langchain_pinecone` - `v0.0.2`](#langchain_pinecone---v002)\n - [`langchain_chroma` - `v0.0.3`](#langchain_chroma---v003)\n - [`vertex_ai` - `v0.0.7`](#vertex_ai---v007)\n - [`chromadb` - `v0.0.3`](#chromadb---v003)\n\n---\n\n#### `langchain` - `v0.0.12`\n\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n - **DOCS**: Acknowledge sponsors in readme. ([092d94c8](https://github.com/davidmigloz/langchain_dart/commit/092d94c8ac166cf47f1ddab748b61d440f4b8585))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n#### `langchain_google` - `v0.0.7`\n\n - **BREAKING** **FEAT**: Add default and call options in VertexAI and ChatVertexAI ([#155](https://github.com/davidmigloz/langchain_dart/issues/155)). ([fe1b12ea](https://github.com/davidmigloz/langchain_dart/commit/fe1b12ea282cd587f9dc78bd959741781ebb6d35))\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n - **FEAT**: Support document title in VertexAIEmbeddings ([#154](https://github.com/davidmigloz/langchain_dart/issues/154)). 
([6b763731](https://github.com/davidmigloz/langchain_dart/commit/6b76373139bb50e8d0e59b3f63b54f6adae3d498))\n - **FEAT**: Support task type in VertexAIEmbeddings ([#151](https://github.com/davidmigloz/langchain_dart/issues/151)). ([8a2199e2](https://github.com/davidmigloz/langchain_dart/commit/8a2199e26a945f7d2ad8d3da3ca14e083172f6f1))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n#### `langchain_openai` - `v0.0.12`\n\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n#### `langchain_pinecone` - `v0.0.2`\n\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n - **DOCS**: Update changelog. ([df784ff1](https://github.com/davidmigloz/langchain_dart/commit/df784ff108584b0732ec9455f1531636256e9c4e))\n\n#### `langchain_chroma` - `v0.0.3`\n\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n#### `vertex_ai` - `v0.0.7`\n\n - **FEAT**: Add support for stopSequence and candidateCount ([#150](https://github.com/davidmigloz/langchain_dart/issues/150)). ([eab7d968](https://github.com/davidmigloz/langchain_dart/commit/eab7d968f464db58a04f3453b4470ad3859536aa))\n - **FEAT**: Support task type and title in embeddings models ([#149](https://github.com/davidmigloz/langchain_dart/issues/149)). 
([421d36bd](https://github.com/davidmigloz/langchain_dart/commit/421d36bdfe1ad8c4776811f6e1f0d3bf8c316ee3))\n - **FIX**: Fix typo in stop sequences field deserialization ([#152](https://github.com/davidmigloz/langchain_dart/issues/152)). ([4f7161da](https://github.com/davidmigloz/langchain_dart/commit/4f7161da55309bcbd5a62c21ba2a7fa703c4eb09))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n#### `chromadb` - `v0.0.3`\n\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n\n## 2023-08-31\n\n### Changes\n\n---\n\nNew packages:\n\n- [`langchain_pinecone` - `v0.0.1`](#langchain_pinecone---v001)\n \nPackages with breaking changes:\n\n - [`langchain` - `v0.0.11`](#langchain---v0011)\n\nPackages with other changes:\n\n - [`langchain_google` - `v0.0.6`](#langchain_google---v006)\n - [`langchain_openai` - `v0.0.11`](#langchain_openai---v0011)\n - [`langchain_chroma` - `v0.0.2`](#langchain_chroma---v002)\n - [`vertex_ai` - `v0.0.6`](#vertex_ai---v006)\n - [`chromadb` - `v0.0.2`](#chromadb---v002)\n\n---\n\n#### `langchain` - `v0.0.11`\n\n - **DOCS**: Update readme. ([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **BREAKING** **REFACTOR**: Remove addDocuments from VectorStoreRetriever ([#146](https://github.com/davidmigloz/langchain_dart/issues/146)). ([d32a5fd9](https://github.com/davidmigloz/langchain_dart/commit/d32a5fd94645d10deee5a35f0d83501f93be7308))\n - **BREAKING** **REFACTOR**: Rename VectorStoreRetrieverMemory and require vector store ([#145](https://github.com/davidmigloz/langchain_dart/issues/145)). ([67af3195](https://github.com/davidmigloz/langchain_dart/commit/67af319595755ec3c3834ceabaf4086cfa32ad8c))\n\n#### `langchain_pinecone` - `v0.0.1`\n\n - **DOCS**: Update readme. 
([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n - **FEAT**: Add support for Pinecone VectorStore ([#37](https://github.com/davidmigloz/langchain_dart/issues/37)). ([e43eef97](https://github.com/davidmigloz/langchain_dart/commit/e43eef979c329fc72a3eed72d818992287838a80))\n\n#### `langchain_google` - `v0.0.6`\n\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n#### `langchain_openai` - `v0.0.11`\n\n - **FEAT**: Add ability to specify user in OpenAI and ChatOpenAI ([#143](https://github.com/davidmigloz/langchain_dart/issues/143)). ([457ab54e](https://github.com/davidmigloz/langchain_dart/commit/457ab54e45afd5aa382e284806dc73cfe0905c09))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n#### `langchain_chroma` - `v0.0.2`\n\n - **DOCS**: Update readme. ([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n#### `vertex_ai` - `v0.0.6`\n\n - **FIX**: typecast exception when null publicationDate ([#148](https://github.com/davidmigloz/langchain_dart/issues/148)). ([d4afc131](https://github.com/davidmigloz/langchain_dart/commit/d4afc1315f7fc041a7730346942891bb966f8d14))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n#### `chromadb` - `v0.0.2`\n\n - **REFACTOR**: Update generated Chroma API client ([#142](https://github.com/davidmigloz/langchain_dart/issues/142)). 
([4f0e7379](https://github.com/davidmigloz/langchain_dart/commit/4f0e7379f4408fe03a6433e3bdb6ebbe2262cbbc))\n\n\n## 2023-08-27\n\n### Changes\n\n---\n\nPackages with changes:\n\n- [`langchain` - `v0.0.10`](#langchain---v0010)\n- [`langchain_google` - `v0.0.5`](#langchain_google---v005)\n- [`langchain_openai` - `v0.0.10`](#langchain_openai---v0010)\n- [`vertex_ai` - `v0.0.5`](#vertex_ai---v005)\n\nNew packages:\n\n- [`langchain_chroma` - `v0.0.1`](#langchain_chroma---v001)\n- [`chromadb` - `v0.0.1`](#chromadb---v001)\n\n---\n\n#### `langchain` - `v0.0.10`\n\n- **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n#### `langchain_google` - `v0.0.5`\n\n- **DOCS**: Fix typos. ([282cfa24](https://github.com/davidmigloz/langchain_dart/commit/282cfa24caa7b91ce28db6b1997af4c2c3ecf3e4))\n- **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n#### `langchain_openai` - `v0.0.10`\n\n- **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n#### `langchain_chroma` - `v0.0.1`\n\n- **FEAT**: Add support for Chroma VectorStore ([#139](https://github.com/davidmigloz/langchain_dart/issues/139)). ([098783b4](https://github.com/davidmigloz/langchain_dart/commit/098783b4895ab30bb61d07355a0b587ff76b9175))\n\n#### `vertex_ai` - `v0.0.5`\n\n- **DOCS**: Fix typos. ([282cfa24](https://github.com/davidmigloz/langchain_dart/commit/282cfa24caa7b91ce28db6b1997af4c2c3ecf3e4))\n- **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n#### `chromadb` - `v0.0.1`\n\n- **FEAT**: Add Chroma embedding database API client ([#140](https://github.com/davidmigloz/langchain_dart/issues/140)). 
([5fdcbc52](https://github.com/davidmigloz/langchain_dart/commit/5fdcbc528c1bbac1114a89433cf72bd8870fa4eb))\n\n\n## 2023-08-23\n\n### Changes\n\n---\n\nPackages with breaking changes:\n\n - There are no breaking changes in this release.\n\nPackages with other changes:\n\n - [`langchain` - `v0.0.9`](#langchain---v009)\n - [`langchain_google` - `v0.0.4`](#langchain_google---v004)\n - [`langchain_openai` - `v0.0.9`](#langchain_openai---v009)\n - [`vertex_ai` - `v0.0.4`](#vertex_ai---v004)\n\n---\n\n#### `langchain` - `v0.0.9`\n\n - **FEAT**: Support filtering in MemoryVectorStore ([#137](https://github.com/davidmigloz/langchain_dart/issues/137)). ([84da480f](https://github.com/davidmigloz/langchain_dart/commit/84da480f6820a81f092756f0194deb77c4cda151))\n - **FEAT**: Support filtering in VertexAI Matching Engine ([#136](https://github.com/davidmigloz/langchain_dart/issues/136)). ([768c6987](https://github.com/davidmigloz/langchain_dart/commit/768c6987de5b36b60090a1fe94f49483da11b885))\n - **FEAT**: Allow to pass vector search config ([#135](https://github.com/davidmigloz/langchain_dart/issues/135)). ([5b8fa5a3](https://github.com/davidmigloz/langchain_dart/commit/5b8fa5a3fcaf785615016be1d5da0a003178cfa9))\n - **DOCS**: Fix API documentation errors ([#138](https://github.com/davidmigloz/langchain_dart/issues/138)). ([1aa38fce](https://github.com/davidmigloz/langchain_dart/commit/1aa38fce17eed7f325e7872d03096740256d57be))\n\n#### `langchain_google` - `v0.0.4`\n\n - **FEAT**: Support filtering in VertexAI Matching Engine ([#136](https://github.com/davidmigloz/langchain_dart/issues/136)). ([768c6987](https://github.com/davidmigloz/langchain_dart/commit/768c6987de5b36b60090a1fe94f49483da11b885))\n - **FEAT**: Allow to pass vector search config ([#135](https://github.com/davidmigloz/langchain_dart/issues/135)). 
([5b8fa5a3](https://github.com/davidmigloz/langchain_dart/commit/5b8fa5a3fcaf785615016be1d5da0a003178cfa9))\n - **DOCS**: Fix API documentation errors ([#138](https://github.com/davidmigloz/langchain_dart/issues/138)). ([1aa38fce](https://github.com/davidmigloz/langchain_dart/commit/1aa38fce17eed7f325e7872d03096740256d57be))\n\n#### `langchain_openai` - `v0.0.9`\n\n - **DOCS**: Update changelog. ([b211ab47](https://github.com/davidmigloz/langchain_dart/commit/b211ab4739b8feb17d00089ecca548716766272d))\n\n#### `vertex_ai` - `v0.0.4`\n\n - **DOCS**: Fix API documentation errors ([#138](https://github.com/davidmigloz/langchain_dart/issues/138)). ([1aa38fce](https://github.com/davidmigloz/langchain_dart/commit/1aa38fce17eed7f325e7872d03096740256d57be))\n\n\n## 2023-08-20\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain_openai` - `v0.0.8+1`](#langchain_openai---v0081)\n\n---\n\n#### `langchain_openai` - `v0.0.8+1`\n\n - **FIX**: OpenAIFunctionsAgent wrong prompt order with no memory ([#134](https://github.com/davidmigloz/langchain_dart/issues/134)). ([8c9dcf22](https://github.com/davidmigloz/langchain_dart/commit/8c9dcf22e5fb0229bb98e5fd22492845d44bc531))\n\n\n## 2023-08-19\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.8`](#langchain---v008)\n - [`langchain_google` - `v0.0.3`](#langchain_google---v003)\n - [`langchain_openai` - `v0.0.8`](#langchain_openai---v008)\n - [`vertex_ai` - `v0.0.3`](#vertex_ai---v003)\n\n---\n\n#### `langchain` - `v0.0.8`\n\n - **REFACTOR**: Rename store folder to chat_message_history ([#126](https://github.com/davidmigloz/langchain_dart/issues/126)). ([fa54c7e2](https://github.com/davidmigloz/langchain_dart/commit/fa54c7e22410182848b1936b64e85d9cf709eaeb))\n - **REFACTOR**: Fix Dart 3.1.0 linter issues ([#125](https://github.com/davidmigloz/langchain_dart/issues/125)). 
([cc32f3f1](https://github.com/davidmigloz/langchain_dart/commit/cc32f3f13240c28cf174a9dbffc7d61bc061f843))\n - **FEAT**: Add support for LocalFileStore ([#132](https://github.com/davidmigloz/langchain_dart/issues/132)). ([2c508dce](https://github.com/davidmigloz/langchain_dart/commit/2c508dcea4959dbe755ee713de43dc20c9680640))\n - **FEAT**: Add support for CacheBackedEmbeddings ([#131](https://github.com/davidmigloz/langchain_dart/issues/131)). ([27d8b777](https://github.com/davidmigloz/langchain_dart/commit/27d8b777b4da360e57f32de6e1e1fc09ea6b6333))\n - **FEAT**: Add FakeEmbeddings testing model ([#130](https://github.com/davidmigloz/langchain_dart/issues/130)). ([f06920d7](https://github.com/davidmigloz/langchain_dart/commit/f06920d792d1083876b040744213d78c9b11bd4c))\n - **FEAT**: Add support for EncoderBackedStore ([#129](https://github.com/davidmigloz/langchain_dart/issues/129)). ([85bb3191](https://github.com/davidmigloz/langchain_dart/commit/85bb31918308f7a956afd0f991a78cf65e6dcd8d))\n - **FEAT**: Add support for InMemoryStore ([#128](https://github.com/davidmigloz/langchain_dart/issues/128)). ([699c0904](https://github.com/davidmigloz/langchain_dart/commit/699c09045fec3f91666f7ee264525cec8b16f910))\n - **FEAT**: Add support for InMemoryDocStore ([#127](https://github.com/davidmigloz/langchain_dart/issues/127)). ([d9d7268d](https://github.com/davidmigloz/langchain_dart/commit/d9d7268ddcd9e346f67e1278127e25ee467ea99c))\n - **FEAT**: Initial vectors, ids, and delete in MemoryVectorStore ([#123](https://github.com/davidmigloz/langchain_dart/issues/123)). ([f87a738d](https://github.com/davidmigloz/langchain_dart/commit/f87a738d6e9c78aabcbd95014dd4fac2d6c58817))\n\n#### `langchain_google` - `v0.0.3`\n\n - **FEAT**: Infer queryRootUrl in VertexAIMatchingEngine ([#133](https://github.com/davidmigloz/langchain_dart/issues/133)). 
([c5353368](https://github.com/davidmigloz/langchain_dart/commit/c5353368d1455756554f6640d33d0b3752476eb9))\n\n#### `langchain_openai` - `v0.0.8`\n\n - **REFACTOR**: Fix Dart 3.1.0 linter issues ([#125](https://github.com/davidmigloz/langchain_dart/issues/125)). ([cc32f3f1](https://github.com/davidmigloz/langchain_dart/commit/cc32f3f13240c28cf174a9dbffc7d61bc061f843))\n\n#### `vertex_ai` - `v0.0.3`\n\n - **REFACTOR**: Fix Dart 3.1.0 linter issues ([#125](https://github.com/davidmigloz/langchain_dart/issues/125)). ([cc32f3f1](https://github.com/davidmigloz/langchain_dart/commit/cc32f3f13240c28cf174a9dbffc7d61bc061f843))\n\n\n## 2023-08-16\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.7+1`](#langchain---v0071)\n - [`langchain_openai` - `v0.0.7+1`](#langchain_openai---v0071)\n - [`langchain_google` - `v0.0.2+2`](#langchain_google---v0022)\n\nPackages with dependency updates only:\n\n> Packages listed below depend on other packages in this workspace that have had changes. Their versions have been incremented to bump the minimum dependency versions of the packages they depend upon in this project.\n\n - `langchain_openai` - `v0.0.7+1`\n - `langchain_google` - `v0.0.2+2`\n\n---\n\n#### `langchain` - `v0.0.7+1`\n\n - **FIX**: Text splitters were not preserving docs IDs ([#122](https://github.com/davidmigloz/langchain_dart/issues/122)). ([a9d7f098](https://github.com/davidmigloz/langchain_dart/commit/a9d7f098e650329fe43f35e2f0e11a1f61778e4f))\n\n\n## 2023-08-16\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain_google` - `v0.0.2+1`](#langchain_google---v0021)\n - [`vertex_ai` - `v0.0.2+1`](#vertex_ai---v0021)\n\n---\n\n#### `langchain_google` - `v0.0.2+1`\n\n - **DOCS**: Add VertexAI Matching Engine sample setup script ([#121](https://github.com/davidmigloz/langchain_dart/issues/121)). 
([ed2e1549](https://github.com/davidmigloz/langchain_dart/commit/ed2e1549ca1d6bb0223231bcbe0c1c4a6a198402))\n\n#### `vertex_ai` - `v0.0.2+1`\n\n - **FIX**: approximateNeighborsCount deserialization issue ([#120](https://github.com/davidmigloz/langchain_dart/issues/120)). ([f3381208](https://github.com/davidmigloz/langchain_dart/commit/f33812082191d3105d692f1d6bf461069df14750))\n - **DOCS**: Update readme. ([81f67c5e](https://github.com/davidmigloz/langchain_dart/commit/81f67c5e2f5963df9756d3245dd263b2060af173))\n\n\n## 2023-08-16\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.7`](#langchain---v007)\n - [`langchain_google` - `v0.0.2`](#langchain_google---v002)\n - [`langchain_openai` - `v0.0.7`](#langchain_openai---v007)\n - [`vertex_ai` - `v0.0.2`](#vertex_ai---v002)\n\n---\n\n#### `langchain` - `v0.0.7`\n\n - **FEAT**: Integrate Vertex AI Matching Engine vector store ([#103](https://github.com/davidmigloz/langchain_dart/issues/103)). ([289c3eef](https://github.com/davidmigloz/langchain_dart/commit/289c3eef722206ac9dea0c968c036ad3289d10be))\n\n#### `langchain_google` - `v0.0.2`\n\n - **FEAT**: Integrate Vertex AI Matching Engine vector store ([#103](https://github.com/davidmigloz/langchain_dart/issues/103)). ([289c3eef](https://github.com/davidmigloz/langchain_dart/commit/289c3eef722206ac9dea0c968c036ad3289d10be))\n\n#### `langchain_openai` - `v0.0.7`\n\n - Updated `langchain` dependency\n\n#### `vertex_ai` - `v0.0.2`\n\n - **FEAT**: Add GCP Vertex AI Matching Engine client ([#116](https://github.com/davidmigloz/langchain_dart/issues/116)). 
([2c1bbfcc](https://github.com/davidmigloz/langchain_dart/commit/2c1bbfcca22a4ddfb40bcb28e8ec8cbca865a9a5))\n\n\n## 2023-08-13\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.6`](#langchain---v006)\n - [`langchain_openai` - `v0.0.6`](#langchain_openai---v006)\n\nNew packages:\n\n - [`langchain_google` - `v0.0.1`](#langchain_google---v001)\n - [`vertex_ai` - `v0.0.1`](#vertex_ai---v001)\n\n---\n\n#### `langchain` - `v0.0.6`\n\n - **REFACTOR**: Always await or explicitly discard Futures ([#106](https://github.com/davidmigloz/langchain_dart/issues/106)). ([989e93db](https://github.com/davidmigloz/langchain_dart/commit/989e93dbf6b5d61f053550219d88842156aeb492))\n - **FIX**: Fix OpenAIQAWithSourcesChain returning empty strings ([#113](https://github.com/davidmigloz/langchain_dart/issues/113)). ([6181ff8d](https://github.com/davidmigloz/langchain_dart/commit/6181ff8df77653d38cd84cb066776c04c0ff74ad))\n - **FIX**: VectorStore k variable was ignored ([#110](https://github.com/davidmigloz/langchain_dart/issues/110)). ([80e61eb7](https://github.com/davidmigloz/langchain_dart/commit/80e61eb7a11757f4e541ce5ba6033fb11b1b01f0))\n\n#### `langchain_openai` - `v0.0.6`\n\n - **REFACTOR**: Always await or explicitly discard Futures ([#106](https://github.com/davidmigloz/langchain_dart/issues/106)). ([989e93db](https://github.com/davidmigloz/langchain_dart/commit/989e93dbf6b5d61f053550219d88842156aeb492))\n - **FIX**: Fix OpenAIQAWithSourcesChain returning empty strings ([#113](https://github.com/davidmigloz/langchain_dart/issues/113)). ([6181ff8d](https://github.com/davidmigloz/langchain_dart/commit/6181ff8df77653d38cd84cb066776c04c0ff74ad))\n\n#### `langchain_google` - `v0.0.1`\n\n - **FEAT**: Integrate Google Vertex AI PaLM Text model ([#98](https://github.com/davidmigloz/langchain_dart/issues/98)). 
([b2746c23](https://github.com/davidmigloz/langchain_dart/commit/b2746c235d68045ba20afd1f2be7c24dcccb5f24))\n - **FEAT**: Integrate Google Vertex AI PaLM Chat Model ([#99](https://github.com/davidmigloz/langchain_dart/issues/99)). ([3897595d](https://github.com/davidmigloz/langchain_dart/commit/3897595db597d5957ef80ae7a1de35c5f41265b8))\n - **FEAT**: Integrate Google Vertex AI PaLM Embeddings ([#100](https://github.com/davidmigloz/langchain_dart/issues/100)). ([d777eccc](https://github.com/davidmigloz/langchain_dart/commit/d777eccc0c81c58b322f28e6e3c4a8763f3f84b7))\n\n#### `vertex_ai` - `v0.0.1`\n\n - **REFACTOR**: Move Vertex AI client to its own package ([#111](https://github.com/davidmigloz/langchain_dart/issues/111)). ([d8aea156](https://github.com/davidmigloz/langchain_dart/commit/d8aea15633f1a9fb0df35cf9cc44bbc93ad46cd8))\n\n## 2023-08-09\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.5+1`](#langchain---v0051)\n - [`langchain_openai` - `v0.0.5+1`](#langchain_openai---v0051)\n\n---\n\n#### `langchain` - `v0.0.5+1`\n\n - **FIX**: OpenAIOptions class not exported ([#104](https://github.com/davidmigloz/langchain_dart/issues/104)). ([e50efc3d](https://github.com/davidmigloz/langchain_dart/commit/e50efc3ddf0b13ece43298b2e3fee531e944601d))\n - **DOCS**: Improve RetrievalQAChain API documentation ([#95](https://github.com/davidmigloz/langchain_dart/issues/95)). ([e6d0a9d3](https://github.com/davidmigloz/langchain_dart/commit/e6d0a9d3abd65704883452e50b40344428f9580d))\n\n#### `langchain_openai` - `v0.0.5+1`\n\n - **FIX**: ChatOpenAIOptions class not exported ([#105](https://github.com/davidmigloz/langchain_dart/issues/105)). ([dfd77076](https://github.com/davidmigloz/langchain_dart/commit/dfd77076dfb60cd71aed3654f78c562ce0bc88bf))\n - **FIX**: OpenAIOptions class not exported ([#104](https://github.com/davidmigloz/langchain_dart/issues/104)). 
([e50efc3d](https://github.com/davidmigloz/langchain_dart/commit/e50efc3ddf0b13ece43298b2e3fee531e944601d))\n\n\n## 2023-08-06\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.5`](#langchain---v005)\n - [`langchain_openai` - `v0.0.5`](#langchain_openai---v005)\n\n---\n\n#### `langchain` - `v0.0.5`\n\n - **FIX**: Stuff and MapReduce docs chains don't handle chat messages ([#92](https://github.com/davidmigloz/langchain_dart/issues/92)). ([19182ca1](https://github.com/davidmigloz/langchain_dart/commit/19182ca1921e53fc2cb0fa61d96d602aacf830f3))\n - **FEAT**: Update AgentExecutor constructor to use agent's tools ([#89](https://github.com/davidmigloz/langchain_dart/issues/89)). ([3af56a45](https://github.com/davidmigloz/langchain_dart/commit/3af56a45930fff84b11f6bec29c50502a490c2b4))\n - **FEAT**: Add MessagePlaceholder ([#87](https://github.com/davidmigloz/langchain_dart/issues/87)). ([23ee95b6](https://github.com/davidmigloz/langchain_dart/commit/23ee95b6cb0bb15701a141adc41ee1b826684ad0))\n - **DOCS**: Update CONTRIBUTING.md. ([5f2b9264](https://github.com/davidmigloz/langchain_dart/commit/5f2b92641ae1f20fcc8803c977428b81e3f525bd))\n - **DOCS**: Fix typo in MessagePlaceholder API docs ([#90](https://github.com/davidmigloz/langchain_dart/issues/90)). ([f53e1a2b](https://github.com/davidmigloz/langchain_dart/commit/f53e1a2b9dc81c89a66a368758cfd1ec7df4c0f9))\n\n#### `langchain_openai` - `v0.0.5`\n\n - **FIX**: FunctionChatMessage not saved properly in memory ([#88](https://github.com/davidmigloz/langchain_dart/issues/88)). ([d7b763de](https://github.com/davidmigloz/langchain_dart/commit/d7b763ded1abd59a964afd781558b3559a65d9ec))\n - **FEAT**: Support LLMChain in OpenAIFunctionsAgent and memory. ([bd4a1cb9](https://github.com/davidmigloz/langchain_dart/commit/bd4a1cb9101ba385ce9613f9aa0b7e5474380f32))\n - **FEAT**: Return ChatMessage when LLMChain used with ChatModel. ([bb5f4d23](https://github.com/davidmigloz/langchain_dart/commit/bb5f4d2325ae1f615159f2ffd11cc8ec4e87ed3c))\n\n\n## 2023-07-28\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.3`](#langchain---v003)\n - [`langchain_openai` - `v0.0.3`](#langchain_openai---v003)\n\n---\n\n#### `langchain` - `v0.0.3`\n\n - **FIX**: Loaders tests. 
([3af56a45](https://github.com/davidmigloz/langchain_dart/commit/3af56a45930fff84b11f6bec29c50502a490c2b4))\n - **DOCS**: Add example of using memory in OpenAIFunctionsAgent ([#91](https://github.com/davidmigloz/langchain_dart/issues/91)). ([898d5350](https://github.com/davidmigloz/langchain_dart/commit/898d53502713ec2fd1ecc93e76e7f941123b81a5))\n\n\n## 2023-08-05\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.4`](#langchain---v004)\n - [`langchain_openai` - `v0.0.4`](#langchain_openai---v004)\n\n---\n\n#### `langchain` - `v0.0.4`\n\n - **REFACTOR**: Extract default memory key and prefixes to constants. ([750fd01a](https://github.com/davidmigloz/langchain_dart/commit/750fd01a74f94042cbc26684d6651b531fb0a93c))\n - **FIX**: systemChatMessage was ignored in OpenAIFunctionsAgent ([#86](https://github.com/davidmigloz/langchain_dart/issues/86)). ([cfe1e009](https://github.com/davidmigloz/langchain_dart/commit/cfe1e00972d481f83b9dc9e225a32b7077aa5fd4))\n - **FIX**: Allow to add memory to an agent executor ([#80](https://github.com/davidmigloz/langchain_dart/issues/80)). ([8110464c](https://github.com/davidmigloz/langchain_dart/commit/8110464c4b4ad53f3b1826722df76943d0d66621))\n - **FEAT**: Add ConversationSummaryMemory ([#27](https://github.com/davidmigloz/langchain_dart/issues/27)). ([f631d9e5](https://github.com/davidmigloz/langchain_dart/commit/f631d9e529d99319afe671b5aff441436e43ea31))\n - **FEAT**: Support LLMChain in OpenAIFunctionsAgent and memory. ([bd4a1cb9](https://github.com/davidmigloz/langchain_dart/commit/bd4a1cb9101ba385ce9613f9aa0b7e5474380f32))\n - **FEAT**: Return ChatMessage when LLMChain used with ChatModel. ([bb5f4d23](https://github.com/davidmigloz/langchain_dart/commit/bb5f4d2325ae1f615159f2ffd11cc8ec4e87ed3c))\n - **FEAT**: Add FakeChatModel for testing purposes. 
([659783a6](https://github.com/davidmigloz/langchain_dart/commit/659783a6ccad9fc3046040f38c39805743ffdff1))\n - **FEAT**: Add support for ConversationTokenBufferMemory ([#26](https://github.com/davidmigloz/langchain_dart/issues/26)). ([8113d1c0](https://github.com/davidmigloz/langchain_dart/commit/8113d1c0dc742ce9f6c49018c4b012cd3823fac1))\n - **FEAT**: Improve SummarizeChain.mapReduce summaryMaxTokens name and docs. ([0be06e02](https://github.com/davidmigloz/langchain_dart/commit/0be06e02f280de54a2790d150fac142d9fbe4222))\n - **FEAT**: Add support for CsvLoader ([#77](https://github.com/davidmigloz/langchain_dart/issues/77)). ([41d24e76](https://github.com/davidmigloz/langchain_dart/commit/41d24e7632a77b08234951c0e6bf911530dff56a))\n - **FEAT**: Add ConversationBufferWindowMemory ([#25](https://github.com/davidmigloz/langchain_dart/issues/25)). ([9c271f7e](https://github.com/davidmigloz/langchain_dart/commit/9c271f7e7a31bc59c122a895daf238a0bb5ac7d0))\n\n#### `langchain_openai` - `v0.0.4`\n\n - **FIX**: systemChatMessage was ignored in OpenAIFunctionsAgent ([#86](https://github.com/davidmigloz/langchain_dart/issues/86)). ([cfe1e009](https://github.com/davidmigloz/langchain_dart/commit/cfe1e00972d481f83b9dc9e225a32b7077aa5fd4))\n - **FEAT**: Support LLMChain in OpenAIFunctionsAgent and memory. ([bd4a1cb9](https://github.com/davidmigloz/langchain_dart/commit/bd4a1cb9101ba385ce9613f9aa0b7e5474380f32))\n - **FEAT**: Return ChatMessage when LLMChain used with ChatModel. ([bb5f4d23](https://github.com/davidmigloz/langchain_dart/commit/bb5f4d2325ae1f615159f2ffd11cc8ec4e87ed3c))\n\n\n## 2023-07-28\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.3`](#langchain---v003)\n - [`langchain_openai` - `v0.0.3`](#langchain_openai---v003)\n\n---\n\n#### `langchain` - `v0.0.3`\n\n - **FIX**: Loaders tests. 
([f0498300](https://github.com/davidmigloz/langchain_dart/commit/f049830057fc1b8ff315469afd1512aa13ceb459))\n - **FEAT**: Update internal dependencies (including http to 1.1.0). ([8f3e8bc8](https://github.com/davidmigloz/langchain_dart/commit/8f3e8bc811df5c8bdba2c7e33b6c53ea0c2edad4))\n - **FEAT**: Add support for VectorStoreRetrieverMemory ([#54](https://github.com/davidmigloz/langchain_dart/issues/54)). ([72cd1b10](https://github.com/davidmigloz/langchain_dart/commit/72cd1b100ad88e7213ec12d432674ec4666ce172))\n\n#### `langchain_openai` - `v0.0.3`\n\n - **FEAT**: Update internal dependencies (including http to 1.1.0). ([8f3e8bc8](https://github.com/davidmigloz/langchain_dart/commit/8f3e8bc811df5c8bdba2c7e33b6c53ea0c2edad4))\n\n\n## 2023-07-23\n\n### Changes\n\n---\n\nPackages with changes:\n\n - [`langchain` - `v0.0.2`](#langchain---v002)\n - [`langchain_openai` - `v0.0.2`](#langchain_openai---v002)\n\n---\n\n#### `langchain` - `v0.0.2`\n\n - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85))\n - **FEAT**: Add support for SummarizeChain ([#58](https://github.com/davidmigloz/langchain_dart/issues/58)). ([9499fc04](https://github.com/davidmigloz/langchain_dart/commit/9499fc047ae8be7e7b9dfb0d0ef8678b84245f5d))\n - **FEAT**: Add support for SequentialChain class ([#30](https://github.com/davidmigloz/langchain_dart/issues/30)). ([381a6768](https://github.com/davidmigloz/langchain_dart/commit/381a676812992370da61ced0e59de5fadf0ef164))\n - **FEAT**: Add support for WebBaseLoader ([#74](https://github.com/davidmigloz/langchain_dart/issues/74)). ([0b5bf4b0](https://github.com/davidmigloz/langchain_dart/commit/0b5bf4b0fb2cf6e1a7be116920e9512233e7e613))\n - **FEAT**: Add Support for JsonLoader ([#72](https://github.com/davidmigloz/langchain_dart/issues/72)). 
([2457a973](https://github.com/davidmigloz/langchain_dart/commit/2457a9735aacc2aeffcca2710ce0afc7be2f6f09))\n - **FEAT**: Add support for MapReduceDocumentsChain ([#59](https://github.com/davidmigloz/langchain_dart/issues/59)). ([9f2190c4](https://github.com/davidmigloz/langchain_dart/commit/9f2190c4d5f45378f91eaa02d52d8305f7da254e))\n - **FEAT**: Add support for ReduceDocumentsChain ([#70](https://github.com/davidmigloz/langchain_dart/issues/70)). ([34cf10bd](https://github.com/davidmigloz/langchain_dart/commit/34cf10bd485618bff4cddb5b29a1b46ac9f3a9fa))\n - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7))\n - **FEAT**: Add support for CodeTextSplitter ([#63](https://github.com/davidmigloz/langchain_dart/issues/63)). ([92a8c7da](https://github.com/davidmigloz/langchain_dart/commit/92a8c7daccda2be38a25d4bdb0235c2f397225a2))\n - **FEAT**: Add support for RecursiveCharacterTextSplitter ([#61](https://github.com/davidmigloz/langchain_dart/issues/61)). ([697cdcbf](https://github.com/davidmigloz/langchain_dart/commit/697cdcbfef8fc45930de127cb5b7ee2eb3d7ec37))\n - **DOCS**: Document sequential chain. ([b9693a4e](https://github.com/davidmigloz/langchain_dart/commit/b9693a4e2dfcc6bfc74025ebb935865be942b266))\n - **DOCS**: Document text, json and web loaders. ([a95b3e9f](https://github.com/davidmigloz/langchain_dart/commit/a95b3e9f843fcffce9449ea93f343df793512a09))\n - **DOCS**: Update API docs. ([7bfa6d17](https://github.com/davidmigloz/langchain_dart/commit/7bfa6d17cf57aac05906b1401ac3967c21e6f403))\n - **DOCS**: Update readme. ([dd394715](https://github.com/davidmigloz/langchain_dart/commit/dd39471557b37da0d0c2a87dea0c067463a45f45))\n\n#### `langchain_openai` - `v0.0.2`\n\n - **FIX**: OpenAIQAWithSourcesChain throws exception. 
([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85))\n - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7))\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "\n# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participation in our\ncommunity a harassment-free experience for everyone, regardless of age, body\nsize, visible or invisible disability, ethnicity, sex characteristics, gender\nidentity and expression, level of experience, education, socio-economic status,\nnationality, personal appearance, race, caste, color, religion, or sexual\nidentity and orientation.\n\nWe pledge to act and interact in ways that contribute to an open, welcoming,\ndiverse, inclusive, and healthy community.\n\n## Our Standards\n\nExamples of behavior that contributes to a positive environment for our\ncommunity include:\n\n* Demonstrating empathy and kindness toward other people\n* Being respectful of differing opinions, viewpoints, and experiences\n* Giving and gracefully accepting constructive feedback\n* Accepting responsibility and apologizing to those affected by our mistakes,\n  and learning from the experience\n* Focusing on what is best not just for us as individuals, but for the overall\n  community\n\nExamples of unacceptable behavior include:\n\n* The use of sexualized language or imagery, and sexual attention or advances of\n  any kind\n* Trolling, insulting or derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or email address,\n  without their explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Enforcement Responsibilities\n\nCommunity leaders are responsible for clarifying and enforcing our standards of\nacceptable behavior and will take appropriate and fair corrective action in\nresponse to any behavior that they deem inappropriate, threatening, offensive,\nor harmful.\n\nCommunity leaders have the right and responsibility to remove, edit, or reject\ncomments, commits, code, wiki 
edits, issues, and other contributions that are\nnot aligned to this Code of Conduct, and will communicate reasons for moderation\ndecisions when appropriate.\n\n## Scope\n\nThis Code of Conduct applies within all community spaces, and also applies when\nan individual is officially representing the community in public spaces.\nExamples of representing our community include using an official e-mail address,\nposting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported to the community leaders responsible for enforcement at the \nLangChain.dart Discord server. All complaints will be reviewed and investigated \npromptly and fairly.\n\nAll community leaders are obligated to respect the privacy and security of the\nreporter of any incident.\n\n## Enforcement Guidelines\n\nCommunity leaders will follow these Community Impact Guidelines in determining\nthe consequences for any action they deem in violation of this Code of Conduct:\n\n### 1. Correction\n\n**Community Impact**: Use of inappropriate language or other behavior deemed\nunprofessional or unwelcome in the community.\n\n**Consequence**: A private, written warning from community leaders, providing\nclarity around the nature of the violation and an explanation of why the\nbehavior was inappropriate. A public apology may be requested.\n\n### 2. Warning\n\n**Community Impact**: A violation through a single incident or series of\nactions.\n\n**Consequence**: A warning with consequences for continued behavior. No\ninteraction with the people involved, including unsolicited interaction with\nthose enforcing the Code of Conduct, for a specified period of time. This\nincludes avoiding interactions in community spaces as well as external channels\nlike social media. Violating these terms may lead to a temporary or permanent\nban.\n\n### 3. 
Temporary Ban\n\n**Community Impact**: A serious violation of community standards, including\nsustained inappropriate behavior.\n\n**Consequence**: A temporary ban from any sort of interaction or public\ncommunication with the community for a specified period of time. No public or\nprivate interaction with the people involved, including unsolicited interaction\nwith those enforcing the Code of Conduct, is allowed during this period.\nViolating these terms may lead to a permanent ban.\n\n### 4. Permanent Ban\n\n**Community Impact**: Demonstrating a pattern of violation of community\nstandards, including sustained inappropriate behavior, harassment of an\nindividual, or aggression toward or disparagement of classes of individuals.\n\n**Consequence**: A permanent ban from any sort of public interaction within the\ncommunity.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage],\nversion 2.1, available at\n[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].\n\nCommunity Impact Guidelines were inspired by\n[Mozilla's code of conduct enforcement ladder][Mozilla CoC].\n\nFor answers to common questions about this code of conduct, see the FAQ at\n[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at\n[https://www.contributor-covenant.org/translations][translations].\n\n[homepage]: https://www.contributor-covenant.org\n[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html\n[Mozilla CoC]: https://github.com/mozilla/diversity\n[FAQ]: https://www.contributor-covenant.org/faq\n[translations]: https://www.contributor-covenant.org/translations\n\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to LangChain.dart\n\n👋 Hi there! Thank you for even being interested in contributing to LangChain.dart.\nWe welcome contributions of any size and skill level, whether it be in the form of a new \nfeature, improved infra, or better documentation.\n\n## 🙋Where to start\n\nIf you are not sure what to work on, we have a few suggestions:\n\n- Look at the issues with the \"[good first issue](https://github.com/davidmigloz/langchain_dart/issues?q=is%3Aissue+is%3Aopen+label%3A%22f%3Agood+first+issue%22)\"\n  label. These are issues that we think are good targets for new contributors. If you are \n  interested in working on one of these, please comment on the issue so that we can assign it to \n  you. And any questions let us know, we're happy to guide you!\n- At the moment our main focus is reaching parity with the Python version across both \n  [integrations](https://langchain.com/integrations.html) and [features](https://langchain.com/features.html). \n  If you are interested in working on a specific integration or feature, just pick anything from \n  those lists not done yet, please let us know and we can help you get started.\n\nOnce you have picked an issue, read the rest of this document to get set up and start contributing!\n\n## 🗺️ Guidelines\n\n### 📁 Project structure\n\nThe project has a modular design where the core `langchain` package provides the LangChain API and\neach integration with a model provider, data store, etc. is provided by a separate package\n(e.g. 
`langchain_openai`).\n\nThe project is structured as follows:\n- `packages`: contains the core `langchain` package and all integration packages.\n- `docs`: contains the project documentation.\n- `examples`: contains sample apps that demonstrate how to use LangChain.dart.\n\n### 👩‍💻 Workflow\n\nTo contribute to this project, please follow a\n[\"fork and pull request\"](https://github.com/firstcontributions/first-contributions) workflow.\n\nPull requests cannot land without passing the formatting, linting and testing checks first. See \n[Common Tasks](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md#-common-tasks) \nfor how to run these checks locally.\n\nThe project follows [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages.\nThis allows us to automatically generate the changelog and release notes. We use the following types:\n- `feat`: a new feature.\n- `fix`: a bug fix.\n- `refactor`: a code change that neither fixes a bug nor adds a feature.\n- `test`: adding missing tests or correcting existing tests.\n- `docs`: documentation only changes.\n- `chore`: changes to the build process or auxiliary tools.\n\nAfter the type comes the scope, we use the component name indicated in the issue (e.g. `chains`, `llms`, etc.).\nIf it is a breaking change, append a `!` after the type/scope (e.g. `feat(chains)!: ...`).\nAt the end of the commit message, add the ticket number (e.g. `#123`).\n\nThese are some examples of valid commit messages:\n```\nfeat(memory): Add support for ConversationTokenBufferMemory (#26)\nfix(agents): Allow to add memory to an agent executor (#80)\nrefactor(memory): Extract default memory key and prefixes to constants\ntest(memory): ChatMessageHistory removeLast and removeFirst (#82)\ndocs(agents): Add example of using memory in OpenAIFunctionsAgent (#91)\n```\n\nIt's essential that we maintain great documentation and testing. 
If you:\n- Add a new feature\n  * Add relevant documentation (API docs, project docs, examples, etc.)\n  * Add relevant unit or integration test\n- Fix a bug\n  * Add a relevant unit or integration test\n- Make an improvement\n  * Add or update relevant documentation (API docs, project docs, examples, etc.)\n  * Add or update relevant unit or integration test\n\nIf you add a new major piece of functionality, it is helpful to add an example to showcase how to\nuse it. Most of our users find examples to be the most helpful kind of documentation.\n\nExamples can be added in the `examples` directory of the repository. Each example should contain\na README that explains what the example does, and how to run it.\n\nWe're a small, building-oriented team. If there's something you'd like to add or change, opening a pull \nrequest is the best way to get our attention.\n\n### 🚩 GitHub issues\n\nOur [issues](https://github.com/davidmigloz/langchain_dart/issues) page is kept up to date\nwith bugs, improvements, and feature requests. There is a \n[taxonomy of labels](https://github.com/davidmigloz/langchain_dart/labels) to help with sorting and \ndiscovery of issues of interest. 
These include:\n\n- [doc-loaders](https://github.com/davidmigloz/langchain_dart/labels/c%3Adoc-loaders)\n- [doc-transformers](https://github.com/davidmigloz/langchain_dart/labels/c%3Adoc-transformers)\n- [prompts](https://github.com/davidmigloz/langchain_dart/labels/c%3Aprompts)\n- [llms](https://github.com/davidmigloz/langchain_dart/labels/c%3Allms)\n- [chat-models](https://github.com/davidmigloz/langchain_dart/labels/c%3Achat-models)\n- [output-parsers](https://github.com/davidmigloz/langchain_dart/labels/c%3Aoutput-parsers)\n- [chains](https://github.com/davidmigloz/langchain_dart/labels/c%3Achains)\n- [memory](https://github.com/davidmigloz/langchain_dart/labels/c%3Amemory)\n- [stores](https://github.com/davidmigloz/langchain_dart/labels/c%3Astores)\n- [embeddings](https://github.com/davidmigloz/langchain_dart/labels/c%3Aembeddings)\n- [retrievers](https://github.com/davidmigloz/langchain_dart/labels/c%3Aretrievers)\n- [agents](https://github.com/davidmigloz/langchain_dart/labels/c%3Aagents)\n- [tools](https://github.com/davidmigloz/langchain_dart/labels/c%3Atools)\n\nWe also have a [kanban board](https://github.com/users/davidmigloz/projects/2) for tracking the \nprogress of issues through the development process.\n\nIf you start working on an issue, please comment on it and we will assign it to you.\n\nIf you are adding an issue, please try to keep it focused on a single modular \nbug/improvement/feature. If two issues are related, or blocking, please link them rather than \nkeep them as one single one.\n\nWe will try to keep these issues as up to date as possible, though with the rapid rate of development \nin this field some may get out of date. If you notice this happening, please just let us know.\n\n### 🙋 Getting help\n\nAlthough we try to have a developer setup to make it as easy as possible for others to contribute \n(see below) it is possible that some pain point may arise around environment setup, linting, \ndocumentation, or other. 
Should that occur, please contact a maintainer on our \n[Discord](https://discord.gg/x4qbhqecVR)! Not only do we want to help get you unblocked, but we \nalso want to make sure that the process is smooth for future contributors.\n\nIn a similar vein, we do enforce certain linting, formatting, and documentation standards in the \ncodebase. If you are finding these difficult (or even just annoying) to work with, feel free to \ncontact a maintainer for help - we do not want these to get in the way of getting good code into \nthe codebase.\n\n## 🚀 Quick start\n\nOur primary goal is to make it as easy as possible for you to contribute to this project.\n\nThis project uses the following tools, which are worth getting familiar with if you plan to \ncontribute:\n\n- **[melos](https://melos.invertase.dev)** - a tool for managing Dart monorepos.\n- **[linter](https://dart-lang.github.io/linter)** - official Dart linter.\n- **[analyzer](https://pub.dev/packages/analyzer)** - official Dart static analyzer.\n- **[GitHub Actions](https://github.com/features/actions)** - a CI/CD tool for GitHub repositories.\n- **[docsify](https://docsify.js.org)** - static site generation for documentation.\n\nTo get started, run:\n\n```bash\nmelos bootstrap\n```\n\nThis will install all the dependencies and set up the monorepo. 
If you don't get any errors, \nyou are good to go!\n\n## ✅ Common tasks\n\n### Formatting\n\nWe use [dart format](https://dart.dev/tools/dart-format) to enforce \n[code formatting style](https://github.com/davidmigloz/langchain_dart/blob/main/analysis_options.yaml).\n\nTo run the formatter, run:\n\n```bash\nmelos format\n```\n\nIt will automatically fix (most) format errors.\n\n### Linting\n\nWe use the official [linter](https://dart-lang.github.io/linter) and \n[analyzer](https://pub.dev/packages/analyzer) to enforce \n[standard rules](https://github.com/davidmigloz/langchain_dart/blob/main/analysis_options.yaml).\n\nTo run the linter, run:\n\n```bash\nmelos lint\n```\n\n### Testing\n\nTo run all tests, run:\n\n```bash\nmelos test\n```\n\nIf you only want to run the tests for the packages that have changed compared to `main`, run:\n\n```bash\nmelos test:diff\n```\n\n## 📄 Documentation\n\nThe [API reference](https://pub.dev/documentation/langchain/latest/) docs are automatically \ngenerated from the code. For that reason, we ask that you add good documentation to all classes \nand methods.\n\nThe general [langchaindart.com](http://langchaindart.com) docs are written in Markdown and live in\nthe `docs` directory. The static site is generated using [docsify](https://docsify.js.org). If you \nadd a new major piece of functionality, please add a page to the docs that explains how to use it.\n\nYou can run a hot-reloading version of the docs static site by running:\n\n```bash\ndocsify serve docs\n```\n\n## 🏭 Release process\n\nAs of now, LangChain has an ad-hoc release process: releases are cut with high frequency by\na developer and published to [pub.dev](https://pub.dev/packages/langchain).\n\nLangChain follows the [semver](https://semver.org/) versioning standard. 
However, as pre-1.0\nsoftware, even patch releases may contain\n[non-backwards-compatible changes](https://semver.org/#spec-item-4).\n\nTo create a release, run:\n\n```bash\nmelos version --no-private --no-git-tag-version\n```\n\nOr if you want to define the version manually:\n\n```bash\nmelos version -V langchain:x.x.x --no-private --no-git-tag-version\n```\n\nThis command will bump the version in the `langchain` package and all packages that depend on it.\nIt will also update the CHANGELOG.md for each package and commit the changes.\n\nUpdate any CHANGELOG.md details if needed. Then create a new release on GitHub and copy the \nCHANGELOG.md content into the release description.\n\nAfter that, you can publish the package to pub.dev:\n\n```bash\nmelos publish --no-dry-run --git-tag-version\n```\n\nFinally, drop by the [Discord](https://discord.gg/x4qbhqecVR) and let everyone know about the new\nrelease!\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "analysis_options.yaml",
    "content": "analyzer:\n  language:\n    strict-inference: true\n    strict-raw-types: true\n  errors:\n    missing_required_param: error\n    missing_return: error\n    todo: ignore\n    sdk_version_since: ignore # TODO remove when fixed https://github.com/dart-lang/sdk/issues/52327\n  exclude:\n    - \"**/generated_plugin_registrant.dart\"\n    - \"**/generated/**\"\n    - \"**/*.gen.dart\"\n    - \"**/*.g.dart\"\n\nlinter:\n  rules: # https://dart-lang.github.io/linter/lints/{rule}.html\n    - always_declare_return_types\n    - annotate_overrides\n    - annotate_redeclares\n    - avoid_bool_literals_in_conditional_expressions\n    # - avoid_catches_without_on_clauses # blocked on https://github.com/dart-lang/linter/issues/3023\n    # - avoid_catching_errors # blocked on https://github.com/dart-lang/linter/issues/3023\n    - avoid_double_and_int_checks\n    - avoid_dynamic_calls\n    - avoid_empty_else\n    - avoid_equals_and_hash_code_on_mutable_classes\n    - avoid_escaping_inner_quotes\n    - avoid_field_initializers_in_const_classes\n    - avoid_function_literals_in_foreach_calls\n    - avoid_futureor_void\n    - avoid_implementing_value_types\n    - avoid_init_to_null\n    - avoid_js_rounded_ints\n    - avoid_multiple_declarations_per_line\n    - avoid_null_checks_in_equality_operators\n    - avoid_positional_boolean_parameters\n    - avoid_print\n    # - avoid_redundant_argument_values # I prefer to be explicit sometimes\n    - avoid_relative_lib_imports\n    - avoid_renaming_method_parameters\n    - avoid_return_types_on_setters\n    - avoid_returning_null_for_void\n    - avoid_returning_this\n    - avoid_setters_without_getters\n    - avoid_shadowing_type_parameters\n    - avoid_single_cascade_in_expression_statements\n    - avoid_slow_async_io\n    - avoid_type_to_string\n    - avoid_types_as_parameter_names\n    - avoid_unnecessary_containers\n    - avoid_unused_constructor_parameters\n    - avoid_void_async\n    - avoid_web_libraries_in_flutter\n    
- await_only_futures\n    - camel_case_extensions\n    - camel_case_types\n    - cancel_subscriptions\n    - cascade_invocations\n    - cast_nullable_to_non_nullable\n    - collection_methods_unrelated_type\n    - combinators_ordering\n    - constant_identifier_names\n    - control_flow_in_finally\n    - curly_braces_in_flow_control_structures\n    - dangling_library_doc_comments\n    - depend_on_referenced_packages\n    - deprecated_consistency\n    #    - diagnostic_describe_all_properties # Disabled because it's very verbose\n    - directives_ordering\n    - discarded_futures\n    # - document_ignores # Disabled because it's very verbose\n    - empty_catches\n    - empty_constructor_bodies\n    - empty_statements\n    # - eol_at_end_of_file # Seems to be causing some perf issues (https://github.com/dart-lang/sdk/issues/55281#issuecomment-2519977056)\n    - exhaustive_cases\n    - file_names\n    - hash_and_equals\n    - implementation_imports\n    - implicit_call_tearoffs\n    - invalid_case_patterns\n    - invalid_runtime_check_with_js_interop_types\n    - join_return_with_assignment\n    - leading_newlines_in_multiline_strings\n    - library_annotations\n    - library_names\n    - library_prefixes\n    - library_private_types_in_public_api\n    - matching_super_parameters\n    - missing_code_block_language_in_doc_comment\n    - missing_whitespace_between_adjacent_strings\n    - no_adjacent_strings_in_list\n    - no_default_cases\n    - no_duplicate_case_values\n    - no_leading_underscores_for_library_prefixes\n    - no_leading_underscores_for_local_identifiers\n    - no_literal_bool_comparisons\n    - no_logic_in_create_state\n    - no_runtimeType_toString\n    - no_wildcard_variable_uses\n    - non_constant_identifier_names\n    - noop_primitive_operations\n    - null_check_on_nullable_type_parameter\n    - null_closures\n    - omit_obvious_local_variable_types\n    # - omit_obvious_property_types # Collides with type_annotate_public_apis\n    - 
only_throw_errors\n    - overridden_fields\n    - package_names\n    - package_prefixed_library_names\n    - parameter_assignments\n    - prefer_adjacent_string_concatenation\n    - prefer_asserts_in_initializer_lists\n    - prefer_collection_literals\n    - prefer_conditional_assignment\n    - prefer_const_constructors\n    - prefer_const_constructors_in_immutables\n    - prefer_const_declarations\n    - prefer_const_literals_to_create_immutables\n    - prefer_constructors_over_static_methods\n    - prefer_contains\n    - prefer_final_fields\n    - prefer_final_in_for_each\n    - prefer_final_locals\n    # - prefer_final_parameters # Very verbose\n    - prefer_for_elements_to_map_fromIterable\n    - prefer_foreach\n    - prefer_function_declarations_over_variables\n    - prefer_generic_function_type_aliases\n    - prefer_if_elements_to_conditional_expressions\n    - prefer_if_null_operators\n    - prefer_initializing_formals\n    - prefer_inlined_adds\n    - prefer_interpolation_to_compose_strings\n    - prefer_is_empty\n    - prefer_is_not_empty\n    - prefer_is_not_operator\n    - prefer_iterable_whereType\n    - prefer_null_aware_operators\n    - prefer_relative_imports\n    - prefer_single_quotes\n    - prefer_spread_collections\n    - prefer_typing_uninitialized_variables\n    - prefer_void_to_null\n    - provide_deprecation_message\n    - public_member_api_docs\n    - recursive_getters\n    - require_trailing_commas\n    - secure_pubspec_urls\n    - sized_box_for_whitespace\n    - slash_for_doc_comments\n    - sort_child_properties_last\n    - sort_unnamed_constructors_first\n    # - specify_nonobvious_local_variable_types # Disabled because it's very verbose\n    - specify_nonobvious_property_types\n    - strict_top_level_inference\n    - test_types_in_equals\n    - throw_in_finally\n    - tighten_type_of_initializing_formals\n    - type_annotate_public_apis\n    - type_init_formals\n    - type_literal_in_constant_pattern\n    - unawaited_futures\n    - 
unintended_html_in_doc_comment\n    - unnecessary_async\n    - unnecessary_await_in_return\n    - unnecessary_brace_in_string_interps\n    - unnecessary_breaks\n    - unnecessary_const\n    - unnecessary_constructor_name\n    - unnecessary_getters_setters\n    - unnecessary_lambdas\n    - unnecessary_late\n    - unnecessary_library_directive\n    - unnecessary_library_name\n    - unnecessary_new\n    - unnecessary_null_aware_assignments\n    - unnecessary_null_aware_operator_on_extension_on_nullable\n    - unnecessary_null_checks\n    - unnecessary_null_in_if_null_operators\n    - unnecessary_nullable_for_final_variable_declarations\n    - unnecessary_overrides\n    - unnecessary_parenthesis\n    - unnecessary_statements\n    - unnecessary_string_escapes\n    - unnecessary_string_interpolations\n    - unnecessary_this\n    - unnecessary_to_list_in_spreads\n    - unnecessary_underscores\n    - unreachable_from_main\n    - unrelated_type_equality_checks\n    - unsafe_variance\n    - use_build_context_synchronously\n    - use_colored_box\n    - use_decorated_box\n    - use_full_hex_values_for_flutter_colors\n    - use_function_type_syntax_for_parameters\n    - use_if_null_to_convert_nulls_to_bools\n    - use_is_even_rather_than_modulo\n    - use_key_in_widget_constructors\n    - use_late_for_private_fields_and_variables\n    - use_named_constants\n    - use_raw_strings\n    - use_rethrow_when_possible\n    - use_setters_to_change_properties\n    - use_string_in_part_of_directives\n    - use_super_parameters\n    - use_test_throws_matchers\n    - use_truncating_division\n    - valid_regexps\n    - void_checks\n"
  },
  {
    "path": "docs/.nojekyll",
    "content": ""
  },
  {
    "path": "docs/CNAME",
    "content": "langchaindart.dev\n"
  },
  {
    "path": "docs/README.md",
    "content": "# Welcome to LangChain.dart\n\n**LangChain.dart** is a Dart port of Python's [LangChain](https://github.com/hwchase17/langchain) framework.\n\n> **Attribution note:** most of the docs are just an adaptation of the original\n> [Python LangChain docs](https://python.langchain.com/).\n\n**LangChain** is a framework for developing applications powered by language models. It enables applications that are:\n\n1. **Context-aware:** connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.).\n2. **Reason:** rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)\n\nThe main value prop of LangChain are:\n\n1. **Components:** composable tools and integrations for working with language models. Components are modular and easy-to-use.\n2. **Off-the-shelf chains:** built-in assemblages of components for accomplishing higher-level tasks.\n\nOff-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.\n\nThe LangChain.dart framework is made up of several different packages:\n\nLangChain.dart has a modular design where the core [langchain](https://pub.dev/packages/langchain)\npackage provides the LangChain API and each integration with a model provider, database, etc. is\nprovided by a separate package.\n\n## Getting Started\n\n[Here’s](/get_started/installation) how to install LangChain, set up your environment, and start building.\n\nWe recommend following our [Quickstart guide](/get_started/getting_started.md) to familiarize yourself with the framework by building your first LangChain application.\n\n> **Note:** These docs are for the [LangChain Dart package](https://github.com/davidmigloz/langchain_dart). 
\n> For documentation on LangChain Python [click here](https://python.langchain.com), and for LangChain.js [click here](https://js.langchain.com).\n\n## LangChain Expression Language (LCEL)\n\nLCEL is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.\n\n- [Overview](/expression_language/expression_language): LCEL and its benefits\n- [Interface](/expression_language/interface): The standard interface for LCEL objects\n- [Cookbook](https://langchaindart.dev/#/expression_language/cookbook/prompt_llm_parser): Example code for accomplishing common tasks\n\n## Modules\n\nLangChain.dart provides standard, extendable interfaces and integrations for the following modules:\n\n![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png)\n\n**[Model I/O](/modules/model_io/models/models.md)**  \nInterface with language models.\n\n**[Retrieval]()**  \nInterface with application-specific data.\n\n**[Agents](/modules/agents/agents.md)**  \nLet models choose which tools to use given high-level directives\n\n## Examples, ecosystem, and resources\n\n### Use cases\n\nWalkthroughs and best-practices for [common end-to-end use cases](https://python.langchain.com/docs/use_cases), like:\n\n- [Document question answering](https://python.langchain.com/docs/use_cases/question_answering/)\n- [Chatbots](https://python.langchain.com/docs/use_cases/chatbots/)\n- [Analyzing structured data](https://python.langchain.com/docs/use_cases/qa_structured/sql/)\n- and much more...\n\n### Guides\n\n[Best practices](https://python.langchain.com/docs/guides) for developing with LangChain.\n\n### Ecosystem\n\nLangChain is part of a [rich ecosystem](https://python.langchain.com/docs/ecosystem) of tools that integrate with our framework and build on top of it. 
Check out our growing list of integrations and dependent repos.\n\n## API reference\n\nHead to the [reference section](https://pub.dev/documentation/langchain/latest) for full documentation of all classes and methods in the LangChain Dart package.\n\n## Developer's guide\n\nCheck out the [developer's guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for guidelines on contributing and help getting your dev environment set up.\n\n### Community\n\nOur community is full of prolific developers, creative builders, and fantastic teachers. Join us on [GitHub](https://github.com/davidmigloz/langchain_dart) or [Discord](https://discord.gg/6adMQxSpJS) to ask questions, share feedback, meet other developers building with LangChain.dart, and dream about the future of LLM’s.\n"
  },
  {
    "path": "docs/_footer.md",
    "content": "Made with 💙 by [the LangChain.dart Community](https://github.com/davidmigloz/langchain_dart/graphs/contributors).<br/>\n[Become a Sponsor](https://github.com/sponsors/davidmigloz) 💖\n"
  },
  {
    "path": "docs/_sidebar.md",
    "content": "- [Get started](README.md)\n  - [Installation](/get_started/installation.md)\n  - [Quickstart](/get_started/quickstart.md)\n  - [Security](/get_started/security.md)\n- [LangChain Expression Language](/expression_language/expression_language.md)\n  - [Get started](/expression_language/get_started.md)\n  - [Runnable interface](/expression_language/interface.md)\n  - [Primitives](/expression_language/primitives.md)\n    - [Sequence: Chaining runnables](/expression_language/primitives/sequence.md)\n    - [Map: Formatting inputs & concurrency](/expression_language/primitives/map.md)\n    - [Passthrough: Passing inputs through](/expression_language/primitives/passthrough.md)\n    - [Mapper: Mapping inputs](/expression_language/primitives/mapper.md)\n    - [Function: Run custom logic](/expression_language/primitives/function.md)\n    - [Binding: Configuring runnables](/expression_language/primitives/binding.md)\n    - [Router: Routing inputs](/expression_language/primitives/router.md)\n    - [Retry: Retrying runnables](/expression_language/primitives/retry.md)\n  - [Streaming](/expression_language/streaming.md)\n  - [Fallbacks](/expression_language/fallbacks.md)\n  - Cookbook\n    - [Prompt + LLM](/expression_language/cookbook/prompt_llm_parser.md)\n    - [Multiple chains](/expression_language/cookbook/multiple_chains.md)\n    - [Adding memory](/expression_language/cookbook/adding_memory.md)\n    - [Retrieval](/expression_language/cookbook/retrieval.md)\n    - [Using Tools](/expression_language/cookbook/tools.md)\n- [Modules](/modules/modules.md)\n  - [Model IO](/modules/model_io/model_io.md)\n    - [Prompts](/modules/model_io/prompts/prompts.md)\n      - [Prompt Templates](/modules/model_io/prompts/prompt_templates/prompt_templates.md)\n        - [Connecting to a Feature Store](/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.md)\n        - [Custom prompt 
template](/modules/model_io/prompts/prompt_templates/custom_prompt_template.md)\n        - [Few-shot prompt templates](/modules/model_io/prompts/prompt_templates/few_shot_examples.md)\n        - [Format template output](/modules/model_io/prompts/prompt_templates/format_output.md)\n        - [Template formats](/modules/model_io/prompts/prompt_templates/formats.md)\n        - [Types of MessagePromptTemplate](/modules/model_io/prompts/prompt_templates/msg_prompt_templates.md)\n        - [Partial prompt templates](/modules/model_io/prompts/prompt_templates/partial.md)\n        - [Composition](/modules/model_io/prompts/prompt_templates/prompt_composition.md)\n        - [Serialization](/modules/model_io/prompts/prompt_templates/prompt_serialization.md)\n        - [Validate template](/modules/model_io/prompts/prompt_templates/validate.md)\n      - [Example selectors](/modules/model_io/prompts/example_selectors/example_selectors.md)\n    - [Language models](/modules/model_io/models/models.md)\n      - [LLMs](/modules/model_io/models/llms/llms.md)\n        - How-to\n          - [Custom LLM](/modules/model_io/models/llms/how_to/custom_llm.md)\n          - [Fake LLM](/modules/model_io/models/llms/how_to/fake_llm.md)\n          - [Human input LLM](/modules/model_io/models/llms/how_to/human_input_llm.md)\n          - [Caching](/modules/model_io/models/llms/how_to/llm_caching.md)\n          - [Serialization](/modules/model_io/models/llms/how_to/llm_serialization.md)\n          - [Streaming](/modules/model_io/models/llms/how_to/llm_streaming.md)\n          - [Tracking token usage](/modules/model_io/models/llms/how_to/token_usage_tracking.md)\n        - Integrations\n          - [OpenAI](/modules/model_io/models/llms/integrations/openai.md)\n          - [GCP Vertex AI](/modules/model_io/models/llms/integrations/gcp_vertex_ai.md)\n          - [Ollama](/modules/model_io/models/llms/integrations/ollama.md)\n      - [Chat Models](/modules/model_io/models/chat_models/chat_models.md)\n  
      - How-to\n          - [Prompts](/modules/model_io/models/chat_models/how_to/prompts.md)\n          - [Streaming](/modules/model_io/models/chat_models/how_to/streaming.md)\n          - [Tool calling](/modules/model_io/models/chat_models/how_to/tools.md)\n          - [LLMChain](/modules/model_io/models/chat_models/how_to/llm_chain.md)\n        - Integrations\n          - [Anthropic](/modules/model_io/models/chat_models/integrations/anthropic.md) \n          - [OpenAI](/modules/model_io/models/chat_models/integrations/openai.md)\n          - [Firebase Vertex AI](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md)\n          - [GCP Vertex AI](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md)\n          - [Google AI](/modules/model_io/models/chat_models/integrations/googleai.md)\n          - [Ollama](/modules/model_io/models/chat_models/integrations/ollama.md)\n          - [Mistral AI](/modules/model_io/models/chat_models/integrations/mistralai.md)\n          - [OpenRouter](/modules/model_io/models/chat_models/integrations/open_router.md)\n          - [Together AI](/modules/model_io/models/chat_models/integrations/together_ai.md)\n          - [Anyscale](/modules/model_io/models/chat_models/integrations/anyscale.md)\n          - [Prem App](/modules/model_io/models/chat_models/integrations/prem.md)\n    - [Output parsers](/modules/model_io/output_parsers/output_parsers.md)\n      - [String output parser](/modules/model_io/output_parsers/string.md)\n      - [JSON output parser](/modules/model_io/output_parsers/json.md)\n      - [Tools output parser](/modules/model_io/output_parsers/tools.md)\n  - [Retrieval](/modules/retrieval/retrieval.md)\n    - [Document loaders](/modules/retrieval/document_loaders/document_loaders.md)\n      - How-to\n        - [Text](/modules/retrieval/document_loaders/how_to/text.md)\n        - [JSON](/modules/retrieval/document_loaders/how_to/json.md)\n        - [Web 
page](/modules/retrieval/document_loaders/how_to/web.md)\n        - [Directory](/modules/retrieval/document_loaders/how_to/directory.md)\n    - [Document transformers](/modules/retrieval/document_transformers/document_transformers.md)\n      - Text splitters\n        - [Split by character](/modules/retrieval/document_transformers/text_splitters/character_text_splitter.md)\n        - [Recursively split by character](/modules/retrieval/document_transformers/text_splitters/recursive_character_text_splitter.md)\n        - [Split code](/modules/retrieval/document_transformers/text_splitters/code_text_splitter.md)\n        - [Split markdown](/modules/retrieval/document_transformers/text_splitters/markdown_text_splitter.md)\n    - [Text embedding models](/modules/retrieval/text_embedding/text_embedding.md)\n      - Integrations\n        - [OpenAI](/modules/retrieval/text_embedding/integrations/openai.md)\n        - [GCP Vertex AI](/modules/retrieval/text_embedding/integrations/gcp_vertex_ai.md)\n        - [Google AI](/modules/retrieval/text_embedding/integrations/google_ai.md)\n        - [Ollama](/modules/retrieval/text_embedding/integrations/ollama.md)\n        - [Mistral AI](/modules/retrieval/text_embedding/integrations/mistralai.md)\n        - [Together AI](/modules/retrieval/text_embedding/integrations/together_ai.md)\n        - [Anyscale](/modules/retrieval/text_embedding/integrations/anyscale.md)\n        - [Prem App](/modules/retrieval/text_embedding/integrations/prem.md)\n    - [Vector stores](/modules/retrieval/vector_stores/vector_stores.md)\n      - Integrations\n        - [Memory](/modules/retrieval/vector_stores/integrations/memory.md)\n        - [ObjectBox](/modules/retrieval/vector_stores/integrations/objectbox.md)\n        - [Chroma](/modules/retrieval/vector_stores/integrations/chroma.md)\n        - [Pinecone](/modules/retrieval/vector_stores/integrations/pinecone.md)\n        - [Supabase](/modules/retrieval/vector_stores/integrations/supabase.md)\n      
  - [Vertex AI Vector Search](/modules/retrieval/vector_stores/integrations/vertex_ai.md)\n    - [Retrievers](/modules/retrieval/retrievers/retrievers.md)\n  - [Chains](/modules/chains/chains.md)\n    - How-to\n      - [Different call methods](/modules/chains/how_to/call_methods.md)\n      - [Custom chain](/modules/chains/how_to/custom_chain.md)\n      - [Debugging chains](/modules/chains/how_to/debugging.md)\n      - [Loading from LangChainHub](/modules/chains/how_to/from_hub.md)\n      - [Adding memory](/modules/chains/how_to/memory.md)\n      - [Serialization](/modules/chains/how_to/serialization.md)\n    - Foundational\n      - [LLM](/modules/chains/foundational/llm.md)\n      - [Sequential](/modules/chains/foundational/sequential.md)\n    - Documents\n      - [Stuff](/modules/chains/documents/stuff.md) \n      - [MapReduce](/modules/chains/documents/map_reduce.md)\n    - Popular\n      - [Summarize](/modules/chains/popular/summarize.md)\n  - [Memory](/modules/memory/memory.md)\n  - [Agents](/modules/agents/agents.md)\n    - [Agent types](/modules/agents/agent_types/agent_types.md)\n      - [Tools Agent](/modules/agents/agent_types/tools_agent.md)\n    - [Tools](/modules/agents/tools/tools.md)\n      - [Calculator](/modules/agents/tools/calculator.md)\n      - [DALL-E Image Generator](/modules/agents/tools/openai_dall_e.md)\n      - [Tavily Answer](/modules/agents/tools/tavily_answer.md)\n      - [Tavily Search Results](/modules/agents/tools/tavily_search_results.md)\n    - [Toolkits](/modules/agents/toolkits/toolkits.md)\n"
  },
  {
    "path": "docs/css/sidebar.css",
    "content": ".sidebar-nav {\n  margin-left: 15px;\n}\n\n.sidebar ul {\n  margin-left: 10px;\n  padding: 0;\n}\n\n.sidebar-nav li {\n  position: relative;\n  margin-top: 0;\n  margin-bottom: 0;\n  cursor: pointer\n}\n\n.sidebar-nav li.folder {\n  font-size: 14px;\n}\n\n.sidebar-nav ul:not(.app-sub-sidebar) > li:not(.file)::before {\n  content: '';\n  display: block;\n  position: absolute;\n  top: 14px;\n  left: -10px;\n  height: 4px;\n  width: 4px;\n  border-right: 1px solid #505d6b;\n  border-bottom: 1px solid #505d6b;\n  transform: rotate(-45deg);\n  transition: transform .1s\n}\n\n.sidebar-nav ul:not(.app-sub-sidebar) > li.open::before {\n  transform: rotate(45deg)\n}\n\n.sidebar-nav ul:not(.app-sub-sidebar) > li.collapse::before {\n  transform: rotate(-45deg)\n}\n"
  },
  {
    "path": "docs/css/style.css",
    "content": ".sidebar>h1 {\n    text-align: left;\n    margin: 0 0 15px 15px;\n    color: var(--accent);\n}\n"
  },
  {
    "path": "docs/css/toc.css",
    "content": "@media only screen and (max-width: 1299px) {\n  aside.toc-nav {\n    visibility: hidden;\n  }\n}\n\n@media only screen and (min-width: 1300px) {\n  section.content {\n    padding-right: 250px;\n  }\n}\n\naside.toc-nav {\n  position: fixed;\n  top: 70px;\n  right: 1%;\n  margin-right: 20px;\n  width: 250px;\n  z-index: 999999;\n  align-self: flex-start;\n  flex: 0 0 auto;\n  overflow-y: auto;\n  max-height: 70%;\n}\n\naside.toc-nav.nothing {\n  width: 0;\n}\n\n.page_toc {\n  position: relative;\n  left: 0;\n  margin: 10px 0;\n  border: none;\n  font-size: 1.0em;\n}\n\n.page_toc p.title {\n  margin: 0;\n  padding-bottom: 5px;\n  font-weight: 600;\n  font-size: 1.2em;\n}\n\n.page_toc .anchor:hover:after {\n  content: \"\";\n}\n.page_toc div[class^=\"lv\"] a:hover span {\n  color: var(--sidebar-nav-link-color--active, #42b983);\n}\n\n.page_toc div {\n  border-left: 2px solid #e8e8e8;\n  text-indent: 10px;\n  padding: 2px 0;\n  cursor: pointer;\n}\n\n.page_toc div.active {\n  border-left-color: var(--sidebar-nav-link-color--active, #42b983);\n  transition: border-left-color 0.23s;\n}\n\n.page_toc div.active a span {\n  color: var(--sidebar-nav-link-color--active, #42b983);\n  transition: color 0.23s;\n}\n\n.page_toc div[class^=\"lv\"] a {\n  color: var(--textColor, black);\n  text-decoration: none;\n  font-weight: 300;\n  line-height: 2em;\n  display: block;\n}\n\n.page_toc div[class^=\"lv\"] a span {\n  color: var(--sidebar-nav-link-color--hover, var(--sidebar-nav-link-color));\n  display: block;\n  overflow: hidden;\n  white-space: nowrap;\n  text-overflow:ellipsis;\n}\n\n.page_toc div.lv2 {\n  text-indent: 20px;\n}\n\n.page_toc div.lv3 {\n  text-indent: 30px;\n}\n\n.page_toc div.lv4 {\n  text-indent: 40px;\n}\n\n.page_toc div.lv5 {\n  text-indent: 50px;\n}\n\n.page_toc div.lv6 {\n  text-indent: 60px;\n}"
  },
  {
    "path": "docs/expression_language/cookbook/adding_memory.md",
    "content": "# Adding memory\n\nThis shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook them up manually.\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\nfinal memory = ConversationBufferMemory(returnMessages: true);\n\nfinal promptTemplate = ChatPromptTemplate.fromPromptMessages([\n  SystemChatMessagePromptTemplate.fromTemplate(\n    'You are a helpful chatbot',\n  ),\n  const MessagesPlaceholder(variableName: 'history'),\n  HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n]);\n\nfinal chain = Runnable.fromMap({\n      'input': Runnable.passthrough(),\n      'history': Runnable.mapInput(\n            (_) async {\n          final m = await memory.loadMemoryVariables();\n          return m['history'];\n        },\n      ),\n    }) |\n    promptTemplate |\n    model |\n    stringOutputParser;\n\nconst input1 = 'Hi, I am Bob';\nfinal output1 = await chain.invoke(input1);\nprint(output1);\n// Hello Bob! How can I assist you today?\n\nawait memory.saveContext(\n  inputValues: {'input': input1},\n  outputValues: {'output': output1},\n);\n\nconst input2 = \"What's my name?\";\nfinal output2 = await chain.invoke(input2);\nprint(output2);\n// Your name is Bob, as you mentioned earlier.\n```\n"
  },
  {
    "path": "docs/expression_language/cookbook/multiple_chains.md",
    "content": "# Multiple chains\n\nRunnables can easily be used to combine multiple Chains:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\n\nfinal promptTemplate1 = ChatPromptTemplate.fromTemplate(\n  'What is the city {person} is from? Only respond with the name of the city.',\n);\n\nfinal promptTemplate2 = ChatPromptTemplate.fromTemplate(\n  'What country is the city {city} in? Respond in {language}.',\n);\n\nfinal cityChain = promptTemplate1 | model | stringOutputParser;\nfinal combinedChain = Runnable.fromMap({\n      'city': cityChain,\n      'language': Runnable.getItemFromMap('language'),\n    }) |\n    promptTemplate2 |\n    model |\n    stringOutputParser;\n\nfinal res = await combinedChain.invoke({\n  'person': 'Obama',\n  'language': 'Spanish',\n});\nprint(res);\n// La ciudad de Chicago se encuentra en los Estados Unidos.\n```\n\nWe use a `RunnableMap` to run two chains in pararell, one that gets the name of city and another that just propagates the `language` input. Finally, the output of the `RunnableMap` is passed to the second prompt and feed into the model.\n\n## Runnable.getItemFromMap and Runnable.passthrough\n\nIn the example above, we invoke the `combinedChain` with a `Map` and then use `Runnable.getItemFromMap` to propagate the `language` input to the second prompt. \n\nAnother typical use case is to invoke the chain with a single String input and then use the combination of `Runnable.fromMap` and `Runnable.passthrough` to build the input for the second prompt.\n\nLet's see another example with even more chains and a single String input:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate1 = ChatPromptTemplate.fromTemplate(\n  'Generate a {attribute} color. 
'\n  'Return the name of the color and nothing else:',\n);\nfinal promptTemplate2 = ChatPromptTemplate.fromTemplate(\n  'What is a fruit of color: {color}. '\n  'Return the name of the fruit and nothing else:',\n);\nfinal promptTemplate3 = ChatPromptTemplate.fromTemplate(\n  'What is a country with a flag that has the color: {color}. '\n  'Return the name of the country and nothing else:',\n);\nfinal promptTemplate4 = ChatPromptTemplate.fromTemplate(\n  'What is the color of {fruit} and the flag of {country}?',\n);\n\nfinal modelParser = model | StringOutputParser();\n\nfinal colorGenerator = Runnable.getMapFromInput('attribute') |\n    promptTemplate1 |\n    Runnable.fromMap({\n      'color': modelParser,\n    });\nfinal colorToFruit = promptTemplate2 | modelParser;\nfinal colorToCountry = promptTemplate3 | modelParser;\nfinal questionGenerator = colorGenerator | Runnable.fromMap({\n  'fruit': colorToFruit,\n  'country': colorToCountry,\n}) | promptTemplate4 | modelParser;\n\nfinal res = await questionGenerator.invoke('warm');\nprint(res);\n// The color of Apple is typically depicted as silver or gray for their logo \n// and products. The flag of Armenia consists of three horizontal stripes of \n// red, blue, and orange from top to bottom.\n```\n\n## Branching and Merging\n\nYou may want the output of one component to be processed by 2 or more other components. `RunnableMaps` let you split or fork the chain so multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. 
This type of chain creates a computation graph that looks like the following:\n\n```\n     Input\n      / \\\n     /   \\\n Branch1 Branch2\n     \\   /\n      \\ /\n    Combine\n```\n\nLet's see an example:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\n\nfinal planner = Runnable.getMapFromInput() |\n    ChatPromptTemplate.fromTemplate('Generate an argument about: {input}') |\n    model |\n    stringOutputParser |\n    Runnable.getMapFromInput('base_response');\n\nfinal argumentsFor = ChatPromptTemplate.fromTemplate(\n      'List the pros or positive aspects of {base_response}',\n    ) |\n    model |\n    stringOutputParser;\n\nfinal argumentsAgainst = ChatPromptTemplate.fromTemplate(\n      'List the cons or negative aspects of {base_response}',\n    ) |\n    model |\n    stringOutputParser;\n\nfinal finalResponder = ChatPromptTemplate.fromPromptMessages([\n      AIChatMessagePromptTemplate.fromTemplate(\n        '{original_response}'\n      ),\n      HumanChatMessagePromptTemplate.fromTemplate(\n        'Pros:\\n{results_1}\\n\\nCons:\\n{results_2}',\n      ),\n      SystemChatMessagePromptTemplate.fromTemplate(\n        'Generate a final response given the critique',\n      ),\n    ]) |\n    model |\n    stringOutputParser;\n\nfinal chain = planner |\n    Runnable.fromMap({\n      'results_1': argumentsFor,\n      'results_2': argumentsAgainst,\n      'original_response': Runnable.getItemFromMap('base_response'),\n    }) |\n    finalResponder;\n\nfinal res = await chain.invoke('Scrum');\nprint(res);\n// While Scrum has many benefits, it is essential to acknowledge and address\n// the potential cons or negative aspects that come with its implementation.\n// By understanding these challenges, teams can take necessary steps to\n// mitigate them and maximize the effectiveness of Scrum.\n//\n// To address the lack of 
predictability, teams can focus on improving their\n// estimation techniques, conducting regular progress tracking, and adopting\n// techniques like story point estimation or velocity tracking. This can\n// provide stakeholders with a better understanding of project timelines and\n// deliverables.\n//\n// ...\n//\n// In conclusion, while Scrum has its challenges, addressing these potential\n// cons through proactive measures can help maximize the benefits and\n// effectiveness of the framework. By continuously improving and adapting\n// Scrum practices, teams can overcome these challenges and achieve\n// successful project outcomes.\n```\n"
  },
  {
    "path": "docs/expression_language/cookbook/prompt_llm_parser.md",
    "content": "# Prompt + LLM\n\nThe most common and valuable composition is taking:\n\n```\nPromptTemplate / ChatPromptTemplate -> LLM / ChatModel -> OutputParser\n```\n\nAlmost all other chains you build will use this building block.\n\n## PromptTemplate + LLM\n\nThe simplest composition is just combing a prompt and model to create a chain that takes user input, adds it to a prompt, passes it to a model, and returns the raw model input.\n\nNote, you can mix and match `PromptTemplate`/`ChatPromptTemplate` and `LLM`/`ChatModel` as you like here.\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {foo}',\n);\n\nfinal chain = promptTemplate | model;\n\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// ChatResult{\n//   id: chatcmpl-9LBNiPXHzWIwc02rR6sS1HTcL9pOk,\n//   output: AIChatMessage{\n//     content: Why don't bears wear shoes?\\nBecause they have bear feet!,\n//   },\n//   finishReason: FinishReason.stop,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714835666,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 13,\n//     responseTokens: 13,\n//     totalTokens: 26,\n//   },\n//   streaming: false\n// }\n```\n\nOften times we want to attach options that'll be passed to each model call. You can do this in two ways:\n\n1. Configuring the default options when instantiating the model. This will apply to all calls to the model.\n2. Configuring the options when using the model in a chain by using the `.bind` method. 
This will only apply to the calls in that chain.\n\nLet's look at some examples:\n\n### Attaching Stop Sequences\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {foo}',\n);\n\nfinal chain = promptTemplate | model.bind(ChatOpenAIOptions(stop: ['\\n']));\n\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// ChatResult{\n//   id: chatcmpl-9LBOohTtdg12zD8zzz2GX1ib24UXO,\n//   output: AIChatMessage{\n//     content: Why don't bears wear shoes? ,\n//   },\n//   finishReason: FinishReason.stop,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714835734,\n//     system_fingerprint: fp_a450710239\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 13,\n//     responseTokens: 8,\n//     totalTokens: 21\n//   },\n//   streaming: false\n// }\n```\n\n### Attaching Tool Call information\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {foo}',\n);\n\nconst tool = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline for the joke',\n      },\n    },\n    'required': ['setup', 'punchline'],\n  },\n);\n\nfinal chain = promptTemplate |\n    model.bind(\n      ChatOpenAIOptions(\n        tools: const [tool],\n        toolChoice: ChatToolChoice.forced(name: tool.name),\n      ),\n    );\n\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// ChatResult{\n//   id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb,\n//   output: AIChatMessage{\n//     content: ,\n//     toolCalls: 
[\n//       AIChatMessageToolCall{\n//         id: call_JIhyfu6jdIXaDHfYzbBwCKdb,\n//         name: joke,\n//         argumentsRaw: {\"setup\":\"Why don't bears like fast food?\",\"punchline\":\"Because they can't catch it!\"},\n//         arguments: {\n//           setup: Why don't bears like fast food?,\n//           punchline: Because they can't catch it!\n//         },\n//       }\n//     ],\n//   },\n//   finishReason: FinishReason.stop,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714835806,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 77,\n//     responseTokens: 24,\n//     totalTokens: 101\n//   },\n//   streaming: false\n// }\n```\n\n## PromptTemplate + LLM + OutputParser\n\nWe can also add in an output parser to conveniently transform the raw LLM/ChatModel output into a consistent format.\n\n### String Output Parser\n\nIf we just want the string output, we can use the `StringOutputParser`:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {foo}',\n);\n\nfinal chain = promptTemplate | model | StringOutputParser();\n\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// Why don't bears wear shoes? 
Because they have bear feet!\n```\n\nNotice that this now returns a string - a much more workable format for downstream tasks.\n\n### Tools Output Parser\n\nWhen you specify a tool that the model should call, you may just want to parse the tool call directly.\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {foo}',\n);\n\nconst tool = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline for the joke',\n      },\n    },\n    'required': ['setup', 'punchline'],\n  },\n);\n\nfinal chain = promptTemplate |\n    model.bind(\n      ChatOpenAIOptions(\n        tools: const [tool],\n        toolChoice: ChatToolChoice.forced(name: tool.name),\n      ),\n    ) |\n    ToolsOutputParser();\n\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// [ParsedToolCall{\n//   id: call_tDYrlcVwk7bCi9oh5IuknwHu,\n//   name: joke,\n//   arguments: {\n//     setup: What do you call a bear with no teeth?, \n//     punchline: A gummy bear!\n//   },\n// }]\n```\n\n## Simplifying input\n\nTo make invocation even simpler, we can add a `RunnableMap` to take care of creating the prompt input map with a `RunnablePassthrough` to get the input:\n\n```dart\nfinal map = Runnable.fromMap({\n  'foo': Runnable.passthrough(),\n});\nfinal chain = map | promptTemplate | model | StringOutputParser();\n```\n\n*`Runnable.passthrough()` is a convenience method that creates a `RunnablePassthrough` object. This is a `Runnable` that takes the input it receives and passes it through as output.*\n\nHowever, this is a bit verbose. 
We can simplify it by using `Runnable.getMapFromInput` which does the same under the hood:\n\n```dart\nfinal chain = Runnable.getMapFromInput('foo') |\n    promptTemplate |\n    model |\n    StringOutputParser();\n```\n\nNow, we can invoke the chain with just the input we care about:\n\n```dart\nfinal res = await chain.invoke('bears');\nprint(res);\n// Why don't bears wear shoes? Because they have bear feet!\n```\n"
  },
  {
    "path": "docs/expression_language/cookbook/retrieval.md",
    "content": "# Retrieval Augmented Generation (RAG)\n\nLet's look at adding in a retrieval step to a prompt and LLM, which adds up to a \"retrieval-augmented generation\" chain.\n\nFor this example, we are going to use Chroma vector store. First, we'll add some documents to the vector store:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n\nfinal vectorStore = Chroma(embeddings: embeddings);\nawait vectorStore.addDocuments(\n  documents: const [\n    Document(pageContent: 'Payment methods: iDEAL, PayPal and credit card'),\n    Document(pageContent: 'Free shipping: on orders over 30€'),\n  ],\n);\n```\n\nNow we can use the vector store as a retriever in a chain:\n\n```dart\nfinal retriever = vectorStore.asRetriever();\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\nfinal chain = Runnable.fromMap<String>({\n  'context': retriever | Runnable.mapInput((docs) => docs.join('\\n')),\n  'question': Runnable.passthrough(),\n}) | promptTemplate | model | StringOutputParser();\n\nfinal res1 = await chain.invoke('What payment methods do you accept?');\nprint(res1);\n// The payment methods accepted are iDEAL, PayPal, and credit card.\n\nawait chain.stream('How can I get free shipping?').forEach(stdout.write);\n// To get free shipping, you need to place an order over 30€.\n```\n\nImagine that we now want to answer the question in a different language. We will need to pass two parameters when invoking the chain. 
We can use \n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}\n\nAnswer in the following language: {language}''');\n\nfinal chain = Runnable.fromMap({\n      'context': Runnable.getItemFromMap<String>('question') |\n          (retriever | Runnable.mapInput((docs) => docs.join('\\n'))),\n      'question': Runnable.getItemFromMap('question'),\n      'language': Runnable.getItemFromMap('language'),\n    }) |\n    promptTemplate |\n    model |\n    StringOutputParser();\n\nfinal res1 = await chain.invoke({\n  'question': 'What payment methods do you accept?',\n  'language': 'es_ES',\n});\nprint(res1);\n// Aceptamos los siguientes métodos de pago: iDEAL, PayPal y tarjeta de crédito.\n\nawait chain.stream({\n  'question': 'How can I get free shipping?',\n  'language': 'nl_NL',\n}).forEach(stdout.write);\n// Om gratis verzending te krijgen, moet je bestellingen plaatsen van meer dan 30€.\n```\n\n*Note: you may have noticed that we added parentheses around the retriever. This is to workaround the type inference limitations of Dart when using the `|` operator. You won't need them if you use `.pipe` instead.*\n\n## Conversational Retrieval Chain\n\nBecause we can create `Runnable`s from functions we can add in conversation history via a formatting function. 
This allows us to recreate the popular `ConversationalRetrievalQAChain` to \"chat with data\":\n\n```dart\nfinal retriever = vectorStore.asRetriever();\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal condenseQuestionPrompt = ChatPromptTemplate.fromTemplate('''\nGiven the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:''');\n\nfinal answerPrompt = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\nString combineDocuments(\n  final List<Document> documents, {\n  final String separator = '\\n\\n',\n}) {\n  return documents.map((final d) => d.pageContent).join(separator);\n}\n\nString formatChatHistory(final List<(String, String)> chatHistory) {\n  final formattedDialogueTurns = chatHistory.map((final dialogueTurn) {\n    final (human, ai) = dialogueTurn;\n    return 'Human: $human\\nAssistant: $ai';\n  });\n  return formattedDialogueTurns.join('\\n');\n}\n\nfinal inputs = Runnable.fromMap({\n  'standalone_question': Runnable.fromMap({\n        'question': Runnable.getItemFromMap('question'),\n        'chat_history': \n            Runnable.getItemFromMap<List<(String, String)>>('chat_history') |\n                Runnable.mapInput(formatChatHistory),\n      }) |\n      condenseQuestionPrompt |\n      model |\n      StringOutputParser(reduceOutputStream: true),\n});\n\nfinal context = Runnable.fromMap({\n  'context': Runnable.getItemFromMap<String>('standalone_question') |\n      retriever |\n      Runnable.mapInput<List<Document>, String>(combineDocuments),\n  'question': Runnable.getItemFromMap('standalone_question'),\n});\n\nfinal conversationalQaChain =\n    inputs | context | answerPrompt | model | StringOutputParser();\n\nfinal res1 = await conversationalQaChain.invoke({\n  'question': 'What payment 
methods do you accept?',\n  'chat_history': <(String, String)>[],\n});\nprint(res1);\n// The methods of payment that are currently accepted are iDEAL, PayPal, and credit card.\n\nawait conversationalQaChain.stream({\n  'question': 'Do I get free shipping?',\n  'chat_history': [('How much did you spend?', 'I spent 100€')],\n}).forEach(stdout.write);\n// Yes, shipping is free on orders over 30€.\n```\n\n### With Memory and returning source documents\n\nIn this example, we will add a memory to the chain and return the source documents from the retriever.\n\n```dart\nfinal retriever = vectorStore.asRetriever(\n  defaultOptions: const VectorStoreRetrieverOptions(\n    searchType: VectorStoreSimilaritySearch(k: 1),\n  ),\n);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\nfinal memory = ConversationBufferMemory(\n  inputKey: 'question',\n  outputKey: 'answer',\n  memoryKey: 'history',\n  returnMessages: true,\n);\n\nfinal condenseQuestionPrompt = ChatPromptTemplate.fromTemplate('''\nGiven the following conversation and a follow up question, rephrase the follow up question to be a standalone question that includes all the details from the conversation in its original language\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:''');\n\nfinal answerPrompt = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\nString combineDocuments(\n  final List<Document> documents, {\n  final String separator = '\\n\\n',\n}) =>\n    documents.map((final d) => d.pageContent).join(separator);\n\nString formatChatHistory(final List<ChatMessage> chatHistory) {\n  final formattedDialogueTurns = chatHistory\n      .map(\n        (final msg) => switch (msg) {\n          HumanChatMessage _ => 'Human: ${msg.content}',\n          AIChatMessage _ => 'AI: ${msg.content}',\n          _ => '',\n        },\n      )\n      
.toList();\n  return formattedDialogueTurns.join('\\n');\n}\n\n// First, we load the memory\nfinal loadedMemory = Runnable.fromMap({\n  'question': Runnable.getItemFromMap('question'),\n  'memory': Runnable.mapInput((_) => memory.loadMemoryVariables()),\n});\n\n// Next, we get the chat history from the memory\nfinal expandedMemory = Runnable.fromMap({\n  'question': Runnable.getItemFromMap('question'),\n  'chat_history': Runnable.getItemFromMap('memory') |\n      Runnable.mapInput<MemoryVariables, List<ChatMessage>>(\n        (final input) => input['history'],\n      ),\n});\n\n// Now, we generate a standalone question that includes the \n// necessary details from the chat history\nfinal standaloneQuestion = Runnable.fromMap({\n  'standalone_question': Runnable.fromMap({\n        'question': Runnable.getItemFromMap('question'),\n        'chat_history': Runnable.getItemFromMap<List<ChatMessage>>('chat_history') |\n            Runnable.mapInput(formatChatHistory),\n      }) |\n      condenseQuestionPrompt |\n      model |\n      stringOutputParser,\n});\n\n// Now we retrieve the documents\nfinal retrievedDocs = Runnable.fromMap({\n  'docs': Runnable.getItemFromMap('standalone_question') | retriever,\n  'question': Runnable.getItemFromMap('standalone_question'),\n});\n\n// Construct the inputs for the answer prompt\nfinal finalInputs = Runnable.fromMap({\n  'context': Runnable.getItemFromMap('docs') |\n      Runnable.mapInput<List<Document>, String>(combineDocuments),\n  'question': Runnable.getItemFromMap('question'),\n});\n\n// We prompt the model for an answer\nfinal answer = Runnable.fromMap({\n  'answer': finalInputs | answerPrompt | model | stringOutputParser,\n  'docs': Runnable.getItemFromMap('docs'),\n});\n\n// And finally, we put it all together\nfinal conversationalQaChain = loadedMemory |\n    expandedMemory |\n    standaloneQuestion |\n    retrievedDocs |\n    answer;\n\n// If we add some messages to the memory, \n// they will be used in the next 
invocation\nawait memory.saveContext(\n  inputValues: {\n    'question': ChatMessage.humanText('How much does my order cost?')\n  },\n  outputValues: {'answer': ChatMessage.ai('You have to pay 100€')},\n);\n\nfinal res = await conversationalQaChain.invoke({\n  'question': 'Do I get free shipping on the value of my order?',\n});\nprint(res);\n// {\n//   answer: Yes, based on the given context, you would receive free shipping on\n//     your order of 100€ since it exceeds the minimum requirement of 30€ for\n//     free shipping.,\n//   docs: [\n//     Document{\n//       id: 69974fe1-8436-40c7-87d1-c59c5ff1c6a6,\n//       pageContent: Free shipping: on orders over 30€,\n//       metadata: {},\n//     }\n//   ]\n// }\n```\n"
  },
  {
    "path": "docs/expression_language/cookbook/tools.md",
    "content": "# Tools\n\nTools are also runnables, and can therefore be used within a chain:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate('''\nTurn the following user input into a math expression for a calculator. \nOutput only the math expression. Let's think step by step.\n\nINPUT:\n{input}\n\nMATH EXPRESSION:''');\n\nfinal chain = Runnable.getMapFromInput() |\n    promptTemplate |\n    model |\n    stringOutputParser |\n    Runnable.getMapFromInput() |\n    CalculatorTool();\n\nfinal res = await chain.invoke(\n  'If I had 3 apples and you had 5 apples but we ate 3. '\n  'If we cut the remaining apples in half, how many pieces would we have?',\n  options: const ChatOpenAIOptions(temperature: 0),\n);\nprint(res);\n// 10.0\n```\n"
  },
  {
    "path": "docs/expression_language/expression_language.md",
    "content": "# LangChain Expression Language (LCEL)\n\nLangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:\n\n- **First-class streaming support:** When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.\n- **Optimized concurrent execution:** Whenever your LCEL chains have steps that can be executed concurrently (eg if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency.\n- **Retries and fallbacks:** Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale.\n- **Access intermediate results:** For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know something is happening, or even just to debug your chain.\n"
  },
  {
    "path": "docs/expression_language/fallbacks.md",
    "content": "# Fallbacks\n\nWhen working with language models, you may often encounter issues from the underlying APIs, e.g. rate limits or downtime. Therefore, as you move your LLM applications into production it becomes more and more important to have contingencies for errors. That's why we've introduced the concept of fallbacks.\n\nCrucially, fallbacks can be applied not only on the LLM level but on the whole runnable level. This is important because often times different models require different prompts. So if your call to OpenAI fails, you don't just want to send the same prompt to Anthropic - you probably want to use e.g. a different prompt template.\n\n## Handling LLM API errors with fallbacks\n\nThis is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit a rate limit, or any number of things.  This Situation can be handled using Fallbacks.\n\nFallbacks can be created using  `withFallbacks()` function on the runnable that you are working on, for example `final runnablWithFallbacks = mainRunnable.withFallbacks([fallback1, fallback2])` this would create a `RunnableWithFallback` along with a list of fallbacks. When it is invoked, the `mainRunnable` would be called first, if it fails then fallbacks would be invoked sequentially until one of the fallback in list return output. If the `mainRunnable` succeeds and returns output then the fallbacks won't be called. 
\n\n## Fallback for chat models\n\n```dart\n// fake model will throw error during invoke and fallback model will be called\nfinal fakeOpenAIModel = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),\n);\n\nfinal latestModel = ChatOpenAI(\n  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n);\n\nfinal modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]);\n\nfinal prompt = PromptValue.string('Explain why sky is blue in 2 lines');\n\nfinal res = await modelWithFallbacks.invoke(prompt);\nprint(res);\n/*\n{\n  \"ChatResult\": {\n    \"id\": \"chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA\",\n    \"output\": {\n      \"AIChatMessage\": {\n        \"content\": \"The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. This scattering process is known as Rayleigh scattering.\",\n        \"toolCalls\": []\n      }\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721542696,\n      \"system_fingerprint\": \"fp_400f27fa1f\"\n    },\n    \"usage\": {\n      \"LanguageModelUsage\": {\n        \"promptTokens\": 16,\n        \"promptBillableCharacters\": null,\n        \"responseTokens\": 36,\n        \"responseBillableCharacters\": null,\n        \"totalTokens\": 52\n      }\n    },\n    \"streaming\": false\n  }\n}\n*/\n```\n\nNote: if the options provided when invoking the runnable with fallbacks are not compatible with some of the fallbacks, they will be ignored. If you want to use different options for different fallbacks, provide them as `defaultOptions` when instantiating the fallbacks or use `bind()`. 
\n\n## Fallbacks for RunnableSequences with batch\n\n```dart\nfinal fakeOpenAIModel = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),\n);\n\nfinal latestModel = ChatOpenAI(\n  defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}');\n\nfinal badChain = promptTemplate.pipe(fakeOpenAIModel);\nfinal goodChain = promptTemplate.pipe(latestModel);\n\nfinal chainWithFallbacks = badChain.withFallbacks([goodChain]);\n\nfinal res = await chainWithFallbacks.batch(\n  [\n    {'topic': 'bears'},\n    {'topic': 'cats'},\n  ],\n);\nprint(res);\n/*\n[\n  {\n    \"id\": \"chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI\",\n    \"output\": {\n      \"content\": \"Sure! How about this one?\\n\\nWhy did the bear bring a suitcase to the forest?\\n\\nBecause it wanted to pack a lunch! 🐻🌲\",\n      \"toolCalls\": []\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721545052,\n      \"system_fingerprint\": \"fp_400f27fa1f\"\n    },\n    \"usage\": {\n      \"promptTokens\": 13,\n      \"promptBillableCharacters\": null,\n      \"responseTokens\": 31,\n      \"responseBillableCharacters\": null,\n      \"totalTokens\": 44\n    },\n    \"streaming\": false\n  },\n  {\n    \"id\": \"chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh\",\n    \"output\": {\n      \"content\": \"Sure, here's a cat joke for you:\\n\\nWhy was the cat sitting on the computer?\\n\\nBecause it wanted to keep an eye on the mouse!\",\n      \"toolCalls\": []\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721545052,\n      \"system_fingerprint\": \"fp_c4e5b6fa31\"\n    },\n    \"usage\": {\n      \"promptTokens\": 13,\n      \"promptBillableCharacters\": null,\n      \"responseTokens\": 29,\n      \"responseBillableCharacters\": null,\n      
\"totalTokens\": 42\n    },\n    \"streaming\": false\n  }\n]\n*/\n```\n"
  },
  {
    "path": "docs/expression_language/get_started.md",
    "content": "# Get started\n\nLCEL makes it easy to build complex chains from basic components, and supports out of the box functionality such as streaming, parallelism, and logging.\n\n# Basic example: prompt + model + output parser\n\nThe most basic and common use case is chaining a prompt template and a model together. To see how this works, let’s create a chain that takes a topic and generates a joke:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {topic}',\n);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal chain = promptTemplate.pipe(model).pipe(outputParser);\n\nfinal res = await chain.invoke({'topic': 'ice cream'});\nprint(res);\n// Why did the ice cream truck break down?\n// Because it had too many \"scoops\"!\n```\n\nNotice this line of this code, where we piece together then different components into a single chain using LCEL:\n\n```dart\nfinal chain = promptTemplate.pipe(model).pipe(outputParser);\n```\n\nThe `.pipe()` method (or `|` operator) is similar to a unix pipe operator, which chains together the different components feeds the output from one component as input into the next component.\n\nIn this chain the user input is passed to the prompt template, then the prompt template output is passed to the model, then the model output is passed to the output parser. Let’s take a look at each component individually to really understand what’s going on.\n\n## 1. Prompt\n\n`promptTemplate` is a `BasePromptTemplate`, which means it takes in a map of template variables and produces a `PromptValue`. A `PromptValue` is a wrapper around a completed prompt that can be passed to either an `LLM` (which takes a string as input) or `ChatModel` (which takes a sequence of messages as input). 
It can work with either language model type because it defines logic both for producing `ChatMessage` and for producing a string.\n\n```dart\nfinal promptValue = await promptTemplate.invoke({'topic': 'ice cream'});\n\nfinal messages = promptValue.toChatMessages();\nprint(messages);\n// [HumanChatMessage{\n//   content: ChatMessageContentText{\n//     text: Tell me a joke about ice cream,\n//   },\n// }]\n\nfinal string = promptValue.toString();\nprint(string);\n// Human: Tell me a joke about ice cream\n```\n\n## 2. Model\n\nThe `PromptValue` is then passed to `model`. In this case our `model` is a `ChatModel`, meaning it will output a `ChatMessage`.\n\n```dart\nfinal chatOutput = await model.invoke(promptValue);\nprint(chatOutput.output);\n// AIChatMessage{\n//   content: Why did the ice cream truck break down? \n//   Because it couldn't make it over the rocky road!,\n// }\n```\n\nIf our model was an `LLM`, it would output a `String`.\n\n```dart\nfinal llm = OpenAI(apiKey: openaiApiKey);\nfinal llmOutput = await llm.invoke(promptValue);\nprint(llmOutput.output);\n// Why did the ice cream go to therapy?\n// Because it had a rocky road!\n```\n\n## 3. Output parser\n\nAnd lastly we pass our `model` output to the `outputParser`, which is a `BaseOutputParser` meaning it takes either a `String` or a `ChatMessage` as input. The `StringOutputParser` simply converts any input into a `String`.\n\n```dart\nfinal parsed = await outputParser.invoke(chatOutput);\nprint(parsed);\n// Why did the ice cream go to therapy?\n// Because it had a rocky road!\n```\n\n## 4. Entire Pipeline\n\nTo follow the steps along:\n\n1. We pass in user input on the desired topic as `{'topic': 'ice cream'}`\n2. The `promptTemplate` component takes the user input, which is then used to construct a `PromptValue` after using the `topic` to construct the prompt.\n3. The `model` component takes the generated prompt, and passes it into the OpenAI chat model for evaluation. 
The generated output from the model is a `ChatMessage` object (specifically an `AIChatMessage`).\n4. Finally, the `outputParser` component takes in a `ChatMessage`, and transforms this into a `String`, which is returned from the invoke method.\n\n![Pipeline](img/pipeline.png)\n\nNote that if you’re curious about the output of any components, you can always test out a smaller version of the chain such as `promptTemplate` or `promptTemplate.pipe(model)` to see the intermediate results.\n\n```dart\nfinal input = {'topic': 'ice cream'};\n\nfinal res1 = await promptTemplate.invoke(input);\nprint(res1.toChatMessages());\n// [HumanChatMessage{\n//   content: ChatMessageContentText{\n//     text: Tell me a joke about ice cream,\n//   },\n// }]\n\nfinal res2 = await promptTemplate.pipe(model).invoke(input);\nprint(res2);\n// ChatResult{\n//   id: chatcmpl-9J37Tnjm1dGUXqXBF98k7jfexATZW,\n//   output: AIChatMessage{\n//     content: Why did the ice cream cone go to therapy? Because it had too many sprinkles of emotional issues!,\n//   },\n//   finishReason: FinishReason.stop,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714327251,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 14,\n//     promptBillableCharacters: null,\n//     responseTokens: 21,\n//     responseBillableCharacters: null,\n//     totalTokens: 35\n//     },\n//   streaming: false\n// }\n```\n\n## RAG Search Example\n\nFor our next example, we want to run a retrieval-augmented generation chain to add some context when responding to questions.\n\n```dart\n// 1. Create a vector store and add documents to it\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n  ],\n);\n\n// 2. 
Define the retrieval chain\nfinal retriever = vectorStore.asRetriever();\nfinal setupAndRetrieval = Runnable.fromMap<String>({\n  'context': retriever.pipe(\n    Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n  ),\n  'question': Runnable.passthrough(),\n});\n\n// 3. Construct a RAG prompt template\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'Answer the question based on only the following context:\\n{context}'),\n  (ChatMessageType.human, '{question}'),\n]);\n\n// 4. Define the final chain\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\nfinal chain = setupAndRetrieval\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n\n// 5. Run the pipeline\nfinal res = await chain.invoke('Who created LangChain.dart?');\nprint(res);\n// David created LangChain.dart\n```\n\nIn this case, the composed chain is:\n\n```dart\nfinal chain = setupAndRetrieval\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n```\n\nTo explain this, we first can see that the prompt template above takes in `context` and `question` as values to be substituted in the prompt. Before building the prompt template, we want to retrieve relevant documents to the search and include them as part of the context.\n\nAs a preliminary step, we’ve set up the retriever using an in memory store, which can retrieve documents based on a query. 
This is a runnable component as well that can be chained together with other components, but you can also try to run it separately:\n\n```dart\nfinal res1 = await retriever.invoke('Who created LangChain.dart?');\nprint(res1);\n// [Document{pageContent: David ported LangChain to Dart in LangChain.dart}, \n// Document{pageContent: LangChain was created by Harrison, metadata: {}}]\n```\n\nWe then use the `RunnableMap` to prepare the expected inputs into the prompt by using a string containing the combined retrieved documents as well as the original user question, using the `retriever` for document search, a `RunnableMapInput` to combine the documents and `RunnablePassthrough` to pass the user's question:\n\n```dart\nfinal setupAndRetrieval = Runnable.fromMap<String>({\n  'context': retriever.pipe(\n    Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n  ),\n  'question': Runnable.passthrough(),\n});\n```\n\nTo review, the complete chain is:\n\n```dart\nfinal chain = setupAndRetrieval\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n```\n\nWith the flow being:\n1. The first steps create a `RunnableMap` object with two entries. The first entry, `context` will include the combined document results fetched by the retriever. The second entry, `question` will contain the user’s original question. To pass on the `question`, we use `RunnablePassthrough` to copy this entry.\n2. Feed the map from the step above to the `promptTemplate` component. It then takes the user input which is `question` as well as the retrieved documents which is `context` to construct a prompt and output a `PromptValue`.\n3. The `model` component takes the generated prompt, and passes into the OpenAI LLM model for evaluation. The generated `output` from the model is a `ChatResult` object.\n4. 
Finally, the `outputParser` component takes in the `ChatResult`, and transforms this into a Dart String, which is returned from the invoke method.\n\n![RAG Pipeline](img/rag_pipeline.png)\n"
  },
  {
    "path": "docs/expression_language/interface.md",
    "content": "# Runnable interface\n\nTo make it as easy as possible to create custom chains, LangChain provides a `Runnable` interface that most components implement, including chat models, LLMs, output parsers, retrievers, prompt templates, and more.\n\nThis is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. The standard interface includes:\n\n- `invoke`: call the chain on an input and return the output.\n- `stream`: call the chain on an input and stream the output.\n- `batch`: call the chain on a list of inputs and return a list of outputs.\n\nThe type of the input and output varies by component:\n\n| Component                   | Input Type             | Output Type            |\n|-----------------------------|------------------------|------------------------|\n| `PromptTemplate`            | `Map<String, dynamic>` | `PromptValue`          |\n| `ChatMessagePromptTemplate` | `Map<String, dynamic>` | `PromptValue`          |\n| `LLM`                       | `PromptValue`          | `LLMResult`            |\n| `ChatModel`                 | `PromptValue`          | `ChatResult`           |\n| `OutputParser`              | Any object             | Parser output type     |\n| `Retriever`                 | `String`               | `List<Document>`       |\n| `DocumentTransformer`       | `List<Document>`       | `List<Document>`       |\n| `Tool`                      | `Map<String, dynamic>` | `String`               |\n| `Chain`                     | `Map<String, dynamic>` | `Map<String, dynamic>` |\n\nThere are also several useful primitives for working with runnables, which you can read about in [this section](/expression_language/primitives.md).\n\n## Runnable interface\n\nLet's take a look at these methods! 
To do so, we'll create a super simple `PromptTemplate` + `ChatModel` chain.\n\n```dart\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {topic}',\n);\n\nfinal chain = promptTemplate.pipe(model).pipe(StringOutputParser());\n```\n\nIn this example, we use the method `pipe` to combine runnables into a sequence. You can read more about this in the [RunnableSequence: Chaining runnables](/expression_language/primitives/sequence.md) section.\n\n### Invoke\n\nThe `invoke` method takes an input and returns the output of invoking the chain on that input.\n\n```dart\nfinal res = await chain.invoke({'topic': 'bears'});\nprint(res);\n// Why don't bears wear shoes? Because they have bear feet!\n```\n\n### Stream\n\nThe `stream` method takes an input and streams back chunks of the output.\n\n```dart\nfinal stream = chain.stream({'topic': 'bears'});\nint count = 0;\nawait for (final res in stream) {\n  print('$count: $res');\n  count++;\n}\n// 0:\n// 1: Why\n// 2:  don\n// 3: 't\n// 4:  bears\n// 5:  like\n// 6:  fast\n// 7:  food\n// 8: ?\n// 9: Because\n// 10:  they\n// 11:  can\n// 12: 't\n// 13:  catch\n// 14:  it\n// 15: !\n```\n\n### Batch\n\nBatches the invocation of the `Runnable` on the given `inputs`.\n\n```dart\nfinal res = await chain.batch([\n  {'topic': 'bears'},\n  {'topic': 'cats'},\n]);\nprint(res);\n//['Why did the bear break up with his girlfriend? Because she was too \"grizzly\" for him!',\n// 'Why was the cat sitting on the computer? Because it wanted to keep an eye on the mouse!']\n```\n\nIf the underlying provider supports batching, this method will try to batch the calls to the provider. Otherwise, it will just call `invoke` on each input concurrently. You can configure the concurrency limit by setting the `concurrencyLimit` field in the `options` parameter.\n\nYou can provide call options to the `batch` method using the `options` parameter. 
It can be:\n- `null`: the default options are used.\n- List with 1 element: the same options are used for all inputs.\n- List with the same length as the inputs: each input gets its own options.\n\n```dart\nfinal res = await chain.batch(\n  [\n    {'topic': 'bears'},\n    {'topic': 'cats'},\n  ],\n  options: [\n    const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5),\n    const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7),\n  ],\n);\nprint(res);\n//['Why did the bear break up with his girlfriend? Because he couldn't bear the relationship anymore!,',\n// 'Why don't cats play poker in the jungle? Because there's too many cheetahs!']\n```\n"
  },
  {
    "path": "docs/expression_language/primitives/binding.md",
    "content": "# RunnableBinding: Configuring runnables at runtime\n\nSometimes we want to invoke a `Runnable` within a `Runnable` sequence with constant options that are not part of the output of the preceding `Runnable` in the sequence, and which are not part of the user input. We can use `Runnable.bind()` to pass these options in.\n\nSuppose we have a simple prompt + model sequence:\n\n```dart\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system,\n    'Write out the following equation using algebraic symbols then solve it. '\n        'Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n'),\n  (ChatMessageType.human, '{equation_statement}'),\n]);\n\nfinal chain = Runnable.getMapFromInput<String>('equation_statement')\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n\nfinal res = await chain.invoke('x raised to the third plus seven equals 12');\nprint(res);\n// EQUATION: \\(x^3 + 7 = 12\\)\n//\n// SOLUTION:\n// Subtract 7 from both sides:\n// \\(x^3 = 5\\)\n//\n// Take the cube root of both sides:\n// \\(x = \\sqrt[3]{5}\\)\n```\n\nand want to call the model with certain `stop` words:\n\n```dart\nfinal chain2 = Runnable.getMapFromInput<String>('equation_statement')\n    .pipe(promptTemplate)\n    .pipe(model.bind(ChatOpenAIOptions(stop: ['SOLUTION'])))\n    .pipe(outputParser);\nfinal res2 = await chain2.invoke('x raised to the third plus seven equals 12');\nprint(res2);\n// EQUATION: \\( x^3 + 7 = 12 \\)\n```\n\nYou can use this pattern to configure different options for the same runnable without having to create a new instance. 
For example, you can use different models for different prompts:\n\n```dart\nfinal chatModel = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\nfinal prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\nfinal prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n\nfinal chain = Runnable.fromMap({\n  'q1': prompt1 |\n  chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) |\n  outputParser,\n  'q2': prompt2 |\n  chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) |\n  outputParser,\n});\n\nfinal res = await chain.invoke({'name': 'David'});\nprint(res);\n// {q1: Hello! I'm just a computer program, so I don't have feelings,\n// q2: I am an AI digital assistant, so I do not have an age like humans do.}\n```\n\nAnother similar use case is to use different `temperature` settings for different parts of the chain. You can easily do this by using `model.bind(ChatOpenAIOptions(temperature: 1))` as shown above.\n\n## Attaching tools\n\nOne particularly useful application of `Runnable.bind()` is to attach the tools that the model can call.\n\n```dart\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nfinal outputParser = ToolsOutputParser();\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'Write out the following equation using algebraic symbols then solve it.'),\n  (ChatMessageType.human, '{equation_statement}'),\n]);\n\nconst tool = ToolSpec(\n  name: 'solver',\n  description: 'Formulates and solves an equation',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'equation': {\n        'type': 'string',\n        'description': 'The algebraic expression of the equation',\n      },\n      'solution': {\n        'type': 'string',\n        'description': 'The solution to the equation',\n      },\n    },\n    'required': ['equation', 'solution'],\n  },\n);\n\nfinal chain = Runnable.getMapFromInput<String>('equation_statement')\n    
.pipe(promptTemplate)\n    .pipe(model.bind(ChatOpenAIOptions(tools: [tool])))\n    .pipe(outputParser);\n\nfinal res = await chain.invoke('x raised to the third plus seven equals 12');\nprint(res);\n// [ParsedToolCall{\n//   id: call_T2Y3g7rU5s0CzEG4nL35FJYK,\n//   name: solver,\n//   arguments: {\n//     equation: x^3 + 7 = 12, \n//     solution: x = 1\n//   },\n// }]\n```\n"
  },
  {
    "path": "docs/expression_language/primitives/function.md",
    "content": "# Function: Run custom logic\n\nAs we discussed in the [Mapper: Mapping input values](/expression_language/primitives/map.md) section, it is common to need to map the output value of a previous runnable to a new value that conforms to the input requirements of the next runnable. `Runnable.mapInput`, `Runnable.mapInputStream`, `Runnable.getItemFromMap`, and `Runnable.getMapFromInput` are the easiest way to do that with minimal boilerplate. However, sometimes you may need more control over the input and output values. This is where `Runnable.fromFunction` comes in.\n\nThe main differences between `Runnable.mapInput` and `Runnable.fromFunction` are:\n- `Runnable.fromFunction` allows you to define separate logic for invoke vs stream.\n- `Runnabe.mapInput` allows you to access the invocation options.\n\n## Runnable.fromFunction\n\nIn the following example, we use `Runnable.fromFunction` to log the output value of the previous `Runnable`. Note that we have print different messages depending on whether the chain is invoked or streamed.\n\n```dart\nRunnable<T, RunnableOptions, T> logOutput<T extends Object>(String stepName) {\n  return Runnable.fromFunction<T, T>(\n    invoke: (input, options) {\n      print('Output from step \"$stepName\":\\n$input\\n---');\n      return Future.value(input);\n    },\n    stream: (inputStream, options) {\n      return inputStream.map((input) {\n        print('Chunk from step \"$stepName\":\\n$input\\n---');\n        return input;\n      });\n    },\n  );\n}\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n    ChatMessageType.system,\n    'Write out the following equation using algebraic symbols then solve it. 
'\n        'Use the format:\\nEQUATION:...\\nSOLUTION:...\\n',\n  ),\n  (ChatMessageType.human, '{equation_statement}'),\n]);\n\nfinal chain = Runnable.getMapFromInput<String>('equation_statement')\n    .pipe(logOutput('getMapFromInput'))\n    .pipe(promptTemplate)\n    .pipe(logOutput('promptTemplate'))\n    .pipe(ChatOpenAI(apiKey: openaiApiKey))\n    .pipe(logOutput('chatModel'))\n    .pipe(const StringOutputParser())\n    .pipe(logOutput('outputParser'));\n```\n\nWhen we invoke the chain, we get the following output:\n```dart\nawait chain.invoke('x raised to the third plus seven equals 12');\n// Output from step \"getMapFromInput\":\n// {equation_statement: x raised to the third plus seven equals 12}\n// ---\n// Output from step \"promptTemplate\":\n// System: Write out the following equation using algebraic symbols then solve it. Use the format\n//\n// EQUATION:...\n// SOLUTION:...\n//\n// Human: x raised to the third plus seven equals 12\n// ---\n// Output from step \"chatModel\":\n// ChatResult{\n//   id: chatcmpl-9JcVxKcryIhASLnpSRMXkOE1t1R9G,\n//   output: AIChatMessage{\n//     content:\n//       EQUATION: \\( x^3 + 7 = 12 \\)\n//       SOLUTION:\n//       Subtract 7 from both sides of the equation:\n//       \\( x^3 = 5 \\)\n//\n//       Take the cube root of both sides:\n//       \\( x = \\sqrt[3]{5} \\)\n//\n//       Therefore, the solution is \\( x = \\sqrt[3]{5} \\),\n//   },\n//   finishReason: FinishReason.stop,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714463309,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 47,\n//     responseTokens: 76,\n//     totalTokens: 123\n//   },\n//   streaming: false\n// }\n// ---\n// Output from step \"outputParser\":\n// EQUATION: \\( x^3 + 7 = 12 \\)\n//\n// SOLUTION:\n// Subtract 7 from both sides of the equation:\n// \\( x^3 = 5 \\)\n//\n// Take the cube root of both sides:\n// \\( x = \\sqrt[3]{5} \\)\n//\n// Therefore, the solution 
is \\( x = \\sqrt[3]{5} \\)\n```\n\nWhen we stream the chain, we get the following output:\n```dart\nchain.stream('x raised to the third plus seven equals 12').listen((_){});\n// Chunk from step \"getMapFromInput\":\n// {equation_statement: x raised to the third plus seven equals 12}\n// ---\n// Chunk from step \"promptTemplate\":\n// System: Write out the following equation using algebraic symbols then solve it. Use the format:\n// EQUATION:...\n// SOLUTION:...\n// \n// Human: x raised to the third plus seven equals 12\n// ---\n// Chunk from step \"chatModel\":\n// ChatResult{\n//   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK, \n//   output: AIChatMessage{\n//     content: E,\n//   },\n//   finishReason: FinishReason.unspecified,\n//   metadata: {\n//     model: gpt-4o-mini, \n//     created: 1714463766, \n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{},\n//   streaming: true\n// }\n// ---\n// Chunk from step \"outputParser\":\n// E\n// ---\n// Chunk from step \"chatModel\":\n// ChatResult{\n//   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK, \n//   output: AIChatMessage{\n//     content: QU,\n//   },\n//   finishReason: FinishReason.unspecified,\n//   metadata: {\n//     model: gpt-4o-mini, \n//     created: 1714463766, \n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{},\n//   streaming: true\n// }\n// ---\n// Chunk from step \"outputParser\":\n// QU\n// ---\n// Chunk from step \"chatModel\":\n// ChatResult{\n//   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK, \n//   output: AIChatMessage{\n//     content: ATION,\n//   },\n//   finishReason: FinishReason.unspecified,\n//   metadata: {\n//     model: gpt-4o-mini, \n//     created: 1714463766, \n//     system_fingerprint: fp_3b956da36b\n//   },\n//   usage: LanguageModelUsage{},\n//   streaming: true\n// }\n// ---\n// Chunk from step \"outputParser\":\n// ATION\n// ---\n// ...\n```\n"
  },
  {
    "path": "docs/expression_language/primitives/map.md",
    "content": "# RunnableMap: Formatting inputs & concurrency\n\nThe `RunnableMap` primitive is essentially a map whose values are runnables. It runs all of its values concurrently, and each value is called with the overall input of the `RunnableMap`. The final return value is a map with the results of each value under its appropriate key.\n\nIt is useful for running operations concurrently, but can also be useful for manipulating the output of one `Runnable` to match the input format of the next `Runnable` in a sequence.\n\nHere the input to prompt is expected to be a map with keys “context” and “question”. The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the “question” key.\n\n```dart\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n  ],\n);\nfinal retriever = vectorStore.asRetriever();\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'Answer the question based on only the following context:\\n{context}'),\n  (ChatMessageType.human, '{question}'),\n]);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal retrievalChain = Runnable.fromMap<String>({\n  'context': retriever,\n  'question': Runnable.passthrough(),\n}).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\nfinal res = await retrievalChain.invoke('Who created LangChain.dart?');\nprint(res);\n// David created LangChain.dart.\n```\n\n## Using Runnable.getItemFromMap as shorthand\n\nSometimes you need to extract one value from a map and pass it to the next `Runnable`. You can use `Runnable.getItemFromMap` to do this. 
It takes the input map and returns the value of the provided key.\n\n```dart\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    const Document(pageContent: 'LangChain was created by Harrison'),\n    const Document(\n      pageContent: 'David ported LangChain to Dart in LangChain.dart',\n    ),\n  ],\n);\nfinal retriever = vectorStore.asRetriever();\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n    ChatMessageType.system,\n    'Answer the question based on only the following context:\\n{context}\\n'\n        'Answer in the following language: {language}',\n  ),\n  (ChatMessageType.human, '{question}'),\n]);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal retrievalChain = Runnable.fromMap<Map<String, dynamic>>({\n  'context': Runnable.getItemFromMap('question').pipe(retriever),\n  'question': Runnable.getItemFromMap('question'),\n  'language': Runnable.getItemFromMap('language'),\n}).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\nfinal res = await retrievalChain.invoke({\n  'question': 'Who created LangChain.dart?',\n  'language': 'Spanish',\n});\nprint(res);\n// David portó LangChain a Dart en LangChain.dart\n```\n\n## Running steps concurrently\n\n`RunnableMap` makes it easy to execute multiple `Runnables` concurrently and to return the output of these Runnables as a map.\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal jokeChain = PromptTemplate.fromTemplate('tell me a joke about {topic}')\n    .pipe(model)\n    .pipe(outputParser);\nfinal poemChain =\n    PromptTemplate.fromTemplate('write a 2-line poem about {topic}')\n        .pipe(model)\n        .pipe(outputParser);\n\nfinal mapChain = Runnable.fromMap<Map<String, 
dynamic>>({\n  'joke': jokeChain,\n  'poem': poemChain,\n});\n\nfinal res = await mapChain.invoke({\n  'topic': 'bear',\n});\nprint(res);\n// {joke: Why did the bear bring a flashlight to the party? Because he wanted to be the \"light\" of the party!, \n//  poem: In the forest's hush, the bear prowls wide, A silent guardian, a force of nature's pride.}\n```\n\nEach branch of the `RunnableMap` is still run on the same isolate, but they are run concurrently. In the example above, the two requests to the OpenAI API are made concurrently, without waiting for the first to finish before starting the second.\n"
  },
  {
    "path": "docs/expression_language/primitives/mapper.md",
    "content": "# Mapper: Mapping input values\n\nIt is common to need to map the output value of a previous runnable to a new value that conforms to the input requirements of the next runnable. This is where `Runnable.mapInput` comes in.\n\n## Runnable.mapInput\n\n`Runnable.mapInput` allows you to define a function that maps the input value to a new value.\n\nIn the following example, we retrieve a list of `Document` objects from our vector store, and we want to combine them into a single string to feed it in our prompt. To do this, we use `Runnable.mapInput` to implement the combination logic.\n\n```dart\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n  ],\n);\n\nfinal retriever = vectorStore.asRetriever();\nfinal setupAndRetrieval = Runnable.fromMap<String>({\n  'context': retriever.pipe(\n    Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n  ),\n  'question': Runnable.passthrough(),\n});\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'Answer the question based on only the following context:\\n{context}'),\n  (ChatMessageType.human, '{question}'),\n]);\n\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\nfinal chain = setupAndRetrieval\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n\nfinal res = await chain.invoke('Who created LangChain.dart?');\nprint(res);\n// David created LangChain.dart\n```\n\n## Runnable.mapInputStream\n\nBy default, when running a chain using `stream` instead of `invoke`, `Runnable.mapInput` will be called for every item in the input stream. 
If you need more control over the input stream, you can use `Runnable.mapInputStream` instead which takes the input stream as a parameter and returns a new stream.\n\nIn the following example, the model streams the output in chunks and the output parser processes each of them individually. However, we want our chain to output only the last chunk. We can use `Runnable.mapInputStream` to get the last chunk from the input stream.\n\n```dart\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    responseFormat: ChatOpenAIResponseFormat.jsonObject,\n  ),\n);\nfinal parser = JsonOutputParser<ChatResult>();\nfinal mapper = Runnable.mapInputStream((Stream<Map<String, dynamic>> inputStream) async* {\n  yield await inputStream.last;\n});\n\nfinal chain = model.pipe(parser).pipe(mapper);\n\nfinal stream = chain.stream(\n  PromptValue.string(\n    'Output a list of the countries france, spain and japan and their '\n        'populations in JSON format. Use a dict with an outer key of '\n        '\"countries\" which contains a list of countries. '\n        'Each country should have the key \"name\" and \"population\"',\n  ),\n);\nawait stream.forEach((final chunk) => print('$chunk|'));\n// {countries: [{name: France, population: 65273511}, {name: Spain, population: 46754778}, {name: Japan, population: 126476461}]}|\n```\n\n> Note: for more complex use-cases where you want to define separate logic for when the chain is run using `invoke` or `stream`, you can use `Runnable.fromFunction`.\n\n## Runnable.getItemFromMap\n\nSometimes the previous runnable returns a map, and you want to get a value from it to feed it to the next runnable. You can use `Runnable.getItemFromMap` to get a value from an input map.\n\nIn the following example, we want to feed to our retriever the question but the input is a map with several other values. 
We can use `Runnable.getItemFromMap` to get the question from the input map, as well as to propagate the other values to the next runnable.\n\n```dart\nfinal retrievalChain = Runnable.fromMap<Map<String, dynamic>>({\n  'context': Runnable.getItemFromMap('question').pipe(retriever),\n  'question': Runnable.getItemFromMap('question'),\n  'language': Runnable.getItemFromMap('language'),\n}).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\nfinal res = await retrievalChain.invoke({\n  'question': 'Who created LangChain.dart?',\n  'language': 'Spanish',\n});\nprint(res);\n// David portó LangChain a Dart en LangChain.dart\n```\n\n> Note: this is equivalent to  \n> `Runnable.mapInput<Map<String, dynamic>, RunOutput>((input) => input[key])`\n\n## Runnable.getMapFromInput\n\nSometimes the previous runnable returns a single item, but the next runnable expects a map. You can use `Runnable.getMapFromInput` to format the input for the next runnable.\n\nIn the following example, we want our chain input type to be a String, but the prompt template expects a map. We can use `Runnable.getMapFromInput` to format the input for the prompt template.\n\n```dart\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (\n  ChatMessageType.system,\n  'Write out the following equation using algebraic symbols then solve it. 
'\n      'Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n',\n  ),\n  (ChatMessageType.human, '{equation_statement}'),\n]);\n\nfinal chain = Runnable.getMapFromInput<String>('equation_statement')\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n\nfinal res = await chain.invoke('x raised to the third plus seven equals 12');\nprint(res);\n// EQUATION: \\(x^3 + 7 = 12\\)\n//\n// SOLUTION:\n// Subtract 7 from both sides:\n// \\(x^3 = 5\\)\n//\n// Take the cube root of both sides:\n// \\(x = \\sqrt[3]{5}\\)\n```\n\n> Note: this is equivalent to  \n> `Runnable.mapInput<RunInput, Map<String, dynamic>>((input) => {key: input})`\n"
  },
  {
    "path": "docs/expression_language/primitives/passthrough.md",
    "content": "# Passthrough: Passing inputs through\n\n`RunnablePassthrough` on its own allows you to pass inputs unchanged. This typically is used in conjunction with `RunnableMap` to pass data through to a new key in the map.\n\nSee the example below:\n\n```dart\nfinal runnable = Runnable.fromMap<Map<String, dynamic>>({\n  'passed': Runnable.passthrough(),\n  'modified': Runnable.mapInput((input) => (input['num'] as int) + 1),\n});\n\nfinal res = await runnable.invoke({'num': 1});\nprint(res);\n// {passed: {num: 1}, modified: 2}\n```\n\nAs seen above, `passed` key was called with `RunnablePassthrough` and so it simply passed on `{'num': 1}`.\n\nWe also set a second key in the map with `modified`. This uses a map input to set a single value adding 1 to the num, which resulted in `modified` key with the value of 2.\n\n## Retrieval Example\n\nIn the example below, we see a use case where we use `RunnablePassthrough` along with `RunnableMap`.\n\n```dart\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n  ],\n);\nfinal retriever = vectorStore.asRetriever();\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'Answer the question based on only the following context:\\n{context}'),\n  (ChatMessageType.human, '{question}'),\n]);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal retrievalChain = Runnable.fromMap<String>({\n  'context': retriever,\n  'question': Runnable.passthrough(),\n}).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\nfinal res = await retrievalChain.invoke('Who created LangChain.dart?');\nprint(res);\n// David created LangChain.dart.\n```\n\nHere the input to prompt is expected to be a map with keys 
“context” and “question”. The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the “question” key. In this case, the RunnablePassthrough allows us to pass on the user’s question to the prompt and model.\n"
  },
  {
    "path": "docs/expression_language/primitives/retry.md",
    "content": "# RunnableRetry : Retrying Runnables\n\n`RunnableRetry` wraps a `Runnable` and retries it if it fails. It be created using `runnable.withRetry()`. \n\nBy default, the runnable will be retried 3 times with exponential backoff strategy.\n\n## Usage\n\n## Creating a RunnableRetry\n\n```dart\nfinal model = ChatOpenAI();\nfinal input = PromptValue.string('Explain why sky is blue in 2 lines');\n\nfinal modelWithRetry = model.withRetry();\nfinal res = await modelWithRetry.invoke(input);\nprint(res);\n```\n\n## Retrying a chain\n\n`RunnableRetry` can be used to retry any `Runnable`, including a chain of `Runnable`s.\n\nExample\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplate('tell me a joke about {topic}');\nfinal model = ChatOpenAI(\n  defaultOptions: ChatOpenAIOptions(model: 'gpt-4o'),\n);\nfinal chain = promptTemplate.pipe(model).withRetry();\n\nfinal res = await chain.batch(\n  [\n    {'topic': 'bears'},\n    {'topic': 'cats'},\n  ],\n);\nprint(res);\n```\n\n> In general, it's best to keep the scope of the retry as small as possible.\n\n## Configuring the retry\n\n```dart\n// passing a fake model to cause Exception\nfinal input = PromptValue.string('Explain why sky is blue in 2 lines');\nfinal model = ChatOpenAI(\n  defaultOptions: ChatOpenAIOptions(model: 'fake-model'),\n);\nfinal modelWithRetry = model.withRetry(\n    maxRetries: 3,\n    addJitter: true,\n);\nfinal res = await modelWithRetry.invoke(input);\nprint(res);\n// retried 3 times and returned Exception:\n// OpenAIClientException({\n//   \"uri\": \"https://api.openai.com/v1/chat/completions\",\n//   \"method\": \"POST\",\n//   \"code\": 404,\n//   \"message\": \"Unsuccessful response\",\n//   \"body\": {\n//     \"error\": {\n//       \"message\": \"The model `fake-model` does not exist or you do not have access to it.\",\n//       \"type\": \"invalid_request_error\",\n//       \"param\": null,\n//       \"code\": \"model_not_found\"\n//     }\n//   }\n// }) \n```\n\n## 
Passing delay durations\n\nIf you want to use custom delay durations for each retry attempt, you can pass a list of `Duration` objects to the `delayDurations` parameter.\n\n```dart\nfinal input = PromptValue.string('Explain why sky is blue in 2 lines');\nfinal model = ChatOpenAI(\n  defaultOptions: ChatOpenAIOptions(model: 'fake-model'),\n);\nfinal modelWithRetry = model.withRetry(\n    maxRetries: 3,\n    delayDurations: [\n      Duration(seconds: 1),\n      Duration(seconds: 2),\n      Duration(seconds: 3),\n    ],\n);\nfinal res = await modelWithRetry.invoke(input);\nprint(res);\n```\n"
  },
  {
    "path": "docs/expression_language/primitives/router.md",
    "content": "# Dynamically route logic based on input\n\nThis notebook covers how to do routing in the LangChain Expression Language.\n\nRouting allows you to create non-deterministic chains where the output of a previous step defines the next step. Routing helps provide structure and consistency around interactions with LLMs.\n\n## Using RunnableRouter\n\nWe’ll illustrate how to perform routing using a two-step sequence where the first step classifies an input question as being about LangChain, Anthropic, or Other, then routes to a corresponding prompt chain.\n\nFirst, let’s create a chain that will identify incoming questions as being about `LangChain`, `Anthropic`, or `Other`:\n\n```dart\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(model: 'llama3.2'),\n);\n\nfinal classificationChain = PromptTemplate.fromTemplate('''\nGiven the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.\n\nDo not respond with more than one word.\n\n<question>\n{question}\n</question>\n\nClassification:\n''') | chatModel | StringOutputParser();\n\nfinal res1 = await classificationChain.invoke({\n  'question': 'how do I call Anthropic?',\n});\nprint(res1);\n// Anthropic\n```\n\nNow, let’s create three sub-chains:\n\n```dart\nfinal langchainChain = PromptTemplate.fromTemplate('''\nYou are an expert in langchain.\nAlways answer questions starting with \"As Harrison Chase told me\".\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n''') | chatModel | StringOutputParser();\n\nfinal anthropicChain = PromptTemplate.fromTemplate('''\nYou are an expert in anthropic.\nAlways answer questions starting with \"As Dario Amodei told me\".\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n''') | chatModel | StringOutputParser();\n\n  final generalChain = PromptTemplate.fromTemplate('''\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n  ''') | chatModel | 
StringOutputParser();\n```\n\n`RunnableRouter` is a type of runnable that takes a function that routes the input to a specific `Runnable`. You can use `Runnable.fromRouter` to create a `RunnableRouter`.\n\nIn this example, we will return one of the three chains we defined earlier based on the topic returned by the classification chain.\n\n```dart\nfinal router = Runnable.fromRouter((Map<String, dynamic> input, _) {\n  final topic = (input['topic'] as String).toLowerCase();\n  if (topic.contains('langchain')) {\n    return langchainChain;\n  } else if (topic.contains('anthropic')) {\n    return anthropicChain;\n  } else {\n    return generalChain;\n  }\n});\n\nfinal fullChain = Runnable.fromMap({\n      'topic': classificationChain,\n      'question': Runnable.getItemFromMap('question'),\n    }) | router;\n\nfinal res2 = await fullChain.invoke({\n  'question': 'how do I use Anthropic?',\n});\nprint(res2);\n// As Dario Amodei told me, using Anthropic is a straightforward process that...\n\nfinal res3 = await fullChain.invoke({\n  'question': 'how do I use LangChain?',\n});\nprint(res3);\n// As Harrison Chase told me, using LangChain is a breeze!\n\nfinal res4 = await fullChain.invoke({\n  'question': 'whats 2 + 2',\n});\nprint(res4);\n// The answer is... 4!\n```\n\n## Routing by semantic similarity\n\nOne especially useful technique is to use embeddings to route a query to the most relevant prompt. \n\nHere’s an example where we have two specialized prompts, one for physics and one for history. We will use embeddings to determine which prompt is best suited to answer a given question.\n\n```dart\nconst physicsTemplate = '''\nYou are a very smart physicist.\nYou are great at answering questions about physics (e.g. 
black holes, quantum mechanics, etc.)\nin a concise and easy to understand manner.\nWhen you don't know the answer to a question you admit that you don't know.\n\nHere is a question:\n{query}\n''';\n\nconst historyTemplate = '''\nYou are a very good historian.\nYou are great at answering history questions (e.g. about the Roman Empire, World War II, etc.) \nin a detailed and engaging manner. \nYou are able to provide a lot of context and background information.\n\n\nHere is a question:\n{query}\n''';\n\nfinal embeddings = OllamaEmbeddings(model: 'llama3.2');\nfinal promptTemplates = [physicsTemplate, historyTemplate];\nfinal promptEmbeddings = await embeddings.embedDocuments(\n  promptTemplates.map((final pt) => Document(pageContent: pt)).toList(),\n);\n\nfinal chain = Runnable.fromMap<String>({'query': Runnable.passthrough()}) |\n    Runnable.fromRouter((input, _) async {\n      final query = input['query'] as String;\n      final queryEmbedding = await embeddings.embedQuery(query);\n      final mostSimilarIndex = getIndexesMostSimilarEmbeddings(queryEmbedding, promptEmbeddings).first;\n      print('Using ${mostSimilarIndex == 0 ? 'Physicist' : 'Historian'}');\n      return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]);\n    }) |\n    ChatOllama(\n      defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n    ) |\n    StringOutputParser();\n\nfinal res1 = await chain.invoke(\"What's a black hole?\");\nprint(res1);\n// Using Physicist\n// Black holes! One of my favorite topics!\n// A black hole is a region in space where the gravitational pull is so strong...\n\nfinal res2 = await chain.invoke('When did World War II end?');\nprint(res2);\n// Using Historian\n// A great question to start with! World War II ended on September 2, 1945...\n```\n"
  },
  {
    "path": "docs/expression_language/primitives/sequence.md",
    "content": "# RunnableSequence: Chaining runnables\n\nOne key advantage of the `Runnable` interface is that any two runnables can be “chained” together into sequences. The output of the previous runnable’s `.invoke()` call is passed as input to the next runnable. This can be done using the `.pipe()` method (or the `|` operator, which is a convenient shorthand for `.pipe()`). The resulting `RunnableSequence` is itself a runnable, which means it can be invoked, streamed, or piped just like any other runnable.\n\n> Note: when using the `|` operator, the output type of the last runnable is always resolved to `Object` because of [Dart limitations](https://github.com/dart-lang/language/issues/1044). If you need to preserve the output type, use the `.pipe()` method instead.\n\n## The pipe operator\n\nTo show off how this works, let’s go through an example. We’ll walk through a common pattern in LangChain: using a prompt template to format input into a chat model, and finally converting the chat message output into a string with an output parser.\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {topic}',\n);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal chain = promptTemplate.pipe(model).pipe(outputParser);\n```\n\nPrompts and models are both runnable, and the output type from the prompt call is the same as the input type of the chat model, so we can chain them together. We can then invoke the resulting sequence like any other runnable:\n\n```dart\nfinal res = await chain.invoke({'topic': 'bears'});\nprint(res);\n// Why don't bears wear socks?\n// Because they have bear feet!\n```\n\n## Formatting inputs & output\n\nWe can even combine this chain with more runnables to create another chain. 
This may involve some input/output formatting using other types of runnables, depending on the required inputs and outputs of the chain components.\n\nFor example, let’s say we wanted to compose the joke generating chain with another chain that evaluates whether the generated joke was funny.\n\nWe would need to be careful with how we format the input into the next chain. In the below example, we use a `RunnableMap` which runs all of its values concurrently and returns a map with the results which can then be passed to the prompt template.\n\n```dart\nfinal analysisPrompt = ChatPromptTemplate.fromTemplate(\n  'is this a funny joke? {joke}',\n);\nfinal composedChain = Runnable.fromMap({\n  'joke': chain,\n}).pipe(analysisPrompt).pipe(model).pipe(outputParser);\n\nfinal res1 = await composedChain.invoke({'topic': 'bears'});\nprint(res1);\n// Some people may find this joke funny, especially if they enjoy puns or wordplay...\n```\n\nInstead of using `Runnable.fromMap`, we can use the convenience method `Runnable.getMapFromInput` which will automatically create a `RunnableMap` placing the input value into the map with the key specified.\n\n```dart\nfinal composedChain2 = chain\n    .pipe(Runnable.getMapFromInput('joke'))\n    .pipe(analysisPrompt)\n    .pipe(model)\n    .pipe(outputParser);\n```\n\nAnother option is to use `Runnable.mapInput` which allows to transform the input value using the provided function.\n\n```dart\nfinal composedChain3 = chain\n    .pipe(Runnable.mapInput((joke) => <String, dynamic>{'joke': joke}))\n    .pipe(analysisPrompt)\n    .pipe(model)\n    .pipe(outputParser);\n```\n\n## Runnable.fromList\n\nYou can also create a `RunnableSequence` from a list of runnables using `Runnable.fromList`. \n\n```dart\nfinal chain = Runnable.fromList([promptTemplate, chatModel]);\n```\n"
  },
  {
    "path": "docs/expression_language/primitives.md",
    "content": "# Primitives\n\nIn addition to various components that are usable with LCEL, LangChain also includes various primitives that help pass around and format data, bind arguments, invoke custom logic, and more.\n\nThis section goes into greater depth on where and how some of these components are useful.\n\n- [Sequence: Chaining runnables](/expression_language/primitives/sequence.md)\n- [Map: Formatting inputs & concurrency](/expression_language/primitives/map.md)\n- [Passthrough: Passing inputs through](/expression_language/primitives/passthrough.md)\n- [Mapper: Mapping inputs](/expression_language/primitives/mapper.md)\n- [Function: Run custom logic](/expression_language/primitives/function.md)\n- [Binding: Configuring runnables](/expression_language/primitives/binding.md)\n- [Router: Routing inputs](/expression_language/primitives/router.md)\n- [Retry: Retrying Runnable](/expression_language/primitives/retry.md)\n"
  },
  {
    "path": "docs/expression_language/streaming.md",
    "content": "# Streaming With LangChain\n\nStreaming is critical in making applications based on LLMs feel responsive to end-users.\n\nImportant LangChain primitives like LLMs, parsers, prompts, retrievers, and agents implement the LangChain [Runnable Interface](/expression_language/interface.md).\n\nThis guide will show you how to use `.stream()` to stream the final output of the chain.\n\n## Using Stream\n\nAll `Runnable` objects implement a method called `stream`.\n\nThese methods are designed to stream the final output in chunks, yielding each chunk as soon as it is available.\n\nStreaming is only possible if all steps in the program know how to process an **input stream**; i.e., process an input chunk one at a time, and yield a corresponding output chunk.\n\nThe complexity of this processing can vary, from straightforward tasks like emitting tokens produced by an LLM, to more challenging ones like streaming parts of JSON results before the entire JSON is complete.\n\nThe best place to start exploring streaming is with the single most important components in LLM apps – the models themselves!\n\n## LLMs and Chat Models\n\nLarge language models and their chat variants are the primary bottleneck in LLM based apps.\n\nLarge language models can take **several seconds** to generate a complete response to a query. This is far slower than the **~200-300 ms** threshold at which an application feels responsive to an end user.\n\nThe key strategy to make the application feel more responsive is to show intermediate progress; e.g., to stream the output from the model token by token.\n\n```dart\nfinal model = ChatOpenAI(apiKey: openAiApiKey);\n\nfinal stream = model.stream(PromptValue.string('Hello! 
Tell me about yourself.'));\nfinal chunks = <ChatResult>[];\nawait for (final chunk in stream) {\n  chunks.add(chunk);\n  stdout.write('${chunk.output.content}|');\n}\n// Hello|!| I| am| a| language| model| AI| created| by| Open|AI|,|...\n```\n\nLet’s have a look at one of the raw chunks:\n\n```dart\nprint(chunks.first);\n// ChatResult{\n//   id: chatcmpl-9IHQvyTl9fyVmF7P6zamGaX1XAN6d,\n//   output: AIChatMessage{\n//     content: Hello,\n//   },\n//   finishReason: FinishReason.unspecified,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714143945,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   streaming: true\n// }\n```\n\nWe got back a `ChatResult` instance as usual, but containing only a part of the full response (`Hello`). \n\nWe can identify results that are streamed by checking the `streaming` field. The result objects are additive by design – one can simply add them up using the `.concat()` method to get the state of the response so far!\n\n```dart\nfinal result = chunks.sublist(0, 6).reduce((prev, next) => prev.concat(next));\nprint(result);\n// ChatResult{\n//   id: chatcmpl-9IHQvyTl9fyVmF7P6zamGaX1XAN6d,\n//   output: AIChatMessage{\n//     content: Hello! I am a language model\n//   },\n//   finishReason: FinishReason.unspecified,\n//   metadata: {\n//     model: gpt-4o-mini,\n//     created: 1714143945,\n//     system_fingerprint: fp_3b956da36b\n//   },\n//   streaming: true\n// }\n```\n\n## Chains\n\nVirtually all LLM applications involve more steps than just a call to a language model.\n\nLet’s build a simple chain using LangChain Expression Language (LCEL) that combines a prompt, model and a parser and verify that streaming works.\n\nWe will use `StringOutputParser` to parse the output from the model. This is a simple parser that extracts the string output from the result returned by the model.\n\n> LCEL is a declarative way to specify a “program” by chaining together different LangChain primitives. 
Chains created using LCEL benefit from an automatic implementation of stream, allowing streaming of the final output. In fact, chains created with LCEL implement the entire standard Runnable interface. \n\n```dart\nfinal model = ChatOpenAI(apiKey: openAiApiKey);\nfinal prompt = ChatPromptTemplate.fromTemplate('Tell me a joke about {topic}');\nconst parser = StringOutputParser<ChatResult>();\n\nfinal chain = prompt.pipe(model).pipe(parser);\n\nfinal stream = chain.stream({'topic': 'parrot'});\nawait stream.forEach((final chunk) => stdout.write('$chunk|'));\n// |Why| don|'t| you| ever| play| hide| and| seek| with| a| par|rot|?|\n// |Because| they| always| squ|awk| when| they| find| you|!||\n```\n\nYou might notice above that parser actually doesn't block the streaming output from the model, and instead processes each chunk individually. Many of the LCEL primitives also support this kind of transform-style passthrough streaming, which can be very convenient when constructing apps.\n\n> You do not have to use the LangChain Expression Language to use LangChain and can instead rely on a standard imperative programming approach by calling invoke, batch or stream on each component individually, assigning the results to variables and then using them downstream as you see fit.  
\n> \n> If that works for your needs, then that’s fine by us 👌!\n\n## Working with Input Streams\n\nWhat if you wanted to stream JSON from the output as it was being generated?\n\nIf you were to rely on `json.decode` to parse the partial json, the parsing would fail as the partial json wouldn't be valid json.\n\nYou'd likely be at a complete loss of what to do and claim that it wasn't possible to stream JSON.\n\nWell, turns out there is a way to do it - the parser needs to operate on the input stream, and attempt to “auto-complete” the partial json into a valid state.\n\nLet’s see such a parser in action to understand what this means.\n\n```dart\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    responseFormat: ChatOpenAIResponseFormat.jsonObject,\n  ),\n);\nfinal parser = JsonOutputParser<ChatResult>();\n\nfinal chain = model.pipe(parser);\n\nfinal stream = chain.stream(\n  PromptValue.string(\n    'Output a list of the countries france, spain and japan and their '\n    'populations in JSON format. Use a dict with an outer key of '\n    '\"countries\" which contains a list of countries. 
'\n    'Each country should have the key \"name\" and \"population\"',\n  ),\n);\nawait stream.forEach((final chunk) => print('$chunk|'));\n// {}|\n// {countries: []}|\n// {countries: [{}]}|\n// {countries: [{name: }]}|\n// {countries: [{name: France}]}|\n// {countries: [{name: France, population: 670}]}|\n// {countries: [{name: France, population: 670760}]}|\n// {countries: [{name: France, population: 67076000}]}|\n// {countries: [{name: France, population: 67076000}, {}]}|\n// {countries: [{name: France, population: 67076000}, {name: }]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 467}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 467237}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: }]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126476}]}|\n// {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126476461}]}|\n```\n\n### Transforming Streams\n\nNow, instead of returning the complete JSON object, we want to extract the country names from the JSON as they are being generated. 
We can use `Runnable.mapInputStream` to transform the stream.\n\n```dart\nfinal mapper = Runnable.mapInputStream((Stream<Map<String, dynamic>> inputStream) {\n  return inputStream.map((input) {\n    final countries = (input['countries'] as List?)?.cast<Map<String, dynamic>>() ?? [];\n    final countryNames = countries\n        .map((country) => country['name'] as String?)\n        .where((c) => c != null && c.isNotEmpty);\n    return countryNames.join(', ');\n  }).distinct();\n});\n\nfinal chain = model.pipe(parser).pipe(mapper);\n\nfinal stream = chain.stream(\n  PromptValue.string(\n    'Output a list of the countries france, spain and japan and their '\n    'populations in JSON format. Use a dict with an outer key of '\n    '\"countries\" which contains a list of countries. '\n    'Each country should have the key \"name\" and \"population\"',\n  ),\n);\nawait stream.forEach(print);\n// France\n// France, Spain\n// France, Spain, Japan\n```\n\n## Non-streaming components\n\nThe following runnables cannot process individual input chunks and instead aggregate the streaming input from the previous step into a single value before processing it:\n- `PromptTemplate`\n- `ChatPromptTemplate`\n- `LLM`\n- `ChatModel`\n- `Retriever`\n- `Tool`\n- `RunnableFunction`\n- `RunnableRouter`\n\nLet see what happens when we try to stream them. 
🤨\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: const [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(\n      pageContent: 'David ported LangChain to Dart in LangChain.dart',\n    ),\n  ],\n);\nfinal retriever = vectorStore.asRetriever();\n\nawait retriever.stream('Who created LangChain.dart?').forEach(print);\n// [Document{pageContent: David ported LangChain to Dart in LangChain.dart}, \n// Document{pageContent: LangChain was created by Harrison}]\n```\n\nStream just yielded the final result from that component.\n\nThis is OK 🥹! Not all components have to implement streaming – in some cases streaming is either unnecessary, difficult or just doesn’t make sense.\n\nAn LCEL chain constructed using non-streaming components, will still be able to stream in a lot of cases, with streaming of partial output starting after the last non-streaming step in the chain.\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n    ChatMessageType.system,\n    'Answer the question based on only the following context:\\n{context}',\n  ),\n  (ChatMessageType.human, '{question}'),\n]);\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\n\nfinal retrievalChain = Runnable.fromMap<String>({\n  'context': retriever,\n  'question': Runnable.passthrough(),\n}).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\nawait retrievalChain\n    .stream('Who created LangChain.dart?')\n    .forEach((chunk) => stdout.write('$chunk|'));\n// |David| created| Lang|Chain|.dart|.||\n```\n"
  },
  {
    "path": "docs/get_started/get_started.md",
    "content": "# Get started\n\nGet started with LangChain:\n\n- [Installation](/get_started/installation.md)\n- [Quickstart](/get_started/quickstart.md)\n"
  },
  {
    "path": "docs/get_started/installation.md",
    "content": "# Installation\n\nTo get started, add LangChain.dart dependency to your project `pubspec.yaml` file:\n\n```yaml\ndependencies:\n  langchain: {version}\n```\n\nThen, run `pub get` to install the package.\n\nUsing LangChain will usually require integrations with one or more model providers, data stores, tools APIs, etc.\n\nFor example, if you want to use OpenAI’s APIs, you will need to add LangChain.dart OpenAI package as well:\n\n```yaml\ndependencies:\n  langchain: {version}\n  langchain_openai: {version}\n```\n\nCheck out the [packages](https://github.com/davidmigloz/langchain_dart/tree/main#packages) section for a list of available packages.\n"
  },
  {
    "path": "docs/get_started/quickstart.md",
    "content": "# Quickstart\n\nIn this quickstart we'll show you how to:\n\n- Get setup with LangChain.dart\n- Use the most basic and common components of LangChain: prompt templates, models, and output parsers\n- Use LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\n- Build a simple application with LangChain\n\nThat's a fair amount to cover! Let's dive in.\n\n## Setup\n\nTo get started, follow the [installation instructions](/get_started/installation.md) to install LangChain.dart.\n\nUsing LangChain.dart will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we'll use OpenAI's model APIs.\n\nFirst we'll need to add LangChain.dart OpenAI package:\n\n```yaml\ndependencies:\n  langchain: { version }\n  langchain_openai: { version }\n```\n\nAccessing the OpenAI API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys).\n\nThe library does not force you to use any specific key management strategy. You just need to pass the key on the `ChatOpenAI` constructor:\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nfinal llm = ChatOpenAI(apiKey: openaiApiKey);\n```\n\n## Building with LangChain.dart\n\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalone in simple applications, and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified `Runnable` interface that many modules implement, making it possible to seamlessly chain components.\n\nThe simplest and most common chain contains three things:\n\n- LLM/Chat Model: The language model is the core reasoning engine here. 
In order to work with LangChain, you need to understand the different types of language models and how to work with them.\n- Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial.\n- Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream.\n\nIn this guide we'll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\n\n## LLM / Chat Model\n\nThere are two types of language models:\n\n- `LLM`: underlying model takes a string as input and returns a string.\n- `ChatModel`: underlying model takes a list of messages as input and returns a message.\n\nStrings are simple, but what exactly are messages? The base message interface is defined by `ChatMessage`, which has two required attributes:\n\n- `content`: The content of the message. Usually a string.\n- `role`: The entity from which the `ChatMessage` is coming.\n\nLangChain provides several objects to easily distinguish between different roles:\n\n- `HumanChatMessage`: A `ChatMessage` coming from a human/user.\n- `AIChatMessage`: A `ChatMessage` coming from an AI/assistant.\n- `SystemChatMessage`: A `ChatMessage` coming from the system.\n- `FunctionChatMessage` / `ToolChatMessage`: A `ChatMessage` containing the output of a function or tool call.\n\nIf none of those roles sound right, there is also a `CustomChatMessage` class where you can specify the role manually. \n\nLangChain provides a common interface that's shared by both `LLMs` and `ChatModels`. 
However, it's useful to understand the difference in order to most effectively construct prompts for a given language model.\n\nThe simplest way to call an `LLM` or `ChatModel` is using `.invoke()`, the universal call method for all LangChain Expression Language (LCEL) objects:\n\n- `LLM.invoke`: Takes in a string, returns a string.\n- `ChatModel.invoke`: Takes in a list of `ChatMessage`, returns a `ChatMessage`.\n\nThe input types for these methods are actually more general than this, but for simplicity here we can assume `LLMs` only take strings and `ChatModels` only takes lists of messages. Check out the \"Go deeper\" section below to learn more about model invocation.\n\nLet's see how to work with these different types of models and these different types of inputs. First, let's import an `LLM` and a `ChatModel`.\n\n```dart\nfinal llm = OpenAI(apiKey: openaiApiKey);\nfinal chatModel = ChatOpenAI(apiKey: openaiApiKey);\n```\n\n`LLM` and `ChatModel` objects are effectively configuration objects. You can initialize them with parameters like `temperature` and others, and pass them around.\n\n```dart\nconst text = 'What would be a good company name for a company that makes colorful socks?';\nfinal messages = [ChatMessage.humanText(text)];\n\nfinal res1 = await llm.invoke(PromptValue.string(text));\nprint(res1.output);\n// 'Feetful of Fun'\n\nfinal res2 = await chatModel.invoke(PromptValue.chat(messages));\nprint(res2.output);\n// AIChatMessage(content='RainbowSock Co.')\n```\n\n?> `LLM.invoke` and `ChatModel.invoke` take as input a `PromptValue`. This is an object that defines its own custom logic for returning its inputs either as a string or as messages. `LLM`s have logic for coercing any of these into a string, and `ChatModel`s have logic for coercing any of these to messages. 
The fact that `LLM` and `ChatModel` accept the same inputs means that you can directly swap them for one another in most chains without breaking anything, though it's of course important to think about how inputs are being coerced and how that may affect model performance. To dive deeper on models head to the [Language models](/modules/model_io/models/models.md) section.\n\n## Prompt templates\n\nMost LLM applications do not pass user input directly into an `LLM`. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.\n\nIn the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it would be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions.\n\n`PromptTemplates` help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt. This can start off very simple - for example, a prompt to produce the above string would just be:\n\n```dart\nfinal prompt = PromptTemplate.fromTemplate(\n  'What is a good name for a company that makes {product}?',\n);\nfinal res = prompt.format({'product': 'colorful socks'});\nprint(res);\n// 'What is a good name for a company that makes colorful socks?'\n```\n\nHowever, the advantages of using these over raw string formatting are several. You can \"partial\" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt. For explanations of these functionalities, see the [prompts](/modules/model_io/prompts/prompts.md) for more detail.\n\n`PromptTemplates` can also be used to produce a list of messages. 
In this case, the prompt not only contains information about the content, but also each message (its role, its position in the list, etc) Here, what happens most often is a `ChatPromptTemplate` is a list of `ChatMessagePromptTemplates`. Each `ChatMessagePromptTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. Let's take a look at this below:\n\n```dart\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nconst humanTemplate = '{text}';\n\nfinal chatPrompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, template),\n  (ChatMessageType.human, humanTemplate),\n]);\n\nfinal res = chatPrompt.formatMessages({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// [\n//   SystemChatMessage(content='You are a helpful assistant that translates English to French.'),\n//   HumanChatMessage(content='I love programming.')\n// ]\n```\n\n`ChatPromptTemplates` can also be constructed in other ways - see the section on [prompts](/modules/model_io/prompts/prompts.md) for more detail.\n\n## Output parsers\n\n`OutputParsers` convert the raw output of an LLM into a format that can be used downstream. There are few main type of `OutputParsers`, including:\n\n- Convert text from LLM -> structured information (e.g. 
JSON).\n- Convert a `ChatMessage` into just a string.\n- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string.\n\nFor full information on this, see the section on [output parsers](/modules/model_io/output_parsers/output_parsers.md).\n\nIn this getting started guide, we will write our own output parser - one that converts a comma separated list into a list.\n\n```dart\nclass CommaSeparatedListOutputParser \n    extends BaseOutputParser<ChatResult, OutputParserOptions, List<String>> {\n  \n  const CommaSeparatedListOutputParser()\n      : super(defaultOptions: const OutputParserOptions());\n\n  @override\n  Future<List<String>> invoke(\n      final ChatResult input, {\n      final OutputParserOptions? options,\n  }) async {\n    final message = input.output;\n    return message.content.trim().split(',');\n  }\n}\n```\n\n```dart\nfinal res = await const CommaSeparatedListOutputParser().invoke(\n  const ChatResult(\n    id: 'id',\n    output: AIChatMessage(content: 'hi, bye'),\n    finishReason: FinishReason.stop,\n    metadata: {},\n    usage: LanguageModelUsage(),\n  ),\n);\nprint(res);\n// ['hi',  'bye']\n```\n\n## Composing with LCEL\n\nWe can now combine all these into one chain. This chain will take input variables, pass those to a prompt template to create a prompt, pass the prompt to a language model, and then pass the output through an (optional) output parser. This is a convenient way to bundle up a modular piece of logic. 
Let's see it in action!\n\n```dart\nconst systemTemplate = '''\nYou are a helpful assistant who generates comma separated lists.\nA user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\nONLY return a comma separated list, and nothing more.\n''';\nconst humanTemplate = '{text}';\n\nfinal chatPrompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, systemTemplate),\n  (ChatMessageType.human, humanTemplate),\n]);\n\nfinal chatModel = ChatOpenAI(apiKey: openAiApiKey);\n\nfinal chain = chatPrompt.pipe(chatModel).pipe(CommaSeparatedListOutputParser());\n// Alternative syntax:\n// final chain = chatPrompt | chatModel | CommaSeparatedListOutputParser();\n\nfinal res = await chain.invoke({'text': 'colors'});\nprint(res); // ['red', 'blue', 'green', 'yellow', 'orange']\n```\n\nNote that we are using the `.pipe` syntax (or alternatively the `|` syntax) to join these components together. This syntax is powered by the LangChain Expression Language (LCEL) and relies on the universal `Runnable` interface that all of these objects implement. To learn more about this syntax, read the documentation [here](/expression_language/expression_language.md).\n\n## Next steps\n\nThis is it! We've now gone over how to create the core building block of LangChain applications. There are a lot more features in all three of these than we can cover here. To continue on your journey:\n\n- Read up on [LangChain Expression Language](/expression_language/expression_language.md) to learn how to chain these components together.\n- [Dive deeper](/modules/model_io/model_io.md) into LLMs, prompts, and output parsers and learn the other [key components](/modules/modules.md).\n- Explore common [end-to-end use cases](https://python.langchain.com/docs/use_cases).\n"
  },
  {
    "path": "docs/get_started/security.md",
    "content": "# Security\n\nLangChain has a large ecosystem of integrations with various external resources like local and remote file systems, APIs and databases. These integrations allow developers to create versatile applications that combine the power of LLMs with the ability to access, interact with and manipulate external resources.\n\n## Best Practices\n\nWhen building such applications developers should remember to follow good security practices:\n\n- **[Limit Permissions](https://en.wikipedia.org/wiki/Principle_of_least_privilege)**: Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), etc. as appropriate for your application.\n- **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it’s safest to assume that any LLM able to use those credentials may in fact delete data.\n- **[Defense in Depth](https://en.wikipedia.org/wiki/Defense_in_depth_(computing))**: No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It’s best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. 
For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.\n\nRisks of not doing so include, but are not limited to:\n- Data corruption or loss.\n- Unauthorized access to confidential information.\n- Compromised performance or availability of critical resources.\n\nExample scenarios with mitigation strategies:\n- A user may ask an agent with access to the file system to delete files that should not be deleted or read the content of files that contain sensitive information. To mitigate, limit the agent to only use a specific directory and only allow it to read or write files that are safe to read or write. Consider further sandboxing the agent by running it in a container.\n- A user may ask an agent with write access to an external API to write malicious data to the API, or delete data from that API. To mitigate, give the agent read-only API keys, or limit it to only use endpoints that are already resistant to such misuse.\n- A user may ask an agent with access to a database to drop a table or mutate the schema. To mitigate, scope the credentials to only the tables that the agent needs to access and consider issuing READ-ONLY credentials.\n\nIf you're building applications that access external resources like file systems, APIs or databases, consider speaking with your company's security team to determine how to best design and secure your applications.\n\n## Reporting a Vulnerability\n\nPlease report security vulnerabilities via [GitHub Security](https://github.com/davidmigloz/langchain_dart/security). This will ensure the issue is promptly triaged and acted upon as needed.\n"
  },
  {
    "path": "docs/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <!-- Google tag (gtag.js) -->\n    <script async src=\"https://www.googletagmanager.com/gtag/js?id=G-MVYMYTERBK\"></script>\n    <script>\n        window.dataLayer = window.dataLayer || [];\n        function gtag(){dataLayer.push(arguments);}\n        gtag('js', new Date());\n\n        gtag('config', 'G-MVYMYTERBK');\n    </script>\n\n    <meta charset=\"UTF-8\">\n    <title>LangChain.dart docs</title>\n\n    <link rel=\"shortcut icon\" href=\"/favicon.ico\"/>\n\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge,chrome=1\"/>\n    <meta name=\"description\"\n          content=\"LangChain.dart official library documentation. Build powerful LLM-based Dart/Flutter applications. Includes examples and tutorials.\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0, minimum-scale=1.0\">\n\n    <meta name=\"twitter:image:src\"\n          content=\"https://opengraph.githubassets.com/f1458e71b17f0b6bc45d4be505ff678233b118e8cb75c110083b69305e5aec71/davidmigloz/langchain_dart\"/>\n    <meta name=\"twitter:card\" content=\"summary_large_image\"/>\n    <meta name=\"twitter:title\"\n          content=\"Build powerful LLM-based Dart/Flutter applications.\"/>\n    <meta name=\"twitter:description\"\n          content=\"LangChain.dart official library documentation. Includes examples and tutorials.\"/>\n    <meta property=\"og:image\"\n          content=\"https://opengraph.githubassets.com/f1458e71b17f0b6bc45d4be505ff678233b118e8cb75c110083b69305e5aec71/davidmigloz/langchain_dart\"/>\n    <meta property=\"og:image:alt\"\n          content=\"LangChain.dart official library documentation. Build powerful LLM-based Dart/Flutter applications. 
Includes examples and tutorials.\"/>\n    <meta property=\"og:image:width\" content=\"1200\"/>\n    <meta property=\"og:image:height\" content=\"600\"/>\n    <meta property=\"og:site_name\" content=\"LangChain.dart\"/>\n    <meta property=\"og:type\" content=\"website\"/>\n    <meta property=\"og:title\"\n          content=\"Build powerful LLM-based Dart/Flutter applications.\"/>\n    <meta property=\"og:url\" content=\"https://langchaindart.dev\"/>\n    <meta property=\"og:description\"\n          content=\"LangChain.dart official library documentation. Includes examples and tutorials.\"/>\n\n    <link rel=\"stylesheet\" href=\"//cdn.jsdelivr.net/npm/docsify@4/lib/themes/vue.css\"/>\n    <link rel=\"stylesheet\" href=\"//cdn.jsdelivr.net/npm/docsify-darklight-theme@latest/dist/style.min.css\"/>\n    <link rel=\"stylesheet\" href=\"/css/toc.css\"/>\n    <link rel=\"stylesheet\" href=\"/css/sidebar.css\"/>\n    <link rel=\"stylesheet\" href=\"/css/style.css\"/>\n</head>\n<body>\n<div id=\"app\"></div>\n<script>\n    window.$docsify = {\n        name: 'LangChain.dart',\n        repo: 'https://github.com/davidmigloz/langchain_dart',\n        loadSidebar: true,\n        sidebarDisplayLevel: 1,\n        maxLevel: 2,\n        subMaxLevel: 1,\n        loadFooter: true,\n        search: 'auto',\n        auto2top: true,\n        formatUpdated: '{MM}/{DD} {HH}:{mm}',\n        darklightTheme: {\n            defaultTheme: 'light',\n        },\n        toc: {\n            tocMaxLevel: 5,\n            target: 'h2, h3, h4, h5, h6',\n            ignoreHeaders:  ['<!-- {docsify-ignore} -->', '<!-- {docsify-ignore-all} -->']\n        },\n        tabs: {\n            theme      : 'material',\n            tabComments: true,      // default\n            tabHeadings: true       // default\n        }\n    }\n</script>\n<!-- Docsify v4 -->\n<script src=\"//cdn.jsdelivr.net/npm/docsify@4\"></script>\n<script 
src=\"//cdn.jsdelivr.net/npm/docsify-darklight-theme@latest/dist/index.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/prismjs@1/components/prism-dart.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/prismjs@1/components/prism-markdown.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/prismjs@1/components/prism-yaml.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/prismjs@1/components/prism-json.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/docsify/lib/plugins/search.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/docsify-copy-code/dist/docsify-copy-code.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/docsify-pagination/dist/docsify-pagination.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/@alertbox/docsify-footer/dist/docsify-footer.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/docsify-tabs@1\"></script>\n<script src=\"https://unpkg.com/docsify-plugin-toc@latest/dist/docsify-plugin-toc.min.js\"></script>\n<script src=\"//cdn.jsdelivr.net/npm/docsify-sidebar-collapse/dist/docsify-sidebar-collapse.min.js\"></script>\n\n</body>\n</html>\n"
  },
  {
    "path": "docs/modules/agents/agent_types/agent_types.md",
    "content": "# Agent types\n\n## Action agents\n\nAgents use an LLM to determine which actions to take and in what order. An\naction can either be using a tool and observing its output, or returning a\nresponse to the user. Here are the agents available in LangChain.\n\n### OpenAI Functions\n\nCertain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been\nexplicitly fine-tuned to detect when a function should be called and respond\nwith the inputs that should be passed to the function. The OpenAI Functions\nAgent is designed to work with these models.\n[More info...](./openai_functions_agent.md)\n"
  },
  {
    "path": "docs/modules/agents/agent_types/tools_agent.md",
    "content": "# Tools Agent\n\nAn agent powered by the [tool calling API](/modules/model_io/models/chat_models/how_to/tools.md).\n\nThis agent is designed to work with any chat model that supports tool calling. It can interpret the model's output and decide when to call specific tools based on that output. \n\n**Supported models:**\nYou can use any chat model that supports tool calling, like `ChatOpenAI`, `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the [tool calling docs](/modules/model_io/models/chat_models/how_to/tools.md) for a complete list.\n\n## Usage\n\nIn the following example, we use `ChatOllama` with the `llama3.2` model and a calculator tool (included in `langchain_community`) to calculate the result of a mathematical expression.\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\n//...\n\nfinal llm = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n  ),\n);\nfinal tool = CalculatorTool();\nfinal agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]);\nfinal executor = AgentExecutor(agent: agent);\nfinal res = await executor.run(\n  'What is 40 raised to the power of 0.43? '\n  'Return the result with 3 decimals.',\n);\nprint(res);\n// The result is: 4.885\n```\n\n## Custom tools\n\nYou can easily call your own functions by wrapping them in a `Tool`. 
You can also add memory to the agent by passing it when creating the agent.\n\nLet's see an example of how to do this.\n\nFirst, let's create a class that will be the input for our tool.\n\n```dart\n@immutable\nclass SearchInput {\n  const SearchInput({\n    required this.query,\n    required this.n,\n  });\n\n  final String query;\n  final int n;\n\n  SearchInput.fromJson(final Map<String, dynamic> json)\n      : this(\n    query: json['query'] as String,\n    n: json['n'] as int,\n  );\n}\n```\n\nNow let's define the tool:\n\n```dart\nfinal searchTool = Tool.fromFunction<SearchInput, String>(\n  name: 'search',\n  description: 'Tool for searching the web.',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'query': {\n        'type': 'string',\n        'description': 'The query to search for',\n      },\n      'n': {\n        'type': 'integer',\n        'description': 'The number of results to return',\n      },\n    },\n    'required': ['query'],\n  },\n  func: callYourSearchFunction,\n  getInputFromJson: SearchInput.fromJson,\n);\n```\n\nNotice that we need to provide a function that converts the JSON input that the model will send to our tool into the input class that we defined.\n\nThe tool will call `callYourSearchFunction` function with the parsed input. 
For simplicity, we will just mock the search function.\n```dart\nString callYourSearchFunction(final SearchInput input) {\n    final n = input.n;\n    final res = List<String>.generate(\n      n,\n      (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}',\n    );\n    return 'Results:\\n${res.join('\\n')}';\n}\n```\n\nNow we can create the agent and run it:\n\n```dart\nfinal llm = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3-groq-tool-use',\n    temperature: 0,\n  ),\n);\n\nfinal memory = ConversationBufferMemory(returnMessages: true);\nfinal agent = ToolsAgent.fromLLMAndTools(\n  llm: llm,\n  tools: [searchTool],\n  memory: memory,\n);\n\nfinal executor = AgentExecutor(agent: agent);\n\nfinal res1 = await executor.run(\n  'Search for cat names. Return only 3 results.',\n);\nprint(res1);\n// Here are the top 3 cat names I found: AAA, BBB, and CCC.\n```\n\n## Custom agent using LangChain Expression Language (LCEL)\n\nYou can replicate the functionality of the Tools Agent by using the LangChain Expression Language (LCEL) directly.\n\n```dart\nfinal openAiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal prompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant'),\n  (ChatMessageType.human, '{input}'),\n  (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'),\n]);\n\nfinal tool = CalculatorTool();\n\nfinal model = ChatOpenAI(\n  apiKey: openAiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4o-mini',\n    temperature: 0,\n    tools: [tool],\n  ),\n);\n\nconst outputParser = ToolsAgentOutputParser();\n\nList<ChatMessage> buildScratchpad(final List<AgentStep> intermediateSteps) {\n  return intermediateSteps\n      .map((s) {\n        return s.action.messageLog +\n            [\n              ChatMessage.tool(\n                toolCallId: s.action.id,\n                content: s.observation,\n              ),\n            ];\n      })\n      .expand((m) => m)\n      
.toList(growable: false);\n}\n\nfinal agent = Agent.fromRunnable(\n  Runnable.mapInput(\n    (AgentPlanInput planInput) => <String, dynamic>{\n      'input': planInput.inputs['input'],\n      'agent_scratchpad': buildScratchpad(planInput.intermediateSteps),\n    },\n  ).pipe(prompt).pipe(model).pipe(outputParser),\n  tools: [tool],\n);\nfinal executor = AgentExecutor(agent: agent);\n\nfinal res = await executor.invoke({\n  'input': 'What is 40 raised to the power of 0.43? '\n      'Return the result with 3 decimals.',\n});\nprint(res['output']);\n// The result of 40 raised to the power of 0.43 is approximately 4.885.\n```\n\nIn this way, you can create your own custom agents with full control over their behavior, while still leveraging the flexibility of the Tools Agent to work with various language models and tools.\n"
  },
  {
    "path": "docs/modules/agents/agents.md",
    "content": "# Agents\n\nSome applications require a flexible chain of calls to LLMs and other tools\nbased on user input. The **Agent** interface provides the flexibility for such\napplications. An agent has access to a suite of tools, and determines which ones\nto use depending on the user input. Agents can use multiple tools, and use the\noutput of one tool as the input to the next.\n\nThere are two main types of agents:\n\n- **Action agents:** at each time-step, decide on the next action using the\n  outputs of all previous actions.\n- **Plan-and-execute agents:** decide on the full sequence of actions up front,\n  then execute them all without updating the plan.\n\nAction agents are suitable for small tasks, while plan-and-execute agents are\nbetter for complex or long-running tasks that require maintaining long-term\nobjectives and focus. Often the best approach is to combine the dynamism of an\naction agent with the planning abilities of a plan-and-execute agent by letting\nthe plan-and-execute agent use action agents to execute plans.\n\nFor a full list of agent types see\n[agent types](/modules/agents/agent_types/agent_types.md). Additional\nabstractions involved in agents are:\n\n- **Tools:** the actions an agent can take. What tools you give an agent highly\n  depend on what you want the agent to do.\n- **Toolkits:** wrappers around collections of tools that can be used together a\n  specific use case. For example, in order for an agent to interact with a SQL\n  database it will likely need one tool to execute queries and another to\n  inspect tables.\n\n## Action agents\n\nAt a high-level an action agent:\n\n1. Receives user input.\n2. Decides which tool, if any, to use and the tool input.\n3. Calls the tool and records the output (also known as an \"observation\").\n4. Decides the next step using the history of tools, tool inputs, and\n   observations.\n5. Repeats 3-4 until it determines it can respond directly to the user.\n6. 
Action agents are wrapped in agent executors, which are responsible for\n   calling the agent, getting back an action and action input, calling the tool\n   that the action references with the generated input, getting the output of\n   the tool, and then passing all that information back into the agent to get\n   the next action it should take.\n\nAlthough an agent can be constructed in many ways, it typically involves these\ncomponents:\n\n- **Prompt template:** responsible for taking the user input and previous steps\n  and constructing a prompt to send to the language model.\n- **Language model:** takes the prompt with user input and action history and\n  decides what to do next.\n- **Output parser:** takes the output of the language model and parses it into\n  the next action or a final answer.\n\n## Plan-and-execute agents\n\nAt a high-level a plan-and-execute agent:\n\n1. Receives user input.\n2. Plans the full sequence of steps to take.\n3. Executes the steps in order, passing the outputs of past steps as inputs to\n   future steps.\n\nThe most typical implementation is to have the planner be a language model,\nand the executor be an action agent.\n\n## Get started\n\nFirst, let's load the language model we're going to use to control the agent.\n\n```dart\nfinal llm = ChatOpenAI(\n  apiKey: openAiKey,\n  defaultOptions: ChatOpenAIOptions(temperature: 0),\n);\n```\n\nNext, let's load some tools to use. In this case, we're going to use a\ncalculator.\n\n```dart\nfinal tool = CalculatorTool();\nfinal tools = [tool];\n```\n\nFinally, let's initialize an agent with the tools, the language model, and the\ntype of agent we want to use.\n\n```dart\nfinal agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n```\n\nNow let's create the agent executor and test it out!\n\n```dart\nfinal executor = AgentExecutor(agent: agent);\nfinal res = await executor.run('What is 40 raised to the 0.43 power? 
');\nprint(res); // -> '40 raised to the power of 0.43 is approximately 4.8852' \n```\n"
  },
  {
    "path": "docs/modules/agents/toolkits/toolkits.md",
    "content": "# Toolkits\n\nToolkits are collections of tools that are designed to be used together for\nspecific tasks and have convenience loading methods.\n"
  },
  {
    "path": "docs/modules/agents/tools/calculator.md",
    "content": "# Calculator\n\nA tool that can be used to calculate the result of a math expression.\n\nExample:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'gpt-4',\n    temperature: 0,\n  ),\n);\nfinal tool = CalculatorTool();\nfinal agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]);\nfinal executor = AgentExecutor(agent: agent);\nfinal res = await executor.run('What is 40 raised to the 0.43 power? ');\nprint(res); // -> '40 raised to the power of 0.43 is approximately 4.8852'\n```\n"
  },
  {
    "path": "docs/modules/agents/tools/openai_dall_e.md",
    "content": "# DALL-E Image Generator\n\nWrapper for [OpenAI's DALL-E Image Generator API](https://platform.openai.com/docs/api-reference/images).\n\nGiven a prompt the model will generate an image.\n\nExample:\n\n```dart\nfinal llm = ChatOpenAI(\n  apiKey: openAiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    temperature: 0,\n  ),\n);\nfinal tools = <Tool>[\n  CalculatorTool(),\n  OpenAIDallETool(apiKey: openAiKey),\n];\nfinal agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\nfinal executor = AgentExecutor(agent: agent);\nfinal res = await executor.run(\n  'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. '\n  'Return ONLY the URL of the image. Do not add any explanation.',\n);\n```\n\nResult:\n![result](img/dall_e_3.png)\n"
  },
  {
    "path": "docs/modules/agents/tools/tavily_answer.md",
    "content": "# Tavily Answer\n\n## Overview\nThe `TavilyAnswerTool` is part of the [Tavily Search API](https://tavily.com) integration, specifically designed to provide direct answers to queries. This tool is optimized for scenarios where you need concise, accurate responses rather than raw search results.\n\n## Installation\n\nAdd these dependencies to your project:\n```yaml\ndependencies:\n  langchain: { version }\n  langchain_community: { version }\n```  \n\nInstall via terminal:\n\n#### Dart\n```bash\ndart pub add langchain langchain_community\n```\n\n#### Flutter\n```bash\nflutter pub add langchain langchain_community\n```\n\n## Configuration\n\n### Authentication Options\n\n#### 1. API Key (Recommended)\n```dart\nTavilyAnswerTool(apiKey: 'your_tavily_key')\n```\n\n#### 2. Base URL Override (For proxies/custom endpoints)\n```dart\nTavilyAnswerTool(baseUrl: 'https://your-proxy.com/')\n```\n\n## Basic Usage\n\n```dart\nimport 'package:langchain_community/langchain_community.dart';\n\nfinal tool = TavilyAnswerTool(\n  apiKey: Platform.environment['TAVILY_API_KEY'],\n);\n\nvoid main() async {\n  final answer = await tool.invoke('Explain quantum entanglement simply');\n  print(answer); \n  // -> 'Quantum entanglement is a phenomenon where two or more particles...'\n}\n```\n\n## Advanced Usage\n\n### Custom Configuration with Domain Filtering\n```dart\nfinal expertTool = TavilyAnswerTool(\n  baseUrl: 'https://science-proxy.com/',\n  defaultOptions: const TavilyAnswerToolOptions(\n    searchDepth: TavilySearchDepth.advanced,\n    includeDomains: ['nasa.gov', 'nature.com'],\n  ),\n);\n\nvoid main() async {\n  final expertAnswer = await expertTool.invoke('Latest Mars rover findings');\n  print(expertAnswer);\n  // -> The latest findings from NASA's Perseverance Mars rover...\n}\n```\n\n## Agent Integration Example\n\n```dart\nvoid main() async {\n  final aiAgent = ToolsAgent.fromLLMAndTools(\n    llm: ChatOpenAI(apiKey: openAiKey),\n    tools: [\n      
TavilyAnswerTool(apiKey: 'tavily_key'),\n      CalculatorTool(),\n    ],\n  );\n\n  final executor = AgentExecutor(agent: aiAgent);\n\n  final res = await executor.run(\n    'Calculate the GDP growth rate of France in 2023 using official sources.',\n  );\n\n  print(res);\n  // -> \"The GDP growth rate of France in 2023 was 0.70%, as reported by EUROSTAT.\"\n}\n```\n\n## Error Handling\n\n```dart\ntry {\n  return await tool.invoke(query);\n} on TavilyClientException catch (e) {\n  print('Error ${e.message}: ${e.code}');\n  return 'Failed to retrieve data';\n}\n```"
  },
  {
    "path": "docs/modules/agents/tools/tavily_search_results.md",
    "content": "# Tavily Search Results\n\n## Overview\nThe `TavilySearchResultsTool` is a component of the [Tavily Search API](https://tavily.com) integration that returns structured search results. This tool is ideal when you need detailed search data including sources, URLs, and confidence scores.\n\n## Installation\n\nAdd these dependencies to your project:\n```yaml\ndependencies:\n  langchain: { version }\n  langchain_community: { version }\n```  \n\nInstall via terminal:\n\n#### Dart\n```bash\ndart pub add langchain langchain_community\n```\n\n#### Flutter\n```bash\nflutter pub add langchain langchain_community\n```\n\n## Configuration\n\n### Authentication Options\n\n#### 1. API Key (Recommended)\n```dart\nTavilySearchResultsTool(apiKey: 'your_tavily_key')\n```\n\n#### 2. Base URL Override (For proxies/custom endpoints)\n```dart\nTavilySearchResultsTool(baseUrl: 'https://your-proxy.com/')\n```\n\n## Basic Usage\n\n```dart\nfinal searchTool = TavilySearchResultsTool(apiKey: 'your_key');\n\nvoid main() async {\n  final response = await searchTool.invoke('Latest renewable energy innovations');\n  final results = response.results;\n  \n  print(results.first.title); // -> '5 smart renewable energy innovations...'\n  print(results.first.url);   // -> 'https://www.weforum.org/stories/...'\n  print(results.first.score); // -> 0.98855\n}\n```\n\n## Advanced Features\n\n### Result Processing\n```dart\nvoid processResults(TavilySearchResults response) {\n  // Filter high-confidence results\n  final highConfidence = results.where((r) => r.score > 0.9).toList();\n\n  // Extract URLs for verification\n  final urls = results.map((r) => r.url).toList();\n\n  // Get content from specific domains\n  final scientificSources = results.where(\n    (r) => r.url.contains('nature.com') || r.url.contains('science.org'),\n  ).toList();\n}\n```\n\n### Custom Configuration\n```dart\nfinal customSearchTool = TavilySearchResultsTool(\n  apiKey: 'your_key',\n  defaultOptions: const 
TavilySearchResultsToolOptions(\n    searchDepth: TavilySearchDepth.advanced,\n    maxResults: 10,\n    includeRawContent: true,\n    includeDomains: ['trusted-source.com'],\n    excludeDomains: ['untrusted-source.com'],\n  ),\n);\n```\n\n## Agent Integration Example\n\n```dart\nvoid main() async {\n  final aiAgent = ToolsAgent.fromLLMAndTools(\n    llm: ChatOpenAI(apiKey: openAiKey),\n    tools: [\n      TavilySearchResultsTool(apiKey: 'tavily_key'),\n      CalculatorTool(),\n    ],\n  );\n\n  final executor = AgentExecutor(agent: aiAgent);\n\n  final res = await executor.run(\n    'Find recent research papers on quantum computing.',\n  );\n\n  print(res);\n  // Here are some recent research papers and resources on quantum computing:\n  // \n  // 1. **Evidence for the utility of quantum computing before fault tolerance**  \n  //    - *Published in Nature (2023)*  \n  ...\n}\n```\n\n## Error Handling\n\n```dart\ntry {\nreturn await tool.invoke(query);\n} on TavilyClientException catch (e) {\nprint('Error ${e.message}: ${e.code}');\nreturn 'Failed to retrieve data';\n}\n```"
  },
  {
    "path": "docs/modules/agents/tools/tools.md",
    "content": "# Tools\n\nTools are interfaces that an agent can use to interact with the world.\n\n## Get started\n\nTools are functions that agents can use to interact with the world. These tools\ncan be generic utilities (e.g. search), other chains, or even other agents.\n\n```dart\nfinal tool = CalculatorTool();\n```\n"
  },
  {
    "path": "docs/modules/chains/chains.md",
    "content": "# Chains\n\nUsing an LLM in isolation is fine for simple applications, but more complex\napplications require chaining LLMs - either with each other or with other\ncomponents.\n\nLangChain provides the **Chain** interface for such \"chained\" applications. We\ndefine a Chain very generically as a sequence of calls to components, which can\ninclude other chains.\n\nThis idea of composing components together in a chain is simple but powerful.\nIt drastically simplifies and makes more modular the implementation of complex\napplications, which in turn makes it much easier to debug, maintain, and\nimprove your applications.\n\nFor more specifics check out:\n\n- [How-to]() for walkthroughs of different chain features.\n- [Foundational]() to get acquainted with core building blockchains.\n- [Document]() to learn how to incorporate documents into chains.\n- [Popular]() chains for the most common use cases.\n- [Additional]() to see some of the more advanced chains and integrations that\n  you can use out of the box.\n\n## Why do we need chains?\n\nChains allow us to combine multiple components together to create a single,\ncoherent application. For example, we can create a chain that takes user input,\nformats it with a PromptTemplate, and then passes the formatted response to an\nLLM. 
We can build more complex chains by combining multiple chains together, or\nby combining chains with other components.\n\n## Get started\n\nThe `LLMChain` is a simple chain that takes in a prompt template, formats it with the user input\nand returns the response from an LLM.\n\nTo use the `LLMChain`, first create a prompt template.\n\n```dart\nfinal llm = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\nfinal prompt = PromptTemplate.fromTemplate(\n  'What is a good name for a company that makes {product}?',\n);\n```\n\nWe can now create a very simple chain that will take user input, format the prompt with it, and\nthen send it to the LLM.\n\n```dart\nfinal chain = LLMChain(llm: llm, prompt: prompt);\nfinal res = await chain.run('colorful socks');\nprint(res);\n// -> 'Colorful Toes Co.'\n```\n\nIf there are multiple variables, you can input them all at once using a dictionary.\n\n```dart\nfinal chain = LLMChain(llm: llm, prompt: prompt);\nfinal res = await chain.run(\n  {\n    'company': 'ABC Startup',\n    'product': 'colorful socks',\n  },\n);\nprint(res);\n// -> 'Socktopia Colourful Creations.'\n```\n\nYou can use a chat model in an LLMChain as well:\n\n```dart\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\n\nconst template = 'What is a good name for a company that makes {product}?';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(template);\n\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages([humanMessagePrompt]);\n\nfinal chain = LLMChain(llm: chat, prompt: chatPrompt);\n\nfinal res = await chain.run('colorful socks');\nprint(res);\n// -> 'Rainbow Socks Co.'\n```\n"
  },
  {
    "path": "docs/modules/chains/documents/map_reduce.md",
    "content": "# Stuff\n\nThe `MapReduceDocumentsChain` first applies an LLM chain to each document\nindividually (the Map step), treating the chain output as a new document. It\nthen passes all the new documents to a separate combine documents chain to get a\nsingle output (the Reduce step). It can optionally first compress, or collapse,\nthe mapped documents to make sure that they fit in the combine documents chain (\nwhich will often pass them to an LLM). This compression step is performed\nrecursively if necessary.\n\n![MapReduceDocumentsChain](img/map_reduce.jpg)\n*Image source: [LangChain docs](https://python.langchain.com/docs/modules/chains/document/map_reduce)*\n\n```dart\nfinal mapPrompt = PromptTemplate.fromTemplate(\n  'Summarize this content: {context}',\n);\nfinal mapLlmChain = LLMChain(prompt: mapPrompt, llm: llm);\nfinal reducePrompt = PromptTemplate.fromTemplate(\n  'Combine these summaries: {context}',\n);\nfinal reduceLlmChain = LLMChain(prompt: reducePrompt, llm: llm);\nfinal reduceDocsChain = StuffDocumentsChain(llmChain: reduceLlmChain);\nfinal reduceChain = MapReduceDocumentsChain(\n  mapLlmChain: mapLlmChain,\n  reduceDocumentsChain: reduceDocsChain,\n);\nconst docs = [\n  Document(pageContent: 'Hello 1!'),\n  Document(pageContent: 'Hello 2!'),\n  Document(pageContent: 'Hello 3!'),\n];\nfinal res = await reduceChain.run(docs);\n```\n"
  },
  {
    "path": "docs/modules/chains/documents/stuff.md",
    "content": "# Stuff\n\nThe `StuffDocumentsChain` (\"stuff\" as in \"to stuff\" or \"to fill\") is the most\nstraightforward of the document chains. It takes a list of documents, inserts\nthem all into a prompt and passes that prompt to an LLM.\n\nThis chain is well-suited for applications where documents are small and only a\nfew are passed in for most calls.\n\n![StuffDocumentsChain](img/stuff.jpg)\n*Image source: [LangChain docs](https://python.langchain.com/docs/modules/chains/document/stuff)*\n\n```dart\nfinal prompt = PromptTemplate.fromTemplate(\n  'Print {foo}. Context: {context}',\n);\nfinal llm = OpenAI(apiKey: openaiApiKey);\nfinal llmChain = LLMChain(prompt: prompt, llm: llm);\nfinal stuffChain = StuffDocumentsChain(llmChain: llmChain)\nconst foo = 'Hello world!';\nconst docs = [\n  Document(pageContent: 'Hello 1!'),\n  Document(pageContent: 'Hello 2!'),\n];\nfinal res = await stuffChain.call({\n  'foo': foo,\n  'input_documents': docs,\n});\n```\n"
  },
  {
    "path": "docs/modules/chains/foundational/llm.md",
    "content": "# LLM\n\nAn `LLMChain` is a simple chain that adds some functionality around language\nmodels. It is used widely throughout LangChain, including in other chains and\nagents.\n\nAn `LLMChain` consists of a `PromptTemplate` and a language model (either an \nLLM or chat model). It formats the prompt template using the input key values\nprovided (and also memory key values, if available), passes the formatted string\nto LLM and returns the LLM output.\n\n## Get started\n\n```dart\nfinal llm = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\nfinal prompt = PromptTemplate.fromTemplate(\n  'What is a good name for a company that makes {product}?',\n);\nfinal chain = LLMChain(llm: llm, prompt: prompt);\nfinal res = await chain.run('colorful socks');\nprint(res);\n// -> 'Colorful Toes Co.'\n```\n"
  },
  {
    "path": "docs/modules/chains/foundational/sequential.md",
    "content": "# Sequential\n\nThe next step after calling a language model is make a series of calls to a\nlanguage model. This is particularly useful when you want to take the output\nfrom one call and use it as the input to another.\n\nIn this notebook we will walk through some examples for how to do this, using\nsequential chains. Sequential chains allow you to connect multiple chains and\ncompose them into pipelines that execute some specific scenario.\n\n## Types of Sequential Chains\n\nThere are two types of sequential chains:\n\n- `SimpleSequentialChain`: the simplest form of sequential chains, where each\n  step has a singular input/output, and the output of one step is the input to\n  the next.\n- `SequentialChain`: a more general form of sequential chains, allowing for\n  multiple inputs/outputs.\n\n### SimpleSequentialChain\n\nLet's see how we can use a `SimpleSequentialChain`:\n\n```dart\nfinal openAiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = ChatOpenAI(apiKey: openAiApiKey);\n\n// This is an LLMChain to write a synopsis given a title of a play\nconst synopsisTemplate = '''\nYou are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n\nTitle: {title}\nPlaywright: This is a synopsis for the above play:''';\nfinal synopsisPromptTemplate = PromptTemplate.fromTemplate(synopsisTemplate);\nfinal synopsisChain = LLMChain(llm: llm, prompt: synopsisPromptTemplate);\n\n// This is an LLMChain to write a review of a play given a synopsis\nconst reviewTemplate = '''\nYou are a play critic from the New York Times. 
Given the synopsis of play, it is your job to write a review for that play.\n\nPlay Synopsis:\n{synopsis}\nReview from a New York Times play critic of the above play:''';\nfinal reviewPromptTemplate = PromptTemplate.fromTemplate(reviewTemplate);\nfinal reviewChain = LLMChain(llm: llm, prompt: reviewPromptTemplate);\n\n// This is the overall chain where we run these two chains in sequence\nfinal overallChain = SimpleSequentialChain(chains: [synopsisChain, reviewChain]);\nfinal review = await overallChain.run('Tragedy at sunset on the beach');\nprint(review);\n```\n\n> In \"Tragedy at Sunset on the Beach,\" playwright delivers a riveting and \n> emotionally charged drama that captivates the audience from start to finish. \n> Set against the breathtaking backdrop of a tranquil beach at dusk, this play \n> explores the complexities of the human experience with depth and nuance...\n\n### Sequential Chain\n\nOf course, not all sequential chains will be as simple as passing a single\nstring as an argument and getting a single string as output for all steps in the\nchain. In this next example, we will experiment with more complex chains that\ninvolve multiple inputs, and where there are also multiple final outputs.\n\nOf particular importance is how we name the input/output variable names. In the\nabove example we didn't have to think about that because we were just passing\nthe output of one chain directly as input to the next, but here we do have to worry\nabout that because we have multiple inputs.\n\n```dart\nfinal openAiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = ChatOpenAI(apiKey: openAiApiKey);\n\n// This is an LLMChain to write a synopsis given a title of a play\nconst synopsisTemplate = '''\nYou are a playwright. 
Given the title of play, it is your job to write a synopsis for that title.\n\nTitle: {title}\nEra: {era}\nPlaywright: This is a synopsis for the above play:''';\nfinal synopsisPromptTemplate = PromptTemplate.fromTemplate(synopsisTemplate);\nfinal synopsisChain = LLMChain(\n  llm: llm,\n  prompt: synopsisPromptTemplate,\n  outputKey: 'synopsis',\n);\n\n// This is an LLMChain to write a review of a play given a synopsis\nconst reviewTemplate = '''\nYou are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n\nPlay Synopsis:\n{synopsis}\nReview from a New York Times play critic of the above play:''';\nfinal reviewPromptTemplate = PromptTemplate.fromTemplate(reviewTemplate);\nfinal reviewChain = LLMChain(\n  llm: llm,\n  prompt: reviewPromptTemplate,\n  outputKey: 'review',\n);\n\n// This is the overall chain where we run these two chains in sequence\nfinal overallChain = SequentialChain(chains: [synopsisChain, reviewChain]);\nfinal review = await overallChain.run({\n  'title': 'Tragedy at sunset on the beach',\n  'era': 'Victorian England',\n});\nprint(review);\n```\n\n> Tragedy at Sunset on the Beach: A Captivating Tale of Love and Sacrifice\n> \n> In the enchanting coastal town of Victorian England, Tragedy at Sunset on the \n> Beach transports its audience to a world where societal expectations and \n> forbidden love collide. This tragic play, set against the backdrop of a \n> breathtaking sunset on the beach, delves into the complexities of a society \n> bound by rigid rules and the heart-wrenching consequences of defying them...\n\n## Memory in Sequential Chains\n\nSometimes you may want to pass along some context to use in each step of the\nchain or in a later part of the chain, but maintaining and chaining together the\ninput/output variables can quickly get messy. 
Using `SimpleMemory` is a\nconvenient way to manage this and clean up your chains.\n\nFor example, using the previous playwright `SequentialChain`, let's say you\nwanted to include some context about date, time and location of the play, and\nusing the generated synopsis and review, create some social media post text. You\ncould add these new context variables as input variables, or we can add\na `SimpleMemory` to the chain to manage this context:\n\n```dart\nfinal openAiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = ChatOpenAI(apiKey: openAiApiKey);\n\n// This is an LLMChain to write a synopsis given a title of a play\nconst synopsisTemplate = '''\nYou are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n\nTitle: {title}\nEra: {era}\nPlaywright: This is a synopsis for the above play:''';\nfinal synopsisPromptTemplate = PromptTemplate.fromTemplate(synopsisTemplate);\nfinal synopsisChain = LLMChain(\n  llm: llm,\n  prompt: synopsisPromptTemplate,\n  outputKey: 'synopsis',\n);\n\n// This is an LLMChain to write a review of a play given a synopsis\nconst reviewTemplate = '''\nYou are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n\nPlay Synopsis:\n{synopsis}\nReview from a New York Times play critic of the above play:''';\nfinal reviewPromptTemplate = PromptTemplate.fromTemplate(reviewTemplate);\nfinal reviewChain = LLMChain(\n  llm: llm,\n  prompt: reviewPromptTemplate,\n  outputKey: 'review',\n);\n\n// This is an LLMChain to write a social post\nconst socialTemplate = '''\nYou are a social media manager for a theater company.  
Given the title of play, the era it is set in, the date,time and location, the synopsis of the play, and the review of the play, it is your job to write a social media post for that play.\n\nHere is some context about the time and location of the play:\nDate and Time: {time}\nLocation: {location}\n\nPlay Synopsis:\n{synopsis}\nReview from a New York Times play critic of the above play:\n{review}\n\nSocial Media Post:''';\nfinal socialPromptTemplate = PromptTemplate.fromTemplate(socialTemplate);\nfinal socialChain = LLMChain(\n  llm: llm,\n  prompt: socialPromptTemplate,\n  outputKey: 'social_post_text',\n);\n\n// This is the overall chain where we run these three chains in sequence\nfinal overallChain = SequentialChain(\n  memory: const SimpleMemory(\n    memories: {\n      'time': 'December 25th, 8pm PST',\n      'location': 'Theater in the Park'\n    },\n  ),\n  chains: [synopsisChain, reviewChain, socialChain],\n);\nfinal review = await overallChain.run({\n  'title': 'Tragedy at sunset on the beach',\n  'era': 'Victorian England',\n});\nprint(review);\n```\n\n> 🌅 Don't miss out on the mesmerizing production of \"Tragedy at Sunset on the \n> Beach\" at Theater in the Park on December 25th at 8pm PST! 🎭✨ Transporting \n> audiences to the captivating world of a Victorian beach resort in England, \n> this gripping melodrama delves into the depths of human emotions, societal \n> constraints, and the consequences of choices made in the face of desire.\n> \n> 💔 Follow the lives of three prominent families as their idyllic holiday \n> takes a dark turn. Lady Adelaide, torn between duty and love; Lord Reginald, \n> embodying societal expectations; and the enigmatic artist, Mr. Theodore, who \n> radiates charm and mystery. Witness their intricate relationships unravel \n> against the backdrop of a breathtaking sunset on the beach.\n> \n> 🤫 Secrets, hidden desires, and forbidden love abound in this riveting tale. 
\n> The young and naive Miss Charlotte falls madly in love with a penniless poet, \n> Mr. Edmund, leading to a ticking time bomb of societal expectations and \n> fervent desires. And with the arrival of the mysterious Baron, disruption and \n> suspense only escalate, pushing the families to the brink of destruction.\n> \n> ✨ \"Tragedy at Sunset on the Beach\" is a thought-provoking exploration of \n> love, betrayal, and the consequences of choices made. This Victorian \n> melodrama will captivate you from the moment the curtains rise, leaving you \n> breathless until the shocking climax.\n> \n> ⭐️ The New York Times calls it a \"mesmerizing production\" that delves into \n> the complexities of human emotions, societal constraints, and the devastating \n> consequences of choices made in the face of desire.\n> \n> 🎟️ Get your tickets now for an unforgettable evening of theater that will \n> leave you contemplating the power of love, societal expectations, and the \n> fragility of happiness. Don't miss out on this captivating portrayal of \n> tragedy at its most poignant. Book your seats today! #TragedyAtSunset \n> #TheaterInthePark #VictorianMelodrama\n"
  },
  {
    "path": "docs/modules/chains/getting_started.md",
    "content": "# Getting Started\n\n## Different ways of calling chains\n\nAll classes inherited from `BaseChain` offer a few ways of running chain logic. The most direct one \nis by `call()` (a chain is a [callable object](https://dart.dev/language/callable-objects)):\n\n```dart\nfinal chat = final chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\nconst template = 'Tell me a {adjective} joke';\nfinal prompt = PromptTemplate.fromTemplate(template);\nfinal chain = LLMChain(llm: chat, prompt: prompt);\nfinal res = await chain({'adjective': 'corny'});\nprint(res);\n// -> {adjective: corny, text: Why did the tomato turn red? Because it saw the salad dressing!}\n```\n\nBy default, `call()` returns both the input and output key values. You can configure it to only \nreturn output key values by setting `returnOnlyOutputs` to `true`.\n\n```dart\nfinal res = await chain({'adjective': 'corny'}, returnOnlyOutputs: true);\nprint(res);\n// -> {text: Why did the tomato turn red? Because it saw the salad dressing!}\n```\n\nIf the `Chain` only outputs one output key (i.e. only has one element in its `outputKeys`), you can \nuse `run` method. Note that run outputs a `String` instead of a `Map`.\n\n```dart\nfinal res = await chain.run({'adjective': 'corny'});\nprint(res);\n// -> 'Why did the tomato turn red? Because it saw the salad dressing!'\n```\n\nIn the case of one input key, you can input the string directly without specifying the input \nmapping.\n\n```dart\n// These two are equivalent\nfinal res1 = await chain.run({'adjective': 'corny'});\nfinal res2 = await chain.run('corny');\n// -> 'Why did the tomato turn red? Because it saw the salad dressing!'\n\n// These two are also equivalent\nfinal res3 = await chain({'adjective': 'corny'});\nfinal res4 = await chain('corny');\n// -> {adjective: corny, text: Why did the tomato turn red? 
Because it saw the salad dressing!}\n```\n\nTips: You can easily integrate a Chain object as a Tool in your Agent via its run method.\n\n## Add memory to chains\n\n`Chain` supports taking a `BaseMemory` object as its memory argument, allowing `Chain` object to \npersist data across multiple calls. In other words, it makes `Chain` a stateful object.\n\n```dart\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\nfinal memory = ConversationBufferMemory(returnMessages: true);\nfinal conversation = ConversationChain(llm: chat, memory: memory);\n\nfinal output1 = await conversation.run(\n  'Answer briefly. What are the first 3 colors of a rainbow?',\n);\nprint(output1);\n// -> 'The first three colors of a rainbow are red, orange, and yellow.'\n\nfinal output2 = await conversation.run('And the next 4?');\nprint(output2);\n// -> 'The next four colors of a rainbow are green, blue, indigo, and violet.'\n```\n\nEssentially, `BaseMemory` defines an interface of how LangChain stores memory. It allows reading \nof stored data through `loadMemoryVariables` method and storing new data through `saveContext` \nmethod. You can learn more about it in [Memory](/modules/memory/memory.md) section.\n\n## Debug Chain\n\nTODO\n\n## Combine chains with the `SequentialChain`\n\nTODO\n\n## Create a custom chain\n\nLangChain provides many chains out of the box, but sometimes you may want to create a custom chain \nfor your specific use case. For this example, we will create a custom chain that concatenates the \noutputs of 2 `LLMChains`.\n\nIn order to create a custom chain:\n\n1. Start by subclassing the `BaseChain` class. \n2. Fill out the `inputKeys` and `outputKeys` properties. \n3. 
Implement the `callInternal` method that runs the logic of the chain.\n\nThese steps are demonstrated in the example below:\n\n```dart\nclass ConcatenateChain extends BaseChain {\n  const ConcatenateChain({\n    required this.chain1,\n    required this.chain2,\n  });\n\n  final LLMChain chain1;\n  final LLMChain chain2;\n\n  @override\n  String get chainType => 'concat';\n\n  @override\n  Set<String> get inputKeys {\n    // Union of the input keys of the two chains\n    return <String>{\n      ...chain1.inputKeys,\n      ...chain2.inputKeys,\n    };\n  }\n\n  @override\n  Set<String> get outputKeys => const {'concat_output'};\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues values) async {\n    final output1 = await chain1.run(values);\n    final output2 = await chain2.run(values);\n    return {'concat_output': output1 + output2};\n  }\n}\n```\n\nNow, we can try running the chain that we called:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\n\nfinal prompt1 = PromptTemplate.fromTemplate(\n'What is a good name for a company that makes {product}?',\n);\nfinal chain1 = LLMChain(llm: llm, prompt: prompt1);\n\nfinal prompt2 = PromptTemplate.fromTemplate(\n'What is a good slogan for a company that makes {product}?',\n);\nfinal chain2 = LLMChain(llm: llm, prompt: prompt2);\n\nfinal concatChain = ConcatenateChain(chain1: chain1, chain2: chain2);\nfinal concatOutput = await concatChain.run('colorful socks');\nprint(concatOutput);\n// -> '\\nFancy Footwear.\\n\\nSock it to 'em with style!'\n```\n\nThat’s it! For more details about how to do cool things with Chains, check out the how-to guide \nfor chains.\n"
  },
  {
    "path": "docs/modules/chains/how_to/call_methods.md",
    "content": "# Different call methods\n\nAll classes inherited from `BaseChain` offer a few ways of running chain logic. \nThe most direct one is by `call()` (a chain is a \n[callable object](https://dart.dev/language/callable-objects)):\n\n```dart\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\nconst template = 'Tell me a {adjective} joke';\nfinal prompt = PromptTemplate.fromTemplate(template);\nfinal chain = LLMChain(llm: chat, prompt: prompt);\nfinal res = await chain({'adjective': 'corny'});\nprint(res);\n// -> {adjective: corny, text: Why did the tomato turn red? Because it saw the salad dressing!}\n```\n\nBy default, `call()` returns both the input and output key values. You can \nconfigure it to only return output key values by setting `returnOnlyOutputs` to \n`true`.\n\n```dart\nfinal res = await chain({'adjective': 'corny'}, returnOnlyOutputs: true);\nprint(res);\n// -> {text: Why did the tomato turn red? Because it saw the salad dressing!}\n```\n\nIf the `Chain` only outputs one output key (i.e. only has one element in its \n`outputKeys`), you can use `run` method. Note that run outputs a `String` \ninstead of a `Map`.\n\n```dart\nfinal res = await chain.run({'adjective': 'corny'});\nprint(res);\n// -> 'Why did the tomato turn red? Because it saw the salad dressing!'\n```\n\nIn the case of one input key, you can input the string directly without \nspecifying the input mapping.\n\n```dart\n// These two are equivalent\nfinal res1 = await chain.run({'adjective': 'corny'});\nfinal res2 = await chain.run('corny');\n// -> 'Why did the tomato turn red? Because it saw the salad dressing!'\n\n// These two are also equivalent\nfinal res3 = await chain({'adjective': 'corny'});\nfinal res4 = await chain('corny');\n// -> {adjective: corny, text: Why did the tomato turn red? 
Because it saw the salad dressing!}\n```\n\nTips: You can easily integrate a Chain object as a Tool in your Agent via its \nrun method.\n\n## Add memory to chains\n\n`Chain` supports taking a `BaseMemory` object as its memory argument, allowing\n`Chain` object to persist data across multiple calls. In other words, it makes \n`Chain` a stateful object.\n\n```dart\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\nfinal memory = ConversationBufferMemory(returnMessages: true);\nfinal conversation = ConversationChain(llm: chat, memory: memory);\n\nfinal output1 = await conversation.run(\n  'Answer briefly. What are the first 3 colors of a rainbow?',\n);\nprint(output1);\n// -> 'The first three colors of a rainbow are red, orange, and yellow.'\n\nfinal output2 = await conversation.run('And the next 4?');\nprint(output2);\n// -> 'The next four colors of a rainbow are green, blue, indigo, and violet.'\n```\n\nEssentially, `BaseMemory` defines an interface of how LangChain stores memory. \nIt allows reading of stored data through `loadMemoryVariables` method and \nstoring new data through `saveContext` method. You can learn more about it \nin [Memory](/modules/memory/memory.md) section.\n"
  },
  {
    "path": "docs/modules/chains/how_to/custom_chain.md",
    "content": "# Custom chain\n\nLangChain provides many chains out of the box, but sometimes you may want to create a custom chain \nfor your specific use case. For this example, we will create a custom chain that concatenates the \noutputs of 2 `LLMChains`.\n\nIn order to create a custom chain:\n\n1. Start by subclassing the `BaseChain` class. \n2. Fill out the `inputKeys` and `outputKeys` properties. \n3. Implement the `callInternal` method that runs the logic of the chain.\n\nThese steps are demonstrated in the example below:\n\n```dart\nclass ConcatenateChain extends BaseChain {\n  const ConcatenateChain({\n    required this.chain1,\n    required this.chain2,\n  });\n\n  final LLMChain chain1;\n  final LLMChain chain2;\n\n  @override\n  String get chainType => 'concat';\n\n  @override\n  Set<String> get inputKeys {\n    // Union of the input keys of the two chains\n    return <String>{\n      ...chain1.inputKeys,\n      ...chain2.inputKeys,\n    };\n  }\n\n  @override\n  Set<String> get outputKeys => const {'concat_output'};\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues values) async {\n    final output1 = await chain1.run(values);\n    final output2 = await chain2.run(values);\n    return {'concat_output': output1 + output2};\n  }\n}\n```\n\nNow, we can try running the chain that we called:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal llm = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\n\nfinal prompt1 = PromptTemplate.fromTemplate(\n'What is a good name for a company that makes {product}?',\n);\nfinal chain1 = LLMChain(llm: llm, prompt: prompt1);\n\nfinal prompt2 = PromptTemplate.fromTemplate(\n'What is a good slogan for a company that makes {product}?',\n);\nfinal chain2 = LLMChain(llm: llm, prompt: prompt2);\n\nfinal concatChain = ConcatenateChain(chain1: chain1, chain2: chain2);\nfinal concatOutput = await concatChain.run('colorful 
socks');\nprint(concatOutput);\n// -> '\\nFancy Footwear.\\n\\nSock it to 'em with style!'\n```\n"
  },
  {
    "path": "docs/modules/chains/how_to/debugging.md",
    "content": "# Debugging chains\n\nTODO\n"
  },
  {
    "path": "docs/modules/chains/how_to/from_hub.md",
    "content": "# Loading from LangChainHub\n\nTODO\n"
  },
  {
    "path": "docs/modules/chains/how_to/memory.md",
    "content": "# Adding memory (state)\n\nTODO\n"
  },
  {
    "path": "docs/modules/chains/how_to/serialization.md",
    "content": "# Serialization\n\nTODO\n"
  },
  {
    "path": "docs/modules/chains/popular/summarize.md",
    "content": "# Summarize\n\nA summarization chain can be used to summarize multiple documents.\n\nThere are two methods to summarize documents:\n- `stuff` uses the `StuffDocumentsChain` to combine all the documents into\n  a single string, then prompts the model to summarize that string. This\n  method is limited by the context length limit of the model.\n- `mapReduce` uses the `MapReduceDocumentsChain` to summarize each document\n  individually, then combines the results into a single summary.\n\n## Stuff\n\nThe `stuff` method uses the `StuffDocumentsChain` to combine all the\ndocuments into a single string, then prompts the model to summarize that\nstring. This method is limited by the context length limit of the `llm`.\n\nExample:\n```dart\nfinal loader = TextLoader('path/to/file.txt');\nfinal docs = await loader.load();\n\nconst textSplitter = RecursiveCharacterTextSplitter();\nfinal docsChunks = textSplitter.splitDocuments(docs);\n\nfinal llm = ChatOpenAI(apiKey: openAIKey);\nfinal summarizeChain = SummarizeChain.stuff(llm: llm);\n\nfinal summary = await summarizeChain.run(docsChunks);\n```\n\n## MapReduce\n\nThe `mapReduce` method uses the `MapReduceDocumentsChain` to summarize\neach document individually, then combines the results into a single\nsummary.\n\nThe `MapReduceDocumentsChain` involves two chains behind the scenes:\n- `MapReduceDocumentsChain.mapLlmChain` this is the chain that is applied\n  to each document to create a summary.\n- `MapReduceDocumentsChain.reduceDocumentsChain` this is a\n  `ReduceDocumentsChain` that reduces the summaries of each document into\n  a single summary.\n\nExample:\n```dart\nfinal loader = WebBaseLoader(['https://example.com']);\nfinal docs = await loader.load();\n\nconst textSplitter = RecursiveCharacterTextSplitter();\nfinal docsChunks = textSplitter.splitDocuments(docs);\n\nfinal llm = ChatOpenAI(apiKey: openAIKey);\nfinal summarizeChain = SummarizeChain.mapReduce(llm: llm);\n\nfinal summary = await 
summarizeChain.run(docsChunks);\n```\n"
  },
  {
    "path": "docs/modules/memory/memory.md",
    "content": "# Memory\n\nBy default, Chains and Agents are stateless, meaning that they treat each incoming query \nindependently (as are the underlying LLMs and chat models). In some applications (chatbots being a \nGREAT example) it is highly important to remember previous interactions, both at a short term but \nalso at a long term level. The Memory does exactly that.\n\nLangChain provides memory components in two forms. First, LangChain provides helper utilities for \nmanaging and manipulating previous chat messages. These are designed to be modular and useful \nregardless of how they are used. Secondly, LangChain provides easy ways to incorporate these \nutilities into chains.\n\n## Get started\n\nMemory involves keeping a concept of state around throughout a user’s interactions with a language\nmodel. A user’s interactions with a language model are captured in the concept of `ChatMessages`, so\nthis boils down to ingesting, capturing, transforming and extracting knowledge from a sequence of\nchat messages. There are many different ways to do this, each of which exists as its own memory\ntype.\n\nIn general, for each type of memory there are two ways to understanding using memory. These are the\nstandalone functions which extract information from a sequence of messages, and then there is the\nway you can use this type of memory in a chain.\n\nMemory can return multiple pieces of information (for example, the most recent N messages and a\nsummary of all previous messages). The returned information can either be a string or a list of\nmessages.\n\nIn this guide, we will walk through the simplest form of memory: “buffer” memory, which just\ninvolves keeping a buffer of all prior messages. 
We will show how to use the modular utility\nfunctions here, then show how it can be used in a chain (both returning a string as well as a\nlist of messages).\n\n### ChatMessageHistory\n\nOne of the core utility classes underpinning most (if not all) memory modules is the\n`ChatMessageHistory` class. This is a super lightweight wrapper which exposes convenience methods\nfor saving Human messages, AI messages, and then fetching them all.\n\nYou may want to use this class directly if you are managing memory outside of a chain.\n\n```dart\nfinal history = ChatMessageHistory();\n\nhistory.addHumanChatMessage('hi!');\nhistory.addAIChatMessage('whats up?');\n\nprint(await history.getChatMessages());\n// [HumanChatMessage(content='hi!', example=false),\n//  AIMessage(content='whats up?', example=false)]\n```\n\n### ConversationBufferMemory\n\nWe now show how to use this simple concept in a chain. We first showcase `ConversationBufferMemory`\nwhich is just a wrapper around `ChatMessageHistory` that extracts the messages in a variable.\n\nWe can first extract it as a string.\n\n```dart\nfinal memory = ConversationBufferMemory();\n\nmemory.chatHistory.addHumanChatMessage('hi!');\nmemory.chatHistory.addAIChatMessage('whats up?');\n\nprint(await memory.loadMemoryVariables());\n// {'history': 'Human: hi!\\nAI: whats up?'}\n```\n\nWe can also get the history as a list of messages:\n\n```dart\nfinal memory = ConversationBufferMemory(returnMessages: true);\n\nmemory.chatHistory.addHumanChatMessage('hi!');\nmemory.chatHistory.addAIChatMessage('whats up?');\n\nprint(await memory.loadMemoryVariables());\n// {'history': [HumanMessage(content='hi!', example=false),\n//   AIMessage(content='whats up?', example=false)]}\n```\n\n### Using in a chain\n\nFinally, let’s take a look at using this in a chain:\n\n```dart\nfinal llm = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0),\n);\nfinal conversation = ConversationChain(\n  llm: llm,\n  memory: 
ConversationBufferMemory(),\n);\nfinal output1 = await conversation.run('Hi there!');\nprint(output1);\n// -> 'Hi there! It's nice to meet you. How can I help you today?'\n```\n\nUnder the hood, the chain has formatted the prompt with the input and then passed it to the LLM.\n```dart\n> Entering new ConversationChain chain...\nPrompt after formatting:\nThe following is a friendly conversation between a human and an AI. The AI is talkative and \nprovides lots of specific details from its context. If the AI does not know the answer to a \nquestion, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:\n\n> Finished chain.\n```\n\nAs it was the first message, 'Current conversation' is still empty. If we send another message, we\ncan see that the chain remembers the previous message and adds it to the context.\n\n```dart\nfinal output2 = await conversation.run(\n  \"I'm doing well! Just having a conversation with an AI.\",\n);\nprint(output2);\n// -> 'That's great! It's always nice to have a conversation with someone \n// new. What would you like to talk about?'\n```\n\nUnder the hood:\n\n```dart\n> Entering new ConversationChain chain...\nPrompt after formatting:\nThe following is a friendly conversation between a human and an AI. The AI is talkative and \nprovides lots of specific details from its context. If the AI does not know the answer to a \nquestion, it truthfully says it does not know.\n\nCurrent conversation:\nHuman: Hi there!\nAI:  Hi there! It's nice to meet you. How can I help you today?\nHuman: I'm doing well! Just having a conversation with an AI.\nAI:\n\n> Finished chain.\n```\n\nOne more message:\n\n```dart\nfinal output3 = await conversation.run(\n  'Tell me about yourself',\n);\nprint(output3);\n// -> 'Sure! I am an AI language model created by OpenAI. I was trained on a \n// large dataset of text from the internet, which allows me to understand and \n// generate human-like language. 
I can answer questions, provide information, \n// and even have conversations like this one. Is there anything else you'd \n// like to know about me?'\n```\n\nWe can see that the history is now 2 messages long:\n\n```dart\n> Entering new ConversationChain chain...\nPrompt after formatting:\nThe following is a friendly conversation between a human and an AI. The AI is talkative and \nprovides lots of specific details from its context. If the AI does not know the answer to a \nquestion, it truthfully says it does not know.\n\nCurrent conversation:\nHuman: Hi there!\nAI:  Hi there! It's nice to meet you. How can I help you today?\nHuman: I'm doing well! Just having a conversation with an AI.\nAI:  That's great! It's always nice to have a conversation with someone new. What would you \nlike to talk about?\nHuman: Tell me about yourself.\nAI:\n\n> Finished chain.\n```\n"
  },
  {
    "path": "docs/modules/model_io/model_io.md",
    "content": "# Model I/O\n\nThe core element of any language model application is...the model. LangChain gives you the building blocks to interface with any language model.\n\n- [Prompts](/modules/model_io/prompts/prompts.md): templatize, dynamically \n  select, and manage model inputs.\n- [Language models](/modules/model_io/models/models.md): make calls to language \n  models through common interfaces.\n- [Output parsers](/modules/model_io/output_parsers/output_parsers.md): extract \n  information from model outputs.\n\n![model_io_diagram](img/model_io.jpg)\n*Image source: [LangChain docs](https://python.langchain.com/docs/modules/model_io)*\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/chat_models.md",
    "content": "# Chat models\n\nChat models are a variation on language models. While chat models use language\nmodels under the hood, the interface they expose is a bit different. Rather than\nexpose a \"text in, text out\" API, they expose an interface where \"chat messages\"\nare the inputs and outputs.\n\nChat model APIs are fairly new, so we are still figuring out the correct\nabstractions.\n\nThe following sections of documentation are provided:\n\n- **How-to guides**: Walkthroughs of core functionality, like streaming,\n  creating chat prompts, etc.\n- **Integrations**: How to use different chat model providers (OpenAI,\n  Anthropic, etc).\n\n## Get started\n\n### Setup\n\nFor this guide, we will work with an OpenAI chat model wrapper, although the functionalities\nhighlighted are generic for all LLM types.\n\nTo use LangChain you need to import the `langchain` package. As we are integrating with OpenAI,\nwe also need to import the `langchain_openai` package.\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n```\n\nWe can then instantiate the chat model:\n```dart\nfinal chatModel = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\n```\n\n### Messages\n\nThe chat model interface is based around messages rather than raw text. The \ntypes of messages currently supported in LangChain are `AIChatMessage`, \n`HumanChatMessage`, `SystemChatMessage`, and `CustomChatMessage` – \n`CustomChatMessage` takes in an arbitrary role parameter. Most of the time, \nyou’ll just be dealing with `HumanChatMessage`, `AIChatMessage`, and \n`SystemChatMessage`.\n\n### LCEL\n\nLLMs implement the `Runnable` interface, the basic building block of the LangChain Expression Language (LCEL). 
This means they support `invoke`, `stream`, and `batch` calls.\n\n```dart\nfinal messages = [\n  ChatMessage.humanText(\n    'Translate this sentence from English to French. I love programming.',\n  ),\n];\nfinal prompt = PromptValue.chat(messages);\nfinal chatRes = await chatModel.invoke(prompt);\n// -> [ChatGeneration{\n//       output: AIChatMessage{content: J'adore la programmation., example: false},\n//       generationInfo: {index: 0, finish_reason: stop}}]\n```\n\nOpenAI’s chat model supports multiple messages as input. See [here](https://platform.openai.com/docs/guides/gpt/chat-completions-vs-completions) for more information. Here is an example of sending a system and user message to the chat model:\n\n```dart\nfinal messages = [\n  ChatMessage.system('You are a helpful assistant that translates English to French.'),\n  ChatMessage.humanText('I love programming.')\n];\nfinal prompt = PromptValue.chat(messages);\nfinal chatRes = await chatModel.invoke(prompt);\nprint(chatRes);\n// -> [ChatGeneration{\n//       output: AIChatMessage{content: J'adore la programmation., example: false},\n//       generationInfo: {index: 0, finish_reason: stop}}]\n```\n\nThe `invoke` API returns a `ChatResult` which contains a `ChatGeneration`\nobject with the `output` messages and some metadata about the generation. It \nalso contains some additional information like `usage` and `modelOutput`.\n\n```dart\nfinal chatRes1 = await chatModel.generate(messages);\nprint(chatRes1.generations);\n// -> [ChatGeneration{\n//       output: AIChatMessage{content: J'adore la programmation., example: false},\n//       generationInfo: {index: 0, finish_reason: stop}}]\nprint(chatRes1.usage?.totalTokens);\n// -> 36\nprint(chatRes1.modelOutput);\n// -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini}\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/how_to/llm_chain.md",
    "content": "# LLMChain\n\n> DEPRECATED: `LLMChain` is deprecated in favour of LangChain Expression Language (LCEL).\n\nYou can use the existing `LLMChain` in a very similar way as \n[with LLMs](/modules/chains/chains?id=get-started) - provide a prompt and a \nmodel.\n\n```dart\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    temperature: 0,\n  ),\n);\n\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);\nconst humanTemplate = '{text}';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);\n\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]);\n\nfinal chain = LLMChain(llm: chat, prompt: chatPrompt);\n\nfinal res = await chain.run({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n});\nprint(res);\n// -> 'J'adore la programmation.'\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/how_to/prompts.md",
    "content": "# Prompts\n\nPrompts for Chat models are built around messages, instead of just plain text.\n\nYou can make use of templating by using a `ChatMessagePromptTemplate`. You can \nbuild a `ChatPromptTemplate` from one or more `ChatMessagePromptTemplate`s. You \ncan use `ChatPromptTemplate`’s `formatPrompt()` – this returns a `PromptValue`, \nwhich you can convert to a string or `ChatMessage` object, depending on whether \nyou want to use the formatted value as input to an LLM or Chat model.\n\nFor convenience, there is a `fromTemplate()` method exposed on the template. If \nyou were to use this template, this is what it would look like:\n\n```dart\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);\nconst humanTemplate = '{text}';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);\n\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages(\n  [systemMessagePrompt, humanMessagePrompt],\n);\nfinal chatRes = await chat(\n  chatPrompt.formatPrompt({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.'\n  }).toChatMessages(),\n);\nprint(chatRes);\n// -> AIChatMessage{content: J'adore la programmation., example: false}\n```\n\nIf you wanted to construct the `MessagePromptTemplate` more directly, you could \ncreate a `PromptTemplate` outside and then pass it in, eg:\n\n```dart\nfinal prompt= PromptTemplate.fromTemplate(\n  'You are a helpful assistant that translates {input_language} to {output_language}.',\n);\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate(prompt: prompt);\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/how_to/streaming.md",
    "content": "# Streaming\n\nSome chat models provide a streaming response. This means that instead of waiting for the entire response to be returned, you can start processing it as soon as it's available. This is useful if you want to display the response to the user as it's being generated, or if you want to process the response as it's being generated.\n\nExample usage:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\nfinal chat = ChatOpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<ChatResult>();\n\nfinal chain = promptTemplate.pipe(chat).pipe(stringOutputParser);\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 456\n// 789\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/how_to/tools.md",
    "content": "# Tool/function calling\n\n> We use the term \"tool calling\" interchangeably with \"function calling\". Although function calling is sometimes meant to refer to invocations of a single function, we treat all models as though they can return multiple tool or function calls in each message.\n\n> Tool calling is currently supported by:\n> - [`ChatAnthropic`](/modules/model_io/models/chat_models/integrations/anthropic.md)\n> - [`ChatFirebaseVertexAI`](/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md)\n> - [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md)\n> - [`ChatOllama`](/modules/model_io/models/chat_models/integrations/ollama.md)\n> - [`ChatOpenAI`](/modules/model_io/models/chat_models/integrations/openai.md)\n\nTool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. While the name implies that the model is performing some action, this is actually not the case! The model is coming up with the arguments to a tool, and actually running the tool (or not) is up to the user - for example, if you want to extract output matching some schema from unstructured text, you could give the model an “extraction” tool that takes parameters matching the desired schema, then treat the generated output as your final result.\n\nA tool call includes an `id` of the call, the `name` of the tool to call, and a map with the `arguments` to pass to the tool. The `arguments` map is structured like `{argument_name: argument_value}`.\n\nMany LLM providers, including Anthropic, Cohere, Google, Mistral, OpenAI, and others, support variants of a tool calling feature. These features typically allow requests to the LLM to include available tools and their schemas, and for responses to include calls to these tools. For instance, given a search engine tool, an LLM might handle a query by first issuing a call to the search engine. 
The system calling the LLM can receive the tool call, execute it, and return the output to the LLM to inform its response. LangChain.dart includes a suite of [built-in tools](/modules/agents/tools/tools.md) and supports several methods for defining your own custom tools. Tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n\nProviders adopt different conventions for formatting tool schemas and tool calls. For instance, Anthropic returns tool calls as parsed structures within a larger content block:\n\n```json\n[\n  {\n    \"text\": \"<thinking>\\nI should use a tool.\\n</thinking>\",\n    \"type\": \"text\"\n  },\n  {\n    \"id\": \"id_value\",\n    \"input\": {\"arg_name\": \"arg_value\"},\n    \"name\": \"tool_name\",\n    \"type\": \"tool_use\"\n  }\n]\n```\n\nwhereas OpenAI separates tool calls into a distinct parameter, with arguments as JSON strings:\n\n```json\n{\n  \"tool_calls\": [\n    {\n      \"id\": \"id_value\",\n      \"function\": {\n        \"arguments\": '{\"arg_name\": \"arg_value\"}',\n        \"name\": \"tool_name\"\n      },\n      \"type\": \"function\"\n    }\n  ]\n}\n```\n\nLangChain.dart implements standard interfaces for defining tools, passing them to LLMs, and representing tool calls.\n\n## Request\n\nFor a model to be able to invoke tools, you need to pass tool specs to it when making a chat request.\n\n### Defining tool schemas: LangChain Tool\n\nYou can define a tool spec using the `ToolSpec` class. 
A `ToolSpec` defines the name, description, and input [JSON Schema](https://json-schema.org) of a tool, but it does not define the actual tool implementation.\n\n```dart\nconst tool1 = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline for the joke',\n      },\n    },\n    'required': ['setup', 'punchline'],\n  },\n);\n```\n\nAll LangChain.dart `Tool`s implement `ToolSpec`, so they can be passed to the model directly.\n\n> The difference between `ToolSpec` and `Tool` is that `ToolSpec` only defines the schema of the tool, while `Tool` also defines the actual tool implementation.\n\n```dart\nfinal tool2 = CalculatorTool();\nfinal tool3 = Tool.fromFunction<SearchInput, String>(\n  name: 'search',\n  description: 'Tool for searching the web.',\n  inputJsonSchema: const {\n    'type': 'object',\n    'properties': {\n      'query': {\n        'type': 'string',\n        'description': 'The query to search for',\n      },\n      'n': {\n        'type': 'number',\n        'description': 'The number of results to return',\n      },\n    },\n    'required': ['query'],\n  },\n  func: callYourSearchFunction,\n  getInputFromJson: SearchInput.fromJson,\n);\n```\n\n### Passing tools to model\n\nOnce you have defined the tools that the model can call, you can pass them to the model in several ways:\n\n- **When instantiating the model**: you can configure the tools in the `defaultOptions` field. This is useful if you want the tools to be available for all requests using the model.\n\n```dart\nfinal model = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: ChatOpenAIOptions(\n      tools: [tool1, tool2, tool3],\n    ),\n);\n```\n\n- **Using `model.bind()`**: you can bind tools to a specific model. 
This is useful if the tools are specific to a particular chain, but not to all requests that use the model.\n\n```dart\nfinal chain = model.bind(\n    ChatOpenAIOptions(\n      tools: [tool1, tool2, tool3],\n    ),\n).pipe(...);\n```\n\n- **When invoking/streaming the chain**: you can pass the tools in the `options` field when invoking/streaming the chain. This is useful when you want to use tools for a specific request only.\n\n```dart\nfinal res = await model.invoke(\n  input,\n  options: ChatOpenAIOptions(\n    tools: [tool1, tool2, tool3],\n  ),\n);\n\nfinal stream = model.stream(\n  input,\n  options: ChatOpenAIOptions(\n    tools: [tool1, tool2, tool3],\n  ),\n);\n```\n\nUnder the hood, LangChain.dart will pass the tools to the model in the correct format based on the provider's requirements.\n\n### Forcing a tool call\n\nWhen you just define the tools the model can call, the model can choose whether to return one tool call, multiple tool calls, or no tool calls at all. Some models support a `toolChoice` parameter that gives you some ability to force the model to call a tool. \n\nFor models that support this, you can pass in the name of the tool you want the model to always call:\n\n```dart\nChatOpenAIOptions(\n  tools: [tool1, tool2, tool3],\n  toolChoice: ChatToolChoice.forced(name: 'joke'),\n);\n```\n\nYou can force the model to call at least one tool, without specifying which tool specifically:\n\n```dart\nChatOpenAIOptions(\n  tools: [tool1, tool2, tool3],\n  toolChoice: ChatToolChoice.auto,\n);\n```\n\nOr you can force the model to not call any tools:\n\n```dart\nChatOpenAIOptions(\n  tools: [tool1, tool2, tool3],\n  toolChoice: ChatToolChoice.none,\n);\n```\n\n## Response\n\nOnce you have passed the tools to the model, the model can return tool calls in the response. 
\n\n### Reading tool calls from model output\n\nIf tool calls are included in a LLM response, they are attached to the corresponding `AIChatMessage` as a list of `AIChatMessageToolCall` objects in the `toolCalls` field. Messages with no tool calls default to an empty list.\n\nA `AIChatMessageToolCall` includes:\n- `id`: The tool call id. This is unique for each tool call in the response.\n- `name`: The name of the tool to call.\n- `argumentsRaw`: The raw JSON string containing the arguments returned by the model to pass to the tool.\n- `arguments`: The arguments returned by the model to pass to the tool parsed into a Dart map.\n\nExample:\n```dart\nfinal calculator = CalculatorTool();\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    tools: [calculator],\n  ),\n);\nfinal res = await model.invoke(\n  PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n);\nprint(res.output);\n// AIChatMessage{\n//   content: ,\n//   toolCalls: [\n//     AIChatMessageToolCall{\n//       id: call_Kgo8xBug6OkFbBVBmAgI2bm0,\n//       name: calculator,\n//       argumentsRaw: {\"input\": \"3 * 12\"},\n//       arguments: {\n//         input: 3 * 12\n//       },\n//     }, AIChatMessageToolCall{\n//       id: call_Tc2zOW8obEGh0iEtA5UNykqR,\n//       name: calculator,\n//       argumentsRaw: {\"input\": \"11 + 49\"},\n//       arguments: {\n//         input: 11 + 49\n//       },\n//     }\n//   ],\n// }\n```\n\nNote that the model does not always generate a valid JSON, in that case, `arguments` will be empty, but you can still see the raw response in `argumentsRaw`.\n\nThe model may also hallucinate parameters not defined by your tool schema. Validate the arguments in your code before calling your tool.\n\nIf desired, output parsers can further process the output. 
For example, you can extract the tool calls from the model response using `ToolsOutputParser`:\n\n```dart\nfinal chain = model.pipe(ToolsOutputParser());\nfinal res2 = await chain.invoke(\n  PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n);\nprint(res2);\n// [ParsedToolCall{\n//   id: call_p4GmED1My56vV6XZi9ChljJN,\n//   name: calculator,\n//   arguments: {\n//     input: 3 * 12\n//   },\n// }, ParsedToolCall{\n//   id: call_eLJo7nII9EanFUcxy42WA5Pm,\n//   name: calculator,\n//   arguments: {\n//     input: 11 + 49\n//   },\n// }]\n```\n\n### Streaming\n\nWhen tools are called in a streaming context, message chunks will be populated with tool call chunk objects in a list.\n\n```dart\nfinal stream = model.stream(\n  PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n);\nawait for (final chunk in stream) {\n  print(chunk.output.toolCalls);\n}\n// [] \n// [AIChatMessageToolCall{ id: call_bfU0p8DH8xvzZVXaDK3V42hN, name: calculator, argumentsRaw: , arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: {\"in, arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: put\":, arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: \"3 * , arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: 12\"}, arguments: {}, }] \n// [AIChatMessageToolCall{ id: call_WnHu44lmhkI3fFpEelyGBh6L, name: calculator, argumentsRaw: , arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: {\"in, arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: put\":, arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: \"11 +, arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: 49\", arguments: {}, }] \n// [AIChatMessageToolCall{ id: , name: , argumentsRaw: }, arguments: {}, }] []\n```\n\nAs you can see, the `arguments` field is always empty because the `argumentsRaw` is never a valid JSON.\n\n`ToolsOutputParser` is a useful tool for 
this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one:\n\n```dart\nfinal chain2 = model.pipe(ToolsOutputParser());\nfinal stream2 = chain2.stream(\n  PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n);\nawait stream2.forEach(print);\n// [] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {}, }] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * }, }] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {}, }] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {input: 11 +}, }] \n// [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {input: 11 + 49}, }]\n```\n\n### Passing tool outputs to model\n\nIf we’re using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolChatMessage`.\n\n```dart\nfinal calculator = CalculatorTool();\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    tools: [calculator],\n  ),\n);\n\nfinal messages = [\n  ChatMessage.humanText('Calculate 3 * 12 and 11 + 49'),\n];\n\nfinal res = await model.invoke(\n  PromptValue.chat(messages),\n);\nprint(res);\n// ChatResult{\n//   id: chatcmpl-9LYl0AecfrQClqOS4PNfczOvsBNvt,\n//   output: AIChatMessage{\n//     content: ,\n//     toolCalls: [\n//       AIChatMessageToolCall{\n//         id: 
call_5tXVddYtn9igaRAeUwfH7ZN2,\n//         name: calculator,\n//         argumentsRaw: {\"input\": \"3 * 12\"},\n//         arguments: {\n//           input: 3 * 12\n//         },\n//       },\n//       AIChatMessageToolCall{\n//         id: call_bQ3Yx6foKIYLXBMQas5ObhHa,\n//         name: calculator,\n//         argumentsRaw: {\"input\": \"11 + 49\"},\n//         arguments: {\n//           input: 11 + 49\n//         },\n//       }\n//     ],\n//   },\n//   finishReason: FinishReason.toolCalls,\n//   metadata: {\n//     model: gpt-4-turbo-2024-04-09,\n//     created: 1714925522,\n//     system_fingerprint: fp_3450ce39d5\n//   },\n//   usage: LanguageModelUsage{\n//     promptTokens: 91,\n//     responseTokens: 49,\n//     totalTokens: 140\n//   },\n//   streaming: false\n// }\n\nmessages.add(res.output);\n\nfor (final toolCall in res.output.toolCalls) {\n  final input = calculator.getInputFromJson(toolCall.arguments);\n  final toolRes = await calculator.invoke(input);\n  final toolMessage = ChatMessage.tool(\n    toolCallId: toolCall.id,\n    content: toolRes,\n  );\n  messages.add(toolMessage);\n}\n\nfinal res2 = await model.invoke(\n  PromptValue.chat(messages),\n);\nprint(res2.output.content);\n// The calculations yield the following results:\n// - 3 * 12 = 36\n// - 11 + 49 = 60\n```\n\n### Few-shot prompting\n\nFor more complex tool use it’s very useful to add few-shot examples to the prompt. We can do this by adding `AIChatMessages` with `AIChatMessageToolCall` and corresponding `ToolChatMessage` to our prompt.\n\nFor example, let's say that instead of using `+` for addition and `*` for multiplication, we want to use the plus (➕) and (✖️) emojis. 
We can add a few-shot example to the prompt to teach the model how to use the plus emoji:\n\n```dart\nfinal calculator = CalculatorTool();\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    tools: [calculator],\n  ),\n);\n\nfinal examples = [\n  ChatMessage.humanText('Calculate 3 ✖️ 12 and 11 ➕ 49'),\n  ChatMessage.ai(\n    '',\n    toolCalls: [\n      AIChatMessageToolCall(\n        id: 'call_1',\n        name: 'calculator',\n        argumentsRaw: '{\"input\": \"3 * 12\"}',\n        arguments: {'input': '3 * 12'},\n      ),\n      AIChatMessageToolCall(\n        id: 'call_2',\n        name: 'calculator',\n        argumentsRaw: '{\"input\": \"11 + 49\"}',\n        arguments: {'input': '11 + 49'},\n      ),\n    ],\n  ),\n  ChatMessage.tool(toolCallId: 'call_1', content: '36'),\n  ChatMessage.tool(toolCallId: 'call_2', content: '60'),\n  ChatMessage.ai('The calculations yield the following results:\\n- 3 ✖️ 12 = 36\\n- 11 ➕ 49 = 60'),\n];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'You are bad at math but are an expert at using a calculator. '\n      'Use past tool usage as an example of how to correctly use the tools.'),\n  (ChatMessageType.messagesPlaceholder, 'examples'),\n  (ChatMessageType.human, '{query}'),\n]);\n\nfinal chain = promptTemplate.pipe(model).pipe(ToolsOutputParser());\nfinal res = await chain.invoke({\n  'query': 'Calculate 3 ✖️ 12 and 11 ➕ 49',\n  'examples': examples,\n});\nprint(res);\n// [ParsedToolCall{\n//   id: call_BtNoLk7IiQksMfad5897mSSu,\n//   name: calculator,\n//   arguments: {input: 3 * 12},\n// }, ParsedToolCall{\n//   id: call_ZAiFb1G71hwiwVvnqZFKHTuP,\n//   name: calculator,\n//   arguments: {input: 11 + 49},\n// }]\n```\n\nThe model interpreted the emojis correctly and returned the correct tool calls.\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/anthropic.md",
    "content": "# ChatAnthropic\n\nWrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API).\n\n## Setup\n\nThe Anthropic API uses API keys for authentication. Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests.\n\nThe following models are available:\n- `claude-3-5-sonnet-latest`\n- `claude-3-5-sonnet-20241022`\n- `claude-3-5-sonnet-20240620`\n- `claude-3-opus-latest`\n- `claude-3-opus-20240229`\n- `claude-3-sonnet-20240229`\n- `claude-3-haiku-20240307`\n- `claude-2.1`\n- `claude-2.0`\n- `claude-instant-1.2`\n\nMind that the list may not be up-to-date. See https://docs.anthropic.com/en/docs/about-claude/models for the updated list.\n\n## Usage\n\n```dart\nfinal apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\nfinal chatModel = ChatAnthropic(\n  apiKey: apiKey,\n  defaultOptions: ChatAnthropicOptions(\n    model: 'claude-3-5-sonnet-20241022',\n    temperature: 0,\n  ),\n);\n\nfinal chatPrompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n  (ChatMessageType.human, 'Text to translate:\\n{text}'),\n]);\n\nfinal chain = chatPrompt | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'adore programmer.'\n```\n\n## Multimodal support\n\n```dart\nfinal apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\nfinal chatModel = ChatAnthropic(\n  apiKey: apiKey,\n  defaultOptions: ChatAnthropicOptions(\n    model: 'claude-3-5-sonnet-20241022',\n    temperature: 0,\n  ),\n);\n\nfinal res = await chatModel.invoke(\n  PromptValue.chat([\n    ChatMessage.human(\n      ChatMessageContent.multiModal([\n        ChatMessageContent.text('What fruit is this?'),\n        ChatMessageContent.image(\n          mimeType: 
'image/jpeg',\n          data: base64.encode(\n            await File('./bin/assets/apple.jpeg').readAsBytes(),\n          ),\n        ),\n      ]),\n    ),\n  ]),\n);\n\nprint(res.output.content);\n// -> 'The fruit in the image is an apple.'\n```\n\n## Streaming\n\n```dart\nfinal apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatAnthropic(\n  apiKey: apiKey,\n  defaultOptions: ChatAnthropicOptions(\n    model: 'claude-3-5-sonnet-20241022',\n    temperature: 0,\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '30'});\nawait stream.forEach(print);\n// 123\n// 456789101\n// 112131415161\n// 718192021222\n// 324252627282\n// 930\n```\n\n## Tool calling\n\n`ChatAnthropic` supports tool calling.\n\nCheck the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools.\n\nExample:\n```dart\nconst tool = ToolSpec(\n  name: 'get_current_weather',\n  description: 'Get the current weather in a given location',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'location': {\n        'type': 'string',\n        'description': 'The city and country, e.g. San Francisco, US',\n      },\n    },\n    'required': ['location'],\n  },\n);\nfinal chatModel = ChatAnthropic(\n  apiKey: apiKey,\n  defaultOptions: ChatAnthropicOptions(\n    model: 'claude-3-5-sonnet-20241022',\n    temperature: 0,\n    tools: [tool],\n  ),\n);\n\nfinal res = await model.invoke(\n  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n);\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/anyscale.md",
    "content": "# Anyscale\n\n[Anyscale](https://www.anyscale.com/) offers a unified OpenAI-compatible API for a broad range of [models](https://docs.endpoints.anyscale.com/guides/models/#chat-models) running serverless or on your own dedicated instances.\n\nIt also allows to fine-tune models on your own data or train new models from scratch.\n\nYou can consume Anyscale API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.\n\nThe only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`:\n\n```dart\nfinal chatModel = ChatOpenAI(\n  apiKey: anyscaleApiKey,\n  baseUrl: 'https://api.endpoints.anyscale.com/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'meta-llama/Llama-2-70b-chat-hf',\n  ),\n);\n```\n\n## Invoke\n\n```dart\nfinal anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that translates {input_language} to {output_language}.',\n  ),\n  (ChatMessageType.human, '{text}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: anyscaleApiKey,\n  baseUrl: 'https://api.endpoints.anyscale.com/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'meta-llama/Llama-2-70b-chat-hf',\n  ),\n);\n\nfinal chain = promptTemplate | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> \"I love programming\" se traduit en français sous la forme \"J'aime passionnément la programmation\"\n```\n\n## Stream\n\n```dart\nfinal anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that replies only with numbers '\n      'in order without any spaces or commas',\n  ),\n  (ChatMessageType.human, 
'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: anyscaleApiKey,\n  baseUrl: 'https://api.endpoints.anyscale.com/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 1\n// 2\n// 3\n// ...\n// 9\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/firebase_vertex_ai.md",
    "content": "# Firebase AI (Vertex AI / Google AI)\n\nThe [Firebase AI Logic](https://firebase.google.com/docs/ai-logic) gives you access to the latest generative AI models from Google: the Gemini models. If you need to call the Gemini API directly from your mobile or web app you can use the `ChatFirebaseVertexAI` class instead of the [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md) class which is designed to be used on the server-side.\n\n`ChatFirebaseVertexAI` is built specifically for use with mobile and web apps, offering security options against unauthorized clients as well as integrations with other Firebase services.\n\n## Supported Backends\n\n`ChatFirebaseVertexAI` supports two backends:\n\n- **Vertex AI** (default): Requires Blaze plan (pay-as-you-go). Best for production use.\n- **Google AI**: Available on free Spark plan. Good for development and testing.\n\n## Key capabilities\n\n- **Multimodal input**: The Gemini models are multimodal, so prompts sent to the Gemini API can include text, images (even PDFs), video, and audio.\n- **Growing suite of capabilities**: You can call the Gemini API directly from your mobile or web app, build an AI chat experience, use function calling, and more.\n- **Security for production apps**: Use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients.\n- **Robust infrastructure**: Take advantage of scalable infrastructure that's built for use with mobile and web apps, like managing structured data with Firebase database offerings (like Cloud Firestore) and dynamically setting run-time configurations with Firebase Remote Config.\n\n## Setup\n\n### 1. Set up a Firebase project\n\nCheck the [Firebase documentation](https://firebase.google.com/docs/ai-logic/get-started?platform=flutter) for the latest information on how to set up Firebase AI in your Firebase project.\n\n**For Vertex AI backend:**\n1. 
Upgrade your billing plan to the Blaze pay-as-you-go pricing plan.\n2. Enable the required APIs (`aiplatform.googleapis.com` and `firebaseml.googleapis.com`).\n3. Integrate the Firebase SDK into your app (if you haven't already).\n4. Recommended: Enable Firebase App Check to protect the API from abuse.\n\n**For Google AI backend:**\n1. You can use the free Spark plan.\n2. Configure your Gemini API key in the Firebase console.\n3. Integrate the Firebase SDK into your app (if you haven't already).\n\n### 2. Add the LangChain.dart Firebase package\n\nAdd the `langchain_firebase` package to your `pubspec.yaml` file.\n\n```yaml\ndependencies:\n  langchain: {version}\n  langchain_firebase: {version}\n```\n\nInternally, `langchain_firebase` uses the [`firebase_ai`](https://pub.dev/packages/firebase_ai) SDK to interact with the Firebase AI Logic API.\n\n### 3. Initialize your Firebase app\n\n```yaml \nawait Firebase.initializeApp();\n```\n\n### 4. Call the Gemini API\n\n**Using Vertex AI backend (default):**\n\n```dart\nfinal chatModel = ChatFirebaseVertexAI();\nfinal chatPrompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n  (ChatMessageType.human, 'Text to translate:\\n{text}'),\n]);\n\nfinal chain = chatPrompt | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'adore programmer.'\n```\n\n**Using Google AI backend:**\n\n```dart\nfinal chatModel = ChatFirebaseVertexAI(\n  defaultBackend: FirebaseAIBackend.googleAI,\n);\n```\n\n> Check out the [sample project](https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase/example) to see a complete project using Firebase AI.\n\n## Available models\n\nThe following models are available:\n- `gemini-1.5-flash`:\n  * text / image / audio -> text model\n  * 
Max input token: 1048576\n  * Max output tokens: 8192\n- `gemini-1.5-pro`:\n  * text / image / audio -> text model\n  * Max input token: 2097152\n  * Max output tokens: 8192\n- `gemini-1.0-pro-vision`:\n  * text / image -> text model\n  * Max input token: 16384\n  * Max output tokens: 2048\n- `gemini-1.0-pro`\n  * text -> text model\n  * Max input token: 32760\n  * Max output tokens: 8192\n      \nMind that this list may not be up-to-date. Refer to the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the updated list.\n\n## Multimodal support\n\n```dart\nfinal chatModel = ChatFirebaseVertexAI(\n  defaultOptions: ChatFirebaseVertexAIOptions(\n    model: 'gemini-1.5-pro',\n  ),\n);\nfinal res = await chatModel.invoke(\n  PromptValue.chat([\n    ChatMessage.human(\n      ChatMessageContent.multiModal([\n        ChatMessageContent.text('What fruit is this?'),\n        ChatMessageContent.image(\n          mimeType: 'image/jpeg',\n          data: base64.encode(\n            await File('./bin/assets/apple.jpeg').readAsBytes(),\n          ),\n        ),\n      ]),\n    ),\n  ]),\n);\nprint(res.output.content);\n// -> 'That is an apple.'\n```\n\n## Streaming\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatFirebaseVertexAI(\n  defaultOptions: ChatFirebaseVertexAIOptions(\n    model: 'gemini-1.5-pro',\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '30'});\nawait stream.forEach(print);\n// 1\n// 2345678910111213\n// 1415161718192021\n// 222324252627282930 \n```\n\n## Tool calling\n\n`ChatGoogleGenerativeAI` supports tool calling.\n\nCheck the 
[docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools.\n\nExample:\n```dart\nconst tool = ToolSpec(\n  name: 'get_current_weather',\n  description: 'Get the current weather in a given location',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'location': {\n        'type': 'string',\n        'description': 'The city and state, e.g. San Francisco, CA',\n      },\n    },\n    'required': ['location'],\n  },\n);\nfinal chatModel = ChatFirebaseVertexAI(\n  defaultOptions: ChatFirebaseVertexAIOptions(\n    model: 'gemini-1.5-pro',\n    temperature: 0,\n    tools: [tool],\n  ),\n);\nfinal res = await model.invoke(\n  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n);\n```\n\n## Prevent abuse with Firebase App Check\n\nYou can use Firebase App Check to protect the Vertex AI Gemini API from abuse by unauthorized clients. Check the [Firebase documentation](https://firebase.google.com/docs/vertex-ai/app-check) for more information.\n\n## Locations\n\nWhen initializing the Vertex AI service, you can optionally specify a location in which to run the service and access a model. If you don't specify a location, the default is us-central1. See the list of [available locations](https://firebase.google.com/docs/vertex-ai/locations?platform=flutter#available-locations).\n\n```dart\nfinal chatModel = ChatFirebaseVertexAI(\n  location: 'us-central1',\n);\n```\n\n## Alternatives\n\n- [`ChatVertexAI`](/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md): Use this class to call the Vertex AI Gemini API from the server-side.\n- [`ChatGoogleGenerativeAI`](/modules/model_io/models/chat_models/integrations/googleai.md): Use this class to call the Google AI Gemini API from a pure Dart environment (server-side or CLI apps).\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/gcp_vertex_ai.md",
    "content": "# GCP Chat Vertex AI\n\nWrapper around [GCP Vertex AI chat models](https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts) API (aka PaLM API for chat).\n\n## Set up your Google Cloud Platform project\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n\n### Authentication\n\nTo create an instance of `ChatVertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package.\n\nTo create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance.\n\nThere are several ways to obtain an `AuthClient` depending on your use case. 
Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details.\n\nExample using a service account JSON:\n\n```dart\nfinal serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n  json.decode(serviceAccountJson),\n);\nfinal authClient = await clientViaServiceAccount(\n  serviceAccountCredentials,\n  [ChatVertexAI.cloudPlatformScope],\n);\nfinal chatVertexAi = ChatVertexAI(\n  httpClient: authClient,\n  project: 'your-project-id',\n);\n```\n\nThe service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions):\n- `aiplatform.endpoints.predict`\n\nThe required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is:\n- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `ChatVertexAI.cloudPlatformScope`)\n\nSee: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control\n\n### Available models\n\n- `chat-bison`\n  * Max input token: 4096\n  * Max output tokens: 1024\n  * Training data: Up to Feb 2023\n  * Max turns: 2500\n- `chat-bison-32k`\n  * Max input and output tokens combined: 32k\n  * Training data: Up to Aug 2023\n  * Max turns: 2500\n\nThe previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models.\n\n### Model options\n\nYou can define default options to use when calling the model (e.g. temperature, stop sequences, etc. 
) using the `defaultOptions` parameter.\n\nThe default options can be overridden when calling the model using the `options` parameter.\n\nExample:\n```dart\nfinal chatModel = ChatVertexAI(\n  httpClient: authClient,\n  project: 'your-project-id',\n  defaultOptions: ChatVertexAIOptions(\n    temperature: 0.9,\n  ),\n);\nfinal result = await chatModel(\n  [ChatMessage.humanText('Hello')],\n  options: ChatVertexAIOptions(\n    temperature: 0.5,\n   ),\n);\n```\n\n### Full example\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nvoid main() async {\n  final chat = ChatVertexAI(\n    httpClient: await _getAuthHttpClient(),\n    project: _getProjectId(),\n    defaultOptions: const ChatVertexAIOptions(\n      temperature: 0,\n    ),\n  );\n  while (true) {\n    stdout.write('> ');\n    final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? '');\n    final aiMsg = await chat([usrMsg]);\n    print(aiMsg.content);\n  }\n}\n\nFuture<AuthClient> _getAuthHttpClient() async {\n  final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n    json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n  );\n  return clientViaServiceAccount(\n    serviceAccountCredentials,\n    [VertexAI.cloudPlatformScope],\n  );\n}\n\nString _getProjectId() {\n  return Platform.environment['VERTEX_AI_PROJECT_ID']!;\n}\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/googleai.md",
    "content": "# ChatGoogleGenerativeAI\n\nWrapper around [Google AI for Developers](https://ai.google.dev/) API (aka Gemini API).\n\n## Setup\n\nTo use `ChatGoogleGenerativeAI` you need to have an API key. You can get one [here](https://aistudio.google.com/app/apikey).\n\nThe following models are available:\n- `gemini-1.5-flash`:\n  * text / image / audio -> text model\n  * Max input token: 1048576\n  * Max output tokens: 8192\n- `gemini-1.5-pro`:\n  * text / image / audio -> text model\n  * Max input token: 2097152\n  * Max output tokens: 8192\n- `gemini-1.0-pro` (or `gemini-pro`):\n  * text -> text model\n  * Max input token: 32760\n  * Max output tokens: 8192\n- `aqa`:\n  * text -> text model\n  * Max input token: 7168\n  * Max output tokens: 1024\n\nMind that this list may not be up-to-date. Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini) for the updated list.\n\n## Usage\n\n```dart\nfinal apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\nfinal chatModel = ChatGoogleGenerativeAI(\n  apiKey: apiKey,\n  defaultOptions: ChatGoogleGenerativeAIOptions(\n    model: 'gemini-1.5-pro',\n    temperature: 0,\n  ),\n);\n\nfinal chatPrompt = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n  (ChatMessageType.human, 'Text to translate:\\n{text}'),\n]);\n\nfinal chain = chatPrompt | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'adore programmer.'\n```\n\n## Multimodal support\n\n```dart\nfinal apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\nfinal chatModel = ChatGoogleGenerativeAI(\n  apiKey: apiKey,\n  defaultOptions: ChatGoogleGenerativeAIOptions(\n    model: 'gemini-1.5-pro',\n    temperature: 0,\n  ),\n);\nfinal res = await chatModel.invoke(\n  PromptValue.chat([\n    
ChatMessage.human(\n      ChatMessageContent.multiModal([\n        ChatMessageContent.text('What fruit is this?'),\n        ChatMessageContent.image(\n          mimeType: 'image/jpeg',\n          data: base64.encode(\n            await File('./bin/assets/apple.jpeg').readAsBytes(),\n          ),\n        ),\n      ]),\n    ),\n  ]),\n);\nprint(res.output.content);\n// -> 'That is an apple.'\n```\n\n## Streaming\n\n```dart\nfinal apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas.'),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatGoogleGenerativeAI(\n  apiKey: apiKey,\n  defaultOptions: const ChatGoogleGenerativeAIOptions(\n    model: 'gemini-1.5-pro',\n    temperature: 0,\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '30'});\nawait stream.forEach(print);\n// 1\n// 2345678910111213\n// 1415161718192021\n// 222324252627282930 \n```\n\n## Tool calling\n\n`ChatGoogleGenerativeAI` supports tool calling.\n\nCheck the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools) for more information on how to use tools.\n\nExample:\n```dart\nconst tool = ToolSpec(\n  name: 'get_current_weather',\n  description: 'Get the current weather in a given location',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'location': {\n        'type': 'string',\n        'description': 'The city and state, e.g. 
San Francisco, CA',\n      },\n    },\n    'required': ['location'],\n  },\n);\nfinal chatModel = ChatGoogleGenerativeAI(\n  defaultOptions: ChatGoogleGenerativeAIOptions(\n    model: 'gemini-1.5-pro',\n    temperature: 0,\n    tools: [tool],\n  ),\n);\nfinal res = await model.invoke(\n  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n);\n```\n\n## Code execution\n\n`ChatGoogleGenerativeAI` supports [code execution](https://ai.google.dev/gemini-api/docs/code-execution?lang=python#billing), just set `enableCodeExecution` to `true` in the options.\n\n```dart\nfinal chatModel = ChatGoogleGenerativeAI(\n  apiKey: apiKey,\n  defaultOptions: ChatGoogleGenerativeAIOptions(\n    model: 'gemini-1.5-flash',\n    enableCodeExecution: true,\n  ),\n);\nfinal res = await chatModel.invoke(\n  PromptValue.string(\n    'Calculate the fibonacci sequence up to 10 terms. '\n        'Return only the last term without explanations.',\n  ),\n);\n\nfinal text = res.output.content;\nprint(text); // 34\n\nfinal executableCode = res.metadata['executable_code'] as String;\nprint(executableCode);\n\nfinal codeExecutionResult = res.metadata['code_execution_result'] as Map<String, dynamic>;\nprint(codeExecutionResult);\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/mistralai.md",
    "content": "# ChatMistralAI\n\nWrapper around [Mistral AI](https://mistral.ai/) Chat Completions API.\n\nMistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production.\n\n> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai).\n\n## Setup\n\nTo use `ChatMistralAI` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/).\n\nThe following models are available at the moment:\n- `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct). It only works in English and obtains 7.6 on MT-Bench.\n- `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish and code and obtains 8.3 on MT-Bench.\n- `mistral-medium`: a prototype model, that is currently among the top serviced models available based on standard benchmarks. It masters English/French/Italian/German/Spanish and code and obtains a score of 8.6 on MT-Bench.\n\n## Usage\n\n```dart\nfinal chatModel = ChatMistralAI(\n  apiKey: 'apiKey',\n  defaultOptions: ChatMistralAIOptions(\n    model: 'mistral-small',\n    temperature: 0,\n  ),\n);\n\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);\nconst humanTemplate = '{text}';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages(\n  [systemMessagePrompt, humanMessagePrompt],\n);\n\nfinal chain = chatPrompt | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'aime la programmation.'\n```\n\n## Streaming\n\n```dart\nfinal promptTemplate = 
ChatPromptTemplate.fromPromptMessages([\n  SystemChatMessagePromptTemplate.fromTemplate(\n    'You are a helpful assistant that replies only with numbers '\n        'in order without any spaces or commas',\n  ),\n  HumanChatMessagePromptTemplate.fromTemplate(\n    'List the numbers from 1 to {max_num}',\n  ),\n]);\nfinal chat = ChatMistralAI(\n  apiKey: 'apiKey',\n  defaultOptions: ChatMistralAIOptions(\n    model: 'mistral-medium',\n    temperature: 0,\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chat).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 12\n// 345\n// 67\n// 89\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/ollama.md",
    "content": "# ChatOllama\n\nWrapper around [Ollama](https://ollama.ai) Completions API that enables to interact with the LLMs in a chat-like fashion.\n\nOllama allows you to run open-source large language models, such as Llama 3.2 or Gemma 2, locally.\n\nOllama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.\n\n## Setup\n\nFollow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n\n1. Download and install [Ollama](https://ollama.ai)\n2. Fetch a model via `ollama pull <model family>`\n  * e.g., for Llama 3: `ollama pull llama3.2`\n3. Instantiate the `ChatOllama` class with the downloaded model.\n\n```dart\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n  ),\n);\n```\n\nFor a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n\n### Ollama base URL\n\nBy default, `ChatOllama` uses 'http://localhost:11434/api' as base URL (default Ollama API URL). 
But if you are running Ollama on a different host, you can override it using the `baseUrl` parameter.\n\n```dart\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    baseUrl: 'https://your-remote-server-where-ollama-is-running.com',\n    model: 'llama3.2',\n  ),\n);\n```\n\n## Usage\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n  (ChatMessageType.human, '{text}'),\n]);\n\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n  ),\n);\n\nfinal chain = promptTemplate | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'La traduction est : \"J'aime le programming.'\n```\n\n### Streaming\n\nOllama supports streaming the output as the model generates it. \n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that replies only with numbers in order without any spaces or commas'),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\nfinal chat = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n  ),\n);\nfinal chain = promptTemplate.pipe(chat).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 456\n// 789\n```\n\n### Multimodal support\n\nOllama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n\nYou can provide several base64-encoded `png` or `jpeg` images. 
Images up to 100MB in size are supported.\n\n```dart\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llava',\n    temperature: 0,\n  ),\n);\nfinal prompt = ChatMessage.human(\n  ChatMessageContent.multiModal([\n    ChatMessageContent.text('What fruit is this?'),\n    ChatMessageContent.image(\n      data: base64.encode(\n        await File('./bin/assets/apple.jpeg').readAsBytes(),\n      ),\n    ),\n  ]),\n);\nfinal res = await chatModel.invoke(PromptValue.chat([prompt]));\nprint(res.output.content);\n// -> 'An Apple'\n```\n\n### Tool calling\n\n`ChatOllama` offers support for native tool calling. This enables a model to answer a given prompt using tool(s) it knows about, making it possible for models to perform more complex tasks or interact with the outside world. It follows the standard [LangChain.dart tools API](/modules/model_io/models/chat_models/how_to/tools.md), so you can use it in the same way as you would with other providers that support tool-calling (e.g. `ChatOpenAI`, `ChatAnthropic`, etc.).\n\n**Notes:**\n- Tool calling requires [Ollama 0.3.0](https://github.com/ollama/ollama/releases/tag/v0.3.0) or newer.\n- Streaming tool calls is not supported at the moment.\n- Not all models support tool calls. Check the Ollama catalogue for models that have the `Tools` tag (e.g. [`llama3.2`](https://ollama.com/library/llama3.2) or [`llama3-groq-tool-use`](https://ollama.com/library/llama3-groq-tool-use)).\n- At the moment, small models like `llama3.2` [cannot reliably maintain a conversation alongside tool calling definitions](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#llama-3.1-instruct). 
They can be used for zero-shot tool calling, but for multi-turn conversations it's recommended to use larger models like `llama3.2:70b` or `llama3.2:405b`.\n\n```dart\nconst tool = ToolSpec(\n  name: 'get_current_weather',\n  description: 'Get the current weather in a given location',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'location': {\n        'type': 'string',\n        'description': 'The city and country, e.g. San Francisco, US',\n      },\n    },\n    'required': ['location'],\n  },\n);\n\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n    tools: [tool],\n  ),\n);\n\nfinal res = await chatModel.invoke(\n  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n);\nprint(res.output.toolCalls);\n// [AIChatMessageToolCall{\n//   id: a621064b-03b3-4ca6-8278-f37504901034,\n//   name: get_current_weather,\n//   arguments: {location: Boston, US},\n// }, \n// AIChatMessageToolCall{\n//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,\n//   name: get_current_weather,\n//   arguments: {location: Madrid, ES},\n// }]\n```\n\nAs you can see, `ChatOllama` support calling multiple tools in a single request.\n\nIf you want to customize how the model should respond to tool calls, you can use the `toolChoice` parameter:\n\n```dart\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n    tools: [tool],\n    toolChoice: ChatToolChoice.forced(name: 'get_current_weather'),\n  ),\n);\n```\n\n**Pro-tip:** You can improve tool-calling performance of small models by using few-shot prompting. 
You can find out how to do this [here](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools?id=few-shot-prompting) and in this [blog post](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance).\n\n### JSON mode\n\nYou can force the model to produce JSON output that you can easily parse using `JsonOutputParser`, useful for extracting structured data.\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (ChatMessageType.system, 'You are an assistant that respond question using JSON format.'),\n  (ChatMessageType.human, '{question}'),\n]);\nfinal chat = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n    format: OllamaResponseFormat.json,\n  ),\n);\n\nfinal chain = Runnable.getMapFromInput<String>('question')\n    .pipe(promptTemplate)\n    .pipe(chat)\n    .pipe(JsonOutputParser());\n\nfinal res = await chain.invoke(\n  'What is the population of Spain, The Netherlands, and France?',\n);\nprint(res);\n// {Spain: 46735727, The Netherlands: 17398435, France: 65273538}\n```\n\n## Examples\n\n### Answering questions with data from an external API\n\nImagine you have an API that provides flight times between two cities:\n\n```dart\n// Simulates an API call to get flight times\n// In a real application, this would fetch data from a live database or API\nString getFlightTimes(String departure, String arrival) {\n  final flights = {\n    'NYC-LAX': {\n      'departure': '08:00 AM',\n      'arrival': '11:30 AM',\n      'duration': '5h 30m',\n    },\n    'LAX-NYC': {\n      'departure': '02:00 PM',\n      'arrival': '10:30 PM',\n      'duration': '5h 30m',\n    },\n    'LHR-JFK': {\n      'departure': '10:00 AM',\n      'arrival': '01:00 PM',\n      'duration': '8h 00m',\n    },\n    'JFK-LHR': {\n      'departure': '09:00 PM',\n      'arrival': '09:00 AM',\n      'duration': '7h 00m',\n    },\n    'CDG-DXB': {\n      'departure': '11:00 AM',\n      
'arrival': '08:00 PM',\n      'duration': '6h 00m',\n    },\n    'DXB-CDG': {\n      'departure': '03:00 AM',\n      'arrival': '07:30 AM',\n      'duration': '7h 30m',\n    },\n  };\n\n  final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}';\n  return jsonEncode(flights[key] ?? {'error': 'Flight not found'});\n}\n```\n\nUsing the tool calling capabilities of Ollama, we can provide the model with the ability to call this API whenever it needs to get flight times to answer a question.\n\n```dart\nconst getFlightTimesTool = ToolSpec(\n  name: 'get_flight_times',\n  description: 'Get the flight times between two cities',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'departure': {\n        'type': 'string',\n        'description': 'The departure city (airport code)',\n      },\n      'arrival': {\n        'type': 'string',\n        'description': 'The arrival city (airport code)',\n      },\n    },\n    'required': ['departure', 'arrival'],\n  },\n);\n\nfinal chatModel = ChatOllama(\n  defaultOptions: const ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n    tools: [getFlightTimesTool],\n  ),\n);\n\nfinal messages = [\n  ChatMessage.humanText(\n    'What is the flight time from New York (NYC) to Los Angeles (LAX)?',\n  ),\n];\n\n// First API call: Send the query and function description to the model\nfinal response = await chatModel.invoke(PromptValue.chat(messages));\n\nmessages.add(response.output);\n\n// Check if the model decided to use the provided function\nif (response.output.toolCalls.isEmpty) {\n  print(\"The model didn't use the function. 
Its response was:\");\n  print(response.output.content);\n  return;\n}\n\n// Process function calls made by the model\nfor (final toolCall in response.output.toolCalls) {\n  final functionResponse = getFlightTimes(\n    toolCall.arguments['departure'],\n    toolCall.arguments['arrival'],\n  );\n  // Add function response to the conversation\n  messages.add(\n    ChatMessage.tool(\n      toolCallId: toolCall.id,\n      content: functionResponse,\n    ),\n  );\n}\n\n// Second API call: Get final response from the model\nfinal finalResponse = await chatModel.invoke(PromptValue.chat(messages));\nprint(finalResponse.output.content);\n// The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes.\n```\n\n### Extracting structured data with tools\n\nA useful application of tool calling is extracting structured data from unstructured text. In the following example, we use a tool to extract the names, heights, and hair colors of people mentioned in a passage.\n\n```dart\nconst tool = ToolSpec(\n  name: 'information_extraction',\n  description: 'Extracts the relevant information from the passage',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'people': {\n        'type': 'array',\n        'items': {\n          'type': 'object',\n          'properties': {\n            'name': {\n              'type': 'string',\n              'description': 'The name of a person',\n            },\n            'height': {\n              'type': 'number',\n              'description': 'The height of the person in cm',\n            },\n            'hair_color': {\n              'type': 'string',\n              'description': 'The hair color of the person',\n              'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'],\n            },\n          },\n          'required': ['name', 'height', 'hair_color'],\n        },\n      },\n    },\n    'required': ['people'],\n  },\n);\n\nfinal model = ChatOllama(\n  defaultOptions: 
ChatOllamaOptions(\n    model: 'llama3.2',\n    temperature: 0,\n    tools: [tool],
Define the model to use and the vector store retriever\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(model: 'llama3.2'),\n);\nfinal retriever = vectorStore.asRetriever(\n  defaultOptions: VectorStoreRetrieverOptions(\n    searchType: VectorStoreSimilaritySearch(k: 1),\n  ),\n);\n\n// 4. Create a Runnable that combines the retrieved documents into a single string\nfinal docCombiner = Runnable.mapInput<List<Document>, String>((docs) {\n  return docs.map((final d) => d.pageContent).join('\\n');\n});\n\n// 4. Define the RAG pipeline\nfinal chain = Runnable.fromMap<String>({\n  'context': retriever.pipe(docCombiner),\n  'question': Runnable.passthrough(),\n}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser());\n\n// 5. Run the pipeline\nfinal res = await chain.invoke('Who created LangChain.dart?');\nprint(res);\n// Based on the context provided, David created LangChain.dart.\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/open_router.md",
    "content": "# OpenRouter\n\n[OpenRouter](https://openrouter.ai/) offers a unified OpenAI-compatible API for a broad range of [models](https://openrouter.ai/models).\n\nYou can also let users pay for their own models via their [OAuth PKCE](https://openrouter.ai/docs#oauth) flow.\n\nYou can consume OpenRouter API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.\n\nThe only difference is that you need to change the base URL to `https://openrouter.ai/api/v1`:\n\n```dart\nfinal chatModel = ChatOpenAI(\n  apiKey: openRouterApiKey,\n  baseUrl: 'https://openrouter.ai/api/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/mistral-small',\n  ),\n);\n```\n\nOpenRouter allows you to specify an optional `HTTP-Referer` header to identify your app and make it discoverable to users on openrouter.ai. You can also include an optional `X-Title` header to set or modify the title of your app.\n\n```dart\n final chatModel = ChatOpenAI(\n  apiKey: openRouterApiKey,\n  baseUrl: 'https://openrouter.ai/api/v1',\n  headers: {\n    'HTTP-Referer': 'com.myapp',\n    'X-Title': 'OpenRouterTest',\n  },\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/mistral-small',\n  ),\n);\n```\n\n## Invoke\n\n```dart\nfinal openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that translates {input_language} to {output_language}.',\n  ),\n  (ChatMessageType.human, '{text}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: openRouterApiKey,\n  baseUrl: 'https://openrouter.ai/api/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/mistral-small',\n  ),\n);\n\nfinal chain = promptTemplate | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 
'J'aime la programmation.'\n```\n\n## Stream\n\n```dart\nfinal openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that replies only with numbers '\n      'in order without any spaces or commas',\n  ),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: openRouterApiKey,\n  baseUrl: 'https://openrouter.ai/api/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/mistral-small',\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 456789\n```\n\n## Tool calling\n\nOpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). \n\nCheck the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools.\n\nIn the following example we use the `joke` tool to generate jokes. 
We stream the joke generation using the `ToolsOutputParser' which tries to \"auto-complete\" the partial json from each chunk into a valid state.\n\n```dart\nfinal openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\nconst tool = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline to the joke',\n      },\n    },\n    'required': ['location', 'punchline'],\n  },\n);\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'tell me a long joke about {foo}',\n);\nfinal chat = ChatOpenAI(\n  apiKey: openRouterApiKey,\n  baseUrl: 'https://openrouter.ai/api/v1',\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4o',\n    tools: [tool],\n    toolChoice: ChatToolChoice.forced(name: 'joke'),\n  ),\n);\nfinal outputParser = ToolsOutputParser();\n\nfinal chain = promptTemplate.pipe(chat).pipe(outputParser);\n\nfinal stream = chain.stream({'foo': 'bears'});\nawait for (final chunk in stream) {\n  final args = chunk.first.arguments;\n  print(args);\n}\n// {}\n// {setup: }\n// {setup: Why don't}\n// {setup: Why don't bears}\n// {setup: Why don't bears like fast food}\n// {setup: Why don't bears like fast food?, punchline: }\n// {setup: Why don't bears like fast food?, punchline: Because}\n// {setup: Why don't bears like fast food?, punchline: Because they can't}\n// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/openai.md",
    "content": "# OpenAI\n\nThis notebook provides a quick overview for getting started with [OpenAI](https://platform.openai.com/docs/introduction) chat models. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest/langchain_openai/ChatOpenAI-class.html).\n\nOpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [OpenAI docs](https://platform.openai.com/docs/models).\n\n> Note that certain OpenAI models can also be accessed via the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). Check out the API reference for more information on how to use the Azure with `ChatOpenAI`.\n\n## Setup\n\nTo access OpenAI models you'll need to create an OpenAI account, get an API key, and install the [langchain_openai](https://pub.dev/packages/langchain_openai) integration package.\n\n### Credentials\n\nHead to the [OpenAI Platform](https://platform.openai.com), sign up and get your [API key](https://platform.openai.com/account/api-keys).\n\n### Installation\n\nThe LangChain.dart OpenAI integration lives in the [langchain_openai](https://pub.dev/packages/langchain_openai) package:\n\n```yaml\ndart pub add langchain_openai\n```\n\n## Usage\n\n### Instantiation\n\nNow we can instantiate our model object and generate chat completions:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal chatModel = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4o'\n    temperature: 0,\n    // ...other options\n  ),\n);\n```\n\nIf you are using a proxy, you can override the base URL, headers, and other options:\n\n```dart\nfinal client = ChatOpenAI(\n  baseUrl: 'https://my-proxy.com',\n  headers: {'x-my-proxy-header': 'value'},\n);\n```\n\n### Invocation\n\nNow you can generate 
completions by calling the `invoke` method:\n\n```dart\nfinal messages = [\n  ChatMessage.system('You are a helpful assistant that translates English to French.'),\n  ChatMessage.humanText('I love programming.'),\n];\nfinal prompt = PromptValue.chat(messages);\nfinal res = await llm.invoke(prompt);\n// -> 'J'adore la programmation.'\n```\n\n### Chaining\n\nWe can chain our model with a prompt template or output parser to create a more complex pipeline:\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n  (ChatMessageType.human, '{text}'),\n]);\n\nfinal chain = promptTemplate | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'adore la programmation.'\n```\n\n### Streaming\n\nOpenAI models support [streaming](/expression_language/streaming.md) the output of th model as it is generated. 
\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system,\n    'You are a helpful assistant that replies only with numbers '\n      'in order without any spaces or commas',\n  ),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chat = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal chain = promptTemplate.pipe(chat).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 456\n// 789\n```\n\n### Multimodal support\n\nOpenAI's models have [vision capabilities](https://platform.openai.com/docs/guides/vision), meaning the models can take in images and answer questions about them.\n\nYou can send the image as a base64-encoded string:\n\n```dart\nfinal prompt = PromptValue.chat([\n  ChatMessage.system('You are a helpful assistant.'),\n  ChatMessage.human(\n    ChatMessageContent.multiModal([\n      ChatMessageContent.text('What fruit is this?'),\n      ChatMessageContent.image(\n        mimeType: 'image/jpeg',\n        data: '/9j/4AAQSkZJRgABAQAAAQABAAD...Rdu1j//2Q==', // base64-encoded image\n      ),\n    ]),\n  ),\n]);\n```\n\nOr you can send the URL where the image is hosted:\n\n```dart\nfinal prompt = PromptValue.chat([\n  ChatMessage.system('You are a helpful assistant.'),\n  ChatMessage.human(\n    ChatMessageContent.multiModal([\n      ChatMessageContent.text('What fruit is this?'),\n      ChatMessageContent.image(\n        data: 'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',\n      ),\n    ]),\n  ),\n]);\n```\n\n### Tool calling\n\nOpenAI has a [tool calling](/modules/model_io/models/chat_models/how_to/tools.md) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. 
tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n\n\n```dart\nconst tool = ToolSpec(\n  name: 'get_current_weather',\n  description: 'Get the current weather in a given location',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'location': {\n        'type': 'string',\n        'description': 'The city and country, e.g. San Francisco, US',\n      },\n    },\n    'required': ['location'],\n  },\n);\n\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(\n    model: 'gpt-4o'\n    temperature: 0,\n    tools: [tool],\n  ),\n);\n\nfinal res = await chatModel.invoke(\n  PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n);\nprint(res.output.toolCalls);\n// [AIChatMessageToolCall{\n//   id: a621064b-03b3-4ca6-8278-f37504901034,\n//   name: get_current_weather,\n//   arguments: {location: Boston, US},\n// }, \n// AIChatMessageToolCall{\n//   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,\n//   name: get_current_weather,\n//   arguments: {location: Madrid, ES},\n// }]\n```\n\nNotice that the returned `AIChatMessage` has a `toolCalls` field. This contains in a standardized tool call format that is model-provider agnostic.\n\nYou can also stream OpenAI tool calls. 
`ToolsOutputParser` is a useful tool for this case, as it concatenates the chunks progressively and tries to complete the partial JSON into a valid one:\n\n```dart\nconst tool = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline to the joke',\n      },\n    },\n    'required': ['location', 'punchline'],\n  },\n);\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'tell me a long joke about {foo}',\n);\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    tools: [tool],\n    toolChoice: ChatToolChoice.forced(name: 'joke'),\n  ),\n);\nfinal outputParser = ToolsOutputParser();\n\nfinal chain = promptTemplate.pipe(chat).pipe(outputParser);\n\nfinal stream = chain.stream({'foo': 'bears'});\nawait for (final chunk in stream) {\n  final args = chunk.first.arguments;\n  print(args);\n}\n// {}\n// {setup: }\n// {setup: Why don't}\n// {setup: Why don't bears}\n// {setup: Why don't bears like fast food}\n// {setup: Why don't bears like fast food?, punchline: }\n// {setup: Why don't bears like fast food?, punchline: Because}\n// {setup: Why don't bears like fast food?, punchline: Because they can't}\n// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}\n```\n\n### Structured Outputs\n\n[Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) is a feature that ensures the model will always generate responses that adhere to your supplied JSON Schema, so you don't need to worry about the model omitting a required key, or hallucinating an invalid enum value.\n\n```dart\nfinal prompt = PromptValue.chat([\n  ChatMessage.system(\n    'Extract the data of any companies mentioned in the '\n    'following statement. 
Return a JSON list.',\n  ),\n  ChatMessage.humanText(\n    'Google was founded in the USA, while Deepmind was founded in the UK',\n  ),\n]);\nfinal chatModel = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4o',\n    temperature: 0,\n    responseFormat: ChatOpenAIResponseFormat.jsonSchema(\n      ChatOpenAIJsonSchema(\n        name: 'Companies',\n        description: 'A list of companies',\n        strict: true,\n        schema: {\n          'type': 'object',\n          'properties': {\n            'companies': {\n              'type': 'array',\n              'items': {\n                'type': 'object',\n                'properties': {\n                  'name': {'type': 'string'},\n                  'origin': {'type': 'string'},\n                },\n                'additionalProperties': false,\n                'required': ['name', 'origin'],\n              },\n            },\n          },\n          'additionalProperties': false,\n          'required': ['companies'],\n        },\n      ),\n    ),\n  ),\n);\n\nfinal res = await chatModel.invoke(prompt);\n// {\n//   \"companies\": [\n//     {\n//       \"name\": \"Google\",\n//       \"origin\": \"USA\"\n//     },\n//     {\n//       \"name\": \"Deepmind\",\n//       \"origin\": \"UK\"\n//     }\n//   ]\n// }\n```\n\nWhen you use `strict: true`, the model outputs will match the supplied schema exactly. Mind that the strict mode only support a [subset of JSON schema](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) for performance reasons. Under-the-hood, OpenAI uses a technique known as constrained sampling or constrained decoding. For each JSON Schema, they compute a grammar that represents that schema, and pre-process its components to make it easily accessible during model sampling. This is why the first request with a new schema incurs a latency penalty. 
Typical schemas take under 10 seconds to process on the first request, but more complex schemas may take up to a minute.\n\n### JSON mode\n\nWhen [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode) is turned on, the model's output is ensured to be valid JSON. You can use it in combination with a `JsonOutputParser` to parse the response into a JSON map.\n\n> JSON mode is a more basic version of the Structured Outputs feature. While JSON mode ensures that model output is valid JSON, Structured Outputs reliably matches the model's output to the schema you specify. It is recommended that you use Structured Outputs if it is supported for your use case.\n\n```dart\nfinal prompt = PromptValue.chat([\n  ChatMessage.system(\n    \"Extract the 'name' and 'origin' of any companies mentioned in the \"\n        'following statement. Return a JSON list.',\n  ),\n  ChatMessage.humanText(\n    'Google was founded in the USA, while Deepmind was founded in the UK',\n  ),\n]);\nfinal llm = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    temperature: 0,\n    responseFormat: ChatOpenAIResponseFormat.jsonObject,\n  ),\n);\nfinal chain = llm.pipe(JsonOutputParser());\nfinal res = await chain.invoke(prompt);\nprint(res);\n// {\n//   \"companies\": [\n//     {\n//       \"name\": \"Google\",\n//       \"origin\": \"USA\"\n//     },\n//     {\n//       \"name\": \"Deepmind\",\n//       \"origin\": \"UK\"\n//     }\n//   ]\n// }\n```\n\n### Fine-tuning\n\nYou can call [fine-tuned OpenAI models](https://platform.openai.com/docs/guides/fine-tuning) by passing in your corresponding modelName parameter.\n\nThis generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. 
For example:\n\n```dart\nfinal chatModel = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR'\n  ),\n);\n```\n\n## API reference\n\nFor detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://pub.dev/documentation/langchain_openai/latest).\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/prem.md",
    "content": "# Prem App\n\nYou can easily run local models using [Prem app](https://www.premai.io/#PremApp). \nIt creates a local server that exposes a REST API with the same interface as \nthe OpenAI API.\n\n```dart\nconst localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL\nfinal chat = ChatOpenAI(baseUrl: localUrl);\n\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);\nconst humanTemplate = '{text}';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);\n\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]);\nfinal formattedPrompt = chatPrompt.formatPrompt({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n}).toChatMessages();\n\nfinal output = chat.predictMessages(formattedPrompt);\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/chat_models/integrations/together_ai.md",
    "content": "# Together AI\n\n[Together AI](https://www.together.ai) offers a unified OpenAI-compatible API for a broad range of [models](https://api.together.xyz/playground) running serverless or on your own dedicated instances.\n\nIt also allows to fine-tune models on your own data or train new models from scratch.\n\nYou can consume Together AI API using the `ChatOpenAI` wrapper in the same way you would use the OpenAI API.\n\nThe only difference is that you need to change the base URL to `https://api.together.xyz/v1`:\n\n```dart\nfinal chatModel = ChatOpenAI(\n  apiKey: togetherAiApiKey,\n  baseUrl: 'https://api.together.xyz/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/Mistral-7B-Instruct-v0.2',\n  ),\n);\n```\n\n## Invoke\n\n```dart\nfinal togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that translates {input_language} to {output_language}.',\n  ),\n  (ChatMessageType.human, '{text}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: togetherAiApiKey,\n  baseUrl: 'https://api.together.xyz/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/Mistral-7B-Instruct-v0.2',\n  ),\n);\n\nfinal chain = promptTemplate | chatModel | StringOutputParser();\n\nfinal res = await chain.invoke({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.',\n});\nprint(res);\n// -> 'J'aime programmer'\n```\n\n## Stream\n\n```dart\nfinal togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplates(const [\n  (\n  ChatMessageType.system,\n  'You are a helpful assistant that replies only with numbers '\n      'in order without any spaces or commas',\n  ),\n  (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n]);\n\nfinal chatModel = ChatOpenAI(\n  apiKey: togetherAiApiKey,\n  
baseUrl: 'https://api.together.xyz/v1',\n  defaultOptions: const ChatOpenAIOptions(\n    model: 'mistralai/Mistral-7B-Instruct-v0.2',\n  ),\n);\n\nfinal chain = promptTemplate.pipe(chatModel).pipe(StringOutputParser());\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 1\n// 2\n// 3\n// ...\n// 9\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/getting_started.md",
    "content": "# Getting Started\n\nOne of the core value props of LangChain is that it provides a standard interface to models.\n- Language Models: good for text generation.\n- Text Embedding Models: good for turning text into a numerical representation.\n\n## Language Models\n\nThere are two different sub-types of Language Models:\n- LLMs: these wrap APIs which take text in and return text.\n- ChatModels: these wrap models which take chat messages in and return a chat message.\n\nThis is a subtle difference, but a value proposition of LangChain is that we provide a unified \ninterface across these. This is nice because although the underlying APIs are actually quite \ndifferent, you often want to use them interchangeably.\n\nTo see this, let's look at `OpenAI` (a wrapper around OpenAI's LLM) vs `ChatOpenAI` (a wrapper \naround OpenAI's ChatModel).\n\nTo use LangChain you need to import the `langchain` package. As we are integrating with OpenAI, \nwe also need to import the `langchain_openai` package.\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n```\n\nWe can then instantiate the models:\n```dart\nfinal llm = OpenAI(apiKey: openaiApiKey);\nfinal chatModel = ChatOpenAI(apiKey: openaiApiKey);\n```\n\n### `text` -> `text` interface\n\n```dart\nfinal llmRes = await llm.generatePrompt(const StringPromptValue('say hi!'));\nprint(llmRes.generations.first.first.output);\n// -> 'Hi there! How can I help you?'\n\nfinal chatRes = await chatModel.generatePrompt(const StringPromptValue('say hi!'));\nprint(chatRes.generations.first.output);\n// -> AIChatMessage{content: 'Hello! 
How can I assist you today?'}\n```\n\nAs it is the main functionality of the LLM, we also provide a shorthand:\n````dart\nfinal llmRes = await llm('say hi!');\nprint(llmRes);\n// -> '\\n\\nHi there!'\n````\n\n### `messages` -> `messages` interface\n\n```dart\nfinal llmRes = await llm.generatePrompt(ChatPromptValue([ChatMessage.humanText('say hi!')]));\nprint(llmRes.generations.first.first.output);\n// -> 'Robot: Hi there!'\n\nfinal chatRes = await chatModel.generatePrompt(ChatPromptValue([ChatMessage.humanText('say hi!')]));\nprint(chatRes.generations.first.output);\n// -> AIChatMessage{content: 'Hello there! How may I assist you today?'}\n```\n\nAs it is the main functionality of the Chat model, we also provide a shorthand:\n```dart\nfinal chatRes = await chatModel([ChatMessage.humanText('say hi!')]);\nprint(chatRes); // [ChatMessage.ai(content: '\\n\\nHi there! How can I help you?')]\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/custom_llm.md",
    "content": "# Custom LLM\n\nThis tutorial goes over how to create a custom LLM wrapper, in case you want to\nuse your own LLM or a different wrapper than one that is supported in LangChain.\n\nTo create you custom LLM you have to:\n\n- Extend `SimpleLLM` (or `BaseLLM` if you need more control).\n- Implement the `callInternal()` method that takes in a string, some optional \n  stop words, and returns a string.\n\nLet's implement a very simple custom LLM that just returns the first N\ncharacters of the input.\n\n```dart\nclass CustomLLM extends SimpleLLM {\n  const CustomLLM({\n    required this.n,\n  });\n\n  final int n;\n\n  @override\n  String get modelType => 'custom';\n\n  @override\n  Future<String> callInternal(\n      final String prompt, {\n        final LLMOptions? options,\n      }) {\n    return Future<String>.value(prompt.substring(0, n));\n  }\n}\n```\n\nWe can now use this as an any other LLM.\n\n```dart\nconst llm = CustomLLM(n: 10);\nfinal result = await llm('This is a foobar thing');\nprint(result);\n// -> 'This is a '\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/fake_llm.md",
    "content": "# Fake LLMs\n\nWe expose some fake LLM classes that can be used for testing. This allows you \nto mock out calls to the LLM and simulate what would happen if the LLM \nresponded in a certain way.\n\n## FakeLLM\n\nYou can configure a list of responses that the LLM will return in order.\n\nExample:\n```dart\ntest('Test LLMChain call', () async {\n  final model = FakeLLM(responses: ['Hello world!']);\n  final prompt = PromptTemplate.fromTemplate('Print {foo}');\n  final chain = LLMChain(prompt: prompt, llm: model);\n  final res = await chain.call({'foo': 'Hello world!'});\n  expect(res['text'], 'Hello world!');\n  expect(res['foo'], 'Hello world!');\n});\n```\n\n### FakeEchoLLM\n\nThis LLM will simply echo back the input.\n\nExample:\n```dart\nconst llm = FakeEchoLLM();\nfinal result = await llm('Hello world!');\nprint(result);\n// -> 'Hello world!'\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/human_input_llm.md",
    "content": "# Human input LLM\n\nTODO\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/llm_caching.md",
    "content": "# Caching\n\nTODO\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/llm_serialization.md",
    "content": "# Serialization\n\nTODO\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/llm_streaming.md",
    "content": "# Streaming\n\nSome LLMs provide a streaming response. This means that instead of waiting for the entire response to be returned, you can start processing it as soon as it's available. This is useful if you want to display the response to the user as it's being generated, or if you want to process the response as it's being generated.\n\nExample usage:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal promptTemplate = PromptTemplate.fromTemplate(\n  'List the numbers from 1 to {max_num} in order without any spaces or commas',\n);\nfinal llm = OpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<LLMResult>();\n\nfinal chain = promptTemplate | llm | stringOutputParser;\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 45\n// 67\n// 89\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/how_to/token_usage_tracking.md",
    "content": "# Tracking token usage\n\nThis tutorial goes over how to track your token usage for specific calls. It is\ncurrently only implemented for the OpenAI API.\n\n```dart\nfinal openai = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\nfinal prompt = PromptValue.string('Tell me a joke');\nfinal result = await llm.invoke(prompt);\nfinal usage = result.usage;\nprint(usage?.promptTokens);   // 4\nprint(usage?.responseTokens); // 20\nprint(usage?.totalTokens);    // 24\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/integrations/gcp_vertex_ai.md",
    "content": "# GCP Vertex AI\n\nWrapper around [GCP Vertex AI text models](https://cloud.google.com/vertex-ai/docs/generative-ai/text/test-text-prompts) API (aka PaLM API for text).\n\n## Set up your Google Cloud Platform project\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n\n### Authentication\n\nTo create an instance of `VertexAI` you need to provide an HTTP client that handles authentication. The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package.\n\nTo create an instance of `VertexAI` you need to provide an [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) instance.\n\nThere are several ways to obtain an `AuthClient` depending on your use case. 
Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details.\n\nExample using a service account JSON:\n\n```dart\nfinal serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n  json.decode(serviceAccountJson),\n);\nfinal authClient = await clientViaServiceAccount(\n  serviceAccountCredentials,\n  [VertexAI.cloudPlatformScope],\n);\nfinal vertexAi = VertexAI(\n  httpClient: authClient,\n  project: 'your-project-id',\n);\n```\n\nThe service account should have the following [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions):\n- `aiplatform.endpoints.predict`\n\nThe required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes) is:\n- `https://www.googleapis.com/auth/cloud-platform` (you can use the constant `VertexAI.cloudPlatformScope`)\n\nSee: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control\n\n### Available models\n\n- `text-bison`\n  * Max input token: 8192\n  * Max output tokens: 1024\n  * Training data: Up to Feb 2023\n- `text-bison-32k`\n  * Max input and output tokens combined: 32k\n  * Training data: Up to Aug 2023\n\nThe previous list of models may not be exhaustive or up-to-date. Check out the [Vertex AI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models) for the latest list of available models.\n\n### Model options\n\nYou can define default options to use when calling the model (e.g. temperature, stop sequences, etc. 
) using the `defaultOptions` parameter.\n\nThe default options can be overridden when calling the model using the `options` parameter.\n\nExample:\n```dart\nfinal llm = VertexAI(\n  httpClient: authClient,\n  project: 'your-project-id',\n  defaultOptions: VertexAIOptions(\n    temperature: 0.9,\n  ),\n);\nfinal result = await llm(\n  'Hello world!',\n  options: VertexAIOptions(\n    temperature: 0.5,\n   ),\n);\n```\n\n### Full example\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nvoid main() async {\n  final openai = VertexAI(\n    httpClient: await _getAuthHttpClient(),\n    project: _getProjectId(),\n    defaultOptions: const VertexAIOptions(\n      temperature: 0.9,\n    ),\n  );\n  final result = await openai('Tell me a joke');\n  print(result);\n}\n\nFuture<AuthClient> _getAuthHttpClient() async {\n  final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n    json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n  );\n  return clientViaServiceAccount(\n    serviceAccountCredentials,\n    [VertexAI.cloudPlatformScope],\n  );\n}\n\nString _getProjectId() {\n  return Platform.environment['VERTEX_AI_PROJECT_ID']!;\n}\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/integrations/ollama.md",
    "content": "# Ollama\n\nWrapper around [Ollama](https://ollama.ai) Completions API.\n\nOllama allows you to run open-source large language models, such as Llama 3, locally.\n\nOllama bundles model weights, configuration, and data into a single package, defined by a Modelfile.\n\nIt optimizes setup and configuration details, including GPU usage.\n\nFor a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n\n## Setup\n\nFollow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n\n1. Download and install [Ollama](https://ollama.ai)\n2. Fetch a model via `ollama pull <model family>`\n  * e.g., for Llama 3: `ollama pull llama3.2`\n\n## Usage\n\n```dart\nfinal prompt = PromptTemplate.fromTemplate(\n  'What is a good name for a company that makes {product}?',\n);\nfinal llm = Ollama(\n  defaultOptions: OllamaOptions(\n    model: 'llama3.2',\n  ),\n);\nfinal chain = prompt | llm | StringOutputParser();\nfinal res = await chain.invoke({'product': 'colorful socks'});\nprint(res);\n// -> 'SoleMates'\n```\n\n## Streaming\n\n```dart\nfinal promptTemplate = PromptTemplate.fromTemplate(\n  'List the numbers from 1 to {max_num} in order without any spaces or commas',\n);\nfinal llm = Ollama(\n  defaultOptions: OllamaOptions(\n    model: 'llama3.2',\n  ),\n);\nfinal chain = promptTemplate | llm | StringOutputParser();\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 1\n// 2\n// 3\n// ..\n// 9\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/integrations/openai.md",
    "content": "# OpenAI\n\n[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of\nmodels with different levels of power suitable for different tasks.\n\nThis example goes over how to use LangChain to interact with\nOpenAI [models](https://platform.openai.com/docs/models).\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal prompt = PromptTemplate.fromTemplate(\n  'What is a good name for a company that makes {product}?',\n);\nfinal llm = OpenAI(apiKey: openaiApiKey);\n\nfinal chain = prompt | llm | StringOutputParser();\n\nfinal res = await chain.invoke({'product': 'colorful socks'});\nprint(res);\n// -> '\\n\\nSocktastic!'\n```\n\n## Streaming\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\nfinal promptTemplate = PromptTemplate.fromTemplate(\n  'List the numbers from 1 to {max_num} in order without any spaces or commas',\n);\nfinal llm = OpenAI(apiKey: openaiApiKey);\nconst stringOutputParser = StringOutputParser<LLMResult>();\n\nfinal chain = promptTemplate | llm | stringOutputParser;\n\nfinal stream = chain.stream({'max_num': '9'});\nawait stream.forEach(print);\n// 123\n// 45\n// 67\n// 89\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/llms/llms.md",
    "content": "# LLMs\n\nLarge Language Models (LLMs) are a core component of LangChain. LangChain does\nnot serve its own LLMs, but rather provides a standard interface for\ninteracting with many different LLMs.\n\nFor more detailed documentation check out our:\n- **How-to guides**: Walkthroughs of core functionality, like streaming, async,\n  etc.\n- **Integrations**: How to use different LLM providers (OpenAI, Anthropic, \n  etc.).\n\n## Get started\n\nThere are lots of LLM providers (OpenAI, Cohere, Hugging Face, etc) - the LLM \nclass is designed to provide a standard interface for all of them.\n\nIn this walkthrough we'll work with an OpenAI LLM wrapper, although the \nfunctionalities highlighted are generic for all LLM types.\n\n### Setup\n\nTo get started, follow\nthe [installation instructions](/get_started/installation.md) to install\nLangChain.\n\nUsing LangChain will usually require integrations with one or more model\nproviders, data stores, APIs, etc. For this example, we'll use OpenAI's model\nAPIs.\n\nFirst we'll need to add LangChain.dart OpenAI package:\n\n```yaml\ndependencies:\n  langchain: { version }\n  langchain_openai: { version }\n```\n\nAccessing the API requires an API key, which you can get by creating an account\nand heading [here](https://platform.openai.com/account/api-keys).\n\nThe library does not force you to use any specific key management strategy. You\njust need to pass the key on the `OpenAI` constructor:\n\n```dart\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nfinal llm = OpenAI(apiKey: openaiApiKey);\n```\n\n### LCEL\n\nLLMs implement the `Runnable` interface, the basic building block of the LangChain Expression Language (LCEL). 
This means they support `invoke`, `stream`, and `batch` calls.\n\n```dart\nfinal prompt = PromptValue.string('Tell me a joke');\nfinal result = await llm.invoke(prompt);\n// LLMGeneration(output='\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!')\n```\n\nThe response is a `LLMGeneration` object, which contains the output of the model, as well as other useful information like the amount of tokens used for the generation.\n\n```dart\nprint(result.usage?.totalTokens); // 641\n```\n\nYou can also access provider specific information that is returned.\nThis information is NOT standardized across providers.\n\n```dart\nprint(result.modelOutput);\n// {\n//   id: cmpl-7PZIWeUBRWBe4DoPQt3BEWyw3vxsF, \n//   created: 2023-06-09 18:30:40.000,\n//   model: text-ada-001\n// }\n```\n"
  },
  {
    "path": "docs/modules/model_io/models/models.md",
    "content": "# Language models\n\nLangChain provides interfaces and integrations for two types of models:\n\n- [LLMs](/modules/model_io/models/llms/llms.md): Models that take a text string\n  as input and return a text string.\n- [Chat models](/modules/model_io/models/chat_models/chat_models.md): Models\n  that are backed by a language model but take a list of Chat Messages as input\n  and return a Chat Message.\n\n## LLMs vs Chat Models\n\nLLMs and Chat Models are subtly but importantly different. LLMs in LangChain\nrefer to pure text completion models. The APIs they wrap take a string prompt\nas input and output a string completion. OpenAI's GPT-3 is implemented as an\nLLM. Chat models are often backed by LLMs but tuned specifically for having\nconversations. And, crucially, their provider APIs expose a different interface\nthan pure text completion models. Instead of a single string, they take a list\nof chat messages as input. Usually these messages are labeled with the speaker\n(usually one of \"System\", \"AI\", and \"Human\"). And they return a (\"AI\") chat\nmessage as output. GPT-4 and Anthropic's Claude are both implemented as Chat\nModels.\n\nTo make it possible to swap LLMs and Chat Models, both implement the Base\nLanguage Model interface. This exposes common methods \"predict\", which takes a\nstring and returns a string, and \"predict messages\", which takes messages and\nreturns a message. If you are using a specific model it's recommended you use\nthe methods specific to that model class (i.e., \"predict\" for LLMs and \"predict\nmessages\" for Chat Models), but if you're creating an application that should\nwork with different types of models the shared interface can be helpful.\n"
  },
  {
    "path": "docs/modules/model_io/output_parsers/json.md",
    "content": "# JSON output parser\n\nThe `JsonOutputParser` takes the output of the previous `Runnable` in the chain, converts it to a string, and then parses it as a JSON Map. \n\nThis is useful for extracting structured data from the output of the model. Or to use the output of one model as the input for the prompt template of another model.\n\nA lot of model providers support JSON mode, which ensures the model will respond with valid JSON.\n\n## Usage\n\nIn the following example, we use GPT-4 Turbo with JSON mode enabled to output a list of countries and their populations. We then use the `JsonOutputParser` to parse the JSON output into a Dart Map.\n\n```dart\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Output a list of the countries {countries} and their '\n      'populations in JSON format. Use a dict with an outer key of '\n      '\"countries\" which contains a list of countries. '\n      'Each country should have the key \"name\" and \"population\"',\n);\nfinal model = ChatOpenAI(\n  apiKey: openAiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    model: 'gpt-4-turbo',\n    responseFormat: ChatOpenAIResponseFormat.jsonObject,\n  ),\n);\nfinal parser = JsonOutputParser<ChatResult>();\n\nfinal chain = promptTemplate.pipe(model).pipe(parser);\n\nfinal res = await chain.invoke({\n  'countries': ['France', 'Spain', 'Japan'].join(', '),\n});\nprint(res);\n// {countries: [{name: France, population: 67413000}, {name: Spain, population: 47350000}, {name: Japan, population: 125584838}]}\n```\n\nThe `JsonOutputParser` also supports streaming. 
It converts the incomplete JSON chunks being emitted to valid JSON Maps incrementally.\n\nIf we just stream the output of the model without using the `JsonOutputParser`, we would get something like this:\n\n```dart\nfinal noJsonParser = promptTemplate.pipe(model).pipe(StringOutputParser());\nfinal stream1 = noJsonParser.stream({\n  'countries': ['France', 'Spain', 'Japan'].join(', '),\n});\nawait stream1.forEach((final chunk) => print('$chunk|'));\n// |\n// {\n// |\n// |\n// \"|\n// countries|\n// \":|\n// [\n// |\n// |\n// {\n// |\n// |\n// \"|\n// name|\n// \":|\n// \"|\n// France|\n// ...\n```\n\nWhich is not very useful, as we cannot parse those chunks into a valid JSON Map. However, if we use the `JsonOutputParser`, we get valid incremental JSON Maps: \n\n```dart\nfinal withJsonParser = promptTemplate.pipe(model).pipe(JsonOutputParser());\nfinal stream2 = withJsonParser.stream({\n  'countries': ['France', 'Spain', 'Japan'].join(', '),\n});\nawait stream2.forEach((final chunk) => print('$chunk|'));\n// {}|\n// {countries: []}|\n// {countries: [{name: France}]}|\n// {countries: [{name: France, population: 67390000}, {}]}|\n// {countries: [{name: France, population: 67390000}, {name: Spain}]}|\n// {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}]}|\n// {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}, {name: Japan}]}|\n// {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}, {name: Japan, population: 125360000}]}|\n```\n"
  },
  {
    "path": "docs/modules/model_io/output_parsers/output_parsers.md",
    "content": "# Output parsers\n\nLanguage models output text. But many times you may want to get more structured information than just text back. This is where output parsers come in.\n\nOutput parsers are classes that help structure language model responses.\n"
  },
  {
    "path": "docs/modules/model_io/output_parsers/string.md",
    "content": "# String output parser\n\nThe `StringOutputParser` takes the output of the previous `Runnable` in the chain and converts it into a String. \n\nIf the input is:\n- `null`, the parser returns an empty String.\n- A `LLMResult`, the parser returns the output String.\n- A `ChatResult`, the parser returns the content of the output message as a String.\n- A `ChatMessage`, the parser returns the content of the message as a String.\n- A `Document`, the parser returns the page content as a String.\n- Anything else, the parser returns the String representation of the input.\n\n## Usage\n\nHere we use the `StringOutputParser` to convert the output of a chat model into a string:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n'Tell me a joke about {topic}',\n);\n\nfinal chain = promptTemplate.pipe(model).pipe(StringOutputParser());\n\nfinal res = await chain.invoke({'topic': 'bears'});\nprint(res);\n// Why don't bears wear shoes? Because they have bear feet!\n```\n\nWe can also use it with streaming:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\n\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'Tell me a joke about {topic}',\n);\n\nfinal chain = promptTemplate | model | StringOutputParser();\n\nfinal stream = chain.stream({'topic': 'bears'});\n\nint count = 0;\nawait for (final res in stream) {\n  print('$count: $res');\n  count++;\n}\n// 0:\n// 1: Why\n// 2:  don\n// 3: 't\n// 4:  bears\n// 5:  like\n// 6:  fast\n// 7:  food\n// 8: ?\n// 9: Because\n// 10:  they\n// 11:  can\n// 12: 't\n// 13:  catch\n// 14:  it\n// 15: !\n```\n"
  },
  {
    "path": "docs/modules/model_io/output_parsers/tools.md",
    "content": "# Tools output parsers\n\nThe tools output parser is a useful tool for parsing tool calls responses from models that support tool calling (aka function calling models) such as OpenAI, Google AI or Mistral.\n\nLet's consider the following chain:\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nconst tool = ToolSpec(\n  name: 'joke',\n  description: 'A joke',\n  inputJsonSchema: {\n    'type': 'object',\n    'properties': {\n      'setup': {\n        'type': 'string',\n        'description': 'The setup for the joke',\n      },\n      'punchline': {\n        'type': 'string',\n        'description': 'The punchline to the joke',\n      },\n    },\n    'required': ['location', 'punchline'],\n  },\n);\nfinal promptTemplate = ChatPromptTemplate.fromTemplate(\n  'tell me a long joke about {foo}',\n);\nfinal chat = ChatOpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: ChatOpenAIOptions(\n    temperature: 0,\n  ),\n).bind(\n  ChatOpenAIOptions(\n    tools: [tool],\n    toolChoice: ChatToolChoice.forced(name: 'joke'),\n  ),\n);\nfinal outputParser = ToolsOutputParser();\nfinal chain = promptTemplate.pipe(chat).pipe(outputParser);\nfinal res = await chain.invoke({'foo': 'bears'});\nprint(res);\n// [ParsedToolCall{\n//   id: call_5TU1iYgYO3Z81eAuTe7J23f7,\n//   name: joke,\n//   arguments: {\n//     setup: Why don't bears like fast food restaurants?, \n//     punchline: Because they can't bear the wait!\n//   },\n// }]\n```\n\nIt can also be used with streaming. 
The output parser attempts to “auto-complete” the partial json from each chunk into a valid state:\n\n```dart\nfinal stream = chain.stream({'foo': 'bears'});\nawait for (final res in stream) {\n    final args = res.first.arguments;\n    print(args);\n}\n// {}\n// {setup: }\n// {setup: Why}\n// {setup: Why don't}\n// {setup: Why don't bears}\n// {setup: Why don't bears like}\n// {setup: Why don't bears like fast food}\n// {setup: Why don't bears like fast food restaurants?}\n// {setup: Why don't bears like fast food restaurants?, punchline: }\n// {setup: Why don't bears like fast food restaurants?, punchline: Because}\n// {setup: Why don't bears like fast food restaurants?, punchline: Because they can't}\n// {setup: Why don't bears like fast food restaurants?, punchline: Because they can't catch}\n// {setup: Why don't bears like fast food restaurants?, punchline: Because they can't catch them!}\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/example_selectors/example_selectors.md",
    "content": "# Example selectors\n\nThis functionality is still not implemented in LangChain.dart. \n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.md",
    "content": "# Connecting to a Feature Store\n\nTODO\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/custom_prompt_template.md",
    "content": "# Custom prompt template\n\nLet's suppose we want the LLM to generate English language explanations of a\nfunction given its name. To achieve this task, we will create a custom prompt\ntemplate that takes in the function name as input, and formats the prompt\ntemplate to provide the source code of the function.\n\n## Why are custom prompt templates needed?\n\nLangChain provides a set of default prompt templates that can be used to\ngenerate prompts for a variety of tasks. However, there may be cases where the\ndefault prompt templates do not meet your needs. For example, you may want to\ncreate a prompt template with specific dynamic instructions for your language\nmodel. In such cases, you can create a custom prompt template.\n\nTake a look at the current set of default prompt\ntemplates [here](/modules/model_io/prompts/prompt_templates/prompt_templates.md).\n\n## Creating a Custom Prompt Template\n\nThere are essentially two distinct prompt templates available - string prompt\ntemplates and chat prompt templates. String prompt templates provides a simple\nprompt in string format, while chat prompt templates produces a more structured\nprompt to be used with a chat API.\n\nIn this guide, we will create a custom prompt using a string prompt template.\n\nTo create a custom string prompt template, there are two requirements:\n\n1. It has an `inputVariables` attribute that exposes what input variables the\n   prompt template expects.\n2. It exposes a format method that takes in keyword arguments corresponding to\n   the expected `inputVariables` and returns the formatted prompt.\n\nWe will create a custom prompt template that takes in the name of a function\nand formats a prompt to ask the language model to provide an explanation of the\nfunction.\n\nTo achieve this, let's first create a function that will return the source code\nof a function given its name.\n\n```dart\n/// Get the source code of the function (e.g. 
using GitHub API).\nString getSourceCode(final String functionName) {\n   // For the example, we just return a hardcoded function source code\n   return '''\nString helloWorld() {\n  return 'Hello world!';\n}\n''';\n}\n```\n\nNext, we'll create a custom prompt template that takes in the function name as\ninput, and formats the prompt template to provide the source code of the\nfunction.\n\n```dart\n/// A custom prompt template that takes in the function name as input, and\n/// formats the prompt template to provide the source code of the function\n/// to the language model and ask it to explain the function.\nfinal class FunctionExplainerPromptTemplate extends BaseStringPromptTemplate {\n   const FunctionExplainerPromptTemplate()\n           : super(inputVariables: const {_functionNameInputKey});\n\n   static const _functionNameInputKey = 'function_name';\n\n   @override\n   String format([final InputValues values = const {}]) {\n      final functionName = values[_functionNameInputKey];\n      final sourceCode = getSourceCode(functionName);\n      final prompt = '''\nGiven the function name and source code, generate an English language explanation of the function.\nFunction Name: $functionName\nSource Code:\n$sourceCode\nExplanation:\n    ''';\n      return prompt;\n   }\n\n   @override\n   BasePromptTemplate copyWith({\n      final Set<String>? inputVariables,\n      final Map<String, dynamic>? 
partialVariables,\n   }) {\n      return const FunctionExplainerPromptTemplate();\n   }\n\n   @override\n   String get type => 'function-explainer';\n}\n```\n\n## Use the custom prompt template\n\nNow that we have created a custom prompt template, we can use it to generate\nprompts for our task.\n\n```dart\nfinal openai = OpenAI(\n  apiKey: openaiApiKey,\n  defaultOptions: const OpenAIOptions(temperature: 0.9),\n);\n\nconst fnExplainer = FunctionExplainerPromptTemplate();\nfinal prompt = fnExplainer.formatPrompt({\n   FunctionExplainerPromptTemplate._functionNameInputKey: 'helloWorld',\n});\n\nfinal result = await openai.generatePrompt(prompt);\nprint(result.firstOutputAsString);\n// -> 'This function is named helloWorld and it returns a string with the \n// phrase \"Hello world!\".'\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/few_shot_examples.md",
    "content": "# Few-shot prompt templates\n\nThis functionality is still not implemented in LangChain.dart. You can follow\nthe progress of this feature in \n[this issue](https://github.com/davidmigloz/langchain_dart/issues/9).\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/format_output.md",
    "content": "# Format output\n\nThe output of the format method is available as `String`, `List<ChatMessages>` \nand `ChatPromptValue`.\n\nAs `String`:\n\n```dart\nfinal formattedPrompt = chatPrompt.formatPrompt({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n}).toString();\nprint(formattedPrompt);\n// -> System: You are a helpful assistant that translates English to French.\n//    Human: I love programming.\n```\n\nAs `List<ChatMessages>`:\n\n```dart\nfinal formattedPrompt = chatPrompt.formatPrompt({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n}).toChatMessages();\nprint(formattedPrompt);\n// -> [SystemChatMessage{content: You are a helpful assistant that translates English to French.}, \n//     HumanChatMessage{content: I love programming., example: false}]\n```\n\nAs `ChatPromptValue`:\n\n```dart\nfinal formattedPrompt = chatPrompt.formatPrompt({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n});\nprint(formattedPrompt);\n// -> ChatPromptValue{messages: \n//      [SystemChatMessage{content: You are a helpful assistant that translates English to French.}, \n//       HumanChatMessage{content: I love programming., example: false}]}\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/formats.md",
    "content": "# Template formats\n\nCurrently, `PromptTemplate` only supports Python's \n[formatted string literal](https://docs.python.org/3/reference/lexical_analysis.html#f-strings) \n(f-string) format.\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/msg_prompt_templates.md",
    "content": "# Types of ChatMessagePromptTemplate\n\nLangChain provides different types of `ChatMessagePromptTemplate`. The most commonly used are \n`AIChatMessagePromptTemplate`, `SystemChatMessagePromptTemplate` and \n`HumanChatMessagePromptTemplate`, which create an AI message, system message and human message \nrespectively.\n\nHowever, in cases where the chat model supports taking chat message with arbitrary role, you can \nuse `CustomChatMessagePromptTemplate`, which allows user to specify the role name.\n\n```dart\nconst prompt = 'May the {subject} be with you';\nfinal chatMessagePrompt = CustomChatMessagePromptTemplate.fromTemplate(\n  prompt,\n  role: 'Jedi',\n);\nfinal formattedPrompt = chatMessagePrompt.format({'subject': 'force'});\nprint(formattedPrompt);\n// -> CustomChatMessage{content: May the force be with you, role: Jedi}\n```\n\nLangChain also provides `MessagesPlaceholder`, which gives you full control of what messages to be \nrendered during formatting. This can be useful when you are uncertain of what role you should be \nusing for your message prompt templates or when you wish to insert a list of messages during \nformatting.\n\n```dart\nfinal humanMessageTemplate = HumanChatMessagePromptTemplate.fromTemplate(\n  'Summarize our conversation so far in {word_count} words.',\n);\n\nfinal chatPromptTemplate = ChatPromptTemplate.fromPromptMessages([\n  MessagesPlaceholder(variableName: 'conversation'),\n  humanMessageTemplate,\n]);\n\nfinal humanMessage = ChatMessage.humanText(\n  'What is the best way to learn programming?',\n);\nfinal aiMessage = ChatMessage.ai('''\n1. Choose a programming language: Decide on a programming language that you want to learn. \n\n2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.\n\n3. 
Practice, practice, practice: The best way to learn programming is through hands-on experience\n    ''');\n\nfinal promptValue = chatPromptTemplate.formatPrompt(\n  {\n    'conversation': [humanMessage, aiMessage],\n    'word_count': 10,\n  },\n);\nfinal formattedPrompt = promptValue.toChatMessages();\nprint(formattedPrompt);\n// [HumanChatMessage{content: What is the best way to learn programming?},\n//  AIChatMessage{\n//   content: 1. Choose a programming language: Decide on a programming language\n//               that you want to learn.\n//            2. Start with the basics: Familiarize yourself with the basic\n//               programming concepts such as variables, data types and control structures.\n//            3. Practice, practice, practice: The best way to learn\n//               programming is through hands-on experience},\n//  HumanChatMessage{content: Summarize our conversation so far in 10 words.}]\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/partial.md",
    "content": "# Partial prompt templates\n\nLike other methods, it can make sense to \"partial\" a prompt template - eg pass\nin a subset of the required values, as to create a new prompt template which\nexpects only the remaining subset of values.\n\nLangChain supports this in two ways:\n\n- Partial formatting with string values.\n- Partial formatting with functions that return string values.\n\nThese two different ways support different use cases. In the examples below, we\ngo over the motivations for both use cases as well as how to do it in LangChain.\n\n## Partial With Strings\n\nOne common use case for wanting to partial a prompt template is if you get some\nof the variables before others. For example, suppose you have a prompt template\nthat requires two variables, foo and baz. If you get the foo value early on in\nthe chain, but the baz value later, it can be annoying to wait until you have\nboth variables in the same place to pass them to the prompt template. Instead,\nyou can partial the prompt template with the foo value, and then pass the\npartialed prompt template along and just use that. Below is an example of doing\nthis:\n\n```dart\n\nfinal prompt = PromptTemplate.fromTemplate('{foo}{bar}');\nfinal partialPrompt = prompt.partial({'foo': 'foo'});\n\nfinal res = partialPrompt.format({'bar': 'baz'});\n\nprint(res);\n// -> 'foobaz'\n```\n\nYou can also just initialize the prompt with the partialed variables.\n\n```dart\n\nfinal prompt = PromptTemplate.fromTemplate(\n  '{foo}{bar}',\n  partialVariables: const {'foo': 'foo'},\n);\nfinal res = prompt.format({'bar': 'baz'});\n\nprint(res);\n// -> 'foobaz'\n```\n\n## Partial With Functions\n\nThis functionality is still not implemented in LangChain.dart.\nYou can follow the progress of this feature\nin [this issue](https://github.com/davidmigloz/langchain_dart/issues/51).\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/prompt_composition.md",
    "content": "# Composition\n\nThis tutorial goes over how to compose multiple prompts together. This can be\nuseful when you want to reuse parts of prompts. This can be done with a\nPipelinePrompt. A PipelinePrompt consists of two main parts:\n\n- Final prompt: This is the final prompt that is returned.\n- Pipeline prompts: This is a list of tuples, consisting of a string name and a\n  prompt template. Each prompt template will be formatted and then passed to\n  future prompt templates as a variable with the same name.\n\n```dart\nconst fullTemplate = '''\n{introduction}\n\n{example}\n\n{start}\n''';\n\nfinal fullPrompt = PromptTemplate.fromTemplate(fullTemplate);\n\nconst introductionTemplate = 'You are impersonating {person}.';\nfinal introductionPrompt = PromptTemplate.fromTemplate(introductionTemplate);\n\nconst exampleTemplate = '''\nHere's an example of an interaction: \n\nQ: {example_q}\nA: {example_a}''';\nfinal examplePrompt = PromptTemplate.fromTemplate(exampleTemplate);\n\nconst startTemplate = '''\nNow, do this for real!\n\nQ: {input}\nA:''';\nfinal startPrompt = PromptTemplate.fromTemplate(startTemplate);\n\nfinal inputPrompts = [\n  ('introduction', introductionPrompt),\n  ('example', examplePrompt),\n  ('start', startPrompt)\n];\nfinal pipelinePrompt = PipelinePromptTemplate(\n  finalPrompt: fullPrompt,\n  pipelinePrompts: inputPrompts,\n);\n\nprint(pipelinePrompt.inputVariables);\n// -> {person, example_q, example_a, input}\n\nfinal formatted = pipelinePrompt.format({\n  'person': 'Elon Musk',\n  'example_q': \"What's your favorite car?\",\n  'example_a': 'Telsa',\n  'input': \"What's your favorite social media site?\",\n});\nprint(formatted);\n// '''You are impersonating Elon Musk.\n// \n// Here's an example of an interaction: \n// \n// Q: What's your favorite car?\n// A: Telsa\n// \n// Now, do this for real!\n// \n// Q: What's your favorite social media site?\n// A:'''\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/prompt_serialization.md",
    "content": "# Serialization\n\nIt is often preferable to store prompts not as Dart code but as files. This\ncan make it easy to share, store, and version prompts. This tutorial covers how\nto do that in LangChain, walking through all the different types of prompts and\nthe different serialization options.\n\n## PromptTemplate\n\nThis section covers examples for loading a `PromptTemplate`.\n\n### Loading Template from a File\n\nThis shows an example of storing the template in a separate file and then\nreferencing it in the config.\n\n`simple_template.txt`:\n```txt\nTell me a {adjective} joke about {content}.\n```\n\nTo load `simple_template.txt` as a `PromptTemplate`:\n\n```dart\nfinal prompt = await PromptTemplate.fromFile(templateFile);\nprint(prompt.format({'adjective': 'funny', 'content': 'chickens'}));\n// -> 'Tell me a funny joke about chickens.'\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/prompt_templates.md",
    "content": "# Prompt templates\n\nLanguage models take text as input - that text is commonly referred to as a \nprompt. Typically this is not simply a hardcoded string but rather a \ncombination of a template, some examples, and user input. LangChain provides \nseveral classes and functions to make constructing and working with prompts \neasy.\n\n## What is a prompt template?\n\nA prompt template refers to a reproducible way to generate a prompt. It \ncontains a text string (\"the template\"), that can take in a set of parameters \nfrom the end user and generates a prompt.\n\nThe prompt template may contain:\n- Instructions to the language model.\n- A set of few shot examples to help the language model generate a better \n  response.\n- A question to the language model.\n\nHere's the simplest example:\n\n```dart\nconst template = '''\nI want you to act as a naming consultant for new companies.\nWhat is a good name for a company that makes {product}?\n''';\nfinal promptTemplate = PromptTemplate.fromTemplate(template);\nfinal prompt = promptTemplate.format({'product': 'colorful socks'});\nprint(prompt);\n// 'I want you to act as a naming consultant for new companies.\n// What is a good name for a company that makes colorful socks?'\n```\n\n## Create a prompt template\n\nYou can create simple hardcoded prompts using the `PromptTemplate` class. 
Prompt templates can take \nany number of input variables, and can be formatted to generate a prompt.\n\n```dart\n// An example prompt with no input variables\nfinal noInputPrompt = PromptTemplate(\n  inputVariables: const {},\n  template: 'Tell me a joke.',\n);\nprint(noInputPrompt.format());\n// -> 'Tell me a joke.'\n\n// An example prompt with one input variable\nfinal oneInputPrompt = PromptTemplate(\n  inputVariables: const {'adjective'},\n  template: 'Tell me a {adjective} joke.',\n);\nprint(oneInputPrompt.format({'adjective': 'funny'}));\n// -> 'Tell me a funny joke.'\n\n// An example prompt with multiple input variables\nfinal multipleInputPrompt = PromptTemplate(\n  inputVariables: const {'adjective', 'content'},\n  template: 'Tell me a {adjective} joke about {content}.',\n);\nprint(multipleInputPrompt.format({'adjective': 'funny', 'content': 'AI'}));\n// -> 'Tell me a funny joke about AI.'\n```\n\nIf you do not wish to specify `inputVariables` manually, you can also create a `PromptTemplate` \nusing `PromptTemplate.fromTemplate` factory constructor. LangChain will automatically infer the \n`inputVariables` based on the template passed.\n\n```dart\nconst template = 'Tell me a {adjective} joke about {content}.';\nfinal promptTemplate = PromptTemplate.fromTemplate(template);\nprint(promptTemplate.inputVariables);\n// -> {'adjective', 'content'}\nprint(promptTemplate.format({'adjective': 'funny', 'content': 'chickens'}));\n// -> 'Tell me a funny joke about chickens.'\n```\n\nYou can create custom prompt templates that format the prompt in any way you want. \nFor more information, see \n[Custom Prompt Templates](/modules/model_io/prompts/prompt_templates/custom_prompt_template).\n\n## Chat prompt template\n\n[Chat Models](/modules/model_io/models/chat_models/chat_models.md) take a list \nof chat messages as input - this list commonly referred to as a prompt. 
These \nchat messages differ from raw string (which you would pass into an LLM model) in \nthat every message is associated with a role.\n\nFor example, in \n[OpenAI Chat Completion API](https://platform.openai.com/docs/guides/gpt/chat-completions-api), a \nchat message can be associated with the AI, human or system role. The model is \nsupposed to follow instructions from the system chat message more closely.\n\nLangChain provides several prompt templates to make constructing and working \nwith prompts easy. You are encouraged to use these chat related prompt \ntemplates instead of `PromptTemplate` when querying chat models to fully \nexploit the potential of underlying chat model.\n\nTo create a message template associated with a role, you use \n`ChatMessagePromptTemplate`.\n\nFor convenience, there is a `fromTemplate()` method exposed on the template. If \nyou were to use this template, this is what it would look like:\n\n```dart\nconst template = 'You are a helpful assistant that translates {input_language} to {output_language}.';\nfinal systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(template);\nconst humanTemplate = '{text}';\nfinal humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(humanTemplate);\n```\n\nIf you wanted to construct the `MessagePromptTemplate` more directly, you could \ncreate a `PromptTemplate` outside and then pass it in, eg:\n\n```dart\nfinal prompt = PromptTemplate.fromTemplate(\n  'You are a helpful assistant that translates {input_language} to {output_language}.',\n);\nfinal systemMessagePrompt2 = SystemChatMessagePromptTemplate(prompt: prompt);\n```\n\nAfter that, you can build a `ChatPromptTemplate` from one or \nmore `ChatMessagePromptTemplate`s. 
You can use `ChatPromptTemplate`’s \n`formatPrompt()` – this returns a `PromptValue`, which you can convert to a \nstring or `ChatMessage` object, depending on whether you want to use the \nformatted value as input to an LLM or Chat model.\n\n```dart\nfinal chatPrompt = ChatPromptTemplate.fromPromptMessages([systemMessagePrompt, humanMessagePrompt]);\nfinal formattedPrompt = chatPrompt.formatPrompt({\n  'input_language': 'English',\n  'output_language': 'French',\n  'text': 'I love programming.'\n}).toChatMessages();\nprint(formattedPrompt);\n// -> [SystemChatMessage{content: You are a helpful assistant that translates English to French.}, \n//     HumanChatMessage{content: I love programming., example: false}]\n\nfinal chatRes = await chat(formattedPrompt);\nprint(chatRes);\n// -> AIChatMessage{content: J'adore la programmation., example: false}\n```\n"
  },
  {
    "path": "docs/modules/model_io/prompts/prompt_templates/validate.md",
    "content": "# Validate template\n\nThe default constructor of `PromptTemplate` will not validate the template \nstring. If you want to validate you can call `validateTemplate()`. It will \ncheck whether the `inputVariables` match the variables defined in `template`. \n\n```dart\nconst template = 'I am learning langchain because {reason}.';\n\nfinal promptTemplate = PromptTemplate(\n  inputVariables: const {'reason', 'foo'},\n  template: template,\n  validateTemplate: false,\n);\n// -> No exception\n\nfinal promptTemplate1 = PromptTemplate(\n  inputVariables: const {'reason', 'foo'},\n  template: template,\n);\npromptTemplate1.validateTemplate();\n// -> 'Exception: Invalid template: 1 variables found, but 2 expected.}'\n\n\n```\n\nThe factory constructors `fromTemplate()` and `fromExamples()` will validate the\ntemplate string by default (you don't need to explicitly call \n`validateTemplate()`). You can disable this behavior by setting \n`validateTemplate` to `false`.\n\n```dart\nconst template = 'I am learning langchain because {reason}.';\n\nfinal promptTemplate = PromptTemplate.fromTemplate(template);\n// -> 'Exception: Invalid template: 1 variables found, but 2 expected.}'\n\nfinal promptTemplate1 = PromptTemplate.fromTemplate(\n  template,\n  validateTemplate: false,\n);\n// -> No exception\n```\n\n_Note: the reason why the default constructor does not validate the template by\ndefault is because that allows to define it as `const` constructor._ "
  },
  {
    "path": "docs/modules/model_io/prompts/prompts.md",
    "content": "# Prompts\n\nA prompt for a language model is a set of instructions or input provided by a user to guide the model's response, helping it understand the context and generate relevant and coherent language-based output, such as answering questions, completing sentences, or engaging in a conversation.\n\nLangChain provides several classes and functions to make constructing and working with prompts easy.\n\n- [Prompt templates](/modules/model_io/prompts/prompt_templates/prompt_templates.md): parametrize model inputs.\n- [Example selectors](/modules/model_io/prompts/example_selectors/example_selectors.md): dynamically select examples to include in prompts.\n"
  },
  {
    "path": "docs/modules/modules.md",
    "content": "# Modules\n\nLangChain provides standard, extendable interfaces and external integrations for\nthe following modules, listed from least to most complex:\n\n**[Model I/O](/modules/model_io/models/models.md)**  \nInterface with language models.\n\n**[Retrieval](/modules/retrieval/retrieval.md)**  \nInterface with application-specific data.\n\n**[Chains](/modules/chains/chains.md)**  \nConstruct sequences of calls.\n\n**[Agents]()**  \nLet chains choose which tools to use given high-level directives.\n\n**[Memory](/modules/memory/memory.md)**  \nPersist application state between runs of a chain.\n\n**[Callbacks]()**  \nLog and stream intermediate steps of any chain.\n"
  },
  {
    "path": "docs/modules/retrieval/document_loaders/document_loaders.md",
    "content": "# Document loaders\n\nUse document loaders to load data from a source as `Document`'s. A `Document` is\na piece of text and associated metadata. For example, there are document loaders\nfor loading a simple .txt file, for loading the text contents of any web page,\nor even for loading a transcript of a YouTube video.\n\nDocument loaders expose two methods:\n\n- `lazyLoad()`: returns a `Stream` of `Document`'s. This is useful for loading\n  large amounts of data, as it allows you to process each `Document` as it is\n  loaded, rather than waiting for the entire data set to be loaded in memory.\n- `load()`: returns a list of `Document`'s. Under the hood, this method calls\n  `lazyLoad()` and collects the results into a list. Use this method only with\n  small data sets.\n\n## Get started\n\nThe simplest loader reads in a file as text and places it all into one\n`Document`.\n\n```dart\n\nconst filePath = 'example.txt';\nconst loader = TextLoader(filePath);\nfinal docs = await loader.load();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_loaders/how_to/directory.md",
    "content": "# Directory\n\nUse `DirectoryLoader` to load `Document`s from multiple files in a directory with extensive customization options.\n\n## Overview\n\nThe `DirectoryLoader` is a versatile document loader that allows you to load documents from a directory with powerful filtering, sampling, and customization capabilities. It supports multiple file types out of the box and provides extensive configuration options.\n\n## Basic Usage\n\n```dart\n// Load all text files from a directory recursively\nfinal loader = DirectoryLoader(\n  '/path/to/documents',\n  glob: '*.txt',\n  recursive: true,\n);\nfinal documents = await loader.load();\n```\n\n## Constructor Parameters\n\n### `filePath` (required)\n- Type: `String`\n- Description: The path to the directory containing documents to load.\n\n### `glob`\n- Type: `String`\n- Default: `'*'` (all files)\n- Description: A glob pattern to match files. Only files matching this pattern will be loaded.\n- Examples:\n  ```dart\n  // Load only JSON and text files\n  DirectoryLoader('/path', glob: '*.{txt,json}')\n  \n  // Load files starting with 'report'\n  DirectoryLoader('/path', glob: 'report*')\n  ```\n\n### `recursive`\n- Type: `bool`\n- Default: `true`\n- Description: Whether to search recursively in subdirectories.\n\n### `exclude`\n- Type: `List<String>`\n- Default: `[]`\n- Description: Glob patterns to exclude from loading.\n- Example:\n  ```dart\n  DirectoryLoader(\n    '/path',\n    exclude: ['*.tmp', 'draft*'],\n  )\n  ```\n\n### `loaderMap`\n- Type: `Map<String, BaseDocumentLoader Function(String)>`\n- Default: `DirectoryLoader.defaultLoaderMap`\n- Description: A map to customize loaders for different file types.\n- Default Supported Types:\n  - `.txt`: TextLoader\n  - `.json`: JsonLoader (with root schema)\n  - `.csv` and `.tsv`: CsvLoader\n- Example of extending loaders:\n  ```dart\n  final loader = DirectoryLoader(\n    '/path/to/docs',\n    loaderMap: {\n      // Add a custom loader for XML files\n      
'.xml': (path) => CustomXmlLoader(path),\n      \n      // Combine with default loaders\n      ...DirectoryLoader.defaultLoaderMap,\n    },\n  );\n  ```\n\n### `loadHidden`\n- Type: `bool`\n- Default: `false`\n- Description: Whether to load hidden files.\n- Platform Specific: \n  - On Unix-like systems (Linux, macOS): Identifies hidden files by names starting with '.'\n  - On Windows: May not work as expected due to different hidden file conventions\n  - Recommended to use platform-specific checks for comprehensive hidden file handling across different operating systems\n- Example of platform-aware hidden file checking:\n  ```dart\n  import 'dart:io' show Platform;\n\n  bool isHiddenFile(File file) {\n    if (Platform.isWindows) {\n      // Windows-specific hidden file check\n      return (File(file.path).statSync().modeString().startsWith('h'));\n    } else {\n      // Unix-like systems\n      return path.basename(file.path).startsWith('.');\n    }\n  }\n  ```\n\n### `sampleSize`\n- Type: `int`\n- Default: `0` (load all files)\n- Description: Maximum number of files to load.\n- Example:\n  ```dart\n  // Load only 10 files\n  DirectoryLoader('/path', sampleSize: 10)\n  ```\n\n### `randomizeSample`\n- Type: `bool`\n- Default: `false`\n- Description: Whether to randomize the sample of files.\n\n### `sampleSeed`\n- Type: `int?`\n- Default: `null`\n- Description: Seed for random sampling to ensure reproducibility.\n- Example:\n  ```dart\n  // Consistent random sampling\n  DirectoryLoader(\n    '/path', \n    sampleSize: 10,\n    randomizeSample: true,\n    sampleSeed: 42,\n  )\n  ```\n\n### `metadataBuilder`\n- Type: `Map<String, dynamic> Function(File file, Map<String, dynamic> defaultMetadata)?`\n- Default: `null`\n- Description: A custom function to build metadata for each document.\n- Example:\n  ```dart\n  final loader = DirectoryLoader(\n    '/path',\n    metadataBuilder: (file, defaultMetadata) {\n      return {\n        ...defaultMetadata,\n        
'custom_tag': 'important_document',\n        'processing_date': DateTime.now().toIso8601String(),\n      };\n    },\n  );\n  ```\n\n## Default Metadata\n\nBy default, each document receives metadata including:\n- `source`: Full file path\n- `name`: Filename\n- `extension`: File extension\n- `size`: File size in bytes\n- `lastModified`: Last modification timestamp (milliseconds since epoch)\n\n## Lazy Loading\n\nThe `DirectoryLoader` supports lazy loading through the `lazyLoad()` method, which returns a `Stream<Document>`. This is useful for processing large numbers of documents without loading everything into memory at once.\n\n```dart\nfinal loader = DirectoryLoader('/path/to/documents');\nawait for (final document in loader.lazyLoad()) {\n  // Process each document as it's loaded\n  print(document.pageContent);\n}\n```\n\n## Error Handling\n\n- Throws an `ArgumentError` if the glob pattern is empty\n\n## Advanced Example\n\n```dart\nfinal loader = DirectoryLoader(\n  '/path/to/documents',\n  glob: '*.{txt,json,csv}',      // Multiple file types\n  recursive: true,               // Search subdirectories\n  exclude: ['temp*', '*.backup'], // Exclude temp and backup files\n  loadHidden: false,             // Ignore hidden files\n  sampleSize: 50,                // Load only 50 files\n  randomizeSample: true,         // Randomize the sample\n  sampleSeed: 123,               // Reproducible random sampling\n  loaderMap: {\n    // Custom loader for a specific file type\n    '.json': (path) => CustomJsonLoader(path),\n  },\n  metadataBuilder: (file, defaultMetadata) {\n    // Add custom metadata\n    return {\n      ...defaultMetadata,\n      'category': _categorizeFile(file),\n    };\n  },\n);\n\nfinal documents = await loader.load();\n```\n\n## Best Practices\n\n- Use `lazyLoad()` for large directories to manage memory efficiently\n- Provide specific glob patterns to reduce unnecessary file processing\n- Customize loaders for specialized file types\n- Use 
`metadataBuilder` to add context-specific information to documents\n\n## Limitations\n\n- Relies on file system access\n- Performance may vary with large directories"
  },
  {
    "path": "docs/modules/retrieval/document_loaders/how_to/json.md",
    "content": "# JSON\n\nUse `JsonLoader` to load data from a JSON file.\n\nThis loader reads a JSON file located at `filePath` and extracts\n`Document`s based on the provided JSON path schema `jpSchema`.\nEach `Document` represents a matching JSON object or value found in the\nfile.\n\n```dart\nfinal loader = JsonLoader(\n  'path/to/file.json',\n  jpSchema: '$..yourJsonPath',\n);\nfinal documents = await loader.load();\n```\n\nDocumentation related to JSON path schemas:\n- https://pub.dev/packages/json_path\n- https://goessner.net/articles/JsonPath\n\n## Metadata builder\n\nIf you want to customize the metadata for each `Document`, you can provide\na `metadataBuilder` function that takes in the JSON object extracted by the\n`jpSchema` and the default file metadata and returns a map of the updated\nmetadata.\n"
  },
  {
    "path": "docs/modules/retrieval/document_loaders/how_to/text.md",
    "content": "# Text\n\nUse `TextLoader` to load data from a text file.\n\n```dart\nfinal loader = TextLoader('path/to/file.txt');\nfinal documents = await loader.load();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_loaders/how_to/web.md",
    "content": "# Web page\n\nUse `WebBaseLoader` to load data from web pages.\n\nIt uses [http](https://pub.dev/packages/http) to fetch the web page content.\nAnd [beautiful_soup_dart](https://pub.dev/packages/beautiful_soup_dart) to\nparse the HTML content.\n\n```dart\nfinal loader = WebBaseLoader('https://en.wikipedia.org/wiki/Wikipedia');\nfinal documents = await loader.load();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_transformers/document_transformers.md",
    "content": "# Document transformers\n\nOnce you've loaded documents, you'll often want to transform them to better suit\nyour application. The simplest example is you may want to split a long document\ninto smaller chunks that can fit into your model's context window. LangChain has\na number of built-in document transformers that make it easy to split, combine,\nfilter, and otherwise manipulate documents.\n\n## Text splitters\n\nWhen you want to deal with long pieces of text, it is necessary to split up that\ntext into chunks. As simple as this sounds, there is a lot of potential\ncomplexity here. Ideally, you want to keep the semantically related pieces of\ntext together. What \"semantically related\" means could depend on the type of\ntext. This tutorial showcases several ways to do that.\n\nAt a high level, text splitters work as following:\n\n1. Split the text up into small, semantically meaningful chunks (often\n   sentences).\n2. Start combining these small chunks into a larger chunk until you reach a\n   certain size (as measured by some function).\n3. Once you reach that size, make that chunk its own piece of text and then\n   start creating a new chunk of text with some overlap (to keep context between\n   chunks).\n\nThat means there are two different axes along which you can customize your text\nsplitter:\n\n1. How the text is split.\n2. How the chunk size is measured.\n\n## Get started with text splitters\n\nThe most basic text splitter is the `CharacterTextSplitter`. This splits based\non characters (by default `\\n\\n`) and measure chunk length by number of\ncharacters.\n\nThe default recommended text splitter is the `RecursiveCharacterTextSplitter`. This text splitter \ntakes a list of characters. It tries to create chunks based on splitting on the first character, \nbut if any chunks are too large it then moves onto the next character, and so forth. 
By default \nthe characters it tries to split on are `[\"\\n\\n\", \"\\n\", \" \", \"\"]`.\n\nIn addition to controlling which characters you can split on, you can also\ncontrol a few other things:\n\n- `lengthFunction`: how the length of chunks is calculated. Defaults to just\n  counting number of characters, but it's pretty common to pass a token counter\n  here.\n- `chunkSize`: the maximum size of your chunks (as measured by the length\n  function).\n- `chunkOverlap`: the maximum overlap between chunks. It can be nice to have\n  some overlap to maintain some continuity between chunks (eg do a sliding\n  window).\n- `addStartIndex`: whether to include the starting position of each chunk within\n  the original document in the metadata.\n\n```dart\nconst filePath = 'state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = RecursiveCharacterTextSplitter(\n  chunkSize: 800,\n  chunkOverlap: 0,\n);\nfinal docs = textSplitter.splitDocuments(documents);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_transformers/text_splitters/character_text_splitter.md",
    "content": "# Split by character\n\nThis is the simplest method. This splits based on characters (by default `\\n\\n`) and measure chunk \nlength by number of characters.\n\n1. How the text is split: by single character.\n2. How the chunk size is measured: by number of characters.\n\nIf you have a list of documents, you can split them like this:\n```dart\nconst filePath = 'state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = CharacterTextSplitter(\n  chunkSize: 1000,\n  chunkOverlap: 200,\n);\nfinal docs = textSplitter.splitDocuments(documents);\n```\n\nIf you have a text instead, you can split it like this:\n```dart\nconst text = 'This is a long piece of text...';\nfinal texts = textSplitter.splitText(text);\n```\n\nHere's an example of passing metadata along with the documents, notice that it is split along with \nthe documents.\n\n```dart\nconst text1 = 'This is a long piece of text...';\nconst text2 = 'This is a long piece of text...';\nfinal metadatas = [\n  {'document': 1},\n  {'document': 2},\n];\nfinal docs = textSplitter.createDocuments(\n  [text1, text2],\n  metadatas: metadatas,\n);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_transformers/text_splitters/code_text_splitter.md",
    "content": "# Split code\n\n`CodeTextSplitter` allows you to split source code. It supports multiple languages \n(see [CodeLanguage] enum). \n\nIt tries to split along class definitions, function definitions, and control flow statements.\n\nHere's an example of how to use it:\n\n```dart\nconst code = '''\nvoid main() {\n  print(\"Hello, World!\");\n}\n''';\nfinal splitter = CodeTextSplitter(\n  language: CodeLanguage.dart,\n  chunkSize: 16,\n  chunkOverlap: 0,\n);\nfinal chunks = splitter.splitText(code);\nprint(chunks);\n// [\n// 'void main() {',\n// 'print(\"Hello,',\n// 'World!\");',\n// '}',\n// ]\n```\n"
  },
  {
    "path": "docs/modules/retrieval/document_transformers/text_splitters/markdown_text_splitter.md",
    "content": "# Split Markdown\n\nLangChain.dart provides two different types of text splitters specifically designed for Markdown documents:\n\n1. **MarkdownTextSplitter**: A basic splitter that splits markdown text along common markdown structures.\n2. **MarkdownHeaderTextSplitter**: An advanced splitter that can maintain header hierarchy metadata when splitting documents.\n\nThis guide explains how to use both splitters effectively.\n\n## MarkdownTextSplitter\n\nThe `MarkdownTextSplitter` splits Markdown documents along common Markdown-formatted structures like headings, code blocks, and horizontal lines.\n\n```dart\nimport 'package:langchain/langchain.dart';\n\nvoid main() {\n  final text = '''\n# Header 1\nThis is text under header 1.\n\n## Header 2\nThis is text under header 2.\n\n### Header 3\nThis is text under header 3.\n''';\n\n  final splitter = MarkdownTextSplitter(\n    chunkSize: 100,\n    chunkOverlap: 0,\n  );\n  \n  final docs = splitter.createDocuments([text]);\n  \n  for (final doc in docs) {\n    print('--- Document ---');\n    print(doc.pageContent);\n    print('--------------');\n  }\n}\n```\n\nThe `MarkdownTextSplitter` is an extension of the `RecursiveCharacterTextSplitter` that uses markdown-specific separators to break text in a sensible way.\n\n## MarkdownHeaderTextSplitter\n\nThe `MarkdownHeaderTextSplitter` is an advanced splitter that can split markdown documents based on headers while preserving the header hierarchy in the document metadata.\n\nThis is particularly useful for:\n- Creating a hierarchical document structure based on headings\n- Maintaining the context of where each chunk came from\n- Enabling more sophisticated retrieval with metadata filtering\n\n### Basic Usage\n\n```dart\nimport 'package:langchain/langchain.dart';\n\nvoid main() {\n  const markdownDocument = '''\n# My Document\n\n## Introduction\nThis is an introduction to the document.\n\n## Main Section\nThis is the main section with important content.\n\n### 
Subsection A\nThis is subsection A with more specific details.\n\n## Conclusion\nThis concludes the document.\n''';\n\n  // Define headers to track and their corresponding metadata keys\n  final headersToSplitOn = [\n    ('#', 'Header 1'),\n    ('##', 'Header 2'),\n    ('###', 'Header 3'),\n  ];\n\n  // Create the splitter\n  final splitter = MarkdownHeaderTextSplitter(\n    headersToSplitOn: headersToSplitOn,\n  );\n\n  // Split the document\n  final docs = splitter.splitText(markdownDocument);\n\n  // Print the results\n  for (final doc in docs) {\n    print('--- Document ---');\n    print('Content: ${doc.pageContent}');\n    print('Metadata: ${doc.metadata}');\n    print('--------------');\n  }\n}\n```\n\n### Output\n\nThe output of the above code would be:\n\n```\n--- Document ---\nContent: This is an introduction to the document.\nMetadata: {Header 1: My Document, Header 2: Introduction}\n--------------\n--- Document ---\nContent: This is the main section with important content.\nMetadata: {Header 1: My Document, Header 2: Main Section}\n--------------\n--- Document ---\nContent: This is subsection A with more specific details.\nMetadata: {Header 1: My Document, Header 2: Main Section, Header 3: Subsection A}\n--------------\n--- Document ---\nContent: This concludes the document.\nMetadata: {Header 1: My Document, Header 2: Conclusion}\n--------------\n```\n\n### Configuration Options\n\nThe `MarkdownHeaderTextSplitter` includes several configuration options:\n\n1. **`headersToSplitOn`**: List of tuples with header indicators and metadata keys.\n   ```dart\n   headersToSplitOn: [\n     ('#', 'Header 1'),\n     ('##', 'Header 2'),\n     ('###', 'Header 3'),\n   ]\n   ```\n\n2. **`returnEachLine`**: If `true`, returns each line as an individual document. Default is `false`.\n   ```dart\n   returnEachLine: false,\n   ```\n\n3. **`stripHeaders`**: If `true`, removes the headers from the content. 
Default is `true`.\n   ```dart\n   stripHeaders: true,\n   ```\n\n### Preserving Headers\n\nYou can choose to keep the headers in the document content by setting `stripHeaders: false`:\n\n```dart\nfinal splitter = MarkdownHeaderTextSplitter(\n  headersToSplitOn: headersToSplitOn,\n  stripHeaders: false,\n);\n```\n\nWith this configuration, the headers will be preserved in the document content:\n\n```\n--- Document ---\nContent: # My Document\n## Introduction\nThis is an introduction to the document.\nMetadata: {Header 1: My Document, Header 2: Introduction}\n--------------\n```\n\n### Handling Code Blocks\n\nThe splitter intelligently handles fenced code blocks (```` ``` ```` or `~~~`) to ensure that Markdown syntax within code blocks doesn't interfere with the splitting logic.\n\n### Handling Invisible Characters\n\nThe splitter automatically cleans up invisible/non-printable characters from the text, ensuring more reliable header detection.\n\n## Use Cases\n\n1. **Hierarchical Document Navigation**: Maintain the structure of complex documents\n2. **Enhanced Context Retrieval**: Include header context in document chunks\n3. **Metadata-Based Filtering**: Filter retrieval results based on specific headers\n4. **Document Section Targeting**: Target specific sections of a document\n\n## Comparison with Other Splitters\n\n- **RecursiveCharacterTextSplitter**: General-purpose splitter without understanding of document structure\n- **MarkdownTextSplitter**: Basic markdown awareness but no metadata preservation\n- **MarkdownHeaderTextSplitter**: Full header hierarchy awareness with metadata preservation\n\nThe `MarkdownHeaderTextSplitter` is particularly valuable when working with structured Markdown documents where maintaining the document's hierarchy improves downstream tasks like retrieval or question answering.\n"
  },
  {
    "path": "docs/modules/retrieval/document_transformers/text_splitters/recursive_character_text_splitter.md",
    "content": "# Recursively split by character\n\nThis text splitter is the recommended one for generic text. It is parameterized by a list of \ncharacters. It tries to split on them in order until the chunks are small enough. The default list \nis `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then \nsentences, and then words) together as long as possible, as those would generically seem to be the \nstrongest semantically related pieces of text.\n\n1. How the text is split: by list of characters.\n2. How the chunk size is measured: by number of characters.\n\nIf you have a list of documents, you can split them like this:\n```dart\nconst filePath = 'state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = RecursiveCharacterTextSplitter(\n  chunkSize: 1000,\n  chunkOverlap: 200,\n);\nfinal docs = textSplitter.splitDocuments(documents);\n```\n\nIf you have a text instead, you can split it like this:\n```dart\nconst text = 'This is a long piece of text...';\nfinal texts = textSplitter.splitText(text);\n```\n\nHere's an example of passing metadata along with the documents, notice that it is split along with \nthe documents.\n\n```dart\nconst text1 = 'This is a long piece of text...';\nconst text2 = 'This is a long piece of text...';\nfinal metadatas = [\n  {'document': 1},\n  {'document': 2},\n];\nfinal docs = textSplitter.createDocuments(\n  [text1, text2],\n  metadatas: metadatas,\n);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/retrieval.md",
    "content": "# Retrieval\n\nMany LLM applications require user-specific data that is not part of the model's\ntraining set. LangChain gives you the building blocks to load, transform, store\nand query your data via:\n\n- [Document loaders](/modules/retrieval/document_loaders/document_loaders.md):\n  load documents from many different sources.\n- [Document transformers](/modules/retrieval/document_transformers/document_transformers.md):\n  split documents, drop redundant documents, and more.\n- [Text embedding models](/modules/retrieval/text_embedding/text_embedding.md):\n  take unstructured text and turn it into a list of floating point numbers.\n- [Vector stores](/modules/retrieval/vector_stores/vector_stores.md):\n  Store and search over embedded data.\n- [Retrievers](/modules/retrieval/retrievers/retrievers.md): Query your\n  data.\n\n![data connection diagram](img/retrieval.jpg)\n*Image\nsource: [LangChain docs](https://python.langchain.com/docs/modules/retrieval/)*\n\n"
  },
  {
    "path": "docs/modules/retrieval/retrievers/retrievers.md",
    "content": "# Retrievers\n\nA retriever is an interface that returns documents given an unstructured query.\nIt is more general than a vector store. A retriever does not need to be able to\nstore documents, only to return (or retrieve) it. Vector stores can be used as\nthe backbone of a retriever, but there are other types of retrievers as well.\n\n## Get started\n\nThe public API of the `BaseRetriever` class in LangChain is as follows:\n\n```dart\nabstract interface class BaseRetriever {\n  Future<List<Document>> getRelevantDocuments(final String query);\n}\n```\n\nIt's that simple! You can call `getRelevantDocuments` method to retrieve\ndocuments relevant to a query, where \"relevance\" is defined by the specific\nretriever object you are calling.\n\nOf course, we also help construct what we think useful Retrievers are. The main\ntype of Retriever that we focus on is a VectorStore retriever. We will focus on\nthat for the rest of this guide.\n\nIn order to understand what a vector store retriever is, it's important to\nunderstand what a vector store is. So let's look at that.\n\nThis example showcases question answering over documents. We have chosen this as the example for getting started because it nicely combines a lot of different elements (Text splitters, embeddings, vectorstores) and then also shows how to use them in a chain.\n\nQuestion answering over documents consists of four steps:\n\n1. Create an index\n2. Create a Retriever from that index\n3. Create a question answering chain\n4. 
Ask questions!\n\n```dart\nconst filePath = './test/chains/assets/state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = CharacterTextSplitter(\n  chunkSize: 800,\n  chunkOverlap: 0,\n);\nfinal texts = textSplitter.splitDocuments(documents);\nfinal textsWithSources = texts\n    .mapIndexed(\n      (final i, final d) => d.copyWith(\n        metadata: {\n          ...d.metadata,\n          'source': '$i-pl',\n        },\n      ),\n    )\n    .toList(growable: false);\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\nfinal docSearch = await MemoryVectorStore.fromDocuments(\n  documents: textsWithSources,\n  embeddings: embeddings,\n);\nfinal llm = ChatOpenAI(\n  apiKey: openAiKey,\n  defaultOptions: const ChatOpenAIOptions(temperature: 0),\n);\nfinal qaChain = OpenAIQAWithSourcesChain(llm: llm);\nfinal docPrompt = PromptTemplate.fromTemplate(\n  'Content: {page_content}\\nSource: {source}',\n);\nfinal finalQAChain = StuffDocumentsChain(\n  llmChain: qaChain,\n  documentPrompt: docPrompt,\n);\nfinal retrievalQA = RetrievalQAChain(\n  retriever: docSearch.asRetriever(),\n  combineDocumentsChain: finalQAChain,\n);\nconst query = 'What did President Biden say about Russia?';\nfinal res = await retrievalQA(query);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/anyscale.md",
    "content": "# Anyscale Embeddings\n\n[Anyscale](https://www.anyscale.com/) offers several [embedding models](https://docs.endpoints.anyscale.com/guides/models/#embedding-models) through its OpenAI compatible API.\n\nYou can consume Anyscale API using the `OpenAIEmbeddings` wrapper in the same way you would use the OpenAI API.\n\nThe only difference is that you need to change the base URL to `https://api.endpoints.anyscale.com/v1`:\n\n```dart\nfinal anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\nfinal embeddings = OpenAIEmbeddings(\n  apiKey: anyscaleApiKey,\n  baseUrl: 'https://api.endpoints.anyscale.com/v1',\n  model: 'thenlper/gte-large',\n);\n\n// Embedding a document\nconst doc = Document(pageContent: 'This is a test document.');\nfinal res1 = await embeddings.embedDocuments([doc]);\nprint(res1);\n// [[-0.0011281073093414307, -0.013280618004500866, 0.02164546772837639, ...]]\n\n// Embedding a retrieval query\nconst text = 'This is a test query.';\nfinal res2 = await embeddings.embedQuery(text);\nprint(res2);\n// [-0.027850965037941933, 0.00269310618750751, 0.008118202909827232, ...]\n\nembeddings.close();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/gcp_vertex_ai.md",
    "content": "# VertexAIEmbeddings\n\n```dart\nfinal embeddings = VertexAIEmbeddings(\n  httpClient: authClient,\n  project: 'your-project-id',\n);\nconst text = 'This is a test document.';\nfinal res = await embeddings.embedQuery(text);\nfinal res = await embeddings.embedDocuments([text]);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/google_ai.md",
    "content": "# Google AI Embeddings\n\nThe embedding service in the [Gemini API](https://ai.google.dev/docs/embeddings_guide) generates state-of-the-art embeddings for words, phrases, and sentences. The resulting embeddings can then be used for NLP tasks, such as semantic search, text classification and clustering among many others.\n\n## Available models\n\n- `text-embedding-004`\n  * Dimensions: 768 (with support for reduced dimensionality)\n\nThe previous list of models may not be exhaustive or up-to-date. Check out the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of available models.\n\n### Task type\n\nGoogle AI support specifying a 'task type' when embedding documents. The task type is then used by the model to improve the quality of the embeddings. \n\nThis integration uses the specifies the following task type:\n- `retrievalDocument`: for embedding documents\n- `retrievalQuery`: for embedding queries\n\n## Usage\n\n```dart\nfinal apiKey = Platform.environment['GOOGLEAI_API_KEY'];\nfinal embeddings = GoogleGenerativeAIEmbeddings(\n  apiKey: apiKey,\n);\n\n// Embedding a document\nconst doc = Document(pageContent: 'This is a test document.');\nfinal res1 = await embeddings.embedDocuments([doc]);\nprint(res1);\n// [[0.05677966, 0.0030236526, -0.06441004, ...]]\n\n// Embedding a retrieval query\nconst text = 'This is a test query.';\nfinal res2 = await embeddings.embedQuery(text);\nprint(res2);\n// [0.025963314, -0.06858828, -0.026590854, ...]\n\nembeddings.close();\n```\n\n### Title\n\nGoogle AI support specifying a document title when embedding documents. The title is then used by the model to improve the quality of the embeddings.\n\nTo specify a document title, add the title to the document's metadata. 
Then, specify the metadata key in the [docTitleKey] parameter.\n\nExample:\n```dart\nfinal embeddings = GoogleGenerativeAIEmbeddings(\n  apiKey: 'your-api-key',\n);\nfinal result = await embeddings.embedDocuments([\n  Document(\n    pageContent: 'Hello world',\n    metadata: {'title': 'Hello!'},\n  ),\n]);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/mistralai.md",
    "content": "# MistralAIEmbeddings\n\nWrapper around [Mistral AI](https://mistral.ai/) Embeddings API.\n\nMistral AI brings the strongest open generative models to the developers, along with efficient ways to deploy and customise them for production.\n\n> Note: Mistral AI API is currently in closed beta. You can request access [here](https://console.mistral.ai).\n\n## Setup\n\nTo use `MistralAIEmbeddings` you need to have a Mistral AI account and an API key. You can get one [here](https://console.mistral.ai/users/).\n\nThe following models are available at the moment:\n- `mistral-embed`: an embedding model with a 1024 embedding dimensions designed with retrieval capabilities in mind. It achieves a retrieval score of 55.26 on MTEB.\n\n## Usage\n\n```dart\nfinal embeddings = MistralAIEmbeddings(apiKey: 'apiKey');\nconst text = 'This is a test document.';\nfinal res = await embeddings.embedQuery(text);\nfinal res = await embeddings.embedDocuments([text]);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/ollama.md",
    "content": "# OllamaEmbeddings\n\n```dart\nfinal embeddings = OllamaEmbeddings(model: 'llama3.2');\nconst text = 'This is a test document.';\nfinal res = await embeddings.embedQuery(text);\nfinal res = await embeddings.embedDocuments([text]);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/openai.md",
    "content": "# OpenAIEmbeddings\n\nYou can use the `OpenAIEmbeddings` wrapper to consume OpenAI embedding models.\n\n```dart\nfinal openAiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal embeddings = OpenAIEmbeddings(apiKey: openAiApiKey);\n\n// Embedding a document\nconst doc = Document(pageContent: 'This is a test document.');\nfinal res1 = await embeddings.embedDocuments([doc]);\nprint(res1);\n// [[-0.003105443, 0.011136302, -0.0040295827, -0.011749065, ...]]\n\n// Embedding a retrieval query\nconst text = 'This is a test query.';\nfinal res2 = await embeddings.embedQuery(text);\nprint(res2);\n// [-0.005047946, 0.0050882488, -0.0051957234, -0.019143905, ...]\n\nembeddings.close();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/prem.md",
    "content": "# Prem App\n\nYou can easily run local embedding models using [Prem app](https://www.premai.io/#PremApp). \nIt creates a local server that exposes a REST API with the same interface as \nthe OpenAI API.\n\n```dart\nconst localUrl = 'http://localhost:8000'; // Check Prem app for the actual URL\nfinal embeddings = OpenAIEmbeddings(baseUrl: localUrl);\nconst text = 'This is a test document.';\nfinal res = await embeddings.embedQuery(text);\nfinal res = await embeddings.embedDocuments([text]);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/integrations/together_ai.md",
    "content": "# Together AI Embeddings\n\n[Together AI](https://www.together.ai/) offers several leading [embedding models](https://docs.together.ai/docs/embedding-models#embedding-models) through its OpenAI compatible API.\n\nYou can consume Together AI API using the `OpenAIEmbeddings` wrapper in the same way you would use the OpenAI API.\n\nThe only difference is that you need to change the base URL to `https://api.together.xyz/v1`:\n\n```dart\nfinal togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\nfinal embeddings = OpenAIEmbeddings(\n  apiKey: togetherAiApiKey,\n  baseUrl: 'https://api.together.xyz/v1',\n  model: 'togethercomputer/m2-bert-80M-32k-retrieval',\n);\n\n// Embedding a document\nconst doc = Document(pageContent: 'This is a test document.');\nfinal res1 = await embeddings.embedDocuments([doc]);\nprint(res1);\n// [[-0.038838703, 0.0580902, 0.022614542, 0.0078403875, ...]]\n\n// Embedding a retrieval query\nconst text = 'This is a test query.';\nfinal res2 = await embeddings.embedQuery(text);\nprint(res2);\n// [-0.019722218, 0.04656633, -0.0074559706, 0.005712764, ...]\n\nembeddings.close();\n```\n"
  },
  {
    "path": "docs/modules/retrieval/text_embedding/text_embedding.md",
    "content": "# Text embedding models\n\nThe Embeddings class is a class designed for interfacing with text embedding\nmodels. There are lots of embedding model providers (OpenAI, Cohere, Hugging\nFace, etc) - this class is designed to provide a standard interface for all of\nthem.\n\nEmbeddings create a vector representation of a piece of text. This is useful\nbecause it means we can think about text in the vector space, and do things like\nsemantic search where we look for pieces of text that are most similar in the\nvector space.\n\nThe base Embeddings class in LangChain exposes two methods: one for embedding\ndocuments and one for embedding a query. The former takes as input multiple\ntexts, while the latter takes a single text. The reason for having these as two\nseparate methods is that some embedding providers have different embedding\nmethods for documents (to be searched over) vs queries (the search query\nitself).\n\n## Get started\n\n### Setup\n\nWe are going to be using `OpenAIEmbeddings` for this example.\n\n```dart\nfinal openaiApiKey = Platform.environment['OPENAI_API_KEY'];\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n```\n\n### Embedding documents\n\n```dart\nfinal res = await embeddings.embedDocuments(['Hello world', 'Bye bye']);\n```\n\n### Embedding a query\n\n```dart\nfinal res = await embeddings.embedQuery('Hello world');\n```\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/chroma.md",
    "content": "# Chroma\n\nVector store for [Chroma](https://www.trychroma.com/) open-source embedding database.\n\nThis vector stores requires Chroma to be running in client/server mode.\n\nThe server can run on your local computer via docker or be easily deployed to any cloud provider.\n\n## Running Chroma in client/server mode\n\nYou can run a Chroma server in two ways:\n\n### Using Python client \n\nThe Python client supports spinning up a Chroma server easily:\n\n```sh\npip install chromadb\nchroma run --path /db_path\n```\n\n### Using Docker\n\nOtherwise, you can run the Chroma server using Docker:\n\n```sh\ndocker pull chromadb/chroma\ndocker run -p 8000:8000 chromadb/chroma\n```\n\nBy default, the Chroma client will connect to a server running on `http://localhost:8000`. To connect to a different server, pass the `baseUrl` parameter to the constructor.\n\n```dart\nfinal vectorStore = Chroma(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  baseUrl: 'http://localhost:8888',\n);\n```\n\n## Collections\n\nChroma lets you manage collections of embeddings, using the collection primitive.\n\nYou can configure the collection to use in the `collectionName` parameter.\n\nYou can also configure the metadata to associate with the collection in the `collectionMetadata` parameter.\n\n## Changing the distance function\n\nYou can change the distance function of the embedding space by setting the value of `hnsw:space` in `collectionMetadata`. Valid options are \"l2\", \"ip\", or \"cosine\". The default is \"l2\".\n\n## Filtering\n\nChroma supports filtering queries by metadata and document contents. 
The `where` filter is used to filter by metadata, and the `whereDocument` filter is used to filter by document contents.\n\nFor example:\n```dart\nfinal vectorStore = Chroma(...);\nfinal res = await vectorStore.similaritySearch(\n  query: 'What should I feed my cat?',\n  config: ChromaSimilaritySearch(\n    k: 5,\n    scoreThreshold: 0.8,\n    where: {'class': 'cat'},\n  ),\n);\n```\n\nChroma supports a wide range of operators for filtering. Check out the [filtering section](https://docs.trychroma.com/usage-guide?lang=js#using-where-filters) of the Chroma docs for more info.\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/memory.md",
    "content": "# MemoryVectorStore\n\n`MemoryVectorStore` is an in-memory, ephemeral vector store that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity.\n\nThis class is useful for testing and prototyping, but it is not recommended for production use cases. See other vector store integrations for production use cases.\n\n```dart\nconst filePath = './test/chains/assets/state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = CharacterTextSplitter(\n  chunkSize: 800,\n  chunkOverlap: 0,\n);\nfinal texts = textSplitter.splitDocuments(documents);\nfinal textsWithSources = texts\n    .mapIndexed(\n      (final i, final d) => d.copyWith(\n        metadata: {\n          ...d.metadata,\n          'source': '$i-pl',\n        },\n      ),\n    )\n    .toList(growable: false);\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\nfinal docSearch = await MemoryVectorStore.fromDocuments(\n  documents: textsWithSources,\n  embeddings: embeddings,\n);\nfinal llm = ChatOpenAI(\n  apiKey: openAiKey,\n  defaultOptions: ChatOpenAIOptions(temperature: 0),\n);\nfinal qaChain = OpenAIQAWithSourcesChain(llm: llm);\nfinal docPrompt = PromptTemplate.fromTemplate(\n  'Content: {page_content}\\nSource: {source}',\n);\nfinal finalQAChain = StuffDocumentsChain(\n  llmChain: qaChain,\n  documentPrompt: docPrompt,\n);\nfinal retrievalQA = RetrievalQAChain(\n  retriever: docSearch.asRetriever(),\n  combineDocumentsChain: finalQAChain,\n);\nconst query = 'What did President Biden say about Russia?';\nfinal res = await retrievalQA(query);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/objectbox.md",
    "content": "# ObjectBox\n\nVector store for the [ObjectBox](https://objectbox.io/) on-device database.\n\nObjectBox features:\n- Embedded Database that runs inside your application without latency\n- Vector search based is state-of-the-art HNSW algorithm that scales very well with growing data volume\n- HNSW is tightly integrated within ObjectBox's internal database. Vector Search doesn’t just run “on top of database persistence”\n- With this deep integration ObjectBox does not need to keep all vectors in memory\n- Multi-layered caching: if a vector is not in-memory, ObjectBox fetches it from disk\n- Not just a vector database: you can store any data in ObjectBox, not just vectors. You won’t need a second database\n- Low minimum hardware requirements: e.g. an old Raspberry Pi comfortably runs ObjectBox smoothly\n- Low memory footprint: ObjectBox itself just takes a few MB of memory. The entire binary is only about 3 MB (compressed around 1 MB)\n- Scales with hardware: efficient resource usage is also an advantage when running on more capable devices like the latest phones, desktops and servers\n\nOfficial ObjectBox resources:\n- [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search)\n- [The first On-Device Vector Database: ObjectBox 4.0](https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0)\n- [On-device Vector Database for Dart/Flutter](https://objectbox.io/on-device-vector-database-for-dart-flutter)\n\n## Setup\n\nLangChain.dart offers two classes for working with ObjectBox:\n- `ObjectBoxVectorStore`: This vector stores creates a `Store` with an `ObjectBoxDocument` entity that persists LangChain `Document`s along with their embeddings.\n- `BaseObjectBoxVectorStore`: If you need more control over the entity (e.g. if you need to persist custom fields), you can use this class instead.\n\n###  1. 
Add ObjectBox to your project\n\nSee the [ObjectBox documentation](https://docs.objectbox.io/getting-started) to learn how to add ObjectBox to your project.\n\nNote that the integration differs depending on whether you are building a Flutter application or a pure Dart application.\n\n### 2. Add the LangChain.dart Community package\n\nAdd the `langchain_community` package to your `pubspec.yaml` file.\n\n```yaml\ndependencies:\n  langchain: {version}\n  langchain_community: {version}\n```\n\n### 3. Instantiate the ObjectBox vector store\n\n```dart\nfinal embeddings = OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en');\nfinal vectorStore = ObjectBoxVectorStore.open(\n  embeddings: embeddings,\n  dimensions: 512,\n);\n```\n\nThe dimensions parameter specifies the number of dimensions of the embeddings. It will depend on the embeddings model you are using. In this example, we are using the [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) model, which has 512 dimensions.\n\nThe `ObjectBoxVectorStore` constructor allows you to customize the ObjectBox store that is created under the hood. 
For example, you can change the directory where the database is stored:\n\n```dart\nfinal vectorStore = ObjectBoxVectorStore.open(\n  embeddings: embeddings,\n  dimensions: 512,\n  directory: 'path/to/db',\n);\n```\n\n## Usage\n\n### Storing vectors\n\n```dart\nfinal res = await vectorStore.addDocuments(\n  documents: [\n    Document(\n      pageContent: 'The cat sat on the mat',\n      metadata: {'cat': 'animal'},\n    ),\n    Document(\n      pageContent: 'The dog chased the ball.',\n      metadata: {'cat': 'animal'},\n    ),\n  ],\n);\n```\n\n### Querying vectors\n\n```dart\nfinal res = await vectorStore.similaritySearch(\n  query: 'Where is the cat?',\n  config: const ObjectBoxSimilaritySearch(k: 1),\n);\n```\n\nYou can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `ObjectBoxSimilaritySearch` config object.\n\n#### Filtering\n\nYou can use the `ObjectBoxSimilaritySearch` class to pass ObjectBox-specific filtering options.\n\n`ObjectBoxVectorStore` supports filtering queries by id, content or metadata using ObjectBox's `Condition`. You can define the filter condition in the `ObjectBoxSimilaritySearch.filterCondition` parameter. 
Use the `ObjectBoxDocumentProps` class to reference the entity fields to use in the query.\n\nFor example:\n```dart\nfinal res = await vectorStore.similaritySearch(\n  query: 'What should I feed my cat?',\n  config: ObjectBoxSimilaritySearch(\n    k: 5,\n    scoreThreshold: 0.8,\n    filterCondition: ObjectBoxDocumentProps.id.equals('my-id')\n        .or(ObjectBoxDocumentProps.metadata.contains('some-text')),\n  ),\n);\n```\n\n### Deleting vectors\n\nTo delete documents, you can use the `delete` method passing the ids of the documents you want to delete.\n\n```dart\nawait vectorStore.delete(ids: ['9999']);\n```\n\nYou can also use `deleteWhere` to delete documents based on a condition.\n\n```dart\nawait vectorStore.deleteWhere(\n  ObjectBoxDocumentProps.metadata.contains('cat'),\n);\n```\n\n## Example: Building a Fully Local RAG App with ObjectBox and Ollama\n\nThis example demonstrates how to build a fully local RAG (Retrieval-Augmented Generation) app using ObjectBox and Ollama. The app retrieves blog posts, splits them into chunks, and stores them in an ObjectBox vector store. It then uses the stored information to generate responses to user questions.\n\n![RAG Pipeline](img/objectbox.png)\n\n#### Prerequisites\n\nBefore running the example, make sure you have the following:\n\n- Ollama installed (see the [Ollama documentation](https://ollama.com/) for installation instructions).\n- [jina/jina-embeddings-v2-small-en](https://ollama.com/jina/jina-embeddings-v2-small-en) and [llama3:8b](https://ollama.com/library/llama3:8b) models downloaded.\n\n#### Steps\n\n**Step 1: Retrieving and Storing Documents**\n\n1. Retrieve several posts from the ObjectBox blog using a `WebBaseLoader` document loader.\n2. Split the retrieved posts into chunks using a `RecursiveCharacterTextSplitter`.\n3. Create embeddings from the document chunks using the `jina/jina-embeddings-v2-small-en` embeddings model via `OllamaEmbeddings`.\n4. 
Add the document chunks and their corresponding embeddings to the `ObjectBoxVectorStore`.\n\n> Note: this step only needs to be executed once (unless the documents change). The stored documents can be used for multiple queries.\n\n**Step 2: Constructing the RAG Pipeline**\n\n1. Set up a retrieval pipeline that takes a user question as input and retrieves the most relevant documents from the ObjectBox vector store.\n2. Format the retrieved documents into a single string containing the source, title, and content of each document.\n3. Pass the formatted string to the Llama 3 model to generate a response to the user question.\n\n```dart\n// 1. Instantiate vector store\nfinal vectorStore = ObjectBoxVectorStore.open(\n  embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),\n  dimensions: 512,\n);\n\n// 2. Load documents\nconst loader = WebBaseLoader([\n  'https://objectbox.io/on-device-vector-databases-and-edge-ai/',\n  'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/',\n  'https://objectbox.io/on-device-vector-database-for-dart-flutter/',\n  'https://objectbox.io/evolution-of-search-traditional-vs-vector-search//',\n]);\nfinal List<Document> docs = await loader.load();\n\n// 3. Split docs into chunks\nconst splitter = RecursiveCharacterTextSplitter(\n  chunkSize: 500,\n  chunkOverlap: 0,\n);\nfinal List<Document> chunkedDocs = await splitter.invoke(docs);\n\n// 4. Add documents to vector store\nawait vectorStore.addDocuments(documents: chunkedDocs);\n\n// 5. Construct a RAG prompt template\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system,\n    '''\nYou are an assistant for question-answering tasks.\n\nUse the following pieces of retrieved context to answer the user question.\n\nContext:\n{context}\n\nIf you don't know the answer, just say that you don't know. 
\nUse three sentences maximum and keep the answer concise.\nCite the source you used to answer the question.\n\nExample:\n\"\"\"\nOne sentence [1]. Another sentence [2]. \n\nSources:\n[1] https://example.com/1\n[2] https://example.com/2\n\"\"\"\n'''\n  ),\n  (ChatMessageType.human, '{question}'),\n]);\n\n// 6. Define the model to use and the vector store retriever\nfinal chatModel = ChatOllama(\n  defaultOptions: ChatOllamaOptions(model: 'llama3.2'),\n);\nfinal retriever = vectorStore.asRetriever();\n\n// 7. Create a Runnable that combines the retrieved documents into a single formatted string\nfinal docCombiner = Runnable.mapInput<List<Document>, String>((docs) {\n  return docs.map((d) => '''\nSource: ${d.metadata['source']}\nTitle: ${d.metadata['title']}\nContent: ${d.pageContent}\n---\n''').join('\\n');\n});\n\n// 8. Define the RAG pipeline\nfinal chain = Runnable.fromMap<String>({\n  'context': retriever.pipe(docCombiner),\n  'question': Runnable.passthrough(),\n}).pipe(promptTemplate).pipe(chatModel).pipe(StringOutputParser());\n\n// 9. Run the pipeline\nfinal stream = chain.stream(\n  'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?',\n);\nawait stream.forEach(stdout.write);\n// According to the sources provided, ObjectBox Vector Search uses the HNSW\n// (Hierarchical Navigable Small World) algorithm [1].\n//\n// And yes, you can use it in Flutter apps. 
The article specifically mentions\n// that ObjectBox 4.0 introduces an on-device vector database for the\n// Dart/Flutter platform [2].\n//\n// Sources:\n// [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/\n// [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/\n```\n\n## Example: Wikivoyage EU\n\nCheck out the [Wikivoyage EU example](https://github.com/davidmigloz/langchain_dart/tree/main/examples/wikivoyage_eu) to see how to build a fully local chatbot that uses RAG to plan vacations in Europe.\n\n## Advanced\n\n### BaseObjectBoxVectorStore\n\nIf you need more control over the entity (e.g. if you are using ObjectBox to store other entities, or if you need to customize the Document entity class), you can use the `BaseObjectBoxVectorStore` class instead of `ObjectBoxVectorStore`.\n\n`BaseObjectBoxVectorStore` requires the following parameters:\n- `embeddings`: The embeddings model to use.\n- `box`: The ObjectBox `Box` instance to use.\n- `createEntity`: A function that creates an entity from the given data.\n- `createDocument`: A function that creates a LangChain `Document` from the given entity.\n- `getIdProperty`: A function that returns the ID property of the entity.\n- `getEmbeddingProperty`: A function that returns the embedding property of the entity.\n\nHere is an example of how to use this class:\n\nFirst, you can define your own Document entity class instead of using the one provided by the [ObjectBoxVectorStore]. In this way, you can customize the entity to your needs. 
You will need to define the mapping logic between the entity and the LangChain [Document] model.\n\n```dart\n@Entity()\nclass MyDocumentEntity {\n  MyDocumentEntity({\n    required this.id,\n    required this.content,\n    required this.metadata,\n    required this.embedding,\n  });\n  @Id()\n  int internalId = 0;\n  @Unique(onConflict: ConflictStrategy.replace)\n  String id;\n  String content;\n  String metadata;\n  @HnswIndex(\n    dimensions: 768,\n    distanceType: VectorDistanceType.cosine,\n  )\n  @Property(type: PropertyType.floatVector)\n  List<double> embedding;\n  factory MyDocumentEntity.fromModel(\n    Document doc, List<double> embedding,\n  ) => MyDocumentEntity(\n        id: doc.id ?? '',\n        content: doc.pageContent,\n        metadata: jsonEncode(doc.metadata),\n        embedding: embedding,\n      );\n  Document toModel() => Document(\n        id: id,\n        pageContent: content,\n        metadata: jsonDecode(metadata),\n      );\n}\n```\n\nAfter defining the entity class, you will need to run the ObjectBox generator:\n\n```sh\ndart run build_runner build --delete-conflicting-outputs\n```\n\nThen, you just need to create your custom vector store class that extends [BaseObjectBoxVectorStore] and wire everything up:\n\n```dart\nclass MyCustomVectorStore extends BaseObjectBoxVectorStore<MyDocumentEntity> {\n  MyCustomVectorStore({\n    required super.embeddings,\n    required Store store,\n  }) : super(\n          box: store.box<MyDocumentEntity>(),\n          createEntity: (\n            String id,\n            String content,\n            String metadata,\n            List<double> embedding,\n          ) =>\n              MyDocumentEntity(\n            id: id,\n            content: content,\n            metadata: metadata,\n            embedding: embedding,\n          ),\n          createDocument: (MyDocumentEntity docDto) => docDto.toModel(),\n          getIdProperty: () => MyDocumentEntity_.id,\n          getEmbeddingProperty: () => 
MyDocumentEntity_.embedding,\n        );\n}\n```\n\nNow you can use the [MyCustomVectorStore] class to store and search documents.\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/pinecone.md",
    "content": "# Pinecone\n\nVector store for [Pinecone](https://www.pinecone.io/) vector database.\n\nTo use Pinecone, you must have an API key. To find your API key, open the Pinecone console and click API Keys.\n\nBefore using this vector store you need to create an index in Pinecone. You can do that in the Pinecone console or using a Pinecone API client. Check out the [Pinecone documentation](https://docs.pinecone.io/docs/choosing-index-type-and-size) for more information regarding index type and size.\n\nAfter creating the index, configure the index name in the `indexName` parameter and the cloud region in the `environment` parameter.\n\n```dart\nfinal vectorStore = Pinecone(\n  apiKey: pineconeApiKey,\n  indexName: 'langchain-dart',\n  environment: 'gcp-starter',\n  embeddings: embeddings,\n);\n```\n\nPinecone indexes store records with vector data. Each record in a Pinecone index always contains a unique ID and an array of floats representing a dense vector embedding. It can also contain a sparse vector embedding for hybrid search and metadata key-value pairs for filtered queries.\n\nWhen you add documents to the index using this class, the document's page content will be stored in the index's metadata. You can configure the metadata key in the `docPageContentKey` parameter.\n\nMind that Pinecone supports 40kb of metadata per vector.\n\nYou can organize the vectors added to an index into partitions, or \"namespaces,\" to limit queries and other vector operations to only one such namespace at a time. 
You can configure the namespace in the `namespace` parameter.\n\n### Filtering\n\nMetadata filter expressions can be included with queries to limit the search to only vectors matching the filter expression.\n\nFor example:\n```dart\nfinal vectorStore = VectorStore(...);\nfinal res = await vectorStore.similaritySearch(\n  query: 'What should I feed my cat?',\n  config: PineconeSimilaritySearch(\n    k: 5,\n    scoreThreshold: 0.8,\n    filter: {'class': 'cat'},\n  ),\n);\n```\n\nPinecone supports a wide range of operators for filtering. Check out the [filtering section](https://docs.pinecone.io/docs/metadata-filtering#metadata-query-language) of the Pinecone docs for more info.\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/supabase.md",
    "content": "# Supabase\n\nVector store for [Supabase Vector](https://supabase.com/vector) embedding database.\n\nIt uses [`pgvector`](https://github.com/pgvector/pgvector) extension to store, query, and index vector embeddings in a Postgres database instance.\n\n> [Supabase Vector Docs](https://supabase.com/docs/guides/ai)\n\n## Setup\n\n### Enable `pgvector` extension\n\n1. Go to the [Database page](https://supabase.com/dashboard/project/_/database/tables) in the Dashboard.\n2. Click on Extensions in the sidebar.\n3. Search for \"vector\" and enable the extension.\n4. Select \"extensions\" schema.\n\nAlternatively, you can run the following SQL query in the [SQL editor](https://supabase.com/dashboard/project/_/sql):\n\n```sql\n -- Example: enable the \"vector\" extension.\ncreate extension vector\nwith\n  schema extensions;\n```\n\n### Create a table to store vectors\n\nAfter enabling the `vector` extension, you will get access to a new data type called `vector`. The size of the vector (indicated in parentheses) represents the number of dimensions stored in that vector.\n\nCreate a table to store the vectors:\n \n```sql\ncreate table documents (\n  id bigserial primary key,\n  content text,\n  metadata jsonb,\n  embedding vector(1536)\n);\n```\n\nIn the above SQL snippet, we create a `documents` table with the following columns:\n- `id`: a unique identifier for each document. If the id is null, it will be auto-generated.\n- `content`: the text content of the document (i.e. `Document.pageContent`).\n- `metadata`: a JSON object containing metadata about the document (i.e. `Document.metadata`).\n- `embedding`: the vector embedding of the document. In this example, we are using a vector generated using OpenAI's `text-embedding-3-small` embedding model, which produces 1536 dimensions. Change this to the number of dimensions produced by your embedding model. 
For example, if you are generating embeddings using the open source [gte-small](https://huggingface.co/Supabase/gte-small) model, you would set this number to 384 since that model produces 384 dimensions.\n\n### Create Postgres function to query vectors\n\n[`supabase`](https://pub.dev/packages/supabase) client (used internally by LangChain.dart) connects to your Postgres instance via [PostgREST](https://supabase.com/docs/guides/ai/docs/guides/getting-started/architecture#postgrest-api). PostgREST does not currently support `pgvector` similarity operators, so we need to implement a Postgres function `match_documents` that executes the query. This function will be called by LangChain.dart via the `rpc()` method.\n\n```sql\ncreate or replace function match_documents (\n  query_embedding vector(1536),\n  match_count int,\n  match_threshold float,\n  filter jsonb\n) returns table (\n  id bigint,\n  content text,\n  metadata jsonb,\n  similarity float\n)\nlanguage sql stable\nas $$\n  select\n    documents.id,\n    documents.content,\n    documents.metadata,\n    1 - (documents.embedding <=> query_embedding) as similarity\nfrom documents\nwhere metadata @> filter\n  and 1 - (documents.embedding <=> query_embedding) > match_threshold\norder by (documents.embedding <=> query_embedding) asc\n    limit match_count;\n$$;\n```\n\nThis function takes a `query_embedding` argument and compares it to all other embeddings in the `documents` table. Each comparison returns a similarity score. If the similarity is greater than the `match_threshold` argument, it is returned. 
The number of rows returned is limited by the `match_count` argument.\n\nMake sure to change the `vector(1536)` type to match the number of dimensions in your embedding model.\n\n### Instantiate Supabase vector store\n\n```dart\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\nfinal vectorStore = Supabase(\n  tableName: 'documents',\n  embeddings: embeddings,\n  supabaseUrl: supabaseUrl,\n  supabaseKey: supabaseApiKey,\n);\n```\n\nYou can find your Supabase URL and API key in the [Project API Settings](https://supabase.com/dashboard/project/_/settings/api) page.\n\n## Usage\n\n### Storing vectors\n\n```dart\nfinal res = await vectorStore.addDocuments(\n  documents: [\n    const Document(\n      pageContent: 'The cat sat on the mat',\n      metadata: {'cat': 'animal'},\n    ),\n    const Document(\n      pageContent: 'The dog chased the ball.',\n      metadata: {'cat': 'animal'},\n    ),\n  ],\n);\n```\n\n### Querying vectors\n\n```dart\nfinal res = await vectorStore.similaritySearch(\n  query: 'Where is the cat?',\n  config: const SupabaseSimilaritySearch(k: 1),\n);\n```\n\nYou can change the minimum similarity score threshold by setting the `scoreThreshold` parameter in the `SupabaseSimilaritySearch` config object.\n\n#### Filtering\n\nYou can filter the query by metadata by setting the `filter` parameter in the `SupabaseSimilaritySearch` config object.\n\nMetadata is stored as binary JSON. As a result, allowed metadata types are drawn from JSON primitive types (bool, String, num). \n\nThe technical limit of a metadata field associated with a vector is 1GB. In practice, you should keep metadata fields as small as possible to maximize performance.\n\nThe metadata query language is based loosely on [mongodb's selectors](https://www.mongodb.com/docs/manual/reference/operator/query/). 
Check the [Supabase docs](https://supabase.com/docs/guides/ai/python/metadata#metadata-query-language) for more information.\n\n```dart\nfinal res = await vectorStore.similaritySearch(\n  query: 'Where is the cat?',\n  config: const SupabaseSimilaritySearch(\n    k: 10,\n    filter: {'cat': 'animal'},\n  ),\n);\n```\n\nOr using filtering operators:\n\n```dart\nfinal res = await vectorStore.similaritySearch(\n  query: 'Where is the cat?',\n  config: const SupabaseSimilaritySearch(\n    k: 10,\n    filter: {\n      'cat': {r'$ne': 'person'},\n    },\n  ),\n);\n```\n\n### Deleting vectors\n\n```dart\nawait vectorStore.delete(ids: ['9999']);\n```\n\n## Advanced\n\n### Indexes\n\nOnce your vector table starts to grow, you will likely want to add an index to speed up queries. See [Vector indexes](https://supabase.com/docs/guides/ai/vector-indexes) to learn how vector indexes work and how to create them.\n\n### Changing the distance function\n\nYou can change the distance function of the embedding space by modifying the distance operator used in the Postgres function.\n\n`pgvector` supports 3 operators for computing distance:\n- `<=>`: Cosine distance\n- `<->`: Euclidean distance\n- `<#>`: Negative inner product\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/integrations/vertex_ai.md",
    "content": "# Vertex AI Vector Search (formerly Vertex AI Matching Engine)\n\nA vector store that uses [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview).\n\nVertex AI Vector Search provides a high-scale low latency vector database.\n\nThis vector store relies on two GCP services:\n- Vertex AI Matching Engine: to store the vectors and perform similarity searches.\n- Google Cloud Storage: to store the documents and the vectors to add to the index.\n\nCurrently it only supports Batch Updates; it doesn't support Streaming Updates. Batch Updates take around 1h to be applied to the index ([more info](https://cloud.google.com/vertex-ai/docs/matching-engine/update-rebuild-index#update_index_content_with_batch_updates)).\n\n## Set up your Google Cloud Platform project\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n4. [Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n\n## Create your Vertex AI Vector Search index\n\nTo use this vector store, first you need to create a Vertex AI Vector Search index and expose it in a Vertex AI index endpoint.\n\nYou can use the [vertex_ai](https://pub.dev/packages/vertex_ai) Dart package to do that.\n\nCheck out [this sample](https://github.com/davidmigloz/langchain_dart/tree/main/examples/vertex_ai_matching_engine_setup) script that creates the index and index endpoint ready to be used with LangChain.dart.\n\n## Authentication\n\nTo create an instance of `VertexAIMatchingEngine` you need to provide an HTTP client that handles authentication. 
The easiest way to do this is to use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html) from the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package.\n\nThere are several ways to obtain an `AuthClient` depending on your use case. Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth) package documentation for more details.\n\nExample using a service account JSON:\n\n```dart\nfinal serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n  json.decode(serviceAccountJson),\n);\nfinal authClient = await clientViaServiceAccount(\n  serviceAccountCredentials,\n  VertexAIMatchingEngine.cloudPlatformScopes,\n);\nfinal vectorStore = VertexAIMatchingEngine(\n  httpClient: authClient,\n  project: 'your-project-id',\n  location: 'europe-west1',\n  indexId: 'your-index-id',\n  gcsBucketName: 'your-gcs-bucket-name',\n  embeddings: embeddings,\n);\n```\n\nThe minimum required permissions for the service account if you just need to query the index are:\n- `aiplatform.indexes.get`\n- `aiplatform.indexEndpoints.get`\n- `aiplatform.indexEndpoints.queryVectors`\n- `storage.objects.get`\n\nIf you also need to add new vectors to the index, the service account should have the following permissions as well:\n- `aiplatform.indexes.update`\n- `storage.objects.create`\n- `storage.objects.update`\n\nThe required [OAuth2 scopes](https://developers.google.com/identity/protocols/oauth2/scopes) are:\n- `https://www.googleapis.com/auth/cloud-platform`\n- `https://www.googleapis.com/auth/devstorage.full_control`\n\nYou can use the constant `VertexAIMatchingEngine.cloudPlatformScopes`.\n\n## Vector attributes filtering\n\nVertex AI Matching Engine allows you to add attributes to the vectors that you can later use to restrict vector matching searches to a subset of the index.\n\nTo add attributes to the vectors, add a `restricts` key to the document metadata with the attributes that you want to add. 
For example:\n\n```dart\nfinal doc = Document(\n id: 'doc1',\n pageContent: 'The cat is a domestic species of small carnivorous mammal',\n metadata: {\n   'restricts': [\n     {\n       'namespace': 'class',\n       'allow': ['cat', 'pet']\n     },\n     {\n       'namespace': 'category',\n       'allow': ['feline']\n     }\n   ],\n   'otherMetadata': '...',\n },\n);\n```\n\nCheck out [the documentation](https://cloud.google.com/vertex-ai/docs/matching-engine/filtering) for more details.\n\nAfter adding the attributes to the documents, you can use them to restrict the similarity search results. Example:\n\n```dart\nfinal vectorStore = VertexAIMatchingEngine(...);\nfinal res = await vectorStore.similaritySearch(\n  query: 'What should I feed my cat?',\n  config: VertexAIMatchingEngineSimilaritySearch(\n    k: 5,\n    scoreThreshold: 0.8,\n    filters: [\n      const VertexAIMatchingEngineFilter(\n        namespace: 'class',\n        allowList: ['cat'],\n      ),\n    ],\n  ),\n);\n```\n"
  },
  {
    "path": "docs/modules/retrieval/vector_stores/vector_stores.md",
    "content": "# Vector stores\n\nOne of the most common ways to store and search over unstructured data is to\nembed it and store the resulting embedding vectors, and then at query time to\nembed the unstructured query and retrieve the embedding vectors that are 'most\nsimilar' to the embedded query. A vector store takes care of storing embedded\ndata and performing vector search for you.\n\n## Get started\n\nThis walkthrough showcases basic functionality related to VectorStores. A key\npart of working with vector stores is creating the vector to put in them, which\nis usually created via embeddings. Therefore, it is recommended that you\nfamiliarize yourself with the text embedding model interfaces before diving into\nthis.\n\n```dart\nconst filePath = './test/chains/assets/state_of_the_union.txt';\nconst loader = TextLoader(filePath);\nfinal documents = await loader.load();\nconst textSplitter = CharacterTextSplitter(\n  chunkSize: 800,\n  chunkOverlap: 0,\n);\nfinal texts = textSplitter.splitDocuments(documents);\nfinal textsWithSources = texts\n    .mapIndexed(\n      (final i, final d) => d.copyWith(\n        metadata: {\n          ...d.metadata,\n          'source': '$i-pl',\n        },\n      ),\n    )\n    .toList(growable: false);\nfinal embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\nfinal docSearch = await MemoryVectorStore.fromDocuments(\n  documents: textsWithSources,\n  embeddings: embeddings,\n);\n```\n\n### Similarity search\n\n```dart\nconst query = 'What did the president say about Ketanji Brown Jackson';\nfinal docs = await docSearch.similaritySearch(query: query);\n```\n\n### Similarity search by vector\n\n```dart\nfinal embeddingVector = await embeddings.embedQuery(query);\nfinal docs = await docSearch.similaritySearchByVector(\n    embedding: embeddingVector,\n);\n```\n"
  },
  {
    "path": "editorconfig.txt",
    "content": "# editorconfig\nroot = true\n\n[*]\nindent_style = space\nindent_size = 2\nend_of_line = lf\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nquote_type = single"
  },
  {
    "path": "examples/browser_summarizer/.gitignore",
    "content": "# Miscellaneous\n*.class\n*.log\n*.pyc\n*.swp\n.DS_Store\n.atom/\n.buildlog/\n.history\n.svn/\nmigrate_working_dir/\n\n# IntelliJ related\n*.iml\n*.ipr\n*.iws\n.idea/\n\n# The .vscode folder contains launch configuration and tasks you configure in\n# VS Code which you may wish to be included in version control, so this line\n# is commented out by default.\n#.vscode/\n\n# Flutter/Dart/Pub related\n**/doc/api/\n**/ios/Flutter/.last_build_id\n.dart_tool/\n.flutter-plugins\n.flutter-plugins-dependencies\n.packages\n.pub-cache/\n.pub/\n/build/\n\n# Symbolication related\napp.*.symbols\n\n# Obfuscation related\napp.*.map.json\n\n# Android Studio will place build artifacts here\n/android/app/debug\n/android/app/profile\n/android/app/release\n"
  },
  {
    "path": "examples/browser_summarizer/.metadata",
    "content": "# This file tracks properties of this Flutter project.\n# Used by Flutter tool to assess capabilities and perform upgrades etc.\n#\n# This file should be version controlled.\n\nversion:\n  revision: 796c8ef79279f9c774545b3771238c3098dbefab\n  channel: stable\n\nproject_type: app\n\n# Tracks metadata for the flutter migrate command\nmigration:\n  platforms:\n    - platform: root\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: web\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n\n  # User provided section\n\n  # List of Local paths (relative to this file) that should be\n  # ignored by the migrate tool.\n  #\n  # Files that are not part of the templates will be ignored by default.\n  unmanaged_files:\n    - 'lib/main.dart'\n    - 'ios/Runner.xcodeproj/project.pbxproj'\n"
  },
  {
    "path": "examples/browser_summarizer/README.md",
    "content": "# Browser summarizer\n\nThis sample app demonstrates how to build a Flutter-based Chrome extension that \nsummarizes the content of a webpage using OpenAI's GPT-4o mini model.\n\n![Browser summarizer](browser-summarizer.gif)\n\n## Usage\n\n**Building the project:**\n\n```bash\nflutter build web --csp --no-web-resources-cdn\n```\n\n**Installing the extension:**\n\n1. Open the Extension Management page by navigating to `chrome://extensions`.\n2. Enable \"Developer Mode\" by clicking the toggle located in the upper-right \n   corner.\n3. Click \"Load unpacked\" and select the generated build/web folder from our \n   project.\n\nTo use the extension you need an OpenAI key, you can create one \n[here](https://platform.openai.com/account/api-keys).\n"
  },
  {
    "path": "examples/browser_summarizer/lib/app.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:flutter/material.dart';\nimport 'package:flutter_bloc/flutter_bloc.dart';\n\nimport 'popup/pop_up_screen.dart';\nimport 'settings/settings_repository.dart';\n\nclass App extends StatelessWidget {\n  const App({super.key, required this.settingsRepository});\n\n  final SettingsRepository settingsRepository;\n\n  @override\n  Widget build(final BuildContext context) {\n    return RepositoryProvider.value(\n      value: settingsRepository,\n      child: MaterialApp(\n        theme: ThemeData(\n          colorScheme: ColorScheme.fromSeed(seedColor: Colors.lightGreen),\n          useMaterial3: true,\n        ),\n        home: const PopUpScreen(),\n      ),\n    );\n  }\n}\n"
  },
  {
    "path": "examples/browser_summarizer/lib/chrome/chrome_api.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:chrome_extension/tabs.dart';\n\n/// Query Chrome tabs based on given parameters\nFuture<List<Tab>> query(final ParameterQueryTabs parameterQueryTabs) async {\n  final chromeTabs = await chrome.tabs.query(\n    QueryInfo(\n      active: parameterQueryTabs.active,\n      lastFocusedWindow: parameterQueryTabs.lastFocusedWindow,\n    ),\n  );\n\n  return chromeTabs\n      .map((chromeTab) => Tab(chromeTab.url ?? ''))\n      .toList(growable: false);\n}\n\n/// Chrome tab representation in Dart\nclass Tab {\n  final String url;\n\n  Tab(this.url);\n}\n\n/// Parameters for querying tabs\nclass ParameterQueryTabs {\n  final bool active;\n  final bool lastFocusedWindow;\n\n  ParameterQueryTabs({required this.active, required this.lastFocusedWindow});\n}\n"
  },
  {
    "path": "examples/browser_summarizer/lib/main.dart",
    "content": "import 'package:flutter/material.dart';\nimport 'package:shared_preferences/shared_preferences.dart';\n\nimport 'app.dart';\nimport 'settings/settings_repository.dart';\n\nvoid main() async {\n  final settingsRepository = SettingsRepository(\n    await SharedPreferences.getInstance(),\n  );\n  runApp(App(settingsRepository: settingsRepository));\n}\n"
  },
  {
    "path": "examples/browser_summarizer/lib/popup/bloc/pop_up_screen_cubit.dart",
    "content": "// ignore_for_file: avoid_web_libraries_in_flutter, public_member_api_docs\nimport 'package:equatable/equatable.dart';\nimport 'package:flutter/foundation.dart';\nimport 'package:flutter_bloc/flutter_bloc.dart';\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nimport '../../chrome/chrome_api.dart';\nimport '../../settings/settings_repository.dart';\n\npart 'pop_up_screen_state.dart';\n\nclass PopUpScreenCubit extends Cubit<PopUpScreenState> {\n  PopUpScreenCubit({required this.settingsRepository})\n    : super(const PopUpScreenState()) {\n    _init();\n  }\n\n  final SettingsRepository settingsRepository;\n\n  void _init() {\n    final openAIKey = settingsRepository.getOpenAiKey();\n    emit(\n      state.copyWith(\n        status: openAIKey == null\n            ? PopUpScreenStatus.setUp\n            : PopUpScreenStatus.idle,\n        openAiKey: openAIKey,\n      ),\n    );\n  }\n\n  void onOpenAiKeyChanged(final String openAIKey) {\n    emit(state.copyWith(openAiKey: openAIKey));\n  }\n\n  Future<void> onSaveOpenAiKeyPressed() async {\n    final openAIKey = state.openAiKey;\n    if (openAIKey == null) {\n      return;\n    }\n\n    await settingsRepository.saveOpenAiKey(openAIKey);\n    emit(state.copyWith(status: PopUpScreenStatus.idle));\n  }\n\n  Future<void> onSummarizePressed() async {\n    emit(state.copyWith(status: PopUpScreenStatus.summarizing, summary: ''));\n\n    final docs = await _getCurrentTabContent();\n    final summary = await _generateSummary(docs);\n\n    emit(state.copyWith(status: PopUpScreenStatus.idle, summary: summary));\n  }\n\n  Future<List<Document>> _getCurrentTabContent() async {\n    final tabs = await query(\n      ParameterQueryTabs(active: true, lastFocusedWindow: true),\n    );\n\n    final url = tabs.first.url.trim();\n    final proxyUrl = 'https://corsproxy.io/?${Uri.encodeComponent(url)}';\n\n 
   final loader = WebBaseLoader([proxyUrl]);\n    return loader.load();\n  }\n\n  Future<String> _generateSummary(final List<Document> docs) {\n    const textSplitter = RecursiveCharacterTextSplitter(chunkSize: 5000);\n    final List<Document> docsChunks = textSplitter.splitDocuments(docs);\n\n    final openAIKey = state.openAiKey;\n    final llm = ChatOpenAI(apiKey: openAIKey);\n\n    final summarizeChain = SummarizeChain.mapReduce(\n      llm: llm,\n      mapPrompt: PromptTemplate.fromTemplate('''\nWrite a concise summary of the following text. \nAvoid unnecessary info. Write at 5th-grade level.\n\n\"{context}\"\n\nCONCISE SUMMARY:'''),\n      combinePrompt: PromptTemplate.fromTemplate('''\nSummarize the following text in bullet points using markdown.\nWrite a maximum of 5 bullet points.\n\n\"{context}\"\n\nBULLET POINT SUMMARY:'''),\n    );\n    return summarizeChain.run(docsChunks);\n  }\n}\n"
  },
  {
    "path": "examples/browser_summarizer/lib/popup/bloc/pop_up_screen_state.dart",
    "content": "// ignore_for_file: public_member_api_docs\npart of 'pop_up_screen_cubit.dart';\n\n@immutable\nclass PopUpScreenState extends Equatable {\n  const PopUpScreenState({\n    this.status = PopUpScreenStatus.loading,\n    this.openAiKey,\n    this.summary,\n  });\n\n  final PopUpScreenStatus status;\n  final String? openAiKey;\n  final String? summary;\n\n  PopUpScreenState copyWith({\n    final PopUpScreenStatus? status,\n    final String? openAiKey,\n    final String? summary,\n  }) {\n    return PopUpScreenState(\n      status: status ?? this.status,\n      openAiKey: openAiKey ?? this.openAiKey,\n      summary: summary ?? this.summary,\n    );\n  }\n\n  @override\n  List<Object?> get props => [status, openAiKey, summary];\n}\n\nenum PopUpScreenStatus { loading, setUp, idle, summarizing }\n"
  },
  {
    "path": "examples/browser_summarizer/lib/popup/pop_up_screen.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:flutter/material.dart';\nimport 'package:flutter_bloc/flutter_bloc.dart';\nimport 'package:flutter_markdown/flutter_markdown.dart';\n\nimport '../settings/settings_repository.dart';\nimport 'bloc/pop_up_screen_cubit.dart';\n\nclass PopUpScreen extends StatelessWidget {\n  const PopUpScreen({super.key});\n\n  @override\n  Widget build(final BuildContext context) {\n    return BlocProvider(\n      create: (final _) => PopUpScreenCubit(\n        settingsRepository: context.read<SettingsRepository>(),\n      ),\n      child: const _Scaffold(),\n    );\n  }\n}\n\nclass _Scaffold extends StatelessWidget {\n  const _Scaffold();\n\n  @override\n  Widget build(final BuildContext context) {\n    final theme = Theme.of(context);\n    return Scaffold(\n      appBar: AppBar(\n        backgroundColor: theme.colorScheme.inversePrimary,\n        title: const Text('Summarizer'),\n      ),\n      body: const _Body(),\n    );\n  }\n}\n\nclass _Body extends StatelessWidget {\n  const _Body();\n\n  @override\n  Widget build(final BuildContext context) {\n    return BlocBuilder<PopUpScreenCubit, PopUpScreenState>(\n      buildWhen: (final previous, final current) =>\n          previous.status != current.status,\n      builder: (final context, final state) {\n        return switch (state.status) {\n          PopUpScreenStatus.loading => const _LoadingBody(),\n          PopUpScreenStatus.setUp => const _SetUpBody(),\n          PopUpScreenStatus.idle => const _IdleBody(),\n          PopUpScreenStatus.summarizing => const _SummarizingBody(),\n        };\n      },\n    );\n  }\n}\n\nclass _LoadingBody extends StatelessWidget {\n  const _LoadingBody();\n\n  @override\n  Widget build(final BuildContext context) {\n    return const Center(child: CircularProgressIndicator());\n  }\n}\n\nclass _SetUpBody extends StatelessWidget {\n  const _SetUpBody();\n\n  @override\n  Widget build(final BuildContext context) {\n    
final cubit = context.read<PopUpScreenCubit>();\n    return Padding(\n      padding: const EdgeInsets.all(16),\n      child: Column(\n        mainAxisSize: MainAxisSize.min,\n        children: [\n          TextField(\n            decoration: const InputDecoration(\n              prefixIcon: Icon(Icons.password),\n              labelText: 'OpenAI API key',\n              filled: true,\n            ),\n            obscureText: true,\n            onChanged: cubit.onOpenAiKeyChanged,\n          ),\n          const SizedBox(height: 16),\n          FilledButton(\n            onPressed: cubit.onSaveOpenAiKeyPressed,\n            child: const Text('Save'),\n          ),\n        ],\n      ),\n    );\n  }\n}\n\nclass _IdleBody extends StatelessWidget {\n  const _IdleBody();\n\n  @override\n  Widget build(final BuildContext context) {\n    final theme = Theme.of(context);\n    final cubit = context.read<PopUpScreenCubit>();\n    return BlocBuilder<PopUpScreenCubit, PopUpScreenState>(\n      buildWhen: (final previous, final current) =>\n          previous.summary != current.summary,\n      builder: (final context, final state) {\n        if (state.summary == null || state.summary!.isEmpty) {\n          return Center(\n            child: Column(\n              mainAxisSize: MainAxisSize.min,\n              children: [\n                const Text(\"You don't want to read this long article, do you?\"),\n                const SizedBox(height: 16),\n                FilledButton(\n                  onPressed: cubit.onSummarizePressed,\n                  child: const Text('Summarize it!'),\n                ),\n              ],\n            ),\n          );\n        }\n\n        return SingleChildScrollView(\n          child: Padding(\n            padding: const EdgeInsets.all(16),\n            child: Column(\n              mainAxisSize: MainAxisSize.min,\n              crossAxisAlignment: CrossAxisAlignment.start,\n              children: [\n                if (state.summary != 
null) ...[\n                  Text('Summary:', style: theme.textTheme.titleMedium),\n                  const SizedBox(height: 8),\n                  MarkdownBody(data: cubit.state.summary ?? ''),\n                  const SizedBox(height: 16),\n                  const Divider(),\n                  const SizedBox(height: 16),\n                ],\n                Center(\n                  child: FilledButton(\n                    onPressed: cubit.onSummarizePressed,\n                    child: const Text('Summarize it again!'),\n                  ),\n                ),\n              ],\n            ),\n          ),\n        );\n      },\n    );\n  }\n}\n\nclass _SummarizingBody extends StatelessWidget {\n  const _SummarizingBody();\n\n  @override\n  Widget build(final BuildContext context) {\n    return const Center(\n      child: Column(\n        mainAxisSize: MainAxisSize.min,\n        children: [\n          CircularProgressIndicator(),\n          SizedBox(height: 16),\n          Text('Summarizing...'),\n        ],\n      ),\n    );\n  }\n}\n"
  },
  {
    "path": "examples/browser_summarizer/lib/settings/settings_repository.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:shared_preferences/shared_preferences.dart';\n\nclass SettingsRepository {\n  const SettingsRepository(final SharedPreferences sp) : _sp = sp;\n\n  final SharedPreferences _sp;\n\n  static const _openAiKey = 'open_ai_key';\n\n  String? getOpenAiKey() {\n    return _sp.getString(_openAiKey);\n  }\n\n  Future<void> saveOpenAiKey(final String openAiKey) async {\n    await _sp.setString(_openAiKey, openAiKey);\n  }\n}\n"
  },
  {
    "path": "examples/browser_summarizer/pubspec.yaml",
    "content": "name: browser_summarizer\ndescription: Summarize any webpage with a click of a button!\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  flutter:\n    sdk: flutter\n  chrome_extension: ^0.4.0\n  equatable: ^2.0.7\n  flutter_bloc: ^9.1.1\n  flutter_markdown: ^0.7.7\n  langchain: ^0.8.1\n  langchain_community: 0.4.0+2\n  langchain_openai: ^0.8.1+1\n  shared_preferences: ^2.5.3\n\nflutter:\n  uses-material-design: true\n"
  },
  {
    "path": "examples/browser_summarizer/web/index.html",
    "content": "<!DOCTYPE html>\n<html style=\"height: 400px; width: 600px\">\n\n<head>\n    <meta charset=\"UTF-8\">\n    <meta name=\"description\" content=\"Summarize any webpage with a click of a button!\">\n    <title>Summarizer</title>\n</head>\n\n<body>\n<script src=\"main.dart.js\" type=\"application/javascript\"></script>\n</body>\n\n</html>\n"
  },
  {
    "path": "examples/browser_summarizer/web/manifest.json",
    "content": "{\n  \"manifest_version\": 3,\n  \"name\": \"Summarizer\",\n  \"description\": \"Summarize any webpage with a click of a button!\",\n  \"version\": \"1.0.0\",\n  \"content_security_policy\": {\n    \"extension_pages\": \"script-src 'self' 'wasm-unsafe-eval'; object-src 'self';\"\n  },\n  \"permissions\": [\n    \"activeTab\"\n  ],\n  \"action\": {\n    \"default_popup\": \"index.html\",\n    \"default_icon\": {\n      \"16\": \"icons/icon16.png\",\n      \"32\": \"icons/icon32.png\",\n      \"48\": \"icons/icon48.png\",\n      \"128\": \"icons/icon128.png\"\n    }\n  },\n  \"icons\": {\n    \"16\": \"icons/icon16.png\",\n    \"32\": \"icons/icon32.png\",\n    \"48\": \"icons/icon48.png\",\n    \"128\": \"icons/icon128.png\"\n  }\n}\n"
  },
  {
    "path": "examples/docs_examples/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n"
  },
  {
    "path": "examples/docs_examples/README.md",
    "content": "# Docs examples\n\nExamples used in https://langchaindart.dev documentation.\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/adding_memory.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _chatbotWithMemory();\n}\n\nFuture<void> _chatbotWithMemory() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n\n  final promptTemplate = ChatPromptTemplate.fromPromptMessages([\n    SystemChatMessagePromptTemplate.fromTemplate('You are a helpful chatbot'),\n    const MessagesPlaceholder(variableName: 'history'),\n    HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n  ]);\n\n  final memory = ConversationBufferMemory(returnMessages: true);\n\n  final chain =\n      Runnable.fromMap({\n        'input': Runnable.passthrough(),\n        'history': Runnable.mapInput((_) async {\n          final m = await memory.loadMemoryVariables();\n          return m['history'];\n        }),\n      }) |\n      promptTemplate |\n      model |\n      stringOutputParser;\n\n  const input1 = 'Hi, I am Bob';\n  final output1 = await chain.invoke(input1);\n  print(output1);\n  // Hello Bob! How can I assist you today?\n\n  await memory.saveContext(\n    inputValues: {'input': input1},\n    outputValues: {'output': output1},\n  );\n\n  const input2 = \"What's my name?\";\n  final output2 = await chain.invoke(input2);\n  print(output2);\n  // Your name is Bob, as you mentioned earlier.\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/multiple_chains.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _multipleChains1();\n  await _multipleChains2();\n  await _branchingAndMerging();\n}\n\nFuture<void> _multipleChains1() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n\n  final promptTemplate1 = ChatPromptTemplate.fromTemplate(\n    'What is the city {person} is from? Only respond with the name of the city.',\n  );\n\n  final promptTemplate2 = ChatPromptTemplate.fromTemplate(\n    'What country is the city {city} in? Respond in {language}.',\n  );\n\n  final cityChain = promptTemplate1 | model | stringOutputParser;\n  final combinedChain =\n      Runnable.fromMap({\n        'city': cityChain,\n        'language': Runnable.getItemFromMap('language'),\n      }) |\n      promptTemplate2 |\n      model |\n      stringOutputParser;\n\n  final res = await combinedChain.invoke({\n    'person': 'Obama',\n    'language': 'Spanish',\n  });\n  print(res);\n  // La ciudad de Chicago se encuentra en los Estados Unidos.\n}\n\nFuture<void> _multipleChains2() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate1 = ChatPromptTemplate.fromTemplate(\n    'Generate a {attribute} color. '\n    'Return the name of the color and nothing else:',\n  );\n  final promptTemplate2 = ChatPromptTemplate.fromTemplate(\n    'What is a fruit of color: {color}. '\n    'Return the name of the fruit and nothing else:',\n  );\n  final promptTemplate3 = ChatPromptTemplate.fromTemplate(\n    'What is a country with a flag that has the color: {color}. 
'\n    'Return the name of the country and nothing else:',\n  );\n  final promptTemplate4 = ChatPromptTemplate.fromTemplate(\n    'What is the color of {fruit} and the flag of {country}?',\n  );\n\n  final modelParser = model | const StringOutputParser();\n\n  final colorGenerator =\n      Runnable.getMapFromInput('attribute') |\n      promptTemplate1 |\n      Runnable.fromMap({'color': modelParser});\n  final colorToFruit = promptTemplate2 | modelParser;\n  final colorToCountry = promptTemplate3 | modelParser;\n  final questionGenerator =\n      colorGenerator |\n      Runnable.fromMap({'fruit': colorToFruit, 'country': colorToCountry}) |\n      promptTemplate4 |\n      modelParser;\n\n  final res = await questionGenerator.invoke('warm');\n  print(res);\n  // The color of Apple is typically depicted as silver or gray for their logo\n  // and products. The flag of Armenia consists of three horizontal stripes of\n  // red, blue, and orange from top to bottom.\n}\n\nFuture<void> _branchingAndMerging() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n\n  final planner =\n      Runnable.getMapFromInput() |\n      ChatPromptTemplate.fromTemplate('Generate an argument about: {input}') |\n      model |\n      stringOutputParser |\n      Runnable.getMapFromInput('base_response');\n\n  final argumentsFor =\n      ChatPromptTemplate.fromTemplate(\n        'List the pros or positive aspects of {base_response}',\n      ) |\n      model |\n      stringOutputParser;\n\n  final argumentsAgainst =\n      ChatPromptTemplate.fromTemplate(\n        'List the cons or negative aspects of {base_response}',\n      ) |\n      model |\n      stringOutputParser;\n\n  final finalResponder =\n      ChatPromptTemplate.fromPromptMessages([\n        AIChatMessagePromptTemplate.fromTemplate('{original_response}'),\n        
HumanChatMessagePromptTemplate.fromTemplate(\n          'Pros:\\n{results_1}\\n\\nCons:\\n{results_2}',\n        ),\n        SystemChatMessagePromptTemplate.fromTemplate(\n          'Generate a final response given the critique',\n        ),\n      ]) |\n      model |\n      stringOutputParser;\n\n  final chain =\n      planner |\n      Runnable.fromMap({\n        'results_1': argumentsFor,\n        'results_2': argumentsAgainst,\n        'original_response': Runnable.getItemFromMap('base_response'),\n      }) |\n      finalResponder;\n\n  final res = await chain.invoke('Scrum');\n  print(res);\n  // While Scrum has many benefits, it is essential to acknowledge and address\n  // the potential cons or negative aspects that come with its implementation.\n  // By understanding these challenges, teams can take necessary steps to\n  // mitigate them and maximize the effectiveness of Scrum.\n  //\n  // To address the lack of predictability, teams can focus on improving their\n  // estimation techniques, conducting regular progress tracking, and adopting\n  // techniques like story point estimation or velocity tracking. This can\n  // provide stakeholders with a better understanding of project timelines and\n  // deliverables.\n  //\n  // To overcome dependency on a strong product owner, organizations can invest\n  // in training and supporting the product owner role, ensuring they have the\n  // necessary skills, knowledge, and availability to effectively manage the\n  // product backlog. Collaboration between the product owner and the\n  // development team is crucial to ensure alignment and timely decision-making.\n  //\n  // To manage the increased time and resource requirements, teams can optimize\n  // the Scrum ceremonies and ensure they are conducted efficiently. 
This\n  // includes keeping meetings focused and concise, setting clear agendas, and\n  // leveraging tools and technologies to streamline communication and\n  // collaboration.\n  //\n  // While Scrum is primarily designed for software development, organizations\n  // can explore adapting its principles to non-development tasks by tailoring\n  // and modifying the framework to suit the specific needs and nature of the\n  // project. This may involve adjusting the timeframes, deliverables, and\n  // ceremonies to align with the requirements of non-development projects.\n  //\n  // To control scope creep, teams should establish robust change management\n  // processes and ensure that any additions or modifications to the project\n  // scope are properly evaluated, prioritized, and communicated to all\n  // stakeholders. Regular communication and alignment between the product\n  // owner, development team, and stakeholders are crucial in managing scope\n  // effectively.\n  //\n  // To mitigate risks associated with team collaboration and communication,\n  // organizations can invest in training and fostering a culture of open\n  // communication, collaboration, and conflict resolution. Regular\n  // team-building activities and clear communication channels can help foster\n  // effective collaboration and address any conflicts that may arise.\n  //\n  // To address potential team dynamics and conflicts, it is important to\n  // foster a culture of trust, respect, and shared accountability within the\n  // team. Clear roles and responsibilities, effective communication, and a\n  // supportive team environment are key to minimizing conflicts and ensuring\n  // productive collaboration.\n  //\n  // To overcome resistance to change and address the learning curve,\n  // organizations should invest in comprehensive training and provide ongoing\n  // support and guidance to team members and stakeholders. 
By educating and\n  // involving everyone in the Scrum framework, organizations can facilitate a\n  // smooth transition and foster a positive mindset towards change.\n  //\n  // In conclusion, while Scrum has its challenges, addressing these potential\n  // cons through proactive measures can help maximize the benefits and\n  // effectiveness of the framework. By continuously improving and adapting\n  // Scrum practices, teams can overcome these challenges and achieve\n  // successful project outcomes.\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/prompt_llm_parser.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _promptTemplateLLM();\n  await _attachingStopSequences();\n  await _attachingToolCallInformation();\n  await _promptTemplateLLMStringOutputParser();\n  await _promptTemplateLLMToolsOutputParser();\n  await _simplifyingInput();\n}\n\nFuture<void> _promptTemplateLLM() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  final chain = promptTemplate | model;\n\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // ChatResult{\n  //   id: chatcmpl-9LBNiPXHzWIwc02rR6sS1HTcL9pOk,\n  //   output: AIChatMessage{\n  //     content: Why don't bears wear shoes?\\nBecause they have bear feet!,\n  //   },\n  //   finishReason: FinishReason.stop,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714835666,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 13,\n  //     responseTokens: 13,\n  //     totalTokens: 26,\n  //   },\n  //   streaming: false\n  // }\n}\n\nFuture<void> _attachingStopSequences() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  final chain =\n      promptTemplate | model.bind(const ChatOpenAIOptions(stop: ['\\n']));\n\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // ChatResult{\n  //   id: chatcmpl-9LBOohTtdg12zD8zzz2GX1ib24UXO,\n  //   output: AIChatMessage{\n  //     content: Why don't bears wear shoes? 
,\n  //   },\n  //   finishReason: FinishReason.stop,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714835734,\n  //     system_fingerprint: fp_a450710239\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 13,\n  //     responseTokens: 8,\n  //     totalTokens: 21\n  //   },\n  //   streaming: false\n  // }\n}\n\nFuture<void> _attachingToolCallInformation() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  const tool = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline for the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n\n  final chain =\n      promptTemplate |\n      model.bind(\n        ChatOpenAIOptions(\n          tools: const [tool],\n          toolChoice: ChatToolChoice.forced(name: tool.name),\n        ),\n      );\n\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // ChatResult{\n  //   id: chatcmpl-9LBPyaZcFMgjmOvkD0JJKAyA4Cihb,\n  //   output: AIChatMessage{\n  //     content: ,\n  //     toolCalls: [\n  //       AIChatMessageToolCall{\n  //         id: call_JIhyfu6jdIXaDHfYzbBwCKdb,\n  //         name: joke,\n  //         argumentsRaw: {\"setup\":\"Why don't bears like fast food?\",\"punchline\":\"Because they can't catch it!\"},\n  //         arguments: {\n  //           setup: Why don't bears like fast food?,\n  //           punchline: Because they can't catch it!\n  //         },\n  //       }\n  //     ],\n  //   },\n  //   finishReason: FinishReason.stop,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     
created: 1714835806,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 77,\n  //     responseTokens: 24,\n  //     totalTokens: 101\n  //   },\n  //   streaming: false\n  // }\n}\n\nFuture<void> _promptTemplateLLMStringOutputParser() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  final chain = promptTemplate | model | const StringOutputParser();\n\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // Why don't bears wear shoes? Because they have bear feet!\n}\n\nFuture<void> _promptTemplateLLMToolsOutputParser() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  const tool = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline for the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n\n  final chain =\n      promptTemplate |\n      model.bind(\n        ChatOpenAIOptions(\n          tools: const [tool],\n          toolChoice: ChatToolChoice.forced(name: tool.name),\n        ),\n      ) |\n      ToolsOutputParser();\n\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // [ParsedToolCall{\n  //   id: call_tDYrlcVwk7bCi9oh5IuknwHu,\n  //   name: joke,\n  //   arguments: {\n  //     setup: What do you call a bear with no teeth?,\n  //     punchline: A gummy bear!\n  //   },\n  // }]\n}\n\nFuture<void> _simplifyingInput() async {\n  
final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {foo}',\n  );\n\n  final chain =\n      Runnable.getMapFromInput('foo') |\n      promptTemplate |\n      model |\n      const StringOutputParser();\n\n  final res = await chain.invoke('bears');\n  print(res);\n  // Why don't bears wear shoes? Because they have bear feet!\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/retrieval.dart",
    "content": "// ignore_for_file: avoid_print, avoid_redundant_argument_values\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_chroma/langchain_chroma.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _addDocumentsToVectorStore();\n  await _rag();\n  await _ragMultipleInputs();\n  await _conversationalRetrievalChain();\n  await _conversationalRetrievalChainMemoryAndDocs();\n}\n\nFuture<void> _addDocumentsToVectorStore() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n\n  final vectorStore = Chroma(embeddings: embeddings);\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'Payment methods: iDEAL, PayPal and credit card'),\n      Document(pageContent: 'Free shipping: on orders over 30€'),\n    ],\n  );\n}\n\nFuture<void> _rag() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Chroma(embeddings: embeddings);\n\n  final retriever = vectorStore.asRetriever();\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\n  final chain =\n      Runnable.fromMap<String>({\n        'context': retriever | Runnable.mapInput((docs) => docs.join('\\n')),\n        'question': Runnable.passthrough(),\n      }) |\n      promptTemplate |\n      model |\n      const StringOutputParser();\n\n  final res1 = await chain.invoke('What payment methods do you accept?');\n  print(res1);\n  // The payment methods accepted are iDEAL, PayPal, and credit card.\n\n  await chain.stream('How can I get free shipping?').forEach(stdout.write);\n  // To get free shipping, you need to place an order 
over 30€.\n}\n\nFuture<void> _ragMultipleInputs() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Chroma(embeddings: embeddings);\n\n  final retriever = vectorStore.asRetriever();\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}\n\nAnswer in the following language: {language}''');\n\n  final chain =\n      Runnable.fromMap({\n        'context':\n            Runnable.getItemFromMap<String>('question') |\n            (retriever | Runnable.mapInput((docs) => docs.join('\\n'))),\n        'question': Runnable.getItemFromMap('question'),\n        'language': Runnable.getItemFromMap('language'),\n      }) |\n      promptTemplate |\n      model |\n      const StringOutputParser();\n\n  final res1 = await chain.invoke({\n    'question': 'What payment methods do you accept?',\n    'language': 'es_ES',\n  });\n  print(res1);\n  // Aceptamos los siguientes métodos de pago: iDEAL, PayPal y tarjeta de\n  // crédito.\n\n  await chain\n      .stream({'question': 'How can I get free shipping?', 'language': 'nl_NL'})\n      .forEach(stdout.write);\n  // Om gratis verzending te krijgen, moet je bestellingen plaatsen van meer\n  // dan 30€.\n}\n\nFuture<void> _conversationalRetrievalChain() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Chroma(embeddings: embeddings);\n\n  final retriever = vectorStore.asRetriever();\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final condenseQuestionPrompt = ChatPromptTemplate.fromTemplate('''\nGiven the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat 
History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:''');\n\n  final answerPrompt = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\n  String combineDocuments(\n    final List<Document> documents, {\n    final String separator = '\\n\\n',\n  }) {\n    return documents.map((final d) => d.pageContent).join(separator);\n  }\n\n  String formatChatHistory(final List<(String, String)> chatHistory) {\n    final formattedDialogueTurns = chatHistory.map((final dialogueTurn) {\n      final (human, ai) = dialogueTurn;\n      return 'Human: $human\\nAssistant: $ai';\n    });\n    return formattedDialogueTurns.join('\\n');\n  }\n\n  final inputs = Runnable.fromMap({\n    'standalone_question':\n        Runnable.fromMap({\n          'question': Runnable.getItemFromMap('question'),\n          'chat_history':\n              Runnable.getItemFromMap<List<(String, String)>>('chat_history') |\n              Runnable.mapInput(formatChatHistory),\n        }) |\n        condenseQuestionPrompt |\n        model |\n        const StringOutputParser(reduceOutputStream: true),\n  });\n\n  final context = Runnable.fromMap({\n    'context':\n        Runnable.getItemFromMap<String>('standalone_question') |\n        retriever |\n        Runnable.mapInput<List<Document>, String>(combineDocuments),\n    'question': Runnable.getItemFromMap('standalone_question'),\n  });\n\n  final conversationalQaChain =\n      inputs | context | answerPrompt | model | const StringOutputParser();\n\n  final res1 = await conversationalQaChain.invoke({\n    'question': 'What payment methods do you accept?',\n    'chat_history': <(String, String)>[],\n  });\n  print(res1);\n  // The methods of payment that are currently accepted are iDEAL, PayPal, and\n  // credit card.\n\n  await conversationalQaChain\n      .stream({\n        'question': 'Do I get free shipping?',\n        'chat_history': [('How much did you 
spend?', 'I spent 100€')],\n      })\n      .forEach(stdout.write);\n  // Yes, shipping is free on orders over 30€.\n}\n\nFuture<void> _conversationalRetrievalChainMemoryAndDocs() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Chroma(embeddings: embeddings);\n\n  final retriever = vectorStore.asRetriever(\n    defaultOptions: const VectorStoreRetrieverOptions(\n      searchType: VectorStoreSimilaritySearch(k: 1),\n    ),\n  );\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n  final memory = ConversationBufferMemory(\n    inputKey: 'question',\n    outputKey: 'answer',\n    memoryKey: 'history',\n    returnMessages: true,\n  );\n\n  final condenseQuestionPrompt = ChatPromptTemplate.fromTemplate('''\nGiven the following conversation and a follow up question, rephrase the follow up question to be a standalone question that includes all the details from the conversation in its original language\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:''');\n\n  final answerPrompt = ChatPromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n{context}\n\nQuestion: {question}''');\n\n  String combineDocuments(\n    final List<Document> documents, {\n    final String separator = '\\n\\n',\n  }) => documents.map((final d) => d.pageContent).join(separator);\n\n  String formatChatHistory(final List<ChatMessage> chatHistory) {\n    final formattedDialogueTurns = chatHistory\n        .map(\n          (final msg) => switch (msg) {\n            HumanChatMessage _ => 'Human: ${msg.content}',\n            AIChatMessage _ => 'AI: ${msg.content}',\n            _ => '',\n          },\n        )\n        .toList();\n    return formattedDialogueTurns.join('\\n');\n  }\n\n  // First, we load the memory\n  final loadedMemory = Runnable.fromMap({\n    
'question': Runnable.getItemFromMap('question'),\n    'memory': Runnable.mapInput((_) => memory.loadMemoryVariables()),\n  });\n\n  // Next, we get the chat history from the memory\n  final expandedMemory = Runnable.fromMap({\n    'question': Runnable.getItemFromMap('question'),\n    'chat_history':\n        Runnable.getItemFromMap('memory') |\n        Runnable.mapInput<MemoryVariables, List<ChatMessage>>(\n          (final input) => input['history'],\n        ),\n  });\n\n  // Now, we generate a standalone question that includes the\n  // necessary details from the chat history\n  final standaloneQuestion = Runnable.fromMap({\n    'standalone_question':\n        Runnable.fromMap({\n          'question': Runnable.getItemFromMap('question'),\n          'chat_history':\n              Runnable.getItemFromMap<List<ChatMessage>>('chat_history') |\n              Runnable.mapInput(formatChatHistory),\n        }) |\n        condenseQuestionPrompt |\n        model |\n        stringOutputParser,\n  });\n\n  // Now we retrieve the documents\n  final retrievedDocs = Runnable.fromMap({\n    'docs': Runnable.getItemFromMap('standalone_question') | retriever,\n    'question': Runnable.getItemFromMap('standalone_question'),\n  });\n\n  // Construct the inputs for the answer prompt\n  final finalInputs = Runnable.fromMap({\n    'context':\n        Runnable.getItemFromMap('docs') |\n        Runnable.mapInput<List<Document>, String>(combineDocuments),\n    'question': Runnable.getItemFromMap('question'),\n  });\n\n  // We prompt the model for an answer\n  final answer = Runnable.fromMap({\n    'answer': finalInputs | answerPrompt | model | stringOutputParser,\n    'docs': Runnable.getItemFromMap('docs'),\n  });\n\n  // And finally, we put it all together\n  final conversationalQaChain =\n      loadedMemory |\n      expandedMemory |\n      standaloneQuestion |\n      retrievedDocs |\n      answer;\n\n  // If we add some messages to the memory,\n  // they will be used in the next 
invocation\n  await memory.saveContext(\n    inputValues: {\n      'question': ChatMessage.humanText('How much does my order cost?'),\n    },\n    outputValues: {'answer': ChatMessage.ai('You have to pay 100€')},\n  );\n\n  final res = await conversationalQaChain.invoke({\n    'question': 'Do I get free shipping on the value of my order?',\n  });\n  print(res);\n  // {\n  //   answer: Yes, based on the given context, you would receive free shipping\n  //     on your order of 100€ since it exceeds the minimum requirement of 30€\n  //     for free shipping.,\n  //   docs: [\n  //     Document{\n  //       id: 69974fe1-8436-40c7-87d1-c59c5ff1c6a6,\n  //       pageContent: Free shipping: on orders over 30€,\n  //       metadata: {},\n  //     }\n  //   ]\n  // }\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/routing.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main(final List<String> arguments) async {\n  await _runnableRouter();\n  await _routingBySemanticSimilarity();\n}\n\nFuture<void> _runnableRouter() async {\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n  );\n\n  final classificationChain =\n      PromptTemplate.fromTemplate('''\nGiven the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.\n\nDo not respond with more than one word.\n\n<question>\n{question}\n</question>\n\nClassification:\n  ''') |\n      chatModel |\n      const StringOutputParser();\n\n  final res1 = await classificationChain.invoke({\n    'question': 'how do I call Anthropic?',\n  });\n  print(res1);\n  // Anthropic\n\n  final langchainChain =\n      PromptTemplate.fromTemplate('''\nYou are an expert in langchain.\nAlways answer questions starting with \"As Harrison Chase told me\".\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n  ''') |\n      chatModel |\n      const StringOutputParser();\n\n  final anthropicChain =\n      PromptTemplate.fromTemplate('''\nYou are an expert in anthropic.\nAlways answer questions starting with \"As Dario Amodei told me\".\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n  ''') |\n      chatModel |\n      const StringOutputParser();\n\n  final generalChain =\n      PromptTemplate.fromTemplate('''\nRespond to the following question:\n\nQuestion: {question}\nAnswer:\n  ''') |\n      chatModel |\n      const StringOutputParser();\n\n  final router = Runnable.fromRouter((Map<String, dynamic> input, _) {\n    final topic = (input['topic'] as String).toLowerCase();\n    if (topic.contains('langchain')) {\n      return langchainChain;\n    } else if (topic.contains('anthropic')) {\n      return anthropicChain;\n    } else {\n    
  return generalChain;\n    }\n  });\n\n  final fullChain =\n      Runnable.fromMap({\n        'topic': classificationChain,\n        'question': Runnable.getItemFromMap('question'),\n      }) |\n      router;\n\n  final res2 = await fullChain.invoke({'question': 'how do I use Anthropic?'});\n  print(res2);\n  // As Dario Amodei told me, using Anthropic is a straightforward process that...\n\n  final res3 = await fullChain.invoke({'question': 'how do I use LangChain?'});\n  print(res3);\n  // As Harrison Chase told me, using LangChain is a breeze!\n\n  final res4 = await fullChain.invoke({'question': 'whats 2 + 2'});\n  print(res4);\n  // The answer is... 4!\n}\n\nFuture<void> _routingBySemanticSimilarity() async {\n  const physicsTemplate = '''\nYou are a very smart physicist.\nYou are great at answering questions about physics (e.g. black holes, quantum mechanics, etc.)\nin a concise and easy to understand manner.\nWhen you don't know the answer to a question you admit that you don't know.\n\nHere is a question:\n{query}\n''';\n\n  const historyTemplate = '''\nYou are a very good historian.\nYou are great at answering history questions (e.g. about the Roman Empire, World War II, etc.) \nin a detailed and engaging manner. 
\nYou are able to provide a lot of context and background information.\n\n\nHere is a question:\n{query}\n''';\n\n  final embeddings = OllamaEmbeddings(model: 'llama3.2');\n  final promptTemplates = [physicsTemplate, historyTemplate];\n  final promptEmbeddings = await embeddings.embedDocuments(\n    promptTemplates.map((final pt) => Document(pageContent: pt)).toList(),\n  );\n\n  final chain =\n      Runnable.fromMap<String>({'query': Runnable.passthrough()}) |\n      Runnable.fromRouter((input, _) async {\n        final query = input['query'] as String;\n        final queryEmbedding = await embeddings.embedQuery(query);\n        final mostSimilarIndex = getIndexesMostSimilarEmbeddings(\n          queryEmbedding,\n          promptEmbeddings,\n        ).first;\n        print('Using ${mostSimilarIndex == 0 ? 'Physicist' : 'Historian'}');\n        return PromptTemplate.fromTemplate(promptTemplates[mostSimilarIndex]);\n      }) |\n      ChatOllama(defaultOptions: const ChatOllamaOptions(model: 'llama3.2')) |\n      const StringOutputParser();\n\n  final res1 = await chain.invoke(\"What's a black hole?\");\n  print(res1);\n  // Using Physicist\n  // Black holes! One of my favorite topics!\n  // A black hole is a region in space where the gravitational pull is so strong...\n\n  final res2 = await chain.invoke('When did World War II end?');\n  print(res2);\n  // Using Historian\n  // A great question to start with! World War II ended on September 2, 1945...\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/streaming.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _languageModels();\n  await _chains();\n  await _inputStreams();\n  await _inputStreamMapper();\n  await _nonStreamingComponents();\n}\n\nFuture<void> _languageModels() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openAiApiKey);\n\n  final stream = model.stream(\n    PromptValue.string('Hello! Tell me about yourself.'),\n  );\n  final chunks = <ChatResult>[];\n  await for (final chunk in stream) {\n    chunks.add(chunk);\n    stdout.write('${chunk.output.content}|');\n  }\n  // Hello|!| I| am| a| language| model| AI| created| by| Open|AI|,|...\n\n  print(chunks.first);\n  // ChatResult{\n  //   id: chatcmpl-9IHQvyTl9fyVmF7P6zamGaX1XAN6d,\n  //   output: AIChatMessage{\n  //     content: Hello,\n  //   },\n  //   finishReason: FinishReason.unspecified,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714143945,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   streaming: true\n  // }\n\n  final result = chunks.sublist(0, 6).reduce((prev, next) => prev.concat(next));\n  print(result);\n  // ChatResult{\n  //   id: chatcmpl-9IHQvyTl9fyVmF7P6zamGaX1XAN6d,\n  //   output: AIChatMessage{\n  //     content: Hello! 
I am a language model\n  //   },\n  //   finishReason: FinishReason.unspecified,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714143945,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   streaming: true\n  // }\n}\n\nFuture<void> _chains() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final model = ChatOpenAI(apiKey: openAiApiKey);\n  final prompt = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n  const parser = StringOutputParser<ChatResult>();\n\n  final chain = prompt.pipe(model).pipe(parser);\n\n  final stream = chain.stream({'topic': 'parrot'});\n  await stream.forEach((final chunk) => stdout.write('$chunk|'));\n  // |Why| don|'t| you| ever| play| hide| and| seek| with| a| par|rot|?|\n  // |Because| they| always| squ|awk| when| they| find| you|!||\n}\n\nFuture<void> _inputStreams() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n  final parser = JsonOutputParser<ChatResult>();\n\n  final chain = model.pipe(parser);\n\n  final stream = chain.stream(\n    PromptValue.string(\n      'Output a list of the countries france, spain and japan and their '\n      'populations in JSON format. Use a dict with an outer key of '\n      '\"countries\" which contains a list of countries. 
'\n      'Each country should have the key \"name\" and \"population\"',\n    ),\n  );\n  await stream.forEach((final chunk) => print('$chunk|'));\n  // {}|\n  // {countries: []}|\n  // {countries: [{}]}|\n  // {countries: [{name: }]}|\n  // {countries: [{name: France}]}|\n  // {countries: [{name: France, population: 670}]}|\n  // {countries: [{name: France, population: 670760}]}|\n  // {countries: [{name: France, population: 67076000}]}|\n  // {countries: [{name: France, population: 67076000}, {}]}|\n  // {countries: [{name: France, population: 67076000}, {name: }]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 467}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 467237}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: }]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126476}]}|\n  // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126476461}]}|\n}\n\nFuture<void> _inputStreamMapper() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n  final parser = JsonOutputParser<ChatResult>();\n  final mapper = 
Runnable.mapInputStream((\n    Stream<Map<String, dynamic>> inputStream,\n  ) {\n    return inputStream.map((input) {\n      final countries =\n          (input['countries'] as List?)?.cast<Map<String, dynamic>>() ?? [];\n      final countryNames = countries\n          .map((country) => country['name'] as String?)\n          .where((c) => c != null && c.isNotEmpty);\n      return countryNames.join(', ');\n    }).distinct();\n  });\n\n  final chain = model.pipe(parser).pipe(mapper);\n\n  final stream = chain.stream(\n    PromptValue.string(\n      'Output a list of the countries france, spain and japan and their '\n      'populations in JSON format. Use a dict with an outer key of '\n      '\"countries\" which contains a list of countries. '\n      'Each country should have the key \"name\" and \"population\"',\n    ),\n  );\n  await stream.forEach(print);\n  // France\n  // France, Spain\n  // France, Spain, Japan\n}\n\nFuture<void> _nonStreamingComponents() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n  final retriever = vectorStore.asRetriever();\n\n  await retriever.stream('Who created LangChain.dart?').forEach(print);\n  // [Document{pageContent: David ported LangChain to Dart in LangChain.dart},\n  // Document{pageContent: LangChain was created by Harrison}]\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  
final retrievalChain = Runnable.fromMap<String>({\n    'context': retriever,\n    'question': Runnable.passthrough(),\n  }).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  await retrievalChain\n      .stream('Who created LangChain.dart?')\n      .forEach((chunk) => stdout.write('$chunk|'));\n  // |David| created| Lang|Chain|.dart|.||\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/cookbook/tools.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _calculator();\n}\n\nFuture<void> _calculator() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate('''\nTurn the following user input into a math expression for a calculator. \nOutput only the math expression. Let's think step by step.\n\nINPUT:\n{input}\n\nMATH EXPRESSION:''');\n\n  final chain =\n      Runnable.getMapFromInput() |\n      promptTemplate |\n      model |\n      stringOutputParser |\n      Runnable.getMapFromInput() |\n      CalculatorTool();\n\n  final res = await chain.invoke(\n    'If I had 3 apples and you had 5 apples but we ate 3. '\n    'If we cut the remaining apples in half, how many pieces would we have?',\n    options: const ChatOpenAIOptions(temperature: 0),\n  );\n  print(res);\n  // 10.0\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/fallbacks.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  await _modelWithFallbacks();\n  await _modelWithMultipleFallbacks();\n  await _chainWithFallbacks();\n}\n\nFuture<void> _modelWithFallbacks() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final fakeOpenAIModel = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),\n  );\n\n  final latestModel = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n  );\n\n  final modelWithFallbacks = fakeOpenAIModel.withFallbacks([latestModel]);\n\n  final prompt = PromptValue.string('Explain why sky is blue in 2 lines');\n\n  final res = await modelWithFallbacks.invoke(prompt);\n  print(res);\n  /*\n{\n  \"ChatResult\": {\n    \"id\": \"chatcmpl-9nKBcFNkzo5qUrdNB92b36J0d1meA\",\n    \"output\": {\n      \"AIChatMessage\": {\n        \"content\": \"The sky appears blue because molecules in the Earth's atmosphere scatter shorter wavelength blue light from the sun more effectively than longer wavelengths like red. 
This scattering process is known as Rayleigh scattering.\",\n        \"toolCalls\": []\n      }\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721542696,\n      \"system_fingerprint\": \"fp_400f27fa1f\"\n    },\n    \"usage\": {\n      \"LanguageModelUsage\": {\n        \"promptTokens\": 16,\n        \"promptBillableCharacters\": null,\n        \"responseTokens\": 36,\n        \"responseBillableCharacters\": null,\n        \"totalTokens\": 52\n      }\n    },\n    \"streaming\": false\n  }\n}\n*/\n}\n\nFuture<void> _modelWithMultipleFallbacks() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final fakeOpenAIModel1 = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),\n  );\n\n  final fakeOpenAIModel2 = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'potato'),\n  );\n\n  final latestModel = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n  );\n\n  final modelWithFallbacks = fakeOpenAIModel1.withFallbacks([\n    fakeOpenAIModel2,\n    latestModel,\n  ]);\n\n  final prompt = PromptValue.string('Explain why sky is blue in 2 lines');\n\n  final res = await modelWithFallbacks.invoke(prompt);\n  print(res);\n  /*\n  {\n  \"id\": \"chatcmpl-9nLKW345nrh0nzmw18iO35XnoQ2jo\",\n  \"output\": {\n    \"content\": \"The sky appears blue due to Rayleigh scattering, where shorter blue wavelengths of sunlight are scattered more than other colors by the molecules in Earth's atmosphere. 
This scattering disperses blue light in all directions, making the sky look blue.\",\n    \"toolCalls\": []\n  },\n  \"finishReason\": \"FinishReason.stop\",\n  \"metadata\": {\n    \"model\": \"gpt-4o-2024-05-13\",\n    \"created\": 1721547092,\n    \"system_fingerprint\": \"fp_c4e5b6fa31\"\n  },\n  \"usage\": {\n    \"promptTokens\": 16,\n    \"promptBillableCharacters\": null,\n    \"responseTokens\": 45,\n    \"responseBillableCharacters\": null,\n    \"totalTokens\": 61\n  },\n  \"streaming\": false\n}\n*/\n}\n\nFuture<void> _chainWithFallbacks() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final fakeOpenAIModel = ChatOpenAI(\n    defaultOptions: const ChatOpenAIOptions(model: 'tomato'),\n  );\n\n  final latestModel = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n  );\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'tell me a joke about {topic}',\n  );\n\n  final badChain = promptTemplate.pipe(fakeOpenAIModel);\n  final goodChain = promptTemplate.pipe(latestModel);\n\n  final chainWithFallbacks = badChain.withFallbacks([goodChain]);\n\n  final res = await chainWithFallbacks.batch([\n    {'topic': 'bears'},\n    {'topic': 'cats'},\n  ]);\n  print(res);\n  /*\n[\n  {\n    \"id\": \"chatcmpl-9nKncT4IpAxbUxrEqEKGB0XUeyGRI\",\n    \"output\": {\n      \"content\": \"Sure! How about this one?\\n\\nWhy did the bear bring a suitcase to the forest?\\n\\nBecause it wanted to pack a lunch! 
🐻🌲\",\n      \"toolCalls\": []\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721545052,\n      \"system_fingerprint\": \"fp_400f27fa1f\"\n    },\n    \"usage\": {\n      \"promptTokens\": 13,\n      \"promptBillableCharacters\": null,\n      \"responseTokens\": 31,\n      \"responseBillableCharacters\": null,\n      \"totalTokens\": 44\n    },\n    \"streaming\": false\n  },\n  {\n    \"id\": \"chatcmpl-9nKnc58FpXFTPkzZfm2hHxJ5VSQQh\",\n    \"output\": {\n      \"content\": \"Sure, here's a cat joke for you:\\n\\nWhy was the cat sitting on the computer?\\n\\nBecause it wanted to keep an eye on the mouse!\",\n      \"toolCalls\": []\n    },\n    \"finishReason\": \"FinishReason.stop\",\n    \"metadata\": {\n      \"model\": \"gpt-4o-2024-05-13\",\n      \"created\": 1721545052,\n      \"system_fingerprint\": \"fp_c4e5b6fa31\"\n    },\n    \"usage\": {\n      \"promptTokens\": 13,\n      \"promptBillableCharacters\": null,\n      \"responseTokens\": 29,\n      \"responseBillableCharacters\": null,\n      \"totalTokens\": 42\n    },\n    \"streaming\": false\n  }\n]\n*/\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/get_started.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _promptModelOutputParser();\n  await _ragSearch();\n}\n\nFuture<void> _promptModelOutputParser() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final chain = promptTemplate.pipe(model).pipe(outputParser);\n\n  final res = await chain.invoke({'topic': 'ice cream'});\n  print(res);\n  // Why did the ice cream truck break down?\n  // Because it had too many \"scoops\"!\n\n  // 1. Prompt\n\n  final promptValue = await promptTemplate.invoke({'topic': 'ice cream'});\n\n  final messages = promptValue.toChatMessages();\n  print(messages);\n  // [HumanChatMessage{\n  //   content: ChatMessageContentText{\n  //     text: Tell me a joke about ice cream,\n  //   },\n  // }]\n\n  final string = promptValue.toString();\n  print(string);\n  // Human: Tell me a joke about ice cream\n\n  // 2. Model\n\n  final chatOutput = await model.invoke(promptValue);\n  print(chatOutput.output);\n  // AIChatMessage{\n  //   content: Why did the ice cream truck break down?\n  //   Because it couldn't make it over the rocky road!,\n  // }\n\n  final llm = OpenAI(apiKey: openaiApiKey);\n  final llmOutput = await llm.invoke(promptValue);\n  print(llmOutput.output);\n  // Why did the ice cream go to therapy?\n  // Because it had a rocky road!\n\n  // 3. 
Output parser\n\n  final parsed = await outputParser.invoke(chatOutput);\n  print(parsed);\n  // Why did the ice cream go to therapy?\n  // Because it had a rocky road!\n\n  final input = {'topic': 'ice cream'};\n\n  final res1 = await promptTemplate.invoke(input);\n  print(res1.toChatMessages());\n  // [HumanChatMessage{\n  //   content: ChatMessageContentText{\n  //     text: Tell me a joke about ice cream,\n  //   },\n  // }]\n\n  final res2 = await promptTemplate.pipe(model).invoke(input);\n  print(res2);\n  // ChatResult{\n  //   id: chatcmpl-9J37Tnjm1dGUXqXBF98k7jfexATZW,\n  //   output: AIChatMessage{\n  //     content: Why did the ice cream cone go to therapy? Because it had too many sprinkles of emotional issues!,\n  //   },\n  //   finishReason: FinishReason.stop,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714327251,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 14,\n  //     promptBillableCharacters: null,\n  //     responseTokens: 21,\n  //     responseBillableCharacters: null,\n  //     totalTokens: 35\n  //     },\n  //   streaming: false\n  // }\n}\n\nFuture<void> _ragSearch() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  // 1. Create a vector store and add documents to it\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n\n  // 2. Define the retrieval chain\n  final retriever = vectorStore.asRetriever();\n  final setupAndRetrieval = Runnable.fromMap<String>({\n    'context': retriever.pipe(\n      Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n    ),\n    'question': Runnable.passthrough(),\n  });\n\n  // 3. 
Construct a RAG prompt template\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  // 4. Define the final chain\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n  final chain = setupAndRetrieval\n      .pipe(promptTemplate)\n      .pipe(model)\n      .pipe(outputParser);\n\n  // 5. Run the pipeline\n  final res = await chain.invoke('Who created LangChain.dart?');\n  print(res);\n  // David created LangChain.dart\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/interface.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _runnableInterfaceInvoke();\n  await _runnableInterfaceStream();\n  await _runnableInterfaceBatch();\n  await _runnableInterfaceBatchOptions();\n}\n\nFuture<void> _runnableInterfaceInvoke() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());\n\n  final res = await chain.invoke({'topic': 'bears'});\n  print(res);\n  // Why don't bears wear shoes? Because they have bear feet!\n}\n\nFuture<void> _runnableInterfaceStream() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'topic': 'bears'});\n\n  var count = 0;\n  await for (final res in stream) {\n    print('$count: $res');\n    count++;\n  }\n  // 0:\n  // 1: Why\n  // 2:  don\n  // 3: 't\n  // 4:  bears\n  // 5:  like\n  // 6:  fast\n  // 7:  food\n  // 8: ?\n  // 9: Because\n  // 10:  they\n  // 11:  can\n  // 12: 't\n  // 13:  catch\n  // 14:  it\n  // 15: !\n}\n\nFuture<void> _runnableInterfaceBatch() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());\n\n  final res = await chain.batch([\n    
{'topic': 'bears'},\n    {'topic': 'cats'},\n  ]);\n  print(res);\n  //['Why did the bear break up with his girlfriend? Because she was too \"grizzly\" for him!',\n  // 'Why was the cat sitting on the computer? Because it wanted to keep an eye on the mouse!']\n}\n\nFuture<void> _runnableInterfaceBatchOptions() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());\n\n  final res = await chain.batch(\n    [\n      {'topic': 'bears'},\n      {'topic': 'cats'},\n    ],\n    options: [\n      const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5),\n      const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7),\n    ],\n  );\n  print(res);\n  //['Why did the bear break up with his girlfriend? Because he couldn't bear the relationship anymore!,',\n  // 'Why don't cats play poker in the jungle? Because there's too many cheetahs!']\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/binding.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _binding();\n  await _differentModels();\n  await _toolCalling();\n}\n\nFuture<void> _binding() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Write out the following equation using algebraic symbols then solve it. '\n          'Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n',\n    ),\n    (ChatMessageType.human, '{equation_statement}'),\n  ]);\n\n  final chain = Runnable.getMapFromInput<String>(\n    'equation_statement',\n  ).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await chain.invoke('x raised to the third plus seven equals 12');\n  print(res);\n  // EQUATION: \\(x^3 + 7 = 12\\)\n  //\n  // SOLUTION:\n  // Subtract 7 from both sides:\n  // \\(x^3 = 5\\)\n  //\n  // Take the cube root of both sides:\n  // \\(x = \\sqrt[3]{5}\\)\n\n  final chain2 = Runnable.getMapFromInput<String>('equation_statement')\n      .pipe(promptTemplate)\n      .pipe(model.bind(const ChatOpenAIOptions(stop: ['SOLUTION'])))\n      .pipe(outputParser);\n  final res2 = await chain2.invoke(\n    'x raised to the third plus seven equals 12',\n  );\n  print(res2);\n  // EQUATION: \\( x^3 + 7 = 12 \\)\n}\n\nFuture<void> _differentModels() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final chatModel = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n  final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n  final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n  final chain = Runnable.fromMap({\n    'q1':\n        prompt1 |\n        chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) |\n        outputParser,\n    'q2':\n        prompt2 |\n        chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) |\n        outputParser,\n  });\n  final res = await chain.invoke({'name': 'David'});\n  print(res);\n  // {q1: Hello! I'm just a computer program, so I don't have feelings,\n  // q2: I am an AI digital assistant, so I do not have an age like humans do.}\n}\n\nFuture<void> _toolCalling() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  final outputParser = ToolsOutputParser();\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Write out the following equation using algebraic symbols then solve it.',\n    ),\n    (ChatMessageType.human, '{equation_statement}'),\n  ]);\n\n  const tool = ToolSpec(\n    name: 'solver',\n    description: 'Formulates and solves an equation',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'equation': {\n          'type': 'string',\n          'description': 'The algebraic expression of the equation',\n        },\n        'solution': {\n          'type': 'string',\n          'description': 'The solution to the equation',\n        },\n      },\n      'required': ['equation', 'solution'],\n    },\n  );\n\n  final chain = Runnable.getMapFromInput<String>('equation_statement')\n      .pipe(promptTemplate)\n      .pipe(model.bind(const ChatOpenAIOptions(tools: [tool])))\n      .pipe(outputParser);\n\n  final res = await chain.invoke('x raised to the third plus seven equals 12');\n  print(res);\n  // [ParsedToolCall{\n  //   id: call_T2Y3g7rU5s0CzEG4nL35FJYK,\n  //   name: solver,\n  //   arguments: {\n  //     equation: x^3 + 7 = 12,\n  //     solution: x = 1\n  //   },\n  // }]\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/function.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _function();\n}\n\nFuture<void> _function() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  Runnable<T, RunnableOptions, T> logOutput<T extends Object>(String stepName) {\n    return Runnable.fromFunction<T, T>(\n      invoke: (input, options) {\n        print('Output from step \"$stepName\":\\n$input\\n---');\n        return Future.value(input);\n      },\n      stream: (inputStream, options) {\n        return inputStream.map((input) {\n          print('Chunk from step \"$stepName\":\\n$input\\n---');\n          return input;\n        });\n      },\n    );\n  }\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Write out the following equation using algebraic symbols then solve it. '\n          'Use the format:\\nEQUATION:...\\nSOLUTION:...\\n',\n    ),\n    (ChatMessageType.human, '{equation_statement}'),\n  ]);\n\n  final chain = Runnable.getMapFromInput<String>('equation_statement')\n      .pipe(logOutput('getMapFromInput'))\n      .pipe(promptTemplate)\n      .pipe(logOutput('promptTemplate'))\n      .pipe(ChatOpenAI(apiKey: openaiApiKey))\n      .pipe(logOutput('chatModel'))\n      .pipe(const StringOutputParser())\n      .pipe(logOutput('outputParser'));\n\n  // await chain.invoke('x raised to the third plus seven equals 12');\n  // Output from step \"getMapFromInput\":\n  // {equation_statement: x raised to the third plus seven equals 12}\n  // ---\n  // Output from step \"promptTemplate\":\n  // System: Write out the following equation using algebraic symbols then solve it. Use the format\n  //\n  // EQUATION:...\n  // SOLUTION:...\n  //\n  // Human: x raised to the third plus seven equals 12\n  // ---\n  // Output from step \"chatModel\":\n  // ChatResult{\n  //   id: chatcmpl-9JcVxKcryIhASLnpSRMXkOE1t1R9G,\n  //   output: AIChatMessage{\n  //     content:\n  //       EQUATION: \\( x^3 + 7 = 12 \\)\n  //       SOLUTION:\n  //       Subtract 7 from both sides of the equation:\n  //       \\( x^3 = 5 \\)\n  //\n  //       Take the cube root of both sides:\n  //       \\( x = \\sqrt[3]{5} \\)\n  //\n  //       Therefore, the solution is \\( x = \\sqrt[3]{5} \\),\n  //   },\n  //   finishReason: FinishReason.stop,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714463309,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 47,\n  //     responseTokens: 76,\n  //     totalTokens: 123\n  //   },\n  //   streaming: false\n  // }\n  // ---\n  // Output from step \"outputParser\":\n  // EQUATION: \\( x^3 + 7 = 12 \\)\n  //\n  // SOLUTION:\n  // Subtract 7 from both sides of the equation:\n  // \\( x^3 = 5 \\)\n  //\n  // Take the cube root of both sides:\n  // \\( x = \\sqrt[3]{5} \\)\n  //\n  // Therefore, the solution is \\( x = \\sqrt[3]{5} \\)\n\n  chain.stream('x raised to the third plus seven equals 12').listen((_) {});\n  // Chunk from step \"getMapFromInput\":\n  // {equation_statement: x raised to the third plus seven equals 12}\n  // ---\n  // Chunk from step \"promptTemplate\":\n  // System: Write out the following equation using algebraic symbols then solve it. Use the format:\n  // EQUATION:...\n  // SOLUTION:...\n  //\n  // Human: x raised to the third plus seven equals 12\n  // ---\n  // Chunk from step \"chatModel\":\n  // ChatResult{\n  //   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK,\n  //   output: AIChatMessage{\n  //     content: E,\n  //   },\n  //   finishReason: FinishReason.unspecified,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714463766,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{},\n  //   streaming: true\n  // }\n  // ---\n  // Chunk from step \"outputParser\":\n  // E\n  // ---\n  // Chunk from step \"chatModel\":\n  // ChatResult{\n  //   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK,\n  //   output: AIChatMessage{\n  //     content: QU,\n  //   },\n  //   finishReason: FinishReason.unspecified,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714463766,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{},\n  //   streaming: true\n  // }\n  // ---\n  // Chunk from step \"outputParser\":\n  // QU\n  // ---\n  // Chunk from step \"chatModel\":\n  // ChatResult{\n  //   id: chatcmpl-9JcdKMy2yBlJhW2fxVu43Qn0gqofK,\n  //   output: AIChatMessage{\n  //     content: ATION,\n  //   },\n  //   finishReason: FinishReason.unspecified,\n  //   metadata: {\n  //     model: gpt-4o-mini,\n  //     created: 1714463766,\n  //     system_fingerprint: fp_3b956da36b\n  //   },\n  //   usage: LanguageModelUsage{},\n  //   streaming: true\n  // }\n  // ---\n  // Chunk from step \"outputParser\":\n  // ATION\n  // ---\n  // ...\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/map.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _map();\n  await _getItem();\n  await _concurrency();\n}\n\nFuture<void> _map() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: [\n      const Document(pageContent: 'LangChain was created by Harrison'),\n      const Document(\n        pageContent: 'David ported LangChain to Dart in LangChain.dart',\n      ),\n    ],\n  );\n  final retriever = vectorStore.asRetriever();\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final retrievalChain = Runnable.fromMap<String>({\n    'context': retriever,\n    'question': Runnable.passthrough(),\n  }).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await retrievalChain.invoke('Who created LangChain.dart?');\n  print(res);\n  // David created LangChain.dart.\n}\n\nFuture<void> _getItem() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n  final retriever = vectorStore.asRetriever();\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}\\n'\n          'Answer in the following language: {language}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final retrievalChain = Runnable.fromMap<Map<String, dynamic>>({\n    'context': Runnable.getItemFromMap('question').pipe(retriever),\n    'question': Runnable.getItemFromMap('question'),\n    'language': Runnable.getItemFromMap('language'),\n  }).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await retrievalChain.invoke({\n    'question': 'Who created LangChain.dart?',\n    'language': 'Spanish',\n  });\n  print(res);\n  // David portó LangChain a Dart en LangChain.dart\n}\n\nFuture<void> _concurrency() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final jokeChain = PromptTemplate.fromTemplate(\n    'tell me a joke about {topic}',\n  ).pipe(model).pipe(outputParser);\n  final poemChain = PromptTemplate.fromTemplate(\n    'write a 2-line poem about {topic}',\n  ).pipe(model).pipe(outputParser);\n\n  final mapChain = Runnable.fromMap<Map<String, dynamic>>({\n    'joke': jokeChain,\n    'poem': poemChain,\n  });\n\n  final res = await mapChain.invoke({'topic': 'bear'});\n  print(res);\n  // {joke: Why did the bear bring a flashlight to the party? Because he wanted to be the \"light\" of the party!,\n  //  poem: In the forest's hush, the bear prowls wide, A silent guardian, a force of nature's pride.}\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/mapper.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _mapInput();\n  await _mapInputStream();\n  await _getItemFromMap();\n  await _getMapFromInput();\n}\n\nFuture<void> _mapInput() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  // 1. Create a vector store and add documents to it\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n\n  // 2. Define the retrieval chain\n  final retriever = vectorStore.asRetriever();\n  final setupAndRetrieval = Runnable.fromMap<String>({\n    'context': retriever.pipe(\n      Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n    ),\n    'question': Runnable.passthrough(),\n  });\n\n  // 3. Construct a RAG prompt template\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  // 4. Define the final chain\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n  final chain = setupAndRetrieval\n      .pipe(promptTemplate)\n      .pipe(model)\n      .pipe(outputParser);\n\n  // 5. Run the pipeline\n  final res = await chain.invoke('Who created LangChain.dart?');\n  print(res);\n  // David created LangChain.dart\n}\n\nFuture<void> _mapInputStream() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n  final parser = JsonOutputParser<ChatResult>();\n  final mapper = Runnable.mapInputStream((\n    Stream<Map<String, dynamic>> inputStream,\n  ) async* {\n    yield await inputStream.last;\n  });\n\n  final chain = model.pipe(parser).pipe(mapper);\n\n  final stream = chain.stream(\n    PromptValue.string(\n      'Output a list of the countries france, spain and japan and their '\n      'populations in JSON format. Use a dict with an outer key of '\n      '\"countries\" which contains a list of countries. '\n      'Each country should have the key \"name\" and \"population\"',\n    ),\n  );\n  await stream.forEach((final chunk) => print('$chunk|'));\n  // {countries: [{name: France, population: 65273511}, {name: Spain, population: 46754778}, {name: Japan, population: 126476461}]}|\n}\n\nFuture<void> _getItemFromMap() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n  final retriever = vectorStore.asRetriever();\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}\\n'\n          'Answer in the following language: {language}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final retrievalChain = Runnable.fromMap<Map<String, dynamic>>({\n    'context': Runnable.getItemFromMap('question').pipe(retriever),\n    'question': Runnable.getItemFromMap('question'),\n    'language': Runnable.getItemFromMap('language'),\n  }).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await retrievalChain.invoke({\n    'question': 'Who created LangChain.dart?',\n    'language': 'Spanish',\n  });\n  print(res);\n  // David portó LangChain a Dart en LangChain.dart\n}\n\nFuture<void> _getMapFromInput() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Write out the following equation using algebraic symbols then solve it. '\n          'Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n',\n    ),\n    (ChatMessageType.human, '{equation_statement}'),\n  ]);\n\n  final chain = Runnable.getMapFromInput<String>(\n    'equation_statement',\n  ).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await chain.invoke('x raised to the third plus seven equals 12');\n  print(res);\n  // EQUATION: \\(x^3 + 7 = 12\\)\n  //\n  // SOLUTION:\n  // Subtract 7 from both sides:\n  // \\(x^3 = 5\\)\n  //\n  // Take the cube root of both sides:\n  // \\(x = \\sqrt[3]{5}\\)\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/passthrough.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _passthrough();\n  await _retrieval();\n}\n\nFuture<void> _passthrough() async {\n  final runnable = Runnable.fromMap<Map<String, dynamic>>({\n    'passed': Runnable.passthrough(),\n    'modified': Runnable.mapInput((input) => (input['num'] as int) + 1),\n  });\n\n  final res = await runnable.invoke({'num': 1});\n  print(res);\n  // {passed: {num: 1}, modified: 2}\n}\n\nFuture<void> _retrieval() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n  final retriever = vectorStore.asRetriever();\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final retrievalChain = Runnable.fromMap<String>({\n    'context': retriever,\n    'question': Runnable.passthrough(),\n  }).pipe(promptTemplate).pipe(model).pipe(outputParser);\n\n  final res = await retrievalChain.invoke('Who created LangChain.dart?');\n  print(res);\n  // David created LangChain.dart.\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/retry.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  await _modelWithRetry();\n  await _chainWithRetry();\n  await _withRetryOptions();\n  await _withDelayDurations();\n}\n\nFuture<void> _modelWithRetry() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  final input = PromptValue.string('Explain why sky is blue in 2 lines');\n\n  final modelWithRetry = model.withRetry();\n  final res = await modelWithRetry.invoke(input);\n  print(res);\n  /*\n  ChatResult{\n  id: chatcmpl-9zmFYnu19Pd6ss3zVFHlKN71DILtx,\n  output: AIChatMessage{\n  content: The sky appears blue due to Rayleigh scattering, where shorter wavelengths of sunlight (blue light) are scattered more than longer wavelengths (red light) by the molecules in the Earth's atmosphere. This scattering effect is most prominent when the sun is high in the sky.,\n  toolCalls: [],\n},\n  finishReason: FinishReason.stop,\n  metadata: {model: gpt-4o-mini-2024-07-18, created: 1724510508, system_fingerprint: fp_48196bc67a},\n  usage: LanguageModelUsage{\n  promptTokens: 16,\n  promptBillableCharacters: null,\n  responseTokens: 52,\n  responseBillableCharacters: null,\n  totalTokens: 68}\n,\n  streaming: false\n}\n*/\n}\n\nFuture<void> _chainWithRetry() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'tell me a joke about {topic}',\n  );\n  final model = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4o'),\n  );\n  final chain = promptTemplate.pipe(model).withRetry();\n\n  final res = await chain.batch([\n    {'topic': 'bears'},\n    {'topic': 'cats'},\n  ]);\n  print(res);\n  /*\n  [ChatResult{\n  id: chatcmpl-9zmjiMfHP2WP3PhM6YXdoHXS02ZAm,\n  output: AIChatMessage{\n  content: Sure, here's a bear-themed joke for you:\n\nWhy did the bear refuse to play cards?\n\nBecause he was afraid he might get spotted—he couldn’t bear the tension! 🐻♠️,\n  toolCalls: [],\n},\n  finishReason: FinishReason.stop,\n  metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_3aa7262c27},\n  usage: LanguageModelUsage{\n  promptTokens: 13,\n  promptBillableCharacters: null,\n  responseTokens: 41,\n  responseBillableCharacters: null,\n  totalTokens: 54}\n,\n  streaming: false\n}, ChatResult{\n  id: chatcmpl-9zmji1gxCZ4yR3UtX7Af4TBrRhPP1,\n  output: AIChatMessage{\n  content: Sure, here's one for you:\n\nWhy did the cat sit on the computer?\n\nBecause it wanted to keep an eye on the mouse! 🐱🖱️,\n  toolCalls: [],\n},\n  finishReason: FinishReason.stop,\n  metadata: {model: gpt-4o-2024-05-13, created: 1724512378, system_fingerprint: fp_c9aa9c0491},\n  usage: LanguageModelUsage{\n  promptTokens: 13,\n  promptBillableCharacters: null,\n  responseTokens: 34,\n  responseBillableCharacters: null,\n  totalTokens: 47}\n,\n  streaming: false\n}]\n*/\n}\n\nFuture<void> _withRetryOptions() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final input = PromptValue.string('Explain why sky is blue in 2 lines');\n  final model = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'fake-model'),\n  );\n  final modelWithRetry = model.withRetry(maxRetries: 3, addJitter: true);\n  final res = await modelWithRetry.invoke(input);\n  print(res);\n  /*\n  retry attempt 0 with delay duration 0:00:01.082000\n  retry attempt 1 with delay duration 0:00:02.073000\n  retry attempt 2 with delay duration 0:00:04.074000\n  Unhandled exception:\n  Exception: Function failed to return response and max retries exceeded, Error: OpenAIClientException({\n  \"uri\": \"https://api.openai.com/v1/chat/completions\",\n  \"method\": \"POST\",\n  \"code\": 404,\n  \"message\": \"Unsuccessful response\",\n  \"body\": {\n    \"error\": {\n      \"message\": \"The model `fake-model` does not exist or you do not have access to it.\",\n      \"type\": \"invalid_request_error\",\n      \"param\": null,\n      \"code\": \"model_not_found\"\n    }\n  }\n})*/\n}\n\nFuture<void> _withDelayDurations() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final input = PromptValue.string('Explain why sky is blue in 2 lines');\n  final model = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'fake-model'),\n  );\n  final modelWithRetry = model.withRetry(\n    maxRetries: 3,\n    addJitter: false,\n    delayDurations: const [\n      Duration(seconds: 1),\n      Duration(seconds: 2),\n      Duration(seconds: 3),\n    ],\n  );\n  final res = await modelWithRetry.invoke(input);\n  print(res);\n  // retried with delays provided in RetryOptions\n  /*\nretry attempt 0 with delay duration 0:00:01.000000\nretry attempt 1 with delay duration 0:00:02.000000\nretry attempt 2 with delay duration 0:00:03.000000\nUnhandled exception:\nException: Function failed to return response and max retries exceeded, Error: OpenAIClientException({\n  \"uri\": \"https://api.openai.com/v1/chat/completions\",\n  \"method\": \"POST\",\n  \"code\": 401,\n  \"message\": \"Unsuccessful response\",\n  \"body\": {\n    \"error\": {\n      \"message\": \"You didn't provide an API key. You need to provide your API key in an Authorization header using Bearer auth (i.e. Authorization: Bearer YOUR_KEY), or as the password field (with blank username) if you're accessing the API from your browser and are prompted for a username and password. You can obtain an API key from https://platform.openai.com/account/api-keys.\",\n      \"type\": \"invalid_request_error\",\n      \"param\": null,\n      \"code\": null\n    }\n  }\n})\n*/\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/expression_language/primitives/sequence.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _pipe();\n}\n\nFuture<void> _pipe() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n\n  final chain = promptTemplate.pipe(model).pipe(outputParser);\n\n  final res = await chain.invoke({'topic': 'bears'});\n  print(res);\n  // Why don't bears wear socks?\n  // Because they have bear feet!\n\n  final analysisPrompt = ChatPromptTemplate.fromTemplate(\n    'is this a funny joke? {joke}',\n  );\n\n  final composedChain = Runnable.fromMap({\n    'joke': chain,\n  }).pipe(analysisPrompt).pipe(model).pipe(outputParser);\n  final res1 = await composedChain.invoke({'topic': 'bears'});\n  print(res1);\n  // Some people may find this joke funny, especially if they enjoy puns or wordplay...\n\n  final composedChain2 = chain\n      .pipe(Runnable.getMapFromInput('joke'))\n      .pipe(analysisPrompt)\n      .pipe(model)\n      .pipe(outputParser);\n  final res2 = await composedChain2.invoke({'topic': 'bears'});\n  print(res2);\n\n  final composedChain3 = chain\n      .pipe(Runnable.mapInput((joke) => <String, dynamic>{'joke': joke}))\n      .pipe(analysisPrompt)\n      .pipe(model)\n      .pipe(outputParser);\n  final res3 = await composedChain3.invoke({'topic': 'bears'});\n  print(res3);\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/get_started/quickstart.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _llmChatModel();\n  await _promptTemplates();\n  await _chatPromptTemplates();\n  await _commaSeparatedListOutputParser();\n  await _composingLcel();\n}\n\nFuture<void> _llmChatModel() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final llm = OpenAI(apiKey: openAiApiKey);\n  final chatModel = ChatOpenAI(apiKey: openAiApiKey);\n\n  const text =\n      'What would be a good company name for a company that makes colorful socks?';\n  final messages = [ChatMessage.humanText(text)];\n\n  final res1 = await llm.invoke(PromptValue.string(text));\n  print(res1.output);\n  // 'Feetful of Fun'\n\n  final res2 = await chatModel.invoke(PromptValue.chat(messages));\n  print(res2.output);\n  // AIChatMessage(content='RainbowSock Co.')\n}\n\nFuture<void> _promptTemplates() async {\n  final prompt = PromptTemplate.fromTemplate(\n    'What is a good name for a company that makes {product}?',\n  );\n  final res = prompt.format({'product': 'colorful socks'});\n  print(res);\n  // 'What is a good name for a company that makes colorful socks?'\n}\n\nFuture<void> _chatPromptTemplates() async {\n  const template =\n      'You are a helpful assistant that translates {input_language} to {output_language}.';\n  const humanTemplate = '{text}';\n\n  final chatPrompt = ChatPromptTemplate.fromTemplates(const [\n    (ChatMessageType.system, template),\n    (ChatMessageType.human, humanTemplate),\n  ]);\n\n  final res = chatPrompt.formatMessages({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // [\n  //   SystemMessage(content='You are a helpful assistant that translates English to French.'),\n  //   HumanMessage(content='I love programming.')\n  // ]\n}\n\nFuture<void> _commaSeparatedListOutputParser() async {\n  final res = await const CommaSeparatedListOutputParser().invoke(\n    const ChatResult(\n      id: 'id',\n      output: AIChatMessage(content: 'hi, bye'),\n      finishReason: FinishReason.stop,\n      metadata: {},\n      usage: LanguageModelUsage(),\n    ),\n  );\n  print(res);\n  // ['hi',  'bye']\n}\n\nclass CommaSeparatedListOutputParser\n    extends BaseOutputParser<ChatResult, OutputParserOptions, List<String>> {\n  const CommaSeparatedListOutputParser()\n    : super(defaultOptions: const OutputParserOptions());\n\n  @override\n  Future<List<String>> invoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) async {\n    final message = input.output;\n    return message.content.trim().split(',');\n  }\n}\n\nFuture<void> _composingLcel() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  const systemTemplate = '''\nYou are a helpful assistant who generates comma separated lists.\nA user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\nONLY return a comma separated list, and nothing more.\n''';\n  const humanTemplate = '{text}';\n\n  final chatPrompt = ChatPromptTemplate.fromTemplates(const [\n    (ChatMessageType.system, systemTemplate),\n    (ChatMessageType.human, humanTemplate),\n  ]);\n\n  final chatModel = ChatOpenAI(apiKey: openAiApiKey);\n\n  final chain = chatPrompt\n      .pipe(chatModel)\n      .pipe(const CommaSeparatedListOutputParser());\n\n  // Alternative syntax:\n  // final chain = chatPrompt | chatModel | CommaSeparatedListOutputParser();\n\n  final res = await chain.invoke({'text': 'colors'});\n  print(res);\n  // ['red', 'blue', 'green', 'yellow', 'orange']\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/agents/agent_types/tools_agent.dart",
    "content": "// ignore_for_file: avoid_print, unreachable_from_main\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  await _toolsAgent();\n  await _toolsAgentCustomToolsMemory();\n  await _toolsAgentLCEL();\n}\n\nFuture<void> _toolsAgent() async {\n  final llm = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2', temperature: 0),\n  );\n  final tool = CalculatorTool();\n  final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]);\n  final executor = AgentExecutor(agent: agent);\n  final res = await executor.run(\n    'What is 40 raised to the power of 0.43? '\n    'Return the result with 3 decimals.',\n  );\n  print(res);\n  // The result is: 4.885\n}\n\nFuture<void> _toolsAgentCustomToolsMemory() async {\n  final tool = Tool.fromFunction<SearchInput, String>(\n    name: 'search',\n    description: 'Tool for searching the web.',\n    inputJsonSchema: const {\n      'type': 'object',\n      'properties': {\n        'query': {'type': 'string', 'description': 'The query to search for'},\n        'n': {\n          'type': 'integer',\n          'description': 'The number of results to return',\n        },\n      },\n      'required': ['query'],\n    },\n    func: callYourSearchFunction,\n    getInputFromJson: SearchInput.fromJson,\n  );\n\n  final llm = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(\n      model: 'llama3-groq-tool-use',\n      temperature: 0,\n    ),\n  );\n\n  final memory = ConversationBufferMemory(returnMessages: true);\n  final agent = ToolsAgent.fromLLMAndTools(\n    llm: llm,\n    tools: [tool],\n    memory: memory,\n  );\n\n  final executor = AgentExecutor(agent: agent);\n\n  final res1 = await executor.run(\n    'Search for cat names. 
Return only 3 results.',\n  );\n  print(res1);\n  // Here are 3 search results for \"cats\":\n  // 1. Result 1\n  // 2. Result 2\n  // 3. Result 3\n}\n\nclass SearchInput {\n  const SearchInput({required this.query, required this.n});\n\n  final String query;\n  final int n;\n\n  SearchInput.fromJson(final Map<String, dynamic> json)\n    : this(query: json['query'] as String, n: json['n'] as int);\n}\n\nString callYourSearchFunction(final SearchInput input) {\n  final n = input.n;\n  final res = List<String>.generate(\n    n,\n    (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}',\n  );\n  return 'Results:\\n${res.join('\\n')}';\n}\n\nFuture<void> _toolsAgentLCEL() async {\n  final openAiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final prompt = ChatPromptTemplate.fromTemplates(const [\n    (ChatMessageType.system, 'You are a helpful assistant'),\n    (ChatMessageType.human, '{input}'),\n    (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'),\n  ]);\n\n  final tool = CalculatorTool();\n\n  final model = ChatOpenAI(\n    apiKey: openAiKey,\n    defaultOptions: ChatOpenAIOptions(\n      model: 'gpt-4o-mini',\n      temperature: 0,\n      tools: [tool],\n    ),\n  );\n\n  const outputParser = ToolsAgentOutputParser();\n\n  List<ChatMessage> buildScratchpad(final List<AgentStep> intermediateSteps) {\n    return intermediateSteps\n        .map((s) {\n          return s.action.messageLog +\n              [\n                ChatMessage.tool(\n                  toolCallId: s.action.id,\n                  content: s.observation,\n                ),\n              ];\n        })\n        .expand((m) => m)\n        .toList(growable: false);\n  }\n\n  final agent = Agent.fromRunnable(\n    Runnable.mapInput(\n      (AgentPlanInput planInput) => <String, dynamic>{\n        'input': planInput.inputs['input'],\n        'agent_scratchpad': buildScratchpad(planInput.intermediateSteps),\n      },\n    ).pipe(prompt).pipe(model).pipe(outputParser),\n    
tools: [tool],\n  );\n  final executor = AgentExecutor(agent: agent);\n\n  final res = await executor.invoke({\n    'input':\n        'What is 40 raised to the power of 0.43? '\n        'Return the result with 3 decimals.',\n  });\n  print(res['output']);\n  // The result of 40 raised to the power of 0.43 is approximately 4.885.\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/agents/tools/calculator.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final llm = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(model: 'gpt-4', temperature: 0),\n  );\n  final tool = CalculatorTool();\n  final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]);\n  final executor = AgentExecutor(agent: agent);\n  final res = await executor.run('What is 40 raised to the 0.43 power? ');\n  print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852'\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/agents/tools/openai_dalle.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  final openAiKey = Platform.environment['OPENAI_API_KEY'];\n  final llm = ChatOpenAI(\n    apiKey: openAiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      temperature: 0,\n    ),\n  );\n  final tools = <Tool>[CalculatorTool(), OpenAIDallETool(apiKey: openAiKey)];\n  final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n  final executor = AgentExecutor(agent: agent);\n  final res = await executor.run(\n    'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. '\n    'Return ONLY the URL of the image. Do not add any explanation.',\n  );\n  print(res);\n  // https://oaidalleapiprodscus.blob.core.windows.net/private/org-dtDDtkEGoFccn5xaP5W1p3Rr/user-3XZA7QXb1LF8ADMIxEZC0Qp4/img-EV9QNf2sNZfdjqmTnPp5RKMf.png?st=2023-11-19T14:54:29Z&se=2023-11-19T16:54:29Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2023-11-18T20:08:16Z&ske=2023-11-19T20:08:16Z&sks=b&skv=2021-08-06&sig=vOi21StWgPSVBptFpxk1ToBphdWKWc3YcRPdHREAXGI%3D\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/how_to/streaming.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _chatOpenAIStreaming();\n}\n\nFuture<void> _chatOpenAIStreaming() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n  final chat = ChatOpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<ChatResult>();\n\n  final chain = promptTemplate.pipe(chat).pipe(stringOutputParser);\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 123\n  // 456\n  // 789\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/how_to/tools.dart",
    "content": "// ignore_for_file: avoid_print, unused_local_variable, unreachable_from_main\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _definingTools();\n  await _toolResponse();\n  await _toolMessage();\n  await _fewShotPrompting();\n}\n\nFuture<void> _definingTools() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  const tool1 = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline for the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n  final tool2 = CalculatorTool();\n  final tool3 = Tool.fromFunction<SearchInput, String>(\n    name: 'search',\n    description: 'Tool for searching the web.',\n    inputJsonSchema: const {\n      'type': 'object',\n      'properties': {\n        'query': {'type': 'string', 'description': 'The query to search for'},\n        'n': {\n          'type': 'number',\n          'description': 'The number of results to return',\n        },\n      },\n      'required': ['query'],\n    },\n    func: callYourSearchFunction,\n    getInputFromJson: SearchInput.fromJson,\n  );\n\n  final model = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: ChatOpenAIOptions(tools: [tool1, tool2, tool3]),\n  );\n\n  final chain = model.bind(ChatOpenAIOptions(tools: [tool1, tool2, tool3]));\n\n  final res = await model.invoke(\n    PromptValue.string('input'),\n    options: ChatOpenAIOptions(tools: [tool1, tool2, tool3]),\n  );\n  final stream = model.stream(\n    PromptValue.string('input'),\n    options: ChatOpenAIOptions(tools: 
[tool1, tool2, tool3]),\n  );\n\n  ChatOpenAIOptions(\n    tools: [tool1, tool2, tool3],\n    toolChoice: ChatToolChoice.forced(name: 'joke'),\n  );\n  ChatOpenAIOptions(\n    tools: [tool1, tool2, tool3],\n    toolChoice: ChatToolChoice.auto,\n  );\n  ChatOpenAIOptions(\n    tools: [tool1, tool2, tool3],\n    toolChoice: ChatToolChoice.none,\n  );\n}\n\nString callYourSearchFunction(final SearchInput input) {\n  return 'Results:\\n${List<String>.generate(input.n, (final i) => 'Result ${i + 1}').join('\\n')}';\n}\n\nclass SearchInput {\n  const SearchInput({required this.query, required this.n});\n\n  final String query;\n  final int n;\n\n  SearchInput.fromJson(final Map<String, dynamic> json)\n    : this(query: json['query'] as String, n: json['n'] as int);\n}\n\nFuture<void> _toolResponse() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final calculator = CalculatorTool();\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      tools: [calculator],\n    ),\n  );\n\n  final res = await model.invoke(\n    PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n  );\n  print(res);\n  // ChatResult{\n  //   id: chatcmpl-9LY96mq6BYOEhBXQoOSBKVGBartYZ,\n  //   output: AIChatMessage{\n  //     content: ,\n  //     toolCalls: [\n  //       AIChatMessageToolCall{\n  //         id: call_Kgo8xBug6OkFbBVBmAgI2bm0,\n  //         name: calculator,\n  //         argumentsRaw: {\"input\": \"3 * 12\"},\n  //         arguments: {\n  //           input: 3 * 12\n  //         },\n  //       }, AIChatMessageToolCall{\n  //         id: call_Tc2zOW8obEGh0iEtA5UNykqR,\n  //         name: calculator,\n  //         argumentsRaw: {\"input\": \"11 + 49\"},\n  //         arguments: {\n  //           input: 11 + 49\n  //         },\n  //       }\n  //     ],\n  //   }\n  //   finishReason: FinishReason.toolCalls,\n  //   metadata: {\n  //     model: gpt-4-turbo-2024-04-09,\n  //     
created: 1714923172,\n  //     system_fingerprint: fp_ea6eb70039\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 91,\n  //     responseTokens: 49,\n  //     totalTokens: 140\n  //   },\n  //   streaming: false\n  // }\n\n  final chain = model.pipe(ToolsOutputParser());\n  final res2 = await chain.invoke(\n    PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n  );\n  print(res2);\n  // [ParsedToolCall{\n  //   id: call_p4GmED1My56vV6XZi9ChljJN,\n  //   name: calculator,\n  //   arguments: {\n  //     input: 3 * 12\n  //   },\n  // }, ParsedToolCall{\n  //   id: call_eLJo7nII9EanFUcxy42WA5Pm,\n  //   name: calculator,\n  //   arguments: {\n  //     input: 11 + 49\n  //   },\n  // }]\n\n  // final stream = model.stream(\n  //   PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n  // );\n  // await for (final chunk in stream) {\n  //   print(chunk.output.toolCalls);\n  // }\n  // []\n  // [AIChatMessageToolCall{ id: call_bfU0p8DH8xvzZVXaDK3V42hN, name: calculator, argumentsRaw: , arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: {\"in, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: put\":, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: \"3 * , arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: 12\"}, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: call_WnHu44lmhkI3fFpEelyGBh6L, name: calculator, argumentsRaw: , arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: {\"in, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: put\":, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: \"11 +, arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: 49\", arguments: {}, }]\n  // [AIChatMessageToolCall{ id: , name: , argumentsRaw: }, arguments: {}, }] []\n\n  final chain2 = model.pipe(ToolsOutputParser());\n  final stream2 = chain2.stream(\n    
PromptValue.string('Calculate 3 * 12 and 11 + 49'),\n  );\n  await stream2.forEach(print);\n  // []\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {}, }]\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * }, }]\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }]\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {}, }]\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {input: 11 +}, }]\n  // [ParsedToolCall{ id: call_gGXYQDJj9ZG4YmvLhZyLD442, name: calculator, arguments: {input: 3 * 12}, }, ParsedToolCall{ id: call_axZ3Q5Ve8ZvLUB9NDXdwuUVh, name: calculator, arguments: {input: 11 + 49}, }]\n}\n\nFuture<void> _toolMessage() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final calculator = CalculatorTool();\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      tools: [calculator],\n    ),\n  );\n\n  final messages = [ChatMessage.humanText('Calculate 3 * 12 and 11 + 49')];\n\n  final res = await model.invoke(PromptValue.chat(messages));\n  print(res);\n  // ChatResult{\n  //   id: chatcmpl-9LYl0AecfrQClqOS4PNfczOvsBNvt,\n  //   output: AIChatMessage{\n  //     content: ,\n  //     toolCalls: [\n  //       AIChatMessageToolCall{\n  //         id: call_5tXVddYtn9igaRAeUwfH7ZN2,\n  //         name: calculator,\n  //         argumentsRaw: {\"input\": \"3 * 12\"},\n  //         arguments: {\n  //           input: 3 * 12\n  //         },\n  //       },\n  //       AIChatMessageToolCall{\n  //         id: call_bQ3Yx6foKIYLXBMQas5ObhHa,\n  //         name: 
calculator,\n  //         argumentsRaw: {\"input\": \"11 + 49\"},\n  //         arguments: {\n  //           input: 11 + 49\n  //         },\n  //       }\n  //     ],\n  //   },\n  //   finishReason: FinishReason.toolCalls,\n  //   metadata: {\n  //     model: gpt-4-turbo-2024-04-09,\n  //     created: 1714925522,\n  //     system_fingerprint: fp_3450ce39d5\n  //   },\n  //   usage: LanguageModelUsage{\n  //     promptTokens: 91,\n  //     responseTokens: 49,\n  //     totalTokens: 140\n  //   },\n  //   streaming: false\n  // }\n\n  messages.add(res.output);\n\n  for (final toolCall in res.output.toolCalls) {\n    final input = calculator.getInputFromJson(toolCall.arguments);\n    final toolRes = await calculator.invoke(input);\n    final toolMessage = ChatMessage.tool(\n      toolCallId: toolCall.id,\n      content: toolRes,\n    );\n    messages.add(toolMessage);\n  }\n\n  final res2 = await model.invoke(PromptValue.chat(messages));\n  print(res2.output.content);\n  // The calculations yield the following results:\n  // - 3 * 12 = 36\n  // - 11 + 49 = 60\n}\n\nFuture<void> _fewShotPrompting() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final calculator = CalculatorTool();\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      tools: [calculator],\n    ),\n  );\n\n  final examples = [\n    ChatMessage.humanText('Calculate 3 ✖️ 12 and 11 ➕ 49'),\n    ChatMessage.ai(\n      '',\n      toolCalls: const [\n        AIChatMessageToolCall(\n          id: 'call_1',\n          name: 'calculator',\n          argumentsRaw: '{\"input\": \"3 * 12\"}',\n          arguments: {'input': '3 * 12'},\n        ),\n        AIChatMessageToolCall(\n          id: 'call_2',\n          name: 'calculator',\n          argumentsRaw: '{\"input\": \"11 + 49\"}',\n          arguments: {'input': '11 + 49'},\n        ),\n      ],\n    ),\n    ChatMessage.tool(toolCallId: 'call_1', 
content: '36'),\n    ChatMessage.tool(toolCallId: 'call_2', content: '60'),\n    ChatMessage.ai(\n      'The calculations yield the following results:\\n- 3 ✖️ 12 = 36\\n- 11 ➕ 49 = 60',\n    ),\n  ];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are bad at math but are an expert at using a calculator. '\n          'Use past tool usage as an example of how to correctly use the tools.',\n    ),\n    (ChatMessageType.messagesPlaceholder, 'examples'),\n    (ChatMessageType.human, '{query}'),\n  ]);\n\n  final chain = promptTemplate.pipe(model).pipe(ToolsOutputParser());\n  final res = await chain.invoke({\n    'query': 'Calculate 3 ✖️ 12 and 11 ➕ 49',\n    'examples': examples,\n  });\n  print(res);\n  // [ParsedToolCall{\n  //   id: call_BtNoLk7IiQksMfad5897mSSu,\n  //   name: calculator,\n  //   arguments: {input: 3 * 12},\n  // }, ParsedToolCall{\n  //   id: call_ZAiFb1G71hwiwVvnqZFKHTuP,\n  //   name: calculator,\n  //   arguments: {input: 11 + 49},\n  // }]\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anthropic.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_anthropic/langchain_anthropic.dart';\n\nvoid main(final List<String> arguments) async {\n  await _invokeModel();\n  await _multiModal();\n  await _streaming();\n}\n\nFuture<void> _invokeModel() async {\n  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\n  final chatModel = ChatAnthropic(\n    apiKey: apiKey,\n    defaultOptions: const ChatAnthropicOptions(\n      model: 'claude-3-5-sonnet-20241022',\n      temperature: 0,\n    ),\n  );\n\n  final chatPrompt = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, 'Text to translate:\\n{text}'),\n  ]);\n\n  final chain = chatPrompt | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'adore programmer.'\n\n  chatModel.close();\n}\n\nFuture<void> _multiModal() async {\n  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\n  final chatModel = ChatAnthropic(\n    apiKey: apiKey,\n    defaultOptions: const ChatAnthropicOptions(\n      model: 'claude-3-5-sonnet-20241022',\n      temperature: 0,\n    ),\n  );\n  final res = await chatModel.invoke(\n    PromptValue.chat([\n      ChatMessage.human(\n        ChatMessageContent.multiModal([\n          ChatMessageContent.text('What fruit is this?'),\n          ChatMessageContent.image(\n            mimeType: 'image/jpeg',\n            data: base64.encode(\n              await File('./bin/assets/apple.jpeg').readAsBytes(),\n            ),\n          ),\n        ]),\n      ),\n    ]),\n  );\n  print(res.output.content);\n  // -> 'The fruit in the image is an apple.'\n\n  
chatModel.close();\n}\n\nFuture<void> _streaming() async {\n  final apiKey = Platform.environment['ANTHROPIC_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas.',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chatModel = ChatAnthropic(\n    apiKey: apiKey,\n    defaultOptions: const ChatAnthropicOptions(\n      model: 'claude-3-5-sonnet-20241022',\n      temperature: 0,\n    ),\n  );\n\n  final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '30'});\n  await stream.forEach(print);\n  // 123\n  // 456789101\n  // 112131415161\n  // 718192021222\n  // 324252627282\n  // 930\n\n  chatModel.close();\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/anyscale.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _anyscaleInvoke();\n  await _anyscaleStreaming();\n}\n\nFuture<void> _anyscaleInvoke() async {\n  final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, '{text}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: anyscaleApiKey,\n    baseUrl: 'https://api.endpoints.anyscale.com/v1',\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'meta-llama/Llama-2-70b-chat-hf',\n    ),\n  );\n\n  final chain = promptTemplate | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> \"I love programming\" se traduit en français sous la forme \"J'aime passionnément la programmation\"\n}\n\nFuture<void> _anyscaleStreaming() async {\n  final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: anyscaleApiKey,\n    baseUrl: 'https://api.endpoints.anyscale.com/v1',\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',\n    ),\n  );\n\n  final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\n  final stream = 
chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 1\n  // 2\n  // 3\n  // ...\n  // 9\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/googleai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nvoid main(final List<String> arguments) async {\n  await _chatGoogleGenerativeAI();\n  await _chatGoogleGenerativeAIMultiModal();\n  await _chatOpenAIStreaming();\n  await _codeExecution();\n}\n\nFuture<void> _chatGoogleGenerativeAI() async {\n  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\n  final chatModel = ChatGoogleGenerativeAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatGoogleGenerativeAIOptions(\n      model: 'gemini-1.5-pro-latest',\n      temperature: 0,\n    ),\n  );\n\n  final chatPrompt = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, 'Text to translate:\\n{text}'),\n  ]);\n\n  final chain = chatPrompt | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'adore programmer.'\n\n  chatModel.close();\n}\n\nFuture<void> _chatGoogleGenerativeAIMultiModal() async {\n  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\n  final chatModel = ChatGoogleGenerativeAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatGoogleGenerativeAIOptions(\n      model: 'gemini-1.5-pro-latest',\n      temperature: 0,\n    ),\n  );\n  final res = await chatModel.invoke(\n    PromptValue.chat([\n      ChatMessage.human(\n        ChatMessageContent.multiModal([\n          ChatMessageContent.text('What fruit is this?'),\n          ChatMessageContent.image(\n            mimeType: 'image/jpeg',\n            data: base64.encode(\n              await File('./bin/assets/apple.jpeg').readAsBytes(),\n            ),\n          ),\n        
]),\n      ),\n    ]),\n  );\n  print(res.output.content);\n  // -> 'That is an apple.'\n\n  chatModel.close();\n}\n\nFuture<void> _chatOpenAIStreaming() async {\n  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas.',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chatModel = ChatGoogleGenerativeAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatGoogleGenerativeAIOptions(\n      model: 'gemini-1.5-pro-latest',\n      temperature: 0,\n    ),\n  );\n\n  final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '30'});\n  await stream.forEach(print);\n  // 1\n  // 2345678910111213\n  // 1415161718192021\n  // 222324252627282930\n\n  chatModel.close();\n}\n\nFuture<void> _codeExecution() async {\n  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n\n  final chatModel = ChatGoogleGenerativeAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatGoogleGenerativeAIOptions(\n      model: 'gemini-1.5-flash',\n      enableCodeExecution: true,\n    ),\n  );\n\n  final res = await chatModel.invoke(\n    PromptValue.string(\n      'Calculate the fibonacci sequence up to 10 terms. '\n      'Return only the last term without explanations.',\n    ),\n  );\n  final text = res.output.content;\n  print(text); // 34\n  final executableCode = res.metadata['executable_code'] as String;\n  print(executableCode);\n  final codeExecutionResult =\n      res.metadata['code_execution_result'] as Map<String, dynamic>;\n  print(codeExecutionResult);\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/mistralai.dart",
    "content": "// ignore_for_file: avoid_print, avoid_redundant_argument_values\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_mistralai/langchain_mistralai.dart';\n\nvoid main(final List<String> arguments) async {\n  final apiKey = Platform.environment['MISTRAL_API_KEY']!;\n  await _chatMistralAI(apiKey);\n  await _chatMistralAIStreaming(apiKey);\n}\n\nFuture<void> _chatMistralAI(final String apiKey) async {\n  final chatModel = ChatMistralAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatMistralAIOptions(\n      model: 'mistral-small',\n      temperature: 0,\n    ),\n  );\n\n  const template =\n      'You are a helpful assistant that translates {input_language} to {output_language}.';\n  final systemMessagePrompt = SystemChatMessagePromptTemplate.fromTemplate(\n    template,\n  );\n  const humanTemplate = '{text}';\n  final humanMessagePrompt = HumanChatMessagePromptTemplate.fromTemplate(\n    humanTemplate,\n  );\n  final chatPrompt = ChatPromptTemplate.fromPromptMessages([\n    systemMessagePrompt,\n    humanMessagePrompt,\n  ]);\n\n  final chain = chatPrompt | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'aime la programmation.'\n}\n\nFuture<void> _chatMistralAIStreaming(final String apiKey) async {\n  final promptTemplate = ChatPromptTemplate.fromPromptMessages([\n    SystemChatMessagePromptTemplate.fromTemplate(\n      'You are a helpful assistant that replies only with numbers '\n      'in order without any spaces or commas',\n    ),\n    HumanChatMessagePromptTemplate.fromTemplate(\n      'List the numbers from 1 to {max_num}',\n    ),\n  ]);\n  final chat = ChatMistralAI(\n    apiKey: apiKey,\n    defaultOptions: const ChatMistralAIOptions(\n      model: 'mistral-medium',\n      temperature: 0,\n    ),\n  );\n  const stringOutputParser = 
StringOutputParser<ChatResult>();\n\n  final chain = promptTemplate.pipe(chat).pipe(stringOutputParser);\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 12\n  // 345\n  // 67\n  // 89\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/ollama.dart",
    "content": "// ignore_for_file: avoid_print, avoid_redundant_argument_values\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main(final List<String> arguments) async {\n  await _chatOllama();\n  await _chatOllamaStreaming();\n  await _chatOllamaMultimodal();\n  await _chatOllamaToolCalling();\n  await _chatOllamaJsonMode();\n  await _extraction();\n  await _flights();\n  await _rag();\n}\n\nFuture<void> _chatOllama() async {\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, '{text}'),\n  ]);\n\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2', temperature: 0),\n  );\n\n  final chain = promptTemplate | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'La traduction est : \"J'aime le programming.'\n}\n\nFuture<void> _chatOllamaStreaming() async {\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n  final chat = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2', temperature: 0),\n  );\n  final chain = promptTemplate.pipe(chat).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 1\n  // 2\n  // 3\n  // ..\n  // 9\n}\n\nFuture<void> _chatOllamaMultimodal() async {\n  final chatModel = ChatOllama(\n    defaultOptions: const 
ChatOllamaOptions(model: 'llava', temperature: 0),\n  );\n  final prompt = ChatMessage.human(\n    ChatMessageContent.multiModal([\n      ChatMessageContent.text('What fruit is this?'),\n      ChatMessageContent.image(\n        data: base64.encode(\n          await File('./bin/assets/apple.jpeg').readAsBytes(),\n        ),\n      ),\n    ]),\n  );\n  final res = await chatModel.invoke(PromptValue.chat([prompt]));\n  print(res.output.content);\n  // -> 'An Apple'\n}\n\nFuture<void> _chatOllamaToolCalling() async {\n  const tool = ToolSpec(\n    name: 'get_current_weather',\n    description: 'Get the current weather in a given location',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'location': {\n          'type': 'string',\n          'description': 'The city and country, e.g. San Francisco, US',\n        },\n      },\n      'required': ['location'],\n    },\n  );\n\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(\n      model: 'llama3.2',\n      temperature: 0,\n      tools: [tool],\n    ),\n  );\n\n  final res = await chatModel.invoke(\n    PromptValue.string(\n      'What’s the weather like in Boston and Madrid right now in celsius?',\n    ),\n  );\n  print(res.output.toolCalls);\n  // [AIChatMessageToolCall{\n  //   id: a621064b-03b3-4ca6-8278-f37504901034,\n  //   name: get_current_weather,\n  //   arguments: {location: Boston, US},\n  // },\n  // AIChatMessageToolCall{\n  //   id: f160d9ba-ae7d-4abc-a910-2b6cd503ec53,\n  //   name: get_current_weather,\n  //   arguments: {location: Madrid, ES},\n  // }]\n}\n\nFuture<void> _chatOllamaJsonMode() async {\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are an assistant that respond question using JSON format.',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n  final chat = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(\n      model: 'llama3.2',\n      
temperature: 0,\n      format: OllamaResponseFormat.json,\n    ),\n  );\n\n  final chain = Runnable.getMapFromInput<String>(\n    'question',\n  ).pipe(promptTemplate).pipe(chat).pipe(JsonOutputParser());\n\n  final res = await chain.invoke(\n    'What is the population of Spain, The Netherlands, and France?',\n  );\n  print(res);\n  // {Spain: 46735727, The Netherlands: 17398435, France: 65273538}\n}\n\nFuture<void> _extraction() async {\n  const tool = ToolSpec(\n    name: 'information_extraction',\n    description: 'Extracts the relevant information from the passage',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'people': {\n          'type': 'array',\n          'items': {\n            'type': 'object',\n            'properties': {\n              'name': {'type': 'string', 'description': 'The name of a person'},\n              'height': {\n                'type': 'number',\n                'description': 'The height of the person in cm',\n              },\n              'hair_color': {\n                'type': 'string',\n                'description': 'The hair color of the person',\n                'enum': ['black', 'brown', 'blonde', 'red', 'gray', 'white'],\n              },\n            },\n            'required': ['name', 'height', 'hair_color'],\n          },\n        },\n      },\n      'required': ['people'],\n    },\n  );\n\n  final model = ChatOllama(\n    defaultOptions: ChatOllamaOptions(\n      model: 'llama3.2',\n      temperature: 0,\n      tools: const [tool],\n      toolChoice: ChatToolChoice.forced(name: tool.name),\n    ),\n  );\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate('''\nExtract and save the relevant entities mentioned in the following passage together with their properties. 
\n\nPassage:\n{input}''');\n\n  final chain = Runnable.getMapFromInput<String>()\n      .pipe(promptTemplate)\n      .pipe(model)\n      .pipe(ToolsOutputParser());\n\n  final res = await chain.invoke(\n    'Alex is 5 feet tall. '\n    'Claudia is 1 foot taller than Alex and jumps higher than him. '\n    'Claudia has orange hair and Alex is blonde.',\n  );\n  final extractedData = res.first.arguments;\n  print(extractedData);\n  // {\n  //   people: [\n  //     {\n  //       name: Alex,\n  //       height: 152,\n  //       hair_color: blonde\n  //     },\n  //     {\n  //       name: Claudia,\n  //       height: 183,\n  //       hair_color: orange\n  //     }\n  //   ]\n  // }\n}\n\n// Simulates an API call to get flight times\n// In a real application, this would fetch data from a live database or API\nString getFlightTimes(String departure, String arrival) {\n  final flights = {\n    'NYC-LAX': {\n      'departure': '08:00 AM',\n      'arrival': '11:30 AM',\n      'duration': '5h 30m',\n    },\n    'LAX-NYC': {\n      'departure': '02:00 PM',\n      'arrival': '10:30 PM',\n      'duration': '5h 30m',\n    },\n    'LHR-JFK': {\n      'departure': '10:00 AM',\n      'arrival': '01:00 PM',\n      'duration': '8h 00m',\n    },\n    'JFK-LHR': {\n      'departure': '09:00 PM',\n      'arrival': '09:00 AM',\n      'duration': '7h 00m',\n    },\n    'CDG-DXB': {\n      'departure': '11:00 AM',\n      'arrival': '08:00 PM',\n      'duration': '6h 00m',\n    },\n    'DXB-CDG': {\n      'departure': '03:00 AM',\n      'arrival': '07:30 AM',\n      'duration': '7h 30m',\n    },\n  };\n\n  final key = '${departure.toUpperCase()}-${arrival.toUpperCase()}';\n  return jsonEncode(flights[key] ?? 
{'error': 'Flight not found'});\n}\n\nFuture<void> _flights() async {\n  const getFlightTimesTool = ToolSpec(\n    name: 'get_flight_times',\n    description: 'Get the flight times between two cities',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'departure': {\n          'type': 'string',\n          'description': 'The departure city (airport code)',\n        },\n        'arrival': {\n          'type': 'string',\n          'description': 'The arrival city (airport code)',\n        },\n      },\n      'required': ['departure', 'arrival'],\n    },\n  );\n\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(\n      model: 'llama3.2',\n      temperature: 0,\n      tools: [getFlightTimesTool],\n    ),\n  );\n\n  final messages = [\n    ChatMessage.humanText(\n      'What is the flight time from New York (NYC) to Los Angeles (LAX)?',\n    ),\n  ];\n\n  // First API call: Send the query and function description to the model\n  final response = await chatModel.invoke(PromptValue.chat(messages));\n\n  messages.add(response.output);\n\n  // Check if the model decided to use the provided function\n  if (response.output.toolCalls.isEmpty) {\n    print(\"The model didn't use the function. 
Its response was:\");\n    print(response.output.content);\n    return;\n  }\n\n  // Process function calls made by the model\n  for (final toolCall in response.output.toolCalls) {\n    final functionResponse = getFlightTimes(\n      toolCall.arguments['departure'],\n      toolCall.arguments['arrival'],\n    );\n    // Add function response to the conversation\n    messages.add(\n      ChatMessage.tool(toolCallId: toolCall.id, content: functionResponse),\n    );\n  }\n\n  // Second API call: Get final response from the model\n  final finalResponse = await chatModel.invoke(PromptValue.chat(messages));\n  print(finalResponse.output.content);\n  // The flight time from New York (NYC) to Los Angeles (LAX) is approximately 5 hours and 30 minutes.\n}\n\nFuture<void> _rag() async {\n  // 1. Create a vector store and add documents to it\n  final vectorStore = MemoryVectorStore(\n    embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),\n  );\n  await vectorStore.addDocuments(\n    documents: [\n      const Document(pageContent: 'LangChain was created by Harrison'),\n      const Document(\n        pageContent: 'David ported LangChain to Dart in LangChain.dart',\n      ),\n    ],\n  );\n\n  // 2. Construct a RAG prompt template\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  // 3. Define the model to use and the vector store retriever\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n  );\n  final retriever = vectorStore.asRetriever(\n    defaultOptions: const VectorStoreRetrieverOptions(\n      searchType: VectorStoreSimilaritySearch(k: 1),\n    ),\n  );\n\n  // 4. 
Create a Runnable that combines the retrieved documents into a single string\n  final docCombiner = Runnable.mapInput<List<Document>, String>((docs) {\n    return docs.map((final d) => d.pageContent).join('\\n');\n  });\n\n  // 5. Define the RAG pipeline\n  final chain = Runnable.fromMap<String>({\n    'context': retriever.pipe(docCombiner),\n    'question': Runnable.passthrough(),\n  }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser());\n\n  // 6. Run the pipeline\n  final res = await chain.invoke('Who created LangChain.dart?');\n  print(res);\n  // Based on the context provided, David created LangChain.dart.\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _openRouter();\n  await _openRouterStreaming();\n  await _openRouterStreamingTools();\n}\n\nFuture<void> _openRouter() async {\n  final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, '{text}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: openRouterApiKey,\n    baseUrl: 'https://openrouter.ai/api/v1',\n    defaultOptions: const ChatOpenAIOptions(model: 'mistralai/mistral-small'),\n  );\n\n  final chain = promptTemplate | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'aime la programmation.'\n}\n\nFuture<void> _openRouterStreaming() async {\n  final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: openRouterApiKey,\n    baseUrl: 'https://openrouter.ai/api/v1',\n    defaultOptions: const ChatOpenAIOptions(model: 'mistralai/mistral-small'),\n  );\n\n  final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 123\n  // 
456789\n}\n\nFuture<void> _openRouterStreamingTools() async {\n  final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];\n\n  const tool = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline to the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'tell me a long joke about {foo}',\n  );\n  final chat = ChatOpenAI(\n    apiKey: openRouterApiKey,\n    baseUrl: 'https://openrouter.ai/api/v1',\n    defaultOptions: ChatOpenAIOptions(\n      model: 'gpt-4o',\n      tools: const [tool],\n      toolChoice: ChatToolChoice.forced(name: 'joke'),\n    ),\n  );\n  final outputParser = ToolsOutputParser();\n\n  final chain = promptTemplate.pipe(chat).pipe(outputParser);\n\n  final stream = chain.stream({'foo': 'bears'});\n  await for (final chunk in stream) {\n    final args = chunk.first.arguments;\n    print(args);\n  }\n  // {}\n  // {setup: }\n  // {setup: Why don't}\n  // {setup: Why don't bears}\n  // {setup: Why don't bears like fast food}\n  // {setup: Why don't bears like fast food?, punchline: }\n  // {setup: Why don't bears like fast food?, punchline: Because}\n  // {setup: Why don't bears like fast food?, punchline: Because they can't}\n  // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/openai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _chatOpenAI();\n  await _chatOpenAIStreaming();\n  await _chatOpenAIStreamingTools();\n  await _chatOpenAIJsonMode();\n}\n\nFuture<void> _chatOpenAI() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, '{text}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(temperature: 0),\n  );\n\n  final chain = promptTemplate | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'adore la programmation.'\n}\n\nFuture<void> _chatOpenAIStreaming() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chat = ChatOpenAI(apiKey: openaiApiKey);\n\n  final chain = promptTemplate.pipe(chat).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 123\n  // 456\n  // 789\n}\n\nFuture<void> _chatOpenAIStreamingTools() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  const tool = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    
inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline to the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'tell me a long joke about {foo}',\n  );\n  final chat = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: ChatOpenAIOptions(\n      tools: const [tool],\n      toolChoice: ChatToolChoice.forced(name: 'joke'),\n    ),\n  );\n  final outputParser = ToolsOutputParser();\n\n  final chain = promptTemplate.pipe(chat).pipe(outputParser);\n\n  final stream = chain.stream({'foo': 'bears'});\n  await for (final chunk in stream) {\n    final args = chunk.first.arguments;\n    print(args);\n  }\n  // {}\n  // {setup: }\n  // {setup: Why don't}\n  // {setup: Why don't bears}\n  // {setup: Why don't bears like fast food}\n  // {setup: Why don't bears like fast food?, punchline: }\n  // {setup: Why don't bears like fast food?, punchline: Because}\n  // {setup: Why don't bears like fast food?, punchline: Because they can't}\n  // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}\n}\n\nFuture<void> _chatOpenAIJsonMode() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final prompt = PromptValue.chat([\n    ChatMessage.system(\n      \"Extract the 'name' and 'origin' of any companies mentioned in the \"\n      'following statement. 
Return a JSON list.',\n    ),\n    ChatMessage.humanText(\n      'Google was founded in the USA, while Deepmind was founded in the UK',\n    ),\n  ]);\n  final llm = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      temperature: 0,\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n  final chain = llm.pipe(JsonOutputParser());\n  final res = await chain.invoke(prompt);\n  print(res);\n  // {\n  //   \"companies\": [\n  //     {\n  //       \"name\": \"Google\",\n  //       \"origin\": \"USA\"\n  //     },\n  //     {\n  //       \"name\": \"Deepmind\",\n  //       \"origin\": \"UK\"\n  //     }\n  //   ]\n  // }\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/together_ai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _togetherAiInvoke();\n  await _togetherAiStreaming();\n}\n\nFuture<void> _togetherAiInvoke() async {\n  final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that translates {input_language} to {output_language}.',\n    ),\n    (ChatMessageType.human, '{text}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: togetherAiApiKey,\n    baseUrl: 'https://api.together.xyz/v1',\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'mistralai/Mistral-7B-Instruct-v0.2',\n    ),\n  );\n\n  final chain = promptTemplate | chatModel | const StringOutputParser();\n\n  final res = await chain.invoke({\n    'input_language': 'English',\n    'output_language': 'French',\n    'text': 'I love programming.',\n  });\n  print(res);\n  // -> 'J'aime programmer'\n}\n\nFuture<void> _togetherAiStreaming() async {\n  final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'You are a helpful assistant that replies only with numbers '\n          'in order without any spaces or commas',\n    ),\n    (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n  ]);\n\n  final chatModel = ChatOpenAI(\n    apiKey: togetherAiApiKey,\n    baseUrl: 'https://api.together.xyz/v1',\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'mistralai/Mistral-7B-Instruct-v0.2',\n    ),\n  );\n\n  final chain = promptTemplate.pipe(chatModel).pipe(const StringOutputParser());\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 1\n  // 2\n  
// 3\n  // ...\n  // 9\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/llms/how_to/llm_streaming.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _openAIStreaming();\n}\n\nFuture<void> _openAIStreaming() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final promptTemplate = PromptTemplate.fromTemplate(\n    'List the numbers from 1 to {max_num} in order without any spaces or commas',\n  );\n  final llm = OpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<LLMResult>();\n  final chain = promptTemplate | llm | stringOutputParser;\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 123\n  // 45\n  // 67\n  // 89\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/llms/integrations/ollama.dart",
    "content": "// ignore_for_file: avoid_print, avoid_redundant_argument_values\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main(final List<String> arguments) async {\n  await _ollama();\n  await _ollamaStreaming();\n}\n\nFuture<void> _ollama() async {\n  final prompt = PromptTemplate.fromTemplate(\n    'What is a good name for a company that makes {product}?',\n  );\n  final llm = Ollama(defaultOptions: const OllamaOptions(model: 'llama3.2'));\n\n  final chain = prompt | llm | const StringOutputParser();\n  final res = await chain.invoke({'product': 'colorful socks'});\n  print(res);\n  // -> 'SoleMates'\n}\n\nFuture<void> _ollamaStreaming() async {\n  final promptTemplate = PromptTemplate.fromTemplate(\n    'List the numbers from 1 to {max_num} in order without any spaces or commas',\n  );\n  final llm = Ollama(defaultOptions: const OllamaOptions(model: 'llama3.2'));\n  const stringOutputParser = StringOutputParser<LLMResult>();\n  final chain = promptTemplate | llm | stringOutputParser;\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 1\n  // 2\n  // 3\n  // ..\n  // 9\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/models/llms/integrations/openai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _openAI();\n  await _openAIStreaming();\n}\n\nFuture<void> _openAI() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final prompt = PromptTemplate.fromTemplate(\n    'What is a good name for a company that makes {product}?',\n  );\n  final llm = OpenAI(apiKey: openaiApiKey);\n\n  final chain = prompt | llm | const StringOutputParser();\n  final res = await chain.invoke({'product': 'colorful socks'});\n  print(res);\n  // -> '\\n\\nSocktastic!'\n}\n\nFuture<void> _openAIStreaming() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final promptTemplate = PromptTemplate.fromTemplate(\n    'List the numbers from 1 to {max_num} in order without any spaces or commas',\n  );\n  final llm = OpenAI(apiKey: openaiApiKey);\n  const stringOutputParser = StringOutputParser<LLMResult>();\n  final chain = promptTemplate | llm | stringOutputParser;\n\n  final stream = chain.stream({'max_num': '9'});\n  await stream.forEach(print);\n  // 123\n  // 45\n  // 67\n  // 89\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/output_parsers/json.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _invoke();\n  await _streaming();\n}\n\nFuture<void> _invoke() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Output a list of the countries {countries} and their '\n    'populations in JSON format. Use a dict with an outer key of '\n    '\"countries\" which contains a list of countries. '\n    'Each country should have the key \"name\" and \"population\"',\n  );\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n  final parser = JsonOutputParser<ChatResult>();\n  final chain = promptTemplate.pipe(model).pipe(parser);\n\n  final res = await chain.invoke({\n    'countries': ['France', 'Spain', 'Japan'].join(', '),\n  });\n  print(res);\n  // {countries: [{name: France, population: 67413000}, {name: Spain, population: 47350000}, {name: Japan, population: 125584838}]}\n}\n\nFuture<void> _streaming() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Output a list of the countries {countries} and their '\n    'populations in JSON format. Use a dict with an outer key of '\n    '\"countries\" which contains a list of countries. 
'\n    'Each country should have the key \"name\" and \"population\"',\n  );\n\n  final model = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(\n      model: 'gpt-4-turbo',\n      responseFormat: ChatOpenAIResponseFormat.jsonObject,\n    ),\n  );\n\n  final noJsonParser = promptTemplate\n      .pipe(model)\n      .pipe(const StringOutputParser());\n  final stream1 = noJsonParser.stream({\n    'countries': ['France', 'Spain', 'Japan'].join(', '),\n  });\n  await stream1.forEach((final chunk) => print('$chunk|'));\n  // |\n  // {\n  // |\n  // |\n  // \"|\n  // countries|\n  // \":|\n  // [\n  // |\n  // |\n  // {\n  // |\n  // |\n  // \"|\n  // name|\n  // \":|\n  // \"|\n  // France|\n  // ...\n\n  final withJsonParser = promptTemplate.pipe(model).pipe(JsonOutputParser());\n  final stream2 = withJsonParser.stream({\n    'countries': ['France', 'Spain', 'Japan'].join(', '),\n  });\n  await stream2.forEach((final chunk) => print('$chunk|'));\n  // {}|\n  // {countries: []}|\n  // {countries: [{name: France}]}|\n  // {countries: [{name: France, population: 67390000}, {}]}|\n  // {countries: [{name: France, population: 67390000}, {name: Spain}]}|\n  // {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}]}|\n  // {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}, {name: Japan}]}|\n  // {countries: [{name: France, population: 67390000}, {name: Spain, population: 47350000}, {name: Japan, population: 125360000}]}|\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/output_parsers/string.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _invoke();\n  await _streaming();\n}\n\nFuture<void> _invoke() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate.pipe(model).pipe(const StringOutputParser());\n\n  final res = await chain.invoke({'topic': 'bears'});\n  print(res);\n  // Why don't bears wear shoes? Because they have bear feet!\n}\n\nFuture<void> _streaming() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'Tell me a joke about {topic}',\n  );\n\n  final chain = promptTemplate | model | const StringOutputParser();\n\n  final stream = chain.stream({'topic': 'bears'});\n\n  var count = 0;\n  await for (final res in stream) {\n    print('$count: $res');\n    count++;\n  }\n  // 0:\n  // 1: Why\n  // 2:  don\n  // 3: 't\n  // 4:  bears\n  // 5:  like\n  // 6:  fast\n  // 7:  food\n  // 8: ?\n  // 9: Because\n  // 10:  they\n  // 11:  can\n  // 12: 't\n  // 13:  catch\n  // 14:  it\n  // 15: !\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/model_io/output_parsers/tools.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _toolsOutputParser();\n}\n\nFuture<void> _toolsOutputParser() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  const tool = ToolSpec(\n    name: 'joke',\n    description: 'A joke',\n    inputJsonSchema: {\n      'type': 'object',\n      'properties': {\n        'setup': {'type': 'string', 'description': 'The setup for the joke'},\n        'punchline': {\n          'type': 'string',\n          'description': 'The punchline to the joke',\n        },\n      },\n      'required': ['setup', 'punchline'],\n    },\n  );\n  final promptTemplate = ChatPromptTemplate.fromTemplate(\n    'tell me a long joke about {foo}',\n  );\n  final chat =\n      ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(temperature: 0),\n      ).bind(\n        ChatOpenAIOptions(\n          tools: const [tool],\n          toolChoice: ChatToolChoice.forced(name: 'joke'),\n        ),\n      );\n  final outputParser = ToolsOutputParser();\n  final chain = promptTemplate.pipe(chat).pipe(outputParser);\n  final res = await chain.invoke({'foo': 'bears'});\n  print(res);\n  // [ParsedToolCall{\n  //   id: call_5TU1iYgYO3Z81eAuTe7J23f7,\n  //   name: joke,\n  //   arguments: {\n  //     setup: Why don't bears like fast food restaurants?,\n  //     punchline: Because they can't bear the wait!\n  //   },\n  // }]\n\n  final stream = chain.stream({'foo': 'bears'});\n  await for (final res in stream) {\n    final args = res.first.arguments;\n    print(args);\n  }\n  // {}\n  // {setup: }\n  // {setup: Why}\n  // {setup: Why don't}\n  // {setup: Why don't bears}\n  // {setup: Why don't bears like}\n  // {setup: Why don't bears like fast food}\n  // {setup: Why don't bears like fast food restaurants?}\n  // 
{setup: Why don't bears like fast food restaurants?, punchline: }\n  // {setup: Why don't bears like fast food restaurants?, punchline: Because}\n  // {setup: Why don't bears like fast food restaurants?, punchline: Because they can't}\n  // {setup: Why don't bears like fast food restaurants?, punchline: Because they can't catch}\n  // {setup: Why don't bears like fast food restaurants?, punchline: Because they can't catch them!}\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/retrieval/text_embedding/integrations/anyscale.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  final anyscaleApiKey = Platform.environment['ANYSCALE_API_KEY'];\n  final embeddings = OpenAIEmbeddings(\n    apiKey: anyscaleApiKey,\n    baseUrl: 'https://api.endpoints.anyscale.com/v1',\n    model: 'thenlper/gte-large',\n  );\n\n  // Embedding a document\n  const doc = Document(pageContent: 'This is a test document.');\n  final res1 = await embeddings.embedDocuments([doc]);\n  print(res1);\n  // [[-0.0011281073093414307, -0.013280618004500866, 0.02164546772837639, ...]]\n\n  // Embedding a retrieval query\n  const text = 'This is a test query.';\n  final res2 = await embeddings.embedQuery(text);\n  print(res2);\n  // [-0.027850965037941933, 0.00269310618750751, 0.008118202909827232, ...]\n\n  embeddings.close();\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/retrieval/text_embedding/integrations/google_ai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nvoid main(final List<String> arguments) async {\n  final apiKey = Platform.environment['GOOGLEAI_API_KEY'];\n  final embeddings = GoogleGenerativeAIEmbeddings(apiKey: apiKey);\n\n  // Embedding a document\n  const doc = Document(pageContent: 'This is a test document.');\n  final res1 = await embeddings.embedDocuments([doc]);\n  print(res1);\n  // [[0.05677966, 0.0030236526, -0.06441004, ...]]\n\n  // Embedding a retrieval query\n  const text = 'This is a test query.';\n  final res2 = await embeddings.embedQuery(text);\n  print(res2);\n  // [0.025963314, -0.06858828, -0.026590854, ...]\n\n  embeddings.close();\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/retrieval/text_embedding/integrations/openai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final embeddings = OpenAIEmbeddings(apiKey: openAiApiKey);\n\n  // Embedding a document\n  const doc = Document(pageContent: 'This is a test document.');\n  final res1 = await embeddings.embedDocuments([doc]);\n  print(res1);\n  // [[-0.003105443, 0.011136302, -0.0040295827, -0.011749065, ...]]\n\n  // Embedding a retrieval query\n  const text = 'This is a test query.';\n  final res2 = await embeddings.embedQuery(text);\n  print(res2);\n  // [-0.005047946, 0.0050882488, -0.0051957234, -0.019143905, ...]\n\n  embeddings.close();\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/retrieval/text_embedding/integrations/together_ai.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  final togetherAiApiKey = Platform.environment['TOGETHER_AI_API_KEY'];\n  final embeddings = OpenAIEmbeddings(\n    apiKey: togetherAiApiKey,\n    baseUrl: 'https://api.together.xyz/v1',\n    model: 'togethercomputer/m2-bert-80M-32k-retrieval',\n  );\n\n  // Embedding a document\n  const doc = Document(pageContent: 'This is a test document.');\n  final res1 = await embeddings.embedDocuments([doc]);\n  print(res1);\n  // [[-0.038838703, 0.0580902, 0.022614542, 0.0078403875, ...]]\n\n  // Embedding a retrieval query\n  const text = 'This is a test query.';\n  final res2 = await embeddings.embedQuery(text);\n  print(res2);\n  // [-0.019722218, 0.04656633, -0.0074559706, 0.005712764, ...]\n\n  embeddings.close();\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/modules/retrieval/vector_stores/integrations/objectbox.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main() async {\n  await _rag();\n}\n\nFuture<void> _rag() async {\n  // 1. Instantiate vector store\n  final vectorStore = ObjectBoxVectorStore.open(\n    embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),\n    dimensions: 512,\n    directory: 'bin/modules/retrieval/vector_stores/integrations',\n  );\n\n  // 2. Load documents\n  const loader = WebBaseLoader([\n    'https://objectbox.io/on-device-vector-databases-and-edge-ai/',\n    'https://objectbox.io/the-first-on-device-vector-database-objectbox-4-0/',\n    'https://objectbox.io/on-device-vector-database-for-dart-flutter/',\n    'https://objectbox.io/evolution-of-search-traditional-vs-vector-search/',\n  ]);\n  final List<Document> docs = await loader.load();\n\n  // 3. Split docs into chunks\n  const splitter = RecursiveCharacterTextSplitter(\n    chunkSize: 500,\n    chunkOverlap: 0,\n  );\n  final List<Document> chunkedDocs = await splitter.invoke(docs);\n\n  // 4. Add documents to vector store\n  await vectorStore.addDocuments(documents: chunkedDocs);\n\n  // 5. Construct a RAG prompt template\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      '''\nYou are an assistant for question-answering tasks.\n\nUse the following pieces of retrieved context to answer the user question.\n\nContext:\n{context}\n\nIf you don't know the answer, just say that you don't know. \nUse three sentences maximum and keep the answer concise.\nCite the source you used to answer the question.\n\nExample:\n\"\"\"\nOne sentence [1]. Another sentence [2]. \n\nSources:\n[1] https://example.com/1\n[2] https://example.com/2\n\"\"\"\n''',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  // 6. Define the model to use and the vector store retriever\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n  );\n  final retriever = vectorStore.asRetriever();\n\n  // 7. Create a Runnable that combines the retrieved documents into a single string\n  final docCombiner = Runnable.mapInput<List<Document>, String>((docs) {\n    return docs\n        .map(\n          (final d) =>\n              '''\nSource: ${d.metadata['source']}\nTitle: ${d.metadata['title']}\nContent: ${d.pageContent}\n---\n''',\n        )\n        .join('\\n');\n  });\n\n  // 8. Define the RAG pipeline\n  final chain = Runnable.fromMap<String>({\n    'context': retriever.pipe(docCombiner),\n    'question': Runnable.passthrough(),\n  }).pipe(promptTemplate).pipe(chatModel).pipe(const StringOutputParser());\n\n  // 9. Run the pipeline\n  final stream = chain.stream(\n    'Which algorithm does ObjectBox Vector Search use? Can I use it in Flutter apps?',\n  );\n  await stream.forEach(stdout.write);\n  // According to the sources provided, ObjectBox Vector Search uses the HNSW\n  // (Hierarchical Navigable Small World) algorithm [1].\n  //\n  // And yes, you can use it in Flutter apps. The article specifically mentions\n  // that ObjectBox 4.0 introduces an on-device vector database for the\n  // Dart/Flutter platform [2].\n  //\n  // Sources:\n  // [1] https://objectbox.io/first-on-device-vector-database-objectbox-4-0/\n  // [2] https://objectbox.io/on-device-vector-database-for-dart-flutter/\n}\n"
  },
  {
    "path": "examples/docs_examples/bin/readme.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  await _callLLM();\n  await _rag();\n}\n\nFuture<void> _callLLM() async {\n  final googleApiKey = Platform.environment['GOOGLE_API_KEY'];\n  final model = ChatGoogleGenerativeAI(apiKey: googleApiKey);\n  final prompt = PromptValue.string('Hello world!');\n  final result = await model.invoke(prompt);\n  print(result);\n  // Hello everyone! I'm new here and excited to be part of this community.\n}\n\nFuture<void> _rag() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  // 1. Create a vector store and add documents to it\n  final vectorStore = MemoryVectorStore(\n    embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  );\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(pageContent: 'LangChain was created by Harrison'),\n      Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n    ],\n  );\n\n  // 2. Define the retrieval chain\n  final retriever = vectorStore.asRetriever();\n  final setupAndRetrieval = Runnable.fromMap<String>({\n    'context': retriever.pipe(\n      Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n    ),\n    'question': Runnable.passthrough(),\n  });\n\n  // 3. Construct a RAG prompt template\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      'Answer the question based on only the following context:\\n{context}',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  // 4. Define the final chain\n  final model = ChatOpenAI(apiKey: openaiApiKey);\n  const outputParser = StringOutputParser<ChatResult>();\n  final chain = setupAndRetrieval\n      .pipe(promptTemplate)\n      .pipe(model)\n      .pipe(outputParser);\n\n  // 5. Run the pipeline\n  final res = await chain.invoke('Who created LangChain.dart?');\n  print(res);\n  // David created LangChain.dart\n}\n"
  },
  {
    "path": "examples/docs_examples/pubspec.yaml",
    "content": "name: docs_examples\ndescription: Examples used in langchaindart.dev documentation.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  langchain: ^0.8.1\n  langchain_anthropic: ^0.3.1\n  langchain_chroma: ^0.3.0+2\n  langchain_community: 0.4.0+2\n  langchain_google: ^0.7.1+2\n  langchain_mistralai: ^0.3.1+1\n  langchain_ollama: ^0.4.1\n  langchain_openai: ^0.8.1+1\n"
  },
  {
    "path": "examples/hello_world_backend/Dockerfile",
    "content": "# Official Dart image: https://hub.docker.com/_/dart\n# Specify the Dart SDK base image version using dart:<version> (ex: dart:2.12)\nFROM dart:stable AS build\n\n# Resolve app dependencies.\nWORKDIR /app\nCOPY pubspec.* ./\nRUN dart pub get\n\n# Copy app source code and AOT compile it.\nCOPY . .\n# Ensure packages are still up-to-date if anything has changed\nRUN dart pub get --offline\nRUN dart compile exe bin/server.dart -o bin/server\n\n# Build minimal serving image from AOT-compiled `/server` and required system\n# libraries and configuration files stored in `/runtime/` from the build stage.\nFROM scratch\nCOPY --from=build /runtime/ /\nCOPY --from=build /app/bin/server /app/bin/\n\n# Start server.\nEXPOSE 8080\nCMD [\"/app/bin/server\"]\n"
  },
  {
    "path": "examples/hello_world_backend/README.md",
    "content": "# Hello world backend\n\nThis sample demonstrates how to build a simple backend service that interacts with an LLM using\nLangChain.dart.\n\nIt exposes a REST API that given a list of topics, generates a sonnet about them.\n\nThe HTTP server is implemented using [package:shelf](https://pub.dev/packages/shelf).\n\nYou can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f)\nblog post.\n\n![Hello world backend](hello_world_backend.gif)\n\n## Usage\n\n- Listens on \"any IP\" (0.0.0.0) instead of loop-back (localhost, 127.0.0.1) to\n  allow remote connections.\n- Defaults to listening on port `8080`, but this can be configured by setting\n  the `PORT` environment variable. (This is also the convention used by\n  [Cloud Run](https://cloud.google.com/run).)\n- Includes `Dockerfile` for easy containerization\n\nTo run this server locally, run as follows:\n\n```bash\n$ dart run bin/server.dart\n```\n\nThen send a request:\n\n```bash\n$ curl -X POST \\\n    -H \"Content-Type: application/json\" \\\n    -d '{\n      \"topics\": [\"bikes\", \"amsterdam\"]\n    }' \\\n    http://0.0.0.0:8080/v1/sonnets\n```\n\nTo deploy on [Cloud Run](https://cloud.google.com/run), click here\n\n[![Run on Google Cloud](https://deploy.cloud.run/button.svg)](https://deploy.cloud.run/?git_repo=https://github.com/davidmigloz/langchain_dart.git&dir=examples/hello_world_backend)\n\nor follow\n[these instructions](https://cloud.google.com/run/docs/quickstarts/build-and-deploy/other).\n"
  },
  {
    "path": "examples/hello_world_backend/bin/api.dart",
    "content": "// ignore_for_file: avoid_dynamic_calls\nimport 'dart:convert';\n\nimport 'package:shelf/shelf.dart';\nimport 'package:shelf_router/shelf_router.dart';\n\nimport 'sonnets.dart';\n\nclass Api {\n  final sonnetsService = SonnetsService();\n\n  Handler get handler {\n    final router = Router()..post('/v1/sonnets', _sonnetHandler);\n    return router.call;\n  }\n\n  Future<Response> _sonnetHandler(final Request request) async {\n    final payload = jsonDecode(await request.readAsString());\n    final topics = payload['topics'];\n\n    final sonnet = await sonnetsService.generateSonnet(topics.cast<String>());\n\n    return Response.ok(\n      headers: {'Content-type': 'application/json'},\n      jsonEncode({'sonnet': sonnet}),\n    );\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_backend/bin/server.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:shelf/shelf.dart';\nimport 'package:shelf/shelf_io.dart' as io;\n\nimport 'api.dart';\n\nFuture<void> main() async {\n  final port = int.parse(Platform.environment['PORT'] ?? '8080');\n  final api = Api();\n  final handler = const Pipeline()\n      .addMiddleware(logRequests())\n      .addHandler(api.handler);\n  final server = await io.serve(handler, InternetAddress.anyIPv4, port);\n  print('Serving at http://${server.address.host}:${server.port}');\n}\n"
  },
  {
    "path": "examples/hello_world_backend/bin/sonnets.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nclass SonnetsService {\n  SonnetsService() {\n    final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n    if (openAiApiKey == null) {\n      stderr.writeln(\n        'You need to set your OpenAI key in the '\n        'OPENAI_API_KEY environment variable.',\n      );\n      exit(64);\n    }\n    _llm = ChatOpenAI(\n      apiKey: openAiApiKey,\n      defaultOptions: const ChatOpenAIOptions(temperature: 0.9),\n    );\n  }\n\n  late final ChatOpenAI _llm;\n  final _chatPromptTemplate = ChatPromptTemplate.fromPromptMessages([\n    SystemChatMessagePromptTemplate.fromTemplate(\n      'I would like you to assume the role of a poet from the Shakespeare school.',\n    ),\n    HumanChatMessagePromptTemplate.fromTemplate(\n      'Create a sonnet using vivid imagery and rhyme about the following topics: {topics}',\n    ),\n  ]);\n\n  Future<String> generateSonnet(final List<String> topics) async {\n    final prompt = _chatPromptTemplate.formatMessages({'topics': topics});\n    final response = await _llm.call(prompt);\n    return response.content;\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_backend/pubspec.yaml",
    "content": "name: hello_world_backend\ndescription: A sample backend service integrating LangChain.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  langchain: ^0.8.1\n  langchain_openai: ^0.8.1+1\n  shelf: ^1.4.2\n  shelf_router: ^1.1.4\n"
  },
  {
    "path": "examples/hello_world_cli/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n"
  },
  {
    "path": "examples/hello_world_cli/README.md",
    "content": "# Hello world CLI\n\nThis sample app demonstrates how to call an LLM from a CLI application using LangChain.dart.\n\nYou can find all the details in the [LangChain.dart 101: what can you build with it?](https://blog.langchaindart.dev/langchain-dart-101-what-can-you-build-with-it-%EF%B8%8F-99a92ccaec5f) \nblog post.\n\n## Usage\n\n```bash\ndart run bin/hello_world_cli.dart\n```\n\n![Hello world CLI](hello_world_cli.gif)\n"
  },
  {
    "path": "examples/hello_world_cli/bin/hello_world_cli.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main(final List<String> arguments) async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n  if (openAiApiKey == null) {\n    stderr.writeln(\n      'You need to set your OpenAI key in the '\n      'OPENAI_API_KEY environment variable.',\n    );\n    exit(1);\n  }\n\n  final llm = ChatOpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatOpenAIOptions(temperature: 0.9),\n  );\n\n  stdout.writeln('How can I help you?');\n\n  while (true) {\n    stdout.write('> ');\n    final query = stdin.readLineSync() ?? '';\n    final humanMessage = ChatMessage.humanText(query);\n    final aiMessage = await llm.call([humanMessage]);\n    stdout.writeln(aiMessage.content.trim());\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_cli/pubspec.yaml",
    "content": "name: hello_world_cli\ndescription: A sample CLI app integrating LangChain.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  langchain: ^0.8.1\n  langchain_openai: ^0.8.1+1\n"
  },
  {
    "path": "examples/hello_world_flutter/.gitignore",
    "content": "# Miscellaneous\n*.class\n*.log\n*.pyc\n*.swp\n.DS_Store\n.atom/\n.build/\n.buildlog/\n.history\n.svn/\n.swiftpm/\nmigrate_working_dir/\n\n# IntelliJ related\n*.iml\n*.ipr\n*.iws\n.idea/\n\n# The .vscode folder contains launch configuration and tasks you configure in\n# VS Code which you may wish to be included in version control, so this line\n# is commented out by default.\n#.vscode/\n\n# Flutter/Dart/Pub related\n**/doc/api/\n**/ios/Flutter/.last_build_id\n.dart_tool/\n.flutter-plugins\n.flutter-plugins-dependencies\n.packages\n.pub-cache/\n.pub/\n/build/\n\n# Symbolication related\napp.*.symbols\n\n# Obfuscation related\napp.*.map.json\n\n# Android Studio will place build artifacts here\n/android/app/debug\n/android/app/profile\n/android/app/release\n"
  },
  {
    "path": "examples/hello_world_flutter/.metadata",
    "content": "# This file tracks properties of this Flutter project.\n# Used by Flutter tool to assess capabilities and perform upgrades etc.\n#\n# This file should be version controlled.\n\nversion:\n  revision: 796c8ef79279f9c774545b3771238c3098dbefab\n  channel: stable\n\nproject_type: app\n\n# Tracks metadata for the flutter migrate command\nmigration:\n  platforms:\n    - platform: root\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: android\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: ios\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: linux\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: macos\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: web\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n    - platform: windows\n      create_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n      base_revision: 796c8ef79279f9c774545b3771238c3098dbefab\n\n  # User provided section\n\n  # List of Local paths (relative to this file) that should be\n  # ignored by the migrate tool.\n  #\n  # Files that are not part of the templates will be ignored by default.\n  unmanaged_files:\n    - 'lib/main.dart'\n    - 'ios/Runner.xcodeproj/project.pbxproj'\n"
  },
  {
    "path": "examples/hello_world_flutter/README.md",
    "content": "# Hello World Flutter\n\nThis sample application demonstrates how to call various remote and local LLMs from a Flutter application using LangChain.dart.\n\n![Hello World Flutter](hello_world_flutter.gif)\n\n## Usage\n\n```bash\nflutter run\n```\n\n- To use the remote providers you need to provide your API key. \n- To use local models you need to have the [Ollama](https://ollama.ai/) app running and the model downloaded. \n"
  },
  {
    "path": "examples/hello_world_flutter/android/.gitignore",
    "content": "gradle-wrapper.jar\n/.gradle\n/captures/\n/gradlew\n/gradlew.bat\n/local.properties\nGeneratedPluginRegistrant.java\n\n# Remember to never publicly share your keystore.\n# See https://flutter.dev/docs/deployment/android#reference-the-keystore-from-the-app\nkey.properties\n**/*.keystore\n**/*.jks\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/build.gradle",
    "content": "plugins {\n    id 'com.android.application'\n    id 'kotlin-android'\n    id 'dev.flutter.flutter-gradle-plugin'\n}\n\ndef localProperties = new Properties()\ndef localPropertiesFile = rootProject.file('local.properties')\nif (localPropertiesFile.exists()) {\n    localPropertiesFile.withReader('UTF-8') { reader ->\n        localProperties.load(reader)\n    }\n}\n\ndef flutterVersionCode = localProperties.getProperty('flutter.versionCode')\nif (flutterVersionCode == null) {\n    flutterVersionCode = '1'\n}\n\ndef flutterVersionName = localProperties.getProperty('flutter.versionName')\nif (flutterVersionName == null) {\n    flutterVersionName = '1.0'\n}\n\nandroid {\n    namespace \"com.example.hello_world_flutter\"\n    compileSdkVersion flutter.compileSdkVersion\n    ndkVersion flutter.ndkVersion\n\n    compileOptions {\n        sourceCompatibility JavaVersion.VERSION_17\n        targetCompatibility JavaVersion.VERSION_17\n    }\n\n    kotlinOptions {\n        jvmTarget = '17'\n    }\n\n    sourceSets {\n        main.java.srcDirs += 'src/main/kotlin'\n    }\n\n    defaultConfig {\n        applicationId \"com.example.hello_world_flutter\"\n        minSdkVersion flutter.minSdkVersion\n        targetSdkVersion flutter.targetSdkVersion\n        versionCode flutterVersionCode.toInteger()\n        versionName flutterVersionName\n    }\n\n    buildTypes {\n        release {\n            signingConfig signingConfigs.debug\n        }\n    }\n}\n\nflutter {\n    source '../..'\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/debug/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <!-- The INTERNET permission is required for development. Specifically,\n         the Flutter tool needs it to communicate with the running application\n         to allow setting breakpoints, to provide hot reload, etc.\n    -->\n    <uses-permission android:name=\"android.permission.INTERNET\"/>\n</manifest>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <application\n        android:label=\"hello_world_flutter\"\n        android:name=\"${applicationName}\"\n        android:icon=\"@mipmap/ic_launcher\">\n        <activity\n            android:name=\".MainActivity\"\n            android:exported=\"true\"\n            android:launchMode=\"singleTop\"\n            android:theme=\"@style/LaunchTheme\"\n            android:configChanges=\"orientation|keyboardHidden|keyboard|screenSize|smallestScreenSize|locale|layoutDirection|fontScale|screenLayout|density|uiMode\"\n            android:hardwareAccelerated=\"true\"\n            android:windowSoftInputMode=\"adjustResize\">\n            <!-- Specifies an Android theme to apply to this Activity as soon as\n                 the Android process has started. This theme is visible to the user\n                 while the Flutter UI initializes. After that, this theme continues\n                 to determine the Window background behind the Flutter UI. -->\n            <meta-data\n              android:name=\"io.flutter.embedding.android.NormalTheme\"\n              android:resource=\"@style/NormalTheme\"\n              />\n            <intent-filter>\n                <action android:name=\"android.intent.action.MAIN\"/>\n                <category android:name=\"android.intent.category.LAUNCHER\"/>\n            </intent-filter>\n        </activity>\n        <!-- Don't delete the meta-data below.\n             This is used by the Flutter tool to generate GeneratedPluginRegistrant.java -->\n        <meta-data\n            android:name=\"flutterEmbedding\"\n            android:value=\"2\" />\n    </application>\n</manifest>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/kotlin/com/example/hello_world_flutter/MainActivity.kt",
    "content": "package com.example.hello_world_flutter\n\nimport io.flutter.embedding.android.FlutterActivity\n\nclass MainActivity: FlutterActivity() {\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/res/drawable/launch_background.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Modify this file to customize your launch splash screen -->\n<layer-list xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <item android:drawable=\"@android:color/white\" />\n\n    <!-- You can insert your own image assets here -->\n    <!-- <item>\n        <bitmap\n            android:gravity=\"center\"\n            android:src=\"@mipmap/launch_image\" />\n    </item> -->\n</layer-list>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/res/drawable-v21/launch_background.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Modify this file to customize your launch splash screen -->\n<layer-list xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <item android:drawable=\"?android:colorBackground\" />\n\n    <!-- You can insert your own image assets here -->\n    <!-- <item>\n        <bitmap\n            android:gravity=\"center\"\n            android:src=\"@mipmap/launch_image\" />\n    </item> -->\n</layer-list>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/res/values/styles.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <!-- Theme applied to the Android Window while the process is starting when the OS's Dark Mode setting is off -->\n    <style name=\"LaunchTheme\" parent=\"@android:style/Theme.Light.NoTitleBar\">\n        <!-- Show a splash screen on the activity. Automatically removed when\n             the Flutter engine draws its first frame -->\n        <item name=\"android:windowBackground\">@drawable/launch_background</item>\n    </style>\n    <!-- Theme applied to the Android Window as soon as the process has started.\n         This theme determines the color of the Android Window while your\n         Flutter UI initializes, as well as behind your Flutter UI while its\n         running.\n\n         This Theme is only used starting with V2 of Flutter's Android embedding. -->\n    <style name=\"NormalTheme\" parent=\"@android:style/Theme.Light.NoTitleBar\">\n        <item name=\"android:windowBackground\">?android:colorBackground</item>\n    </style>\n</resources>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/main/res/values-night/styles.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <!-- Theme applied to the Android Window while the process is starting when the OS's Dark Mode setting is on -->\n    <style name=\"LaunchTheme\" parent=\"@android:style/Theme.Black.NoTitleBar\">\n        <!-- Show a splash screen on the activity. Automatically removed when\n             the Flutter engine draws its first frame -->\n        <item name=\"android:windowBackground\">@drawable/launch_background</item>\n    </style>\n    <!-- Theme applied to the Android Window as soon as the process has started.\n         This theme determines the color of the Android Window while your\n         Flutter UI initializes, as well as behind your Flutter UI while its\n         running.\n\n         This Theme is only used starting with V2 of Flutter's Android embedding. -->\n    <style name=\"NormalTheme\" parent=\"@android:style/Theme.Black.NoTitleBar\">\n        <item name=\"android:windowBackground\">?android:colorBackground</item>\n    </style>\n</resources>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/app/src/profile/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <!-- The INTERNET permission is required for development. Specifically,\n         the Flutter tool needs it to communicate with the running application\n         to allow setting breakpoints, to provide hot reload, etc.\n    -->\n    <uses-permission android:name=\"android.permission.INTERNET\"/>\n</manifest>\n"
  },
  {
    "path": "examples/hello_world_flutter/android/build.gradle",
    "content": "allprojects {\n    repositories {\n        google()\n        mavenCentral()\n    }\n}\n\nrootProject.buildDir = '../build'\nsubprojects {\n    project.buildDir = \"${rootProject.buildDir}/${project.name}\"\n}\nsubprojects {\n    project.evaluationDependsOn(':app')\n}\n\ntasks.register(\"clean\", Delete) {\n    delete rootProject.buildDir\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/android/gradle/wrapper/gradle-wrapper.properties",
    "content": "distributionBase=GRADLE_USER_HOME\ndistributionPath=wrapper/dists\nzipStoreBase=GRADLE_USER_HOME\nzipStorePath=wrapper/dists\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-8.6-bin.zip\n"
  },
  {
    "path": "examples/hello_world_flutter/android/gradle.properties",
    "content": "org.gradle.jvmargs=-Xmx8g -XX:+HeapDumpOnOutOfMemoryError -XX:+UseParallelGC -Dfile.encoding=UTF-8\nandroid.useAndroidX=true\nandroid.enableJetifier=true\n"
  },
  {
    "path": "examples/hello_world_flutter/android/settings.gradle",
    "content": "pluginManagement {\n    def flutterSdkPath = {\n        def properties = new Properties()\n        file(\"local.properties\").withInputStream { properties.load(it) }\n        def flutterSdkPath = properties.getProperty(\"flutter.sdk\")\n        assert flutterSdkPath != null, \"flutter.sdk not set in local.properties\"\n        return flutterSdkPath\n    }()\n\n    includeBuild(\"$flutterSdkPath/packages/flutter_tools/gradle\")\n\n    repositories {\n        google()\n        mavenCentral()\n        gradlePluginPortal()\n    }\n}\n\nplugins {\n    id \"dev.flutter.flutter-plugin-loader\" version \"1.0.0\"\n    id \"com.android.application\" version \"8.2.2\" apply false\n    id \"org.jetbrains.kotlin.android\" version \"1.9.23\" apply false\n}\n\ninclude \":app\"\n"
  },
  {
    "path": "examples/hello_world_flutter/devtools_options.yaml",
    "content": "description: This file stores settings for Dart & Flutter DevTools.\ndocumentation: https://docs.flutter.dev/tools/devtools/extensions#configure-extension-enablement-states\nextensions:\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/.gitignore",
    "content": "**/dgph\n*.mode1v3\n*.mode2v3\n*.moved-aside\n*.pbxuser\n*.perspectivev3\n**/*sync/\n.sconsign.dblite\n.tags*\n**/.vagrant/\n**/DerivedData/\nIcon?\n**/Pods/\n**/.symlinks/\nprofile\nxcuserdata\n**/.generated/\nFlutter/App.framework\nFlutter/Flutter.framework\nFlutter/Flutter.podspec\nFlutter/Generated.xcconfig\nFlutter/ephemeral/\nFlutter/app.flx\nFlutter/app.zip\nFlutter/flutter_assets/\nFlutter/flutter_export_environment.sh\nServiceDefinitions.json\nRunner/GeneratedPluginRegistrant.*\n\n# Exceptions to above rules.\n!default.mode1v3\n!default.mode2v3\n!default.pbxuser\n!default.perspectivev3\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Flutter/AppFrameworkInfo.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n  <key>CFBundleDevelopmentRegion</key>\n  <string>en</string>\n  <key>CFBundleExecutable</key>\n  <string>App</string>\n  <key>CFBundleIdentifier</key>\n  <string>io.flutter.flutter.app</string>\n  <key>CFBundleInfoDictionaryVersion</key>\n  <string>6.0</string>\n  <key>CFBundleName</key>\n  <string>App</string>\n  <key>CFBundlePackageType</key>\n  <string>FMWK</string>\n  <key>CFBundleShortVersionString</key>\n  <string>1.0</string>\n  <key>CFBundleSignature</key>\n  <string>????</string>\n  <key>CFBundleVersion</key>\n  <string>1.0</string>\n  <key>MinimumOSVersion</key>\n  <string>12.0</string>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Flutter/Debug.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"\n#include \"Generated.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Flutter/Release.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"\n#include \"Generated.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Podfile",
    "content": "# Uncomment this line to define a global platform for your project\n# platform :ios, '12.0'\n\n# CocoaPods analytics sends network stats synchronously affecting flutter build latency.\nENV['COCOAPODS_DISABLE_STATS'] = 'true'\n\nproject 'Runner', {\n  'Debug' => :debug,\n  'Profile' => :release,\n  'Release' => :release,\n}\n\ndef flutter_root\n  generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'Generated.xcconfig'), __FILE__)\n  unless File.exist?(generated_xcode_build_settings_path)\n    raise \"#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure flutter pub get is executed first\"\n  end\n\n  File.foreach(generated_xcode_build_settings_path) do |line|\n    matches = line.match(/FLUTTER_ROOT\\=(.*)/)\n    return matches[1].strip if matches\n  end\n  raise \"FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Generated.xcconfig, then run flutter pub get\"\nend\n\nrequire File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)\n\nflutter_ios_podfile_setup\n\ntarget 'Runner' do\n  use_frameworks!\n\n  flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__))\n  target 'RunnerTests' do\n    inherit! :search_paths\n  end\nend\n\npost_install do |installer|\n  installer.pods_project.targets.each do |target|\n    flutter_additional_ios_build_settings(target)\n  end\nend\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/AppDelegate.swift",
    "content": "import UIKit\nimport Flutter\n\n@UIApplicationMain\n@objc class AppDelegate: FlutterAppDelegate {\n  override func application(\n    _ application: UIApplication,\n    didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?\n  ) -> Bool {\n    GeneratedPluginRegistrant.register(with: self)\n    return super.application(application, didFinishLaunchingWithOptions: launchOptions)\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-20x20@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-20x20@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-40x40@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-40x40@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"60x60\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-60x60@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"60x60\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-60x60@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-20x20@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-20x20@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-29x29@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-29x29@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      
\"size\" : \"40x40\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-40x40@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-40x40@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"76x76\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-76x76@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"76x76\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-76x76@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"83.5x83.5\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-83.5x83.5@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"1024x1024\",\n      \"idiom\" : \"ios-marketing\",\n      \"filename\" : \"Icon-App-1024x1024@1x.png\",\n      \"scale\" : \"1x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Assets.xcassets/LaunchImage.imageset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage@3x.png\",\n      \"scale\" : \"3x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Assets.xcassets/LaunchImage.imageset/README.md",
    "content": "# Launch Screen Assets\n\nYou can customize the launch screen with your own desired assets by replacing the image files in this directory.\n\nYou can also do it by opening your Flutter project's Xcode project with `open ios/Runner.xcworkspace`, selecting `Runner/Assets.xcassets` in the Project Navigator and dropping in the desired images."
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Base.lproj/LaunchScreen.storyboard",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<document type=\"com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB\" version=\"3.0\" toolsVersion=\"12121\" systemVersion=\"16G29\" targetRuntime=\"iOS.CocoaTouch\" propertyAccessControl=\"none\" useAutolayout=\"YES\" launchScreen=\"YES\" colorMatched=\"YES\" initialViewController=\"01J-lp-oVM\">\n    <dependencies>\n        <deployment identifier=\"iOS\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.IBCocoaTouchPlugin\" version=\"12089\"/>\n    </dependencies>\n    <scenes>\n        <!--View Controller-->\n        <scene sceneID=\"EHf-IW-A2E\">\n            <objects>\n                <viewController id=\"01J-lp-oVM\" sceneMemberID=\"viewController\">\n                    <layoutGuides>\n                        <viewControllerLayoutGuide type=\"top\" id=\"Ydg-fD-yQy\"/>\n                        <viewControllerLayoutGuide type=\"bottom\" id=\"xbc-2k-c8Z\"/>\n                    </layoutGuides>\n                    <view key=\"view\" contentMode=\"scaleToFill\" id=\"Ze5-6b-2t3\">\n                        <autoresizingMask key=\"autoresizingMask\" widthSizable=\"YES\" heightSizable=\"YES\"/>\n                        <subviews>\n                            <imageView opaque=\"NO\" clipsSubviews=\"YES\" multipleTouchEnabled=\"YES\" contentMode=\"center\" image=\"LaunchImage\" translatesAutoresizingMaskIntoConstraints=\"NO\" id=\"YRO-k0-Ey4\">\n                            </imageView>\n                        </subviews>\n                        <color key=\"backgroundColor\" red=\"1\" green=\"1\" blue=\"1\" alpha=\"1\" colorSpace=\"custom\" customColorSpace=\"sRGB\"/>\n                        <constraints>\n                            <constraint firstItem=\"YRO-k0-Ey4\" firstAttribute=\"centerX\" secondItem=\"Ze5-6b-2t3\" secondAttribute=\"centerX\" id=\"1a2-6s-vTC\"/>\n                            <constraint firstItem=\"YRO-k0-Ey4\" firstAttribute=\"centerY\" 
secondItem=\"Ze5-6b-2t3\" secondAttribute=\"centerY\" id=\"4X2-HB-R7a\"/>\n                        </constraints>\n                    </view>\n                </viewController>\n                <placeholder placeholderIdentifier=\"IBFirstResponder\" id=\"iYj-Kq-Ea1\" userLabel=\"First Responder\" sceneMemberID=\"firstResponder\"/>\n            </objects>\n            <point key=\"canvasLocation\" x=\"53\" y=\"375\"/>\n        </scene>\n    </scenes>\n    <resources>\n        <image name=\"LaunchImage\" width=\"168\" height=\"185\"/>\n    </resources>\n</document>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Base.lproj/Main.storyboard",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<document type=\"com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB\" version=\"3.0\" toolsVersion=\"10117\" systemVersion=\"15F34\" targetRuntime=\"iOS.CocoaTouch\" propertyAccessControl=\"none\" useAutolayout=\"YES\" useTraitCollections=\"YES\" initialViewController=\"BYZ-38-t0r\">\n    <dependencies>\n        <deployment identifier=\"iOS\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.IBCocoaTouchPlugin\" version=\"10085\"/>\n    </dependencies>\n    <scenes>\n        <!--Flutter View Controller-->\n        <scene sceneID=\"tne-QT-ifu\">\n            <objects>\n                <viewController id=\"BYZ-38-t0r\" customClass=\"FlutterViewController\" sceneMemberID=\"viewController\">\n                    <layoutGuides>\n                        <viewControllerLayoutGuide type=\"top\" id=\"y3c-jy-aDJ\"/>\n                        <viewControllerLayoutGuide type=\"bottom\" id=\"wfy-db-euE\"/>\n                    </layoutGuides>\n                    <view key=\"view\" contentMode=\"scaleToFill\" id=\"8bC-Xf-vdC\">\n                        <rect key=\"frame\" x=\"0.0\" y=\"0.0\" width=\"600\" height=\"600\"/>\n                        <autoresizingMask key=\"autoresizingMask\" widthSizable=\"YES\" heightSizable=\"YES\"/>\n                        <color key=\"backgroundColor\" white=\"1\" alpha=\"1\" colorSpace=\"custom\" customColorSpace=\"calibratedWhite\"/>\n                    </view>\n                </viewController>\n                <placeholder placeholderIdentifier=\"IBFirstResponder\" id=\"dkx-z0-nzr\" sceneMemberID=\"firstResponder\"/>\n            </objects>\n        </scene>\n    </scenes>\n</document>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>$(DEVELOPMENT_LANGUAGE)</string>\n\t<key>CFBundleDisplayName</key>\n\t<string>Hello World Flutter</string>\n\t<key>CFBundleExecutable</key>\n\t<string>$(EXECUTABLE_NAME)</string>\n\t<key>CFBundleIdentifier</key>\n\t<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundleName</key>\n\t<string>hello_world_flutter</string>\n\t<key>CFBundlePackageType</key>\n\t<string>APPL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>$(FLUTTER_BUILD_NAME)</string>\n\t<key>CFBundleSignature</key>\n\t<string>????</string>\n\t<key>CFBundleVersion</key>\n\t<string>$(FLUTTER_BUILD_NUMBER)</string>\n\t<key>LSRequiresIPhoneOS</key>\n\t<true/>\n\t<key>UILaunchStoryboardName</key>\n\t<string>LaunchScreen</string>\n\t<key>UIMainStoryboardFile</key>\n\t<string>Main</string>\n\t<key>UISupportedInterfaceOrientations</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t</array>\n\t<key>UISupportedInterfaceOrientations~ipad</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationPortraitUpsideDown</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t</array>\n\t<key>UIViewControllerBasedStatusBarAppearance</key>\n\t<false/>\n\t<key>CADisableMinimumFrameDurationOnPhone</key>\n\t<true/>\n\t<key>UIApplicationSupportsIndirectInputEvents</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner/Runner-Bridging-Header.h",
    "content": "#import \"GeneratedPluginRegistrant.h\"\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 54;\n\tobjects = {\n\n/* Begin PBXBuildFile section */\n\t\t1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */ = {isa = PBXBuildFile; fileRef = 1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */; };\n\t\t3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */ = {isa = PBXBuildFile; fileRef = 3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */; };\n\t\t74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 74858FAE1ED2DC5600515810 /* AppDelegate.swift */; };\n\t\t97C146FC1CF9000F007C117D /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FA1CF9000F007C117D /* Main.storyboard */; };\n\t\t97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FD1CF9000F007C117D /* Assets.xcassets */; };\n\t\t97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */; };\n\t\t331C808B294A63AB00263BE5 /* RunnerTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 331C807B294A618700263BE5 /* RunnerTests.swift */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t331C8085294A63A400263BE5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 97C146E61CF9000F007C117D /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 97C146ED1CF9000F007C117D;\n\t\t\tremoteInfo = Runner;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXCopyFilesBuildPhase section */\n\t\t9705A1C41CF9048500538489 /* Embed Frameworks */ = {\n\t\t\tisa = PBXCopyFilesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tdstPath = \"\";\n\t\t\tdstSubfolderSpec = 10;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tname = \"Embed Frameworks\";\n\t\t\trunOnlyForDeploymentPostprocessing = 
0;\n\t\t};\n/* End PBXCopyFilesBuildPhase section */\n\n/* Begin PBXFileReference section */\n\t\t1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GeneratedPluginRegistrant.h; sourceTree = \"<group>\"; };\n\t\t1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GeneratedPluginRegistrant.m; sourceTree = \"<group>\"; };\n\t\t3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; name = AppFrameworkInfo.plist; path = Flutter/AppFrameworkInfo.plist; sourceTree = \"<group>\"; };\n\t\t74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = \"Runner-Bridging-Header.h\"; sourceTree = \"<group>\"; };\n\t\t74858FAE1ED2DC5600515810 /* AppDelegate.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = \"<group>\"; };\n\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; name = Release.xcconfig; path = Flutter/Release.xcconfig; sourceTree = \"<group>\"; };\n\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Debug.xcconfig; path = Flutter/Debug.xcconfig; sourceTree = \"<group>\"; };\n\t\t9740EEB31CF90195004384FC /* Generated.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Generated.xcconfig; path = Flutter/Generated.xcconfig; sourceTree = \"<group>\"; };\n\t\t97C146EE1CF9000F007C117D /* Runner.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Runner.app; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t97C146FB1CF9000F007C117D /* Base */ = {isa = 
PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = \"<group>\"; };\n\t\t97C146FD1CF9000F007C117D /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = \"<group>\"; };\n\t\t97C147001CF9000F007C117D /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = \"<group>\"; };\n\t\t97C147021CF9000F007C117D /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = \"<group>\"; };\n\t\t331C807B294A618700263BE5 /* RunnerTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RunnerTests.swift; sourceTree = \"<group>\"; };\n\t\t331C8081294A63A400263BE5 /* RunnerTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = RunnerTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t97C146EB1CF9000F007C117D /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t9740EEB11CF90186004384FC /* Flutter */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */,\n\t\t\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */,\n\t\t\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */,\n\t\t\t\t9740EEB31CF90195004384FC /* Generated.xcconfig */,\n\t\t\t);\n\t\t\tname = Flutter;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t331C8082294A63A400263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t331C807B294A618700263BE5 /* RunnerTests.swift */,\n\t\t\t);\n\t\t\tpath = 
RunnerTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146E51CF9000F007C117D = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t9740EEB11CF90186004384FC /* Flutter */,\n\t\t\t\t97C146F01CF9000F007C117D /* Runner */,\n\t\t\t\t97C146EF1CF9000F007C117D /* Products */,\n\t\t\t\t331C8082294A63A400263BE5 /* RunnerTests */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146EF1CF9000F007C117D /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146EE1CF9000F007C117D /* Runner.app */,\n\t\t\t\t331C8081294A63A400263BE5 /* RunnerTests.xctest */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146F01CF9000F007C117D /* Runner */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146FA1CF9000F007C117D /* Main.storyboard */,\n\t\t\t\t97C146FD1CF9000F007C117D /* Assets.xcassets */,\n\t\t\t\t97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */,\n\t\t\t\t97C147021CF9000F007C117D /* Info.plist */,\n\t\t\t\t1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */,\n\t\t\t\t1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */,\n\t\t\t\t74858FAE1ED2DC5600515810 /* AppDelegate.swift */,\n\t\t\t\t74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */,\n\t\t\t);\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t331C8080294A63A400263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 331C8087294A63A400263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t331C807D294A63A400263BE5 /* Sources */,\n\t\t\t\t331C807E294A63A400263BE5 /* Frameworks */,\n\t\t\t\t331C807F294A63A400263BE5 /* Resources */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t331C8086294A63A400263BE5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = RunnerTests;\n\t\t\tproductName = RunnerTests;\n\t\t\tproductReference = 
331C8081294A63A400263BE5 /* RunnerTests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.unit-test\";\n\t\t};\n\t\t97C146ED1CF9000F007C117D /* Runner */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget \"Runner\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t9740EEB61CF901F6004384FC /* Run Script */,\n\t\t\t\t97C146EA1CF9000F007C117D /* Sources */,\n\t\t\t\t97C146EB1CF9000F007C117D /* Frameworks */,\n\t\t\t\t97C146EC1CF9000F007C117D /* Resources */,\n\t\t\t\t9705A1C41CF9048500538489 /* Embed Frameworks */,\n\t\t\t\t3B06AD1E1E4923F5004D2608 /* Thin Binary */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = Runner;\n\t\t\tproductName = Runner;\n\t\t\tproductReference = 97C146EE1CF9000F007C117D /* Runner.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t97C146E61CF9000F007C117D /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tLastUpgradeCheck = 1510;\n\t\t\t\tORGANIZATIONNAME = \"\";\n\t\t\t\tTargetAttributes = {\n\t\t\t\t\t331C8080294A63A400263BE5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 14.0;\n\t\t\t\t\t\tTestTargetID = 97C146ED1CF9000F007C117D;\n\t\t\t\t\t};\n\t\t\t\t\t97C146ED1CF9000F007C117D = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 7.3.1;\n\t\t\t\t\t\tLastSwiftMigration = 1100;\n\t\t\t\t\t};\n\t\t\t\t};\n\t\t\t};\n\t\t\tbuildConfigurationList = 97C146E91CF9000F007C117D /* Build configuration list for PBXProject \"Runner\" */;\n\t\t\tcompatibilityVersion = \"Xcode 9.3\";\n\t\t\tdevelopmentRegion = en;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t\tBase,\n\t\t\t);\n\t\t\tmainGroup = 97C146E51CF9000F007C117D;\n\t\t\tproductRefGroup = 97C146EF1CF9000F007C117D /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = 
(\n\t\t\t\t97C146ED1CF9000F007C117D /* Runner */,\n\t\t\t\t331C8080294A63A400263BE5 /* RunnerTests */,\n\t\t\t);\n\t\t};\n/* End PBXProject section */\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t331C807F294A63A400263BE5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t97C146EC1CF9000F007C117D /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */,\n\t\t\t\t3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */,\n\t\t\t\t97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */,\n\t\t\t\t97C146FC1CF9000F007C117D /* Main.storyboard in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXShellScriptBuildPhase section */\n\t\t3B06AD1E1E4923F5004D2608 /* Thin Binary */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${TARGET_BUILD_DIR}/${INFOPLIST_PATH}\",\n\t\t\t);\n\t\t\tname = \"Thin Binary\";\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"/bin/sh \\\"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\\\" embed_and_thin\";\n\t\t};\n\t\t9740EEB61CF901F6004384FC /* Run Script */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t);\n\t\t\tname = \"Run Script\";\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"/bin/sh \\\"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\\\" build\";\n\t\t};\n/* End 
PBXShellScriptBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t331C807D294A63A400263BE5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t331C808B294A63AB00263BE5 /* RunnerTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t97C146EA1CF9000F007C117D /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */,\n\t\t\t\t1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t331C8086294A63A400263BE5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 97C146ED1CF9000F007C117D /* Runner */;\n\t\t\ttargetProxy = 331C8085294A63A400263BE5 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin PBXVariantGroup section */\n\t\t97C146FA1CF9000F007C117D /* Main.storyboard */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146FB1CF9000F007C117D /* Base */,\n\t\t\t);\n\t\t\tname = Main.storyboard;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t97C147001CF9000F007C117D /* Base */,\n\t\t\t);\n\t\t\tname = LaunchScreen.storyboard;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXVariantGroup section */\n\n/* Begin XCBuildConfiguration section */\n\t\t249021D3217E4FDB00AE95B9 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = 
YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tSUPPORTED_PLATFORMS = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tVALIDATE_PRODUCT = YES;\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t249021D4217E4FDB00AE95B9 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = 
{\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tDEVELOPMENT_TEAM = 8G826UNG36;\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t331C8088294A63A400263BE5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = AE0B7B92F70575B8D7E0D07E /* Pods-RunnerTests.debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t331C8089294A63A400263BE5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 89B67EB44CE7B6631473024E /* Pods-RunnerTests.release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME 
= \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t331C808A294A63A400263BE5 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 640959BDD8F10B91D80A66BE /* Pods-RunnerTests.profile.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t97C147031CF9000F007C117D /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = 
YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = dwarf;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_TESTABILITY = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = YES;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t97C147041CF9000F007C117D /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = 
YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tSUPPORTED_PLATFORMS = iphoneos;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tVALIDATE_PRODUCT = YES;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t97C147061CF9000F007C117D /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tDEVELOPMENT_TEAM = 8G826UNG36;\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = 
com.example.helloWorldFlutter;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t97C147071CF9000F007C117D /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tDEVELOPMENT_TEAM = 8G826UNG36;\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t331C8087294A63A400263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t331C8088294A63A400263BE5 /* Debug */,\n\t\t\t\t331C8089294A63A400263BE5 /* Release */,\n\t\t\t\t331C808A294A63A400263BE5 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t97C146E91CF9000F007C117D /* Build configuration list for PBXProject \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t97C147031CF9000F007C117D /* Debug */,\n\t\t\t\t97C147041CF9000F007C117D /* Release */,\n\t\t\t\t249021D3217E4FDB00AE95B9 /* 
Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t97C147061CF9000F007C117D /* Debug */,\n\t\t\t\t97C147071CF9000F007C117D /* Release */,\n\t\t\t\t249021D4217E4FDB00AE95B9 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\t};\n\trootObject = 97C146E61CF9000F007C117D /* Project object */;\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcodeproj/project.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"self:\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>PreviewsEnabled</key>\n\t<false/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Scheme\n   LastUpgradeVersion = \"1510\"\n   version = \"1.3\">\n   <BuildAction\n      parallelizeBuildables = \"YES\"\n      buildImplicitDependencies = \"YES\">\n      <BuildActionEntries>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n               BuildableName = \"Runner.app\"\n               BlueprintName = \"Runner\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            </BuildableReference>\n         </BuildActionEntry>\n      </BuildActionEntries>\n   </BuildAction>\n   <TestAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\">\n      <MacroExpansion>\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </MacroExpansion>\n      <Testables>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"331C8080294A63A400263BE5\"\n               BuildableName = \"RunnerTests.xctest\"\n               BlueprintName = \"RunnerTests\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            
</BuildableReference>\n         </TestableReference>\n      </Testables>\n   </TestAction>\n   <LaunchAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      launchStyle = \"0\"\n      useCustomWorkingDirectory = \"NO\"\n      ignoresPersistentStateOnLaunch = \"NO\"\n      debugDocumentVersioning = \"YES\"\n      debugServiceExtension = \"internal\"\n      allowLocationSimulation = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </LaunchAction>\n   <ProfileAction\n      buildConfiguration = \"Profile\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      savedToolIdentifier = \"\"\n      useCustomWorkingDirectory = \"NO\"\n      debugDocumentVersioning = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </ProfileAction>\n   <AnalyzeAction\n      buildConfiguration = \"Debug\">\n   </AnalyzeAction>\n   <ArchiveAction\n      buildConfiguration = \"Release\"\n      revealArchiveInOrganizer = \"YES\">\n   </ArchiveAction>\n</Scheme>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"group:Runner.xcodeproj\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/Runner.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>PreviewsEnabled</key>\n\t<false/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/ios/RunnerTests/RunnerTests.swift",
    "content": "import Flutter\nimport UIKit\nimport XCTest\n\nclass RunnerTests: XCTestCase {\n\n  func testExample() {\n    // If you add code to the Runner application, consider adding tests here.\n    // See https://developer.apple.com/documentation/xctest for more information about using XCTest.\n  }\n\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/app.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:flutter/material.dart';\n\nimport 'home/home_screen.dart';\n\nclass MyApp extends StatelessWidget {\n  const MyApp({super.key});\n\n  @override\n  Widget build(final BuildContext context) {\n    return MaterialApp(\n      theme: ThemeData(\n        colorScheme: ColorScheme.fromSeed(seedColor: Colors.lightGreen),\n        useMaterial3: true,\n      ),\n      debugShowCheckedModeBanner: false,\n      home: const HomeScreen(),\n    );\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/home/bloc/home_screen_cubit.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:equatable/equatable.dart';\nimport 'package:flutter/foundation.dart';\nimport 'package:flutter_bloc/flutter_bloc.dart';\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:langchain_mistralai/langchain_mistralai.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nimport 'providers.dart';\n\npart 'home_screen_state.dart';\n\nclass HomeScreenCubit extends Cubit<HomeScreenState> {\n  HomeScreenCubit() : super(const HomeScreenState()) {\n    _updateChain();\n  }\n\n  RunnableSequence<String, String>? chain;\n\n  void onProviderChanged(final Provider provider) {\n    emit(\n      state.copyWith(\n        status: HomeScreenStatus.idle,\n        provider: provider,\n        response: '',\n      ),\n    );\n    _updateChain();\n  }\n\n  void onModelChanged(final String model) {\n    final newModel = {...state.model, state.provider: model};\n    emit(state.copyWith(model: newModel));\n    _updateChain();\n  }\n\n  void onApiKeyChanged(final String apiKey) {\n    final newApiKey = {...state.apiKey, state.provider: apiKey};\n    emit(state.copyWith(apiKey: newApiKey));\n    _updateChain();\n  }\n\n  void onBaseUrlChanged(final String baseUrl) {\n    final newBaseUrl = {...state.baseUrl, state.provider: baseUrl};\n    emit(state.copyWith(baseUrl: newBaseUrl));\n    _updateChain();\n  }\n\n  void onQueryChanged(final String query) {\n    emit(state.copyWith(query: query));\n  }\n\n  Future<void> onSubmitPressed() async {\n    if (!_validateInput()) return;\n    emit(state.copyWith(status: HomeScreenStatus.generating, response: ''));\n\n    assert(chain != null);\n    final stream = chain!.stream(state.query).handleError(_onErrorGenerating);\n    await for (final result in stream) {\n      emit(\n        state.copyWith(\n          status: HomeScreenStatus.idle,\n    
      response: (state.response) + result,\n        ),\n      );\n    }\n  }\n\n  bool _validateInput() {\n    final provider = state.provider;\n    if (provider.isRemote && (state.apiKey[provider] ?? '').isEmpty) {\n      emit(\n        state.copyWith(\n          status: HomeScreenStatus.idle,\n          error: HomeScreenError.apiKeyEmpty,\n        ),\n      );\n      return false;\n    }\n\n    if (state.query.isEmpty) {\n      emit(\n        state.copyWith(\n          status: HomeScreenStatus.idle,\n          error: HomeScreenError.queryEmpty,\n        ),\n      );\n      return false;\n    }\n\n    return true;\n  }\n\n  void _updateChain() {\n    try {\n      final provider = state.provider;\n      final model = state.model;\n      final apiKey = state.apiKey;\n\n      final chatModel =\n          switch (provider) {\n                Provider.googleAI => ChatGoogleGenerativeAI(\n                  apiKey: apiKey[provider] ?? '',\n                  baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,\n                  defaultOptions: ChatGoogleGenerativeAIOptions(\n                    model: model[provider] ?? provider.defaultModel,\n                  ),\n                ),\n                Provider.mistral => ChatMistralAI(\n                  apiKey: apiKey[provider] ?? '',\n                  baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,\n                  defaultOptions: ChatMistralAIOptions(\n                    model: model[provider] ?? provider.defaultModel,\n                  ),\n                ),\n                Provider.openAI => ChatOpenAI(\n                  apiKey: apiKey[provider] ?? '',\n                  baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,\n                  defaultOptions: ChatOpenAIOptions(\n                    model: model[provider] ?? 
provider.defaultModel,\n                  ),\n                ),\n                Provider.ollama => ChatOllama(\n                  baseUrl: state.baseUrl[provider] ?? provider.defaultBaseUrl,\n                  defaultOptions: ChatOllamaOptions(\n                    model: model[provider] ?? provider.defaultModel,\n                  ),\n                ),\n              }\n              as BaseChatModel<ChatModelOptions>;\n\n      chain?.close();\n      chain = Runnable.getMapFromInput<String>('query')\n          .pipe(\n            ChatPromptTemplate.fromTemplates(const [\n              (\n                ChatMessageType.system,\n                'You are a helpful assistant. Reply to the user using Markdown.',\n              ),\n              (ChatMessageType.human, '{query}'),\n            ]),\n          )\n          .pipe(chatModel)\n          .pipe(const StringOutputParser());\n    } catch (_) {\n      // Ignore invalid base URL exceptions\n    }\n  }\n\n  void _onErrorGenerating(final Object error) {\n    emit(\n      state.copyWith(\n        status: HomeScreenStatus.idle,\n        error: HomeScreenError.generationError,\n      ),\n    );\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/home/bloc/home_screen_state.dart",
    "content": "// ignore_for_file: public_member_api_docs\npart of 'home_screen_cubit.dart';\n\n@immutable\nclass HomeScreenState extends Equatable {\n  const HomeScreenState({\n    this.status = HomeScreenStatus.idle,\n    this.error,\n    this.provider = Provider.ollama,\n    this.model = const {},\n    this.apiKey = const {},\n    this.baseUrl = const {},\n    this.query = '',\n    this.response = '',\n  });\n\n  final HomeScreenStatus status;\n  final HomeScreenError? error;\n  final Provider provider;\n  final Map<Provider, String> model;\n  final Map<Provider, String> apiKey;\n  final Map<Provider, String> baseUrl;\n  final String query;\n  final String response;\n\n  HomeScreenState copyWith({\n    final HomeScreenStatus? status,\n    final HomeScreenError? error,\n    final Provider? provider,\n    final Map<Provider, String>? model,\n    final Map<Provider, String>? apiKey,\n    final Map<Provider, String>? baseUrl,\n    final String? query,\n    final String? response,\n  }) {\n    return HomeScreenState(\n      status: status ?? this.status,\n      error: error,\n      provider: provider ?? this.provider,\n      model: model ?? this.model,\n      apiKey: apiKey ?? this.apiKey,\n      baseUrl: baseUrl ?? this.baseUrl,\n      query: query ?? this.query,\n      response: response ?? this.response,\n    );\n  }\n\n  @override\n  List<Object?> get props => [\n    status,\n    error,\n    provider,\n    model,\n    apiKey,\n    baseUrl,\n    query,\n    response,\n  ];\n}\n\nenum HomeScreenStatus { idle, generating }\n\nenum HomeScreenError {\n  modelEmpty,\n  apiKeyEmpty,\n  baseUrlEmpty,\n  queryEmpty,\n  generationError,\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/home/bloc/providers.dart",
    "content": "// ignore_for_file: public_member_api_docs\n\nenum Provider {\n  googleAI(\n    name: 'GoogleAI',\n    defaultModel: 'gemini-1.5-pro',\n    defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta',\n    isRemote: true,\n  ),\n  mistral(\n    name: 'Mistral',\n    defaultModel: 'mistral-small',\n    defaultBaseUrl: 'https://api.mistral.ai/v1',\n    isRemote: true,\n  ),\n  openAI(\n    name: 'OpenAI',\n    defaultModel: 'gpt-4o',\n    defaultBaseUrl: 'https://api.openai.com/v1',\n    isRemote: true,\n  ),\n  ollama(\n    name: 'Ollama',\n    defaultModel: 'llama3.2',\n    defaultBaseUrl: 'http://localhost:11434/api',\n    isRemote: false,\n  );\n\n  const Provider({\n    required this.name,\n    required this.defaultModel,\n    required this.defaultBaseUrl,\n    required this.isRemote,\n  });\n\n  final String name;\n  final String defaultModel;\n  final String defaultBaseUrl;\n  final bool isRemote;\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/home/home_screen.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:flutter/material.dart';\nimport 'package:flutter_bloc/flutter_bloc.dart';\nimport 'package:flutter_markdown/flutter_markdown.dart';\n\nimport 'bloc/home_screen_cubit.dart';\nimport 'bloc/providers.dart';\n\nclass HomeScreen extends StatelessWidget {\n  const HomeScreen({super.key});\n\n  @override\n  Widget build(final BuildContext context) {\n    return BlocProvider(\n      create: (final _) => HomeScreenCubit(),\n      child: const _Scaffold(),\n    );\n  }\n}\n\nclass _Scaffold extends StatelessWidget {\n  const _Scaffold();\n\n  @override\n  Widget build(final BuildContext context) {\n    final theme = Theme.of(context);\n    return Scaffold(\n      appBar: AppBar(\n        backgroundColor: theme.colorScheme.inversePrimary,\n        title: const Text('🦜️🔗 LangChain.dart'),\n      ),\n      body: const _Body(),\n    );\n  }\n}\n\nclass _Body extends StatelessWidget {\n  const _Body();\n\n  @override\n  Widget build(final BuildContext context) {\n    final cubit = context.read<HomeScreenCubit>();\n    return BlocListener<HomeScreenCubit, HomeScreenState>(\n      listenWhen: (final previous, final current) =>\n          previous.error != current.error,\n      listener: (final context, final state) {\n        if (state.error == HomeScreenError.generationError) {\n          ScaffoldMessenger.of(context).showSnackBar(\n            const SnackBar(\n              content: Text('An error occurred while generating the response'),\n            ),\n          );\n        }\n      },\n      child: SingleChildScrollView(\n        child: Padding(\n          padding: const EdgeInsets.all(16),\n          child: Column(\n            mainAxisSize: MainAxisSize.min,\n            crossAxisAlignment: CrossAxisAlignment.start,\n            children: [\n              const _ProviderSelector(),\n              const SizedBox(height: 16),\n              Row(\n                crossAxisAlignment: 
CrossAxisAlignment.start,\n                children: [\n                  Expanded(child: _ApiKeyTextField(cubit)),\n                  const SizedBox(width: 16),\n                  Expanded(child: _BaseUrlTextField(cubit)),\n                ],\n              ),\n              const SizedBox(height: 16),\n              _ModelTextField(cubit),\n              const SizedBox(height: 16),\n              _QueryTextField(cubit),\n              const SizedBox(height: 16),\n              const _SubmitButton(),\n              const SizedBox(height: 12),\n              const Divider(),\n              const SizedBox(height: 16),\n              const _Response(),\n            ],\n          ),\n        ),\n      ),\n    );\n  }\n}\n\nclass _ProviderSelector extends StatelessWidget {\n  const _ProviderSelector();\n\n  @override\n  Widget build(final BuildContext context) {\n    final cubit = context.read<HomeScreenCubit>();\n    return BlocBuilder<HomeScreenCubit, HomeScreenState>(\n      buildWhen: (final previous, final current) =>\n          previous.provider != current.provider,\n      builder: (final context, final state) {\n        return Center(\n          child: SegmentedButton<Provider>(\n            segments: Provider.values\n                .map(\n                  (final provider) => ButtonSegment<Provider>(\n                    value: provider,\n                    label: Text(provider.name),\n                    icon: Icon(\n                      provider.isRemote\n                          ? 
Icons.cloud_outlined\n                          : Icons.install_desktop_outlined,\n                    ),\n                  ),\n                )\n                .toList(),\n            selected: {state.provider},\n            onSelectionChanged: (final Set<Provider> newSelection) {\n              cubit.onProviderChanged(newSelection.first);\n            },\n          ),\n        );\n      },\n    );\n  }\n}\n\nclass _ModelTextField extends _BaseTextField {\n  const _ModelTextField(this.cubit);\n\n  final HomeScreenCubit cubit;\n\n  @override\n  String get labelText => 'Model name';\n\n  @override\n  bool get obscureText => false;\n\n  @override\n  IconData get prefixIcon => Icons.link;\n\n  @override\n  HomeScreenError get errorType => HomeScreenError.modelEmpty;\n\n  @override\n  String get errorText => 'Model name cannot be empty';\n\n  @override\n  String onProviderChanged(final HomeScreenState state) =>\n      state.model[state.provider] ?? state.provider.defaultModel;\n\n  @override\n  void onTextChanged(final String value) => cubit.onModelChanged(value);\n}\n\nclass _ApiKeyTextField extends _BaseTextField {\n  const _ApiKeyTextField(this.cubit);\n\n  final HomeScreenCubit cubit;\n\n  @override\n  String get labelText => 'API key';\n\n  @override\n  bool get obscureText => true;\n\n  @override\n  IconData get prefixIcon => Icons.password;\n\n  @override\n  HomeScreenError get errorType => HomeScreenError.apiKeyEmpty;\n\n  @override\n  String get errorText => 'API key cannot be empty';\n\n  @override\n  String onProviderChanged(final HomeScreenState state) =>\n      state.apiKey[state.provider] ?? 
'';\n\n  @override\n  void onTextChanged(final String value) => cubit.onApiKeyChanged(value);\n}\n\nclass _BaseUrlTextField extends _BaseTextField {\n  const _BaseUrlTextField(this.cubit);\n\n  final HomeScreenCubit cubit;\n\n  @override\n  String get labelText => 'Base URL';\n\n  @override\n  bool get obscureText => false;\n\n  @override\n  IconData get prefixIcon => Icons.language;\n\n  @override\n  HomeScreenError get errorType => HomeScreenError.baseUrlEmpty;\n\n  @override\n  String get errorText => 'Base URL cannot be empty';\n\n  @override\n  String onProviderChanged(final HomeScreenState state) =>\n      state.baseUrl[state.provider] ?? state.provider.defaultBaseUrl;\n\n  @override\n  void onTextChanged(final String value) => cubit.onBaseUrlChanged(value);\n}\n\nclass _QueryTextField extends _BaseTextField {\n  const _QueryTextField(this.cubit);\n\n  final HomeScreenCubit cubit;\n\n  @override\n  String get labelText => 'Enter question';\n\n  @override\n  bool get obscureText => false;\n\n  @override\n  IconData get prefixIcon => Icons.question_answer;\n\n  @override\n  HomeScreenError get errorType => HomeScreenError.queryEmpty;\n\n  @override\n  String get errorText => 'Question cannot be empty';\n\n  @override\n  String onProviderChanged(final HomeScreenState state) => '';\n\n  @override\n  void onTextChanged(final String value) => cubit.onQueryChanged(value);\n}\n\nclass _SubmitButton extends StatelessWidget {\n  const _SubmitButton();\n\n  @override\n  Widget build(final BuildContext context) {\n    final cubit = context.read<HomeScreenCubit>();\n    return Center(\n      child: BlocBuilder<HomeScreenCubit, HomeScreenState>(\n        builder: (final context, final state) {\n          if (state.status == HomeScreenStatus.generating) {\n            return const CircularProgressIndicator();\n          }\n\n          return FilledButton(\n            onPressed: cubit.onSubmitPressed,\n            child: const Text('Submit'),\n          );\n        },\n     
 ),\n    );\n  }\n}\n\nclass _Response extends StatelessWidget {\n  const _Response();\n\n  @override\n  Widget build(final BuildContext context) {\n    return BlocBuilder<HomeScreenCubit, HomeScreenState>(\n      builder: (final context, final state) {\n        final response = state.response;\n        if (response.isEmpty) {\n          return const SizedBox.shrink();\n        }\n\n        final theme = Theme.of(context);\n        return Column(\n          mainAxisSize: MainAxisSize.min,\n          crossAxisAlignment: CrossAxisAlignment.start,\n          children: [\n            Text('Response', style: theme.textTheme.headlineSmall),\n            Markdown(\n              data: state.response,\n              shrinkWrap: true,\n              padding: EdgeInsets.zero,\n            ),\n          ],\n        );\n      },\n    );\n  }\n}\n\nabstract class _BaseTextField extends StatefulWidget {\n  const _BaseTextField();\n\n  String get labelText;\n\n  bool get obscureText;\n\n  IconData get prefixIcon;\n\n  HomeScreenError get errorType;\n\n  String get errorText;\n\n  String onProviderChanged(final HomeScreenState state);\n\n  void onTextChanged(final String value);\n\n  @override\n  _BaseTextFieldState createState() => _BaseTextFieldState();\n}\n\nclass _BaseTextFieldState extends State<_BaseTextField> {\n  late TextEditingController _controller;\n\n  @override\n  void initState() {\n    super.initState();\n    _controller = TextEditingController();\n  }\n\n  @override\n  Widget build(BuildContext context) {\n    return BlocBuilder<HomeScreenCubit, HomeScreenState>(\n      buildWhen: (previous, current) =>\n          previous.provider != current.provider ||\n          previous.error != current.error,\n      builder: (context, state) {\n        _controller.text = widget.onProviderChanged(state);\n        return TextField(\n          controller: _controller,\n          obscureText: widget.obscureText,\n          decoration: InputDecoration(\n            prefixIcon: 
Icon(widget.prefixIcon),\n            labelText: widget.labelText,\n            filled: true,\n            errorText: state.error == widget.errorType\n                ? widget.errorText\n                : null,\n          ),\n          onChanged: widget.onTextChanged,\n        );\n      },\n    );\n  }\n\n  @override\n  void dispose() {\n    _controller.dispose();\n    super.dispose();\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/lib/main.dart",
    "content": "import 'package:flutter/material.dart';\n\nimport 'app.dart';\n\nvoid main() {\n  runApp(const MyApp());\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/.gitignore",
    "content": "flutter/ephemeral\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/CMakeLists.txt",
    "content": "# Project-level configuration.\ncmake_minimum_required(VERSION 3.10)\nproject(runner LANGUAGES CXX)\n\n# The name of the executable created for the application. Change this to change\n# the on-disk name of your application.\nset(BINARY_NAME \"hello_world_flutter\")\n# The unique GTK application identifier for this application. See:\n# https://wiki.gnome.org/HowDoI/ChooseApplicationID\nset(APPLICATION_ID \"com.example.hello_world_flutter\")\n\n# Explicitly opt in to modern CMake behaviors to avoid warnings with recent\n# versions of CMake.\ncmake_policy(SET CMP0063 NEW)\n\n# Load bundled libraries from the lib/ directory relative to the binary.\nset(CMAKE_INSTALL_RPATH \"$ORIGIN/lib\")\n\n# Root filesystem for cross-building.\nif(FLUTTER_TARGET_PLATFORM_SYSROOT)\n  set(CMAKE_SYSROOT ${FLUTTER_TARGET_PLATFORM_SYSROOT})\n  set(CMAKE_FIND_ROOT_PATH ${CMAKE_SYSROOT})\n  set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)\n  set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)\n  set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)\n  set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)\nendif()\n\n# Define build configuration options.\nif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)\n  set(CMAKE_BUILD_TYPE \"Debug\" CACHE\n    STRING \"Flutter build mode\" FORCE)\n  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS\n    \"Debug\" \"Profile\" \"Release\")\nendif()\n\n# Compilation settings that should be applied to most targets.\n#\n# Be cautious about adding new options here, as plugins use this function by\n# default. 
In most cases, you should add new options to specific targets instead\n# of modifying this function.\nfunction(APPLY_STANDARD_SETTINGS TARGET)\n  target_compile_features(${TARGET} PUBLIC cxx_std_14)\n  target_compile_options(${TARGET} PRIVATE -Wall -Werror)\n  target_compile_options(${TARGET} PRIVATE \"$<$<NOT:$<CONFIG:Debug>>:-O3>\")\n  target_compile_definitions(${TARGET} PRIVATE \"$<$<NOT:$<CONFIG:Debug>>:NDEBUG>\")\nendfunction()\n\n# Flutter library and tool build rules.\nset(FLUTTER_MANAGED_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/flutter\")\nadd_subdirectory(${FLUTTER_MANAGED_DIR})\n\n# System-level dependencies.\nfind_package(PkgConfig REQUIRED)\npkg_check_modules(GTK REQUIRED IMPORTED_TARGET gtk+-3.0)\n\nadd_definitions(-DAPPLICATION_ID=\"${APPLICATION_ID}\")\n\n# Define the application target. To change its name, change BINARY_NAME above,\n# not the value here, or `flutter run` will no longer work.\n#\n# Any new source files that you add to the application should be added here.\nadd_executable(${BINARY_NAME}\n  \"main.cc\"\n  \"my_application.cc\"\n  \"${FLUTTER_MANAGED_DIR}/generated_plugin_registrant.cc\"\n)\n\n# Apply the standard set of build settings. This can be removed for applications\n# that need different build settings.\napply_standard_settings(${BINARY_NAME})\n\n# Add dependency libraries. Add any application-specific dependencies here.\ntarget_link_libraries(${BINARY_NAME} PRIVATE flutter)\ntarget_link_libraries(${BINARY_NAME} PRIVATE PkgConfig::GTK)\n\n# Run the Flutter tool portions of the build. This must not be removed.\nadd_dependencies(${BINARY_NAME} flutter_assemble)\n\n# Only the install-generated bundle's copy of the executable will launch\n# correctly, since the resources must in the right relative locations. 
To avoid\n# people trying to run the unbundled copy, put it in a subdirectory instead of\n# the default top-level location.\nset_target_properties(${BINARY_NAME}\n  PROPERTIES\n  RUNTIME_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/intermediates_do_not_run\"\n)\n\n\n# Generated plugin build rules, which manage building the plugins and adding\n# them to the application.\ninclude(flutter/generated_plugins.cmake)\n\n\n# === Installation ===\n# By default, \"installing\" just makes a relocatable bundle in the build\n# directory.\nset(BUILD_BUNDLE_DIR \"${PROJECT_BINARY_DIR}/bundle\")\nif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)\n  set(CMAKE_INSTALL_PREFIX \"${BUILD_BUNDLE_DIR}\" CACHE PATH \"...\" FORCE)\nendif()\n\n# Start with a clean build bundle directory every time.\ninstall(CODE \"\n  file(REMOVE_RECURSE \\\"${BUILD_BUNDLE_DIR}/\\\")\n  \" COMPONENT Runtime)\n\nset(INSTALL_BUNDLE_DATA_DIR \"${CMAKE_INSTALL_PREFIX}/data\")\nset(INSTALL_BUNDLE_LIB_DIR \"${CMAKE_INSTALL_PREFIX}/lib\")\n\ninstall(TARGETS ${BINARY_NAME} RUNTIME DESTINATION \"${CMAKE_INSTALL_PREFIX}\"\n  COMPONENT Runtime)\n\ninstall(FILES \"${FLUTTER_ICU_DATA_FILE}\" DESTINATION \"${INSTALL_BUNDLE_DATA_DIR}\"\n  COMPONENT Runtime)\n\ninstall(FILES \"${FLUTTER_LIBRARY}\" DESTINATION \"${INSTALL_BUNDLE_LIB_DIR}\"\n  COMPONENT Runtime)\n\nforeach(bundled_library ${PLUGIN_BUNDLED_LIBRARIES})\n  install(FILES \"${bundled_library}\"\n    DESTINATION \"${INSTALL_BUNDLE_LIB_DIR}\"\n    COMPONENT Runtime)\nendforeach(bundled_library)\n\n# Fully re-copy the assets directory on each build to avoid having stale files\n# from a previous install.\nset(FLUTTER_ASSET_DIR_NAME \"flutter_assets\")\ninstall(CODE \"\n  file(REMOVE_RECURSE \\\"${INSTALL_BUNDLE_DATA_DIR}/${FLUTTER_ASSET_DIR_NAME}\\\")\n  \" COMPONENT Runtime)\ninstall(DIRECTORY \"${PROJECT_BUILD_DIR}/${FLUTTER_ASSET_DIR_NAME}\"\n  DESTINATION \"${INSTALL_BUNDLE_DATA_DIR}\" COMPONENT Runtime)\n\n# Install the AOT library on non-Debug builds only.\nif(NOT 
CMAKE_BUILD_TYPE MATCHES \"Debug\")\n  install(FILES \"${AOT_LIBRARY}\" DESTINATION \"${INSTALL_BUNDLE_LIB_DIR}\"\n    COMPONENT Runtime)\nendif()\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/flutter/CMakeLists.txt",
    "content": "# This file controls Flutter-level build steps. It should not be edited.\ncmake_minimum_required(VERSION 3.10)\n\nset(EPHEMERAL_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/ephemeral\")\n\n# Configuration provided via flutter tool.\ninclude(${EPHEMERAL_DIR}/generated_config.cmake)\n\n# TODO: Move the rest of this into files in ephemeral. See\n# https://github.com/flutter/flutter/issues/57146.\n\n# Serves the same purpose as list(TRANSFORM ... PREPEND ...),\n# which isn't available in 3.10.\nfunction(list_prepend LIST_NAME PREFIX)\n    set(NEW_LIST \"\")\n    foreach(element ${${LIST_NAME}})\n        list(APPEND NEW_LIST \"${PREFIX}${element}\")\n    endforeach(element)\n    set(${LIST_NAME} \"${NEW_LIST}\" PARENT_SCOPE)\nendfunction()\n\n# === Flutter Library ===\n# System-level dependencies.\nfind_package(PkgConfig REQUIRED)\npkg_check_modules(GTK REQUIRED IMPORTED_TARGET gtk+-3.0)\npkg_check_modules(GLIB REQUIRED IMPORTED_TARGET glib-2.0)\npkg_check_modules(GIO REQUIRED IMPORTED_TARGET gio-2.0)\n\nset(FLUTTER_LIBRARY \"${EPHEMERAL_DIR}/libflutter_linux_gtk.so\")\n\n# Published to parent scope for install step.\nset(FLUTTER_LIBRARY ${FLUTTER_LIBRARY} PARENT_SCOPE)\nset(FLUTTER_ICU_DATA_FILE \"${EPHEMERAL_DIR}/icudtl.dat\" PARENT_SCOPE)\nset(PROJECT_BUILD_DIR \"${PROJECT_DIR}/build/\" PARENT_SCOPE)\nset(AOT_LIBRARY \"${PROJECT_DIR}/build/lib/libapp.so\" PARENT_SCOPE)\n\nlist(APPEND FLUTTER_LIBRARY_HEADERS\n  \"fl_basic_message_channel.h\"\n  \"fl_binary_codec.h\"\n  \"fl_binary_messenger.h\"\n  \"fl_dart_project.h\"\n  \"fl_engine.h\"\n  \"fl_json_message_codec.h\"\n  \"fl_json_method_codec.h\"\n  \"fl_message_codec.h\"\n  \"fl_method_call.h\"\n  \"fl_method_channel.h\"\n  \"fl_method_codec.h\"\n  \"fl_method_response.h\"\n  \"fl_plugin_registrar.h\"\n  \"fl_plugin_registry.h\"\n  \"fl_standard_message_codec.h\"\n  \"fl_standard_method_codec.h\"\n  \"fl_string_codec.h\"\n  \"fl_value.h\"\n  \"fl_view.h\"\n  
\"flutter_linux.h\"\n)\nlist_prepend(FLUTTER_LIBRARY_HEADERS \"${EPHEMERAL_DIR}/flutter_linux/\")\nadd_library(flutter INTERFACE)\ntarget_include_directories(flutter INTERFACE\n  \"${EPHEMERAL_DIR}\"\n)\ntarget_link_libraries(flutter INTERFACE \"${FLUTTER_LIBRARY}\")\ntarget_link_libraries(flutter INTERFACE\n  PkgConfig::GTK\n  PkgConfig::GLIB\n  PkgConfig::GIO\n)\nadd_dependencies(flutter flutter_assemble)\n\n# === Flutter tool backend ===\n# _phony_ is a non-existent file to force this command to run every time,\n# since currently there's no way to get a full input/output list from the\n# flutter tool.\nadd_custom_command(\n  OUTPUT ${FLUTTER_LIBRARY} ${FLUTTER_LIBRARY_HEADERS}\n    ${CMAKE_CURRENT_BINARY_DIR}/_phony_\n  COMMAND ${CMAKE_COMMAND} -E env\n    ${FLUTTER_TOOL_ENVIRONMENT}\n    \"${FLUTTER_ROOT}/packages/flutter_tools/bin/tool_backend.sh\"\n      ${FLUTTER_TARGET_PLATFORM} ${CMAKE_BUILD_TYPE}\n  VERBATIM\n)\nadd_custom_target(flutter_assemble DEPENDS\n  \"${FLUTTER_LIBRARY}\"\n  ${FLUTTER_LIBRARY_HEADERS}\n)\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/flutter/generated_plugin_registrant.cc",
    "content": "//\n//  Generated file. Do not edit.\n//\n\n// clang-format off\n\n#include \"generated_plugin_registrant.h\"\n\n\nvoid fl_register_plugins(FlPluginRegistry* registry) {\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/flutter/generated_plugin_registrant.h",
    "content": "//\n//  Generated file. Do not edit.\n//\n\n// clang-format off\n\n#ifndef GENERATED_PLUGIN_REGISTRANT_\n#define GENERATED_PLUGIN_REGISTRANT_\n\n#include <flutter_linux/flutter_linux.h>\n\n// Registers Flutter plugins.\nvoid fl_register_plugins(FlPluginRegistry* registry);\n\n#endif  // GENERATED_PLUGIN_REGISTRANT_\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/flutter/generated_plugins.cmake",
    "content": "#\n# Generated file, do not edit.\n#\n\nlist(APPEND FLUTTER_PLUGIN_LIST\n)\n\nlist(APPEND FLUTTER_FFI_PLUGIN_LIST\n)\n\nset(PLUGIN_BUNDLED_LIBRARIES)\n\nforeach(plugin ${FLUTTER_PLUGIN_LIST})\n  add_subdirectory(flutter/ephemeral/.plugin_symlinks/${plugin}/linux plugins/${plugin})\n  target_link_libraries(${BINARY_NAME} PRIVATE ${plugin}_plugin)\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES $<TARGET_FILE:${plugin}_plugin>)\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES ${${plugin}_bundled_libraries})\nendforeach(plugin)\n\nforeach(ffi_plugin ${FLUTTER_FFI_PLUGIN_LIST})\n  add_subdirectory(flutter/ephemeral/.plugin_symlinks/${ffi_plugin}/linux plugins/${ffi_plugin})\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES ${${ffi_plugin}_bundled_libraries})\nendforeach(ffi_plugin)\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/main.cc",
    "content": "#include \"my_application.h\"\n\nint main(int argc, char** argv) {\n  g_autoptr(MyApplication) app = my_application_new();\n  return g_application_run(G_APPLICATION(app), argc, argv);\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/my_application.cc",
    "content": "#include \"my_application.h\"\n\n#include <flutter_linux/flutter_linux.h>\n#ifdef GDK_WINDOWING_X11\n#include <gdk/gdkx.h>\n#endif\n\n#include \"flutter/generated_plugin_registrant.h\"\n\nstruct _MyApplication {\n  GtkApplication parent_instance;\n  char** dart_entrypoint_arguments;\n};\n\nG_DEFINE_TYPE(MyApplication, my_application, GTK_TYPE_APPLICATION)\n\n// Implements GApplication::activate.\nstatic void my_application_activate(GApplication* application) {\n  MyApplication* self = MY_APPLICATION(application);\n  GtkWindow* window =\n      GTK_WINDOW(gtk_application_window_new(GTK_APPLICATION(application)));\n\n  // Use a header bar when running in GNOME as this is the common style used\n  // by applications and is the setup most users will be using (e.g. Ubuntu\n  // desktop).\n  // If running on X and not using GNOME then just use a traditional title bar\n  // in case the window manager does more exotic layout, e.g. tiling.\n  // If running on Wayland assume the header bar will work (may need changing\n  // if future cases occur).\n  gboolean use_header_bar = TRUE;\n#ifdef GDK_WINDOWING_X11\n  GdkScreen* screen = gtk_window_get_screen(window);\n  if (GDK_IS_X11_SCREEN(screen)) {\n    const gchar* wm_name = gdk_x11_screen_get_window_manager_name(screen);\n    if (g_strcmp0(wm_name, \"GNOME Shell\") != 0) {\n      use_header_bar = FALSE;\n    }\n  }\n#endif\n  if (use_header_bar) {\n    GtkHeaderBar* header_bar = GTK_HEADER_BAR(gtk_header_bar_new());\n    gtk_widget_show(GTK_WIDGET(header_bar));\n    gtk_header_bar_set_title(header_bar, \"hello_world_flutter\");\n    gtk_header_bar_set_show_close_button(header_bar, TRUE);\n    gtk_window_set_titlebar(window, GTK_WIDGET(header_bar));\n  } else {\n    gtk_window_set_title(window, \"hello_world_flutter\");\n  }\n\n  gtk_window_set_default_size(window, 1280, 720);\n  gtk_widget_show(GTK_WIDGET(window));\n\n  g_autoptr(FlDartProject) project = fl_dart_project_new();\n  
fl_dart_project_set_dart_entrypoint_arguments(project, self->dart_entrypoint_arguments);\n\n  FlView* view = fl_view_new(project);\n  gtk_widget_show(GTK_WIDGET(view));\n  gtk_container_add(GTK_CONTAINER(window), GTK_WIDGET(view));\n\n  fl_register_plugins(FL_PLUGIN_REGISTRY(view));\n\n  gtk_widget_grab_focus(GTK_WIDGET(view));\n}\n\n// Implements GApplication::local_command_line.\nstatic gboolean my_application_local_command_line(GApplication* application, gchar*** arguments, int* exit_status) {\n  MyApplication* self = MY_APPLICATION(application);\n  // Strip out the first argument as it is the binary name.\n  self->dart_entrypoint_arguments = g_strdupv(*arguments + 1);\n\n  g_autoptr(GError) error = nullptr;\n  if (!g_application_register(application, nullptr, &error)) {\n     g_warning(\"Failed to register: %s\", error->message);\n     *exit_status = 1;\n     return TRUE;\n  }\n\n  g_application_activate(application);\n  *exit_status = 0;\n\n  return TRUE;\n}\n\n// Implements GObject::dispose.\nstatic void my_application_dispose(GObject* object) {\n  MyApplication* self = MY_APPLICATION(object);\n  g_clear_pointer(&self->dart_entrypoint_arguments, g_strfreev);\n  G_OBJECT_CLASS(my_application_parent_class)->dispose(object);\n}\n\nstatic void my_application_class_init(MyApplicationClass* klass) {\n  G_APPLICATION_CLASS(klass)->activate = my_application_activate;\n  G_APPLICATION_CLASS(klass)->local_command_line = my_application_local_command_line;\n  G_OBJECT_CLASS(klass)->dispose = my_application_dispose;\n}\n\nstatic void my_application_init(MyApplication* self) {}\n\nMyApplication* my_application_new() {\n  return MY_APPLICATION(g_object_new(my_application_get_type(),\n                                     \"application-id\", APPLICATION_ID,\n                                     \"flags\", G_APPLICATION_NON_UNIQUE,\n                                     nullptr));\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/linux/my_application.h",
    "content": "#ifndef FLUTTER_MY_APPLICATION_H_\n#define FLUTTER_MY_APPLICATION_H_\n\n#include <gtk/gtk.h>\n\nG_DECLARE_FINAL_TYPE(MyApplication, my_application, MY, APPLICATION,\n                     GtkApplication)\n\n/**\n * my_application_new:\n *\n * Creates a new Flutter-based application.\n *\n * Returns: a new #MyApplication.\n */\nMyApplication* my_application_new();\n\n#endif  // FLUTTER_MY_APPLICATION_H_\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/.gitignore",
    "content": "# Flutter-related\n**/Flutter/ephemeral/\n**/Pods/\n\n# Xcode-related\n**/dgph\n**/xcuserdata/\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Flutter/Flutter-Debug.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"\n#include \"ephemeral/Flutter-Generated.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Flutter/Flutter-Release.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"\n#include \"ephemeral/Flutter-Generated.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Flutter/GeneratedPluginRegistrant.swift",
    "content": "//\n//  Generated file. Do not edit.\n//\n\nimport FlutterMacOS\nimport Foundation\n\n\nfunc RegisterGeneratedPlugins(registry: FlutterPluginRegistry) {\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Podfile",
    "content": "platform :osx, '10.15'\n\n# CocoaPods analytics sends network stats synchronously affecting flutter build latency.\nENV['COCOAPODS_DISABLE_STATS'] = 'true'\n\nproject 'Runner', {\n  'Debug' => :debug,\n  'Profile' => :release,\n  'Release' => :release,\n}\n\ndef flutter_root\n  generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'ephemeral', 'Flutter-Generated.xcconfig'), __FILE__)\n  unless File.exist?(generated_xcode_build_settings_path)\n    raise \"#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure \\\"flutter pub get\\\" is executed first\"\n  end\n\n  File.foreach(generated_xcode_build_settings_path) do |line|\n    matches = line.match(/FLUTTER_ROOT\\=(.*)/)\n    return matches[1].strip if matches\n  end\n  raise \"FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Flutter-Generated.xcconfig, then run \\\"flutter pub get\\\"\"\nend\n\nrequire File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)\n\nflutter_macos_podfile_setup\n\ntarget 'Runner' do\n  use_frameworks!\n\n  flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__))\n  target 'RunnerTests' do\n    inherit! :search_paths\n  end\nend\n\npost_install do |installer|\n  installer.pods_project.targets.each do |target|\n    flutter_additional_macos_build_settings(target)\n  end\nend\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/AppDelegate.swift",
    "content": "import Cocoa\nimport FlutterMacOS\n\n@main\nclass AppDelegate: FlutterAppDelegate {\n  override func applicationShouldTerminateAfterLastWindowClosed(_ sender: NSApplication) -> Bool {\n    return true\n  }\n\n  override func applicationSupportsSecureRestorableState(_ app: NSApplication) -> Bool {\n    return true\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"size\" : \"16x16\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_16.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"16x16\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_32.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"32x32\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_32.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"32x32\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_64.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"128x128\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_128.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"128x128\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_256.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"256x256\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_256.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"256x256\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_512.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"512x512\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_512.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"512x512\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_1024.png\",\n      \"scale\" : \"2x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Base.lproj/MainMenu.xib",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<document type=\"com.apple.InterfaceBuilder3.Cocoa.XIB\" version=\"3.0\" toolsVersion=\"14490.70\" targetRuntime=\"MacOSX.Cocoa\" propertyAccessControl=\"none\" useAutolayout=\"YES\" customObjectInstantitationMethod=\"direct\">\n    <dependencies>\n        <deployment identifier=\"macosx\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.CocoaPlugin\" version=\"14490.70\"/>\n        <capability name=\"documents saved in the Xcode 8 format\" minToolsVersion=\"8.0\"/>\n    </dependencies>\n    <objects>\n        <customObject id=\"-2\" userLabel=\"File's Owner\" customClass=\"NSApplication\">\n            <connections>\n                <outlet property=\"delegate\" destination=\"Voe-Tx-rLC\" id=\"GzC-gU-4Uq\"/>\n            </connections>\n        </customObject>\n        <customObject id=\"-1\" userLabel=\"First Responder\" customClass=\"FirstResponder\"/>\n        <customObject id=\"-3\" userLabel=\"Application\" customClass=\"NSObject\"/>\n        <customObject id=\"Voe-Tx-rLC\" customClass=\"AppDelegate\" customModule=\"Runner\" customModuleProvider=\"target\">\n            <connections>\n                <outlet property=\"applicationMenu\" destination=\"uQy-DD-JDr\" id=\"XBo-yE-nKs\"/>\n                <outlet property=\"mainFlutterWindow\" destination=\"QvC-M9-y7g\" id=\"gIp-Ho-8D9\"/>\n            </connections>\n        </customObject>\n        <customObject id=\"YLy-65-1bz\" customClass=\"NSFontManager\"/>\n        <menu title=\"Main Menu\" systemMenu=\"main\" id=\"AYu-sK-qS6\">\n            <items>\n                <menuItem title=\"APP_NAME\" id=\"1Xt-HY-uBw\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"APP_NAME\" systemMenu=\"apple\" id=\"uQy-DD-JDr\">\n                        <items>\n                            <menuItem title=\"About APP_NAME\" id=\"5kV-Vb-QxS\">\n                                
<modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"orderFrontStandardAboutPanel:\" target=\"-1\" id=\"Exp-CZ-Vem\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"VOq-y0-SEH\"/>\n                            <menuItem title=\"Preferences…\" keyEquivalent=\",\" id=\"BOF-NM-1cW\"/>\n                            <menuItem isSeparatorItem=\"YES\" id=\"wFC-TO-SCJ\"/>\n                            <menuItem title=\"Services\" id=\"NMo-om-nkz\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Services\" systemMenu=\"services\" id=\"hz9-B4-Xy5\"/>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"4je-JR-u6R\"/>\n                            <menuItem title=\"Hide APP_NAME\" keyEquivalent=\"h\" id=\"Olw-nP-bQN\">\n                                <connections>\n                                    <action selector=\"hide:\" target=\"-1\" id=\"PnN-Uc-m68\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Hide Others\" keyEquivalent=\"h\" id=\"Vdr-fp-XzO\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"hideOtherApplications:\" target=\"-1\" id=\"VT4-aY-XCT\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Show All\" id=\"Kd2-mp-pUS\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n             
                       <action selector=\"unhideAllApplications:\" target=\"-1\" id=\"Dhg-Le-xox\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"kCx-OE-vgT\"/>\n                            <menuItem title=\"Quit APP_NAME\" keyEquivalent=\"q\" id=\"4sb-4s-VLi\">\n                                <connections>\n                                    <action selector=\"terminate:\" target=\"-1\" id=\"Te7-pn-YzF\"/>\n                                </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Edit\" id=\"5QF-Oa-p0T\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Edit\" id=\"W48-6f-4Dl\">\n                        <items>\n                            <menuItem title=\"Undo\" keyEquivalent=\"z\" id=\"dRJ-4n-Yzg\">\n                                <connections>\n                                    <action selector=\"undo:\" target=\"-1\" id=\"M6e-cu-g7V\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Redo\" keyEquivalent=\"Z\" id=\"6dh-zS-Vam\">\n                                <connections>\n                                    <action selector=\"redo:\" target=\"-1\" id=\"oIA-Rs-6OD\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"WRV-NI-Exz\"/>\n                            <menuItem title=\"Cut\" keyEquivalent=\"x\" id=\"uRl-iY-unG\">\n                                <connections>\n                                    <action selector=\"cut:\" target=\"-1\" id=\"YJe-68-I9s\"/>\n                                </connections>\n                            
</menuItem>\n                            <menuItem title=\"Copy\" keyEquivalent=\"c\" id=\"x3v-GG-iWU\">\n                                <connections>\n                                    <action selector=\"copy:\" target=\"-1\" id=\"G1f-GL-Joy\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Paste\" keyEquivalent=\"v\" id=\"gVA-U4-sdL\">\n                                <connections>\n                                    <action selector=\"paste:\" target=\"-1\" id=\"UvS-8e-Qdg\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Paste and Match Style\" keyEquivalent=\"V\" id=\"WeT-3V-zwk\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"pasteAsPlainText:\" target=\"-1\" id=\"cEh-KX-wJQ\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Delete\" id=\"pa3-QI-u2k\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"delete:\" target=\"-1\" id=\"0Mk-Ml-PaM\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Select All\" keyEquivalent=\"a\" id=\"Ruw-6m-B2m\">\n                                <connections>\n                                    <action selector=\"selectAll:\" target=\"-1\" id=\"VNm-Mi-diN\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"uyl-h8-XO2\"/>\n                            <menuItem 
title=\"Find\" id=\"4EN-yA-p0u\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Find\" id=\"1b7-l0-nxx\">\n                                    <items>\n                                        <menuItem title=\"Find…\" tag=\"1\" keyEquivalent=\"f\" id=\"Xz5-n4-O0W\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"cD7-Qs-BN4\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find and Replace…\" tag=\"12\" keyEquivalent=\"f\" id=\"YEy-JH-Tfz\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"WD3-Gg-5AJ\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find Next\" tag=\"2\" keyEquivalent=\"g\" id=\"q09-fT-Sye\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"NDo-RZ-v9R\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find Previous\" tag=\"3\" keyEquivalent=\"G\" id=\"OwM-mh-QMV\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"HOh-sY-3ay\"/>\n                                            
</connections>\n                                        </menuItem>\n                                        <menuItem title=\"Use Selection for Find\" tag=\"7\" keyEquivalent=\"e\" id=\"buJ-ug-pKt\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"U76-nv-p5D\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Jump to Selection\" keyEquivalent=\"j\" id=\"S0p-oC-mLd\">\n                                            <connections>\n                                                <action selector=\"centerSelectionInVisibleArea:\" target=\"-1\" id=\"IOG-6D-g5B\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem title=\"Spelling and Grammar\" id=\"Dv1-io-Yv7\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Spelling\" id=\"3IN-sU-3Bg\">\n                                    <items>\n                                        <menuItem title=\"Show Spelling and Grammar\" keyEquivalent=\":\" id=\"HFo-cy-zxI\">\n                                            <connections>\n                                                <action selector=\"showGuessPanel:\" target=\"-1\" id=\"vFj-Ks-hy3\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Check Document Now\" keyEquivalent=\";\" id=\"hz2-CU-CR7\">\n                                            <connections>\n                                        
        <action selector=\"checkSpelling:\" target=\"-1\" id=\"fz7-VC-reM\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem isSeparatorItem=\"YES\" id=\"bNw-od-mp5\"/>\n                                        <menuItem title=\"Check Spelling While Typing\" id=\"rbD-Rh-wIN\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleContinuousSpellChecking:\" target=\"-1\" id=\"7w6-Qz-0kB\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Check Grammar With Spelling\" id=\"mK6-2p-4JG\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleGrammarChecking:\" target=\"-1\" id=\"muD-Qn-j4w\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Correct Spelling Automatically\" id=\"78Y-hA-62v\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticSpellingCorrection:\" target=\"-1\" id=\"2lM-Qi-WAP\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem 
title=\"Substitutions\" id=\"9ic-FL-obx\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Substitutions\" id=\"FeM-D8-WVr\">\n                                    <items>\n                                        <menuItem title=\"Show Substitutions\" id=\"z6F-FW-3nz\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"orderFrontSubstitutionsPanel:\" target=\"-1\" id=\"oku-mr-iSq\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem isSeparatorItem=\"YES\" id=\"gPx-C9-uUO\"/>\n                                        <menuItem title=\"Smart Copy/Paste\" id=\"9yt-4B-nSM\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleSmartInsertDelete:\" target=\"-1\" id=\"3IJ-Se-DZD\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Quotes\" id=\"hQb-2v-fYv\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticQuoteSubstitution:\" target=\"-1\" id=\"ptq-xd-QOA\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Dashes\" id=\"rgM-f4-ycn\">\n                                            
<modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticDashSubstitution:\" target=\"-1\" id=\"oCt-pO-9gS\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Links\" id=\"cwL-P1-jid\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticLinkDetection:\" target=\"-1\" id=\"Gip-E3-Fov\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Data Detectors\" id=\"tRr-pd-1PS\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticDataDetection:\" target=\"-1\" id=\"R1I-Nq-Kbl\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Text Replacement\" id=\"HFQ-gK-NFA\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticTextReplacement:\" target=\"-1\" id=\"DvP-Fe-Py6\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                       
     <menuItem title=\"Transformations\" id=\"2oI-Rn-ZJC\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Transformations\" id=\"c8a-y6-VQd\">\n                                    <items>\n                                        <menuItem title=\"Make Upper Case\" id=\"vmV-6d-7jI\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"uppercaseWord:\" target=\"-1\" id=\"sPh-Tk-edu\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Make Lower Case\" id=\"d9M-CD-aMd\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"lowercaseWord:\" target=\"-1\" id=\"iUZ-b5-hil\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Capitalize\" id=\"UEZ-Bs-lqG\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"capitalizeWord:\" target=\"-1\" id=\"26H-TL-nsh\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem title=\"Speech\" id=\"xrE-MZ-jX0\">\n                                <modifierMask 
key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Speech\" id=\"3rS-ZA-NoH\">\n                                    <items>\n                                        <menuItem title=\"Start Speaking\" id=\"Ynk-f8-cLZ\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"startSpeaking:\" target=\"-1\" id=\"654-Ng-kyl\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Stop Speaking\" id=\"Oyz-dy-DGm\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"stopSpeaking:\" target=\"-1\" id=\"dX8-6p-jy9\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"View\" id=\"H8h-7b-M4v\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"View\" id=\"HyV-fh-RgO\">\n                        <items>\n                            <menuItem title=\"Enter Full Screen\" keyEquivalent=\"f\" id=\"4J7-dP-txa\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" control=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"toggleFullScreen:\" target=\"-1\" id=\"dU3-MA-1Rq\"/>\n                           
     </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Window\" id=\"aUF-d1-5bR\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Window\" systemMenu=\"window\" id=\"Td7-aD-5lo\">\n                        <items>\n                            <menuItem title=\"Minimize\" keyEquivalent=\"m\" id=\"OY7-WF-poV\">\n                                <connections>\n                                    <action selector=\"performMiniaturize:\" target=\"-1\" id=\"VwT-WD-YPe\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Zoom\" id=\"R4o-n2-Eq4\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"performZoom:\" target=\"-1\" id=\"DIl-cC-cCs\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"eu3-7i-yIM\"/>\n                            <menuItem title=\"Bring All to Front\" id=\"LE2-aR-0XJ\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"arrangeInFront:\" target=\"-1\" id=\"DRN-fu-gQh\"/>\n                                </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Help\" id=\"EPT-qC-fAb\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Help\" systemMenu=\"help\" id=\"rJ0-wn-3NY\"/>\n                
</menuItem>\n            </items>\n            <point key=\"canvasLocation\" x=\"142\" y=\"-258\"/>\n        </menu>\n        <window title=\"APP_NAME\" allowsToolTipsWhenApplicationIsInactive=\"NO\" autorecalculatesKeyViewLoop=\"NO\" releasedWhenClosed=\"NO\" animationBehavior=\"default\" id=\"QvC-M9-y7g\" customClass=\"MainFlutterWindow\" customModule=\"Runner\" customModuleProvider=\"target\">\n            <windowStyleMask key=\"styleMask\" titled=\"YES\" closable=\"YES\" miniaturizable=\"YES\" resizable=\"YES\"/>\n            <rect key=\"contentRect\" x=\"335\" y=\"390\" width=\"800\" height=\"600\"/>\n            <rect key=\"screenRect\" x=\"0.0\" y=\"0.0\" width=\"2560\" height=\"1577\"/>\n            <view key=\"contentView\" wantsLayer=\"YES\" id=\"EiT-Mj-1SZ\">\n                <rect key=\"frame\" x=\"0.0\" y=\"0.0\" width=\"800\" height=\"600\"/>\n                <autoresizingMask key=\"autoresizingMask\"/>\n            </view>\n        </window>\n    </objects>\n</document>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Configs/AppInfo.xcconfig",
    "content": "// Application-level settings for the Runner target.\n//\n// This may be replaced with something auto-generated from metadata (e.g., pubspec.yaml) in the\n// future. If not, the values below would default to using the project name when this becomes a\n// 'flutter create' template.\n\n// The application's name. By default this is also the title of the Flutter window.\nPRODUCT_NAME = hello_world_flutter\n\n// The application's bundle identifier\nPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter\n\n// The copyright displayed in application information\nPRODUCT_COPYRIGHT = Copyright © 2023 com.example. All rights reserved.\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Configs/Debug.xcconfig",
    "content": "#include \"../../Flutter/Flutter-Debug.xcconfig\"\n#include \"Warnings.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Configs/Release.xcconfig",
    "content": "#include \"../../Flutter/Flutter-Release.xcconfig\"\n#include \"Warnings.xcconfig\"\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Configs/Warnings.xcconfig",
    "content": "WARNING_CFLAGS = -Wall -Wconditional-uninitialized -Wnullable-to-nonnull-conversion -Wmissing-method-return-type -Woverlength-strings\nGCC_WARN_UNDECLARED_SELECTOR = YES\nCLANG_UNDEFINED_BEHAVIOR_SANITIZER_NULLABILITY = YES\nCLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE\nCLANG_WARN__DUPLICATE_METHOD_MATCH = YES\nCLANG_WARN_PRAGMA_PACK = YES\nCLANG_WARN_STRICT_PROTOTYPES = YES\nCLANG_WARN_COMMA = YES\nGCC_WARN_STRICT_SELECTOR_MATCH = YES\nCLANG_WARN_OBJC_REPEATED_USE_OF_WEAK = YES\nCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES\nGCC_WARN_SHADOW = YES\nCLANG_WARN_UNREACHABLE_CODE = YES\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/DebugProfile.entitlements",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>com.apple.security.app-sandbox</key>\n\t<true/>\n\t<key>com.apple.security.cs.allow-jit</key>\n\t<true/>\n\t<key>com.apple.security.network.client</key>\n\t<true/>\n\t<key>com.apple.security.network.server</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>$(DEVELOPMENT_LANGUAGE)</string>\n\t<key>CFBundleExecutable</key>\n\t<string>$(EXECUTABLE_NAME)</string>\n\t<key>CFBundleIconFile</key>\n\t<string></string>\n\t<key>CFBundleIdentifier</key>\n\t<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundleName</key>\n\t<string>$(PRODUCT_NAME)</string>\n\t<key>CFBundlePackageType</key>\n\t<string>APPL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>$(FLUTTER_BUILD_NAME)</string>\n\t<key>CFBundleVersion</key>\n\t<string>$(FLUTTER_BUILD_NUMBER)</string>\n\t<key>LSMinimumSystemVersion</key>\n\t<string>$(MACOSX_DEPLOYMENT_TARGET)</string>\n\t<key>NSHumanReadableCopyright</key>\n\t<string>$(PRODUCT_COPYRIGHT)</string>\n\t<key>NSMainNibFile</key>\n\t<string>MainMenu</string>\n\t<key>NSPrincipalClass</key>\n\t<string>NSApplication</string>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/MainFlutterWindow.swift",
    "content": "import Cocoa\nimport FlutterMacOS\n\nclass MainFlutterWindow: NSWindow {\n  override func awakeFromNib() {\n    let flutterViewController = FlutterViewController()\n    let windowFrame = self.frame\n    self.contentViewController = flutterViewController\n    self.setFrame(windowFrame, display: true)\n\n    RegisterGeneratedPlugins(registry: flutterViewController)\n\n    super.awakeFromNib()\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner/Release.entitlements",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>com.apple.security.app-sandbox</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 54;\n\tobjects = {\n\n/* Begin PBXAggregateTarget section */\n\t\t33CC111A2044C6BA0003C045 /* Flutter Assemble */ = {\n\t\t\tisa = PBXAggregateTarget;\n\t\t\tbuildConfigurationList = 33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget \"Flutter Assemble\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t33CC111E2044C6BF0003C045 /* ShellScript */,\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = \"Flutter Assemble\";\n\t\t\tproductName = FLX;\n\t\t};\n/* End PBXAggregateTarget section */\n\n/* Begin PBXBuildFile section */\n\t\t331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 331C80D7294CF71000263BE5 /* RunnerTests.swift */; };\n\t\t335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */ = {isa = PBXBuildFile; fileRef = 335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */; };\n\t\t33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC10F02044A3C60003C045 /* AppDelegate.swift */; };\n\t\t33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F22044A3C60003C045 /* Assets.xcassets */; };\n\t\t33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F42044A3C60003C045 /* MainMenu.xib */; };\n\t\t33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC11122044BFA00003C045 /* MainFlutterWindow.swift */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t331C80D9294CF71000263BE5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 33CC10E52044A3C60003C045 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 33CC10EC2044A3C60003C045;\n\t\t\tremoteInfo = Runner;\n\t\t};\n\t\t33CC111F2044C79F0003C045 /* PBXContainerItemProxy */ 
= {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 33CC10E52044A3C60003C045 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 33CC111A2044C6BA0003C045;\n\t\t\tremoteInfo = FLX;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXCopyFilesBuildPhase section */\n\t\t33CC110E2044A8840003C045 /* Bundle Framework */ = {\n\t\t\tisa = PBXCopyFilesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tdstPath = \"\";\n\t\t\tdstSubfolderSpec = 10;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tname = \"Bundle Framework\";\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXCopyFilesBuildPhase section */\n\n/* Begin PBXFileReference section */\n\t\t331C80D5294CF71000263BE5 /* RunnerTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = RunnerTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t331C80D7294CF71000263BE5 /* RunnerTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RunnerTests.swift; sourceTree = \"<group>\"; };\n\t\t333000ED22D3DE5D00554162 /* Warnings.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Warnings.xcconfig; sourceTree = \"<group>\"; };\n\t\t335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GeneratedPluginRegistrant.swift; sourceTree = \"<group>\"; };\n\t\t33CC10ED2044A3C60003C045 /* hello_world_flutter.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = \"hello_world_flutter.app\"; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t33CC10F02044A3C60003C045 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = \"<group>\"; };\n\t\t33CC10F22044A3C60003C045 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; name = Assets.xcassets; 
path = Runner/Assets.xcassets; sourceTree = \"<group>\"; };\n\t\t33CC10F52044A3C60003C045 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/MainMenu.xib; sourceTree = \"<group>\"; };\n\t\t33CC10F72044A3C60003C045 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; name = Info.plist; path = Runner/Info.plist; sourceTree = \"<group>\"; };\n\t\t33CC11122044BFA00003C045 /* MainFlutterWindow.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MainFlutterWindow.swift; sourceTree = \"<group>\"; };\n\t\t33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = \"Flutter-Debug.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = \"Flutter-Release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; name = \"Flutter-Generated.xcconfig\"; path = \"ephemeral/Flutter-Generated.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33E51913231747F40026EE4D /* DebugProfile.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = DebugProfile.entitlements; sourceTree = \"<group>\"; };\n\t\t33E51914231749380026EE4D /* Release.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = Release.entitlements; sourceTree = \"<group>\"; };\n\t\t33E5194F232828860026EE4D /* AppInfo.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = AppInfo.xcconfig; sourceTree = \"<group>\"; };\n\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Release.xcconfig; sourceTree = \"<group>\"; };\n\t\t9740EEB21CF90195004384FC /* 
Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Debug.xcconfig; sourceTree = \"<group>\"; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t331C80D2294CF70F00263BE5 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10EA2044A3C60003C045 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t331C80D6294CF71000263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t331C80D7294CF71000263BE5 /* RunnerTests.swift */,\n\t\t\t);\n\t\t\tpath = RunnerTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33BA886A226E78AF003329D5 /* Configs */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33E5194F232828860026EE4D /* AppInfo.xcconfig */,\n\t\t\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */,\n\t\t\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */,\n\t\t\t\t333000ED22D3DE5D00554162 /* Warnings.xcconfig */,\n\t\t\t);\n\t\t\tpath = Configs;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC10E42044A3C60003C045 = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33FAB671232836740065AC1E /* Runner */,\n\t\t\t\t33CEB47122A05771004F2AC0 /* Flutter */,\n\t\t\t\t331C80D6294CF71000263BE5 /* RunnerTests */,\n\t\t\t\t33CC10EE2044A3C60003C045 /* Products */,\n\t\t\t\tD73912EC22F37F3D000D13A0 /* Frameworks */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC10EE2044A3C60003C045 /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10ED2044A3C60003C045 /* hello_world_flutter.app */,\n\t\t\t\t331C80D5294CF71000263BE5 /* RunnerTests.xctest */,\n\t\t\t);\n\t\t\tname = 
Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC11242044D66E0003C045 /* Resources */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F22044A3C60003C045 /* Assets.xcassets */,\n\t\t\t\t33CC10F42044A3C60003C045 /* MainMenu.xib */,\n\t\t\t\t33CC10F72044A3C60003C045 /* Info.plist */,\n\t\t\t);\n\t\t\tname = Resources;\n\t\t\tpath = ..;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CEB47122A05771004F2AC0 /* Flutter */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */,\n\t\t\t\t33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */,\n\t\t\t\t33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */,\n\t\t\t\t33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */,\n\t\t\t);\n\t\t\tpath = Flutter;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33FAB671232836740065AC1E /* Runner */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F02044A3C60003C045 /* AppDelegate.swift */,\n\t\t\t\t33CC11122044BFA00003C045 /* MainFlutterWindow.swift */,\n\t\t\t\t33E51913231747F40026EE4D /* DebugProfile.entitlements */,\n\t\t\t\t33E51914231749380026EE4D /* Release.entitlements */,\n\t\t\t\t33CC11242044D66E0003C045 /* Resources */,\n\t\t\t\t33BA886A226E78AF003329D5 /* Configs */,\n\t\t\t);\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\tD73912EC22F37F3D000D13A0 /* Frameworks */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t);\n\t\t\tname = Frameworks;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t331C80D4294CF70F00263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t331C80D1294CF70F00263BE5 /* Sources */,\n\t\t\t\t331C80D2294CF70F00263BE5 /* Frameworks */,\n\t\t\t\t331C80D3294CF70F00263BE5 /* Resources 
*/,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t331C80DA294CF71000263BE5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = RunnerTests;\n\t\t\tproductName = RunnerTests;\n\t\t\tproductReference = 331C80D5294CF71000263BE5 /* RunnerTests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.unit-test\";\n\t\t};\n\t\t33CC10EC2044A3C60003C045 /* Runner */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 33CC10FB2044A3C60003C045 /* Build configuration list for PBXNativeTarget \"Runner\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t33CC10E92044A3C60003C045 /* Sources */,\n\t\t\t\t33CC10EA2044A3C60003C045 /* Frameworks */,\n\t\t\t\t33CC10EB2044A3C60003C045 /* Resources */,\n\t\t\t\t33CC110E2044A8840003C045 /* Bundle Framework */,\n\t\t\t\t3399D490228B24CF009A79C7 /* ShellScript */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t33CC11202044C79F0003C045 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = Runner;\n\t\t\tproductName = Runner;\n\t\t\tproductReference = 33CC10ED2044A3C60003C045 /* hello_world_flutter.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t33CC10E52044A3C60003C045 /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tLastSwiftUpdateCheck = 0920;\n\t\t\t\tLastUpgradeCheck = 1510;\n\t\t\t\tORGANIZATIONNAME = \"\";\n\t\t\t\tTargetAttributes = {\n\t\t\t\t\t331C80D4294CF70F00263BE5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 14.0;\n\t\t\t\t\t\tTestTargetID = 33CC10EC2044A3C60003C045;\n\t\t\t\t\t};\n\t\t\t\t\t33CC10EC2044A3C60003C045 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 9.2;\n\t\t\t\t\t\tLastSwiftMigration = 1100;\n\t\t\t\t\t\tProvisioningStyle = Automatic;\n\t\t\t\t\t\tSystemCapabilities = {\n\t\t\t\t\t\t\tcom.apple.Sandbox = {\n\t\t\t\t\t\t\t\tenabled = 1;\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t};\n\t\t\t\t\t};\n\t\t\t\t\t33CC111A2044C6BA0003C045 = 
{\n\t\t\t\t\t\tCreatedOnToolsVersion = 9.2;\n\t\t\t\t\t\tProvisioningStyle = Manual;\n\t\t\t\t\t};\n\t\t\t\t};\n\t\t\t};\n\t\t\tbuildConfigurationList = 33CC10E82044A3C60003C045 /* Build configuration list for PBXProject \"Runner\" */;\n\t\t\tcompatibilityVersion = \"Xcode 9.3\";\n\t\t\tdevelopmentRegion = en;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t\tBase,\n\t\t\t);\n\t\t\tmainGroup = 33CC10E42044A3C60003C045;\n\t\t\tproductRefGroup = 33CC10EE2044A3C60003C045 /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = (\n\t\t\t\t33CC10EC2044A3C60003C045 /* Runner */,\n\t\t\t\t331C80D4294CF70F00263BE5 /* RunnerTests */,\n\t\t\t\t33CC111A2044C6BA0003C045 /* Flutter Assemble */,\n\t\t\t);\n\t\t};\n/* End PBXProject section */\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t331C80D3294CF70F00263BE5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10EB2044A3C60003C045 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */,\n\t\t\t\t33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXShellScriptBuildPhase section */\n\t\t3399D490228B24CF009A79C7 /* ShellScript */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t);\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"echo \\\"$PRODUCT_NAME.app\\\" > 
\\\"$PROJECT_DIR\\\"/Flutter/ephemeral/.app_filename && \\\"$FLUTTER_ROOT\\\"/packages/flutter_tools/bin/macos_assemble.sh embed\\n\";\n\t\t};\n\t\t33CC111E2044C6BF0003C045 /* ShellScript */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t\tFlutter/ephemeral/FlutterInputs.xcfilelist,\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\tFlutter/ephemeral/tripwire,\n\t\t\t);\n\t\t\toutputFileListPaths = (\n\t\t\t\tFlutter/ephemeral/FlutterOutputs.xcfilelist,\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"\\\"$FLUTTER_ROOT\\\"/packages/flutter_tools/bin/macos_assemble.sh && touch Flutter/ephemeral/tripwire\";\n\t\t};\n/* End PBXShellScriptBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t331C80D1294CF70F00263BE5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10E92044A3C60003C045 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */,\n\t\t\t\t33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */,\n\t\t\t\t335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t331C80DA294CF71000263BE5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 33CC10EC2044A3C60003C045 /* Runner */;\n\t\t\ttargetProxy = 331C80D9294CF71000263BE5 /* PBXContainerItemProxy */;\n\t\t};\n\t\t33CC11202044C79F0003C045 /* PBXTargetDependency */ = {\n\t\t\tisa = 
PBXTargetDependency;\n\t\t\ttarget = 33CC111A2044C6BA0003C045 /* Flutter Assemble */;\n\t\t\ttargetProxy = 33CC111F2044C79F0003C045 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin PBXVariantGroup section */\n\t\t33CC10F42044A3C60003C045 /* MainMenu.xib */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F52044A3C60003C045 /* Base */,\n\t\t\t);\n\t\t\tname = MainMenu.xib;\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXVariantGroup section */\n\n/* Begin XCBuildConfiguration section */\n\t\t331C80DB294CF71000263BE5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/hello_world_flutter.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/hello_world_flutter\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t331C80DC294CF71000263BE5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/hello_world_flutter.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/hello_world_flutter\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t331C80DD294CF71000263BE5 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 
1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.helloWorldFlutter.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/hello_world_flutter.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/hello_world_flutter\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t338D0CE9231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = 
YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.15;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t338D0CEA231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = Runner/DebugProfile.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t338D0CEB231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Manual;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t33CC10F92044A3C60003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = 
YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = dwarf;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_TESTABILITY = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.15;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = YES;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC10FA2044A3C60003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = 
YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.15;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t33CC10FC2044A3C60003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = Runner/DebugProfile.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = 
(\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC10FD2044A3C60003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = Runner/Release.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t33CC111C2044C6BA0003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Manual;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC111D2044C6BA0003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t331C80DB294CF71000263BE5 /* Debug */,\n\t\t\t\t331C80DC294CF71000263BE5 /* Release */,\n\t\t\t\t331C80DD294CF71000263BE5 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC10E82044A3C60003C045 /* Build configuration list for PBXProject 
\"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC10F92044A3C60003C045 /* Debug */,\n\t\t\t\t33CC10FA2044A3C60003C045 /* Release */,\n\t\t\t\t338D0CE9231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC10FB2044A3C60003C045 /* Build configuration list for PBXNativeTarget \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC10FC2044A3C60003C045 /* Debug */,\n\t\t\t\t33CC10FD2044A3C60003C045 /* Release */,\n\t\t\t\t338D0CEA231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget \"Flutter Assemble\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC111C2044C6BA0003C045 /* Debug */,\n\t\t\t\t33CC111D2044C6BA0003C045 /* Release */,\n\t\t\t\t338D0CEB231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\t};\n\trootObject = 33CC10E52044A3C60003C045 /* Project object */;\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Scheme\n   LastUpgradeVersion = \"1510\"\n   version = \"1.3\">\n   <BuildAction\n      parallelizeBuildables = \"YES\"\n      buildImplicitDependencies = \"YES\">\n      <BuildActionEntries>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n               BuildableName = \"hello_world_flutter.app\"\n               BlueprintName = \"Runner\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            </BuildableReference>\n         </BuildActionEntry>\n      </BuildActionEntries>\n   </BuildAction>\n   <TestAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\">\n      <MacroExpansion>\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"hello_world_flutter.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </MacroExpansion>\n      <Testables>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"331C80D4294CF70F00263BE5\"\n               BuildableName = \"RunnerTests.xctest\"\n               BlueprintName = \"RunnerTests\"\n               ReferencedContainer = 
\"container:Runner.xcodeproj\">\n            </BuildableReference>\n         </TestableReference>\n      </Testables>\n   </TestAction>\n   <LaunchAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      launchStyle = \"0\"\n      useCustomWorkingDirectory = \"NO\"\n      ignoresPersistentStateOnLaunch = \"NO\"\n      debugDocumentVersioning = \"YES\"\n      debugServiceExtension = \"internal\"\n      enableGPUValidationMode = \"1\"\n      allowLocationSimulation = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"hello_world_flutter.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </LaunchAction>\n   <ProfileAction\n      buildConfiguration = \"Profile\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      savedToolIdentifier = \"\"\n      useCustomWorkingDirectory = \"NO\"\n      debugDocumentVersioning = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"hello_world_flutter.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </ProfileAction>\n   <AnalyzeAction\n      buildConfiguration = \"Debug\">\n   </AnalyzeAction>\n   <ArchiveAction\n      buildConfiguration = \"Release\"\n      revealArchiveInOrganizer = \"YES\">\n   
</ArchiveAction>\n</Scheme>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"group:Runner.xcodeproj\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "examples/hello_world_flutter/macos/RunnerTests/RunnerTests.swift",
    "content": "import FlutterMacOS\nimport Cocoa\nimport XCTest\n\nclass RunnerTests: XCTestCase {\n\n  func testExample() {\n    // If you add code to the Runner application, consider adding tests here.\n    // See https://developer.apple.com/documentation/xctest for more information about using XCTest.\n  }\n\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/pubspec.yaml",
    "content": "name: hello_world_flutter\ndescription: A sample Flutter app integrating LangChain.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  flutter:\n    sdk: flutter\n  equatable: ^2.0.7\n  flutter_bloc: ^9.1.1\n  flutter_markdown: ^0.7.7\n  langchain: ^0.8.1\n  langchain_google: ^0.7.1+2\n  langchain_mistralai: ^0.3.1+1\n  langchain_ollama: ^0.4.1\n  langchain_openai: ^0.8.1+1\n\nflutter:\n  uses-material-design: true\n"
  },
  {
    "path": "examples/hello_world_flutter/web/flutter_bootstrap.js",
    "content": "{{flutter_js}}\n{{flutter_build_config}}\n\n_flutter.loader.load({\n  serviceWorkerSettings: {\n    serviceWorkerVersion: {{flutter_service_worker_version}},\n  },\n  onEntrypointLoaded: async function(engineInitializer) {\n    const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true});\n    await appRunner.runApp();\n  },\n});\n"
  },
  {
    "path": "examples/hello_world_flutter/web/index.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n    <base href=\"$FLUTTER_BASE_HREF\">\n\n    <meta charset=\"UTF-8\">\n    <meta content=\"IE=Edge\" http-equiv=\"X-UA-Compatible\">\n    <meta content=\"A sample Flutter app integrating LangChain.\" name=\"description\">\n\n    <!-- iOS meta tags & icons -->\n    <meta content=\"yes\" name=\"apple-mobile-web-app-capable\">\n    <meta content=\"black\" name=\"apple-mobile-web-app-status-bar-style\">\n    <meta content=\"hello_world_flutter\" name=\"apple-mobile-web-app-title\">\n    <link href=\"icons/Icon-192.png\" rel=\"apple-touch-icon\">\n\n    <!-- Favicon -->\n    <link href=\"favicon.png\" rel=\"icon\" type=\"image/png\"/>\n\n    <title>Hello World Flutter</title>\n    <link href=\"manifest.json\" rel=\"manifest\">\n</head>\n<body>\n    <script src=\"flutter_bootstrap.js\" async></script>\n</body>\n</html>\n"
  },
  {
    "path": "examples/hello_world_flutter/web/manifest.json",
    "content": "{\n    \"name\": \"hello_world_flutter\",\n    \"short_name\": \"Hello World Flutter\",\n    \"start_url\": \".\",\n    \"display\": \"standalone\",\n    \"background_color\": \"#0175C2\",\n    \"theme_color\": \"#0175C2\",\n    \"description\": \"A sample Flutter app integrating LangChain.\",\n    \"orientation\": \"portrait-primary\",\n    \"prefer_related_applications\": false,\n    \"icons\": [\n        {\n            \"src\": \"icons/Icon-192.png\",\n            \"sizes\": \"192x192\",\n            \"type\": \"image/png\"\n        },\n        {\n            \"src\": \"icons/Icon-512.png\",\n            \"sizes\": \"512x512\",\n            \"type\": \"image/png\"\n        },\n        {\n            \"src\": \"icons/Icon-maskable-192.png\",\n            \"sizes\": \"192x192\",\n            \"type\": \"image/png\",\n            \"purpose\": \"maskable\"\n        },\n        {\n            \"src\": \"icons/Icon-maskable-512.png\",\n            \"sizes\": \"512x512\",\n            \"type\": \"image/png\",\n            \"purpose\": \"maskable\"\n        }\n    ]\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/.gitignore",
    "content": "flutter/ephemeral/\n\n# Visual Studio user-specific files.\n*.suo\n*.user\n*.userosscache\n*.sln.docstates\n\n# Visual Studio build-related files.\nx64/\nx86/\n\n# Visual Studio cache files\n# files ending in .cache can be ignored\n*.[Cc]ache\n# but keep track of directories ending in .cache\n!*.[Cc]ache/\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/CMakeLists.txt",
    "content": "# Project-level configuration.\ncmake_minimum_required(VERSION 3.14)\nproject(hello_world_flutter LANGUAGES CXX)\n\n# The name of the executable created for the application. Change this to change\n# the on-disk name of your application.\nset(BINARY_NAME \"hello_world_flutter\")\n\n# Explicitly opt in to modern CMake behaviors to avoid warnings with recent\n# versions of CMake.\ncmake_policy(SET CMP0063 NEW)\n\n# Define build configuration option.\nget_property(IS_MULTICONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)\nif(IS_MULTICONFIG)\n  set(CMAKE_CONFIGURATION_TYPES \"Debug;Profile;Release\"\n    CACHE STRING \"\" FORCE)\nelse()\n  if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)\n    set(CMAKE_BUILD_TYPE \"Debug\" CACHE\n      STRING \"Flutter build mode\" FORCE)\n    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS\n      \"Debug\" \"Profile\" \"Release\")\n  endif()\nendif()\n# Define settings for the Profile build mode.\nset(CMAKE_EXE_LINKER_FLAGS_PROFILE \"${CMAKE_EXE_LINKER_FLAGS_RELEASE}\")\nset(CMAKE_SHARED_LINKER_FLAGS_PROFILE \"${CMAKE_SHARED_LINKER_FLAGS_RELEASE}\")\nset(CMAKE_C_FLAGS_PROFILE \"${CMAKE_C_FLAGS_RELEASE}\")\nset(CMAKE_CXX_FLAGS_PROFILE \"${CMAKE_CXX_FLAGS_RELEASE}\")\n\n# Use Unicode for all projects.\nadd_definitions(-DUNICODE -D_UNICODE)\n\n# Compilation settings that should be applied to most targets.\n#\n# Be cautious about adding new options here, as plugins use this function by\n# default. 
In most cases, you should add new options to specific targets instead\n# of modifying this function.\nfunction(APPLY_STANDARD_SETTINGS TARGET)\n  target_compile_features(${TARGET} PUBLIC cxx_std_17)\n  target_compile_options(${TARGET} PRIVATE /W4 /WX /wd\"4100\")\n  target_compile_options(${TARGET} PRIVATE /EHsc)\n  target_compile_definitions(${TARGET} PRIVATE \"_HAS_EXCEPTIONS=0\")\n  target_compile_definitions(${TARGET} PRIVATE \"$<$<CONFIG:Debug>:_DEBUG>\")\nendfunction()\n\n# Flutter library and tool build rules.\nset(FLUTTER_MANAGED_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/flutter\")\nadd_subdirectory(${FLUTTER_MANAGED_DIR})\n\n# Application build; see runner/CMakeLists.txt.\nadd_subdirectory(\"runner\")\n\n\n# Generated plugin build rules, which manage building the plugins and adding\n# them to the application.\ninclude(flutter/generated_plugins.cmake)\n\n\n# === Installation ===\n# Support files are copied into place next to the executable, so that it can\n# run in place. This is done instead of making a separate bundle (as on Linux)\n# so that building and running from within Visual Studio will work.\nset(BUILD_BUNDLE_DIR \"$<TARGET_FILE_DIR:${BINARY_NAME}>\")\n# Make the \"install\" step default, as it's required to run.\nset(CMAKE_VS_INCLUDE_INSTALL_TO_DEFAULT_BUILD 1)\nif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)\n  set(CMAKE_INSTALL_PREFIX \"${BUILD_BUNDLE_DIR}\" CACHE PATH \"...\" FORCE)\nendif()\n\nset(INSTALL_BUNDLE_DATA_DIR \"${CMAKE_INSTALL_PREFIX}/data\")\nset(INSTALL_BUNDLE_LIB_DIR \"${CMAKE_INSTALL_PREFIX}\")\n\ninstall(TARGETS ${BINARY_NAME} RUNTIME DESTINATION \"${CMAKE_INSTALL_PREFIX}\"\n  COMPONENT Runtime)\n\ninstall(FILES \"${FLUTTER_ICU_DATA_FILE}\" DESTINATION \"${INSTALL_BUNDLE_DATA_DIR}\"\n  COMPONENT Runtime)\n\ninstall(FILES \"${FLUTTER_LIBRARY}\" DESTINATION \"${INSTALL_BUNDLE_LIB_DIR}\"\n  COMPONENT Runtime)\n\nif(PLUGIN_BUNDLED_LIBRARIES)\n  install(FILES \"${PLUGIN_BUNDLED_LIBRARIES}\"\n    DESTINATION \"${INSTALL_BUNDLE_LIB_DIR}\"\n 
   COMPONENT Runtime)\nendif()\n\n# Fully re-copy the assets directory on each build to avoid having stale files\n# from a previous install.\nset(FLUTTER_ASSET_DIR_NAME \"flutter_assets\")\ninstall(CODE \"\n  file(REMOVE_RECURSE \\\"${INSTALL_BUNDLE_DATA_DIR}/${FLUTTER_ASSET_DIR_NAME}\\\")\n  \" COMPONENT Runtime)\ninstall(DIRECTORY \"${PROJECT_BUILD_DIR}/${FLUTTER_ASSET_DIR_NAME}\"\n  DESTINATION \"${INSTALL_BUNDLE_DATA_DIR}\" COMPONENT Runtime)\n\n# Install the AOT library on non-Debug builds only.\ninstall(FILES \"${AOT_LIBRARY}\" DESTINATION \"${INSTALL_BUNDLE_DATA_DIR}\"\n  CONFIGURATIONS Profile;Release\n  COMPONENT Runtime)\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/flutter/CMakeLists.txt",
    "content": "# This file controls Flutter-level build steps. It should not be edited.\ncmake_minimum_required(VERSION 3.14)\n\nset(EPHEMERAL_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/ephemeral\")\n\n# Configuration provided via flutter tool.\ninclude(${EPHEMERAL_DIR}/generated_config.cmake)\n\n# TODO: Move the rest of this into files in ephemeral. See\n# https://github.com/flutter/flutter/issues/57146.\nset(WRAPPER_ROOT \"${EPHEMERAL_DIR}/cpp_client_wrapper\")\n\n# === Flutter Library ===\nset(FLUTTER_LIBRARY \"${EPHEMERAL_DIR}/flutter_windows.dll\")\n\n# Published to parent scope for install step.\nset(FLUTTER_LIBRARY ${FLUTTER_LIBRARY} PARENT_SCOPE)\nset(FLUTTER_ICU_DATA_FILE \"${EPHEMERAL_DIR}/icudtl.dat\" PARENT_SCOPE)\nset(PROJECT_BUILD_DIR \"${PROJECT_DIR}/build/\" PARENT_SCOPE)\nset(AOT_LIBRARY \"${PROJECT_DIR}/build/windows/app.so\" PARENT_SCOPE)\n\nlist(APPEND FLUTTER_LIBRARY_HEADERS\n  \"flutter_export.h\"\n  \"flutter_windows.h\"\n  \"flutter_messenger.h\"\n  \"flutter_plugin_registrar.h\"\n  \"flutter_texture_registrar.h\"\n)\nlist(TRANSFORM FLUTTER_LIBRARY_HEADERS PREPEND \"${EPHEMERAL_DIR}/\")\nadd_library(flutter INTERFACE)\ntarget_include_directories(flutter INTERFACE\n  \"${EPHEMERAL_DIR}\"\n)\ntarget_link_libraries(flutter INTERFACE \"${FLUTTER_LIBRARY}.lib\")\nadd_dependencies(flutter flutter_assemble)\n\n# === Wrapper ===\nlist(APPEND CPP_WRAPPER_SOURCES_CORE\n  \"core_implementations.cc\"\n  \"standard_codec.cc\"\n)\nlist(TRANSFORM CPP_WRAPPER_SOURCES_CORE PREPEND \"${WRAPPER_ROOT}/\")\nlist(APPEND CPP_WRAPPER_SOURCES_PLUGIN\n  \"plugin_registrar.cc\"\n)\nlist(TRANSFORM CPP_WRAPPER_SOURCES_PLUGIN PREPEND \"${WRAPPER_ROOT}/\")\nlist(APPEND CPP_WRAPPER_SOURCES_APP\n  \"flutter_engine.cc\"\n  \"flutter_view_controller.cc\"\n)\nlist(TRANSFORM CPP_WRAPPER_SOURCES_APP PREPEND \"${WRAPPER_ROOT}/\")\n\n# Wrapper sources needed for a plugin.\nadd_library(flutter_wrapper_plugin STATIC\n  ${CPP_WRAPPER_SOURCES_CORE}\n  
${CPP_WRAPPER_SOURCES_PLUGIN}\n)\napply_standard_settings(flutter_wrapper_plugin)\nset_target_properties(flutter_wrapper_plugin PROPERTIES\n  POSITION_INDEPENDENT_CODE ON)\nset_target_properties(flutter_wrapper_plugin PROPERTIES\n  CXX_VISIBILITY_PRESET hidden)\ntarget_link_libraries(flutter_wrapper_plugin PUBLIC flutter)\ntarget_include_directories(flutter_wrapper_plugin PUBLIC\n  \"${WRAPPER_ROOT}/include\"\n)\nadd_dependencies(flutter_wrapper_plugin flutter_assemble)\n\n# Wrapper sources needed for the runner.\nadd_library(flutter_wrapper_app STATIC\n  ${CPP_WRAPPER_SOURCES_CORE}\n  ${CPP_WRAPPER_SOURCES_APP}\n)\napply_standard_settings(flutter_wrapper_app)\ntarget_link_libraries(flutter_wrapper_app PUBLIC flutter)\ntarget_include_directories(flutter_wrapper_app PUBLIC\n  \"${WRAPPER_ROOT}/include\"\n)\nadd_dependencies(flutter_wrapper_app flutter_assemble)\n\n# === Flutter tool backend ===\n# _phony_ is a non-existent file to force this command to run every time,\n# since currently there's no way to get a full input/output list from the\n# flutter tool.\nset(PHONY_OUTPUT \"${CMAKE_CURRENT_BINARY_DIR}/_phony_\")\nset_source_files_properties(\"${PHONY_OUTPUT}\" PROPERTIES SYMBOLIC TRUE)\nadd_custom_command(\n  OUTPUT ${FLUTTER_LIBRARY} ${FLUTTER_LIBRARY_HEADERS}\n    ${CPP_WRAPPER_SOURCES_CORE} ${CPP_WRAPPER_SOURCES_PLUGIN}\n    ${CPP_WRAPPER_SOURCES_APP}\n    ${PHONY_OUTPUT}\n  COMMAND ${CMAKE_COMMAND} -E env\n    ${FLUTTER_TOOL_ENVIRONMENT}\n    \"${FLUTTER_ROOT}/packages/flutter_tools/bin/tool_backend.bat\"\n      windows-x64 $<CONFIG>\n  VERBATIM\n)\nadd_custom_target(flutter_assemble DEPENDS\n  \"${FLUTTER_LIBRARY}\"\n  ${FLUTTER_LIBRARY_HEADERS}\n  ${CPP_WRAPPER_SOURCES_CORE}\n  ${CPP_WRAPPER_SOURCES_PLUGIN}\n  ${CPP_WRAPPER_SOURCES_APP}\n)\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/flutter/generated_plugin_registrant.cc",
    "content": "//\n//  Generated file. Do not edit.\n//\n\n// clang-format off\n\n#include \"generated_plugin_registrant.h\"\n\n\nvoid RegisterPlugins(flutter::PluginRegistry* registry) {\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/flutter/generated_plugin_registrant.h",
    "content": "//\n//  Generated file. Do not edit.\n//\n\n// clang-format off\n\n#ifndef GENERATED_PLUGIN_REGISTRANT_\n#define GENERATED_PLUGIN_REGISTRANT_\n\n#include <flutter/plugin_registry.h>\n\n// Registers Flutter plugins.\nvoid RegisterPlugins(flutter::PluginRegistry* registry);\n\n#endif  // GENERATED_PLUGIN_REGISTRANT_\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/flutter/generated_plugins.cmake",
    "content": "#\n# Generated file, do not edit.\n#\n\nlist(APPEND FLUTTER_PLUGIN_LIST\n)\n\nlist(APPEND FLUTTER_FFI_PLUGIN_LIST\n)\n\nset(PLUGIN_BUNDLED_LIBRARIES)\n\nforeach(plugin ${FLUTTER_PLUGIN_LIST})\n  add_subdirectory(flutter/ephemeral/.plugin_symlinks/${plugin}/windows plugins/${plugin})\n  target_link_libraries(${BINARY_NAME} PRIVATE ${plugin}_plugin)\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES $<TARGET_FILE:${plugin}_plugin>)\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES ${${plugin}_bundled_libraries})\nendforeach(plugin)\n\nforeach(ffi_plugin ${FLUTTER_FFI_PLUGIN_LIST})\n  add_subdirectory(flutter/ephemeral/.plugin_symlinks/${ffi_plugin}/windows plugins/${ffi_plugin})\n  list(APPEND PLUGIN_BUNDLED_LIBRARIES ${${ffi_plugin}_bundled_libraries})\nendforeach(ffi_plugin)\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.14)\nproject(runner LANGUAGES CXX)\n\n# Define the application target. To change its name, change BINARY_NAME in the\n# top-level CMakeLists.txt, not the value here, or `flutter run` will no longer\n# work.\n#\n# Any new source files that you add to the application should be added here.\nadd_executable(${BINARY_NAME} WIN32\n  \"flutter_window.cpp\"\n  \"main.cpp\"\n  \"utils.cpp\"\n  \"win32_window.cpp\"\n  \"${FLUTTER_MANAGED_DIR}/generated_plugin_registrant.cc\"\n  \"Runner.rc\"\n  \"runner.exe.manifest\"\n)\n\n# Apply the standard set of build settings. This can be removed for applications\n# that need different build settings.\napply_standard_settings(${BINARY_NAME})\n\n# Add preprocessor definitions for the build version.\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"FLUTTER_VERSION=\\\"${FLUTTER_VERSION}\\\"\")\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"FLUTTER_VERSION_MAJOR=${FLUTTER_VERSION_MAJOR}\")\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"FLUTTER_VERSION_MINOR=${FLUTTER_VERSION_MINOR}\")\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"FLUTTER_VERSION_PATCH=${FLUTTER_VERSION_PATCH}\")\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"FLUTTER_VERSION_BUILD=${FLUTTER_VERSION_BUILD}\")\n\n# Disable Windows macros that collide with C++ standard library functions.\ntarget_compile_definitions(${BINARY_NAME} PRIVATE \"NOMINMAX\")\n\n# Add dependency libraries and include directories. Add any application-specific\n# dependencies here.\ntarget_link_libraries(${BINARY_NAME} PRIVATE flutter flutter_wrapper_app)\ntarget_link_libraries(${BINARY_NAME} PRIVATE \"dwmapi.lib\")\ntarget_include_directories(${BINARY_NAME} PRIVATE \"${CMAKE_SOURCE_DIR}\")\n\n# Run the Flutter tool portions of the build. This must not be removed.\nadd_dependencies(${BINARY_NAME} flutter_assemble)\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/Runner.rc",
    "content": "// Microsoft Visual C++ generated resource script.\n//\n#pragma code_page(65001)\n#include \"resource.h\"\n\n#define APSTUDIO_READONLY_SYMBOLS\n/////////////////////////////////////////////////////////////////////////////\n//\n// Generated from the TEXTINCLUDE 2 resource.\n//\n#include \"winres.h\"\n\n/////////////////////////////////////////////////////////////////////////////\n#undef APSTUDIO_READONLY_SYMBOLS\n\n/////////////////////////////////////////////////////////////////////////////\n// English (United States) resources\n\n#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)\nLANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US\n\n#ifdef APSTUDIO_INVOKED\n/////////////////////////////////////////////////////////////////////////////\n//\n// TEXTINCLUDE\n//\n\n1 TEXTINCLUDE\nBEGIN\n    \"resource.h\\0\"\nEND\n\n2 TEXTINCLUDE\nBEGIN\n    \"#include \"\"winres.h\"\"\\r\\n\"\n    \"\\0\"\nEND\n\n3 TEXTINCLUDE\nBEGIN\n    \"\\r\\n\"\n    \"\\0\"\nEND\n\n#endif    // APSTUDIO_INVOKED\n\n\n/////////////////////////////////////////////////////////////////////////////\n//\n// Icon\n//\n\n// Icon with lowest ID value placed first to ensure application icon\n// remains consistent on all systems.\nIDI_APP_ICON            ICON                    \"resources\\\\app_icon.ico\"\n\n\n/////////////////////////////////////////////////////////////////////////////\n//\n// Version\n//\n\n#if defined(FLUTTER_VERSION_MAJOR) && defined(FLUTTER_VERSION_MINOR) && defined(FLUTTER_VERSION_PATCH) && defined(FLUTTER_VERSION_BUILD)\n#define VERSION_AS_NUMBER FLUTTER_VERSION_MAJOR,FLUTTER_VERSION_MINOR,FLUTTER_VERSION_PATCH,FLUTTER_VERSION_BUILD\n#else\n#define VERSION_AS_NUMBER 1,0,0,0\n#endif\n\n#if defined(FLUTTER_VERSION)\n#define VERSION_AS_STRING FLUTTER_VERSION\n#else\n#define VERSION_AS_STRING \"1.0.0\"\n#endif\n\nVS_VERSION_INFO VERSIONINFO\n FILEVERSION VERSION_AS_NUMBER\n PRODUCTVERSION VERSION_AS_NUMBER\n FILEFLAGSMASK VS_FFI_FILEFLAGSMASK\n#ifdef _DEBUG\n FILEFLAGS 
VS_FF_DEBUG\n#else\n FILEFLAGS 0x0L\n#endif\n FILEOS VOS__WINDOWS32\n FILETYPE VFT_APP\n FILESUBTYPE 0x0L\nBEGIN\n    BLOCK \"StringFileInfo\"\n    BEGIN\n        BLOCK \"040904e4\"\n        BEGIN\n            VALUE \"CompanyName\", \"com.example\" \"\\0\"\n            VALUE \"FileDescription\", \"hello_world_flutter\" \"\\0\"\n            VALUE \"FileVersion\", VERSION_AS_STRING \"\\0\"\n            VALUE \"InternalName\", \"hello_world_flutter\" \"\\0\"\n            VALUE \"LegalCopyright\", \"Copyright (C) 2023 com.example. All rights reserved.\" \"\\0\"\n            VALUE \"OriginalFilename\", \"hello_world_flutter.exe\" \"\\0\"\n            VALUE \"ProductName\", \"hello_world_flutter\" \"\\0\"\n            VALUE \"ProductVersion\", VERSION_AS_STRING \"\\0\"\n        END\n    END\n    BLOCK \"VarFileInfo\"\n    BEGIN\n        VALUE \"Translation\", 0x409, 1252\n    END\nEND\n\n#endif    // English (United States) resources\n/////////////////////////////////////////////////////////////////////////////\n\n\n\n#ifndef APSTUDIO_INVOKED\n/////////////////////////////////////////////////////////////////////////////\n//\n// Generated from the TEXTINCLUDE 3 resource.\n//\n\n\n/////////////////////////////////////////////////////////////////////////////\n#endif    // not APSTUDIO_INVOKED\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/flutter_window.cpp",
    "content": "#include \"flutter_window.h\"\n\n#include <optional>\n\n#include \"flutter/generated_plugin_registrant.h\"\n\nFlutterWindow::FlutterWindow(const flutter::DartProject& project)\n    : project_(project) {}\n\nFlutterWindow::~FlutterWindow() {}\n\nbool FlutterWindow::OnCreate() {\n  if (!Win32Window::OnCreate()) {\n    return false;\n  }\n\n  RECT frame = GetClientArea();\n\n  // The size here must match the window dimensions to avoid unnecessary surface\n  // creation / destruction in the startup path.\n  flutter_controller_ = std::make_unique<flutter::FlutterViewController>(\n      frame.right - frame.left, frame.bottom - frame.top, project_);\n  // Ensure that basic setup of the controller was successful.\n  if (!flutter_controller_->engine() || !flutter_controller_->view()) {\n    return false;\n  }\n  RegisterPlugins(flutter_controller_->engine());\n  SetChildContent(flutter_controller_->view()->GetNativeWindow());\n\n  flutter_controller_->engine()->SetNextFrameCallback([&]() {\n    this->Show();\n  });\n\n  return true;\n}\n\nvoid FlutterWindow::OnDestroy() {\n  if (flutter_controller_) {\n    flutter_controller_ = nullptr;\n  }\n\n  Win32Window::OnDestroy();\n}\n\nLRESULT\nFlutterWindow::MessageHandler(HWND hwnd, UINT const message,\n                              WPARAM const wparam,\n                              LPARAM const lparam) noexcept {\n  // Give Flutter, including plugins, an opportunity to handle window messages.\n  if (flutter_controller_) {\n    std::optional<LRESULT> result =\n        flutter_controller_->HandleTopLevelWindowProc(hwnd, message, wparam,\n                                                      lparam);\n    if (result) {\n      return *result;\n    }\n  }\n\n  switch (message) {\n    case WM_FONTCHANGE:\n      flutter_controller_->engine()->ReloadSystemFonts();\n      break;\n  }\n\n  return Win32Window::MessageHandler(hwnd, message, wparam, lparam);\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/flutter_window.h",
    "content": "#ifndef RUNNER_FLUTTER_WINDOW_H_\n#define RUNNER_FLUTTER_WINDOW_H_\n\n#include <flutter/dart_project.h>\n#include <flutter/flutter_view_controller.h>\n\n#include <memory>\n\n#include \"win32_window.h\"\n\n// A window that does nothing but host a Flutter view.\nclass FlutterWindow : public Win32Window {\n public:\n  // Creates a new FlutterWindow hosting a Flutter view running |project|.\n  explicit FlutterWindow(const flutter::DartProject& project);\n  virtual ~FlutterWindow();\n\n protected:\n  // Win32Window:\n  bool OnCreate() override;\n  void OnDestroy() override;\n  LRESULT MessageHandler(HWND window, UINT const message, WPARAM const wparam,\n                         LPARAM const lparam) noexcept override;\n\n private:\n  // The project to run.\n  flutter::DartProject project_;\n\n  // The Flutter instance hosted by this window.\n  std::unique_ptr<flutter::FlutterViewController> flutter_controller_;\n};\n\n#endif  // RUNNER_FLUTTER_WINDOW_H_\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/main.cpp",
    "content": "#include <flutter/dart_project.h>\n#include <flutter/flutter_view_controller.h>\n#include <windows.h>\n\n#include \"flutter_window.h\"\n#include \"utils.h\"\n\nint APIENTRY wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev,\n                      _In_ wchar_t *command_line, _In_ int show_command) {\n  // Attach to console when present (e.g., 'flutter run') or create a\n  // new console when running with a debugger.\n  if (!::AttachConsole(ATTACH_PARENT_PROCESS) && ::IsDebuggerPresent()) {\n    CreateAndAttachConsole();\n  }\n\n  // Initialize COM, so that it is available for use in the library and/or\n  // plugins.\n  ::CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED);\n\n  flutter::DartProject project(L\"data\");\n\n  std::vector<std::string> command_line_arguments =\n      GetCommandLineArguments();\n\n  project.set_dart_entrypoint_arguments(std::move(command_line_arguments));\n\n  FlutterWindow window(project);\n  Win32Window::Point origin(10, 10);\n  Win32Window::Size size(1280, 720);\n  if (!window.Create(L\"hello_world_flutter\", origin, size)) {\n    return EXIT_FAILURE;\n  }\n  window.SetQuitOnClose(true);\n\n  ::MSG msg;\n  while (::GetMessage(&msg, nullptr, 0, 0)) {\n    ::TranslateMessage(&msg);\n    ::DispatchMessage(&msg);\n  }\n\n  ::CoUninitialize();\n  return EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/resource.h",
    "content": "//{{NO_DEPENDENCIES}}\n// Microsoft Visual C++ generated include file.\n// Used by Runner.rc\n//\n#define IDI_APP_ICON                    101\n\n// Next default values for new objects\n//\n#ifdef APSTUDIO_INVOKED\n#ifndef APSTUDIO_READONLY_SYMBOLS\n#define _APS_NEXT_RESOURCE_VALUE        102\n#define _APS_NEXT_COMMAND_VALUE         40001\n#define _APS_NEXT_CONTROL_VALUE         1001\n#define _APS_NEXT_SYMED_VALUE           101\n#endif\n#endif\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/runner.exe.manifest",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<assembly xmlns=\"urn:schemas-microsoft-com:asm.v1\" manifestVersion=\"1.0\">\n  <application xmlns=\"urn:schemas-microsoft-com:asm.v3\">\n    <windowsSettings>\n      <dpiAwareness xmlns=\"http://schemas.microsoft.com/SMI/2016/WindowsSettings\">PerMonitorV2</dpiAwareness>\n    </windowsSettings>\n  </application>\n  <compatibility xmlns=\"urn:schemas-microsoft-com:compatibility.v1\">\n    <application>\n      <!-- Windows 10 and Windows 11 -->\n      <supportedOS Id=\"{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}\"/>\n      <!-- Windows 8.1 -->\n      <supportedOS Id=\"{1f676c76-80e1-4239-95bb-83d0f6d0da78}\"/>\n      <!-- Windows 8 -->\n      <supportedOS Id=\"{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}\"/>\n      <!-- Windows 7 -->\n      <supportedOS Id=\"{35138b9a-5d96-4fbd-8e2d-a2440225f93a}\"/>\n    </application>\n  </compatibility>\n</assembly>\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/utils.cpp",
    "content": "#include \"utils.h\"\n\n#include <flutter_windows.h>\n#include <io.h>\n#include <stdio.h>\n#include <windows.h>\n\n#include <iostream>\n\nvoid CreateAndAttachConsole() {\n  if (::AllocConsole()) {\n    FILE *unused;\n    if (freopen_s(&unused, \"CONOUT$\", \"w\", stdout)) {\n      _dup2(_fileno(stdout), 1);\n    }\n    if (freopen_s(&unused, \"CONOUT$\", \"w\", stderr)) {\n      _dup2(_fileno(stdout), 2);\n    }\n    std::ios::sync_with_stdio();\n    FlutterDesktopResyncOutputStreams();\n  }\n}\n\nstd::vector<std::string> GetCommandLineArguments() {\n  // Convert the UTF-16 command line arguments to UTF-8 for the Engine to use.\n  int argc;\n  wchar_t** argv = ::CommandLineToArgvW(::GetCommandLineW(), &argc);\n  if (argv == nullptr) {\n    return std::vector<std::string>();\n  }\n\n  std::vector<std::string> command_line_arguments;\n\n  // Skip the first argument as it's the binary name.\n  for (int i = 1; i < argc; i++) {\n    command_line_arguments.push_back(Utf8FromUtf16(argv[i]));\n  }\n\n  ::LocalFree(argv);\n\n  return command_line_arguments;\n}\n\nstd::string Utf8FromUtf16(const wchar_t* utf16_string) {\n  if (utf16_string == nullptr) {\n    return std::string();\n  }\n  int target_length = ::WideCharToMultiByte(\n      CP_UTF8, WC_ERR_INVALID_CHARS, utf16_string,\n      -1, nullptr, 0, nullptr, nullptr)\n    -1; // remove the trailing null character\n  int input_length = (int)wcslen(utf16_string);\n  std::string utf8_string;\n  if (target_length <= 0 || target_length > utf8_string.max_size()) {\n    return utf8_string;\n  }\n  utf8_string.resize(target_length);\n  int converted_length = ::WideCharToMultiByte(\n      CP_UTF8, WC_ERR_INVALID_CHARS, utf16_string,\n      input_length, utf8_string.data(), target_length, nullptr, nullptr);\n  if (converted_length == 0) {\n    return std::string();\n  }\n  return utf8_string;\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/utils.h",
    "content": "#ifndef RUNNER_UTILS_H_\n#define RUNNER_UTILS_H_\n\n#include <string>\n#include <vector>\n\n// Creates a console for the process, and redirects stdout and stderr to\n// it for both the runner and the Flutter library.\nvoid CreateAndAttachConsole();\n\n// Takes a null-terminated wchar_t* encoded in UTF-16 and returns a std::string\n// encoded in UTF-8. Returns an empty std::string on failure.\nstd::string Utf8FromUtf16(const wchar_t* utf16_string);\n\n// Gets the command line arguments passed in as a std::vector<std::string>,\n// encoded in UTF-8. Returns an empty std::vector<std::string> on failure.\nstd::vector<std::string> GetCommandLineArguments();\n\n#endif  // RUNNER_UTILS_H_\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/win32_window.cpp",
    "content": "#include \"win32_window.h\"\n\n#include <dwmapi.h>\n#include <flutter_windows.h>\n\n#include \"resource.h\"\n\nnamespace {\n\n/// Window attribute that enables dark mode window decorations.\n///\n/// Redefined in case the developer's machine has a Windows SDK older than\n/// version 10.0.22000.0.\n/// See: https://docs.microsoft.com/windows/win32/api/dwmapi/ne-dwmapi-dwmwindowattribute\n#ifndef DWMWA_USE_IMMERSIVE_DARK_MODE\n#define DWMWA_USE_IMMERSIVE_DARK_MODE 20\n#endif\n\nconstexpr const wchar_t kWindowClassName[] = L\"FLUTTER_RUNNER_WIN32_WINDOW\";\n\n/// Registry key for app theme preference.\n///\n/// A value of 0 indicates apps should use dark mode. A non-zero or missing\n/// value indicates apps should use light mode.\nconstexpr const wchar_t kGetPreferredBrightnessRegKey[] =\n  L\"Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Themes\\\\Personalize\";\nconstexpr const wchar_t kGetPreferredBrightnessRegValue[] = L\"AppsUseLightTheme\";\n\n// The number of Win32Window objects that currently exist.\nstatic int g_active_window_count = 0;\n\nusing EnableNonClientDpiScaling = BOOL __stdcall(HWND hwnd);\n\n// Scale helper to convert logical scaler values to physical using passed in\n// scale factor\nint Scale(int source, double scale_factor) {\n  return static_cast<int>(source * scale_factor);\n}\n\n// Dynamically loads the |EnableNonClientDpiScaling| from the User32 module.\n// This API is only needed for PerMonitor V1 awareness mode.\nvoid EnableFullDpiSupportIfAvailable(HWND hwnd) {\n  HMODULE user32_module = LoadLibraryA(\"User32.dll\");\n  if (!user32_module) {\n    return;\n  }\n  auto enable_non_client_dpi_scaling =\n      reinterpret_cast<EnableNonClientDpiScaling*>(\n          GetProcAddress(user32_module, \"EnableNonClientDpiScaling\"));\n  if (enable_non_client_dpi_scaling != nullptr) {\n    enable_non_client_dpi_scaling(hwnd);\n  }\n  FreeLibrary(user32_module);\n}\n\n}  // namespace\n\n// Manages the Win32Window's window class 
registration.\nclass WindowClassRegistrar {\n public:\n  ~WindowClassRegistrar() = default;\n\n  // Returns the singleton registrar instance.\n  static WindowClassRegistrar* GetInstance() {\n    if (!instance_) {\n      instance_ = new WindowClassRegistrar();\n    }\n    return instance_;\n  }\n\n  // Returns the name of the window class, registering the class if it hasn't\n  // previously been registered.\n  const wchar_t* GetWindowClass();\n\n  // Unregisters the window class. Should only be called if there are no\n  // instances of the window.\n  void UnregisterWindowClass();\n\n private:\n  WindowClassRegistrar() = default;\n\n  static WindowClassRegistrar* instance_;\n\n  bool class_registered_ = false;\n};\n\nWindowClassRegistrar* WindowClassRegistrar::instance_ = nullptr;\n\nconst wchar_t* WindowClassRegistrar::GetWindowClass() {\n  if (!class_registered_) {\n    WNDCLASS window_class{};\n    window_class.hCursor = LoadCursor(nullptr, IDC_ARROW);\n    window_class.lpszClassName = kWindowClassName;\n    window_class.style = CS_HREDRAW | CS_VREDRAW;\n    window_class.cbClsExtra = 0;\n    window_class.cbWndExtra = 0;\n    window_class.hInstance = GetModuleHandle(nullptr);\n    window_class.hIcon =\n        LoadIcon(window_class.hInstance, MAKEINTRESOURCE(IDI_APP_ICON));\n    window_class.hbrBackground = 0;\n    window_class.lpszMenuName = nullptr;\n    window_class.lpfnWndProc = Win32Window::WndProc;\n    RegisterClass(&window_class);\n    class_registered_ = true;\n  }\n  return kWindowClassName;\n}\n\nvoid WindowClassRegistrar::UnregisterWindowClass() {\n  UnregisterClass(kWindowClassName, nullptr);\n  class_registered_ = false;\n}\n\nWin32Window::Win32Window() {\n  ++g_active_window_count;\n}\n\nWin32Window::~Win32Window() {\n  --g_active_window_count;\n  Destroy();\n}\n\nbool Win32Window::Create(const std::wstring& title,\n                         const Point& origin,\n                         const Size& size) {\n  Destroy();\n\n  const wchar_t* 
window_class =\n      WindowClassRegistrar::GetInstance()->GetWindowClass();\n\n  const POINT target_point = {static_cast<LONG>(origin.x),\n                              static_cast<LONG>(origin.y)};\n  HMONITOR monitor = MonitorFromPoint(target_point, MONITOR_DEFAULTTONEAREST);\n  UINT dpi = FlutterDesktopGetDpiForMonitor(monitor);\n  double scale_factor = dpi / 96.0;\n\n  HWND window = CreateWindow(\n      window_class, title.c_str(), WS_OVERLAPPEDWINDOW,\n      Scale(origin.x, scale_factor), Scale(origin.y, scale_factor),\n      Scale(size.width, scale_factor), Scale(size.height, scale_factor),\n      nullptr, nullptr, GetModuleHandle(nullptr), this);\n\n  if (!window) {\n    return false;\n  }\n\n  UpdateTheme(window);\n\n  return OnCreate();\n}\n\nbool Win32Window::Show() {\n  return ShowWindow(window_handle_, SW_SHOWNORMAL);\n}\n\n// static\nLRESULT CALLBACK Win32Window::WndProc(HWND const window,\n                                      UINT const message,\n                                      WPARAM const wparam,\n                                      LPARAM const lparam) noexcept {\n  if (message == WM_NCCREATE) {\n    auto window_struct = reinterpret_cast<CREATESTRUCT*>(lparam);\n    SetWindowLongPtr(window, GWLP_USERDATA,\n                     reinterpret_cast<LONG_PTR>(window_struct->lpCreateParams));\n\n    auto that = static_cast<Win32Window*>(window_struct->lpCreateParams);\n    EnableFullDpiSupportIfAvailable(window);\n    that->window_handle_ = window;\n  } else if (Win32Window* that = GetThisFromHandle(window)) {\n    return that->MessageHandler(window, message, wparam, lparam);\n  }\n\n  return DefWindowProc(window, message, wparam, lparam);\n}\n\nLRESULT\nWin32Window::MessageHandler(HWND hwnd,\n                            UINT const message,\n                            WPARAM const wparam,\n                            LPARAM const lparam) noexcept {\n  switch (message) {\n    case WM_DESTROY:\n      window_handle_ = nullptr;\n      Destroy();\n  
    if (quit_on_close_) {\n        PostQuitMessage(0);\n      }\n      return 0;\n\n    case WM_DPICHANGED: {\n      auto newRectSize = reinterpret_cast<RECT*>(lparam);\n      LONG newWidth = newRectSize->right - newRectSize->left;\n      LONG newHeight = newRectSize->bottom - newRectSize->top;\n\n      SetWindowPos(hwnd, nullptr, newRectSize->left, newRectSize->top, newWidth,\n                   newHeight, SWP_NOZORDER | SWP_NOACTIVATE);\n\n      return 0;\n    }\n    case WM_SIZE: {\n      RECT rect = GetClientArea();\n      if (child_content_ != nullptr) {\n        // Size and position the child window.\n        MoveWindow(child_content_, rect.left, rect.top, rect.right - rect.left,\n                   rect.bottom - rect.top, TRUE);\n      }\n      return 0;\n    }\n\n    case WM_ACTIVATE:\n      if (child_content_ != nullptr) {\n        SetFocus(child_content_);\n      }\n      return 0;\n\n    case WM_DWMCOLORIZATIONCOLORCHANGED:\n      UpdateTheme(hwnd);\n      return 0;\n  }\n\n  return DefWindowProc(window_handle_, message, wparam, lparam);\n}\n\nvoid Win32Window::Destroy() {\n  OnDestroy();\n\n  if (window_handle_) {\n    DestroyWindow(window_handle_);\n    window_handle_ = nullptr;\n  }\n  if (g_active_window_count == 0) {\n    WindowClassRegistrar::GetInstance()->UnregisterWindowClass();\n  }\n}\n\nWin32Window* Win32Window::GetThisFromHandle(HWND const window) noexcept {\n  return reinterpret_cast<Win32Window*>(\n      GetWindowLongPtr(window, GWLP_USERDATA));\n}\n\nvoid Win32Window::SetChildContent(HWND content) {\n  child_content_ = content;\n  SetParent(content, window_handle_);\n  RECT frame = GetClientArea();\n\n  MoveWindow(content, frame.left, frame.top, frame.right - frame.left,\n             frame.bottom - frame.top, true);\n\n  SetFocus(child_content_);\n}\n\nRECT Win32Window::GetClientArea() {\n  RECT frame;\n  GetClientRect(window_handle_, &frame);\n  return frame;\n}\n\nHWND Win32Window::GetHandle() {\n  return window_handle_;\n}\n\nvoid 
Win32Window::SetQuitOnClose(bool quit_on_close) {\n  quit_on_close_ = quit_on_close;\n}\n\nbool Win32Window::OnCreate() {\n  // No-op; provided for subclasses.\n  return true;\n}\n\nvoid Win32Window::OnDestroy() {\n  // No-op; provided for subclasses.\n}\n\nvoid Win32Window::UpdateTheme(HWND const window) {\n  DWORD light_mode;\n  DWORD light_mode_size = sizeof(light_mode);\n  LSTATUS result = RegGetValue(HKEY_CURRENT_USER, kGetPreferredBrightnessRegKey,\n                               kGetPreferredBrightnessRegValue,\n                               RRF_RT_REG_DWORD, nullptr, &light_mode,\n                               &light_mode_size);\n\n  if (result == ERROR_SUCCESS) {\n    BOOL enable_dark_mode = light_mode == 0;\n    DwmSetWindowAttribute(window, DWMWA_USE_IMMERSIVE_DARK_MODE,\n                          &enable_dark_mode, sizeof(enable_dark_mode));\n  }\n}\n"
  },
  {
    "path": "examples/hello_world_flutter/windows/runner/win32_window.h",
    "content": "#ifndef RUNNER_WIN32_WINDOW_H_\n#define RUNNER_WIN32_WINDOW_H_\n\n#include <windows.h>\n\n#include <functional>\n#include <memory>\n#include <string>\n\n// A class abstraction for a high DPI-aware Win32 Window. Intended to be\n// inherited from by classes that wish to specialize with custom\n// rendering and input handling\nclass Win32Window {\n public:\n  struct Point {\n    unsigned int x;\n    unsigned int y;\n    Point(unsigned int x, unsigned int y) : x(x), y(y) {}\n  };\n\n  struct Size {\n    unsigned int width;\n    unsigned int height;\n    Size(unsigned int width, unsigned int height)\n        : width(width), height(height) {}\n  };\n\n  Win32Window();\n  virtual ~Win32Window();\n\n  // Creates a win32 window with |title| that is positioned and sized using\n  // |origin| and |size|. New windows are created on the default monitor. Window\n  // sizes are specified to the OS in physical pixels, hence to ensure a\n  // consistent size this function will scale the inputted width and height as\n  // as appropriate for the default monitor. The window is invisible until\n  // |Show| is called. Returns true if the window was created successfully.\n  bool Create(const std::wstring& title, const Point& origin, const Size& size);\n\n  // Show the current window. Returns true if the window was successfully shown.\n  bool Show();\n\n  // Release OS resources associated with window.\n  void Destroy();\n\n  // Inserts |content| into the window tree.\n  void SetChildContent(HWND content);\n\n  // Returns the backing Window handle to enable clients to set icon and other\n  // window properties. 
Returns nullptr if the window has been destroyed.\n  HWND GetHandle();\n\n  // If true, closing this window will quit the application.\n  void SetQuitOnClose(bool quit_on_close);\n\n  // Return a RECT representing the bounds of the current client area.\n  RECT GetClientArea();\n\n protected:\n  // Processes and route salient window messages for mouse handling,\n  // size change and DPI. Delegates handling of these to member overloads that\n  // inheriting classes can handle.\n  virtual LRESULT MessageHandler(HWND window,\n                                 UINT const message,\n                                 WPARAM const wparam,\n                                 LPARAM const lparam) noexcept;\n\n  // Called when CreateAndShow is called, allowing subclass window-related\n  // setup. Subclasses should return false if setup fails.\n  virtual bool OnCreate();\n\n  // Called when Destroy is called.\n  virtual void OnDestroy();\n\n private:\n  friend class WindowClassRegistrar;\n\n  // OS callback called by message pump. Handles the WM_NCCREATE message which\n  // is passed when the non-client area is being created and enables automatic\n  // non-client DPI scaling so that the non-client area automatically\n  // responds to changes in DPI. 
All other messages are handled by\n  // MessageHandler.\n  static LRESULT CALLBACK WndProc(HWND const window,\n                                  UINT const message,\n                                  WPARAM const wparam,\n                                  LPARAM const lparam) noexcept;\n\n  // Retrieves a class instance pointer for |window|\n  static Win32Window* GetThisFromHandle(HWND const window) noexcept;\n\n  // Update the window frame's theme to match the system theme.\n  static void UpdateTheme(HWND const window);\n\n  bool quit_on_close_ = false;\n\n  // window handle for top level window.\n  HWND window_handle_ = nullptr;\n\n  // window handle for hosted content.\n  HWND child_content_ = nullptr;\n};\n\n#endif  // RUNNER_WIN32_WINDOW_H_\n"
  },
  {
    "path": "examples/vertex_ai_matching_engine_setup/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n"
  },
  {
    "path": "examples/vertex_ai_matching_engine_setup/README.md",
    "content": "# Vertex AI Matching Engine Setup\n\nScript that creates a [Vertex AI Matching Engine](https://cloud.google.com/vertex-ai/docs/matching-engine/overview) \nindex and index endpoint ready to be used with LangChain.dart \n[`VertexAIMatchingEngine`](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html) \nvector store.\n\n## Usage\n\n1. Create a Cloud Storage bucket.\n2. Create a test document and upload it to the bucket: `/documents/0.json`  \n   JSON structure: `{\"id\": \"0\", \"pageContent\": \"...\", \"metadata\": {}}`\n3. Create its embedding and place it in the bucket: /indexes/index.json  \n   JSON structure: `{\"id\": \"0\", \"embedding\": [0.1, 0.2, 0.3, ...]}`\n4. Change the config in the script (`projectId`, `projectLocation`, etc.)\n5. Run this script: `dart run bin/vertex_ai_matching_engine_setup.dart`\n6. The script will output the configuration for `VertexAIMatchingEngine`.\n\nExample output:\n\n```dart\nfinal vectorStore = VertexAIMatchingEngine(\n  authHttpClient: authClient,\n  project: 'my-project-id',\n  location: 'europe-west1',\n  queryRootUrl: 'https://xxxxxxxxxx.europe-west1-xxxxxxxxxxxx.vdb.vertexai.goog/',\n  indexId: 'xxxxxxxxxx',\n  gcsBucketName: 'my_index_bucket',\n  embeddings: embeddings,\n);\n```\n"
  },
  {
    "path": "examples/vertex_ai_matching_engine_setup/bin/vertex_ai_matching_engine_setup.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:gcloud/storage.dart';\nimport 'package:googleapis_auth/auth_io.dart';\nimport 'package:vertex_ai/vertex_ai.dart';\n\n/// Creates a Vertex AI Matching Engine index and index endpoint ready to be\n/// used with LangChains.dart `VertexAIMatchingEngine` vector store.\n///\n/// Steps:\n/// 1. Create a Cloud Storage bucket.\n/// 2. Create a test document and upload it to the bucket: `/documents/0.json`\n///    JSON structure: `{\"id\": \"0\", \"pageContent\": \"...\", \"metadata\": {}}`\n/// 3. Create its embedding and place it in the bucket: /indexes/index.json\n///    JSON structure: `{\"id\": \"0\", \"embedding\": [0.1, 0.2, 0.3, ...]}`\n/// 4. Change the config below (`projectId`, `projectLocation`, etc.)\n/// 5. Run this script.\n/// 6. The script will output the configuration for `VertexAIMatchingEngine`.\nvoid main(final List<String> arguments) async {\n  // Config\n  const projectId = 'my-project-id';\n  const projectLocation = 'europe-west1';\n  const indexName = 'my_index';\n  const indexDescription = 'My index description';\n  const bucketName = 'my_index_bucket';\n  const embeddingsDimensions = 768;\n  const shardSize = VertexAIShardSize.small;\n\n  // Get authenticated HTTP client\n  print('\\n> Authenticating...');\n  final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n    json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n  );\n  final authClient = await clientViaServiceAccount(serviceAccountCredentials, [\n    VertexAIGenAIClient.cloudPlatformScope,\n    ...Storage.SCOPES,\n  ]);\n\n  // Get Vertex AI client\n  print('\\n> Creating client...');\n  final marchingEngine = VertexAIMatchingEngineClient(\n    httpClient: authClient,\n    project: projectId,\n    location: projectLocation,\n  );\n\n  // Create index\n  print('\\n> Creating index (takes around 30min)...');\n  VertexAIOperation indexOperation = await 
marchingEngine.indexes.create(\n    displayName: indexName,\n    description: indexDescription,\n    metadata: const VertexAIIndexRequestMetadata(\n      contentsDeltaUri: 'gs://$bucketName/indexes',\n      config: VertexAINearestNeighborSearchConfig(\n        dimensions: embeddingsDimensions,\n        algorithmConfig: VertexAIBruteForceAlgorithmConfig(),\n        shardSize: shardSize,\n      ),\n    ),\n  );\n\n  // Poll for operation completion\n  while (!indexOperation.done) {\n    print('In progress...');\n    await Future<void>.delayed(const Duration(seconds: 10));\n    indexOperation = await marchingEngine.indexes.operations.get(\n      name: indexOperation.name,\n    );\n  }\n\n  // Create index endpoint\n  print('\\n> Creating index endpoint (takes around 1min)...');\n  const indexEndpointName = '${indexName}_endpoint';\n  VertexAIOperation indexEndpointOperation = await marchingEngine.indexEndpoints\n      .create(\n        displayName: indexEndpointName,\n        description: 'Index endpoint of $indexName',\n        publicEndpointEnabled: true,\n      );\n\n  // Poll for operation completion\n  while (!indexEndpointOperation.done) {\n    print('In progress...');\n    await Future<void>.delayed(const Duration(seconds: 10));\n    indexEndpointOperation = await marchingEngine.indexEndpoints.operations.get(\n      name: indexEndpointOperation.name,\n    );\n  }\n\n  // Get created index and endpoint\n  print('\\n> Getting index and endpoint...');\n  final indexes = await marchingEngine.indexes.list();\n  final index = indexes.firstWhere(\n    (final index) => index.displayName == indexName,\n  );\n  final indexId = index.id;\n  final indexEndpoints = await marchingEngine.indexEndpoints.list();\n  final indexEndpoint = indexEndpoints.firstWhere(\n    (final indexEndpoint) => indexEndpoint.displayName == indexEndpointName,\n  );\n  final indexEndpointId = indexEndpoint.id;\n  print('Index ID: $indexId');\n  print('Index endpoint ID: $indexEndpointId');\n\n  // 
Deploy index to endpoint\n  print('\\n> Deploying index to endpoint (takes around 30min)...');\n  VertexAIOperation deployOperation = await marchingEngine.indexEndpoints\n      .deployIndex(\n        indexId: indexId,\n        indexEndpointId: indexEndpointId,\n        deployedIndexId: '${indexName}_deployed',\n        deployedIndexDisplayName: '${indexName}_deployed',\n      );\n\n  // Poll for operation completion\n  while (!deployOperation.done) {\n    print('In progress...');\n    await Future<void>.delayed(const Duration(seconds: 10));\n    deployOperation = await marchingEngine.indexEndpoints.operations.get(\n      name: deployOperation.name,\n    );\n  }\n\n  // Get deployed index\n  print('\\n> Index ready to be used!');\n  print('You can now use it in LangChain.dart:');\n  print('''\nfinal vectorStore = VertexAIMatchingEngine(\n  httpClient: authClient,\n  project: '$projectId',\n  location: '$projectLocation',\n  queryRootUrl: 'http://${indexEndpoint.publicEndpointDomainName}/',\n  indexId: '$indexId',\n  gcsBucketName: '$bucketName',\n  embeddings: embeddings,\n);''');\n}\n"
  },
  {
    "path": "examples/vertex_ai_matching_engine_setup/pubspec.yaml",
    "content": "name: vertex_ai_matching_engine_setup\ndescription: A script to setup Vertex AI Matching Engine to be used in LangChain.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  gcloud: ^0.9.0\n  googleapis_auth: ^2.0.0\n  http: ^1.5.0\n  vertex_ai: ^0.2.1\n"
  },
  {
    "path": "examples/wikivoyage_eu/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n"
  },
  {
    "path": "examples/wikivoyage_eu/README.md",
    "content": "# Wikivoyage EU\n\nThis example demonstrates how to build a fully local Retrieval Augmented Generation (RAG) pipeline with Llama 3 and ObjectBox using LangChain.dart and Ollama.\n\n> This example is adapted from [Ashmi Banerjee](https://ashmibanerjee.com)'s workshop \"[Building a RAG using Google Gemma and MongoDB](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk)\".\n\n![RAG Pipeline](rag.png)\n*Figure 1: RAG Architecture (source: [Ashmi Banerjee](https://colab.research.google.com/drive/1CviSVwnwl73ph-AhTB0Z8vYcOQrjityk))*\n\n## Setup\n\n### 1. Install Ollama \n\n- Go to the [Ollama](https://ollama.ai/) website and download the latest version of the Ollama app.\n\n### 2. Download models\n\n- For this example we will be using the following models:\n  * Embedding model: [`jina/jina-embeddings-v2-small-en`](https://ollama.com/jina/jina-embeddings-v2-small-en)\n  * LLM: [`llama3.2`](https://ollama.com/library/llama3.2)\n- Open your terminal and run:\n```bash\nollama pull jina/jina-embeddings-v2-small-en\nollama run llama3.2\n``` \n\n### 3. Setup ObjectBox\n\n- We will be using [ObjectBox](https://objectbox.io) for our vector store.\n- In order to use ObjectBox, we need to download the ObjectBox C library. You can find more information on how to do this [here](https://docs.objectbox.io/getting-started).\n```bash\nbash <(curl -s https://raw.githubusercontent.com/objectbox/objectbox-dart/main/install.sh) \n```\n\n### 4. Get dependencies\n\n```bash\ndart pub get\n```\n\n## How it works\n\nThe example has two scripts:\n1. `injestion.dart`: This script reads the Wikivoyage dataset, creates embeddings from the data and stores it in the ObjectBox database.\n2. 
`wikivoyage_eu.dart`: This script implements the chatbot implementing the RAG pipeline.\n\n### Ingestion\n\nWe will be using data from [Wikivoyage](https://wikivoyage.org), a freely accessible online travel guide authored by volunteers.\n\nThe `wikivoyage_eu_dataset.csv` file contains data from 160 European cities, including the city name, country, coordinates, population and a brief description:\n\n| city      | country     | lat     | lng    | population | abstract                                                                                                                                                                                                           |\n|-----------|-------------|---------|--------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| Amsterdam | Netherlands | 52.3728 | 4.8936 | 1459402.0  | Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges. |\n\nThe  script does the following:\n1. It uses LangChain.dart's `CsvLoader` to load the `wikivoyage_eu_dataset.csv` dataset.\n2. It uses the `jina/jina-embeddings-v2-small-en` model to create embeddings for each city's data. The generated embeddings have 1024 dimensions.\n   + *As the data for each city is not very large, we won't be chunking it into smaller parts, but you could easily do that using the `RecursiveCharacterTextSplitter` class.*\n3. It stores the embeddings in the ObjectBox vector database.\n\nYou can run the script using:\n```bash\n$ dart run bin/injestion.dart\nAdded 160 documents to the vector store.\n```\n\n### Chatbot\n\nThe chatbot script implements the RAG pipeline. It does the following:\n1. Takes a user query as input.\n2. 
Uses the `jina/jina-embeddings-v2-small-en` model to create an embedding for the query.\n3. Retrieves the 5 most similar documents from the ObjectBox database.\n4. Builds a prompt using the retrieved documents and the query.\n5. Uses the `llama3.2` model to generate a response to the prompt.\n\nYou can run the script using:\n```bash\n$ dart run bin/wikivoyage_eu.dart\n```\n\n![Wikivoyage EU](wikivoyage_eu.gif)\n\n## Conclusion\n\nThis example demonstrates how to build a simple RAG pipeline that can run locally on your machine. You can easily extend this example to build more complex RAG pipelines with more advanced retrieval and generation techniques. Check out the [LangChain.dart](https://langchaindart.dev/) documentation for more information. \n\nFor simplicity, this example is a CLI application. However, you can easily adapt this code to work in a Flutter app. To get started with ObjectBox in Flutter, refer to the [ObjectBox documentation](https://docs.objectbox.io/getting-started).\n"
  },
  {
    "path": "examples/wikivoyage_eu/bin/injestion.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main() async {\n  final loader = CsvLoader('bin/wikivoyage_eu_dataset.csv');\n  final docs = await loader.load();\n\n  final embeddings = OllamaEmbeddings(\n    model: 'jina/jina-embeddings-v2-small-en',\n  );\n  final vectorStore = ObjectBoxVectorStore.open(\n    embeddings: embeddings,\n    dimensions: 512,\n  );\n\n  final ids = await vectorStore.addDocuments(documents: docs);\n  print('Added ${ids.length} documents to the vector store.');\n\n  embeddings.close();\n}\n"
  },
  {
    "path": "examples/wikivoyage_eu/bin/wikivoyage_eu.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main() async {\n  final vectorStore = ObjectBoxVectorStore.open(\n    embeddings: OllamaEmbeddings(model: 'jina/jina-embeddings-v2-small-en'),\n    dimensions: 512,\n  );\n\n  final retriever = vectorStore.asRetriever(\n    defaultOptions: VectorStoreRetrieverOptions(\n      searchType: ObjectBoxSimilaritySearch(k: 5),\n    ),\n  );\n  final setupAndRetrieval = Runnable.fromMap<String>({\n    'context': retriever.pipe(\n      Runnable.mapInput(\n        (docs) => docs.map((d) => d.pageContent).join('\\n---\\n'),\n      ),\n    ),\n    'question': Runnable.passthrough(),\n  });\n\n  final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n    (\n      ChatMessageType.system,\n      '''\nHere is some data from Wikivoyage about travel destinations in Europe:\n\n<context>\n{context}\n</context>\n\nPlease read the Wikivoyage data carefully and consider how you can best answer the user's question using only the information provided.\n\nUse ANSI escape codes instead of Markdown to format your answer. \nFor example, `\\x1B[1m<text>\\x1B[0m` will make \"<text>\" bold.\n\nIf the user's question is not about Europe, just respond with:\n\"I can only help you with vacation planning in Europe.\"\nDo not provide any other suggestion if the question is not about Europe.\n''',\n    ),\n    (ChatMessageType.human, '{question}'),\n  ]);\n\n  final model = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n  );\n  const outputParser = StringOutputParser<ChatResult>();\n  final chain =\n      setupAndRetrieval //\n          .pipe(promptTemplate)\n          .pipe(model)\n          .pipe(outputParser);\n\n  stdout.writeln(\n    'Hello! 
Ask me anything about your vacation plans in Europe, '\n    'and I will provide you with the best itinerary.',\n  );\n\n  while (true) {\n    stdout.write('> ');\n    final query = stdin.readLineSync() ?? '';\n\n    if (query.toLowerCase() == 'q') {\n      break;\n    }\n\n    final stream = chain.stream(query);\n    await stream.forEach(stdout.write);\n    stdout.write('\\n\\n');\n  }\n\n  chain.close();\n}\n"
  },
  {
    "path": "examples/wikivoyage_eu/bin/wikivoyage_eu_dataset.csv",
    "content": "city,country,lat,lng,population,abstract\nAalborg,Denmark,57.05,9.9167,143598.0,\"Aalborg is the largest city in North Jutland, Denmark. Its population, as of 2016, is 134,672, making it the fourth largest city in Denmark.\"\nAdana,Turkey,37.0,35.3213,1765981.0,\"Adana is a city on the Cilician Plains of central Turkey, on the Seyhan River about 50 km from the Mediterranean coast. It's industrial and mostly modern but with several places of interest in its historic centre.\"\nAmsterdam,Netherlands,52.3728,4.8936,1459402.0,\"Amsterdam is the capital of the Netherlands. It is known for the canals that cross the city, its impressive architecture, museums and art gallerie, its notorious red light district, and more than 1,500 bridges.\"\nAncona,Italy,43.6169,13.5167,100924.0,Ancona is the capital of the Italian region called the Marches and an important port city on the coast of the Adriatic Sea.\nAnkara,Turkey,39.93,32.85,5503985.0,\"Ankara is the capital of Turkey, central within the country on the plateau of Central Anatolia. It's a sprawling modern place around an ancient citadel, and in 2022 had a population of almost 5.\"\nAntalya,Turkey,36.8874,30.7075,2426356.0,\"Antalya is a city in Pamphylia on the Turkish Mediterranean coast, and the chief resort of the \"\"Turkish Riviera\"\". It's a metropolis with a population of  2.\"\nArad,Romania,46.175,21.3125,159074.0,There is more than one place in the world called Arad. You might be looking for:\nArkhangelsk,Russia,64.55,40.5333,351488.0,\"Arkhangelsk (population 350,000 in 2018) is a regional center in Northwestern Russia, located on both banks of Northern Dvina river near its mouth on the White Sea, about 1250 km by road to the north of Moscow and about 1100 km northeast of Saint Petersburg. 
It is part of the Silver Ring of cultural and historical centers of Northwestern Russia.\"\nAstrakhan,Russia,46.35,48.035,532504.0,Astrakhan (Russian: А́страхань AH-struh-khun) is a city in Russia.\nBaia Mare,Romania,47.6667,23.5833,123738.0,Baia Mare is a city in north-western Romania.\nBaku,Azerbaijan,40.3667,49.8352,2300500.0,Baku (Azeri: Bakı) is the capital of Azerbaijan and is the largest city in the Caucasus. Baku's Old Town has UNESCO World Heritage status.\nBarcelona,Spain,41.3825,2.1769,4800000.0,\"Barcelona is Spain's second largest city, with a population of nearly two million people, and the capital of Catalonia. A major port on the northeastern Mediterranean coast of Spain, Barcelona has a wide variety of attractions that bring in tourists from around the globe.\"\nBari,Italy,41.1253,16.8667,323370.0,\"Bari (Bari dialect: Bare) is the capital of the Apulia region of Italy, on the Adriatic Sea. With a population of 317,000 (in 2019), it's the second largest city in Southern Italy after Naples.\"\nBatman,Turkey,37.887,41.132,447106.0,\"Batman (pronounced as baat-maan, not like the name of the superhero; Kurdish: Iluh) is a city in southeastern Anatolia. It is the capital of an important oil producing province.\"\nBelgrade,Serbia,44.82,20.46,1378682.0,\"Belgrade (Serbian: Београд, Beograd) is the capital of the Republic of Serbia and the country's largest city. Belgrade has been re-emerging as a tourist destination in the past years.\"\nBergen,Norway,60.3894,5.33,267117.0,\"Bergen is Norway's second largest city and the most popular gateway to the fjords of West Norway. The city is renowned for its great location amidst mountains, fjords, and the ocean.\"\nBerlin,Germany,52.52,13.405,4473101.0,\"Berlin is Germany's capital and biggest city. Within the city limits, Berlin in 2022 had a population of 3.\"\nBologna,Italy,44.4939,11.3428,392564.0,\"Bologna (Emilian: Bulåggna) is a beautiful and historic city in the Emilia-Romagna region of Northeast Italy. 
It has the oldest university in the Western world, a lively student population, excellent food, a striking brick terracotta-roofed cityscape, and lots to see and do.\"\nBordeaux,France,44.84,-0.58,260958.0,\"Bordeaux is a city in the Gironde region of southwest France, standing on the River Garonne. It's the country's fifth largest city, with a population of 259,809 in 2020, and another million living in its associated towns.\"\nBraga,Portugal,41.5503,-8.42,181494.0,\"Braga is one of the five largest cities of Portugal, situated in the Minho region in the North of the country. It is known for its abundance of churches and thus called the \"\"city of archbishops\"\".\"\nBratislava,Slovakia,48.1439,17.1097,475503.0,\"Bratislava (Hungarian: Pozsony, German: Pressburg, known as Prešporok before 1919), is the capital and largest city of Slovakia. It has a population of more than 475,000 (2021), and is the administrative, cultural and economic centre of the country.\"\nBremen,Germany,53.0833,8.8,566573.0,\"The Free Hanseatic City of Bremen  is a city in northern Germany with a major port on the River Weser. The population is 567,000 (2020).\"\nBrest,Belarus,52.1347,23.6569,340723.0,There is more than one place called Brest:\nBrno,Czechia,49.1925,16.6083,382405.0,\"Brno (pronounced Bruhno) (German: Brünn, Štatl in the local dialect) is the major city of Moravia (a historical region in the Czech Republic). It is the largest city in Moravia and the second-largest city in the Czech Republic by population and area.\"\nBrussels,Belgium,50.8467,4.3525,1743000.0,\"Brussels (French: Bruxelles, Dutch: Brussel) is the capital of Belgium and one of the three administrative regions within the country, together with Flanders and Wallonia. 
Apart from its role within its country, it is also an internationally important city, hosting numerous international institutions, and in particular the headquarters of NATO and the core institutions of the European Union.\"\nBudapest,Hungary,47.4925,19.0514,2997958.0,\"Budapest is the capital city of Hungary. With a unique, youthful atmosphere, world-class classical music scene, a pulsating nightlife increasingly appreciated among European youth, and last but not least, an exceptional offer of natural thermal baths, Budapest is one of Europe's most delightful and enjoyable cities.\"\nBurgas,Bulgaria,42.503,27.4702,210813.0,Burgas (also Bourgas) is a city on the Black Sea coast of Bulgaria. It is a large industrial centre with many tourist attractions in the region.\nBursa,Turkey,40.1833,29.05,2901396.0,\"Bursa is a large city in the Southern Marmara region of Turkey, 20 km inland from the Marmara coast. It's the country's fourth-largest city, with a population of 2,161,990 in 2021, and with another million living in the wider metro area.\"\nBydgoszcz,Poland,53.1219,18.0003,346739.0,\"Bydgoszcz (German: Bromberg) is a major city of 360,000 in Poland and with suburban area the agglomeration has nearly 500,000. It has well preserved 19th-century architecture and was known as Little Berlin before the world wars.\"\nCagliari,Italy,39.2278,9.1111,154106.0,\"Cagliari (Sardinian: Casteddu, \"\"castle\"\"; Latin: Caralis) is the capital city of the Italian island of Sardinia.\"\nCheboksary,Russia,56.1333,47.25,489498.0,\"Cheboksary (Russian: Чебокса́ры chee-bahk-SAH-ree) is the capital of Chuvashia in the Volga Region of the Russian Federation. 
About 600,000 people live here and in the nearby satellite city Novocheboksarsk.\"\nChelyabinsk,Russia,55.15,61.4,1202371.0,\"Chelyabinsk (Russian: Челя́бинск cheel-YAH-beensk) is a big city, with more than a million inhabitants, the capital of Chelyabinsk Oblast in the European part of Russia.\"\nCluj-Napoca,Romania,46.7667,23.5833,324576.0,\"Cluj-Napoca (Romanian), Kolozsvár (Hungarian) or Klausenburg (German) is the capital of Cluj county and the unofficial capital of the historical region of Transylvania. The city, with about 320,000 people (2016), is very pleasant, and it is a great experience for those who want to see urban Transylvanian life at its best.\"\nCoimbra,Portugal,40.2111,-8.4292,143396.0,\"Coimbra is the traditional capital city of Central Portugal's historic Beira Litoral region. With over 140,000 inhabitants (2021), it is the largest municipality there and one of Portugal's four largest metropolises.\"\nCopenhagen,Denmark,55.6761,12.5683,1366301.0,\"Copenhagen (Danish: København) is the capital city of Denmark and forms the moderate conurbation that one million Danes call home. It is big enough to form a small Danish metropolis, with shopping, culture and nightlife par excellence, yet small enough still to feel intimate and be safe.\"\nCork,Ireland,51.8972,-8.47,222333.0,\"Cork is the principal city of County Cork in southwest Ireland. It was already the second-largest city in Ireland when in 2019 its boundaries were extended, to have a population of 210,000.\"\nCraiova,Romania,44.3333,23.8167,269506.0,\"Craiova with 306,000 inhabitants (2016), is one of the five largest cities of Romania. 
Craiova is in the southwestern region of the country and hosts the administrative buildings of the Dolj County and of the Oltenia district.\"\nDebrecen,Hungary,47.53,21.6392,328642.0,[a Nagytemplom télen.jpg|thumb|400px|The Great Church of Debrecen in winter]\nDenizli,Turkey,37.7667,29.0833,1027782.0,\"Denizli is a city in the Southern Aegean region of Turkey, which most visitors simply transit to reach Pamukkale 20 km north. It's a typical modern Turkish city, far from picturesque, but does have enough sights of its own if your schedule allows.\"\nDijon,France,47.3167,5.0167,158002.0,\"Dijon is the largest city in the eastern French region of Bourgogne-Franche-Comté. Dijon is best known for its mustard (named after the town), which is no longer produced in its metropolitan area, but it is still one of the most beautiful cities in France, and its historic buildings and byways were not heavily damaged by bombing in World War II and are largely intact.\"\nDonetsk,Ukraine,48.0028,37.8053,929063.0,\"Donetsk (Ukrainian: Донецьк, Russian: Доне́цк) is a city in the Donetsk People's Republic, on the banks of the River Kalmius.\"\nDresden,Germany,51.05,13.74,561922.0,\"Dresden is the capital of Saxony (Sachsen). It's often referred to locally as Elbflorenz, or \"\"Florence on the Elbe\"\", reflecting its location on the Elbe river and its historical role as a centre for the arts and beautiful architecture - much like Florence in Italy.\"\nDublin,Ireland,53.35,-6.2603,1173179.0,\"Dublin (Irish: Baile Átha Cliath, \"\"Town of the Hurdled Ford\"\") is the capital city of Ireland. Its vibrancy, nightlife and tourist attractions are world renowned and it's the most popular entry point for international visitors to Ireland.\"\nErfurt,Germany,50.9833,11.0333,213835.0,Erfurt is the capital of the German state of Thuringia (Thüringen). 
The city is the largest one in that province and likewise a major transportation hub.\nErzincan,Turkey,39.7464,39.4914,157452.0,\"Erzincan is a city in Eastern Anatolia. It's modern, on a grid pattern, as its predecessor was destroyed by an earthquake in 1939.\"\nErzurum,Turkey,39.9086,41.2769,767848.0,\"Erzurum is a city in Eastern Anatolia, and is the hub for visiting eastern Turkey.\"\nGaziantep,Turkey,37.0628,37.3792,2028563.0,\"Gaziantep is a city in Southeastern Anatolia. Although it is a major city in Turkey (counting almost 2 million inhabitants) and known as the Turkish capital of gastronomy, it counts very few international tourists.\"\nGeneva,Switzerland,46.2017,6.1469,201818.0,thumb|200px|right|The old town of Geneva in the winter\nHamburg,Germany,53.55,10.0,2484800.0,\"The Free and Hanseatic City of Hamburg (Freie und Hansestadt Hamburg) is Germany's second-largest city and, at the same time, one of Germany's 16 federal states or Bundesländer. Prior to the formation of the modern German state, Hamburg for centuries enjoyed a status as de facto independent city state and regional power and trade hub in the North Sea.\"\nHelsinki,Finland,60.1708,24.9375,1268296.0,Helsinki (Helsingfors in Swedish) is Finland's capital and largest city. Helsinki combines modern and historic architectural styles with beautiful open spaces.\nInnsbruck,Austria,47.2683,11.3933,132493.0,\"Innsbruck is the fifth-largest city in Austria and the provincial capital of Tyrol, as well as one of the largest cities in the Alps. 
It is in a valley of the river Inn between mountain ranges of above 2000 m above sea level, halfway between Bavaria and northern Italy, and is a hub of a region popular for skiing and other mountain-related activities and a busy tourist destination.\"\nIoannina,Greece,39.6636,20.8522,113094.0,\"Ioannina (Ιωάννινα) (population: 112,486 (2011)) is a beautiful city in Northern Greece whose old town is surrounded by tall defensive walls.\"\nIsparta,Turkey,37.7647,30.5567,258375.0,\"Isparta (Greek: Σπάρτη, Baris) is a city of 220,000 inhabitants in the Lakes District of Mediterranean Turkey.\"\nIstanbul,Turkey,41.0136,28.955,16079000.0,\"Istanbul (Turkish: İstanbul) is a very large city of fantastic history, culture and beauty. Called Byzantium in ancient times, the city's name was changed to Constantinople in 324 CE when it was rebuilt by the first Christian Roman Emperor, Constantine.\"\nIvano-Frankivsk,Ukraine,48.9228,24.7106,238196.0,\"Ivano-Frankivsk (Ukrainian: Івано-Франківськ, also transliterated Ivano-Frankovsk from Russian: Ивано-Франковск) (formerly in Polish: Stanisławów, German: Stanislau) is a city in the Ukrainian part of East Galicia.\"\nIzmir,Turkey,38.42,27.14,4320519.0,\"thumb|270px|Clock tower in Konak Square, iconic symbol of the city\"\nKahramanmaras,Turkey,37.5833,36.9333,443575.0,\"Kahramanmaraş, which used to be known as Maraş, is a city in Turkey, located on the crossroad of southern, eastern and southeastern Turkey.\"\nKaliningrad,Russia,54.7003,20.4531,475056.0,\"Kaliningrad (Russian: Калинингра́д kuh-leen-een-GRAHD) , also known by its original German name, Königsberg, is the capital city of Kaliningrad Oblast in Russia. It has about 475,000 inhabitants (2018).\"\nKars,Turkey,40.6078,43.0958,115891.0,\"Kars is a city in Eastern Anatolia. 
It is most frequently visited as a jumping off point for travelers going to Ani, but it is a viable destination in its own right for its 19th-century Russian imperial buildings, and, of course, its role as the setting for Orhan Pamuk's famous novel Snow.\"\nKaunas,Lithuania,54.8972,23.8861,381007.0,\"Kaunas is the second-largest city in Lithuania, with a population of some 288,000 people. The main reason to visit is its charming Old Town, connected to the 19th century New Town ranged along Laisvės alėja.\"\nKayseri,Turkey,38.7225,35.4875,1389680.0,\"Kayseri is a city in Central Anatolia, 350 km southeast of Ankara. In 2021 the population was 1.\"\nKazan,Russia,55.7964,49.1089,1243500.0,Kazan (Russian: Каза́нь kuh-ZAHN)  is the capital of Russia's republic of Tatarstan and the center of the world Tatar culture.\nKharkiv,Ukraine,49.9925,36.2311,1446107.0,\"Kharkiv (Ukrainian: Харків, also transliterated Kharkov from Russian: Харьков) is a major city in the Kharkiv region of Ukraine and is the second largest city in Ukraine with a population of over 1.5 million inhabitants.\"\nKiel,Germany,54.3233,10.1394,246601.0,\"Kiel is the capital city of the German state of Schleswig-Holstein and has a population of roughly 248,000 (2018). It is located on the Baltic Sea at the end of the \"\"Kieler Förde\"\".\"\nKirov,Russia,58.6,49.65,501468.0,\"Kirov (Russian: Ки́ров KEE-ruhf) is the capital city of Kirov Oblast, Russia.\"\nKlagenfurt,Austria,46.6167,14.3,101403.0,Klagenfurt (Slovenian: Celovec) is the capital of Carinthia in Austria. It was one of the eight host cities in the 2008 European Football Championships.\nKonya,Turkey,37.8667,32.4833,2232374.0,\"Konya is a city in Central Anatolia in Turkey, known as the city of \"\"whirling dervishes\"\" and for its outstanding Seljuk architecture. 
In 2021 Konya metropolis had a population of 2,277,017, the sixth largest in Turkey, but the area of most interest is compact.\"\nKrasnodar,Russia,45.0333,38.9667,948827.0,\"Krasnodar is the capital of Krasnodar Krai in southern Russia, with a popolulation in 2018 of just under 900,000. Its main industries are based on agriculture and food.\"\nKutaisi,Georgia,42.25,42.7,147900.0,\"Kutaisi is a city in the Rioni Region of Georgia. The city itself is very cinematographic and charming, and a visit to Kutaisi is almost mandatory to see the Bagrati Cathedral and Gelati Monastery, which are UNESCO World Heritage sites and offer views from the mountain slopes over the city and the Rioni River.\"\nLille,France,50.6278,3.0583,234475.0,\"Lille (Dutch: Rijsel) is the capital of the Hauts-de-France region in northern France and the core of one of the largest metropolitan agglomerations in the country. Historically, it has also been the capital of Flanders, and later an industrial powerhouse, thanks to which it now boasts a large and handsome historic centre.\"\nLjubljana,Slovenia,46.0514,14.5061,286745.0,\"Ljubljana (\"\"lee-oo-blee-AH-nuh\"\") is the small but delightful capital of Slovenia. While the city's population had grown to 295,500 in 2020, the sights and amenities are concentrated in the charming old centre.\"\nLondon,United Kingdom,51.5072,-0.1275,11262000.0,\"Noisy, vibrant and truly multicultural,  London is a megalopolis of people, ideas and frenetic energy. 
The capital and largest city of the United Kingdom sits on the River Thames in South-East England, Greater London has a population of a little over 9 million.\"\nLuxembourg,Luxembourg,49.6117,6.1319,132780.0,\"The Grand Duchy of Luxembourg (Luxembourgish: Groussherzogtum Lëtzebuerg, French: Grand-Duché de Luxembourg, German: Großherzogtum Luxemburg), is a landlocked Benelux country at the crossroads of Germanic and Latin cultures.\"\nLviv,Ukraine,49.8425,24.0322,724314.0,\"Lviv (also spelled L'viv; Ukrainian: Львів; Polish: Lwów, German: Lemberg, Russian: Львов), formerly known as Lvov after its Russian name, is in Western Ukraine and used to be the capital of East Galicia. It's the biggest city of the region and a major Ukrainian cultural centre on the UNESCO World Heritage List.\"\nLyon,France,45.76,4.84,522969.0,\"Lyon is the capital of the French administrative region of Auvergne-Rhône-Alpes. A city of half a million, Lyon alone is the country's third-largest city, but its metropolitan area is only second in population to Paris.\"\nMaastricht,Netherlands,50.85,5.6833,277721.0,\"By many considered to be the most beautiful city of the country, Maastricht is the southernmost city in the Netherlands. It's the capital of the province of Limburg and famous for what the Dutch call the \"\"Burgundian\"\" way of life.\"\nMadrid,Spain,40.4169,-3.7033,6211000.0,\"Madrid is Spain's capital and largest city. A city that has been marked by Spain's varied and tumultuous history, Madrid has some of Europe's most impressive cultural and architectural heritage, which includes grand avenues, plazas, buildings and monuments, world-class art galleries and museums, highly popular football teams, and cultural events of international fame for everyone.\"\nMagdeburg,Germany,52.1317,11.6392,236188.0,\"Magdeburg is the capital city of the Bundesland of Saxony-Anhalt, Germany, with a population of 240,000 (2018). 
Magdeburg has become a modern city with numerous interesting sights of high importance and uniqueness, as well as many parks, which make Magdeburg the third greenest city in Germany.\"\nMalatya,Turkey,38.3486,38.3194,426381.0,thumb|350px|New Mosque at the central square\nMilan,Italy,45.4669,9.19,1366180.0,\"Milan (Italian: Milano; Milanese: Milan) is financially the most important city in Italy, and home to the Borsa Italiana stock exchange. It is the second most populous city proper in the country, but sits at the centre of Italy's largest urban and metropolitan area.\"\nMinsk,Belarus,53.9,27.5667,2009786.0,\"Minsk (Belarusian: Мінск, Russian: Минск) is the capital and largest city of the Republic of Belarus. Its population is about two million people in 2021.\"\nMiskolc,Hungary,48.0833,20.6667,150695.0,\"Miskolc, with population of about 157,000 (2017), is the third largest city in Hungary, located in the north-east of the country, east of Bükk mountains.\"\nMoscow,Russia,55.7558,37.6178,17332000.0,\"Since its founding in 1147, Moscow (Russian: Москва, Moskva) has been at the crossroads of history as the capital of empires and a frequent target for invaders. As the capital of the Russian Empire, the Soviet Union, and, today, the Russian Federation, it has played a central role in the development of the largest country in the world.\"\nMunich,Germany,48.1375,11.575,2606021.0,\"Munich (German: München, Bavarian: Minga) is the capital of the federal state of Bavaria in the south of Germany. Within the city limits, Munich in 2021 had a population of just under 1.\"\nMurcia,Spain,37.9861,-1.1303,672773.0,You could be looking for:\nMurmansk,Russia,68.9706,33.075,298096.0,\"Murmansk (Russian: Му́рманск) is a city in the extreme northwest of Russia and the world's largest city north of the Arctic Circle. 
It lies in the Kola Bay on the Kola Peninsula, by the Barents Sea.\"\nMykolaiv,Ukraine,46.975,31.995,498748.0,\"Mykolaiv (Ukrainian: Миколаїв, also transliterated Nikolaev or Nikolayev from Russian: Николаев) is a city in Southern Ukraine. It is an important shipbuilding centre and transportation hub for Ukraine, and has a large military presence.\"\nNalchik,Russia,43.4833,43.6167,265162.0,\"Nalchik is the capital city of Kabardino-Balkaria, a republic located in the very south of the Russian Federation.\"\nNantes,France,47.2181,-1.5528,318808.0,\"Nantes (Breton: Naoned) is the capital of Pays de la Loire region in northwest France. Historically it was part of Brittany, whose dukes built up its castle and made the town their capital.\"\nNaples,Italy,40.8333,14.25,966144.0,\"Naples (Italian: Napoli; Neapolitan: Napule) in Italy, an ancient port on the Mediterranean sea. With just short of a million citizens, is the third most populous municipality.\"\nNevsehir,Turkey,38.6264,34.7139,153117.0,\"Nevşehir is one of the major cities in Cappadoccia Region, which displays a beautiful combination of nature and history. The traditional main sources of income of the city, carpet weaving and viticulture, have been overtaken by tourism, because of its proximity to the underground shelters, the fairy chimneys, monasteries, caravanserais and the famous rock-hewn churches of Göreme.\"\nNicosia,Cyprus,35.1725,33.365,330000.0,Nicosia (Greek: Λευκωσία; Turkish: Lefkoşa) is the capital of Cyprus and is the largest city by far.\nNovi Sad,Serbia,45.2542,19.8425,380000.0,thumb|right|350px|Freedom square (Trg Slobode)\nOradea,Romania,47.0722,21.9211,196367.0,\"Oradea is one the few undiscovered gems of Romania's tourism. 
Despite being one of the largest and most important cities in Transylvania, and having a high degree of administrative, economic and commercial importance, it is often overlooked by tourists in favor of other Transylvanian cities such as Brasov, Sibiu, Sighisoara or Cluj-Napoca.\"\nOrenburg,Russia,51.7667,55.1,564773.0,\"Orenburg (Russian: Оренб'ург, Uh-rehn-BOORK) is the capital of Orenburg Oblast. Every citizen will point you the sign at the bridge across the Ural river, supposedly landmarking the geographical border between Europe and Asia (the actual boundary is further east).\"\nPamplona,Spain,42.8167,-1.65,203418.0,\"Pamplona (Basque: Iruña) is a city in Navarra, Spain. It is most famous world-wide for its San Fermín festival, held each year from July 6-14.\"\nParis,France,48.8567,2.3522,11060000.0,thumb|300px|The Eiffel Tower and the river Seine\nPenza,Russia,53.2,45.0,523726.0,There's more than one place called Penza:\nPerm,Russia,58.0139,56.2489,1048005.0,\"Perm (Russian: Пермь p`yehr`m`) is a city in Perm Krai, Russia.\"\nPerugia,Italy,43.1122,12.3889,165683.0,\"Perugia is a city in the Italian region of Umbria. It has an important university that attracts many foreign students, is a major center of medieval art, has a stunningly beautiful central area and is home of the Umbria Jazz Festival.\"\nPetrozavodsk,Russia,61.7833,34.35,278551.0,thumb|350 px|Old and New Petrozavodsk\nPlovdiv,Bulgaria,42.15,24.75,383540.0,thumb|Old Plovdiv\nPodgorica,Montenegro,42.4413,19.2629,150977.0,\"Podgorica (Montenegrin: Подгорица) is the capital of Montenegro. While not a typical European eye candy, the city is definitely worth visiting, owing to its interesting mix of old and new, its café culture and nightlife, and its laid back Mediterranean atmosphere.\"\nPorto,Portugal,41.1621,-8.622,1278210.0,\"Porto is Portugal's second largest city and the capital of the Northern region, and a busy industrial and commercial centre. 
The city isn't very populous (about 238,000 inhabitants in 2024), but the Porto metropolitan area has some 1.\"\nPrague,Czechia,50.0875,14.4214,1335084.0,\"Prague (Czech: Praha) is the capital and largest city of the Czech Republic. The city's historic buildings and narrow, winding streets are testament to its centuries-old role as capital of the historic region of Bohemia.\"\nPristina,Kosovo,42.6633,21.1622,161751.0,\"Pristina (Albanian: Prishtinë, Serbian: Priština), the capital city of Kosovo, is not beautiful: it is messy, with centuries-old Ottoman heritage competing with communist designs and post-communist architectural monstrosities. However, there is a powerful draw to this city of 162,000 people (2011), offering much to passing visitors.\"\nPskov,Russia,57.8167,28.3333,209840.0,\"Pskov is the largest city and administrative capital of Pskov Oblast. One of the oldest cities in the country, it has preserved many unique architectural monuments of the 12th-16th centuries.\"\nRennes,France,48.1147,-1.6794,220488.0,\"Rennes is the chief city of Brittany in northwest France. It's mostly modern and industrial, but has many grand 18th and 19th century buildings, and survivors of earlier times.\"\nRiga,Latvia,56.9489,24.1064,920643.0,\"Riga is the financial, creative, and cultural centre of Latvia. It is the capital and the largest city in Latvia, it is also the largest city in the Baltic States.\"\nRijeka,Croatia,45.3272,14.4411,191293.0,\"Rijeka (literally \"\"River\"\" in Croatian language) is a city in Kvarner Bay, a northern inlet of the Adriatic Sea in Croatia. It is the principal seaport of the country.\"\nRivne,Ukraine,50.6192,26.2519,246574.0,\"Rivne (Ukrainian: Рівне, also transliterated Rovno from Russian: Ровно) (Polish: Równe) is a city in Western Ukraine.\"\nRome,Italy,41.8931,12.4828,2872800.0,\"Rome (Italian and Latin: Roma), the 'Eternal City', is the capital and largest city of Italy and of the Lazio region. 
It's the famed city of the Roman Empire, the Seven Hills, La Dolce Vita, the Vatican City and Three Coins in the Fountain.\"\nRouen,France,49.4428,1.0886,112321.0,\"Rouen is the capital of the French region of Upper Normandy on the River Seine, 135 km (approximately 90 minutes drive) northwest from the centre of Paris. The city has a population of 110,000 and its metropolitan area includes some 666,000 inhabitants (2017).\"\nSaint Petersburg,Russia,59.95,30.3167,5384342.0,\"Saint Petersburg (Russian: Са́нкт-Петербу́рг Sankt-Peterburg), known as Petrograd (Петроград) in 1914-1924 and Leningrad (Ленинград) in 1924-1991, is the second largest city of Russia, with 5.6 million inhabitants (2021), and the former capital of the Russian Empire.\"\nSalzburg,Austria,47.8,13.045,155021.0,\"Salzburg is a city in Austria, near the border with Germany's Bavaria state, with a population of 157,000 (2020). It was the setting for the 1965 movie The Sound of Music, so you may think you know all there is to see in Salzburg if you have seen the movie.\"\nSamara,Russia,53.2028,50.1408,1169719.0,thumb|300px|Iversky Convent\nSamsun,Turkey,41.2903,36.3336,1335716.0,\"Samsun, in the Central Karadeniz region of Turkey, is the largest city on the Turkish Black Sea coast.\"\nSantander,Spain,43.4628,-3.805,172221.0,\"Santander is the capital and largest city of the province of Cantabria in Spain. It's on the north coast, with many beaches, ferries from Britain, and a small historic centre.\"\nSarajevo,Bosnia and Herzegovina,43.8564,18.4131,419957.0,\"Sarajevo is the capital of Bosnia and Herzegovina, and its largest city, with 420,000 citizens in its urban area (2013). 
Sarajevo metropolitan area that has a population of 555,000 also includes some neighbourhoods of \"\"East Sarajevo\"\" that are a part of Republika Srpska.\"\nSaratov,Russia,51.5333,46.0167,845300.0,Saratov (Russian: Сара́тов suh-RAH-tuhf) is a city in the Volga region of Russia.\nSatu Mare,Romania,47.79,22.89,102411.0,\"Satu Mare is a city in the Maramureș region of Romania. As of 2021, it had a population of 91,520.\"\nSibiu,Romania,45.7928,24.1519,147245.0,\"Sibiu is a town in southern Transylvania, Romania, 280 km by road from Bucharest. The old town centre is very attractive.\"\nSiirt,Turkey,37.925,41.9458,166332.0,Siirt is a city in Southeastern Anatolia.\nSimferopol,Ukraine,44.9484,34.1,341799.0,\"Simferopol (Russian: Симферополь, Ukrainian: Сімферополь) is the capital city of the Crimea.\"\nSivas,Turkey,39.75,37.0167,377561.0,\"Sivas is a city in Central Anatolia, with a population in 2020 of 335,570. By road it's 450 km east of Ankara, and stands at 1278 m elevation.\"\nSkopje,Macedonia,41.9961,21.4317,640000.0,\"Skopje (Macedonian: Скопје, Albanian: Shkup, Turkish: Üsküp) is the capital and largest city of the Republic of North Macedonia. Skopje is city of many cultures and many centuries.\"\nSofia,Bulgaria,42.7,23.33,1547779.0,Sofia (София) is the capital of Bulgaria. It is also the biggest city in the country with about 2 million citizens (including suburbs).\nStavanger,Norway,58.97,5.7314,237369.0,\"Stavanger is Norway's fourth largest city, at 145,000 citizens (2021). 
It is the largest city in, and the administrative centre of, Rogaland county in West Norway.\"\nStavropol,Russia,45.05,41.9833,450680.0,Stavropol (Ставрополь) is a city in Russia.\nStockholm,Sweden,59.3294,18.0686,1611776.0,\"Stockholm is Sweden's capital and largest city, with nearly a million inhabitants in the city, and 2.4 million within Stockholm County (as of 2021).\"\nStrasbourg,France,48.5833,7.7458,290576.0,\"thumb|300px|Strasbourg railway station, known for the sky dome\"\nStuttgart,Germany,48.7775,9.18,2787724.0,\"Stuttgart is the capital of the Bundesland of Baden-Württemberg in Germany. With a population of approximately 632,000 in the immediate city (2017) and more than 5.\"\nSyktyvkar,Russia,61.6667,50.8167,245313.0,thumb|300px|Street scene in Syktyvkar.\nSzczecin,Poland,53.4325,14.5481,403833.0,\"Szczecin, (pronounced Shchetsin, German: Stettin, Latin: Stetinum) is a maritime port city and the capital of Zachodniopomorskie in Poland. The city has a population of over 400,000, with almost 780,000 living in its metro area (2019).\"\nTallinn,Estonia,59.4372,24.7453,438341.0,\"Tallinn is Estonia's capital and largest city. Tallinn is an important port of the Baltic Sea, with the busy passenger section of the port reaching the foothill of the picturesque medieval Old Town, which has been astonishingly well preserved and was inscribed on the UNESCO World Heritage List in 1997.\"\nTampere,Finland,61.4981,23.76,334112.0,thumb|350px|View to Näsinneula tower in Tampere\nTbilisi,Georgia,41.7225,44.7925,1118035.0,\"Tbilisi (Georgian: თბილისი, Russian: Тбилиси), is the capital city of the country of Georgia, lying on the banks of the Mtkvari River. 
The metropolitan area covers 726 km² (280 mi²) and has a population of approximately 1.\"\nThessaloniki,Greece,40.6403,22.9347,824676.0,\"Thessaloniki (Greek: Θεσσαλονίκη, Albanian, Turkish: Selanik, Serbian, Bulgarian, Macedonian: Солун, Solun, Judaeo-Spanish: סאלוניקו / Saloniko, Romanian: Salonic, Aromanian: Sãrunã, French: Salonique) is the capital of the administrative region of Central Macedonia and the whole historical region of Macedonia, Greece, and is, at about one million inhabitants (2011), the second largest city in the country. More importantly, it is a city with a continuous 3,000-year history, preserving relics of its Roman, Byzantine and Ottoman past and of its formerly dominant Jewish population.\"\nTirana,Albania,41.3289,19.8178,418495.0,\"Tirana (Albanian: Tiranë) is the bustling and relatively modernised capital of Albania. It is the most important economic, financial, political and trade centre in the country.\"\nToulouse,France,43.6045,1.444,493465.0,\"Toulouse is the chief city of Haute-Garonne in the Occitanie region of France. It stands north of the Pyrenees on the River Garonne, halfway between the Atlantic and the Mediterranean.\"\nTrabzon,Turkey,41.005,39.7225,426882.0,\"Trabzon (formerly Trebizond) is the largest city in the Eastern Karadeniz region of Turkey.   Trabzon functioned as an independent state or empire during several periods in its long history, ruling over a vast area from Sinop in the west to Georgia in the east, even including territory in Crimea.\"\nTurku,Finland,60.45,22.2667,252468.0,\"Turku (Swedish: Åbo) is Finland's oldest city and the biggest one until the mid 1800s. 
Believed to have been founded in the early 13th century, it is the cradle of modern Finnish culture and has extensively influenced Finnish history.\"\nUfa,Russia,54.7261,55.9475,1115560.0,\"Ufa (Russian: Уфа́ oo-FAH, Bashkirː ӨФӨ oe-FOE), the capital of Bashkortostan, is a large, interesting, and rapidly developing city, with a population of over 1.1 million in 2018.\"\nUzhhorod,Ukraine,48.6239,22.295,114897.0,\"Uzhhorod (Ukrainian: Ужгород, also transliterated Uzhgorod from Russian: Ужгород; Hungarian: Ungvár, German: Uschhorod) is a city in Western Ukraine, the administrative center of Zakarpatska Oblast (Transcarpthian Region).  The population of Uzhhorod is multiethnic.\"\nValencia,Spain,39.47,-0.3764,792492.0,\"Valencia (València in Catalan/Valencian) is a charming old city and the capital of the Valencian Community. With just over 830,000 inhabitants in 2023, it is Spain’s third-largest city and, after Barcelona, the most significant cultural centre along the Spanish Mediterranean coast.\"\nValladolid,Spain,41.6528,-4.7236,297775.0,You may be looking for:\nVan,Turkey,38.4942,43.38,353419.0,\"Van (pronounced vahn in Turkish, wahn in Kurdish) is a city in Eastern Anatolia, Turkey. For Turks from the other regions of Turkey, it has a surprising beach resort feel in an area where their country is farthest from the sea.\"\nVarna,Bulgaria,43.2167,27.9167,348668.0,\"Varna (Варна) is a large city on the Black Sea coast in the northeast of Bulgaria. It's the larger of the country's two major sea ports (the other one is Burgas), and a gateway to the seaside resorts on the northern part of the coast.\"\nVienna,Austria,48.2083,16.3725,1973403.0,\"Vienna (German: Wien; Austro-Bavarian: Wean) is the capital of Austria and by far its most populous city, with an urban population of 2 million and a metropolitan population of 2.9 million (2023).\"\nVilnius,Lithuania,54.6872,25.28,708203.0,\"Vilnius is the capital and largest city of Lithuania. 
It has a beautiful baroque Old Town, listed as a , and excellent tourist facilities in all price ranges.\"\nVinnytsia,Ukraine,49.2333,28.4833,371855.0,\"Vinnytsia (Ukrainian: Вінниця, also transliterated Vinnitsa from Russian: Винница) is a city in Central Ukraine, the administrative center of the Vinnytsia region. 267 km southwest of Kyiv, it has been known since the Middle Ages, and is home to a former Soviet Cold War airbase.\"\nVitoria-Gasteiz,Spain,42.85,-2.6833,253672.0,\"Vitoria-Gasteiz (Spanish: Vitoria, Basque: Gasteiz) is in the heart of the Basque Country in Spain. The old town has some of the best preserved medieval streets and plazas in the region and it is one of very few cities with two cathedrals.\"\nVladikavkaz,Russia,43.04,44.6775,306978.0,Vladikavkaz is the capital city of North Ossetia and a major transit hub for the North Caucasus region. Its position on the Georgian Military Highway makes it a staging post for journeys to both Georgia and South Ossetia.\nVolgograd,Russia,48.7086,44.5147,1015586.0,\"Volgograd (Russian: Волгогра́д vuhl-gah-GRAHD) is a large city along the west bank of the Volga River in Southern Russia. It used to be known as Stalingrad, a name which the city is still known as on several war-related dates each year (according to local legislation).\"\nVoronezh,Russia,51.6717,39.2106,1050602.0,[of the Annunciation]\nWarsaw,Poland,52.23,21.0111,1860281.0,Warsaw (Polish: Warszawa) is Poland's capital and largest city. 
Warsaw is a bustling metropolis and one of the European Union's fastest-developing capitals and the Union's ninth most populous urban centre.\nZagreb,Croatia,45.8167,15.9833,809268.0,thumb|350px|right|Ban Jelačić Square\nZaporizhzhia,Ukraine,47.85,35.1175,741717.0,\"Zaporizhzhia (Ukrainian: Запоріжжя, also transliterated Zaporozhye from Russian: Запорожье) is a city in Ukraine.\"\nZaragoza,Spain,41.65,-0.8833,675301.0,\"Zaragoza is the capital and largest city of Aragon in Spain, and one of Spain's five largest cities, but it is one of the least known outside of Spain. Founded on the river Ebro during the Roman Empire as Cesaraugusta, Zaragoza now holds a large cultural and architectural heritage attesting to 2,000 years of affluence and importance.\"\nZurich,Switzerland,47.3744,8.5411,436332.0,\"Zurich (German: Zürich, Swiss German: Züri) is the largest city in Switzerland, with a population of some 435,000 (2018) in the city, and 1.3 million (2009) in the metro area.\"\n"
  },
  {
    "path": "examples/wikivoyage_eu/pubspec.yaml",
    "content": "name: wikivoyage_eu\ndescription: Wikivoyage EU chatbot using llama3.2 and ObjectBox.\nversion: 1.0.0\npublish_to: none\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  langchain: ^0.8.1\n  langchain_ollama: ^0.4.1\n  langchain_community: 0.4.0+2\n"
  },
  {
    "path": "packages/anthropic_sdk_dart/README.md",
    "content": "# anthropic_sdk_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/anthropic_sdk_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/chromadb/README.md",
    "content": "# chromadb\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/chromadb\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/googleai_dart/README.md",
    "content": "# googleai_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/googleai_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/langchain/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain/CHANGELOG.md",
    "content": "## 0.8.1\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n - **FIX**(langchain): Properly serialize non-String tool outputs in AgentExecutor ([#821](https://github.com/davidmigloz/langchain_dart/issues/821)). ([3891164c](https://github.com/davidmigloz/langchain_dart/commit/3891164c11d0e7dd809b179d15444dd2da71aca0))\n\n## 0.8.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.8.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.7.9\n\n - Update a dependency to the latest release.\n - **DOCS**: Remove Code Assist AI badge ([#752](https://github.com/davidmigloz/langchain_dart/issues/752)). ([dc0e70df](https://github.com/davidmigloz/langchain_dart/commit/dc0e70dfd9866267456b6caf0b76bf0cc646a425))\n\n## 0.7.8+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.7.8\n\n - **FEAT**: Implement Markdown text splitter ([#635](https://github.com/davidmigloz/langchain_dart/issues/635)). 
([242e4be2](https://github.com/davidmigloz/langchain_dart/commit/242e4be227503f93120b209bca350ed6a055f362))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FEAT**: Add to/fromMap serialization to ChatMessage, PromptValue & ChatHistory ([#681](https://github.com/davidmigloz/langchain_dart/issues/681)). ([d239c7c7](https://github.com/davidmigloz/langchain_dart/commit/d239c7c7b4a1504559e475466be7f176521a0473))\n - **FIX**: Correctly calculate start_index when using chunkOverlap in TextSplitter ([#640](https://github.com/davidmigloz/langchain_dart/issues/640)). ([71dd5ac3](https://github.com/davidmigloz/langchain_dart/commit/71dd5ac31351d0ea45989c43a250a35668cb01b6))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: Made apiKey optional for `TavilyAnswerTool` and `TavilySearchResultsTool` ([#646](https://github.com/davidmigloz/langchain_dart/issues/646)). ([5085ea4a](https://github.com/davidmigloz/langchain_dart/commit/5085ea4ad8b5cd072832e73afcbb7075a6375307))\n\n## 0.7.7+2\n\n - **FEAT**: Add support for DirectoryLoader ([#620](https://github.com/davidmigloz/langchain_dart/issues/620)). ([4730f2a3](https://github.com/davidmigloz/langchain_dart/commit/4730f2a376b152ea38e5204125209ef01f29cab9))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.7.7+1\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). 
([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.7.7\n\n - **REFACTOR**: Update deprecated UUID constant ([#558](https://github.com/davidmigloz/langchain_dart/issues/558)). ([8d9f14b4](https://github.com/davidmigloz/langchain_dart/commit/8d9f14b4c394f4652727eadf5849355cd9fa2f19))\n\n## 0.7.6\n\n - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f))\n\n## 0.7.5\n\n - **FEAT**: Add ToolsAgent for models with tool-calling support ([#530](https://github.com/davidmigloz/langchain_dart/issues/530)). ([f3ee5b44](https://github.com/davidmigloz/langchain_dart/commit/f3ee5b44c4ffa378343ec4ee1e08d8e594a6cb36))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n - **DOCS**: Add Code Assist AI in README and documentation ([#538](https://github.com/davidmigloz/langchain_dart/issues/538)). ([e752464c](https://github.com/davidmigloz/langchain_dart/commit/e752464c0d2fc7e0ccc878933b0ef934c9527567))\n\n## 0.7.4\n\n - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). 
([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n - **DOCS**: Update README.md with Ollama tool call support. ([e016b0bd](https://github.com/davidmigloz/langchain_dart/commit/e016b0bd02065971faab2a3a48be625ff33a08cf))\n\n## 0.7.3\n\n> Note: Anthropic integration (`ChatAnthropic`) is now available in the new [`langchain_anthropic`](https://pub.dev/packages/langchain_anthropic) package.\n\n- **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c))\n- **DOCS**: Document existing integrations in README.md. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302))\n\n## 0.7.2\n\n> Note: ObjectBox Vector DB integration (`ObjectBoxVectorStore`) is now available in the [`langchain_community`](https://pub.dev/packages/langchain_community) package.\n\n - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8))\n   + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox) \n - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). 
([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18))\n\n## 0.7.1\n\n> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is now available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package.\n\n - **DOCS**: Add docs for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n - **DOCS**: Update ChatOllama docs ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e))\n\n## 0.7.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n - **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n## 0.6.0+1\n\n - **FIX**: Allow async functions in Runnable.mapInput ([#396](https://github.com/davidmigloz/langchain_dart/issues/396)). ([e4c35092](https://github.com/davidmigloz/langchain_dart/commit/e4c3509267b7be28e2b0fa334a9255baadabfb6a))\n\n## 0.6.0\n\n> Note: This release has breaking changes.  \n> If you are using `Runnable.fromFunction` check the [migration guide](https://github.com/davidmigloz/langchain_dart/issues/394).\n\n - **FEAT** Add support for RunnableRouter ([#386](https://github.com/davidmigloz/langchain_dart/issues/386)). 
([827e262](https://github.com/davidmigloz/langchain_dart/commit/827e2627535941d702e8fbe300ca1426ddf50efe))\n - **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n - **FEAT**: Add support for JsonOutputParser ([#392](https://github.com/davidmigloz/langchain_dart/issues/392)). ([c6508f0f](https://github.com/davidmigloz/langchain_dart/commit/c6508f0fadde3fd4d93accbcae5cea37b7beca20))\n - **FEAT**: Reduce input stream for PromptTemplate, LLM, ChatModel, Retriever and Tool ([#388](https://github.com/davidmigloz/langchain_dart/issues/388)). ([b59bcd40](https://github.com/davidmigloz/langchain_dart/commit/b59bcd409f4904fb2e16f928b3c7206a186ab3f4))\n - **BREAKING** **FEAT**: Support different logic for streaming in RunnableFunction ([#394](https://github.com/davidmigloz/langchain_dart/issues/394)). ([8bb2b8ed](https://github.com/davidmigloz/langchain_dart/commit/8bb2b8ede18bfe3a4f266b78ca32f1dfb83db1b1))\n - **DOCS**: Update LangChain Expression Language documentation ([#395](https://github.com/davidmigloz/langchain_dart/issues/395)). ([6ce75e5f](https://github.com/davidmigloz/langchain_dart/commit/6ce75e5fe6492c951f9b5209d7a2c3077ad178d2))\n\n## 0.5.0+1\n\n - **DOCS**: Update README.md. ([8139113a](https://github.com/davidmigloz/langchain_dart/commit/8139113a3ca8faa94145cbb6b1b80ca3bc2f3979))\n\n## 0.5.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). 
([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n - **FEAT**: Add reduceOutputStream option to StringOutputParser ([#368](https://github.com/davidmigloz/langchain_dart/issues/368)). ([7f9a9fae](https://github.com/davidmigloz/langchain_dart/commit/7f9a9faeef93685ff810a88bbfe866da4b843369))\n - **DOCS**: Update LCEL docs. ([ab3ab573](https://github.com/davidmigloz/langchain_dart/commit/ab3ab573f62d9a497e7c82308da0a044337e957d))\n - **DOCS**: Add RAG example using OllamaEmbeddings and ChatOllama ([#337](https://github.com/davidmigloz/langchain_dart/issues/337)). 
([8bddc6c0](https://github.com/davidmigloz/langchain_dart/commit/8bddc6c05b762be357a3c3ed0f6fc4af3aad866a))\n\n## 0.4.2\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n\n## 0.4.1\n\n - **DOCS**: Update Supabase docs. ([4a2a5329](https://github.com/davidmigloz/langchain_dart/commit/4a2a532931cac7577102d78b0ec8a5cc4eafb93c))\n - **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.4.0\n\n - **DOCS**: Update embeddings documentation ([#313](https://github.com/davidmigloz/langchain_dart/issues/313)). ([43463481](https://github.com/davidmigloz/langchain_dart/commit/4346348108dc105a1daaedc932641e725b648f3e))\n\n## 0.3.3\n\n - **DOCS**: Add Anyscale and Together AI documentation ([#305](https://github.com/davidmigloz/langchain_dart/issues/305)). ([7daa3eb0](https://github.com/davidmigloz/langchain_dart/commit/7daa3eb052c32baa7473d7532c795b7f242ed9fc))\n\n## 0.3.2\n\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **FIX**: Export ConversationSummaryMemory ([#283](https://github.com/davidmigloz/langchain_dart/issues/283)). ([76b01d23](https://github.com/davidmigloz/langchain_dart/commit/76b01d2376c0d9727d1f4681dba83a46f4b02b3a))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 0.3.1+1\n\n - **FIX**: Export token_buffer.dart (ConversationTokenBufferMemory) ([#280](https://github.com/davidmigloz/langchain_dart/issues/280)). 
([265fcb4b](https://github.com/davidmigloz/langchain_dart/commit/265fcb4b68a5aa6144456868aebf023e1b0ce539))\n\n## 0.3.1\n\n - **FEAT**: Make ChatPromptTemplates more convenient to use ([#275](https://github.com/davidmigloz/langchain_dart/issues/275)). ([9f8e6f75](https://github.com/davidmigloz/langchain_dart/commit/9f8e6f75543a41b87aff72fbeb249acf859a9562))\n\n## 0.3.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/269) \n\n - **BREAKING** **REFACTOR**: Make MIME Type mandatory for base64 images in prompt ([#269](https://github.com/davidmigloz/langchain_dart/issues/269)). ([2fe076bb](https://github.com/davidmigloz/langchain_dart/commit/2fe076bb8d2ddacfee6ec077c3f564bff919dace))\n - **FEAT**: Allow to pass options to countTokens method ([#268](https://github.com/davidmigloz/langchain_dart/issues/268)). ([4ecb123b](https://github.com/davidmigloz/langchain_dart/commit/4ecb123bd34f0b01d377045b97dace89676d5d16))\n - **DOCS**: Update README.md and docs ([#272](https://github.com/davidmigloz/langchain_dart/issues/272)). ([306a1fdd](https://github.com/davidmigloz/langchain_dart/commit/306a1fdd6504ef28dc2066953ae575e975ab9025))\n\n## 0.2.1\n\n - **FEAT**: Support customizing Tool input description ([#258](https://github.com/davidmigloz/langchain_dart/issues/258)). ([a9a1b2a0](https://github.com/davidmigloz/langchain_dart/commit/a9a1b2a0f4fa5fee320e9ca5b46a99a0b834035c))\n - **DOCS**: Update Mistral AI documentation ([#265](https://github.com/davidmigloz/langchain_dart/issues/265)). 
([59b4127e](https://github.com/davidmigloz/langchain_dart/commit/59b4127eddb7a04bafa34b11b071336ab336e7a9))\n\n## 0.2.0\n\n> Note: This release has breaking changes.\n>\n> Migration guides:\n> - [`Retriever`](https://github.com/davidmigloz/langchain_dart/issues/248)\n> - [`Tools`](https://github.com/davidmigloz/langchain_dart/issues/243)\n\n - **BREAKING** **FEAT**: Move all retriever config options to RetrieverOptions ([#248](https://github.com/davidmigloz/langchain_dart/issues/248)). ([f5785b77](https://github.com/davidmigloz/langchain_dart/commit/f5785b772c11750bb57f4b143f978a84743f9222))\n - **BREAKING** **FEAT**: Allow to pass call options to tools ([#243](https://github.com/davidmigloz/langchain_dart/issues/243)). ([4a01adb9](https://github.com/davidmigloz/langchain_dart/commit/4a01adb9346b33cdb148d0f0aa7196e2b16867a9))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). ([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n - **REFACTOR**: Use JsonPath.readValues in JsonLoader ([#245](https://github.com/davidmigloz/langchain_dart/issues/245)). ([3e159254](https://github.com/davidmigloz/langchain_dart/commit/3e159254379d03b70655f274b6fe81fc07a5095f))\n - **FIX**: Out of rage error in ConversationBufferWindowMemory ([#249](https://github.com/davidmigloz/langchain_dart/issues/249)). ([1b38bff7](https://github.com/davidmigloz/langchain_dart/commit/1b38bff7eff10327cd0154c0a8d47bd363870e2d))\n - **FIX**: PromptTemplate stream should only emit if it has all inputs ([#247](https://github.com/davidmigloz/langchain_dart/issues/247)). ([a56a2ec5](https://github.com/davidmigloz/langchain_dart/commit/a56a2ec5e084d5c140b0e8469707ecaa19dfdaff))\n\n## 0.1.1+1\n\n - **FIX**: Conditionally import dart:io in LocalFileStore ([#237](https://github.com/davidmigloz/langchain_dart/issues/237)). 
([71d337e6](https://github.com/davidmigloz/langchain_dart/commit/71d337e62af49f173369e402fa6a72e363fd8724))\n\n## 0.1.1\n\n - **FEAT**: Add support for OpenAIDallETool ([#231](https://github.com/davidmigloz/langchain_dart/issues/231)). ([541e8d77](https://github.com/davidmigloz/langchain_dart/commit/541e8d77d76246b25ffa8c4d3715b5ca728cfc3a))\n - **FEAT**: Support implementing custom agents using LCEL ([#230](https://github.com/davidmigloz/langchain_dart/issues/230)). ([625eeeb4](https://github.com/davidmigloz/langchain_dart/commit/625eeeb4ffa9d92c6fd8da003fa471f5d4752257))\n - **FEAT**: Add support for Runnable.mapInput() ([#229](https://github.com/davidmigloz/langchain_dart/issues/229)). ([7cc832ca](https://github.com/davidmigloz/langchain_dart/commit/7cc832ca82bd86b4031ca5f2c796e136ca646375))\n - **REFACTOR**: Rename RunnableMapFromItem to RunnableMapFromInput ([#228](https://github.com/davidmigloz/langchain_dart/issues/228)). ([7330cfcd](https://github.com/davidmigloz/langchain_dart/commit/7330cfcd0c7e19c831da1454c3ff4cc03d079cf7))\n - **REFACTOR**: Improve handling of input and output keys in chains ([#227](https://github.com/davidmigloz/langchain_dart/issues/227)). ([acf76b24](https://github.com/davidmigloz/langchain_dart/commit/acf76b240a076cf4b1f153bdaba9127580369d9e))\n\n## 0.1.0+2\n\n - **DOCS**: Update README.md ([#225](https://github.com/davidmigloz/langchain_dart/issues/225)). ([afff8567](https://github.com/davidmigloz/langchain_dart/commit/afff856723f15022bcc3f0ba0285ff1ffed51c68))\n\n## 0.1.0+1\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n## 0.1.0\n\n> Note: This release has breaking changes.  
\n> [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). ([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n## 0.0.15\n\n - **FEAT**: Add streaming support in LangChain Expression Language ([#192](https://github.com/davidmigloz/langchain_dart/issues/192)). ([2e4bcf91](https://github.com/davidmigloz/langchain_dart/commit/2e4bcf91f6b364b32b6f999e71252001ca6392c8))\n - **DOCS**: Add streaming to docs. ([bb87c190](https://github.com/davidmigloz/langchain_dart/commit/bb87c1901b34810aa2e841ed83da8e70703b9d08))\n - **FEAT**: Add streaming support to OutputFunctionsParsers ([#194](https://github.com/davidmigloz/langchain_dart/issues/194)). ([8b4e6a13](https://github.com/davidmigloz/langchain_dart/commit/8b4e6a138cd9942dd6ea1a97fe5e19e84a30000c))\n - **FIX**: Remove unused generic param in StringOutputParser ([#193](https://github.com/davidmigloz/langchain_dart/issues/193)). ([decd3176](https://github.com/davidmigloz/langchain_dart/commit/decd31765114bea1967f15e5fbd83110709938e4))\n\n## 0.0.14\n\n> Note: This release has breaking changes.\n\n - **REFACTOR**: Don't require implement getFormatInstructions. ([d8b1286d](https://github.com/davidmigloz/langchain_dart/commit/d8b1286db59e02b60179e395eb43cdc3828582c2))\n - **DOCS**: Update docs. ([af7ee827](https://github.com/davidmigloz/langchain_dart/commit/af7ee8278f18620a54072bb9d1772882956d5c2d))\n - **BREAKING** **FIX**: Change loaders lastModified metadata field to integer ([#172](https://github.com/davidmigloz/langchain_dart/issues/172)). ([72c724f8](https://github.com/davidmigloz/langchain_dart/commit/72c724f8a716e27b4a807b70bcbbafdd9feb0a18))\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). 
([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n\n## 0.0.13\n\n> Check out the [LangChain Expression Language documentation](https://langchaindart.dev/#/expression_language/interface) for more details\n\n - **FEAT**: Add support for JsonOutputFunctionsParser ([#165](https://github.com/davidmigloz/langchain_dart/issues/165)). ([66c8e644](https://github.com/davidmigloz/langchain_dart/commit/66c8e64410d1dbf8b75e5734cb0cbb0e43dc0615))\n - **FEAT**: Add support for StringOutputParser ([#164](https://github.com/davidmigloz/langchain_dart/issues/164)). ([ee29e99a](https://github.com/davidmigloz/langchain_dart/commit/ee29e99a410c3cc6a7ae263fea1cde283f904edf))\n - **FEAT**: Implement LangChain Expression Language (LCEL) ([#163](https://github.com/davidmigloz/langchain_dart/issues/163)). ([85ea41af](https://github.com/davidmigloz/langchain_dart/commit/85ea41af9f5e2ff42bba620a60f765ca0f67c86c))\n - **FEAT**: Support custom doc prompt in StuffDocumentsQAChain ([#157](https://github.com/davidmigloz/langchain_dart/issues/157)). ([faa9d2d7](https://github.com/davidmigloz/langchain_dart/commit/faa9d2d768c2a70f17247d5703dd1d821af08240))\n\n## 0.0.12\n\n> Note: This release has breaking changes.\n\n - **DOCS**: Acknowledge sponsors in readme. ([092d94c8](https://github.com/davidmigloz/langchain_dart/commit/092d94c8ac166cf47f1ddab748b61d440f4b8585))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n\n## 0.0.11\n\n> Note: This release has breaking changes.\n\n - **DOCS**: Update readme. 
([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **BREAKING** **REFACTOR**: Remove addDocuments from VectorStoreRetriever ([#146](https://github.com/davidmigloz/langchain_dart/issues/146)). ([d32a5fd9](https://github.com/davidmigloz/langchain_dart/commit/d32a5fd94645d10deee5a35f0d83501f93be7308))\n - **BREAKING** **REFACTOR**: Rename VectorStoreRetrieverMemory and require vector store ([#145](https://github.com/davidmigloz/langchain_dart/issues/145)). ([67af3195](https://github.com/davidmigloz/langchain_dart/commit/67af319595755ec3c3834ceabaf4086cfa32ad8c))\n\n## 0.0.10\n\n - **FEAT**: Add support for Chroma VectorStore ([#139](https://github.com/davidmigloz/langchain_dart/issues/139)). ([098783b4](https://github.com/davidmigloz/langchain_dart/commit/098783b4895ab30bb61d07355a0b587ff76b9175))\n - **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n## 0.0.9\n\n - **FEAT**: Support filtering in MemoryVectorStore ([#137](https://github.com/davidmigloz/langchain_dart/issues/137)). ([84da480f](https://github.com/davidmigloz/langchain_dart/commit/84da480f6820a81f092756f0194deb77c4cda151))\n - **FEAT**: Support filtering in VertexAI Matching Engine ([#136](https://github.com/davidmigloz/langchain_dart/issues/136)). ([768c6987](https://github.com/davidmigloz/langchain_dart/commit/768c6987de5b36b60090a1fe94f49483da11b885))\n - **FEAT**: Allow to pass vector search config ([#135](https://github.com/davidmigloz/langchain_dart/issues/135)). ([5b8fa5a3](https://github.com/davidmigloz/langchain_dart/commit/5b8fa5a3fcaf785615016be1d5da0a003178cfa9))\n - **DOCS**: Fix API documentation errors ([#138](https://github.com/davidmigloz/langchain_dart/issues/138)). 
([1aa38fce](https://github.com/davidmigloz/langchain_dart/commit/1aa38fce17eed7f325e7872d03096740256d57be))\n\n## 0.0.8\n\n - **REFACTOR**: Rename store folder to chat_message_history ([#126](https://github.com/davidmigloz/langchain_dart/issues/126)). ([fa54c7e2](https://github.com/davidmigloz/langchain_dart/commit/fa54c7e22410182848b1936b64e85d9cf709eaeb))\n - **REFACTOR**: Fix Dart 3.1.0 linter issues ([#125](https://github.com/davidmigloz/langchain_dart/issues/125)). ([cc32f3f1](https://github.com/davidmigloz/langchain_dart/commit/cc32f3f13240c28cf174a9dbffc7d61bc061f843))\n - **FEAT**: Add support for LocalFileStore ([#132](https://github.com/davidmigloz/langchain_dart/issues/132)). ([2c508dce](https://github.com/davidmigloz/langchain_dart/commit/2c508dcea4959dbe755ee713de43dc20c9680640))\n - **FEAT**: Add support for CacheBackedEmbeddings ([#131](https://github.com/davidmigloz/langchain_dart/issues/131)). ([27d8b777](https://github.com/davidmigloz/langchain_dart/commit/27d8b777b4da360e57f32de6e1e1fc09ea6b6333))\n - **FEAT**: Add FakeEmbeddings testing model ([#130](https://github.com/davidmigloz/langchain_dart/issues/130)). ([f06920d7](https://github.com/davidmigloz/langchain_dart/commit/f06920d792d1083876b040744213d78c9b11bd4c))\n - **FEAT**: Add support for EncoderBackedStore ([#129](https://github.com/davidmigloz/langchain_dart/issues/129)). ([85bb3191](https://github.com/davidmigloz/langchain_dart/commit/85bb31918308f7a956afd0f991a78cf65e6dcd8d))\n - **FEAT**: Add support for InMemoryStore ([#128](https://github.com/davidmigloz/langchain_dart/issues/128)). ([699c0904](https://github.com/davidmigloz/langchain_dart/commit/699c09045fec3f91666f7ee264525cec8b16f910))\n - **FEAT**: Add support for InMemoryDocStore ([#127](https://github.com/davidmigloz/langchain_dart/issues/127)). 
([d9d7268d](https://github.com/davidmigloz/langchain_dart/commit/d9d7268ddcd9e346f67e1278127e25ee467ea99c))\n - **FEAT**: Initial vectors, ids, and delete in MemoryVectorStore ([#123](https://github.com/davidmigloz/langchain_dart/issues/123)). ([f87a738d](https://github.com/davidmigloz/langchain_dart/commit/f87a738d6e9c78aabcbd95014dd4fac2d6c58817))\n\n## 0.0.7+1\n\n - **FIX**: Text splitters were not preserving docs IDs ([#122](https://github.com/davidmigloz/langchain_dart/issues/122)). ([a9d7f098](https://github.com/davidmigloz/langchain_dart/commit/a9d7f098e650329fe43f35e2f0e11a1f61778e4f))\n\n## 0.0.7\n\n - **FEAT**: Integrate Vertex AI Matching Engine vector store ([#103](https://github.com/davidmigloz/langchain_dart/issues/103)). ([289c3eef](https://github.com/davidmigloz/langchain_dart/commit/289c3eef722206ac9dea0c968c036ad3289d10be))\n - **DOCS**: Update readme. ([a64860ce](https://github.com/davidmigloz/langchain_dart/commit/a64860ceda8fe926b720086cf7c86df2b02abf35))\n - **DOCS**: Update readme. ([8a58f4a1](https://github.com/davidmigloz/langchain_dart/commit/8a58f4a1923f474bc331e2d02b9cf14b79194331))\n\n## 0.0.6\n\n - **REFACTOR**: Move Vertex AI client to its own package ([#111](https://github.com/davidmigloz/langchain_dart/issues/111)). ([d8aea156](https://github.com/davidmigloz/langchain_dart/commit/d8aea15633f1a9fb0df35cf9cc44bbc93ad46cd8))\n - **REFACTOR**: Always await or explicitly discard Futures ([#106](https://github.com/davidmigloz/langchain_dart/issues/106)). ([989e93db](https://github.com/davidmigloz/langchain_dart/commit/989e93dbf6b5d61f053550219d88842156aeb492))\n - **FIX**: Fix OpenAIQAWithSourcesChain returning empty strings ([#113](https://github.com/davidmigloz/langchain_dart/issues/113)). ([6181ff8d](https://github.com/davidmigloz/langchain_dart/commit/6181ff8df77653d38cd84cb066776c04c0ff74ad))\n - **FIX**: VectorStore k variable was ignored ([#110](https://github.com/davidmigloz/langchain_dart/issues/110)). 
([80e61eb7](https://github.com/davidmigloz/langchain_dart/commit/80e61eb7a11757f4e541ce5ba6033fb11b1b01f0))\n - **FEAT**: Integrate Google Vertex AI PaLM Chat Model ([#99](https://github.com/davidmigloz/langchain_dart/issues/99)). ([3897595d](https://github.com/davidmigloz/langchain_dart/commit/3897595db597d5957ef80ae7a1de35c5f41265b8))\n - **FEAT**: Integrate Google Vertex AI PaLM Text model ([#98](https://github.com/davidmigloz/langchain_dart/issues/98)). ([b2746c23](https://github.com/davidmigloz/langchain_dart/commit/b2746c235d68045ba20afd1f2be7c24dcccb5f24))\n\n## 0.0.5+1\n\n - **FIX**: OpenAIOptions class not exported ([#104](https://github.com/davidmigloz/langchain_dart/issues/104)). ([e50efc3d](https://github.com/davidmigloz/langchain_dart/commit/e50efc3ddf0b13ece43298b2e3fee531e944601d))\n - **DOCS**: Improve RetrievalQAChain API documentation ([#95](https://github.com/davidmigloz/langchain_dart/issues/95)). ([e6d0a9d3](https://github.com/davidmigloz/langchain_dart/commit/e6d0a9d3abd65704883452e50b40344428f9580d))\n\n## 0.0.5\n\n - **FIX**: Stuff and MapReduce docs chains don't handle chat messages ([#92](https://github.com/davidmigloz/langchain_dart/issues/92)). ([19182ca1](https://github.com/davidmigloz/langchain_dart/commit/19182ca1921e53fc2cb0fa61d96d602aacf830f3))\n - **FEAT**: Update AgentExecutor constructor to use agent's tools ([#89](https://github.com/davidmigloz/langchain_dart/issues/89)). ([3af56a45](https://github.com/davidmigloz/langchain_dart/commit/3af56a45930fff84b11f6bec29c50502a490c2b4))\n - **FEAT**: Add MessagePlaceholder ([#87](https://github.com/davidmigloz/langchain_dart/issues/87)). ([23ee95b6](https://github.com/davidmigloz/langchain_dart/commit/23ee95b6cb0bb15701a141adc41ee1b826684ad0))\n - **DOCS**: Update CONTRIBUTING.md. 
([5f2b9264](https://github.com/davidmigloz/langchain_dart/commit/5f2b92641ae1f20fcc8803c977428b81e3f525bd))\n - **DOCS**: Fix typo in MessagePlaceholder API docs ([#90](https://github.com/davidmigloz/langchain_dart/issues/90)). ([f53e1a2b](https://github.com/davidmigloz/langchain_dart/commit/f53e1a2b9dc81c89a66a368758cfd1ec7df4c0f9))\n\n## 0.0.4\n\n - **REFACTOR**: Extract default memory key and prefixes to constants. ([750fd01a](https://github.com/davidmigloz/langchain_dart/commit/750fd01a74f94042cbc26684d6651b531fb0a93c))\n - **FIX**: systemChatMessage was ignored in OpenAIFunctionsAgent ([#86](https://github.com/davidmigloz/langchain_dart/issues/86)). ([cfe1e009](https://github.com/davidmigloz/langchain_dart/commit/cfe1e00972d481f83b9dc9e225a32b7077aa5fd4))\n - **FIX**: Allow to add memory to an agent executor ([#80](https://github.com/davidmigloz/langchain_dart/issues/80)). ([8110464c](https://github.com/davidmigloz/langchain_dart/commit/8110464c4b4ad53f3b1826722df76943d0d66621))\n - **FEAT**: Add ConversationSummaryMemory ([#27](https://github.com/davidmigloz/langchain_dart/issues/27)). ([f631d9e5](https://github.com/davidmigloz/langchain_dart/commit/f631d9e529d99319afe671b5aff441436e43ea31))\n - **FEAT**: Support LLMChain in OpenAIFunctionsAgent and memory. ([bd4a1cb9](https://github.com/davidmigloz/langchain_dart/commit/bd4a1cb9101ba385ce9613f9aa0b7e5474380f32))\n - **FEAT**: Return ChatMessage when LLMChain used with ChatModel. ([bb5f4d23](https://github.com/davidmigloz/langchain_dart/commit/bb5f4d2325ae1f615159f2ffd11cc8ec4e87ed3c))\n - **FEAT**: Add FakeChatModel for testing purposes. ([659783a6](https://github.com/davidmigloz/langchain_dart/commit/659783a6ccad9fc3046040f38c39805743ffdff1))\n - **FEAT**: Add support for ConversationTokenBufferMemory ([#26](https://github.com/davidmigloz/langchain_dart/issues/26)). 
([8113d1c0](https://github.com/davidmigloz/langchain_dart/commit/8113d1c0dc742ce9f6c49018c4b012cd3823fac1))\n - **FEAT**: Improve SummarizeChain.mapReduce summaryMaxTokens name and docs. ([0be06e02](https://github.com/davidmigloz/langchain_dart/commit/0be06e02f280de54a2790d150fac142d9fbe4222))\n - **FEAT**: Add support for CsvLoader ([#77](https://github.com/davidmigloz/langchain_dart/issues/77)). ([41d24e76](https://github.com/davidmigloz/langchain_dart/commit/41d24e7632a77b08234951c0e6bf911530dff56a))\n - **FEAT**: Add ConversationBufferWindowMemory ([#25](https://github.com/davidmigloz/langchain_dart/issues/25)). ([9c271f7e](https://github.com/davidmigloz/langchain_dart/commit/9c271f7e7a31bc59c122a895daf238a0bb5ac7d0))\n\n## 0.0.3\n\n - **FIX**: Loaders tests. ([f0498300](https://github.com/davidmigloz/langchain_dart/commit/f049830057fc1b8ff315469afd1512aa13ceb459))\n - **FEAT**: Update internal dependencies (including http to 1.1.0). ([8f3e8bc8](https://github.com/davidmigloz/langchain_dart/commit/8f3e8bc811df5c8bdba2c7e33b6c53ea0c2edad4))\n - **FEAT**: Add support for VectorStoreRetrieverMemory ([#54](https://github.com/davidmigloz/langchain_dart/issues/54)). ([72cd1b10](https://github.com/davidmigloz/langchain_dart/commit/72cd1b100ad88e7213ec12d432674ec4666ce172))\n\n## 0.0.2\n\n - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85))\n - **FEAT**: Add support for SummarizeChain ([#58](https://github.com/davidmigloz/langchain_dart/issues/58)). ([9499fc04](https://github.com/davidmigloz/langchain_dart/commit/9499fc047ae8be7e7b9dfb0d0ef8678b84245f5d))\n - **FEAT**: Add support for SequentialChain class ([#30](https://github.com/davidmigloz/langchain_dart/issues/30)). 
([381a6768](https://github.com/davidmigloz/langchain_dart/commit/381a676812992370da61ced0e59de5fadf0ef164))\n - **FEAT**: Add support for WebBaseLoader ([#74](https://github.com/davidmigloz/langchain_dart/issues/74)). ([0b5bf4b0](https://github.com/davidmigloz/langchain_dart/commit/0b5bf4b0fb2cf6e1a7be116920e9512233e7e613))\n - **FEAT**: Add Support for JsonLoader ([#72](https://github.com/davidmigloz/langchain_dart/issues/72)). ([2457a973](https://github.com/davidmigloz/langchain_dart/commit/2457a9735aacc2aeffcca2710ce0afc7be2f6f09))\n - **FEAT**: Add support for MapReduceDocumentsChain ([#59](https://github.com/davidmigloz/langchain_dart/issues/59)). ([9f2190c4](https://github.com/davidmigloz/langchain_dart/commit/9f2190c4d5f45378f91eaa02d52d8305f7da254e))\n - **FEAT**: Add support for ReduceDocumentsChain ([#70](https://github.com/davidmigloz/langchain_dart/issues/70)). ([34cf10bd](https://github.com/davidmigloz/langchain_dart/commit/34cf10bd485618bff4cddb5b29a1b46ac9f3a9fa))\n - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). ([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7))\n - **FEAT**: Add support for CodeTextSplitter ([#63](https://github.com/davidmigloz/langchain_dart/issues/63)). ([92a8c7da](https://github.com/davidmigloz/langchain_dart/commit/92a8c7daccda2be38a25d4bdb0235c2f397225a2))\n - **FEAT**: Add support for RecursiveCharacterTextSplitter ([#61](https://github.com/davidmigloz/langchain_dart/issues/61)). ([697cdcbf](https://github.com/davidmigloz/langchain_dart/commit/697cdcbfef8fc45930de127cb5b7ee2eb3d7ec37))\n - **DOCS**: Document sequential chain. ([b9693a4e](https://github.com/davidmigloz/langchain_dart/commit/b9693a4e2dfcc6bfc74025ebb935865be942b266))\n - **DOCS**: Document text, json and web loaders. 
([a95b3e9f](https://github.com/davidmigloz/langchain_dart/commit/a95b3e9f843fcffce9449ea93f343df793512a09))\n - **DOCS**: Update API docs. ([7bfa6d17](https://github.com/davidmigloz/langchain_dart/commit/7bfa6d17cf57aac05906b1401ac3967c21e6f403))\n - **DOCS**: Update readme. ([dd394715](https://github.com/davidmigloz/langchain_dart/commit/dd39471557b37da0d0c2a87dea0c067463a45f45))\n\n## 0.0.1\n\n - Initial public release. \n\nCheck out the announcement post for all the details: \nhttps://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef\n\n## 0.0.1-dev.7\n\n- Add support for Agent class (#33).\n- Add support for AgentExecutor class (#56).\n- Update hello_world_flutter example with local models.\n\n## 0.0.1-dev.6\n\n- Add support for PipelinePromptTemplate class (#18).\n- LLMChain improvements (#43).\n\n## 0.0.1-dev.5\n\n- Add support for TextLoader (#47).\n- Add support for BaseLoader (#46).\n- Add support for RetrievalQAChain class (#42).\n- Add support for StuffDocumentsQAChain (#50).\n- Add support for StuffDocumentsChain (#49).\n- Add support for BaseCombineDocumentsChain class (#41).\n- Add support for ConditionalPromptSelector (#48).\n- Add support for VectorStoreRetriever class (#45).\n- Add support for MemoryVectorStore class (#44).\n- Add support for VectorStore class (#36).\n- Add support for OpenAIEmbeddings (#38).\n- Add support for CharacterTextSplitter class (#39).\n- Add support for OpenAI functions (#35).\n- Add support for Calculator tool (#32).\n- Add support for Tool class (#31).\n\n## 0.0.1-dev.4\n\n- Add support for LLMChain class (#20).\n- Add support for ChatMessageHistory class (#29).\n- Add support for ConversationBufferMemory class (#24).\n- Add support for ConversationChain (#21).\n- Add support for SimpleMemory class (#23).\n\n## 0.0.1-dev.3\n\n- Add support for ChatPromptTemplate class (#8).\n\n## 0.0.1-dev.2\n\n- Add support for LLMs - `BaseLLM` class (#14).\n- Add support for Chat models - `BaseChatModel` class (#10).\n- 
Add support for prompt templates - `PromptTemplate` class (#7).\n- Publish LangChain.dart documentation on http://langchaindart.dev.\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nBuild LLM-powered Dart/Flutter applications.\n\n## What is LangChain.dart?\n\nLangChain.dart is an unofficial Dart port of the popular [LangChain](https://github.com/hwchase17/langchain) Python framework created by [Harrison Chase](https://www.linkedin.com/in/harrison-chase-961287118).\n\nLangChain provides a set of ready-to-use components for working with language models and a standard interface for chaining them together to formulate more advanced use cases (e.g. chatbots, Q&A with RAG, agents, summarization, translation, extraction, recsys, etc.).\n\nThe components can be grouped into a few core modules:\n\n![LangChain.dart](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain.dart.png)\n\n- 📃 **Model I/O:** LangChain offers a unified API for interacting with various LLM providers (e.g. OpenAI, Google, Mistral, Ollama, etc.), allowing developers to switch between them with ease. 
Additionally, it provides tools for managing model inputs (prompt templates and example selectors) and parsing the resulting model outputs (output parsers).\n- 📚 **Retrieval:** assists in loading user data (via document loaders), transforming it (with text splitters), extracting its meaning (using embedding models), storing (in vector stores) and retrieving it (through retrievers) so that it can be used to ground the model's responses (i.e. Retrieval-Augmented Generation or RAG). \n- 🤖 **Agents:** \"bots\" that leverage LLMs to make informed decisions about which available tools (such as web search, calculators, database lookup, etc.) to use to accomplish the designated task.\n\nThe different components can be composed together using the [LangChain Expression Language (LCEL)](https://langchaindart.dev/#/expression_language/get_started).\n\n## Motivation\n\nLarge Language Models (LLMs) have revolutionized Natural Language Processing (NLP), serving as essential components in a wide range of applications, such as question-answering, summarization, translation, and text generation.\n\nThe adoption of LLMs is creating a new tech stack in its wake. However, emerging libraries and tools are predominantly being developed for the Python and JavaScript ecosystems. As a result, the number of applications leveraging LLMs in these ecosystems has grown exponentially.\n\nIn contrast, the Dart / Flutter ecosystem has not experienced similar growth, which can likely be attributed to the scarcity of Dart and Flutter libraries that streamline the complexities associated with working with LLMs.\n\nLangChain.dart aims to fill this gap by abstracting the intricacies of working with LLMs in Dart and Flutter, enabling developers to harness their combined potential effectively.\n\n## Packages\n\nLangChain.dart has a modular design that allows developers to import only the components they need. 
The ecosystem consists of several packages:\n\n### [`langchain_core`](https://pub.dev/packages/langchain_core) [![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core)\n\nContains only the core abstractions as well as LangChain Expression Language as a way to compose them together. \n\n> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it.\n\n### [`langchain`](https://pub.dev/packages/langchain) [![langchain](https://img.shields.io/pub/v/langchain.svg)](https://pub.dev/packages/langchain)\n\nContains higher-level and use-case specific chains, agents, and retrieval algorithms that are at the core of the application's cognitive architecture.\n\n> Depend on this package to build LLM applications with LangChain.dart.  \n> \n> This package exposes `langchain_core` so you don't need to depend on it explicitly.\n\n### [`langchain_community`](https://pub.dev/packages/langchain_community) [![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community)\n\nContains third-party integrations and community-contributed components that are not part of the core LangChain.dart API.\n\n> Depend on this package if you want to use any of the integrations or components it provides.\n\n### Integration-specific packages\n\nPopular third-party integrations (e.g. [`langchain_openai`](https://pub.dev/packages/langchain_openai), [`langchain_google`](https://pub.dev/packages/langchain_google), [`langchain_ollama`](https://pub.dev/packages/langchain_ollama), etc.) 
are moved to their own packages so that they can be imported independently without depending on the entire `langchain_community` package.\n\n> Depend on an integration-specific package if you want to use the specific integration.\n\n| Package                                                             | Version                                                                                                                      | Description                                                                                                                                                                           |\n|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | [![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic) | Anthropic integration (Claude 3.5 Sonnet, Opus, Haiku, Instant, etc.)                                                                                                                 
|\n| [langchain_chroma](https://pub.dev/packages/langchain_chroma)       | [![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma)          | Chroma vector database integration                                                                                                                                                    |\n| [langchain_firebase](https://pub.dev/packages/langchain_firebase)   | [![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase)    | Firebase integration (VertexAI for Firebase (Gemini 1.5 Pro, Gemini 1.5 Flash, etc.))                                                                                                 |\n| [langchain_google](https://pub.dev/packages/langchain_google)       | [![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google)          | Google integration (GoogleAI, VertexAI, Gemini, PaLM 2, Embeddings, Vector Search, etc.)                                                                                              |\n| [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai) | Mistral AI integration (Mistral-7B, Mixtral 8x7B, Mixtral 8x22B, Mistral Small, Mistral Large, embeddings, etc.).                                                                     |\n| [langchain_ollama](https://pub.dev/packages/langchain_ollama)       | [![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama)          | Ollama integration (Llama 3.2, Gemma 2, Phi-3.5, Mistral nemo, WizardLM-2, CodeGemma, Command R, LLaVA, DBRX, Qwen, Dolphin, DeepSeek Coder, Vicuna, Orca, etc.)                      
|\n| [langchain_openai](https://pub.dev/packages/langchain_openai)       | [![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai)          | OpenAI integration (GPT-4o, o1, Embeddings, Tools, Vision, DALL·E 3, etc.) and OpenAI Compatible services (TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, GPT4All, etc.) |\n| [langchain_pinecone](https://pub.dev/packages/langchain_pinecone)   | [![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone)    | Pinecone vector database integration                                                                                                                                                  |\n| [langchain_supabase](https://pub.dev/packages/langchain_supabase)   | [![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase)    | Supabase Vector database integration                                                                                                                                                  |\n\n<p align=\"center\">\n    <img src=\"https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain_packages.png\" width=\"500\">\n</p>\n\n### API clients packages\n\nThe following packages are maintained (and used internally) by LangChain.dart, although they can also be used independently:\n\n> Depend on an API client package if you just want to consume the API of a specific provider directly without using LangChain.dart abstractions. 
\n\n| Package                                                               | Version                                                                                                                         | Description                                                                    | \n|-----------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------|\n| [anthropic_sdk_dart](https://pub.dev/packages/anthropic_sdk_dart)     | [![anthropic_sdk_dart](https://img.shields.io/pub/v/anthropic_sdk_dart.svg)](https://pub.dev/packages/anthropic_sdk_dart)       | [Anthropic](https://docs.anthropic.com/en/api) API client                      |\n| [chromadb](https://pub.dev/packages/chromadb)                         | [![chromadb](https://img.shields.io/pub/v/chromadb.svg)](https://pub.dev/packages/chromadb)                                     | [Chroma DB](https://trychroma.com/) API client                                 |\n| [googleai_dart](https://pub.dev/packages/googleai_dart)               | [![googleai_dart](https://img.shields.io/pub/v/googleai_dart.svg)](https://pub.dev/packages/googleai_dart)                      | [Google AI for Developers](https://ai.google.dev/) API client                  |\n| [mistralai_dart](https://pub.dev/packages/mistralai_dart)             | [![mistralai_dart](https://img.shields.io/pub/v/mistralai_dart.svg)](https://pub.dev/packages/mistralai_dart)                   | [Mistral AI](https://docs.mistral.ai/api) API client                           |\n| [ollama_dart](https://pub.dev/packages/ollama_dart)                   | [![ollama_dart](https://img.shields.io/pub/v/ollama_dart.svg)](https://pub.dev/packages/ollama_dart)                            | [Ollama](https://ollama.ai/) API client                                    
    |\n| [openai_dart](https://pub.dev/packages/openai_dart)                   | [![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart)                            | [OpenAI](https://platform.openai.com/docs/api-reference) API client            |\n| [openai_realtime_dart](https://pub.dev/packages/openai_realtime_dart) | [![openai_realtime_dart](https://img.shields.io/pub/v/openai_realtime_dart.svg)](https://pub.dev/packages/openai_realtime_dart) | [OpenAI Realtime](https://platform.openai.com/docs/guides/realtime) API client |\n| [tavily_dart](https://pub.dev/packages/tavily_dart)                   | [![tavily_dart](https://img.shields.io/pub/v/tavily_dart.svg)](https://pub.dev/packages/tavily_dart)                            | [Tavily](https://tavily.com) API client                                        |\n| [vertex_ai](https://pub.dev/packages/vertex_ai)                       | [![vertex_ai](https://img.shields.io/pub/v/vertex_ai.svg)](https://pub.dev/packages/vertex_ai)                                  | [GCP Vertex AI](https://cloud.google.com/vertex-ai) API client                 |\n\n## Integrations\n\nThe following integrations are available in LangChain.dart:\n\n### Chat Models\n\n| Chat model                                                                                                              | Package                                                             | Streaming | Multi-modal | Tool-call | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          
|\n|-------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|-----------|-------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| [ChatAnthropic](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/anthropic)                 | [langchain_anthropic](https://pub.dev/packages/langchain_anthropic) | ✔         | ✔           | ✔         | [Anthropic Messages API](https://docs.anthropic.com/en/api/messages) (aka Claude API)                                                                                                                                                                                                                                                                                                                                                                                                                |\n| [ChatFirebaseVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/firebase_vertex_ai) | [langchain_firebase](https://pub.dev/packages/langchain_firebase)   | ✔         | ✔           | ✔         | [Vertex AI for Firebase API](https://firebase.google.com/docs/vertex-ai) (aka Gemini API)                                                                                                                                                                                                                                            
                                                                                                                                                                |\n| [ChatGoogleGenerativeAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/googleai)         | [langchain_google](https://pub.dev/packages/langchain_google)       | ✔         | ✔           | ✔         | [Google AI for Developers API](https://ai.google.dev) (aka Gemini API)                                                                                                                                                                                                                                                                                                                                                                                                                               |\n| [ChatMistralAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/mistralai)                 | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | ✔         |             |           | [Mistral Chat API](https://docs.mistral.ai)                                                                                                                                                                                                                                                                                                                                                                                                                                                          |\n| [ChatOllama](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/ollama)                       | [langchain_ollama](https://pub.dev/packages/langchain_ollama)       | ✔         | ✔           | ✔         | [Ollama Chat API](https://ollama.ai)                                                                                                                 
                                                                                                                                                                                                                                                                                                                                |\n| [ChatOpenAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/openai)                       | [langchain_openai](https://pub.dev/packages/langchain_openai)       | ✔         | ✔           | ✔         | [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) and OpenAI Chat API compatible services ([GitHub Models](https://github.com/marketplace/models), [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.) 
| \n| [ChatVertexAI](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/gcp_vertex_ai)              | [langchain_google](https://pub.dev/packages/langchain_google)       |           |             |           | [GCP Vertex AI Chat API](https://cloud.google.com/vertex-ai)                                                                                                                                                                                                                                                                                                                                                                                                                                         |\n\n### LLMs\n\n_Note: Prefer using Chat Models over LLMs as many providers have deprecated them._\n\n| LLM                                                                                             | Package                                                       | Streaming | Description                                                                          |\n|-------------------------------------------------------------------------------------------------|---------------------------------------------------------------|-----------|--------------------------------------------------------------------------------------|\n| [Ollama](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/ollama)          | [langchain_ollama](https://pub.dev/packages/langchain_ollama) | ✔         | [Ollama Completions API](https://ollama.ai)                                          |\n| [OpenAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/openai)          | [langchain_openai](https://pub.dev/packages/langchain_openai) | ✔         | [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions) | \n| [VertexAI](https://langchaindart.dev/#/modules/model_io/models/llms/integrations/gcp_vertex_ai) | 
[langchain_google](https://pub.dev/packages/langchain_google) |           | [GCP Vertex AI Text API](https://cloud.google.com/vertex-ai)                         |\n\n### Embedding Models\n\n| Embedding model                                                                                                     | Package                                                             | Description                                                                        |\n|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------|\n| [GoogleGenerativeAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/google_ai) | [langchain_google](https://pub.dev/packages/langchain_google)       | [Google AI Embeddings API](https://ai.google.dev)                                  |\n| [MistralAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/mistralai)          | [langchain_mistralai](https://pub.dev/packages/langchain_mistralai) | [Mistral Embeddings API](https://docs.mistral.ai)                                  |\n| [OllamaEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/ollama)                | [langchain_ollama](https://pub.dev/packages/langchain_ollama)       | [Ollama Embeddings API](https://ollama.ai)                                         |\n| [OpenAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/openai)                | [langchain_openai](https://pub.dev/packages/langchain_openai)       | [OpenAI Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) | \n| [VertexAIEmbeddings](https://langchaindart.dev/#/modules/retrieval/text_embedding/integrations/gcp_vertex_ai)       | 
[langchain_google](https://pub.dev/packages/langchain_google)       | [GCP Vertex AI Embeddings API](https://cloud.google.com/vertex-ai)                 |\n\n### Vector Stores\n\n| Vector store                                                                                                 | Package                                                             | Description                                                                                                                    |\n|--------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|\n| [Chroma](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/chroma)                    | [langchain_chroma](https://pub.dev/packages/langchain_chroma)       | [Chroma](https://trychroma.com/) integration                                                                                   |\n| [MemoryVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/memory)         | [langchain](https://pub.dev/packages/langchain)                     | In-memory vector store for prototype and testing                                                                               |\n| [ObjectBoxVectorStore](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox)   | [langchain_community](https://pub.dev/packages/langchain_community) | [ObjectBox](https://objectbox.io/) integration                                                                                 |\n| [Pinecone](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/pinecone)                | [langchain_pinecone](https://pub.dev/packages/langchain_pinecone)   | [Pinecone](https://pinecone.io/) integration                             
                                                      |\n| [Supabase](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase)                | [langchain_supabase](https://pub.dev/packages/langchain_supabase)   | [Supabase Vector](https://supabase.com/vector) integration                                                                     |\n| [VertexAIMatchingEngine](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/vertex_ai) | [langchain_google](https://pub.dev/packages/langchain_google)       | [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/vector-search/overview) (former Matching Engine) integration |\n\n### Tools\n\n| Tool                                                                                              | Package                                                             | Description                                                                                |\n|---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|--------------------------------------------------------------------------------------------|\n| [CalculatorTool](https://langchaindart.dev/#/modules/agents/tools/calculator)                     | [langchain_community](https://pub.dev/packages/langchain_community) | To calculate math expressions                                                              |\n| [OpenAIDallETool](https://langchaindart.dev/#/modules/agents/tools/openai_dall_e)                 | [langchain_openai](https://pub.dev/packages/langchain_openai)       | [OpenAI's DALL-E Image Generator](https://platform.openai.com/docs/api-reference/images)   | \n| [TavilyAnswerTool](https://langchaindart.dev/#/modules/agents/tools/tavily_answer)                | [langchain_community](https://pub.dev/packages/langchain_community) | Returns an answer for a query using the 
[Tavily](https://tavily.com) search engine         |\n| [TavilySearchResultsTool](https://langchaindart.dev/#/modules/agents/tools/tavily_search_results) | [langchain_community](https://pub.dev/packages/langchain_community) | Returns a list of results for a query using the [Tavily](https://tavily.com) search engine |\n\n## Getting started\n\nTo start using LangChain.dart, add `langchain` as a dependency to your `pubspec.yaml` file. Also, include the dependencies for the specific integrations you want to use (e.g.`langchain_community`, `langchain_openai`, `langchain_google`, etc.):\n\n```yaml\ndependencies:\n  langchain: {version}\n  langchain_community: {version}\n  langchain_openai: {version}\n  langchain_google: {version}\n  ...\n```\n\nThe most basic building block of LangChain.dart is calling an LLM on some prompt. LangChain.dart provides a unified interface for calling different LLMs. For example, we can use `ChatGoogleGenerativeAI` to call Google's Gemini model:\n\n```dart\nfinal model = ChatGoogleGenerativeAI(apiKey: googleApiKey);\nfinal prompt = PromptValue.string('Hello world!');\nfinal result = await model.invoke(prompt);\n// Hello everyone! I'm new here and excited to be part of this community.\n```\n\nBut the power of LangChain.dart comes from chaining together multiple components to implement complex use cases. For example, a RAG (Retrieval-Augmented Generation) pipeline that would accept a user query, retrieve relevant documents from a vector store, format them using prompt templates, invoke the model, and parse the output:\n\n```dart\n// 1. Create a vector store and add documents to it\nfinal vectorStore = MemoryVectorStore(\n  embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n);\nawait vectorStore.addDocuments(\n  documents: [\n    Document(pageContent: 'LangChain was created by Harrison'),\n    Document(pageContent: 'David ported LangChain to Dart in LangChain.dart'),\n  ],\n);\n\n// 2. 
Define the retrieval chain\nfinal retriever = vectorStore.asRetriever();\nfinal setupAndRetrieval = Runnable.fromMap<String>({\n  'context': retriever.pipe(\n    Runnable.mapInput((docs) => docs.map((d) => d.pageContent).join('\\n')),\n  ),\n  'question': Runnable.passthrough(),\n});\n\n// 3. Construct a RAG prompt template\nfinal promptTemplate = ChatPromptTemplate.fromTemplates([\n  (ChatMessageType.system, 'Answer the question based on only the following context:\\n{context}'),\n  (ChatMessageType.human, '{question}'),\n]);\n\n// 4. Define the final chain\nfinal model = ChatOpenAI(apiKey: openaiApiKey);\nconst outputParser = StringOutputParser<ChatResult>();\nfinal chain = setupAndRetrieval\n    .pipe(promptTemplate)\n    .pipe(model)\n    .pipe(outputParser);\n\n// 5. Run the pipeline\nfinal res = await chain.invoke('Who created LangChain.dart?');\nprint(res);\n// David created LangChain.dart\n```\n\n## Documentation\n\n- [LangChain.dart documentation](https://langchaindart.dev)\n- [Sample apps](https://github.com/davidmigloz/langchain_dart/tree/main/examples)\n- [LangChain.dart blog](https://blog.langchaindart.dev)\n- [Project board](https://github.com/users/davidmigloz/projects/2/views/1)\n\n## Community\n\nStay up-to-date on the latest news and updates on the field, have great discussions, and get help in the official [LangChain.dart Discord server](https://discord.gg/x4qbhqecVR).\n\n[![LangChain.dart Discord server](https://invidget.switchblade.xyz/x4qbhqecVR?theme=light)](https://discord.gg/x4qbhqecVR)\n\n## Contribute\n\n| 📢 **Call for Collaborators** 📢                                        |\n|-------------------------------------------------------------------------|\n| We are looking for collaborators to join the core group of maintainers. |\n\nNew contributors welcome! 
Check out our [Contributors Guide](https://github.com/davidmigloz/langchain_dart/blob/main/CONTRIBUTING.md) for help getting started.\n\nJoin us on [Discord](https://discord.gg/x4qbhqecVR) to meet other maintainers. We'll help you get your first contribution in no time!\n\n## Related projects\n\n- [LangChain](https://github.com/langchain-ai/langchain): The original Python LangChain project.\n- [LangChain.js](https://github.com/langchain-ai/langchainjs): A JavaScript port of LangChain.\n- [LangChain.go](https://github.com/tmc/langchaingo): A Go port of LangChain.\n- [LangChain.rb](https://github.com/andreibondarev/langchainrb): A Ruby port of LangChain.\n\n## Sponsors\n\n<p align=\"center\">\n  <a href=\"https://github.com/sponsors/davidmigloz\">\n    <img src='https://raw.githubusercontent.com/davidmigloz/sponsors/main/sponsors.svg'/>\n  </a>\n</p>\n\n## License\n\nLangChain.dart is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain/example/langchain_example.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'package:langchain/langchain.dart';\n\nvoid main() async {\n  final promptTemplate = PromptTemplate.fromTemplate(\n    'tell me a joke about {subject}',\n  );\n  final llm = FakeLLM(\n    responses: ['Why did the AI go on a diet? Because it had too many bytes!'],\n  );\n  final chain = promptTemplate.pipe(llm).pipe(const StringOutputParser());\n  final result = await chain.invoke({'subject': 'AI'});\n  print(result);\n  // Why did the AI go on a diet? Because it had too many bytes!\n}\n"
  },
  {
    "path": "packages/langchain/lib/langchain.dart",
    "content": "/// Build powerful LLM-based Dart/Flutter applications.\nlibrary;\n\nexport 'src/agents/agents.dart';\nexport 'src/chains/chains.dart';\nexport 'src/chat_history/chat_history.dart';\nexport 'src/chat_models/chat_models.dart';\nexport 'src/document_loaders/document_loaders.dart';\nexport 'src/documents/documents.dart';\nexport 'src/embeddings/embeddings.dart';\nexport 'src/exceptions/exceptions.dart';\nexport 'src/langchain/langchain.dart';\nexport 'src/language_models/language_models.dart';\nexport 'src/llms/llms.dart';\nexport 'src/memory/memory.dart';\nexport 'src/output_parsers/output_parsers.dart';\nexport 'src/prompts/prompts.dart';\nexport 'src/retrievers/retrievers.dart';\nexport 'src/runnables/runnables.dart';\nexport 'src/stores/stores.dart';\nexport 'src/text_splitters/text_splitters.dart';\nexport 'src/tools/tools.dart';\nexport 'src/utils/utils.dart';\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/agents/agents.dart",
    "content": "export 'package:langchain_core/agents.dart';\n\nexport 'executor.dart';\nexport 'tools.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/agents/executor.dart",
    "content": "import 'dart:convert';\n\nimport 'package:langchain_core/agents.dart';\nimport 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\nimport '../tools/exception.dart';\n\n/// {@template agent_executor}\n/// A chain responsible for executing the actions of an agent using tools.\n/// It receives user input and passes it to the agent, which then decides which\n/// tool/s to use and what action/s to take.\n///\n/// The [AgentExecutor] calls the specified tool with the generated input,\n/// retrieves the output, and passes it back to the agent to determine the next\n/// action. This process continues until the agent determines it can directly\n/// respond to the user or completes its task.\n///\n/// If you add [memory] to the [AgentExecutor], it will save the\n/// [AgentExecutor]'s inputs and outputs. It won't save the agent's\n/// intermediate inputs and outputs. 
If you want to save the agent's\n/// intermediate inputs and outputs, you should add [memory] to the agent\n/// instead.\n/// {@endtemplate}\nclass AgentExecutor extends BaseChain {\n  /// {@macro agent_executor}\n  AgentExecutor({\n    required this.agent,\n    super.memory,\n    this.returnIntermediateSteps = false,\n    this.maxIterations = 15,\n    this.maxExecutionTime,\n    this.earlyStoppingMethod = AgentEarlyStoppingMethod.force,\n    this.handleParsingErrors,\n  }) : _internalTools = [...agent.tools, ExceptionTool()] {\n    assert(\n      _validateMultiActionAgentTools(),\n      'Tools that have `returnDirect=true` are not allowed in multi-action agents',\n    );\n  }\n\n  /// The agent to run for creating a plan and determining actions to take at\n  /// each step of the execution loop.\n  final BaseActionAgent agent;\n\n  /// The valid tools the agent can call plus some internal tools used by the\n  /// executor.\n  final List<Tool> _internalTools;\n\n  /// Whether to return the agent's trajectory of intermediate steps at the\n  /// end in addition to the final output.\n  final bool returnIntermediateSteps;\n\n  /// The maximum number of steps to take before ending the execution loop.\n  /// Setting to null could lead to an infinite loop.\n  final int? maxIterations;\n\n  /// The maximum amount of wall clock time to spend in the execution loop.\n  final Duration? 
maxExecutionTime;\n\n  /// The method to use for early stopping if the agent never returns\n  /// [AgentFinish].\n  final AgentEarlyStoppingMethod earlyStoppingMethod;\n\n  /// Handles errors raised by the agent's output parser.\n  /// The response from this handler will be used as the tool input.\n  final Map<String, dynamic> Function(OutputParserException)?\n  handleParsingErrors;\n\n  /// Output key for the agent's intermediate steps output.\n  static const intermediateStepsOutputKey = 'intermediate_steps';\n\n  @override\n  Set<String> get inputKeys => agent.inputKeys;\n\n  @override\n  Set<String> get outputKeys => {\n    ...agent.returnValues,\n    if (returnIntermediateSteps) intermediateStepsOutputKey,\n  };\n\n  /// Validate that tools are compatible with multi action agent.\n  bool _validateMultiActionAgentTools() {\n    final agent = this.agent;\n    final tools = _internalTools;\n    if (agent is BaseMultiActionAgent) {\n      for (final tool in tools) {\n        if (tool.returnDirect) {\n          return false;\n        }\n      }\n    }\n    return true;\n  }\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    final List<AgentStep> intermediateSteps = [];\n\n    // Construct a mapping of tool name to tool for easy lookup\n    final nameToToolMap = {for (final tool in _internalTools) tool.name: tool};\n\n    // Let's start tracking the number of iterations and time elapsed\n    var iterations = 0;\n    final stopwatch = Stopwatch()..start();\n\n    ChainValues onAgentFinished(final AgentFinish result) {\n      return {\n        ...result.returnValues,\n        if (returnIntermediateSteps)\n          intermediateStepsOutputKey: intermediateSteps,\n      };\n    }\n\n    // We now enter the agent loop (until it returns something).\n    while (_shouldContinue(iterations, stopwatch.elapsed)) {\n      final (result, nextSteps) = await takeNextStep(\n        nameToToolMap,\n        inputs,\n        intermediateSteps,\n   
   );\n\n      if (result != null) {\n        return onAgentFinished(result);\n      }\n\n      if (nextSteps != null) {\n        intermediateSteps.addAll(nextSteps);\n\n        if (nextSteps.length == 1) {\n          final nextStep = nextSteps.first;\n          final tool = nameToToolMap[nextStep.action.tool];\n\n          if (tool != null && tool.returnDirect) {\n            return onAgentFinished(\n              AgentFinish(\n                returnValues: {agent.returnValues.first: nextStep.observation},\n              ),\n            );\n          }\n        }\n      }\n\n      iterations += 1;\n    }\n\n    final stopped = agent.returnStoppedResponse(\n      earlyStoppingMethod,\n      intermediateSteps,\n    );\n    return onAgentFinished(stopped);\n  }\n\n  /// Returns whether the execution loop should continue.\n  bool _shouldContinue(final int iterations, final Duration timeElapsed) {\n    if (maxIterations != null && iterations >= maxIterations!) {\n      return false;\n    }\n    if (maxExecutionTime != null && timeElapsed >= maxExecutionTime!) {\n      return false;\n    }\n    return true;\n  }\n\n  /// Take a single step in the thought-action-observation loop.\n  /// Override this to take control of how the agent makes and acts on choices.\n  @visibleForOverriding\n  Future<(AgentFinish? result, List<AgentStep>? 
nextSteps)> takeNextStep(\n    final Map<String, Tool> nameToToolMap,\n    final ChainValues inputs,\n    final List<AgentStep> intermediateSteps,\n  ) async {\n    List<BaseAgentAction> actions;\n\n    try {\n      // Call the LLM to see what to do\n      actions = await agent.plan(AgentPlanInput(inputs, intermediateSteps));\n    } on OutputParserException catch (e) {\n      if (handleParsingErrors == null) rethrow;\n      actions = [\n        AgentAction(\n          id: 'error',\n          tool: ExceptionTool.toolName,\n          toolInput: handleParsingErrors!(e),\n          log: e.toString(),\n        ),\n      ];\n    }\n\n    final List<AgentStep> result = [];\n    for (final action in actions) {\n      // If the tool chosen is the finishing tool, then we end and return\n      if (action is AgentFinish) {\n        return (action, null);\n      }\n      // Otherwise, we run the tool\n      final agentAction = action as AgentAction;\n      final tool = nameToToolMap[agentAction.tool];\n      String observation;\n      if (tool != null) {\n        final toolInput = tool.getInputFromJson(agentAction.toolInput);\n        final toolOutput = await tool.invoke(toolInput);\n        observation = toolOutput is String\n            ? toolOutput\n            : jsonEncode(toolOutput);\n      } else {\n        observation =\n            '${agentAction.tool} is not a valid tool, try another one.';\n      }\n      final step = AgentStep(action: action, observation: observation);\n      result.add(step);\n    }\n    return (null, result);\n  }\n\n  @override\n  String get chainType => 'agent_executor';\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/agents/tools.dart",
    "content": "import 'package:langchain_core/agents.dart';\nimport 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/exceptions.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\n\nconst _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate(\n  prompt: PromptTemplate(\n    inputVariables: {},\n    template: 'You are a helpful AI assistant',\n  ),\n);\n\n/// {@template tools_agent}\n/// An agent powered by the tool calling API.\n///\n/// Example:\n/// ```dart\n/// final llm = ChatOllama(\n///   defaultOptions: ChatOllamaOptions(\n///     model: 'llama3-groq-tool-use',\n///     temperature: 0,\n///   ),\n/// );\n/// final tools = [CalculatorTool()];\n/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n/// final executor = AgentExecutor(agent: agent);\n/// final res = await executor.run('What is 40 raised to the 0.43 power? ');\n/// ```\n///\n/// You can use any chat model that supports tools, like `ChatOpenAI`,\n/// `ChatOllama`, `ChatAnthropic`, `ChatFirebaseVertexAI`, etc. Check the\n/// [documentation](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools)\n/// for a complete list.\n///\n/// You can easily add memory to the agent using the memory parameter from the\n/// [ToolsAgent.fromLLMAndTools] constructor. Make sure you enable\n/// [BaseChatMemory.returnMessages] on your memory, as the agent works with\n/// [ChatMessage]s. The default prompt template already takes care of adding\n/// the history to the prompt. 
For example:\n/// ```dart\n/// final memory = ConversationBufferMemory(returnMessages: true);\n/// final agent = ToolsAgent.fromLLMAndTools(\n///   llm: llm,\n///   tools: tools,\n///   memory: memory,\n/// );\n/// ```\n///\n/// If you need to use your own [llmChain] make sure your prompt template\n/// includes:\n/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent.\n/// - If you are using memory:\n///   * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat\n///      messages.\n/// - If you are not using memory:\n///   * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`:\n///     the intermediary work of the agent (if you are using memory, the agent\n///     uses the memory to store the intermediary work).\n/// Example:\n/// ```dart\n/// ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, 'You are a helpful AI assistant'),\n///   (ChatMessageType.messagesPlaceholder, 'history'),\n///   (ChatMessageType.messagePlaceholder, 'input'),\n/// ]);\n/// ```\n///\n/// You can use [ToolsAgent.createPrompt] to build the prompt\n/// template if you only need to customize the system message or add some\n/// extra messages.\n/// {@endtemplate}\nclass ToolsAgent extends BaseSingleActionAgent {\n  /// {@macro tools_agent}\n  ToolsAgent({required this.llmChain, required super.tools})\n    : _parser = const ToolsAgentOutputParser(),\n      assert(\n        llmChain.memory != null ||\n            llmChain.prompt.inputVariables.contains(\n              BaseActionAgent.agentScratchpadInputKey,\n            ),\n        '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the '\n        'variables in the prompt, got ${llmChain.prompt.inputVariables}',\n      ),\n      assert(\n        llmChain.memory == null || llmChain.memory!.returnMessages,\n        'The memory must have `returnMessages` set to true',\n      );\n\n  /// Chain to use to call the LLM.\n  ///\n  /// If the chain does not 
have a memory, the prompt MUST include a variable\n  /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put\n  /// its intermediary work.\n  ///\n  /// If the chain has a memory, the agent will use the memory to store the\n  /// intermediary work.\n  ///\n  /// The memory must have [BaseChatMemory.returnMessages] set to true for\n  /// the agent to work properly.\n  final LLMChain<BaseChatModel, ChatModelOptions, BaseChatMemory> llmChain;\n\n  /// Parser to use to parse the output of the LLM.\n  final ToolsAgentOutputParser _parser;\n\n  /// The key for the input to the agent.\n  static const agentInputKey = 'input';\n\n  @override\n  Set<String> get inputKeys => {agentInputKey};\n\n  /// Construct an [ToolsAgent] from an [llm] and [tools].\n  ///\n  /// - [llm] - The model to use for the agent.\n  /// - [tools] - The tools the agent has access to. You can omit this field if\n  ///   you have already configured the tools in the [llm].\n  /// - [memory] - The memory to use for the agent.\n  /// - [systemChatMessage] message to use as the system message that will be\n  ///   the first in the prompt. Default: \"You are a helpful AI assistant\".\n  /// - [extraPromptMessages] prompt messages that will be placed between the\n  ///   system message and the input from the agent.\n  factory ToolsAgent.fromLLMAndTools({\n    required final BaseChatModel llm,\n    final List<Tool>? tools,\n    final BaseChatMemory? memory,\n    final SystemChatMessagePromptTemplate systemChatMessage =\n        _systemChatMessagePromptTemplate,\n    final List<ChatMessagePromptTemplate>? extraPromptMessages,\n  }) {\n    assert(\n      tools != null || llm.defaultOptions.tools != null,\n      'Tools must be provided or configured in the llm',\n    );\n    assert(\n      tools != null || llm.defaultOptions.tools!.every((tool) => tool is Tool),\n      'All elements in `tools` must be of type `Tool` or its subclasses',\n    );\n\n    final actualTools = tools ?? 
llm.defaultOptions.tools!.cast<Tool>();\n\n    return ToolsAgent(\n      llmChain: LLMChain(\n        llm: llm,\n        llmOptions: llm.defaultOptions.copyWith(tools: actualTools),\n        prompt: createPrompt(\n          systemChatMessage: systemChatMessage,\n          extraPromptMessages: extraPromptMessages,\n          memory: memory,\n        ),\n        memory: memory,\n      ),\n      tools: actualTools,\n    );\n  }\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async {\n    final llmChainInputs = _constructLlmChainInputs(\n      input.intermediateSteps,\n      input.inputs,\n    );\n    final ChainValues output = await llmChain.invoke(llmChainInputs);\n    final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage;\n    return _parser.parseChatMessage(predictedMessage);\n  }\n\n  Map<String, dynamic> _constructLlmChainInputs(\n    final List<AgentStep> intermediateSteps,\n    final InputValues inputs,\n  ) {\n    final dynamic agentInput;\n\n    // If there is a memory, we pass the last agent step as a function message.\n    // Otherwise, we pass the input as a human message.\n    if (llmChain.memory != null && intermediateSteps.isNotEmpty) {\n      final lastStep = intermediateSteps.last;\n      final functionMsg = ChatMessage.tool(\n        toolCallId: lastStep.action.id,\n        content: lastStep.observation,\n      );\n      agentInput = functionMsg;\n    } else {\n      agentInput = switch (inputs[agentInputKey]) {\n        final String inputStr => ChatMessage.humanText(inputStr),\n        final ChatMessage inputMsg => inputMsg,\n        final List<ChatMessage> inputMsgs => inputMsgs,\n        _ => throw LangChainException(\n          message:\n              'Agent expected a String or ChatMessage as input,'\n              ' got ${inputs[agentInputKey]}',\n        ),\n      };\n    }\n\n    return {\n      ...inputs,\n      agentInputKey: agentInput,\n      if (llmChain.memory == null)\n        
BaseActionAgent.agentScratchpadInputKey: _constructScratchPad(\n          intermediateSteps,\n        ),\n    };\n  }\n\n  List<ChatMessage> _constructScratchPad(\n    final List<AgentStep> intermediateSteps,\n  ) {\n    return [\n      ...intermediateSteps\n          .map((final s) {\n            return s.action.messageLog +\n                [\n                  ChatMessage.tool(\n                    toolCallId: s.action.id,\n                    content: s.observation,\n                  ),\n                ];\n          })\n          .expand((final m) => m),\n    ];\n  }\n\n  @override\n  String get agentType => 'tool-agent';\n\n  /// Creates prompt for this agent.\n  ///\n  /// It takes care of adding the necessary placeholders to handle the\n  /// intermediary work of the agent or the memory.\n  ///\n  /// - [systemChatMessage] message to use as the system message that will be\n  ///   the first in the prompt.\n  /// - [extraPromptMessages] prompt messages that will be placed between the\n  ///   system message and the new human input.\n  /// - [memory] optional memory to use for the agent.\n  static BasePromptTemplate createPrompt({\n    final SystemChatMessagePromptTemplate systemChatMessage =\n        _systemChatMessagePromptTemplate,\n    final List<ChatMessagePromptTemplate>? extraPromptMessages,\n    final BaseChatMemory? memory,\n  }) {\n    return ChatPromptTemplate.fromPromptMessages([\n      systemChatMessage,\n      ...?extraPromptMessages,\n      for (final memoryKey in memory?.memoryKeys ?? 
{})\n        MessagesPlaceholder(variableName: memoryKey),\n      const MessagePlaceholder(variableName: agentInputKey),\n      if (memory == null)\n        const MessagesPlaceholder(\n          variableName: BaseActionAgent.agentScratchpadInputKey,\n        ),\n    ]);\n  }\n}\n\n/// {@template tools_agent_output_parser}\n/// Parser for [ToolsAgent].\n///\n/// It parses the output of the LLM and returns the corresponding\n/// [BaseAgentAction] to be executed.\n/// {@endtemplate}\nclass ToolsAgentOutputParser\n    extends\n        BaseOutputParser<\n          ChatResult,\n          OutputParserOptions,\n          List<BaseAgentAction>\n        > {\n  /// {@macro tools_agent_output_parser}\n  const ToolsAgentOutputParser()\n    : super(defaultOptions: const OutputParserOptions());\n\n  @override\n  Future<List<BaseAgentAction>> invoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) {\n    return parseChatMessage(input.output);\n  }\n\n  /// Parses the [message] and returns the corresponding [BaseAgentAction].\n  Future<List<BaseAgentAction>> parseChatMessage(\n    final AIChatMessage message,\n  ) async {\n    final toolCalls = message.toolCalls;\n    if (toolCalls.isNotEmpty) {\n      return toolCalls\n          .map((final toolCall) {\n            return AgentAction(\n              id: toolCall.id,\n              tool: toolCall.name,\n              toolInput: toolCall.arguments,\n              log:\n                  'Invoking: `${toolCall.name}` '\n                  'with `${toolCall.arguments}`\\n'\n                  'Responded: ${message.content}\\n',\n              messageLog: [message],\n            );\n          })\n          .toList(growable: false);\n    } else {\n      return [\n        AgentFinish(\n          returnValues: {'output': message.content},\n          log: message.content,\n        ),\n      ];\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/base.dart",
    "content": "\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/chains.dart",
    "content": "export 'package:langchain_core/chains.dart';\n\nexport 'base.dart';\nexport 'combine_documents/combine_documents.dart';\nexport 'conversation.dart';\nexport 'question_answering/question_answering.dart';\nexport 'retrieval_qa.dart';\nexport 'sequential.dart';\nexport 'summarization/summarization.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/combine_documents/base.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template base_combine_documents_chain}\n/// Base interface for chains combining documents,\n/// such as [StuffDocumentsChain].\n///\n/// Subclasses of this chain deal with combining documents in a variety of\n/// ways. This base class exists to add some uniformity in the interface these\n/// types of chains should expose. Namely, they expect an input key related to\n/// the documents to use (default [defaultInputKey]), and then also expose a\n/// method to calculate the length of a prompt from documents (useful for\n/// outside callers to use to determine whether it's safe to pass a list of\n/// documents into this chain or whether that will be longer than the context\n/// length).\n/// {@endtemplate}\nabstract class BaseCombineDocumentsChain extends BaseChain {\n  /// {@macro base_combine_documents_chain}\n  const BaseCombineDocumentsChain({\n    this.inputKey = defaultInputKey,\n    this.outputKey = defaultOutputKey,\n  });\n\n  /// Key to use for input documents.\n  final String inputKey;\n\n  /// Key to use for output text.\n  final String outputKey;\n\n  /// Default [inputKey] value.\n  static const defaultInputKey = 'input_documents';\n\n  /// Default [outputKey] value.\n  static const defaultOutputKey = 'output';\n\n  /// Prompt variable to use for the page content.\n  static const pageContentPromptVar = 'page_content';\n\n  @override\n  Set<String> get inputKeys => {inputKey};\n\n  @override\n  Set<String> get outputKeys => {outputKey};\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) {\n    final docs = inputs[inputKey] as List<Document>;\n    final otherInputKeys = inputs.keys.toSet().difference({inputKey});\n    final otherInputs = {for (final key in otherInputKeys) key: inputs[key]};\n    return combineDocs(docs, inputs: 
otherInputs);\n  }\n\n  /// Returns the prompt length (number of tokens) given the documents passed\n  /// in.\n  ///\n  /// This can be used by a caller to determine whether passing in a list of\n  /// documents would exceed a certain prompt length. This is useful when trying\n  /// to ensure that the size of a prompt remains below a certain context limit.\n  ///\n  /// - [docs] is the list of documents to combine.\n  /// - [inputs] is a map of other inputs to use in the combination.\n  ///\n  /// Returns null if the combine method doesn't depend on the prompt length.\n  /// Otherwise, the length of the prompt in tokens.\n  Future<int?> promptLength(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  });\n\n  /// Combines the given [docs] into a single string.\n  ///\n  /// - [docs] is the list of documents to combine.\n  /// - [inputs] is a map of other inputs to use in the combination.\n  ///\n  /// Returns the output of the chain.\n  Future<ChainValues> combineDocs(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  });\n\n  /// Formats a document into a string based on a prompt template.\n  @protected\n  String formatDocument(final Document doc, final BasePromptTemplate prompt) {\n    final baseInfo = {pageContentPromptVar: doc.pageContent, ...doc.metadata};\n\n    final missingMetadata = prompt.inputVariables.difference(\n      baseInfo.keys.toSet(),\n    );\n    if (missingMetadata.isNotEmpty) {\n      final requiredMetadata = prompt.inputVariables.difference({\n        pageContentPromptVar,\n      });\n      throw PromptException(\n        message:\n            'Document prompt requires documents to have metadata '\n            'variables: $requiredMetadata. Received document with missing '\n            'metadata: $missingMetadata',\n      );\n    }\n\n    return prompt.format({\n      for (final key in prompt.inputVariables) key: baseInfo[key],\n    });\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/combine_documents/combine_documents.dart",
    "content": "export 'base.dart';\nexport 'map_reduce.dart';\nexport 'reduce.dart';\nexport 'stuff.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/combine_documents/map_reduce.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport 'base.dart';\nimport 'reduce.dart';\nimport 'stuff.dart';\n\n/// {@template map_reduce_documents_chain}\n/// Chain that combines documents by mapping them individually and then\n/// reducing the results.\n///\n/// This involves two chains:\n/// - [mapLlmChain] this is the chain that maps the documents individually.\n/// - [reduceDocumentsChain] this is the chain that reduces the results of\n///  applying [mapLlmChain] to each document.\n///\n/// The chain works as follows:\n/// - [mapLlmChain] is applied to each document individually.\n/// - The results of applying [mapLlmChain] to each document are combined by\n///   calling [reduceDocumentsChain] and the result is returned.\n///\n/// Example:\n/// ```dart\n/// final mapPrompt = PromptTemplate.fromTemplate(\n///   'Summarize this content: {context}',\n/// );\n/// final mapLlmChain = LLMChain(prompt: mapPrompt, llm: llm);\n/// final reducePrompt = PromptTemplate.fromTemplate(\n///   'Combine these summaries: {context}',\n/// );\n/// final reduceLlmChain = LLMChain(prompt: reducePrompt, llm: llm);\n/// final reduceDocsChain = StuffDocumentsChain(llmChain: reduceLlmChain);\n/// final reduceChain = MapReduceDocumentsChain(\n///   mapLlmChain: mapLlmChain,\n///   reduceDocumentsChain: reduceDocsChain,\n/// );\n/// const docs = [\n///   Document(pageContent: 'Hello 1!'),\n///   Document(pageContent: 'Hello 2!'),\n///   Document(pageContent: 'Hello 3!'),\n/// ];\n/// final res = await reduceChain.run(docs);\n/// ```\n/// {@endtemplate}\nclass MapReduceDocumentsChain extends BaseCombineDocumentsChain {\n  /// {@macro map_reduce_documents_chain}\n  MapReduceDocumentsChain({\n    required this.mapLlmChain,\n    required this.reduceDocumentsChain,\n    super.inputKey = defaultInputKey,\n    super.outputKey = 
defaultOutputKey,\n    this.mapLlmChainDocumentPromptVar = defaultLlmChainDocumentPromptVar,\n    this.returnIntermediateSteps = false,\n  }) {\n    _initLlmChainDocumentPromptVar();\n  }\n\n  /// Chain to apply to each document individually.\n  final LLMChain mapLlmChain;\n\n  /// Chain to use to reduce the results of applying [mapLlmChain] to each doc.\n  /// This is typically either a [ReduceDocumentsChain] or [StuffDocumentsChain].\n  final BaseCombineDocumentsChain reduceDocumentsChain;\n\n  /// The variable name in the [mapLlmChain] where to put the documents in.\n  /// If only one variable in the [mapLlmChain], this doesn't need to be provided.\n  String mapLlmChainDocumentPromptVar;\n\n  /// Return the results of the map steps in the output.\n  final bool returnIntermediateSteps;\n\n  /// Default [inputKey] value.\n  static const String defaultInputKey =\n      BaseCombineDocumentsChain.defaultInputKey;\n\n  /// Default [outputKey] value.\n  static const String defaultOutputKey =\n      BaseCombineDocumentsChain.defaultOutputKey;\n\n  /// Default value for [mapLlmChainDocumentPromptVar].\n  static const defaultLlmChainDocumentPromptVar = 'context';\n\n  /// Output key for the chain's intermediate steps output.\n  static const intermediateStepsOutputKey = 'intermediate_steps';\n\n  @override\n  String get chainType => 'map_reduce_documents_chain';\n\n  @override\n  Set<String> get inputKeys => {inputKey, ...reduceDocumentsChain.inputKeys};\n\n  @override\n  Set<String> get outputKeys => {\n    outputKey,\n    if (returnIntermediateSteps) intermediateStepsOutputKey,\n  };\n\n  void _initLlmChainDocumentPromptVar() {\n    // If only one variable is present in the llmChain.prompt,\n    // we can infer that the formatted documents should be passed in\n    // with this variable name\n    final llmChainInputVariables = mapLlmChain.prompt.inputVariables;\n    if (llmChainInputVariables.length == 1) {\n      mapLlmChainDocumentPromptVar = 
llmChainInputVariables.first;\n    } else if (mapLlmChainDocumentPromptVar.isEmpty) {\n      throw ArgumentError(\n        'llmChainDocumentPromptVar must be provided if there are multiple '\n        'llmChain input variables',\n      );\n    } else if (!llmChainInputVariables.contains(mapLlmChainDocumentPromptVar)) {\n      throw ArgumentError(\n        'llmChainDocumentPromptVar ($mapLlmChainDocumentPromptVar) was not found '\n        'in llmChain input variables',\n      );\n    }\n  }\n\n  @override\n  Future<int?> promptLength(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    // This combine method doesn't depend on the prompt length\n    return null;\n  }\n\n  /// Combines the documents in a map reduce manner.\n  ///\n  /// First, mapping [mapLlmChain] over all documents, then reducing the results\n  /// using [reduceDocumentsChain].\n  ///\n  /// - [docs] is the list of documents to combine.\n  /// - [inputs] is a map of other inputs to use in the combination.\n  ///\n  /// Returns the output of the chain.\n  @override\n  Future<ChainValues> combineDocs(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    final mapResults = await mapLlmChain.apply(\n      docs\n          .map(\n            (final d) => {\n              ...inputs,\n              mapLlmChainDocumentPromptVar: d.pageContent,\n            },\n          )\n          .toList(growable: false),\n    );\n\n    final questionResultKey = mapLlmChain.outputKey;\n    final resultDocs = List<Document>.generate(\n      mapResults.length,\n      (final i) => Document(\n        pageContent: _getContent(mapResults[i][questionResultKey]),\n        metadata: docs[i].metadata,\n      ),\n    );\n\n    final output = await reduceDocumentsChain.combineDocs(\n      resultDocs,\n      inputs: inputs,\n    );\n\n    if (returnIntermediateSteps) {\n      final intermediateSteps = mapResults\n          .map((final r) => 
_getContent(r[questionResultKey]))\n          .toList(growable: false);\n      return {...output, intermediateStepsOutputKey: intermediateSteps};\n    }\n\n    return output;\n  }\n\n  String _getContent(final dynamic content) => switch (content) {\n    final AIChatMessage resultMsg => resultMsg.content,\n    _ => content,\n  };\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/combine_documents/reduce.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport 'base.dart';\nimport 'stuff.dart';\n\n/// {@template reduce_documents_chain}\n/// Chain that combines documents by recursively reducing them if needed.\n///\n/// This involves two chains:\n/// - [combineDocumentsChain] this is the chain that combines the documents.\n/// - [collapseDocumentsChain] this is the chain that collapses the documents\n///   if they exceed [tokenMax].\n///\n/// The chain works as follows:\n/// - If the number of tokens resulting of formatting the prompt from\n///   [combineDocumentsChain] is less than [tokenMax], then\n///   [combineDocumentsChain] is called with the documents and the result is\n///   returned.\n/// - Otherwise, the documents are split into groups of max [tokenMax]\n///   tokens and [collapseDocumentsChain] is called for each group. Then, the\n///   resulting documents are combined by calling [combineDocumentsChain] and\n///   the result is returned.\n///\n/// Example:\n/// ```dart\n/// final finalPrompt = PromptTemplate.fromTemplate(\n///   'Summarize this content: {context}',\n/// );\n/// final finalLlmChain = LLMChain(prompt: finalPrompt, llm: llm);\n/// final combineDocsChain = StuffDocumentsChain(llmChain: finalLlmChain);\n///\n/// final collapsePrompt = PromptTemplate.fromTemplate(\n///   'Collapse this content: {context}',\n/// );\n/// final collapseLlmChain = LLMChain(prompt: collapsePrompt, llm: llm);\n/// final collapseDocsChain = StuffDocumentsChain(llmChain: collapseLlmChain);\n///\n/// final reduceChain = ReduceDocumentsChain(\n///   combineDocumentsChain: combineDocsChain,\n///   collapseDocumentsChain: collapseDocsChain,\n/// );\n///\n/// const docs = [\n///   Document(pageContent: 'Hello world 1!'),\n///   Document(pageContent: 'Hello world 2!'),\n///   Document(pageContent: 'Hello world 3!'),\n///   Document(pageContent: 'Hello world 
4!'),\n/// ];\n/// final res = await reduceChain.run(docs);\n/// ```\n/// {@endtemplate}\nclass ReduceDocumentsChain extends BaseCombineDocumentsChain {\n  /// {@macro reduce_documents_chain}\n  const ReduceDocumentsChain({\n    super.inputKey = defaultInputKey,\n    super.outputKey = defaultOutputKey,\n    required this.combineDocumentsChain,\n    this.collapseDocumentsChain,\n    this.tokenMax = defaultTokenMax,\n  });\n\n  /// Final chain to call to combine documents.\n  /// This is typically a [StuffDocumentsChain].\n  final BaseCombineDocumentsChain combineDocumentsChain;\n\n  /// Chain to use to collapse documents if needed until they can all fit.\n  /// If null, [combineDocumentsChain] will be used.\n  /// This is typically a [StuffDocumentsChain].\n  final BaseCombineDocumentsChain? collapseDocumentsChain;\n\n  /// The maximum number of tokens to group documents into. For example, if\n  /// set to 3000 then documents will be grouped into chunks of no greater than\n  /// 3000 tokens before trying to combine them into a smaller chunk.\n  ///\n  /// This is useful to avoid exceeding the context size when combining the\n  /// documents.\n  ///\n  /// It is assumed that each document to combine is less than\n  /// [tokenMax] tokens.\n  final int tokenMax;\n\n  /// Default [inputKey] value.\n  static const String defaultInputKey =\n      BaseCombineDocumentsChain.defaultInputKey;\n\n  /// Default [outputKey] value.\n  static const String defaultOutputKey =\n      BaseCombineDocumentsChain.defaultOutputKey;\n\n  /// Default [tokenMax] value.\n  static const defaultTokenMax = 3000;\n\n  @override\n  String get chainType => 'reduce_documents_chain';\n\n  @override\n  Future<int?> promptLength(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    // This combine method doesn't depend on the prompt length\n    return null;\n  }\n\n  /// Combine multiple documents.\n  ///\n  /// - [docs] the documents to combine. 
It is assumed that each one is less\n  ///   than [tokenMax] tokens.\n  /// - [inputs] additional parameters to be passed to LLM calls (like other\n  ///   input variables besides the documents).\n  ///\n  /// Returns the output of the chain.\n  @override\n  Future<ChainValues> combineDocs(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    final resultDocs = await _splitAndCollapseDocs(docs, inputs: inputs);\n    return combineDocumentsChain.combineDocs(resultDocs, inputs: inputs);\n  }\n\n  /// Splits the documents into smaller chunks that are each less than\n  /// [tokenMax] tokens. And then collapses them into a single document.\n  Future<List<Document>> _splitAndCollapseDocs(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    final lengthFunc = combineDocumentsChain.promptLength;\n\n    var resultDocs = docs;\n    int? numTokens = await lengthFunc(resultDocs, inputs: inputs);\n\n    while (numTokens != null && numTokens > tokenMax) {\n      final newResultDocList = await _splitDocs(\n        docs,\n        inputs,\n        lengthFunc,\n        tokenMax,\n      );\n      resultDocs = [];\n      for (final docs in newResultDocList) {\n        final newDoc = await _collapseDocs(docs, inputs);\n        resultDocs.add(newDoc);\n      }\n      numTokens = await lengthFunc(resultDocs, inputs: inputs);\n    }\n\n    return resultDocs;\n  }\n\n  /// Split a list of documents into smaller lists of documents that are each\n  /// less than [tokenMax] tokens.\n  Future<List<List<Document>>> _splitDocs(\n    final List<Document> docs,\n    final InputValues inputs,\n    final Future<int?> Function(List<Document> docs, {InputValues inputs})\n    lengthFunc,\n    final int tokenMax,\n  ) async {\n    final List<List<Document>> newResultDocList = [];\n    List<Document> subResultDocs = [];\n\n    for (final doc in docs) {\n      subResultDocs.add(doc);\n      final numTokens = await 
lengthFunc(subResultDocs, inputs: inputs);\n      if (numTokens != null && numTokens > tokenMax) {\n        assert(\n          subResultDocs.length > 1,\n          'We should never have a single document that is longer than the tokenMax.',\n        );\n        newResultDocList.add(\n          subResultDocs.sublist(0, subResultDocs.length - 1),\n        );\n        subResultDocs = subResultDocs.sublist(subResultDocs.length - 1);\n      }\n    }\n    newResultDocList.add(subResultDocs);\n    return newResultDocList;\n  }\n\n  /// Combines multiple documents into one using [collapseDocumentsChain] (or\n  /// [combineDocumentsChain] if [collapseDocumentsChain] is null).\n  /// The metadata of the different documents is also combined.\n  Future<Document> _collapseDocs(\n    final List<Document> docs,\n    final InputValues inputs,\n  ) async {\n    final collapseChain = collapseDocumentsChain ?? combineDocumentsChain;\n    final result = await collapseChain.run({\n      ...inputs,\n      BaseCombineDocumentsChain.defaultInputKey: docs,\n    });\n    final combinedMetadata = {...docs[0].metadata};\n    for (var i = 1; i < docs.length; i++) {\n      docs[i].metadata.forEach((final key, final value) {\n        if (combinedMetadata.containsKey(key) && value is String) {\n          combinedMetadata[key] = '${combinedMetadata[key]}, $value';\n        } else {\n          combinedMetadata[key] = value;\n        }\n      });\n    }\n    return Document(pageContent: result, metadata: combinedMetadata);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/combine_documents/stuff.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport 'base.dart';\n\n/// {@template stuff_documents_chain}\n/// Chain that combines documents by stuffing into context.\n///\n/// This chain takes a list of documents and first combines them into a single\n/// string. It does this by formatting each document into a string with the\n/// [documentPrompt] and then joining them together with [documentSeparator].\n/// It then adds that new resulting string to the inputs with the variable\n/// name set by [llmChainStuffedDocumentPromptVar]. Those inputs are then\n/// passed to the [llmChain] who will format the prompt and call the model.\n///\n/// The content of each document is formatted using [documentPrompt].\n/// By default, it just takes the content of the document.\n///\n/// Example:\n/// ```dart\n/// final prompt = PromptTemplate.fromTemplate(\n///   'Print {foo}. 
Context: {context}',\n/// );\n/// final llm = OpenAI(apiKey: openaiApiKey);\n/// final llmChain = LLMChain(prompt: prompt, llm: llm);\n/// final stuffChain = StuffDocumentsChain(llmChain: llmChain)\n/// const foo = 'Hello world!';\n/// const docs = [\n///   Document(pageContent: 'Hello 1!'),\n///   Document(pageContent: 'Hello 2!'),\n/// ];\n/// final res = await stuffChain.call({\n///   'foo': foo,\n///   'input_documents': docs,\n/// });\n/// ```\n/// {@endtemplate}\nclass StuffDocumentsChain extends BaseCombineDocumentsChain {\n  /// {@macro stuff_documents_chain}\n  StuffDocumentsChain({\n    required this.llmChain,\n    super.inputKey = defaultInputKey,\n    super.outputKey = defaultOutputKey,\n    this.documentPrompt = defaultDocumentPrompt,\n    this.documentSeparator = defaultDocumentSeparator,\n    this.llmChainStuffedDocumentPromptVar =\n        defaultLlmChainStuffedDocumentPromptVar,\n  }) {\n    _initLlmChainDocumentPromptVar();\n  }\n\n  /// LLM wrapper to use after formatting documents.\n  final LLMChain llmChain;\n\n  /// Prompt to use to format each document.\n  final BasePromptTemplate documentPrompt;\n\n  /// The string with which to join the formatted documents.\n  final String documentSeparator;\n\n  /// The variable name in the [LLMChain.prompt] where to put the documents in.\n  /// If only one variable in the [llmChain], this doesn't need to be provided.\n  String llmChainStuffedDocumentPromptVar;\n\n  /// Default [inputKey] value.\n  static const String defaultInputKey =\n      BaseCombineDocumentsChain.defaultInputKey;\n\n  /// Default [outputKey] value.\n  static const String defaultOutputKey =\n      BaseCombineDocumentsChain.defaultOutputKey;\n\n  /// Default [documentPrompt] value.\n  static const defaultDocumentPrompt = PromptTemplate(\n    inputVariables: {StuffDocumentsChain.pageContentPromptVar},\n    template: '{${StuffDocumentsChain.pageContentPromptVar}}',\n  );\n\n  /// Default value for [documentSeparator].\n  static const 
defaultDocumentSeparator = '\\n\\n';\n\n  /// Default value for [llmChainStuffedDocumentPromptVar].\n  static const defaultLlmChainStuffedDocumentPromptVar = 'context';\n\n  /// Prompt variable to use for the page content.\n  static const String pageContentPromptVar =\n      BaseCombineDocumentsChain.pageContentPromptVar;\n\n  @override\n  Set<String> get inputKeys => {\n    inputKey,\n    ...llmChain.inputKeys.difference({llmChainStuffedDocumentPromptVar}),\n  };\n\n  @override\n  String get chainType => 'stuff_documents_chain';\n\n  void _initLlmChainDocumentPromptVar() {\n    // If only one variable is present in the llmChain.prompt,\n    // we can infer that the formatted documents should be passed in\n    // with this variable name\n    final llmChainInputVariables = llmChain.prompt.inputVariables;\n    if (llmChainInputVariables.length == 1) {\n      llmChainStuffedDocumentPromptVar = llmChainInputVariables.first;\n    } else if (llmChainStuffedDocumentPromptVar.isEmpty) {\n      throw ArgumentError(\n        'llmChainStuffedDocumentPromptVar must be provided if there are '\n        'multiple llmChain input variables',\n      );\n    } else if (!llmChainInputVariables.contains(\n      llmChainStuffedDocumentPromptVar,\n    )) {\n      throw ArgumentError(\n        'llmChainStuffedDocumentPromptVar ($llmChainStuffedDocumentPromptVar) '\n        'was not found in llmChain input variables',\n      );\n    }\n  }\n\n  @override\n  Future<int?> promptLength(\n    final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) {\n    final llmInputs = _getInputs(docs, inputs);\n    final prompt = llmChain.prompt.formatPrompt(llmInputs);\n    return llmChain.llm.countTokens(prompt);\n  }\n\n  /// Stuff all documents into one prompt and pass to LLM.\n  ///\n  /// - [docs] the documents to combine.\n  /// - [inputs] the inputs to pass to the [llmChain].\n  ///\n  /// Returns the output of the chain.\n  @override\n  Future<ChainValues> combineDocs(\n    
final List<Document> docs, {\n    final InputValues inputs = const {},\n  }) async {\n    final llmInputs = _getInputs(docs, inputs);\n    final llmOutput = await llmChain.call(llmInputs);\n    final content = llmOutput[llmChain.outputKey];\n    final output = switch (content) {\n      final AIChatMessage resultMsg => resultMsg.content,\n      _ => content,\n    };\n    return {\n      outputKey: output,\n      if (!llmChain.returnFinalOnly)\n        LLMChain.fullGenerationOutputKey:\n            llmOutput[LLMChain.fullGenerationOutputKey],\n    };\n  }\n\n  /// Returns a map with all the input values for the prompt and\n  /// a string containing all the formatted documents to be passed in the\n  /// prompt.\n  Map<String, dynamic> _getInputs(\n    final List<Document> docs,\n    final InputValues inputs,\n  ) {\n    // Format each document according to the prompt\n    final docStrings = docs\n        .map((final doc) => formatDocument(doc, documentPrompt))\n        .toList(growable: false);\n    // Join the documents together to put them in the prompt\n    final promptInputValues = {\n      for (final key in inputs.keys)\n        if (llmChain.prompt.inputVariables.contains(key)) key: inputs[key],\n    };\n\n    return {\n      ...promptInputValues,\n      llmChainStuffedDocumentPromptVar: docStrings.join(documentSeparator),\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/conversation.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport '../memory/buffer.dart';\n\nconst _defaultTemplate = '''\nThe following is a friendly conversation between a human and an AI. The AI is \ntalkative and provides lots of specific details from its context. If the AI \ndoes not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:''';\n\nconst _memoryKey = 'history';\n\n/// {@template conversation_chain}\n/// Chain that carries on a conversation, loading context from memory\n/// and calling an LLM with it.\n///\n/// By default, the [ConversationChain] has a simple type of memory that\n/// remembers all previous inputs/outputs and adds them to the context that\n/// is passed to the LLM (see [ConversationBufferMemory]).\n///\n/// Example:\n/// ```dart\n/// final chain = ConversationChain(llm: OpenAI(apiKey: '...'));\n/// final res = await chain.run('Hello world!');\n/// ```\n///\n/// - [prompt] is the prompt that will be used to call the LLM.\n/// - [llm] is the LLM that will be called.\n/// - [outputParser] is the parser that will be used to parse the output.\n/// - [memory] is the memory that will be used to store and load context.\n/// - [inputKey] is the key of the value that needs to be passed to the chain.\n///   (e.g. 
if the prompt template has two input variables ('foo' and 'bar') and\n///   'foo' is loaded from memory, then 'bar' is the input key).\n/// - [outputKey] is the key in the returned map that contains the output of the\n///   chain execution.\n/// {@endtemplate}\nclass ConversationChain extends LLMChain {\n  /// {@macro conversation_chain}\n  ConversationChain({\n    super.prompt = const PromptTemplate(\n      template: _defaultTemplate,\n      inputVariables: {_memoryKey, 'input'},\n    ),\n    required super.llm,\n    super.outputParser,\n    final BaseMemory? memory,\n    this.inputKey = 'input',\n    super.outputKey = 'response',\n  }) : super(memory: memory ?? ConversationBufferMemory(memoryKey: _memoryKey));\n\n  /// The key of the input value.\n  final String inputKey;\n\n  @override\n  Set<String> get inputKeys => {inputKey};\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/question_answering/question_answering.dart",
    "content": "export 'stuff.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/question_answering/stuff.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport '../combine_documents/stuff.dart';\n\nconst _promptTemplate = '''\nUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:''';\n\nconst _prompt = PromptTemplate(\n  template: _promptTemplate,\n  inputVariables: {'context', 'question'},\n);\n\nconst _systemTemplate = '''\nUse the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\n{context}''';\n\nconst List<StringMessagePromptTemplate> _messages = [\n  SystemChatMessagePromptTemplate(\n    prompt: PromptTemplate(\n      template: _systemTemplate,\n      inputVariables: {'context'},\n    ),\n  ),\n  HumanChatMessagePromptTemplate(\n    prompt: PromptTemplate(\n      template: '{question}',\n      inputVariables: {'question'},\n    ),\n  ),\n];\n\nconst _chatPrompt = ChatPromptTemplate(\n  promptMessages: _messages,\n  inputVariables: {'context', 'question'},\n);\n\nfinal _promptSelector = ConditionalPromptSelector(\n  defaultPrompt: _prompt,\n  conditionals: [PromptCondition.isChatModel(_chatPrompt)],\n);\n\n/// {@template stuff_documents_qa_chain}\n/// Stuffs documents with a question answering prompt.\n/// {@endtemplate}\nclass StuffDocumentsQAChain extends StuffDocumentsChain {\n  /// {@macro stuff_documents_qa_chain}\n  StuffDocumentsQAChain({\n    final BasePromptTemplate? 
prompt,\n    required final BaseLanguageModel llm,\n    super.inputKey = StuffDocumentsChain.defaultInputKey,\n    super.outputKey = StuffDocumentsChain.defaultOutputKey,\n    super.documentPrompt = StuffDocumentsChain.defaultDocumentPrompt,\n    super.documentSeparator = StuffDocumentsChain.defaultDocumentSeparator,\n  }) : super(\n         llmChain: LLMChain(\n           prompt: prompt ?? _promptSelector.getPrompt(llm),\n           llm: llm,\n         ),\n       );\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/retrieval_qa.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/retrievers.dart';\n\nimport 'combine_documents/combine_documents.dart';\nimport 'question_answering/question_answering.dart';\n\n/// {@template retrieval_qa_chain}\n/// Chain for question-answering against the documents retrieved by the\n/// [retriever].\n///\n/// It retrieves the documents using the [retriever] and then combines them\n/// using the [combineDocumentsChain].\n///\n/// For convenience, you can instantiate this chain using the factory\n/// constructor [RetrievalQAChain.fromLlm]. By default, it uses a prompt\n/// template optimized for question answering that includes the retrieved\n/// documents and the question to answer. The documents are inserted in the\n/// prompt using a [StuffDocumentsQAChain].\n///\n/// The chain returns two outputs:\n/// - `result` (or the output key specified in the constructor): the answer to\n///   the question.\n/// - `source_documents`: the documents used to answer the question.\n///\n/// Note: as the chain returns two outputs you can only call it using the\n/// [call] method. 
The [run] method is not supported.\n///\n/// Example:\n/// ```dart\n/// final retriever = VectorStoreRetriever(vectorStore: vectorStore);\n/// final retrievalQA = RetrievalQAChain.fromLlm(\n///   llm: llm,\n///   retriever: retriever,\n/// );\n/// final res = await retrievalQA({\n///   RetrievalQAChain.defaultInputKey: 'What did I say?',\n/// });\n/// final answer = res[RetrievalQAChain.defaultOutputKey];\n/// final docs = res[RetrievalQAChain.sourceDocumentsOutputKey];\n/// ```\n///\n/// If you need more flexibility, you can use the primary constructor which\n/// allows you to specify the [retriever] and the [combineDocumentsChain].\n/// Your prompt should include the `{context}` and `{question}` variables to\n/// be replaced by the documents and the question respectively.\n///\n/// Example:\n/// ```dart\n/// final llmChain = LLMChain(prompt: prompt, llm: llm);\n/// final stuffChain = StuffDocumentsChain(llmChain: llmChain);\n/// final retrievalQA = RetrievalQAChain(\n///   retriever: retriever,\n///   combineDocumentsChain: stuffChain,\n/// );\n/// final res = await retrievalQA({\n///   RetrievalQAChain.defaultInputKey: 'What did I say?',\n/// });\n/// ```\n/// {@endtemplate}\nclass RetrievalQAChain extends BaseChain {\n  /// {@macro retrieval_qa_chain}\n  const RetrievalQAChain({\n    required this.retriever,\n    required this.combineDocumentsChain,\n    this.inputKey = defaultInputKey,\n    this.outputKey = defaultOutputKey,\n    this.combineDocumentsChainInputKey =\n        BaseCombineDocumentsChain.defaultInputKey,\n  });\n\n  /// Retriever to use.\n  final Retriever retriever;\n\n  /// Chain to use to combine the documents.\n  final BaseCombineDocumentsChain combineDocumentsChain;\n\n  /// Key to use for the input query.\n  final String inputKey;\n\n  /// Key to use for output text.\n  final String outputKey;\n\n  /// Key to use for inputting the documents to [combineDocumentsChain].\n  final String combineDocumentsChainInputKey;\n\n  /// Default input key 
for the query.\n  static const defaultInputKey = 'query';\n\n  /// Default output key for the output of the chain.\n  static const defaultOutputKey = 'result';\n\n  /// Output key to use for returning the source documents.\n  static const sourceDocumentsOutputKey = 'source_documents';\n\n  /// Prompt variable to use for the question.\n  static const questionPromptVar = 'question';\n\n  @override\n  Set<String> get inputKeys => {inputKey};\n\n  @override\n  Set<String> get outputKeys => {outputKey, sourceDocumentsOutputKey};\n\n  @override\n  String get chainType => 'retrieval_qa';\n\n  /// Creates a [RetrievalQAChain] from a [BaseLanguageModel] and a\n  /// [Retriever].\n  ///\n  /// By default, it uses a prompt template optimized for question answering\n  /// that includes the retrieved documents and the question.\n  ///\n  /// The documents are combined using a [StuffDocumentsChain].\n  ///\n  /// Example:\n  /// ```dart\n  /// final retriever = VectorStoreRetriever(vectorStore: vectorStore);\n  /// final retrievalQA = RetrievalQAChain.fromLlm(\n  ///   llm: llm,\n  ///   retriever: retriever,\n  /// );\n  /// final res = await retrievalQA({\n  ///   RetrievalQAChain.defaultInputKey: 'What did I say?',\n  /// });\n  /// ```\n  ///\n  /// If you want to use a different prompt template, you can pass it in\n  /// [prompt]. Use 'context' and 'question' as the variable names.\n  factory RetrievalQAChain.fromLlm({\n    required final BaseLanguageModel llm,\n    required final Retriever retriever,\n    final PromptTemplate? 
prompt,\n  }) {\n    return RetrievalQAChain(\n      retriever: retriever,\n      combineDocumentsChain: StuffDocumentsQAChain(llm: llm, prompt: prompt),\n    );\n  }\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    final query = inputs[inputKey] as String;\n\n    final docs = await _getDocs(query);\n    final combineDocumentsChainInputs = {\n      combineDocumentsChainInputKey: docs,\n      questionPromptVar: query,\n    };\n    final answer = await combineDocumentsChain.call(\n      combineDocumentsChainInputs,\n    );\n\n    final output = <String, dynamic>{};\n    for (final entry in answer.entries) {\n      final key = entry.key;\n      final value = entry.value;\n\n      if (key == combineDocumentsChain.outputKey) {\n        output[outputKey] = value;\n      } else if (key == combineDocumentsChain.inputKey) {\n        output[sourceDocumentsOutputKey] = value;\n      } else {\n        output[key] = value;\n      }\n    }\n\n    return output;\n  }\n\n  /// Returns the documents to do question answering over.\n  Future<List<Document>> _getDocs(final String query) {\n    return retriever.getRelevantDocuments(query);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/sequential.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/memory.dart';\n\n/// {@template sequential_chain}\n/// Chain that combines multiple chains where the output of the one\n/// chain is the input of the next chain.\n///\n/// If you don't provide [inputKeys] and [outputKeys], they will be inferred\n/// from the chains.\n///\n/// Example:\n/// ```dart\n/// final chain1 = FakeChain(\n///   inputVariables: {'foo', 'test'},\n///   outputVariables: {'bar'},\n/// );\n/// final chain2 = FakeChain(\n///   inputVariables: {'bar', 'foo'},\n///   outputVariables: {'baz'},\n/// );\n/// final chain = SequentialChain(chains: [chain1, chain2]);\n/// final output = await chain({'foo': '123', 'test': '456'});\n/// ```\n///\n/// If all the chains have only one input and one output, you can use\n/// [SimpleSequentialChain] instead.\n/// {@endtemplate}\nclass SequentialChain extends BaseChain {\n  /// {@macro sequential_chain}\n  SequentialChain({\n    required this.chains,\n    super.memory,\n    final Set<String>? inputKeys,\n    final Set<String>? outputKeys,\n    this.returnIntermediateOutputs = false,\n  }) : inputKeys = inputKeys ?? _inferInputKeys(chains, memory),\n       outputKeys =\n           outputKeys ??\n           _inferOutputKeys(chains, returnIntermediateOutputs, memory) {\n    assert(_isChainValid());\n  }\n\n  /// The chains to run sequentially.\n  final List<BaseChain> chains;\n\n  /// Whether the chain should return all intermediate outputs or just the\n  /// final output. 
By default, only the final output of the chain is returned.\n  /// Setting it to true can be useful if you want to see the outputs of each\n  /// step in the chain for debugging or analysis purposes.\n  bool returnIntermediateOutputs;\n\n  @override\n  String get chainType => 'sequential_chain';\n\n  @override\n  Set<String> inputKeys;\n\n  @override\n  Set<String> outputKeys;\n\n  /// Infers the input keys for the chain from the [chains].\n  static Set<String> _inferInputKeys(\n    final List<BaseChain> chains,\n    final BaseMemory? memory,\n  ) {\n    final inputKeys = {\n      for (final chain in chains)\n        ...chain.inputKeys.difference(chain.memory?.memoryKeys ?? const {}),\n    };\n    final outputKeys = {for (final chain in chains) ...chain.outputKeys};\n    return inputKeys\n        .difference(outputKeys)\n        .difference(memory?.memoryKeys ?? const {});\n  }\n\n  /// Infers the output keys for the chain from the [chains].\n  static Set<String> _inferOutputKeys(\n    final List<BaseChain> chains,\n    final bool returnIntermediateOutputs,\n    final BaseMemory? memory,\n  ) {\n    if (returnIntermediateOutputs) {\n      return {\n        ...?memory?.memoryKeys,\n        for (final chain in chains) ...{\n          ...chain.outputKeys,\n          ...?memory?.memoryKeys,\n        },\n      };\n    }\n    return chains.last.outputKeys;\n  }\n\n  /// Checks if the chain is valid.\n  bool _isChainValid() {\n    if (chains.isEmpty) {\n      throw AssertionError('Sequential chain must have at least one chain.');\n    }\n\n    if (memory != null) {\n      final intersection = inputKeys.intersection(memory!.memoryKeys);\n      if (intersection.isNotEmpty) {\n        throw AssertionError(\n          'Input keys and memory keys must not overlap. Got: $intersection',\n        );\n      }\n    }\n\n    final knownKeys = inputKeys.union(memory?.memoryKeys ?? 
const {});\n\n    for (final chain in chains) {\n      final missingKeys = chain.inputKeys\n          .difference(knownKeys)\n          .difference(chain.memory?.memoryKeys ?? const {});\n      if (missingKeys.isNotEmpty) {\n        throw AssertionError(\n          'Missing required input keys $missingKeys for chain '\n          '\"${chain.chainType}\". Only got $knownKeys.',\n        );\n      }\n\n      final overlappingOutputKeys = knownKeys.intersection(chain.outputKeys);\n      if (overlappingOutputKeys.isNotEmpty) {\n        throw AssertionError(\n          'Chain \"${chain.chainType}\" returns keys that already exist: '\n          '$overlappingOutputKeys.',\n        );\n      }\n\n      knownKeys.addAll(chain.outputKeys);\n    }\n\n    if (outputKeys.isNotEmpty) {\n      final missingKeys = outputKeys.difference(knownKeys);\n      if (missingKeys.isNotEmpty) {\n        throw AssertionError(\n          'Expected output keys that were not found: $missingKeys',\n        );\n      }\n    }\n\n    return true;\n  }\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    final knownValues = {...inputs};\n    for (final chain in chains) {\n      final outputs = await chain.call(knownValues, returnOnlyOutputs: true);\n      knownValues.addAll(outputs);\n    }\n    return {\n      for (final outputKey in outputKeys) outputKey: knownValues[outputKey],\n    };\n  }\n}\n\n/// {@template simple_sequential_chain}\n/// [SimpleSequentialChain] is a simpler form of [SequentialChain], where each\n/// step has a singular input/output, and the output of one step is the input\n/// to the next.\n///\n/// It is suitable for cases where you only need to pass a single string as an\n/// argument and get a single string as output for all steps in the chain.\n///\n/// Example:\n/// ```dart\n/// final chain1 = FakeChain(\n///   inputVariables: {'foo'},\n///   outputVariables: {'bar'},\n/// );\n/// final chain2 = FakeChain(\n///   inputVariables: 
{'bar'},\n///   outputVariables: {'baz'},\n/// );\n/// final chain = SimpleSequentialChain(chains: [chain1, chain2]);\n/// final output = await chain({'input': '123'});\n/// ```\n/// {@endtemplate}\nclass SimpleSequentialChain extends BaseChain {\n  /// {@macro simple_sequential_chain}\n  SimpleSequentialChain({\n    required this.chains,\n    super.memory,\n    final String inputKey = defaultInputKey,\n    final String outputKey = defaultOutputKey,\n    this.trimOutputs = false,\n  }) : inputKeys = {inputKey},\n       outputKeys = {outputKey} {\n    assert(_isChainValid());\n  }\n\n  /// The chains to run sequentially.\n  final List<BaseChain> chains;\n\n  /// Whether to trim the outputs of the chains before passing them to the next\n  /// chain. By default, the outputs are not trimmed.\n  final bool trimOutputs;\n\n  @override\n  String get chainType => 'simple_sequential_chain';\n\n  @override\n  Set<String> inputKeys;\n\n  @override\n  Set<String> outputKeys;\n\n  /// Default input key for the input of the chain.\n  static const defaultInputKey = 'input';\n\n  /// Default output key for the output of the chain.\n  static const defaultOutputKey = 'output';\n\n  /// Checks if the chain is valid.\n  bool _isChainValid() {\n    for (final chain in chains) {\n      if (chain.inputKeys.length != 1) {\n        throw AssertionError(\n          'Chains used in SimpleSequentialChain should all have one input. '\n          'Got ${chain.chainType} with ${chain.inputKeys.length} inputs.',\n        );\n      }\n\n      if (chain.outputKeys.length != 1) {\n        throw AssertionError(\n          'Chains used in SimpleSequentialChain should all have one output. 
'\n          'Got ${chain.chainType} with ${chain.outputKeys.length} outputs.',\n        );\n      }\n    }\n    return true;\n  }\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    dynamic input = inputs[inputKeys.first];\n    for (final chain in chains) {\n      input = await chain.run(input);\n      if (trimOutputs && input is String) {\n        input = input.trim();\n      }\n    }\n    return {outputKeys.first: input};\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/summarization/summarization.dart",
    "content": "export 'summarize.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/chains/summarization/summarize.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport '../combine_documents/combine_documents.dart';\n\nconst _template = '''\nWrite a concise summary of the following:\n\n\n\"{context}\"\n\n\nCONCISE SUMMARY:''';\n\nconst _promptTemplate = PromptTemplate(\n  template: _template,\n  inputVariables: {'context'},\n);\n\n/// Chain for summarizing documents.\n///\n/// There are two methods to summarize documents:\n/// - [stuff] uses the [StuffDocumentsChain] to combine all the documents into\n///   a single string, then prompts the model to summarize that string. This\n///   method is limited by the context length limit of the model.\n/// - [mapReduce] uses the [MapReduceDocumentsChain] to summarize each document\n///   individually, then combines the results into a single summary.\nabstract class SummarizeChain {\n  /// The [stuff] method uses the [StuffDocumentsChain] to combine all the\n  /// documents into a single string, then prompts the model to summarize that\n  /// string. This method is limited by the context length limit of the [llm].\n  ///\n  /// - [llm] is the language model to use for summarization.\n  /// - [inputKey] is the input key where the documents to summarize will be\n  ///   placed.\n  /// - [outputKey] is the output key where the summary will be placed.\n  /// - [promptTemplate] is the prompt to use to summarize the documents.\n  ///   The default prompt template instructs the model to create a\n  ///   \"concise summary\".\n  /// - [documentPrompt] is the prompt to use to format each document before\n  ///   combining them. 
The default prompt just takes the content of the\n  ///   document.\n  /// - [stuffedDocumentPromptVar] is the variable used in the [promptTemplate]\n  ///   to indicate where the stuffed document should be placed.\n  /// - [documentSeparator] is the separator used to join the documents while\n  ///   stuffing them.\n  ///\n  /// Example:\n  /// ```dart\n  /// final loader = TextLoader('path/to/file.txt');\n  /// final docs = await loader.load();\n  ///\n  /// const textSplitter = RecursiveCharacterTextSplitter();\n  /// final docsChunks = textSplitter.splitDocuments(docs);\n  ///\n  /// final llm = ChatOpenAI(apiKey: openAIKey);\n  /// final summarizeChain = SummarizeChain.stuff(llm: llm);\n  ///\n  /// final summary = await summarizeChain.run(docsChunks);\n  /// ```\n  static StuffDocumentsChain stuff({\n    required final BaseLanguageModel llm,\n    final String inputKey = SummarizeChain.defaultInputKey,\n    final String outputKey = SummarizeChain.defaultOutputKey,\n    final BasePromptTemplate promptTemplate = _promptTemplate,\n    final BasePromptTemplate documentPrompt =\n        StuffDocumentsChain.defaultDocumentPrompt,\n    final String stuffedDocumentPromptVar =\n        StuffDocumentsChain.defaultLlmChainStuffedDocumentPromptVar,\n    final String documentSeparator =\n        StuffDocumentsChain.defaultDocumentSeparator,\n  }) {\n    final llmChain = LLMChain(llm: llm, prompt: promptTemplate);\n\n    return StuffDocumentsChain(\n      llmChain: llmChain,\n      inputKey: inputKey,\n      outputKey: outputKey,\n      documentPrompt: documentPrompt,\n      llmChainStuffedDocumentPromptVar: stuffedDocumentPromptVar,\n      documentSeparator: documentSeparator,\n    );\n  }\n\n  /// The [mapReduce] method uses the [MapReduceDocumentsChain] to summarize\n  /// each document individually, then combines the results into a single\n  /// summary.\n  ///\n  /// The [MapReduceDocumentsChain] involves two chains behind the scenes:\n  /// - 
[MapReduceDocumentsChain.mapLlmChain] this is the chain that is applied\n  ///   to each document to create a summary.\n  /// - [MapReduceDocumentsChain.reduceDocumentsChain] this is a\n  ///   [ReduceDocumentsChain] that reduces the summaries of each document into\n  ///   a single summary.\n  ///\n  /// - [llm] is the language model to use for summarization.\n  /// - [inputKey] is the input key where the documents to summarize will be\n  ///   placed.\n  /// - [outputKey] is the output key where the summary will be placed.\n  /// - [mapPrompt] is the prompt to use to summarize each document\n  ///   individually.\n  /// - [mapDocumentPromptVar] is the variable used in the [mapPrompt] to\n  ///   indicate where the document should be placed.\n  /// - [combinePrompt] is the prompt to use to summarize the summaries of each\n  ///   document.\n  /// - [combineLlm] is the language model to use to summarize the summaries of\n  ///   each document. By default, [llm] is used.\n  /// - [combineDocumentPrompt] is the prompt to use to format each individual\n  ///   document before summarizing it. The default prompt just takes the\n  ///   content of the document.\n  /// - [combineDocumentPromptVar] is the variable used in the [combinePrompt]\n  ///   to indicate where the summaries should be placed.\n  /// - [combineDocumentSeparator] is the separator used to join the summaries.\n  /// - [combineInputMaxTokens] is the maximum number of tokens allowed for the\n  ///   input of the final combine call. If the sum of the lengths of the\n  ///   summaries of each document exceeds this limit, the summaries will be\n  ///   collapsed using [collapseLlm] before they are combined into the final\n  ///   summary. Set this to a value lower than the context length limit of the\n  ///   model. 
For example: if the model context length is 4,097, you can set\n  ///   [combineInputMaxTokens] to 3,000 to have 1,097 tokens left for the final\n  ///   summary generation.\n  /// - [collapsePrompt] is the prompt to use to collapse the final summary if\n  ///   it exceeds the [combineInputMaxTokens] limit. By default,\n  ///   [combinePrompt] is used.\n  /// - [collapseLlm] is the language model to use to collapse the final\n  ///   summary. By default, [combineLlm] is used if it is not null, otherwise\n  ///   [llm] is used.\n  /// - [collapseDocumentPrompt] is the prompt to use to format the final\n  ///   summary before collapsing it. The default prompt just takes the content\n  ///   of the document.\n  /// - [collapseDocumentPromptVar] is the variable used in the [collapsePrompt]\n  ///   to indicate where the summary to be collapsed should be placed.\n  /// - [collapseDocumentSeparator] is the separator used to join the summary\n  ///   to be collapsed.\n  /// - [returnIntermediateSteps] indicates whether to return the intermediate\n  ///   steps of the summarization process. 
If true, the intermediate steps\n  ///   will be placed in the [MapReduceDocumentsChain.intermediateStepsOutputKey]\n  ///   output key.\n  ///\n  /// Example:\n  /// ```dart\n  /// final loader = WebBaseLoader(['https://example.com']);\n  /// final docs = await loader.load();\n  ///\n  /// const textSplitter = RecursiveCharacterTextSplitter();\n  /// final docsChunks = textSplitter.splitDocuments(docs);\n  ///\n  /// final llm = ChatOpenAI(apiKey: openAIKey);\n  /// final summarizeChain = SummarizeChain.mapReduce(llm: llm);\n  ///\n  /// final summary = await summarizeChain.run(docsChunks);\n  /// ```\n  static MapReduceDocumentsChain mapReduce({\n    required final BaseLanguageModel llm,\n    final String inputKey = SummarizeChain.defaultInputKey,\n    final String outputKey = SummarizeChain.defaultOutputKey,\n    final BasePromptTemplate mapPrompt = _promptTemplate,\n    final String mapDocumentPromptVar =\n        MapReduceDocumentsChain.defaultLlmChainDocumentPromptVar,\n    final BasePromptTemplate combinePrompt = _promptTemplate,\n    final BaseLanguageModel? combineLlm,\n    final BasePromptTemplate combineDocumentPrompt =\n        StuffDocumentsChain.defaultDocumentPrompt,\n    final String combineDocumentPromptVar =\n        StuffDocumentsChain.defaultLlmChainStuffedDocumentPromptVar,\n    final String combineDocumentSeparator =\n        StuffDocumentsChain.defaultDocumentSeparator,\n    final int combineInputMaxTokens = ReduceDocumentsChain.defaultTokenMax,\n    final BasePromptTemplate? collapsePrompt,\n    final BaseLanguageModel? 
collapseLlm,\n    final BasePromptTemplate collapseDocumentPrompt =\n        StuffDocumentsChain.defaultDocumentPrompt,\n    final String collapseDocumentPromptVar =\n        StuffDocumentsChain.defaultLlmChainStuffedDocumentPromptVar,\n    final String collapseDocumentSeparator =\n        StuffDocumentsChain.defaultDocumentSeparator,\n    final bool returnIntermediateSteps = false,\n  }) {\n    final finalCombineLlm = combineLlm ?? llm;\n    final combineLlmChain = LLMChain(\n      llm: finalCombineLlm,\n      prompt: combinePrompt,\n    );\n\n    final combineDocumentsChain = StuffDocumentsChain(\n      llmChain: combineLlmChain,\n      documentPrompt: combineDocumentPrompt,\n      llmChainStuffedDocumentPromptVar: combineDocumentPromptVar,\n      documentSeparator: combineDocumentSeparator,\n    );\n\n    StuffDocumentsChain? collapseDocumentsChain;\n    if (collapsePrompt != null) {\n      final finalCollapseLLm = collapseLlm ?? combineLlm ?? llm;\n      final collapseLlmChain = LLMChain(\n        llm: finalCollapseLLm,\n        prompt: collapsePrompt,\n      );\n      collapseDocumentsChain = StuffDocumentsChain(\n        llmChain: collapseLlmChain,\n        documentPrompt: collapseDocumentPrompt,\n        llmChainStuffedDocumentPromptVar: collapseDocumentPromptVar,\n        documentSeparator: collapseDocumentSeparator,\n      );\n    }\n\n    final reduceDocumentsChain = ReduceDocumentsChain(\n      combineDocumentsChain: combineDocumentsChain,\n      collapseDocumentsChain: collapseDocumentsChain,\n      tokenMax: combineInputMaxTokens,\n    );\n\n    final mapLlmChain = LLMChain(llm: llm, prompt: mapPrompt);\n\n    return MapReduceDocumentsChain(\n      inputKey: inputKey,\n      outputKey: outputKey,\n      mapLlmChain: mapLlmChain,\n      reduceDocumentsChain: reduceDocumentsChain,\n      mapLlmChainDocumentPromptVar: mapDocumentPromptVar,\n      returnIntermediateSteps: returnIntermediateSteps,\n    );\n  }\n\n  /// Default input key for the 
summarization chain where to place the\n  /// documents to summarize.\n  static const String defaultInputKey =\n      BaseCombineDocumentsChain.defaultInputKey;\n\n  /// Default output key for the summarization chain where to place the\n  /// summary.\n  static const String defaultOutputKey =\n      BaseCombineDocumentsChain.defaultOutputKey;\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chat_history/chat_history.dart",
    "content": "export 'package:langchain_core/chat_history.dart';\n\nexport 'in_memory.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/chat_history/in_memory.dart",
    "content": "import 'dart:collection';\n\nimport 'package:langchain_core/chat_history.dart';\nimport 'package:langchain_core/chat_models.dart';\n\n/// {@template chat_message_history}\n/// A simple in-memory implementation of a chat message history.\n/// Stores messages in an in memory list.\n/// {@endtemplate}\nfinal class ChatMessageHistory extends BaseChatMessageHistory {\n  /// {@macro chat_message_history}\n  ChatMessageHistory({final List<ChatMessage>? messages})\n    : _messages = Queue.from(messages ?? <ChatMessage>[]);\n\n  final Queue<ChatMessage> _messages;\n\n  @override\n  Future<List<ChatMessage>> getChatMessages() {\n    return Future.value(_messages.toList(growable: false));\n  }\n\n  /// Convert the history to a map.\n  Map<String, dynamic> toMap() => {\n    'messages': _messages.map((message) => message.toMap()).toList(),\n  };\n\n  /// Convert the history from a map.\n  factory ChatMessageHistory.fromMap(Map<String, dynamic> map) =>\n      ChatMessageHistory(\n        messages: (map['messages'] as List<dynamic>)\n            .whereType<Map<String, dynamic>>()\n            .map(ChatMessage.fromMap)\n            .toList(),\n      );\n\n  @override\n  Future<void> addChatMessage(final ChatMessage message) async {\n    _messages.add(message);\n  }\n\n  @override\n  Future<ChatMessage> removeFirst() {\n    return Future.value(_messages.removeFirst());\n  }\n\n  @override\n  Future<ChatMessage> removeLast() {\n    return Future.value(_messages.removeLast());\n  }\n\n  @override\n  Future<void> clear() async {\n    _messages.clear();\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/chat_models/chat_models.dart",
    "content": "export 'package:langchain_core/chat_models.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/document_loaders/document_loaders.dart",
    "content": "export 'package:langchain_core/document_loaders.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/documents/documents.dart",
    "content": "export 'package:langchain_core/documents.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/embeddings/cache.dart",
    "content": "import 'dart:convert';\nimport 'dart:typed_data';\n\nimport 'package:crypto/crypto.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/stores.dart';\nimport 'package:uuid/uuid.dart';\n\nimport '../stores/encoder_backed.dart';\n\n/// {@template cache_backed_embeddings}\n/// Wrapper around an embedder that caches embeddings in a key-value store to\n/// avoid recomputing embeddings for the same text.\n///\n/// When embedding a new document, the method first checks the cache for the\n/// embeddings. If the embeddings are not found, the method uses the underlying\n/// embedder to embed the documents and stores the results in the cache.\n///\n/// The factory constructor [CacheBackedEmbeddings.fromByteStore] can be used\n/// to create a cache backed embeddings that uses a [EncoderBackedStore] which\n/// generates the keys for the cache by hashing the text.\n///\n/// You can use a [InMemoryStore] (mainly for testing or prototyping),\n/// a [LocalFileStore] or your custom implementation of [BaseStore].\n///\n/// The [CacheBackedEmbeddings.embedQuery] method does not support caching at\n/// the moment.\n/// {@endtemplate}\nclass CacheBackedEmbeddings extends Embeddings {\n  /// {@macro cache_backed_embeddings}\n  const CacheBackedEmbeddings({\n    required this.underlyingEmbeddings,\n    required this.documentEmbeddingsStore,\n  });\n\n  /// The embedder to use for computing embeddings.\n  final Embeddings underlyingEmbeddings;\n\n  /// The store to use for caching embeddings.\n  final BaseStore<String, List<double>> documentEmbeddingsStore;\n\n  /// Create a cache backed embeddings that uses a [EncoderBackedStore] which\n  /// generates the keys for the cache by hashing the text.\n  ///\n  /// - [underlyingEmbeddings] is the embedder to use for computing embeddings.\n  /// - [documentEmbeddingsStore] is the store to use for caching embeddings.\n  /// - [namespace] is the 
namespace to use for the cache. This namespace is\n  ///   used to avoid collisions of the same text embedded using different\n  ///   embeddings models. For example, you can set it to the name of the\n  ///   embedding model used.\n  ///\n  /// Example:\n  /// ```dart\n  /// final cacheBackedEmbeddings = CacheBackedEmbeddings.fromByteStore(\n  ///   underlyingEmbeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n  ///   documentEmbeddingsStore: InMemoryStore(),\n  ///   namespace: 'text-embedding-3-small',\n  /// );\n  /// ```\n  factory CacheBackedEmbeddings.fromByteStore({\n    required final Embeddings underlyingEmbeddings,\n    required final BaseStore<String, Uint8List> documentEmbeddingsStore,\n    final String namespace = '',\n  }) {\n    return CacheBackedEmbeddings(\n      underlyingEmbeddings: underlyingEmbeddings,\n      documentEmbeddingsStore: EncoderBackedStore(\n        store: documentEmbeddingsStore,\n        encoder: EmbeddingsByteStoreEncoder(namespace: namespace),\n      ),\n    );\n  }\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    final texts = documents\n        .map((final doc) => doc.pageContent)\n        .toList(growable: false);\n    final vectors = await documentEmbeddingsStore.get(texts);\n    final missingIndices = [\n      for (var i = 0; i < texts.length; i++)\n        if (vectors[i] == null) i,\n    ];\n    final missingDocs = missingIndices\n        .map((final i) => documents[i])\n        .toList(growable: false);\n\n    if (missingDocs.isNotEmpty) {\n      final missingVectors = await underlyingEmbeddings.embedDocuments(\n        missingDocs,\n      );\n      final missingVectorPairs = missingIndices\n          .map((final i) => (texts[i], missingVectors[i]))\n          .toList(growable: false);\n      await documentEmbeddingsStore.set(missingVectorPairs);\n      for (var i = 0; i < missingIndices.length; i++) {\n        vectors[missingIndices[i]] = missingVectors[i];\n     
 }\n    }\n    return vectors.cast();\n  }\n\n  /// Embed query text.\n  ///\n  /// This method does not support caching at the moment.\n  ///\n  /// Support for caching queries is easy to implement, but might make\n  /// sense to hold off to see the most common patterns.\n  ///\n  /// If the cache has an eviction policy, we may need to be a bit more careful\n  /// about sharing the cache between documents and queries. Generally,\n  /// one is OK evicting query caches, but document caches should be kept.\n  @override\n  Future<List<double>> embedQuery(final String query) {\n    return underlyingEmbeddings.embedQuery(query);\n  }\n}\n\n/// {@template embeddings_byte_store_encoder}\n/// Encoder that transforms values to and from bytes.\n/// {@endtemplate}\nclass EmbeddingsByteStoreEncoder\n    implements StoreEncoder<String, List<double>, String, Uint8List> {\n  /// {@macro embeddings_byte_store_encoder}\n  const EmbeddingsByteStoreEncoder({\n    this.namespace = '',\n    this.uuid = const Uuid(),\n  });\n\n  /// The namespace to use for the cache keys.\n  final String namespace;\n\n  /// The uuid generator to use for generating the cache keys.\n  final Uuid uuid;\n\n  @override\n  String encodeKey(final String key) {\n    final keyHash = sha1.convert(utf8.encode(key)).toString();\n    return uuid.v5(Namespace.url.value, keyHash);\n  }\n\n  @override\n  Uint8List encodeValue(final List<double> value) {\n    return utf8.encoder.convert(json.encode(value));\n  }\n\n  @override\n  String decodeKey(final String encodedKey) => throw UnimplementedError(\n    'Decoding keys is not supported for the _ByteStoreEncoder.',\n  );\n\n  @override\n  List<double> decodeValue(final Uint8List encodedValue) {\n    // ignore: avoid_dynamic_calls\n    return json.decode(utf8.decode(encodedValue)).cast<double>();\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/embeddings/embeddings.dart",
    "content": "export 'package:langchain_core/embeddings.dart';\n\nexport 'cache.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/exceptions/exceptions.dart",
    "content": "export 'package:langchain_core/exceptions.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/langchain/langchain.dart",
    "content": "export 'package:langchain_core/langchain.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/language_models/language_models.dart",
    "content": "export 'package:langchain_core/language_models.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/llms/llms.dart",
    "content": "export 'package:langchain_core/llms.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/buffer.dart",
    "content": "import 'package:langchain_core/chat_history.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/memory.dart';\n\nimport '../chat_history/in_memory.dart';\n\n/// {@template conversation_buffer_memory}\n/// Buffer for storing a conversation in-memory and then retrieving the\n/// messages at a later time.\n///\n/// It uses [ChatMessageHistory] as in-memory storage by default.\n///\n/// Example:\n/// ```dart\n/// final memory = ConversationBufferMemory();\n/// await memory.saveContext({'foo': 'bar'}, {'bar': 'foo'});\n/// final res = await memory.loadMemoryVariables();\n/// // {'history': 'Human: bar\\nAI: foo'}\n/// ```\n/// {@endtemplate}\nfinal class ConversationBufferMemory extends BaseChatMemory {\n  /// {@macro conversation_buffer_memory}\n  ConversationBufferMemory({\n    final BaseChatMessageHistory? chatHistory,\n    super.inputKey,\n    super.outputKey,\n    super.returnMessages = false,\n    this.memoryKey = BaseMemory.defaultMemoryKey,\n    this.systemPrefix = SystemChatMessage.defaultPrefix,\n    this.humanPrefix = HumanChatMessage.defaultPrefix,\n    this.aiPrefix = AIChatMessage.defaultPrefix,\n    this.toolPrefix = ToolChatMessage.defaultPrefix,\n  }) : super(chatHistory: chatHistory ?? 
ChatMessageHistory());\n\n  /// The memory key to use for the chat history.\n  /// This will be passed as input variable to the prompt.\n  final String memoryKey;\n\n  /// The prefix to use for system messages if [returnMessages] is false.\n  final String systemPrefix;\n\n  /// The prefix to use for human messages if [returnMessages] is false.\n  final String humanPrefix;\n\n  /// The prefix to use for AI messages if [returnMessages] is false.\n  final String aiPrefix;\n\n  /// The prefix to use for tool messages if [returnMessages] is false.\n  final String toolPrefix;\n\n  @override\n  Set<String> get memoryKeys => {memoryKey};\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    final messages = await chatHistory.getChatMessages();\n    if (returnMessages) {\n      return {memoryKey: messages};\n    }\n    return {\n      memoryKey: messages.toBufferString(\n        systemPrefix: systemPrefix,\n        humanPrefix: humanPrefix,\n        aiPrefix: aiPrefix,\n        toolPrefix: toolPrefix,\n      ),\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/buffer_window.dart",
    "content": "import 'package:langchain_core/chat_history.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/memory.dart';\n\nimport '../chat_history/in_memory.dart';\n\n/// {@template conversation_buffer_window_memory}\n/// [ConversationBufferWindowMemory] is a type of memory that stores a\n/// conversation in [chatHistory] and then retrieves the last [k] interactions\n/// with the model (i.e. the last [k] input messages and the last [k] output\n/// messages).\n///\n/// It uses [ChatMessageHistory] as in-memory storage by default.\n///\n/// Example:\n/// ```dart\n/// final memory = ConversationBufferWindowMemory(k: 10);\n/// await memory.saveContext({'input': 'bar'}, {'output': 'foo'});\n/// final res = await memory.loadMemoryVariables();\n/// // {'history': 'Human: bar\\nAI: foo'}\n/// ```\n/// {@endtemplate}\nfinal class ConversationBufferWindowMemory extends BaseChatMemory {\n  /// {@macro conversation_buffer_window_memory}\n  ConversationBufferWindowMemory({\n    final BaseChatMessageHistory? chatHistory,\n    super.inputKey,\n    super.outputKey,\n    super.returnMessages = false,\n    this.k = 5,\n    this.memoryKey = BaseMemory.defaultMemoryKey,\n    this.systemPrefix = SystemChatMessage.defaultPrefix,\n    this.humanPrefix = HumanChatMessage.defaultPrefix,\n    this.aiPrefix = AIChatMessage.defaultPrefix,\n    this.toolPrefix = ToolChatMessage.defaultPrefix,\n  }) : super(chatHistory: chatHistory ?? 
ChatMessageHistory());\n\n  /// Number of interactions to store in the buffer.\n  final int k;\n\n  /// The memory key to use for the chat history.\n  /// This will be passed as input variable to the prompt.\n  final String memoryKey;\n\n  /// The prefix to use for system messages if [returnMessages] is false.\n  final String systemPrefix;\n\n  /// The prefix to use for human messages if [returnMessages] is false.\n  final String humanPrefix;\n\n  /// The prefix to use for AI messages if [returnMessages] is false.\n  final String aiPrefix;\n\n  /// The prefix to use for tool messages if [returnMessages] is false.\n  final String toolPrefix;\n\n  @override\n  Set<String> get memoryKeys => {memoryKey};\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    final messages = k > 0 ? await _getChatMessages() : <ChatMessage>[];\n    if (returnMessages) {\n      return {memoryKey: messages};\n    }\n    return {\n      memoryKey: messages.toBufferString(\n        systemPrefix: systemPrefix,\n        humanPrefix: humanPrefix,\n        aiPrefix: aiPrefix,\n        toolPrefix: toolPrefix,\n      ),\n    };\n  }\n\n  Future<List<ChatMessage>> _getChatMessages() async {\n    final historyMessages = await chatHistory.getChatMessages();\n    return historyMessages.length > k * 2\n        ? historyMessages.sublist(historyMessages.length - k * 2)\n        : historyMessages;\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/memory.dart",
    "content": "export 'package:langchain_core/memory.dart';\n\nexport 'buffer.dart';\nexport 'buffer_window.dart';\nexport 'simple.dart';\nexport 'summary.dart';\nexport 'token_buffer.dart';\nexport 'vector_store.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/simple.dart",
    "content": "import 'package:langchain_core/memory.dart';\n\n/// {@template simple_memory}\n/// Simple memory for storing context or other bits of information that\n/// shouldn't ever change between prompts.\n/// {@endtemplate}\nfinal class SimpleMemory implements BaseMemory {\n  /// {@macro simple_memory}\n  const SimpleMemory({this.memories = const {}});\n\n  /// The memories to store.\n  final Map<String, dynamic> memories;\n\n  @override\n  Set<String> get memoryKeys => memories.keys.toSet();\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    return memories;\n  }\n\n  @override\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  }) async {\n    // Nothing should be saved or changed, my memory is set in stone\n  }\n\n  @override\n  Future<void> clear() async {\n    // Nothing to clear, got a memory like a vault\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/summary.dart",
    "content": "import 'package:langchain_core/chat_history.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport '../chains/chains.dart';\nimport '../chat_history/in_memory.dart';\n\nconst _template = '''\nProgressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial intelligence is a force for good?\nAI: Because artificial intelligence will help humans reach their full potential.\n\nNew summary:\nThe human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n{summary}\n\nNew lines of conversation:\n{new_lines}\n\nNew summary:''';\n\nconst _promptTemplate = PromptTemplate(\n  template: _template,\n  inputVariables: {'summary', 'new_lines'},\n);\n\n/// {@template conversation_summary_memory}\n/// Memory that summarizes a conversation over time. This is useful for longer\n/// conversations where keeping the full message history would take up too many\n/// tokens.\n///\n/// It requires an [llm] to summarize the conversation. You can customize the\n/// summarization prompt using [summaryPromptTemplate].\n///\n/// Every time [saveContext] is called, it will generate a new summary of the\n/// conversation using the previous summary and the new messages.\n///\n/// It uses [ChatMessageHistory] as in-memory storage by default.\n///\n/// You can provide an initial summary using [initialSummary]. 
If you have a\n/// [chatHistory] with previous messages, you can use the factory constructor\n/// [ConversationSummaryMemory.fromMessages] to generate an initial summary.\n///\n/// Example:\n/// ```dart\n/// final memory = ConversationSummaryMemory(llm: OpenAI(apiKey: '...'));\n/// await memory.saveContext({'foo': 'bar'}, {'bar': 'foo'});\n/// final res = await memory.loadMemoryVariables();\n/// // {'history': 'System: Human said bar'}\n/// ```\n/// {@endtemplate}\nfinal class ConversationSummaryMemory<LLMType extends BaseLanguageModel>\n    extends BaseChatMemory {\n  /// {@macro conversation_summary_memory}\n  ConversationSummaryMemory({\n    final BaseChatMessageHistory? chatHistory,\n    super.inputKey,\n    super.outputKey,\n    super.returnMessages = false,\n    required this.llm,\n    final String initialSummary = '',\n    this.summaryPromptTemplate = _promptTemplate,\n    this.summaryMessageBuilder,\n    this.memoryKey = BaseMemory.defaultMemoryKey,\n    this.systemPrefix = SystemChatMessage.defaultPrefix,\n    this.humanPrefix = HumanChatMessage.defaultPrefix,\n    this.aiPrefix = AIChatMessage.defaultPrefix,\n    this.toolPrefix = ToolChatMessage.defaultPrefix,\n  }) : _buffer = initialSummary,\n       super(chatHistory: chatHistory ?? ChatMessageHistory());\n\n  /// Language model to use for summarizing the conversation.\n  final LLMType llm;\n\n  /// [PromptTemplate] to use for summarizing previous conversations.\n  /// This currently expects two input variables: `summary` and `new_lines`.\n  /// summary is the previous summary and new_lines are the new messages\n  /// formatted to add.\n  final BasePromptTemplate summaryPromptTemplate;\n\n  /// A builder to construct the chat message that contains the summary.\n  /// If null, it will use [ChatMessage.system].\n  final ChatMessage Function(String summary)? 
summaryMessageBuilder;\n\n  /// The memory key to use for the chat history.\n  /// This will be passed as input variable to the prompt.\n  final String memoryKey;\n\n  /// The prefix to use for system messages if [returnMessages] is false.\n  final String systemPrefix;\n\n  /// The prefix to use for human messages if [returnMessages] is false.\n  final String humanPrefix;\n\n  /// The prefix to use for AI messages if [returnMessages] is false.\n  final String aiPrefix;\n\n  /// The prefix to use for tool messages if [returnMessages] is false.\n  final String toolPrefix;\n\n  /// Store the summarized chat history in memory.\n  /// This does not concatenate; it changes every time new [ChatMessage]s are added.\n  String _buffer;\n\n  @override\n  Set<String> get memoryKeys => {memoryKey};\n\n  /// Instantiate a [ConversationSummaryMemory] from a list of [ChatMessage]s.\n  /// Use this factory method if you want to generate a [ConversationSummaryMemory]\n  /// with pre-loaded history and don't have the summary of the messages.\n  /// Requires a [BaseLanguageModel] to use for summarizing.\n  static Future<ConversationSummaryMemory> fromMessages({\n    required final BaseLanguageModel llm,\n    required final BaseChatMessageHistory chatHistory,\n    final String? inputKey,\n    final String? outputKey,\n    final bool returnMessages = false,\n    final String initialSummary = '',\n    final PromptTemplate summaryPromptTemplate = _promptTemplate,\n    final ChatMessage Function(String summary)? 
summaryMessageBuilder,\n    final String memoryKey = BaseMemory.defaultMemoryKey,\n    final String systemPrefix = SystemChatMessage.defaultPrefix,\n    final String humanPrefix = HumanChatMessage.defaultPrefix,\n    final String aiPrefix = AIChatMessage.defaultPrefix,\n    final String toolPrefix = ToolChatMessage.defaultPrefix,\n    final int summaryStep = 2,\n  }) async {\n    final memory = ConversationSummaryMemory(\n      chatHistory: chatHistory,\n      llm: llm,\n      inputKey: inputKey,\n      outputKey: outputKey,\n      returnMessages: returnMessages,\n      initialSummary: initialSummary,\n      summaryPromptTemplate: summaryPromptTemplate,\n      summaryMessageBuilder: summaryMessageBuilder,\n      memoryKey: memoryKey,\n      systemPrefix: systemPrefix,\n      humanPrefix: humanPrefix,\n      aiPrefix: aiPrefix,\n      toolPrefix: toolPrefix,\n    );\n    final messages = await chatHistory.getChatMessages();\n    for (var i = 0; i < messages.length; i += summaryStep) {\n      final summary = await memory._summarize(\n        messages.sublist(i, i + summaryStep),\n        memory._buffer,\n      );\n      memory._buffer = summary;\n    }\n    return memory;\n  }\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    final messages = <ChatMessage>[];\n    if (_buffer.isNotEmpty) {\n      final msg =\n          summaryMessageBuilder?.call(_buffer) ?? 
ChatMessage.system(_buffer);\n      messages.add(msg);\n    }\n\n    if (returnMessages) {\n      return {memoryKey: messages};\n    }\n    return {\n      memoryKey: messages.toBufferString(\n        systemPrefix: systemPrefix,\n        humanPrefix: humanPrefix,\n        aiPrefix: aiPrefix,\n        toolPrefix: toolPrefix,\n      ),\n    };\n  }\n\n  @override\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  }) async {\n    await super.saveContext(\n      inputValues: inputValues,\n      outputValues: outputValues,\n    );\n    final messages = await chatHistory.getChatMessages();\n    _buffer = await _summarize(messages.sublist(messages.length - 2), _buffer);\n  }\n\n  Future<String> _summarize(\n    final List<ChatMessage> messages,\n    final String currentSummary,\n  ) {\n    final input = messages.toBufferString(\n      systemPrefix: systemPrefix,\n      humanPrefix: humanPrefix,\n      aiPrefix: aiPrefix,\n      toolPrefix: toolPrefix,\n    );\n    return LLMChain(\n      llm: llm,\n      prompt: summaryPromptTemplate,\n    ).run({'new_lines': input, 'summary': currentSummary});\n  }\n\n  @override\n  Future<void> clear() async {\n    await super.clear();\n    _buffer = '';\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/token_buffer.dart",
    "content": "import 'package:langchain_core/chat_history.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/prompts.dart';\n\nimport '../chat_history/in_memory.dart';\n\n/// {@template conversation_token_buffer_memory}\n/// Rolling buffer for storing a conversation and then retrieving the messages\n/// at a later time.\n///\n/// It uses token length (rather than number of interactions like\n/// [ConversationBufferWindowMemory]) to determine when to flush old\n/// interactions from the buffer. This allows it to keep more context while\n/// staying under a max token limit.\n///\n/// It uses [ChatMessageHistory] as in-memory storage by default.\n///\n/// Example:\n/// ```dart\n/// final memory = ConversationTokenBufferMemory(llm: OpenAI(apiKey: '...'));\n/// await memory.saveContext({'foo': 'bar'}, {'bar': 'foo'});\n/// final res = await memory.loadMemoryVariables();\n/// // {'history': 'Human: bar\\nAI: foo'}\n/// ```\n/// {@endtemplate}\nfinal class ConversationTokenBufferMemory<LLMType extends BaseLanguageModel>\n    extends BaseChatMemory {\n  /// {@macro conversation_token_buffer_memory}\n  ConversationTokenBufferMemory({\n    final BaseChatMessageHistory? chatHistory,\n    super.inputKey,\n    super.outputKey,\n    super.returnMessages = false,\n    this.maxTokenLimit = 2000,\n    required this.llm,\n    this.memoryKey = BaseMemory.defaultMemoryKey,\n    this.systemPrefix = SystemChatMessage.defaultPrefix,\n    this.humanPrefix = HumanChatMessage.defaultPrefix,\n    this.aiPrefix = AIChatMessage.defaultPrefix,\n    this.toolPrefix = ToolChatMessage.defaultPrefix,\n  }) : super(chatHistory: chatHistory ?? 
ChatMessageHistory());\n\n  /// Max number of tokens to use.\n  final int maxTokenLimit;\n\n  /// Language model to use for counting tokens.\n  final LLMType llm;\n\n  /// The memory key to use for the chat history.\n  /// This will be passed as input variable to the prompt.\n  final String memoryKey;\n\n  /// The prefix to use for system messages if [returnMessages] is false.\n  final String systemPrefix;\n\n  /// The prefix to use for human messages if [returnMessages] is false.\n  final String humanPrefix;\n\n  /// The prefix to use for AI messages if [returnMessages] is false.\n  final String aiPrefix;\n\n  /// The prefix to use for tool messages if [returnMessages] is false.\n  final String toolPrefix;\n\n  @override\n  Set<String> get memoryKeys => {memoryKey};\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    final messages = await chatHistory.getChatMessages();\n    if (returnMessages) {\n      return {memoryKey: messages};\n    }\n    return {\n      memoryKey: messages.toBufferString(\n        systemPrefix: systemPrefix,\n        humanPrefix: humanPrefix,\n        aiPrefix: aiPrefix,\n        toolPrefix: toolPrefix,\n      ),\n    };\n  }\n\n  @override\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  }) async {\n    await super.saveContext(\n      inputValues: inputValues,\n      outputValues: outputValues,\n    );\n    List<ChatMessage> buffer = await chatHistory.getChatMessages();\n    int currentBufferLength = await llm.countTokens(PromptValue.chat(buffer));\n    // Prune buffer if it exceeds max token limit\n    if (currentBufferLength > maxTokenLimit) {\n      while (currentBufferLength > maxTokenLimit) {\n        await chatHistory.removeFirst();\n        buffer = await chatHistory.getChatMessages();\n        currentBufferLength = await llm.countTokens(PromptValue.chat(buffer));\n    
  }\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/memory/vector_store.dart",
    "content": "import 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/vector_stores.dart';\n\n/// {@template vector_store_retriever_memory}\n/// Memory backed by a vector store.\n/// {@endtemplate}\nclass VectorStoreMemory implements BaseMemory {\n  /// {@macro vector_store_retriever_memory}\n  VectorStoreMemory({\n    required this.vectorStore,\n    this.searchType = const VectorStoreSimilaritySearch(),\n    this.memoryKey = defaultMemoryKey,\n    this.inputKey,\n    this.excludeInputKeys = const {},\n    this.returnDocs = false,\n  });\n\n  /// VectorStoreRetriever object to connect to.\n  final VectorStore vectorStore;\n\n  /// The type of search to perform.\n  final VectorStoreSearchType searchType;\n\n  /// Name of the key where the memories are in the map returned by\n  /// [loadMemoryVariables].\n  final String memoryKey;\n\n  /// The input key to use for the query to the vector store.\n  ///\n  /// If null, the input key is inferred from the prompt (the input key that\n  /// was filled in by the user (i.e. not a memory key)).\n  final String? inputKey;\n\n  /// Input keys to exclude in addition to memory key when constructing the\n  /// document.\n  final Set<String> excludeInputKeys;\n\n  /// Whether or not to return the result of querying the database directly.\n  /// If false, the page content of all the documents is returned as a single\n  /// string.\n  final bool returnDocs;\n\n  /// Default key for [memoryKey].\n  static const defaultMemoryKey = 'memory';\n\n  @override\n  Set<String> get memoryKeys => {memoryKey};\n\n  @override\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]) async {\n    final promptInputKey = inputKey ?? 
getPromptInputKey(values, memoryKeys);\n    final query = values[promptInputKey];\n    final docs = await vectorStore.search(query: query, searchType: searchType);\n    return {\n      memoryKey: returnDocs\n          ? docs\n          : docs.map((final Document doc) => doc.pageContent).join('\\n'),\n    };\n  }\n\n  @override\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  }) async {\n    final docs = _buildDocuments(inputValues, outputValues);\n    await vectorStore.addDocuments(documents: docs);\n  }\n\n  /// Builds the documents to save to the vector store from the given\n  /// [inputValues] and [outputValues].\n  List<Document> _buildDocuments(\n    final MemoryInputValues inputValues,\n    final MemoryOutputValues outputValues,\n  ) {\n    final excludeKeys = {memoryKey, ...excludeInputKeys};\n    final filteredInputs = {\n      for (final entry in inputValues.entries)\n        if (!excludeKeys.contains(entry.key)) entry.key: entry.value,\n    };\n    final inputsOutputs = {...filteredInputs, ...outputValues};\n    final pageContent = inputsOutputs.entries\n        .map((final entry) {\n          return '${entry.key}: ${entry.value}';\n        })\n        .join('\\n');\n    return [Document(pageContent: pageContent)];\n  }\n\n  @override\n  Future<void> clear() async {\n    // Nothing to clear\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/output_parsers/output_parsers.dart",
    "content": "export 'package:langchain_core/output_parsers.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/prompts/prompts.dart",
    "content": "export 'package:langchain_core/prompts.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/retrievers/retrievers.dart",
    "content": "export 'package:langchain_core/retrievers.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/runnables/runnables.dart",
    "content": "export 'package:langchain_core/runnables.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/stores/encoder_backed.dart",
    "content": "import 'package:langchain_core/stores.dart';\n\n/// {@template encoder_backed_store}\n/// Wraps a store with key and value encoders/decoders.\n///\n/// This is useful for stores that only support certain types of keys and\n/// values. You can wrap the store with an encoder that converts the keys and\n/// values to the supported types.\n/// {@endtemplate}\nclass EncoderBackedStore<K, V, EK, EV> implements BaseStore<K, V> {\n  /// {@macro encoder_backed_store}\n  EncoderBackedStore({required this.store, required this.encoder});\n\n  /// The underlying store.\n  final BaseStore<EK, EV> store;\n\n  /// The encoder/decoder for keys and values.\n  final StoreEncoder<K, V, EK, EV> encoder;\n\n  @override\n  Future<List<V?>> get(final List<K> keys) async {\n    final encodedKeys = keys.map(encoder.encodeKey).toList(growable: false);\n    final encodedValues = await store.get(encodedKeys);\n    return encodedValues\n        .map((final value) => value == null ? null : encoder.decodeValue(value))\n        .toList(growable: false);\n  }\n\n  @override\n  Future<void> set(final List<(K, V)> keyValuePairs) async {\n    final encodedKeyValuePairs = keyValuePairs\n        .map(\n          (final pair) =>\n              (encoder.encodeKey(pair.$1), encoder.encodeValue(pair.$2)),\n        )\n        .toList(growable: false);\n    await store.set(encodedKeyValuePairs);\n  }\n\n  @override\n  Future<void> delete(final List<K> keys) async {\n    final encodedKeys = keys.map(encoder.encodeKey).toList(growable: false);\n    await store.delete(encodedKeys);\n  }\n\n  @override\n  Stream<K> yieldKeys({final String? 
prefix}) async* {\n    final encodedKeys = store.yieldKeys(prefix: prefix);\n    await for (final encodedKey in encodedKeys) {\n      yield encoder.decodeKey(encodedKey);\n    }\n  }\n}\n\n/// {@template encoder}\n/// Encoder/decoder for keys and values.\n/// {@endtemplate}\nabstract interface class StoreEncoder<K, V, EK, EV> {\n  /// {@macro encoder}\n  const StoreEncoder();\n\n  /// Encodes a key.\n  EK encodeKey(final K key);\n\n  /// Encodes a value.\n  EV encodeValue(final V value);\n\n  /// Decodes a key.\n  K decodeKey(final EK key);\n\n  /// Decodes a value.\n  V decodeValue(final EV value);\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/stores/file_system_io.dart",
    "content": "import 'dart:async';\nimport 'dart:io';\nimport 'dart:typed_data';\n\nimport 'package:langchain_core/stores.dart';\n\n/// {@template local_file_store_io}\n/// A simple file system implementation of a [BaseStore].\n///\n/// Given a [rootPath], each key is mapped to a file relative to that path.\n///\n/// Note: [LocalFileStore] is not supported for web.\n///\n/// Example:\n/// ```dart\n/// final store = LocalFileStore('/tmp');\n/// ```\n/// {@endtemplate}\nclass LocalFileStore implements BaseStore<String, Uint8List> {\n  /// {@macro local_file_store_io}\n  LocalFileStore(this.rootPath);\n\n  /// The root path of the store.\n  final String rootPath;\n\n  @override\n  Future<List<Uint8List?>> get(final List<String> keys) {\n    return Future.wait(\n      keys.map((final key) async {\n        final file = _getLocalFile(key);\n        return file.existsSync() ? file.readAsBytes() : null;\n      }),\n    );\n  }\n\n  @override\n  Future<void> set(final List<(String, Uint8List)> keyValuePairs) async {\n    for (final pair in keyValuePairs) {\n      final file = _getLocalFile(pair.$1);\n      await file.writeAsBytes(pair.$2);\n    }\n  }\n\n  @override\n  Future<void> delete(final List<String> keys) async {\n    for (final key in keys) {\n      final file = _getLocalFile(key);\n      if (file.existsSync()) {\n        await file.delete();\n      }\n    }\n  }\n\n  @override\n  Stream<String> yieldKeys({final String? prefix}) async* {\n    final directory = Directory(rootPath);\n    await for (final entity in directory.list()) {\n      if (entity is File) {\n        final fileName = entity.path.split('/').last;\n        if (prefix == null || fileName.startsWith(prefix)) {\n          yield fileName;\n        }\n      }\n    }\n  }\n\n  File _getLocalFile(final String key) {\n    return File('$rootPath/$key');\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/stores/file_system_stub.dart",
    "content": "import 'dart:typed_data';\n\nimport 'package:langchain_core/stores.dart';\n\n/// {@template local_file_store_stub}\n/// A simple file system implementation of a [BaseStore].\n///\n/// Given a [rootPath], each key is mapped to a file relative to that path.\n///\n/// Note: [LocalFileStore] is not supported for web.\n///\n/// Example:\n/// ```dart\n/// final store = LocalFileStore('/tmp');\n/// ```\n/// {@endtemplate}\nclass LocalFileStore implements BaseStore<String, Uint8List> {\n  /// {@macro local_file_store_stub}\n  LocalFileStore(this.rootPath) {\n    throw _unimplementedError;\n  }\n\n  /// The root path of the store.\n  final String rootPath;\n\n  @override\n  Future<void> delete(final List<String> keys) {\n    throw _unimplementedError;\n  }\n\n  @override\n  Future<List<Uint8List?>> get(final List<String> keys) {\n    throw _unimplementedError;\n  }\n\n  @override\n  Future<void> set(final List<(String, Uint8List)> keyValuePairs) {\n    throw _unimplementedError;\n  }\n\n  @override\n  Stream<String> yieldKeys({final String? prefix}) {\n    throw _unimplementedError;\n  }\n\n  UnimplementedError get _unimplementedError =>\n      throw UnimplementedError('LocalFileStore is not supported for web.');\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/stores/in_memory.dart",
    "content": "import 'package:langchain_core/stores.dart';\n\n/// {@template in_memory_store}\n/// In-memory implementation of the BaseStore using a dictionary.\n/// {@endtemplate}\nclass InMemoryStore<K, V> implements BaseStore<K, V> {\n  /// {@macro in_memory_store}\n  InMemoryStore({final Map<K, V>? initialData}) : _store = {...?initialData};\n\n  final Map<K, V> _store;\n\n  @override\n  Future<List<V?>> get(final List<K> keys) async {\n    return keys.map((final key) => _store[key]).toList(growable: false);\n  }\n\n  @override\n  Future<void> set(final List<(K, V)> keyValuePairs) async {\n    for (final pair in keyValuePairs) {\n      _store[pair.$1] = pair.$2;\n    }\n  }\n\n  @override\n  Future<void> delete(final List<K> keys) async {\n    keys.forEach(_store.remove);\n  }\n\n  @override\n  Stream<K> yieldKeys({final String? prefix}) async* {\n    for (final key in _store.keys) {\n      if (prefix == null || key.toString().startsWith(prefix)) {\n        yield key;\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/stores/stores.dart",
    "content": "export 'package:langchain_core/stores.dart';\n\nexport 'encoder_backed.dart';\nexport 'file_system_stub.dart' if (dart.library.io) 'file_system_io.dart';\nexport 'in_memory.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/character.dart",
    "content": "import 'text_splitter.dart';\nimport 'utils.dart';\n\n/// {@template character_text_splitter}\n/// Implementation of [TextSplitter] that looks at characters.\n///\n/// - How the text is split: by single character (by default \"\\n\\n\").\n/// - How the chunk size is measured: by number of characters.\n/// {@endtemplate}\nclass CharacterTextSplitter extends TextSplitter {\n  /// {@macro character_text_splitter}\n  const CharacterTextSplitter({\n    this.separator = '\\n\\n',\n    super.chunkSize,\n    super.chunkOverlap,\n    super.lengthFunction,\n    super.keepSeparator,\n    super.addStartIndex,\n  });\n\n  /// The separator that is used to split the text.\n  final String separator;\n\n  @override\n  List<String> splitText(final String text) {\n    // First we naively split the large input into a bunch of smaller ones\n    final splits = splitTextWithRegex(text, separator, keepSeparator);\n    final finalSeparator = keepSeparator ? '' : separator;\n    return mergeSplits(splits, finalSeparator);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/code.dart",
    "content": "import 'recursive_character.dart';\n\n/// {@template code_text_splitter}\n/// [CodeTextSplitter] allows you to split source code.\n/// It supports multiple languages (see [CodeLanguage] enum).\n/// It tries to split along class definitions, function definitions, and control\n/// flow statements.\n/// {@endtemplate}\nclass CodeTextSplitter extends RecursiveCharacterTextSplitter {\n  /// {@macro code_text_splitter}\n  CodeTextSplitter({\n    required final CodeLanguage language,\n    super.chunkSize,\n    super.chunkOverlap,\n    super.lengthFunction,\n    super.keepSeparator = true,\n    super.addStartIndex,\n  }) : super(\n         separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\n           language,\n         ),\n       );\n}\n\n/// Supported programming languages for [CodeTextSplitter].\nenum CodeLanguage {\n  /// C++ code\n  cpp,\n\n  /// Dart code\n  dart,\n\n  /// Golang code\n  go,\n\n  /// HTML code\n  html,\n\n  /// Java code\n  java,\n\n  /// Javascript code\n  js,\n\n  /// Latex code\n  latex,\n\n  /// Markdown code\n  markdown,\n\n  /// PHP code\n  php,\n\n  /// Protocol Buffer code\n  proto,\n\n  /// Python code\n  python,\n\n  /// RST code\n  rst,\n\n  /// Ruby code\n  ruby,\n\n  /// Rust code\n  rust,\n\n  /// Scala code\n  scala,\n\n  /// Solidity code\n  solidity,\n\n  /// Swift code\n  swift,\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/markdown.dart",
    "content": "import '../documents/documents.dart';\nimport '../tools/printable_char.dart';\nimport 'code.dart';\nimport 'recursive_character.dart';\n\n/// {@template markdown_text_splitter}\n/// Attempts to split the text along Markdown-formatted headings.\n/// {@endtemplate}\nclass MarkdownTextSplitter extends RecursiveCharacterTextSplitter {\n  /// {@macro markdown_text_splitter}\n  MarkdownTextSplitter({\n    super.chunkSize,\n    super.chunkOverlap,\n    super.lengthFunction,\n    super.keepSeparator,\n    super.addStartIndex,\n  }) : super(\n         separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\n           CodeLanguage.markdown,\n         ),\n       );\n}\n\n/// {@template markdown_header_text_splitter}\n/// Splitting markdown files based on specified headers.\n/// {@endtemplate}\nclass MarkdownHeaderTextSplitter {\n  /// Whether to return each line with associated headers\n  final bool returnEachLine;\n\n  /// List of headers to split on, each as a tuple of (header prefix, metadata key)\n  final List<(String, String)> headersToSplitOn;\n\n  /// Whether to strip headers from the content of chunks\n  final bool stripHeaders;\n\n  /// Create a new MarkdownHeaderTextSplitter.\n  ///\n  /// [headersToSplitOn] Headers we want to track\n  /// [returnEachLine] Return each line w/ associated headers\n  /// [stripHeaders] Strip split headers from the content of the chunk\n  MarkdownHeaderTextSplitter({\n    required List<(String, String)> headersToSplitOn,\n    this.returnEachLine = false,\n    this.stripHeaders = true,\n  }) : headersToSplitOn = headersToSplitOn.toList() {\n    // Sort headers by length in descending order\n    this.headersToSplitOn.sort((a, b) => b.$1.length.compareTo(a.$1.length));\n  }\n\n  /// Combine lines with common metadata into chunks.\n  List<Document> _aggregateLinesToChunks(List<_LineType> lines) {\n    final aggregatedChunks = <_LineType>[];\n\n    for (final line in lines) {\n      if 
(aggregatedChunks.isNotEmpty &&\n          _mapEquals(aggregatedChunks.last.metadata, line.metadata)) {\n        // If same metadata, append content\n        final old = aggregatedChunks.last;\n        aggregatedChunks.last = _LineType(\n          metadata: old.metadata,\n          content: '${old.content}  \\n${line.content}',\n        );\n      } else if (aggregatedChunks.isNotEmpty &&\n          !_mapEquals(aggregatedChunks.last.metadata, line.metadata) &&\n          aggregatedChunks.last.metadata.length < line.metadata.length &&\n          aggregatedChunks.last.content.split('\\n').last.startsWith('#') &&\n          !stripHeaders) {\n        // Handle nested headers\n        final old = aggregatedChunks.last;\n        aggregatedChunks.last = _LineType(\n          metadata: line.metadata,\n          content: '${old.content}  \\n${line.content}',\n        );\n      } else {\n        // New chunk\n        aggregatedChunks.add(line);\n      }\n    }\n\n    return aggregatedChunks\n        .map(\n          (chunk) =>\n              Document(pageContent: chunk.content, metadata: chunk.metadata),\n        )\n        .toList();\n  }\n\n  /// Split markdown file.\n  List<Document> splitText(String text) {\n    final lines = text.split('\\n');\n    final linesWithMetadata = <_LineType>[];\n    final currentContent = <String>[];\n    var currentMetadata = <String, String>{};\n    final headerStack = <_HeaderType>[];\n    final initialMetadata = <String, String>{};\n    var inCodeBlock = false;\n    var openingFence = '';\n\n    for (final line in lines) {\n      var strippedLine = line.trim();\n\n      final buffer = StringBuffer();\n      // Iterate through each rune (Unicode code point) in the string\n      for (final rune in strippedLine.runes) {\n        // Add only printable characters to the buffer\n        if (isPrintable(rune)) {\n          buffer.writeCharCode(rune);\n        }\n      }\n      strippedLine = buffer.toString();\n\n      if (!inCodeBlock) {\n       
 if (strippedLine.startsWith('```') &&\n            '```'.allMatches(strippedLine).length == 1) {\n          inCodeBlock = true;\n          openingFence = '```';\n        } else if (strippedLine.startsWith('~~~')) {\n          inCodeBlock = true;\n          openingFence = '~~~';\n        }\n      } else {\n        if (strippedLine.startsWith(openingFence)) {\n          inCodeBlock = false;\n          openingFence = '';\n        }\n      }\n\n      if (inCodeBlock) {\n        currentContent.add(strippedLine);\n        continue;\n      }\n\n      var headerFound = false;\n      for (final (sep, name) in headersToSplitOn) {\n        if (strippedLine.startsWith(sep) &&\n            (strippedLine.length == sep.length ||\n                strippedLine[sep.length] == ' ')) {\n          final currentHeaderLevel = sep.length;\n          while (headerStack.isNotEmpty &&\n              headerStack.last.level >= currentHeaderLevel) {\n            final poppedHeader = headerStack.removeLast();\n            initialMetadata.remove(poppedHeader.name);\n          }\n\n          final header = _HeaderType(\n            level: currentHeaderLevel,\n            name: name,\n            data: strippedLine.substring(sep.length).trim(),\n          );\n          headerStack.add(header);\n          initialMetadata[name] = header.data;\n\n          if (currentContent.isNotEmpty) {\n            linesWithMetadata.add(\n              _LineType(\n                content: currentContent.join('\\n'),\n                metadata: Map.from(currentMetadata),\n              ),\n            );\n            currentContent.clear();\n          }\n\n          if (!stripHeaders) {\n            currentContent.add(strippedLine);\n          }\n\n          headerFound = true;\n          break;\n        }\n      }\n\n      if (!headerFound) {\n        if (strippedLine.isNotEmpty) {\n          currentContent.add(strippedLine);\n        } else if (currentContent.isNotEmpty) {\n          linesWithMetadata.add(\n       
     _LineType(\n              content: currentContent.join('\\n'),\n              metadata: Map.from(currentMetadata),\n            ),\n          );\n          currentContent.clear();\n        }\n      }\n\n      currentMetadata = Map.from(initialMetadata);\n    }\n\n    if (currentContent.isNotEmpty) {\n      linesWithMetadata.add(\n        _LineType(\n          content: currentContent.join('\\n'),\n          metadata: currentMetadata,\n        ),\n      );\n    }\n\n    return returnEachLine\n        ? linesWithMetadata\n              .map(\n                (chunk) => Document(\n                  pageContent: chunk.content,\n                  metadata: chunk.metadata,\n                ),\n              )\n              .toList()\n        : _aggregateLinesToChunks(linesWithMetadata);\n  }\n}\n\n/// Represents a line with metadata in the markdown document\nclass _LineType {\n  /// The metadata associated with this line, usually header information\n  final Map<String, String> metadata;\n\n  /// The content text of this line\n  final String content;\n\n  /// Creates a new LineType instance\n  ///\n  /// [metadata] The metadata map associated with this line\n  /// [content] The text content of this line\n  _LineType({required this.metadata, required this.content});\n}\n\n/// Represents a header with its level and data\nclass _HeaderType {\n  /// The level of the header (1 for #, 2 for ##, etc.)\n  final int level;\n\n  /// The metadata key name for this header\n  final String name;\n\n  /// The content/value of the header\n  final String data;\n\n  /// Creates a new HeaderType instance\n  ///\n  /// [level] The level of the header (1 for #, 2 for ##, etc.)\n  /// [name] The metadata key name for this header\n  /// [data] The content/value of the header\n  _HeaderType({required this.level, required this.name, required this.data});\n}\n\n/// Helper function to compare maps for equality\n///\n/// [a] First map to compare\n/// [b] Second map to compare\n/// Returns true 
if maps are equal, false otherwise\nbool _mapEquals(Map<String, String>? a, Map<String, String>? b) {\n  if (a == null || b == null) return a == b;\n  if (a.length != b.length) return false;\n  for (final key in a.keys) {\n    if (a[key] != b[key]) return false;\n  }\n  return true;\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/recursive_character.dart",
    "content": "import 'code.dart';\nimport 'text_splitter.dart';\nimport 'utils.dart';\n\n/// {@template recursive_character_text_splitter}\n/// Implementation of splitting text that looks at characters.\n/// Recursively tries to split by different characters to find one that works.\n/// {@endtemplate}\nclass RecursiveCharacterTextSplitter extends TextSplitter {\n  /// {@macro recursive_character_text_splitter}\n  const RecursiveCharacterTextSplitter({\n    this.separators = const ['\\n\\n', '\\n', ' ', ''],\n    super.chunkSize = 4000,\n    super.chunkOverlap = 200,\n    super.lengthFunction = TextSplitter.defaultLengthFunction,\n    super.keepSeparator = true,\n    super.addStartIndex = false,\n  });\n\n  /// List of separators to use for splitting.\n  final List<String> separators;\n\n  @override\n  List<String> splitText(final String text) {\n    return _splitText(text, separators);\n  }\n\n  List<String> _splitText(final String text, final List<String> separators) {\n    final List<String> finalChunks = [];\n\n    // Get appropriate separator to use\n    String separator = separators.last;\n    List<String> newSeparators = [];\n    for (var i = 0; i < separators.length; i++) {\n      final s = separators[i];\n      if (s == '') {\n        separator = s;\n        break;\n      }\n      if (text.contains(RegExp(s))) {\n        separator = s;\n        newSeparators = separators.sublist(i + 1);\n        break;\n      }\n    }\n\n    // Now that we have the separator, split the text\n    final splits = splitTextWithRegex(text, separator, keepSeparator);\n\n    // Now go merging things, recursively splitting longer texts\n    final goodSplits = <String>[];\n    separator = keepSeparator ? 
'' : separator;\n    for (final s in splits) {\n      if (lengthFunction(s) < chunkSize) {\n        goodSplits.add(s);\n      } else {\n        if (goodSplits.isNotEmpty) {\n          final mergedText = mergeSplits(goodSplits, separator);\n          finalChunks.addAll(mergedText);\n          goodSplits.clear();\n        }\n        if (newSeparators.isEmpty) {\n          finalChunks.add(s);\n        } else {\n          final otherInfo = _splitText(s, newSeparators);\n          finalChunks.addAll(otherInfo);\n        }\n      }\n    }\n    if (goodSplits.isNotEmpty) {\n      final mergedText = mergeSplits(goodSplits, separator);\n      finalChunks.addAll(mergedText);\n    }\n\n    return finalChunks;\n  }\n\n  /// Returns a list of separator strings optimized for splitting text in a given programming language.\n  ///\n  /// This method provides language-specific separator patterns that allow the recursive character\n  /// splitter to more intelligently break down code by relevant structural elements\n  /// (like functions, classes, control structures) before falling back to generic whitespace separators.\n  ///\n  /// [language] The programming language to get separators for\n  /// Returns a list of separator strings, ordered from most specific to most general\n  static List<String> getSeparatorsForLanguage(final CodeLanguage language) {\n    return switch (language) {\n      CodeLanguage.cpp => [\n        // Split along class definitions\n        '\\nclass ',\n        // Split along function definitions\n        '\\nvoid ',\n        '\\nint ',\n        '\\nfloat ',\n        '\\ndouble ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.dart => [\n        // Split along class definitions\n        '\\nclass ',\n        '\\nenum 
',\n        // Split along method definitions\n        '\\nvoid ',\n        '\\nint ',\n        '\\ndouble ',\n        '\\nString ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.go => [\n        // Split along function definitions\n        '\\nfunc ',\n        '\\nvar ',\n        '\\nconst ',\n        '\\ntype ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.html => [\n        // First, try to split along HTML tags\n        '<body',\n        '<div',\n        '<p',\n        '<br',\n        '<li',\n        '<h1',\n        '<h2',\n        '<h3',\n        '<h4',\n        '<h5',\n        '<h6',\n        '<span',\n        '<table',\n        '<tr',\n        '<td',\n        '<th',\n        '<ul',\n        '<ol',\n        '<header',\n        '<footer',\n        '<nav',\n        // Head\n        '<head',\n        '<style',\n        '<script',\n        '<meta',\n        '<title',\n        '',\n      ],\n      CodeLanguage.java => [\n        // Split along class definitions\n        '\\nclass ',\n        // Split along method definitions\n        '\\npublic ',\n        '\\nprotected ',\n        '\\nprivate ',\n        '\\nstatic ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.js => [\n        // Split along function definitions\n        '\\nfunction ',\n        '\\nconst ',\n 
       '\\nlet ',\n        '\\nvar ',\n        '\\nclass ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\nswitch ',\n        '\\ncase ',\n        '\\ndefault ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.latex => [\n        // First, try to split along Latex sections\n        '\\n\\\\\\\\title{',\n        '\\n\\\\\\\\author{',\n        '\\n\\\\\\\\chapter{',\n        '\\n\\\\\\\\section{',\n        '\\n\\\\\\\\subsection{',\n        '\\n\\\\\\\\subsubsection{',\n        // Now split by environments\n        '\\n\\\\\\\\begin{document}',\n        '\\n\\\\\\\\begin{enumerate}',\n        '\\n\\\\\\\\begin{itemize}',\n        '\\n\\\\\\\\begin{description}',\n        '\\n\\\\\\\\begin{list}',\n        '\\n\\\\\\\\begin{quote}',\n        '\\n\\\\\\\\begin{quotation}',\n        '\\n\\\\\\\\begin{verse}',\n        '\\n\\\\\\\\begin{verbatim}',\n        // Now split by math environments\n        '\\n\\\\\\\\begin{align}',\n        r'$$',\n        r'$',\n        // Now split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.markdown => [\n        // # First, try to split along Markdown headings (starting with level 2)\n        '\\n#{1,6} ',\n        // Note the alternative syntax for headings (below) is not handled here:\n        // ```\n        // Heading level 2\n        // ---------------\n        // ```\n        // End of code block\n        '```\\n',\n        // Horizontal lines\n        '\\n\\\\*\\\\*\\\\*+\\n',\n        '\\n---+\\n',\n        '\\n___+\\n',\n        // Note that this splitter doesn't handle horizontal lines defined\n        // by *three or more* of ***, ---, or ___, but this is not handled\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.php => [\n        // Split along 
function definitions\n        '\\nfunction ',\n        // Split along class definitions\n        '\\nclass ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nforeach ',\n        '\\nwhile ',\n        '\\ndo ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.proto => [\n        // Split along message definitions\n        '\\nmessage ',\n        // Split along service definitions\n        '\\nservice ',\n        // Split along enum definitions\n        '\\nenum ',\n        // Split along option definitions\n        '\\noption ',\n        // Split along import statements\n        '\\nimport ',\n        // Split along syntax declarations\n        '\\nsyntax ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.python => [\n        // First, try to split along class definitions\n        '\\nclass ',\n        '\\ndef ',\n        '\\n\\tdef ',\n        // Now split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.rst => [\n        // Split along section titles\n        '\\n=+\\n',\n        '\\n-+\\n',\n        '\\n\\\\*+\\n',\n        // Split along directive markers\n        '\\n\\n.. 
*\\n\\n',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.ruby => [\n        // Split along method definitions\n        '\\ndef ',\n        '\\nclass ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nunless ',\n        '\\nwhile ',\n        '\\nfor ',\n        '\\ndo ',\n        '\\nbegin ',\n        '\\nrescue ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.rust => [\n        // Split along function definitions\n        '\\nfn ',\n        '\\nconst ',\n        '\\nlet ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nwhile ',\n        '\\nfor ',\n        '\\nloop ',\n        '\\nmatch ',\n        '\\nconst ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.scala => [\n        // Split along class definitions\n        '\\nclass ',\n        '\\nobject ',\n        // Split along method definitions\n        '\\ndef ',\n        '\\nval ',\n        '\\nvar ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\nmatch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.solidity => [\n        // Split along compiler information definitions\n        '\\npragma ',\n        '\\nusing ',\n        // Split along contract definitions\n        '\\ncontract ',\n        '\\ninterface ',\n        '\\nlibrary ',\n        // Split along method definitions\n        '\\nconstructor ',\n        '\\ntype ',\n        '\\nfunction ',\n        '\\nevent ',\n        '\\nmodifier ',\n        '\\nerror ',\n        '\\nstruct ',\n        '\\nenum ',\n        // Split along control flow 
statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\ndo while ',\n        '\\nassembly ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n      CodeLanguage.swift => [\n        // Split along function definitions\n        '\\nfunc ',\n        // Split along class definitions\n        '\\nclass ',\n        '\\nstruct ',\n        '\\nenum ',\n        // Split along control flow statements\n        '\\nif ',\n        '\\nfor ',\n        '\\nwhile ',\n        '\\ndo ',\n        '\\nswitch ',\n        '\\ncase ',\n        // Split by the normal type of lines\n        '\\n\\n',\n        '\\n',\n        ' ',\n        '',\n      ],\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/text_splitter.dart",
    "content": "import 'package:characters/characters.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template text_splitter}\n/// Interface for splitting text into chunks.\n/// {@endtemplate}\nabstract class TextSplitter extends BaseDocumentTransformer {\n  /// {@macro text_splitter}\n  const TextSplitter({\n    this.chunkSize = 4000,\n    this.chunkOverlap = 200,\n    this.lengthFunction = defaultLengthFunction,\n    this.keepSeparator = false,\n    this.addStartIndex = false,\n  }) : assert(chunkOverlap <= chunkSize);\n\n  /// Maximum size of chunks to return.\n  final int chunkSize;\n\n  /// Overlap in characters between chunks.\n  final int chunkOverlap;\n\n  /// Function that measures the length of given chunks.\n  final int Function(String) lengthFunction;\n\n  /// Whether to keep the separator in the chunks.\n  final bool keepSeparator;\n\n  /// If `true`, includes chunk's `start_index` in metadata.\n  final bool addStartIndex;\n\n  /// Default length function for [TextSplitter].\n  /// Measures the length of the given chunk by counting its characters.\n  static int defaultLengthFunction(final String chunk) =>\n      chunk.characters.length;\n\n  /// Split text into multiple components.\n  List<String> splitText(final String text);\n\n  /// Creates documents from a list of texts.\n  List<Document> createDocuments(\n    final List<String> texts, {\n    final List<String>? ids,\n    final List<Map<String, dynamic>>? metadatas,\n  }) {\n    final meta = metadatas ?? List.filled(texts.length, <String, dynamic>{});\n\n    return texts\n        .asMap()\n        .entries\n        .expand((final entry) {\n          final textIndex = entry.key;\n          final text = entry.value;\n          final chunks = splitText(text);\n          var index = -1;\n          var previousChunkLen = 0;\n          return chunks.map((final chunk) {\n            String? 
id = ids?[textIndex];\n            if (id != null && id.isEmpty) {\n              id = null;\n            }\n            final metadata = {...meta[textIndex]};\n            if (addStartIndex) {\n              final offset = index + previousChunkLen - chunkOverlap;\n              index = text.indexOf(chunk, offset > 0 ? offset : 0);\n              metadata['start_index'] = index;\n              previousChunkLen = chunk.length;\n            }\n            return Document(id: id, pageContent: chunk, metadata: metadata);\n          });\n        })\n        .toList(growable: false);\n  }\n\n  /// Splits documents.\n  List<Document> splitDocuments(final List<Document> documents) {\n    final ids = documents\n        .map((final doc) => doc.id ?? '')\n        .toList(growable: false);\n    final texts = documents\n        .map((final doc) => doc.pageContent)\n        .toList(growable: false);\n    final metadatas = documents\n        .map((final doc) => doc.metadata)\n        .toList(growable: false);\n    return createDocuments(texts, ids: ids, metadatas: metadatas);\n  }\n\n  /// Joins documents into a single document with the given separator.\n  String? _joinDocs(\n    final List<String> docs, {\n    required final String separator,\n  }) {\n    final text = docs.join(separator).trim();\n    return text.isEmpty ? null : text;\n  }\n\n  /// Merges smaller pieces into medium size chunks to send to the LLM.\n  @protected\n  @visibleForTesting\n  List<String> mergeSplits(final List<String> splits, final String separator) {\n    final separatorLen = lengthFunction(separator);\n\n    final docs = <String>[];\n    var currentDoc = <String>[];\n    var total = 0;\n\n    for (final d in splits) {\n      final len = lengthFunction(d);\n\n      if (total + len + (currentDoc.isNotEmpty ? 
separatorLen : 0) >\n          chunkSize) {\n        if (total > chunkSize) {\n          // TODO Log warning:\n          // 'Created a chunk of size $total,\n          // which is longer than the specified $chunkSize'\n        }\n        if (currentDoc.isNotEmpty) {\n          final doc = _joinDocs(currentDoc, separator: separator);\n          if (doc != null) {\n            docs.add(doc);\n          }\n          // Keep on popping if:\n          // - we have a larger chunk than in the chunk overlap\n          // - or if we still have any chunks and the length is long\n          while (total > chunkOverlap ||\n              (total + len + (currentDoc.isNotEmpty ? separatorLen : 0) >\n                      chunkSize &&\n                  total > 0)) {\n            total -=\n                lengthFunction(currentDoc[0]) +\n                (currentDoc.length > 1 ? separatorLen : 0);\n            currentDoc = currentDoc.sublist(1);\n          }\n        }\n      }\n\n      currentDoc.add(d);\n      total += len + (currentDoc.length > 1 ? separatorLen : 0);\n    }\n\n    final doc = _joinDocs(currentDoc, separator: separator);\n    if (doc != null) {\n      docs.add(doc);\n    }\n\n    return docs;\n  }\n\n  @override\n  Future<List<Document>> transformDocuments(\n    final List<Document> documents,\n  ) async {\n    return splitDocuments(documents);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/text_splitters.dart",
    "content": "export 'character.dart';\nexport 'code.dart';\nexport 'markdown.dart';\nexport 'recursive_character.dart';\nexport 'text_splitter.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/text_splitters/utils.dart",
    "content": "// ignore_for_file: avoid_positional_boolean_parameters\n\n/// Split the text with a regex.\n///\n/// If the separator is not empty, then the text will be split by the separator.\n/// If the separator is empty, then the text will be split into individual\n/// characters.\n/// If [keepSeparator] is true, then the separator will be kept in the split.\n/// Otherwise, the separator will be removed.\nList<String> splitTextWithRegex(\n  final String text,\n  final String separator,\n  final bool keepSeparator,\n) {\n  // Now that we have the separator, split the text\n  List<String> splits;\n  if (separator.isNotEmpty) {\n    splits = text.split(RegExp(keepSeparator ? '(?=$separator)' : separator));\n  } else {\n    splits = text.split('');\n  }\n  return splits.where((final s) => s.isNotEmpty).toList(growable: false);\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/tools/exception.dart",
    "content": "import 'dart:async';\n\nimport 'package:langchain_core/tools.dart';\n\n/// {@template exception_tool}\n/// A tool used when the agent throws an [OutputParserException].\n///\n/// Returns the output of [AgentExecutor.handleParsingErrors].\n/// {@endtemplate}\nfinal class ExceptionTool extends StringTool<ToolOptions> {\n  /// {@macro exception_tool}\n  ExceptionTool()\n    : super(\n        name: toolName,\n        description: 'Called when the agent throws an OutputParserException',\n      );\n\n  /// The name of the tool.\n  static const toolName = '_exception';\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final ToolOptions? options,\n  }) {\n    return Future.value(toolInput);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/tools/printable_char.dart",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// https://github.com/golang/go/blob/master/LICENSE\n\n//This source file has ben taken from https://raw.githubusercontent.com/xxgreg/dart_printable_char/refs/heads/master/lib/printable_char.dart\n\npart 'printable_char_table.dart';\n\n/// isPrintable reports whether the rune is defined as a Graphic by Unicode,\n/// except that the only spacing character is ASCII space, U+0020.\n/// Such characters include letters, marks, numbers, punctuation, and symbols\n/// from categories L, M, N, P, S.\nbool isPrintable(int rune) {\n  // Fast check for Latin-1\n  if (rune <= 0xFF) {\n    if (0x20 <= rune && rune <= 0x7E) {\n      // All the ASCII is printable from space through DEL-1.\n      return true;\n    }\n    if (0xA1 <= rune && rune <= 0xFF) {\n      // Similarly for ¡ through ÿ...\n      return rune != 0xAD; // ...except for the bizarre soft hyphen.\n    }\n    return false;\n  }\n\n  // Same algorithm, either on uint16 or uint32 value.\n  // First, find first i such that isPrint[i] >= x.\n  // This is the index of either the start or end of a pair that might span x.\n  // The start is even (isPrint[i&~1]) and the end is odd (isPrint[i|1]).\n  // If we find x in a range, make sure x is not in isNotPrint list.\n\n  var i = 0;\n  var j = 0;\n  if (0 <= rune && rune < 1 << 16) {\n    i = _bsearch(_isPrint16, rune);\n    if (i >= _isPrint16.length ||\n        rune < _isPrint16[i & ~1] ||\n        _isPrint16[i | 1] < rune) {\n      return false;\n    }\n    j = _bsearch(_isNotPrint16, rune);\n    return j >= _isNotPrint16.length || _isNotPrint16[j] != rune;\n  }\n\n  i = _bsearch(_isPrint32, rune);\n  if (i >= _isPrint32.length ||\n      rune < _isPrint32[i & ~1] ||\n      _isPrint32[i | 1] < rune) {\n    return false;\n  }\n  if (rune >= 0x20000) {\n    return true;\n  }\n  final adjustedRune = 
rune - 0x10000;\n  j = _bsearch(_isNotPrint32, adjustedRune);\n  return j >= _isNotPrint32.length || _isNotPrint32[j] != adjustedRune;\n}\n\n/// IsGraphic reports whether the rune is defined as a Graphic by Unicode.\n///\n/// Such characters include letters, marks, numbers, punctuation, symbols, and\n/// spaces, from categories L, M, N, P, S, Zs.\nbool isGraphic(int rune) {\n  if (isPrintable(rune)) return true;\n  if (rune > 0xFFFF) return false;\n  final i = _bsearch(_isGraphic, rune);\n  return i < _isGraphic.length && rune == _isGraphic[i];\n}\n\n// bsearch returns the smallest i such that a[i] >= x.\n// If there is no such i, bsearch returns len(a).\nint _bsearch(List<int> a, int x) {\n  var i = 0;\n  var j = a.length;\n  while (i < j) {\n    final h = i + (j - i) ~/ 2;\n    if (a[h] < x) {\n      i = h + 1;\n    } else {\n      j = h;\n    }\n  }\n  return i;\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/tools/printable_char_table.dart",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n// https://github.com/golang/go/blob/master/LICENSE\n\n// Original modified to generate dart source.\n\n//This source file has ben taken from https://raw.githubusercontent.com/xxgreg/dart_printable_char/refs/heads/master/lib/src/tables.dart\n\npart of 'printable_char.dart';\n\n// (448+137+90)*2 + (418)*4 = 3022 bytes\n\nconst _isPrint16 = <int>[\n  0x0020,\n  0x007e,\n  0x00a1,\n  0x0377,\n  0x037a,\n  0x037f,\n  0x0384,\n  0x0556,\n  0x0559,\n  0x058a,\n  0x058d,\n  0x05c7,\n  0x05d0,\n  0x05ea,\n  0x05ef,\n  0x05f4,\n  0x0606,\n  0x061b,\n  0x061e,\n  0x070d,\n  0x0710,\n  0x074a,\n  0x074d,\n  0x07b1,\n  0x07c0,\n  0x07fa,\n  0x07fd,\n  0x082d,\n  0x0830,\n  0x085b,\n  0x085e,\n  0x086a,\n  0x08a0,\n  0x08bd,\n  0x08d3,\n  0x098c,\n  0x098f,\n  0x0990,\n  0x0993,\n  0x09b2,\n  0x09b6,\n  0x09b9,\n  0x09bc,\n  0x09c4,\n  0x09c7,\n  0x09c8,\n  0x09cb,\n  0x09ce,\n  0x09d7,\n  0x09d7,\n  0x09dc,\n  0x09e3,\n  0x09e6,\n  0x09fe,\n  0x0a01,\n  0x0a0a,\n  0x0a0f,\n  0x0a10,\n  0x0a13,\n  0x0a39,\n  0x0a3c,\n  0x0a42,\n  0x0a47,\n  0x0a48,\n  0x0a4b,\n  0x0a4d,\n  0x0a51,\n  0x0a51,\n  0x0a59,\n  0x0a5e,\n  0x0a66,\n  0x0a76,\n  0x0a81,\n  0x0ab9,\n  0x0abc,\n  0x0acd,\n  0x0ad0,\n  0x0ad0,\n  0x0ae0,\n  0x0ae3,\n  0x0ae6,\n  0x0af1,\n  0x0af9,\n  0x0b0c,\n  0x0b0f,\n  0x0b10,\n  0x0b13,\n  0x0b39,\n  0x0b3c,\n  0x0b44,\n  0x0b47,\n  0x0b48,\n  0x0b4b,\n  0x0b4d,\n  0x0b56,\n  0x0b57,\n  0x0b5c,\n  0x0b63,\n  0x0b66,\n  0x0b77,\n  0x0b82,\n  0x0b8a,\n  0x0b8e,\n  0x0b95,\n  0x0b99,\n  0x0b9f,\n  0x0ba3,\n  0x0ba4,\n  0x0ba8,\n  0x0baa,\n  0x0bae,\n  0x0bb9,\n  0x0bbe,\n  0x0bc2,\n  0x0bc6,\n  0x0bcd,\n  0x0bd0,\n  0x0bd0,\n  0x0bd7,\n  0x0bd7,\n  0x0be6,\n  0x0bfa,\n  0x0c00,\n  0x0c39,\n  0x0c3d,\n  0x0c4d,\n  0x0c55,\n  0x0c5a,\n  0x0c60,\n  0x0c63,\n  0x0c66,\n  0x0c6f,\n  0x0c78,\n  
0x0cb9,\n  0x0cbc,\n  0x0ccd,\n  0x0cd5,\n  0x0cd6,\n  0x0cde,\n  0x0ce3,\n  0x0ce6,\n  0x0cf2,\n  0x0d00,\n  0x0d4f,\n  0x0d54,\n  0x0d63,\n  0x0d66,\n  0x0d7f,\n  0x0d82,\n  0x0d96,\n  0x0d9a,\n  0x0dbd,\n  0x0dc0,\n  0x0dc6,\n  0x0dca,\n  0x0dca,\n  0x0dcf,\n  0x0ddf,\n  0x0de6,\n  0x0def,\n  0x0df2,\n  0x0df4,\n  0x0e01,\n  0x0e3a,\n  0x0e3f,\n  0x0e5b,\n  0x0e81,\n  0x0e84,\n  0x0e87,\n  0x0e8a,\n  0x0e8d,\n  0x0e8d,\n  0x0e94,\n  0x0ea7,\n  0x0eaa,\n  0x0ebd,\n  0x0ec0,\n  0x0ecd,\n  0x0ed0,\n  0x0ed9,\n  0x0edc,\n  0x0edf,\n  0x0f00,\n  0x0f6c,\n  0x0f71,\n  0x0fda,\n  0x1000,\n  0x10c7,\n  0x10cd,\n  0x10cd,\n  0x10d0,\n  0x124d,\n  0x1250,\n  0x125d,\n  0x1260,\n  0x128d,\n  0x1290,\n  0x12b5,\n  0x12b8,\n  0x12c5,\n  0x12c8,\n  0x1315,\n  0x1318,\n  0x135a,\n  0x135d,\n  0x137c,\n  0x1380,\n  0x1399,\n  0x13a0,\n  0x13f5,\n  0x13f8,\n  0x13fd,\n  0x1400,\n  0x169c,\n  0x16a0,\n  0x16f8,\n  0x1700,\n  0x1714,\n  0x1720,\n  0x1736,\n  0x1740,\n  0x1753,\n  0x1760,\n  0x1773,\n  0x1780,\n  0x17dd,\n  0x17e0,\n  0x17e9,\n  0x17f0,\n  0x17f9,\n  0x1800,\n  0x180d,\n  0x1810,\n  0x1819,\n  0x1820,\n  0x1878,\n  0x1880,\n  0x18aa,\n  0x18b0,\n  0x18f5,\n  0x1900,\n  0x192b,\n  0x1930,\n  0x193b,\n  0x1940,\n  0x1940,\n  0x1944,\n  0x196d,\n  0x1970,\n  0x1974,\n  0x1980,\n  0x19ab,\n  0x19b0,\n  0x19c9,\n  0x19d0,\n  0x19da,\n  0x19de,\n  0x1a1b,\n  0x1a1e,\n  0x1a7c,\n  0x1a7f,\n  0x1a89,\n  0x1a90,\n  0x1a99,\n  0x1aa0,\n  0x1aad,\n  0x1ab0,\n  0x1abe,\n  0x1b00,\n  0x1b4b,\n  0x1b50,\n  0x1b7c,\n  0x1b80,\n  0x1bf3,\n  0x1bfc,\n  0x1c37,\n  0x1c3b,\n  0x1c49,\n  0x1c4d,\n  0x1c88,\n  0x1c90,\n  0x1cba,\n  0x1cbd,\n  0x1cc7,\n  0x1cd0,\n  0x1cf9,\n  0x1d00,\n  0x1f15,\n  0x1f18,\n  0x1f1d,\n  0x1f20,\n  0x1f45,\n  0x1f48,\n  0x1f4d,\n  0x1f50,\n  0x1f7d,\n  0x1f80,\n  0x1fd3,\n  0x1fd6,\n  0x1fef,\n  0x1ff2,\n  0x1ffe,\n  0x2010,\n  0x2027,\n  0x2030,\n  0x205e,\n  0x2070,\n  0x2071,\n  0x2074,\n  0x209c,\n  0x20a0,\n  0x20bf,\n  0x20d0,\n  0x20f0,\n  
0x2100,\n  0x218b,\n  0x2190,\n  0x2426,\n  0x2440,\n  0x244a,\n  0x2460,\n  0x2b73,\n  0x2b76,\n  0x2b95,\n  0x2b98,\n  0x2cf3,\n  0x2cf9,\n  0x2d27,\n  0x2d2d,\n  0x2d2d,\n  0x2d30,\n  0x2d67,\n  0x2d6f,\n  0x2d70,\n  0x2d7f,\n  0x2d96,\n  0x2da0,\n  0x2e4e,\n  0x2e80,\n  0x2ef3,\n  0x2f00,\n  0x2fd5,\n  0x2ff0,\n  0x2ffb,\n  0x3001,\n  0x3096,\n  0x3099,\n  0x30ff,\n  0x3105,\n  0x31ba,\n  0x31c0,\n  0x31e3,\n  0x31f0,\n  0x4db5,\n  0x4dc0,\n  0x9fef,\n  0xa000,\n  0xa48c,\n  0xa490,\n  0xa4c6,\n  0xa4d0,\n  0xa62b,\n  0xa640,\n  0xa6f7,\n  0xa700,\n  0xa7b9,\n  0xa7f7,\n  0xa82b,\n  0xa830,\n  0xa839,\n  0xa840,\n  0xa877,\n  0xa880,\n  0xa8c5,\n  0xa8ce,\n  0xa8d9,\n  0xa8e0,\n  0xa953,\n  0xa95f,\n  0xa97c,\n  0xa980,\n  0xa9d9,\n  0xa9de,\n  0xaa36,\n  0xaa40,\n  0xaa4d,\n  0xaa50,\n  0xaa59,\n  0xaa5c,\n  0xaac2,\n  0xaadb,\n  0xaaf6,\n  0xab01,\n  0xab06,\n  0xab09,\n  0xab0e,\n  0xab11,\n  0xab16,\n  0xab20,\n  0xab65,\n  0xab70,\n  0xabed,\n  0xabf0,\n  0xabf9,\n  0xac00,\n  0xd7a3,\n  0xd7b0,\n  0xd7c6,\n  0xd7cb,\n  0xd7fb,\n  0xf900,\n  0xfa6d,\n  0xfa70,\n  0xfad9,\n  0xfb00,\n  0xfb06,\n  0xfb13,\n  0xfb17,\n  0xfb1d,\n  0xfbc1,\n  0xfbd3,\n  0xfd3f,\n  0xfd50,\n  0xfd8f,\n  0xfd92,\n  0xfdc7,\n  0xfdf0,\n  0xfdfd,\n  0xfe00,\n  0xfe19,\n  0xfe20,\n  0xfe6b,\n  0xfe70,\n  0xfefc,\n  0xff01,\n  0xffbe,\n  0xffc2,\n  0xffc7,\n  0xffca,\n  0xffcf,\n  0xffd2,\n  0xffd7,\n  0xffda,\n  0xffdc,\n  0xffe0,\n  0xffee,\n  0xfffc,\n  0xfffd,\n];\n\nconst _isNotPrint16 = <int>[\n  0x00ad,\n  0x038b,\n  0x038d,\n  0x03a2,\n  0x0530,\n  0x0590,\n  0x06dd,\n  0x083f,\n  0x085f,\n  0x08b5,\n  0x08e2,\n  0x0984,\n  0x09a9,\n  0x09b1,\n  0x09de,\n  0x0a04,\n  0x0a29,\n  0x0a31,\n  0x0a34,\n  0x0a37,\n  0x0a3d,\n  0x0a5d,\n  0x0a84,\n  0x0a8e,\n  0x0a92,\n  0x0aa9,\n  0x0ab1,\n  0x0ab4,\n  0x0ac6,\n  0x0aca,\n  0x0b00,\n  0x0b04,\n  0x0b29,\n  0x0b31,\n  0x0b34,\n  0x0b5e,\n  0x0b84,\n  0x0b91,\n  0x0b9b,\n  0x0b9d,\n  0x0bc9,\n  0x0c0d,\n  0x0c11,\n  0x0c29,\n  
0x0c45,\n  0x0c49,\n  0x0c57,\n  0x0c8d,\n  0x0c91,\n  0x0ca9,\n  0x0cb4,\n  0x0cc5,\n  0x0cc9,\n  0x0cdf,\n  0x0cf0,\n  0x0d04,\n  0x0d0d,\n  0x0d11,\n  0x0d45,\n  0x0d49,\n  0x0d84,\n  0x0db2,\n  0x0dbc,\n  0x0dd5,\n  0x0dd7,\n  0x0e83,\n  0x0e89,\n  0x0e98,\n  0x0ea0,\n  0x0ea4,\n  0x0ea6,\n  0x0eac,\n  0x0eba,\n  0x0ec5,\n  0x0ec7,\n  0x0f48,\n  0x0f98,\n  0x0fbd,\n  0x0fcd,\n  0x10c6,\n  0x1249,\n  0x1257,\n  0x1259,\n  0x1289,\n  0x12b1,\n  0x12bf,\n  0x12c1,\n  0x12d7,\n  0x1311,\n  0x1680,\n  0x170d,\n  0x176d,\n  0x1771,\n  0x191f,\n  0x1a5f,\n  0x1dfa,\n  0x1f58,\n  0x1f5a,\n  0x1f5c,\n  0x1f5e,\n  0x1fb5,\n  0x1fc5,\n  0x1fdc,\n  0x1ff5,\n  0x208f,\n  0x2bc9,\n  0x2bff,\n  0x2c2f,\n  0x2c5f,\n  0x2d26,\n  0x2da7,\n  0x2daf,\n  0x2db7,\n  0x2dbf,\n  0x2dc7,\n  0x2dcf,\n  0x2dd7,\n  0x2ddf,\n  0x2e9a,\n  0x3040,\n  0x3130,\n  0x318f,\n  0x321f,\n  0x32ff,\n  0xa9ce,\n  0xa9ff,\n  0xab27,\n  0xab2f,\n  0xfb37,\n  0xfb3d,\n  0xfb3f,\n  0xfb42,\n  0xfb45,\n  0xfe53,\n  0xfe67,\n  0xfe75,\n  0xffe7,\n];\n\nconst _isPrint32 = <int>[\n  0x010000,\n  0x01004d,\n  0x010050,\n  0x01005d,\n  0x010080,\n  0x0100fa,\n  0x010100,\n  0x010102,\n  0x010107,\n  0x010133,\n  0x010137,\n  0x01019b,\n  0x0101a0,\n  0x0101a0,\n  0x0101d0,\n  0x0101fd,\n  0x010280,\n  0x01029c,\n  0x0102a0,\n  0x0102d0,\n  0x0102e0,\n  0x0102fb,\n  0x010300,\n  0x010323,\n  0x01032d,\n  0x01034a,\n  0x010350,\n  0x01037a,\n  0x010380,\n  0x0103c3,\n  0x0103c8,\n  0x0103d5,\n  0x010400,\n  0x01049d,\n  0x0104a0,\n  0x0104a9,\n  0x0104b0,\n  0x0104d3,\n  0x0104d8,\n  0x0104fb,\n  0x010500,\n  0x010527,\n  0x010530,\n  0x010563,\n  0x01056f,\n  0x01056f,\n  0x010600,\n  0x010736,\n  0x010740,\n  0x010755,\n  0x010760,\n  0x010767,\n  0x010800,\n  0x010805,\n  0x010808,\n  0x010838,\n  0x01083c,\n  0x01083c,\n  0x01083f,\n  0x01089e,\n  0x0108a7,\n  0x0108af,\n  0x0108e0,\n  0x0108f5,\n  0x0108fb,\n  0x01091b,\n  0x01091f,\n  0x010939,\n  0x01093f,\n  0x01093f,\n  0x010980,\n  0x0109b7,\n  
0x0109bc,\n  0x0109cf,\n  0x0109d2,\n  0x010a06,\n  0x010a0c,\n  0x010a35,\n  0x010a38,\n  0x010a3a,\n  0x010a3f,\n  0x010a48,\n  0x010a50,\n  0x010a58,\n  0x010a60,\n  0x010a9f,\n  0x010ac0,\n  0x010ae6,\n  0x010aeb,\n  0x010af6,\n  0x010b00,\n  0x010b35,\n  0x010b39,\n  0x010b55,\n  0x010b58,\n  0x010b72,\n  0x010b78,\n  0x010b91,\n  0x010b99,\n  0x010b9c,\n  0x010ba9,\n  0x010baf,\n  0x010c00,\n  0x010c48,\n  0x010c80,\n  0x010cb2,\n  0x010cc0,\n  0x010cf2,\n  0x010cfa,\n  0x010d27,\n  0x010d30,\n  0x010d39,\n  0x010e60,\n  0x010e7e,\n  0x010f00,\n  0x010f27,\n  0x010f30,\n  0x010f59,\n  0x011000,\n  0x01104d,\n  0x011052,\n  0x01106f,\n  0x01107f,\n  0x0110c1,\n  0x0110d0,\n  0x0110e8,\n  0x0110f0,\n  0x0110f9,\n  0x011100,\n  0x011146,\n  0x011150,\n  0x011176,\n  0x011180,\n  0x0111cd,\n  0x0111d0,\n  0x0111f4,\n  0x011200,\n  0x01123e,\n  0x011280,\n  0x0112a9,\n  0x0112b0,\n  0x0112ea,\n  0x0112f0,\n  0x0112f9,\n  0x011300,\n  0x01130c,\n  0x01130f,\n  0x011310,\n  0x011313,\n  0x011344,\n  0x011347,\n  0x011348,\n  0x01134b,\n  0x01134d,\n  0x011350,\n  0x011350,\n  0x011357,\n  0x011357,\n  0x01135d,\n  0x011363,\n  0x011366,\n  0x01136c,\n  0x011370,\n  0x011374,\n  0x011400,\n  0x01145e,\n  0x011480,\n  0x0114c7,\n  0x0114d0,\n  0x0114d9,\n  0x011580,\n  0x0115b5,\n  0x0115b8,\n  0x0115dd,\n  0x011600,\n  0x011644,\n  0x011650,\n  0x011659,\n  0x011660,\n  0x01166c,\n  0x011680,\n  0x0116b7,\n  0x0116c0,\n  0x0116c9,\n  0x011700,\n  0x01171a,\n  0x01171d,\n  0x01172b,\n  0x011730,\n  0x01173f,\n  0x011800,\n  0x01183b,\n  0x0118a0,\n  0x0118f2,\n  0x0118ff,\n  0x0118ff,\n  0x011a00,\n  0x011a47,\n  0x011a50,\n  0x011a83,\n  0x011a86,\n  0x011aa2,\n  0x011ac0,\n  0x011af8,\n  0x011c00,\n  0x011c45,\n  0x011c50,\n  0x011c6c,\n  0x011c70,\n  0x011c8f,\n  0x011c92,\n  0x011cb6,\n  0x011d00,\n  0x011d36,\n  0x011d3a,\n  0x011d47,\n  0x011d50,\n  0x011d59,\n  0x011d60,\n  0x011d98,\n  0x011da0,\n  0x011da9,\n  0x011ee0,\n  0x011ef8,\n  0x012000,\n  
0x012399,\n  0x012400,\n  0x012474,\n  0x012480,\n  0x012543,\n  0x013000,\n  0x01342e,\n  0x014400,\n  0x014646,\n  0x016800,\n  0x016a38,\n  0x016a40,\n  0x016a69,\n  0x016a6e,\n  0x016a6f,\n  0x016ad0,\n  0x016aed,\n  0x016af0,\n  0x016af5,\n  0x016b00,\n  0x016b45,\n  0x016b50,\n  0x016b77,\n  0x016b7d,\n  0x016b8f,\n  0x016e40,\n  0x016e9a,\n  0x016f00,\n  0x016f44,\n  0x016f50,\n  0x016f7e,\n  0x016f8f,\n  0x016f9f,\n  0x016fe0,\n  0x016fe1,\n  0x017000,\n  0x0187f1,\n  0x018800,\n  0x018af2,\n  0x01b000,\n  0x01b11e,\n  0x01b170,\n  0x01b2fb,\n  0x01bc00,\n  0x01bc6a,\n  0x01bc70,\n  0x01bc7c,\n  0x01bc80,\n  0x01bc88,\n  0x01bc90,\n  0x01bc99,\n  0x01bc9c,\n  0x01bc9f,\n  0x01d000,\n  0x01d0f5,\n  0x01d100,\n  0x01d126,\n  0x01d129,\n  0x01d172,\n  0x01d17b,\n  0x01d1e8,\n  0x01d200,\n  0x01d245,\n  0x01d2e0,\n  0x01d2f3,\n  0x01d300,\n  0x01d356,\n  0x01d360,\n  0x01d378,\n  0x01d400,\n  0x01d49f,\n  0x01d4a2,\n  0x01d4a2,\n  0x01d4a5,\n  0x01d4a6,\n  0x01d4a9,\n  0x01d50a,\n  0x01d50d,\n  0x01d546,\n  0x01d54a,\n  0x01d6a5,\n  0x01d6a8,\n  0x01d7cb,\n  0x01d7ce,\n  0x01da8b,\n  0x01da9b,\n  0x01daaf,\n  0x01e000,\n  0x01e018,\n  0x01e01b,\n  0x01e02a,\n  0x01e800,\n  0x01e8c4,\n  0x01e8c7,\n  0x01e8d6,\n  0x01e900,\n  0x01e94a,\n  0x01e950,\n  0x01e959,\n  0x01e95e,\n  0x01e95f,\n  0x01ec71,\n  0x01ecb4,\n  0x01ee00,\n  0x01ee24,\n  0x01ee27,\n  0x01ee3b,\n  0x01ee42,\n  0x01ee42,\n  0x01ee47,\n  0x01ee54,\n  0x01ee57,\n  0x01ee64,\n  0x01ee67,\n  0x01ee9b,\n  0x01eea1,\n  0x01eebb,\n  0x01eef0,\n  0x01eef1,\n  0x01f000,\n  0x01f02b,\n  0x01f030,\n  0x01f093,\n  0x01f0a0,\n  0x01f0ae,\n  0x01f0b1,\n  0x01f0f5,\n  0x01f100,\n  0x01f10c,\n  0x01f110,\n  0x01f16b,\n  0x01f170,\n  0x01f1ac,\n  0x01f1e6,\n  0x01f202,\n  0x01f210,\n  0x01f23b,\n  0x01f240,\n  0x01f248,\n  0x01f250,\n  0x01f251,\n  0x01f260,\n  0x01f265,\n  0x01f300,\n  0x01f6d4,\n  0x01f6e0,\n  0x01f6ec,\n  0x01f6f0,\n  0x01f6f9,\n  0x01f700,\n  0x01f773,\n  0x01f780,\n  0x01f7d8,\n  
0x01f800,\n  0x01f80b,\n  0x01f810,\n  0x01f847,\n  0x01f850,\n  0x01f859,\n  0x01f860,\n  0x01f887,\n  0x01f890,\n  0x01f8ad,\n  0x01f900,\n  0x01f90b,\n  0x01f910,\n  0x01f970,\n  0x01f973,\n  0x01f976,\n  0x01f97a,\n  0x01f9a2,\n  0x01f9b0,\n  0x01f9b9,\n  0x01f9c0,\n  0x01f9c2,\n  0x01f9d0,\n  0x01f9ff,\n  0x01fa60,\n  0x01fa6d,\n  0x020000,\n  0x02a6d6,\n  0x02a700,\n  0x02b734,\n  0x02b740,\n  0x02b81d,\n  0x02b820,\n  0x02cea1,\n  0x02ceb0,\n  0x02ebe0,\n  0x02f800,\n  0x02fa1d,\n  0x0e0100,\n  0x0e01ef,\n];\n\nconst _isNotPrint32 = <int>[\n  // add 0x10000 to each entry\n  0x000c,\n  0x0027,\n  0x003b,\n  0x003e,\n  0x018f,\n  0x039e,\n  0x0809,\n  0x0836,\n  0x0856,\n  0x08f3,\n  0x0a04,\n  0x0a14,\n  0x0a18,\n  0x10bd,\n  0x1135,\n  0x11e0,\n  0x1212,\n  0x1287,\n  0x1289,\n  0x128e,\n  0x129e,\n  0x1304,\n  0x1329,\n  0x1331,\n  0x1334,\n  0x133a,\n  0x145a,\n  0x145c,\n  0x1c09,\n  0x1c37,\n  0x1ca8,\n  0x1d07,\n  0x1d0a,\n  0x1d3b,\n  0x1d3e,\n  0x1d66,\n  0x1d69,\n  0x1d8f,\n  0x1d92,\n  0x246f,\n  0x6a5f,\n  0x6b5a,\n  0x6b62,\n  0xd455,\n  0xd49d,\n  0xd4ad,\n  0xd4ba,\n  0xd4bc,\n  0xd4c4,\n  0xd506,\n  0xd515,\n  0xd51d,\n  0xd53a,\n  0xd53f,\n  0xd545,\n  0xd551,\n  0xdaa0,\n  0xe007,\n  0xe022,\n  0xe025,\n  0xee04,\n  0xee20,\n  0xee23,\n  0xee28,\n  0xee33,\n  0xee38,\n  0xee3a,\n  0xee48,\n  0xee4a,\n  0xee4c,\n  0xee50,\n  0xee53,\n  0xee58,\n  0xee5a,\n  0xee5c,\n  0xee5e,\n  0xee60,\n  0xee63,\n  0xee6b,\n  0xee73,\n  0xee78,\n  0xee7d,\n  0xee7f,\n  0xee8a,\n  0xeea4,\n  0xeeaa,\n  0xf0c0,\n  0xf0d0,\n  0xf93f,\n  0xf97b,\n];\n\n// isGraphic lists the graphic runes not matched by IsPrint.\nconst _isGraphic = <int>[\n  0x00a0,\n  0x1680,\n  0x2000,\n  0x2001,\n  0x2002,\n  0x2003,\n  0x2004,\n  0x2005,\n  0x2006,\n  0x2007,\n  0x2008,\n  0x2009,\n  0x200a,\n  0x202f,\n  0x205f,\n  0x3000,\n];\n"
  },
  {
    "path": "packages/langchain/lib/src/tools/tools.dart",
    "content": "export 'package:langchain_core/tools.dart';\n\nexport 'exception.dart';\n"
  },
  {
    "path": "packages/langchain/lib/src/utils/utils.dart",
    "content": "export 'package:langchain_core/utils.dart'\n    show\n        RetryOptions,\n        calculateSimilarity,\n        cosineSimilarity,\n        getIndexesMostSimilarEmbeddings;\n"
  },
  {
    "path": "packages/langchain/lib/src/vector_stores/memory.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/utils.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:meta/meta.dart';\nimport 'package:uuid/uuid.dart';\n\n/// Vector store that stores vectors in memory.\n///\n/// By default, it uses cosine similarity to compare vectors.\n///\n/// It iterates over all vectors in the store to find the most similar vectors.\n/// This is not efficient for large vector stores as it has a time complexity\n/// of O(vector_dimensionality * num_vectors).\n///\n/// This class is useful for testing and prototyping, but it is not recommended\n/// for production use cases. See other vector store integrations for\n/// production use cases.\n///\n/// ### Filtering\n///\n/// You can filter the search space before running the similarity search by\n/// providing a [VectorStoreSearchType.filter] that will be matched against the\n/// metadata of the documents in the vector store.\n///\n/// Example:\n/// ```dart\n/// final vs = MemoryVectorStore(...);\n/// final res = await store.similaritySearch(\n///   query: 'Test query',\n///   config: const VectorStoreSimilaritySearch(\n///     filter: {'type': 'foo'},\n///   ),\n/// );\n/// ```\n///\n/// This query will only consider documents that have a metadata field `type`\n/// with value `foo`.\nclass MemoryVectorStore extends VectorStore {\n  /// Main constructor for [MemoryVectorStore].\n  ///\n  /// - [embeddings] is the embeddings model to use to embed the documents.\n  /// - [similarityFunction] is the similarity function to use when comparing\n  ///   vectors. By default, it uses cosine similarity.\n  /// - [initialMemoryVectors] is an optional list of [MemoryVector] to\n  ///   initialize the vector store with. 
This is useful when loading a vector\n  ///   store from a database or file.\n  ///\n  /// If you want to create and populate a [MemoryVectorStore] from a list of\n  /// documents or texts, use [MemoryVectorStore.fromDocuments] or\n  /// [MemoryVectorStore.fromText].\n  MemoryVectorStore({\n    required super.embeddings,\n    this.similarityFunction = cosineSimilarity,\n    final List<MemoryVector>? initialMemoryVectors,\n  }) : memoryVectors = [...?initialMemoryVectors];\n\n  /// Similarity function to use when comparing vectors.\n  final double Function(List<double> a, List<double> b) similarityFunction;\n\n  /// Vectors stored in memory.\n  final List<MemoryVector> memoryVectors;\n\n  /// UUID generator.\n  final _uuid = const Uuid();\n\n  /// Creates a vector store from a list of documents.\n  ///\n  /// - [documents] is a list of documents to add to the vector store. If no\n  ///   document id is provided, a random uuid will be generated.\n  /// - [embeddings] is the embeddings model to use to embed the documents.\n  static Future<MemoryVectorStore> fromDocuments({\n    required final List<Document> documents,\n    required final Embeddings embeddings,\n  }) async {\n    final vs = MemoryVectorStore(embeddings: embeddings);\n    final docs = documents\n        .map(\n          (final doc) => doc.id == null || doc.id!.isEmpty\n              ? doc.copyWith(id: vs._uuid.v4())\n              : doc,\n        )\n        .toList(growable: false);\n    await vs.addDocuments(documents: docs);\n    return vs;\n  }\n\n  /// Creates a vector store from a list of texts.\n  ///\n  /// - [ids] is a list of ids to add to the vector store. If no id is provided,\n  ///   a random uuid will be generated.\n  /// - [texts] is a list of texts to add to the vector store.\n  /// - [metadatas] is a list of metadata to add to the vector store.\n  /// - [embeddings] is the embeddings model to use to embed the texts.\n  static Future<MemoryVectorStore> fromText({\n    final List<String>? 
ids,\n    required final List<String> texts,\n    final List<Map<String, dynamic>>? metadatas,\n    required final Embeddings embeddings,\n  }) async {\n    assert(\n      ids == null || ids.length == texts.length,\n      'ids and texts must have the same length',\n    );\n    assert(\n      metadatas == null || metadatas.length == texts.length,\n      'metadatas and texts must have the same length',\n    );\n    final vs = MemoryVectorStore(embeddings: embeddings);\n    await vs.addDocuments(\n      documents: texts\n          .mapIndexed(\n            (final i, final text) => Document(\n              id: ids?[i] ?? vs._uuid.v4(),\n              pageContent: text,\n              metadata: metadatas?[i] ?? const {},\n            ),\n          )\n          .toList(growable: false),\n    );\n    return vs;\n  }\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    memoryVectors.addAll(\n      vectors.mapIndexed((final i, final vector) {\n        final doc = documents[i];\n        return MemoryVector(document: doc, embedding: vector);\n      }),\n    );\n    return const [];\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) async {\n    memoryVectors.removeWhere(\n      (final vector) => ids.contains(vector.document.id),\n    );\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    var entries = memoryVectors;\n    if (config.filter != null) {\n      final filter = config.filter!;\n      entries = entries\n          .where(\n            (final entry) => filter.keys.every(\n              (final key) => entry.document.metadata[key] == filter[key],\n            ),\n          )\n          .toList(growable: false);\n    }\n\n    var searches = 
entries\n        .map(\n          (final entry) =>\n              (entry.document, similarityFunction(embedding, entry.embedding)),\n        )\n        .sorted((final a, final b) => (a.$2 > b.$2 ? -1 : 1))\n        .take(config.k);\n\n    if (config.scoreThreshold != null) {\n      searches = searches.where(\n        (final search) => search.$2 >= config.scoreThreshold!,\n      );\n    }\n\n    return searches.toList(growable: false);\n  }\n}\n\n/// {@template memory_vector}\n/// Represents an entry of [MemoryVectorStore].\n/// {@endtemplate}\n@immutable\nclass MemoryVector {\n  /// {@macro memory_vector}\n  const MemoryVector({required this.document, required this.embedding});\n\n  /// Document associated with the vector.\n  final Document document;\n\n  /// Vector embedding.\n  final List<double> embedding;\n\n  /// Creates a vector from a map.\n  factory MemoryVector.fromMap(final Map<String, dynamic> map) {\n    return MemoryVector(\n      document: Document.fromMap(map['document'] as Map<String, dynamic>),\n      embedding: map['embedding'] as List<double>,\n    );\n  }\n\n  /// Converts the vector to a map.\n  Map<String, dynamic> toMap() {\n    return {'document': document.toMap(), 'embedding': embedding};\n  }\n\n  @override\n  bool operator ==(covariant final MemoryVector other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            document == other.document &&\n            const ListEquality<double>().equals(embedding, other.embedding);\n  }\n\n  @override\n  int get hashCode =>\n      document.hashCode ^ const ListEquality<double>().hash(embedding);\n\n  @override\n  String toString() {\n    return 'MemoryVector{'\n        'document: $document, '\n        'embedding: ${embedding.length}}';\n  }\n}\n"
  },
  {
    "path": "packages/langchain/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'package:langchain_core/vector_stores.dart';\n\nexport 'memory.dart';\n"
  },
  {
    "path": "packages/langchain/pubspec.yaml",
    "content": "name: langchain\ndescription: Build powerful LLM-based Dart and Flutter applications with LangChain.dart.\nversion: 0.8.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  characters: ^1.4.0\n  collection: ^1.19.1\n  crypto: ^3.0.6\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  uuid: ^4.5.1\n\ndev_dependencies:\n  test: ^1.26.2\n  langchain_community: ^0.4.0+2\n  langchain_openai: ^0.8.1+1\n  langchain_ollama: ^0.4.1\n"
  },
  {
    "path": "packages/langchain/test/agents/assets/state_of_the_union.txt",
    "content": "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.  \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \n\nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. \n\nLet each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n\nPlease rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n\nThroughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos.   \n\nThey keep moving.   \n\nAnd the costs and the threats to America and the world keep rising.   \n\nThat’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \n\nThe United States is a member along with 29 other nations. \n\nIt matters. American diplomacy matters. American resolve matters. 
\n\nPutin’s latest attack on Ukraine was premeditated and unprovoked. \n\nHe rejected repeated efforts at diplomacy. \n\nHe thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready.  Here is what we did.   \n\nWe prepared extensively and carefully. \n\nWe spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. \n\nI spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression.  \n\nWe countered Russia’s lies with truth.   \n\nAnd now that he has acted the free world is holding him accountable. \n\nAlong with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \n\nWe are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \n\nTogether with our allies –we are right now enforcing powerful economic sanctions. \n\nWe are cutting off Russia’s largest banks from the international financial system.  \n\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless.   \n\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.  \n\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n\nThe U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.  \n\nWe are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. 
\n\nAnd tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \n\nThe Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. \n\nTogether with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \n\nWe are giving more than $1 Billion in direct assistance to Ukraine. \n\nAnd we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering.  \n\nLet me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.  \n\nOur forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west.  \n\nFor that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \n\nAs I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.  \n\nAnd we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them.  \n\nPutin has unleashed violence and chaos.  But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \n\nAnd a proud Ukrainian people, who have known 30 years  of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.  \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. 
\n\nAnd I’m taking robust action to make sure the pain of our sanctions  is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.  \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies.  \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. \n\nWhen the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n\nWhile it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \n\nWe see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.  \n\nIn the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. \n\nThis is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \n\nTo our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \n\nPutin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. \n\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n\nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n\nThe pandemic has been punishing. 
\n\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n\nI understand. \n\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n\nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan.  \n\nBecause people were hurting. We needed to act, and we did. \n\nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis. \n\nIt fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans.  \n\nHelped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n\nAnd as my Dad used to say, it gave people a little breathing room. \n\nAnd unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. \n\nAnd it worked. It created jobs. Lots of jobs. \n\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year  \nthan ever before in the history of America. \n\nOur economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long.  \n\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. \n\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \n\nVice President Harris and I ran for office with a new economic vision for America. \n\nInvest in America. Educate Americans. Grow the workforce. 
Build the economy from the bottom up  \nand the middle out, not from the top down.  \n\nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \n\nAmerica used to have the best roads, bridges, and airports on Earth. \n\nNow our infrastructure is ranked 13th in the world. \n\nWe won’t be able to compete for the jobs of the 21st Century if we don’t fix that. \n\nThat’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. \n\nThis was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. \n\nWe’re done talking about infrastructure weeks. \n\nWe’re going to have an infrastructure decade. \n\nIt is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China.  \n\nAs I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. \n\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \n\n4,000 projects have already been announced. \n\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \n\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. \n\nThe federal government spends about $600 Billion a year to keep the country safe and secure. 
\n\nThere’s been a law on the books for almost a century \nto make sure taxpayers’ dollars support American jobs and businesses. \n\nEvery Administration says they’ll do it, but we are actually doing it. \n\nWe will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. \n\nBut to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. \n\nThat’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. \n\nLet me give you one example of why it’s so important to pass it. \n\nIf you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. \n\nIt won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. \n\nThis is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. \n\nUp to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. \n\nSome of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. \n\nSmartphones. The Internet. Technology we have yet to invent. \n\nBut that’s just the beginning. \n\nIntel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from  \n$20 billion to $100 billion. \n\nThat would be one of the biggest investments in manufacturing in American history. \n\nAnd all they’re waiting for is for you to pass this bill. \n\nSo let’s not wait any longer. Send it to my desk. I’ll sign it.  \n\nAnd we will really take off. \n\nAnd Intel is not alone. \n\nThere’s something happening in America. \n\nJust look around and you’ll see an amazing story. 
\n\nThe rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing.   \n\nCompanies are choosing to build new factories here, when just a few years ago, they would have built them overseas. \n\nThat’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. \n\nGM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. \n\nAll told, we created 369,000 new manufacturing jobs in America just last year. \n\nPowered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. \n\nAs Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \n\nIt’s time. \n\nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.  \n\nInflation is robbing them of the gains they might otherwise feel. \n\nI get it. That’s why my top priority is getting prices under control. \n\nLook, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. \n\nThe pandemic also disrupted global supply chains. \n\nWhen factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. \n\nLook at cars. \n\nLast year, there weren’t enough semiconductors to make all the cars that people wanted to buy. \n\nAnd guess what, prices of automobiles went up. \n\nSo—we have a choice. \n\nOne way to fight inflation is to drive down wages and make Americans poorer.  \n\nI have a better plan to fight inflation. \n\nLower your costs, not your wages. \n\nMake more cars and semiconductors in America. \n\nMore infrastructure and innovation in America. \n\nMore goods moving faster and cheaper in America. 
\n\nMore jobs where you can earn a good living in America. \n\nAnd instead of relying on foreign supply chains, let’s make it in America. \n\nEconomists call it “increasing the productive capacity of our economy.” \n\nI call it building a better America. \n\nMy plan to fight inflation will lower your costs and lower the deficit. \n\n17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: \n\nFirst – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.  \n\nHe and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make.  \n\nBut drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. \n\nImagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it.  \n\nWhat it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. \n\nJoshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy.  \n\nFor Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it.  \n\nDrug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. \n\nLook, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. \n\nSecond – cut energy costs for families an average of $500 a year by combatting climate change.  
\n\nLet’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more;  lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. \n\nThird – cut the cost of child care. Many families pay up to $14,000 a year for child care per child.  \n\nMiddle-class and working families shouldn’t have to pay more than 7% of their income for care of young children.  \n\nMy plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. \n\nMy plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old.  \n\nAll of these will lower costs. \n\nAnd under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody.  \n\nThe one thing all Americans agree on is that the tax system is not fair. We have to fix it.  \n\nI’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. \n\nJust last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax.  \n\nThat’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. \n\nWe got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. \n\nThat’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter.  \n\nSo that’s my plan. It will grow the economy and lower costs for families. \n\nSo what are we waiting for? Let’s get this done. 
And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation.  \n\nMy plan will not only lower costs to give families a fair shot, it will lower the deficit. \n\nThe previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n\nBut in my administration, the watchdogs have been welcomed back. \n\nWe’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.  \n\nAnd tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n\nBy the end of this year, the deficit will be down to less than half what it was before I took office.  \n\nThe only president ever to cut the deficit by more than one trillion dollars in a single year. \n\nLowering your costs also means demanding more competition. \n\nI’m a capitalist, but capitalism without competition isn’t capitalism. \n\nIt’s exploitation—and it drives up prices. \n\nWhen corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. \n\nWe see it happening with ocean carriers moving goods in and out of America. \n\nDuring the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. \n\nTonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.  \n\nThat ends on my watch. \n\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. 
\n\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n\nLet’s pass the Paycheck Fairness Act and paid leave.  \n\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. \n\nAnd let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped.  \n\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n\nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n\nAnd I know you’re tired, frustrated, and exhausted. \n\nBut I also know this. \n\nBecause of the progress we’ve made, because of your resilience and the tools we have, tonight I can say  \nwe are moving forward safely, back to more normal routines.  \n\nWe’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July.  \n\nJust a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. \n\nUnder these new guidelines, most Americans in most of the country can now be mask free.   \n\nAnd based on the projections, more of the country will reach that point across the next couple of weeks. \n\nThanks to the progress we have made this past year, COVID-19 need no longer control our lives.  \n\nI know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. \n\nWe will continue to combat the virus as we do other diseases. 
And because this is a virus that mutates and spreads, we will stay on guard. \n\nHere are four common sense steps as we move forward safely.  \n\nFirst, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. \n\nWe will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. \n\nThe scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. \n\nWe’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%.  \n\nWe’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month.  \n\nAnd we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost.  \n\nIf you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. \n\nWe’re leaving no one behind or ignoring anyone’s needs as we move forward. \n\nAnd on testing, we have made hundreds of millions of tests available for you to order for free.   \n\nEven if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. \n\nSecond – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. \n\nIf necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years.  \n\nAnd, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. \n\nI cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does.  
\n\nThird – we can end the shutdown of schools and businesses. We have the tools we need. \n\nIt’s time for Americans to get back to work and fill our great downtowns again.  People working from home can feel safe to begin to return to the office.   \n\nWe’re doing that here in the federal government. The vast majority of federal workers will once again work in person. \n\nOur schools are open. Let’s keep it that way. Our kids need to be in school. \n\nAnd with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. \n\nWe achieved this because we provided free vaccines, treatments, tests, and masks. \n\nOf course, continuing this costs money. \n\nI will soon send Congress a request. \n\nThe vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.   \n\nFourth, we will continue vaccinating the world.     \n\nWe’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. \n\nAnd we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease.  \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans.  \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. 
\n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n\nI’ve worked on these issues a long time. \n\nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n\nSo let’s not abandon our streets. Or choose between safety and equal justice. \n\nLet’s come together to protect our communities, restore trust, and hold law enforcement accountable. \n\nThat’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n\nThat’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope.  \n\nWe should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \n\nI ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.  \n\nAnd I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. \n\nAnd I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \n\nBan assault weapons and high-capacity magazines. \n\nRepeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. \n\nThese laws don’t infringe on the Second Amendment. They save lives. 
\n\nThe most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. \n\nIn state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n\nWe cannot let this happen. \n\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling.  \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.  \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. 
\n\nWe can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. \n\nProvide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. \n\nRevise our laws so businesses have the workers they need and families don’t wait decades to reunite. \n\nIt’s not only the right thing to do—it’s the economically smart thing to do. \n\nThat’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. \n\nLet’s get it done once and for all. \n\nAdvancing liberty and justice also requires protecting the rights of women. \n\nThe constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. \n\nIf we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. \n\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together.  \n\nFirst, beat the opioid epidemic. \n\nThere is so much we can do. 
Increase funding for prevention, treatment, harm reduction, and recovery.  \n\nGet rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. \n\nIf you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. \n\nSecond, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down.  \n\nThe American Rescue Plan gave schools money to hire teachers and help students make up for lost learning.  \n\nI urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. \n\nChildren were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. \n\nAs Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. \n\nIt’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. \n\nAnd let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. \n\nThird, support our veterans. \n\nVeterans are the best of us. \n\nI’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. \n\nMy administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free.  \n\nOur troops in Iraq and Afghanistan faced many dangers. \n\nOne was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. 
\n\nWhen they came home, many of the world’s fittest and best trained warriors were never the same. \n\nHeadaches. Numbness. Dizziness. \n\nA cancer that would put them in a flag-draped coffin. \n\nI know. \n\nOne of those soldiers was my son Major Beau Biden. \n\nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n\nBut I’m committed to finding out everything we can. \n\nCommitted to military families like Danielle Robinson from Ohio. \n\nThe widow of Sergeant First Class Heath Robinson.  \n\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n\nStationed near Baghdad, just yards from burn pits the size of football fields. \n\nHeath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. \n\nBut cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. \n\nDanielle says Heath was a fighter to the very end. \n\nHe didn’t know how to stop fighting, and neither did she. \n\nThrough her pain she found purpose to demand we do better. \n\nTonight, Danielle—we are. \n\nThe VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \n\nAnd tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. \n\nI’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \n\nAnd fourth, let’s end cancer as we know it. \n\nThis is personal to me and Jill, to Kamala, and to so many of you. \n\nCancer is the #2 cause of death in America–second only to heart disease. \n\nLast month, I announced our plan to supercharge  \nthe Cancer Moonshot that President Obama asked me to lead six years ago. 
\n\nOur goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases.  \n\nMore support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more.  \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.  \n\nWe will meet the test. \n\nTo protect freedom and liberty, to expand fairness and opportunity. \n\nWe will save democracy. \n\nAs hard as these times have been, I am more optimistic about America today than I have been my whole life. \n\nBecause I see the future that is within our grasp. \n\nBecause I know there is simply nothing beyond our capacity. \n\nWe are the only nation on Earth that has always turned every crisis we have faced into an opportunity. \n\nThe only nation that can be defined by a single word: possibilities. \n\nSo on this night, in our 245th year as a nation, I have come to report on the State of the Union. \n\nAnd my report is this: the State of the Union is strong—because you, the American people, are strong. 
\n\nWe are stronger today than we were a year ago. \n\nAnd we will be stronger a year from now than we are today. \n\nNow is our moment to meet and overcome the challenges of our time. \n\nAnd we will, as one people. \n\nOne America. \n\nThe United States of America. \n\nMay God bless you all. May God protect our troops."
  },
  {
    "path": "packages/langchain/test/agents/executor_test.dart",
    "content": "// ignore_for_file: unused_element, unused_element_parameter\nimport 'dart:async';\nimport 'dart:convert';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('AgentExecutor tests', () {\n    test('should return result when maxIterations is reached', () async {\n      final tool = _MockTool();\n      final agent = _SingleActionMockAgent(\n        tools: [tool],\n        actions: [\n          AgentAction(id: 'id', tool: tool.name, toolInput: {'input': 'mock'}),\n        ],\n      );\n      final executor = AgentExecutor(agent: agent, maxIterations: 1);\n      final result = await executor.run('test');\n      expect(result, 'Agent stopped due to iteration limit or time limit.');\n    });\n\n    test('should return result when maxExecutionTime is reached', () async {\n      final tool = _MockTool();\n      final agent = _SingleActionMockAgent(\n        tools: [tool],\n        actions: [\n          AgentAction(id: 'id', tool: tool.name, toolInput: {'input': 'mock'}),\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        maxExecutionTime: const Duration(milliseconds: 1),\n      );\n      final result = await executor.run('test');\n      expect(result, 'Agent stopped due to iteration limit or time limit.');\n    });\n\n    test('should return AgentFinish result if action is AgentFinish', () async {\n      final tool = _MockTool();\n      final agent = _SingleActionMockAgent(\n        tools: [tool],\n        actions: [\n          AgentAction(id: 'id', tool: tool.name, toolInput: {'input': 'mock'}),\n          const AgentFinish(\n            returnValues: {BaseActionAgent.agentReturnKey: 'mock'},\n          ),\n        ],\n      );\n      final executor = AgentExecutor(agent: agent);\n      final result = await executor.run('test');\n      expect(result, 'mock');\n    });\n\n    test('should run InvalidTool if tool not found in map', () async {\n      final tool = 
_MockTool(name: 'invalid_tool');\n      final agent = _SingleActionMockAgent(\n        tools: [tool],\n        actions: [\n          const AgentAction(\n            id: 'id',\n            tool: 'tool',\n            toolInput: {'input': 'mock'},\n          ),\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        maxIterations: 1,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(\n        intermediateSteps.first.observation,\n        'tool is not a valid tool, try another one.',\n      );\n    });\n\n    test(\n      'should handle OutputParserException with handleParsingErrors function',\n      () async {\n        final tool = _MockTool();\n        final agent = _SingleActionMockAgent(\n          throwOutputParserException: true,\n          tools: [tool],\n          actions: [\n            const AgentAction(\n              id: 'id',\n              tool: 'invalid_tool',\n              toolInput: {'input': 'mock'},\n            ),\n          ],\n        );\n        final executor = AgentExecutor(\n          agent: agent,\n          handleParsingErrors: (final _) => {'input': 'fallback'},\n          maxIterations: 1,\n          returnIntermediateSteps: true,\n        );\n        final result = await executor.call('test');\n        final intermediateSteps =\n            result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n        expect(intermediateSteps.first.observation, 'fallback');\n      },\n    );\n\n    test('Test RunnableAgent', () async {\n      final agent = Agent.fromRunnable(\n        Runnable.mapInput(\n          (final AgentPlanInput planInput) => {\n            'input': planInput.inputs,\n            'intermediateSteps': planInput.intermediateSteps,\n          },\n        ).pipe(\n          Runnable.mapInput(\n            
(final _) => [\n              const AgentFinish(\n                returnValues: {BaseActionAgent.agentReturnKey: 'mock'},\n              ),\n            ],\n          ),\n        ),\n        tools: [],\n      );\n\n      final executor = AgentExecutor(agent: agent);\n      final result = await executor.run('test');\n      expect(result, 'mock');\n    });\n  });\n\n  group('Tool output serialization tests', () {\n    test('should preserve String tool output as-is', () async {\n      final tool = _GenericMockTool<String>(output: 'string-result');\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n            const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, 'string-result');\n    });\n\n    test('should JSON-encode Map tool output', () async {\n      final mapOutput = {'status': 'ok', 'count': 42};\n      final tool = _GenericMockTool<Map<String, dynamic>>(output: mapOutput);\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n            const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final 
executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, jsonEncode(mapOutput));\n      expect(jsonDecode(intermediateSteps.first.observation), {\n        'status': 'ok',\n        'count': 42,\n      });\n    });\n\n    test('should JSON-encode List tool output', () async {\n      final listOutput = ['item1', 'item2', 'item3'];\n      final tool = _GenericMockTool<List<String>>(output: listOutput);\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n            const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, jsonEncode(listOutput));\n      expect(jsonDecode(intermediateSteps.first.observation), [\n        'item1',\n        'item2',\n        'item3',\n      ]);\n    });\n\n    test('should JSON-encode int tool output', () async {\n      final tool = _GenericMockTool<int>(output: 42);\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n          
  const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, '42');\n    });\n\n    test('should JSON-encode nested structure tool output', () async {\n      final nestedOutput = {\n        'results': [\n          {'id': 1, 'name': 'first'},\n          {'id': 2, 'name': 'second'},\n        ],\n        'metadata': {'total': 2, 'page': 1},\n      };\n      final tool = _GenericMockTool<Map<String, dynamic>>(output: nestedOutput);\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n            const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, jsonEncode(nestedOutput));\n      final decoded =\n          jsonDecode(intermediateSteps.first.observation)\n              as Map<String, dynamic>;\n      expect(decoded['results'], hasLength(2));\n      final metadata = decoded['metadata'] as Map<String, dynamic>;\n      expect(metadata['total'], 2);\n    });\n\n    test('should JSON-encode bool tool output', () async {\n      final 
tool = _GenericMockTool<bool>(output: true);\n      final agent = _SequentialActionMockAgent(\n        tools: [tool],\n        actionSequence: [\n          [\n            AgentAction(\n              id: 'id',\n              tool: tool.name,\n              toolInput: {'input': 'test'},\n            ),\n          ],\n          [\n            const AgentFinish(\n              returnValues: {BaseActionAgent.agentReturnKey: 'done'},\n            ),\n          ],\n        ],\n      );\n      final executor = AgentExecutor(\n        agent: agent,\n        returnIntermediateSteps: true,\n      );\n      final result = await executor.call('test');\n      final intermediateSteps =\n          result[AgentExecutor.intermediateStepsOutputKey] as List<AgentStep>;\n      expect(intermediateSteps.first.observation, 'true');\n    });\n  });\n}\n\nfinal class _MockTool extends StringTool {\n  _MockTool({super.name = 'tool', super.returnDirect = false})\n    : super(description: '$name-description');\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final ToolOptions? 
options,\n  }) async {\n    return '$name-output';\n  }\n}\n\nfinal class _SingleActionMockAgent extends BaseActionAgent {\n  _SingleActionMockAgent({\n    super.tools = const [],\n    this.actions = const [],\n    this.throwOutputParserException = false,\n  });\n\n  final List<BaseAgentAction> actions;\n  final bool throwOutputParserException;\n\n  int planCount = 0;\n\n  @override\n  String get agentType => 'mock-single-action-agent';\n\n  @override\n  Set<String> get inputKeys => {'mock-input-key'};\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async {\n    if (throwOutputParserException) {\n      throw const OutputParserException(message: 'mock');\n    }\n    planCount++;\n    return actions;\n  }\n}\n\nfinal class _MultiActionMockAgent extends BaseMultiActionAgent {\n  _MultiActionMockAgent({super.tools = const [], this.actions = const []});\n\n  final List<BaseAgentAction> actions;\n\n  int planCount = 0;\n\n  @override\n  String get agentType => 'mock-multi-action-agent';\n\n  @override\n  Set<String> get inputKeys => {'mock-input-key'};\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async {\n    planCount++;\n    return actions;\n  }\n}\n\n/// A mock agent that returns actions sequentially from a list of action lists.\n/// Each call to plan() returns the next list in the sequence.\nfinal class _SequentialActionMockAgent extends BaseActionAgent {\n  _SequentialActionMockAgent({\n    super.tools = const [],\n    required this.actionSequence,\n  });\n\n  /// A list of action lists. 
Each call to plan() returns the next list.\n  final List<List<BaseAgentAction>> actionSequence;\n\n  int _callCount = 0;\n\n  @override\n  String get agentType => 'mock-sequential-action-agent';\n\n  @override\n  Set<String> get inputKeys => {'mock-input-key'};\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async {\n    if (_callCount >= actionSequence.length) {\n      return [\n        const AgentFinish(\n          returnValues: {BaseActionAgent.agentReturnKey: 'exhausted'},\n        ),\n      ];\n    }\n    return actionSequence[_callCount++];\n  }\n}\n\n/// A generic mock tool that can return any type of output.\n/// Used to test tool output serialization in AgentExecutor.\nfinal class _GenericMockTool<T extends Object>\n    extends Tool<String, ToolOptions, T> {\n  _GenericMockTool({required T output, super.name = 'generic_tool'})\n    : _output = output,\n      super(\n        description: 'A generic mock tool for testing',\n        inputJsonSchema: const {\n          'type': 'object',\n          'properties': {\n            'input': {'type': 'string'},\n          },\n        },\n      );\n\n  final T _output;\n\n  @override\n  Future<T> invokeInternal(\n    final String input, {\n    final ToolOptions? options,\n  }) async {\n    return _output;\n  }\n\n  @override\n  String getInputFromJson(final Map<String, dynamic> json) {\n    return json['input'] as String? ?? '';\n  }\n}\n"
  },
  {
    "path": "packages/langchain/test/agents/tools_agent_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\n@TestOn('vm')\n@Timeout(Duration(minutes: 50))\nlibrary; // Uses dart:io\n\nimport 'dart:async';\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:meta/meta.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  late BaseChatModel llm;\n  const defaultOllamaModel = 'llama3-groq-tool-use';\n  const defaultOpenAIModel = 'gpt-4o-mini';\n\n  group(\n    'ChatToolsAgent using Ollama tests',\n    skip: Platform.environment.containsKey('CI'),\n    () {\n      setUp(() {\n        llm = ChatOllama(\n          defaultOptions: ChatOllamaOptions(\n            model: defaultOllamaModel,\n            temperature: 0,\n            tools: [CalculatorTool(), searchTool],\n            keepAlive: 1,\n          ),\n        );\n      });\n\n      test('Test ChatToolsAgent with calculator tool', () async {\n        await testAgentWithCalculator(llm);\n      });\n\n      test('Test ToolsAgent with messages memory', () async {\n        await testMemory(llm, returnMessages: true);\n      });\n\n      test('Test ToolsAgent with string memory throws error', () {\n        expect(\n          () async => testMemory(llm, returnMessages: false),\n          throwsA(isA<AssertionError>()),\n        );\n      });\n\n      test('Test ToolsAgent LCEL equivalent using Ollama', () async {\n        final res = await testLCDLEquivalent(llm: llm, tool: CalculatorTool())\n            .invoke({\n              'input':\n                  'What is 40 raised to the power of 0.43? 
'\n                  'Return the result with 3 decimals.',\n            });\n        expect(res['output'], contains('4.88'));\n      });\n    },\n  );\n\n  group('ChatToolsAgent using OpenAi tests', () {\n    setUp(() {\n      final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n      llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: ChatOpenAIOptions(\n          model: defaultOpenAIModel,\n          tools: [CalculatorTool(), searchTool],\n        ),\n      );\n    });\n\n    test('Test ChatToolsAgent with calculator tool', () async {\n      await testAgentWithCalculator(llm);\n    });\n\n    test('Test ToolsAgent with messages memory', () async {\n      await testMemory(llm, returnMessages: true);\n    });\n\n    test('Test ToolsAgent with string memory throws error', () {\n      expect(\n        () async => testMemory(llm, returnMessages: false),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test ToolsAgent LCEL equivalent using OpenAi', () async {\n      final res = await testLCDLEquivalent(llm: llm, tool: CalculatorTool())\n          .invoke({\n            'input':\n                'What is 40 raised to the power of 0.43? '\n                'Return the result with 3 decimals.',\n          });\n      expect(res['output'], contains('4.88'));\n    });\n  });\n}\n\nFuture<void> testAgentWithCalculator(\n  BaseChatModel<ChatModelOptions> llm,\n) async {\n  final agent = ToolsAgent.fromLLMAndTools(llm: llm);\n  final executor = AgentExecutor(agent: agent);\n  final res = await executor.run(\n    'What is 40 raised to the power of 0.43? 
'\n    'Return the result with 3 decimals.',\n  );\n  expect(res, contains('4.885'));\n}\n\nFuture<void> testMemory(\n  BaseChatModel llm, {\n  required final bool returnMessages,\n}) async {\n  final memory = ConversationBufferMemory(returnMessages: returnMessages);\n  final agent = ToolsAgent.fromLLMAndTools(llm: llm, memory: memory);\n\n  final executor = AgentExecutor(agent: agent);\n\n  final res1 = await executor.run(\n    'Search for cat names. Return only 3 results.',\n  );\n\n  expect(res1, contains('AAA'));\n  expect(res1, contains('BBB'));\n  expect(res1, contains('CCC'));\n  expect(res1, isNot(contains('DDD')));\n\n  final res2 = await executor.run(\n    'How many results did the search return? Respond with a number.',\n  );\n  expect(res2, contains('3'));\n  expect(res2, isNot(contains('1')));\n  expect(res2, isNot(contains('2')));\n  expect(res2, isNot(contains('4')));\n\n  final res3 = await executor.run('What was the last result?');\n  expect(res3, contains('CCC'));\n}\n\nAgentExecutor testLCDLEquivalent({\n  required BaseChatModel<ChatModelOptions> llm,\n  required Tool tool,\n}) {\n  final prompt = ChatPromptTemplate.fromTemplates(const [\n    (ChatMessageType.system, 'You are a helpful assistant'),\n    (ChatMessageType.human, '{input}'),\n    (ChatMessageType.messagesPlaceholder, 'agent_scratchpad'),\n  ]);\n\n  final agent = Agent.fromRunnable(\n    Runnable.mapInput(\n      (AgentPlanInput planInput) => <String, dynamic>{\n        'input': planInput.inputs['input'],\n        'agent_scratchpad': planInput.intermediateSteps\n            .map((s) {\n              return s.action.messageLog +\n                  [\n                    ChatMessage.tool(\n                      toolCallId: s.action.id,\n                      content: s.observation,\n                    ),\n                  ];\n            })\n            .expand((m) => m)\n            .toList(growable: false),\n      },\n    ).pipe(prompt).pipe(llm).pipe(const 
ToolsAgentOutputParser()),\n    tools: [tool],\n  );\n\n  return AgentExecutor(agent: agent);\n}\n\n@immutable\nclass _SearchInput {\n  const _SearchInput({required this.query, required this.n});\n\n  final String query;\n  final int n;\n\n  _SearchInput.fromJson(final Map<String, dynamic> json)\n    : this(query: json['query'] as String, n: json['n'] as int);\n\n  @override\n  bool operator ==(covariant _SearchInput other) =>\n      identical(this, other) || query == other.query && n == other.n;\n\n  @override\n  int get hashCode => query.hashCode ^ n.hashCode;\n}\n\nfinal Tool<Object, ToolOptions, Object> searchTool =\n    Tool.fromFunction<_SearchInput, String>(\n      name: 'search',\n      description: 'Tool for searching the web.',\n      inputJsonSchema: const {\n        'type': 'object',\n        'properties': {\n          'query': {'type': 'string', 'description': 'The query to search for'},\n          'n': {\n            'type': 'integer',\n            'description': 'The number of results to return',\n          },\n        },\n        'required': ['query'],\n      },\n      func: (final _SearchInput toolInput) {\n        final n = toolInput.n;\n        final res = List<String>.generate(\n          n,\n          (i) => 'Result ${i + 1}: ${String.fromCharCode(65 + i) * 3}',\n        );\n        return 'Results:\\n${res.join('\\n')}';\n      },\n      getInputFromJson: _SearchInput.fromJson,\n    );\n"
  },
  {
    "path": "packages/langchain/test/chains/base_test.dart",
    "content": "// ignore_for_file: unused_element, unnecessary_async\nimport 'dart:async';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('BaseChain tests', () {\n    test('Test base chain input logic - input map', () async {\n      final chain = _FakeChain();\n      final res = await chain.call({'input': 'test'});\n      expect(res, equals({'input': 'test', 'output': 'test'}));\n    });\n\n    test('Test base chain input logic - input map - only outputs', () async {\n      final chain = _FakeChain();\n      final res = await chain.call({'input': 'test'}, returnOnlyOutputs: true);\n      expect(res, equals({'output': 'test'}));\n    });\n\n    test('Test base chain input logic - explicit input keys', () async {\n      final chain = _FakeChain(inputVariables: {'input'});\n      final res = await chain.call({'input': 'test'});\n      expect(res, equals({'input': 'test', 'output': 'test'}));\n    });\n\n    test(\n      'Test base chain input logic - explicit input keys - invalid input',\n      () {\n        final chain = _FakeChain(inputVariables: {'input'});\n        expect(\n          () async => chain.call({'inputInvalid': 'test'}),\n          throwsA(isA<ArgumentError>()),\n        );\n      },\n    );\n\n    test('Test base chain input logic - explicit output keys', () async {\n      final chain = _FakeChain(outputVariables: {'output'});\n      final res = await chain.call({'input': 'test'});\n      expect(res, equals({'input': 'test', 'output': 'test'}));\n    });\n\n    test(\n      'Test base chain input logic - explicit output keys - invalid output',\n      () {\n        final chain = _FakeChain(outputVariables: {'outputOther'});\n        expect(\n          () async => chain.call({'input': 'test'}),\n          throwsA(isA<ArgumentError>()),\n        );\n      },\n    );\n\n    test('Test base chain input logic - direct input', () async {\n      final chain = _FakeChain();\n      final res = await 
chain.call('test');\n      expect(res, equals({'input': 'test', 'output': 'test'}));\n    });\n\n    test(\n      'Test base chain input logic - direct input with explicit input key',\n      () async {\n        final chain = _FakeChain(inputVariables: {'inputDirect'});\n        final res = await chain.call('test');\n        expect(res, equals({'inputDirect': 'test', 'output': 'test'}));\n      },\n    );\n\n    test('Test base chain input logic - with memory', () async {\n      final memory = ConversationBufferMemory();\n      await memory.saveContext(\n        inputValues: {'input': 'test1'},\n        outputValues: {'output': 'test1'},\n      );\n      final chain = _FakeChain(memory: memory);\n      final res1 = await chain.call({'input': 'test2'});\n      expect(\n        res1,\n        equals({\n          'input': 'test2',\n          'history': 'Human: test1\\nAI: test1',\n          'output': 'test2',\n        }),\n      );\n      final res2 = await chain.call({'input': 'test3'});\n      expect(\n        res2,\n        equals({\n          'input': 'test3',\n          'history':\n              'Human: test1\\nAI: test1\\n'\n              'Human: test2\\nAI: test2',\n          'output': 'test3',\n        }),\n      );\n    });\n  });\n\n  group('Runnable tests', () {\n    test('Chain as Runnable', () async {\n      final model = FakeLLM(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final run = LLMChain(prompt: prompt, llm: model);\n      final res = await run.invoke({'foo': 'Hello world!'});\n      expect(res[LLMChain.defaultOutputKey], 'Hello world!');\n    });\n\n    test('Streaming Chain', () async {\n      final model = FakeLLM(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final run = LLMChain(prompt: prompt, llm: model);\n      final stream = run.stream({'foo': 'Hello world!'});\n\n      final streamList = await stream.toList();\n      
expect(streamList.length, 1);\n      expect(streamList.first, isA<Map<String, dynamic>>());\n\n      final res = streamList.first;\n      expect(res[LLMChain.defaultOutputKey], 'Hello world!');\n    });\n  });\n}\n\nclass _FakeChain extends BaseChain {\n  _FakeChain({\n    this.inputVariables = const {},\n    this.outputVariables = const {},\n    super.memory,\n  });\n\n  final Set<String> inputVariables;\n\n  final Set<String> outputVariables;\n\n  @override\n  String get chainType => 'fake_chain';\n\n  @override\n  Set<String> get inputKeys => inputVariables;\n\n  @override\n  Set<String> get outputKeys => outputVariables;\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    return {\n      for (final key in inputs.keys)\n        key.replaceAll(inputVariables.firstOrNull ?? 'input', 'output'):\n            inputs[key],\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/combine_documents/map_reduce_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('MapReduceDocumentsChain tests', () {\n    Future<void> testMapReduceDocumentsChain(\n      final BaseLanguageModel model,\n    ) async {\n      final mapPrompt = PromptTemplate.fromTemplate(\n        'Summarize this content: {context}',\n      );\n      final mapLlmChain = LLMChain(prompt: mapPrompt, llm: model);\n\n      final reducePrompt = PromptTemplate.fromTemplate(\n        'Combine these summaries: {context}',\n      );\n      final reduceLlmChain = LLMChain(prompt: reducePrompt, llm: model);\n      final reduceDocsChain = StuffDocumentsChain(llmChain: reduceLlmChain);\n\n      final reduceChain = MapReduceDocumentsChain(\n        mapLlmChain: mapLlmChain,\n        reduceDocumentsChain: reduceDocsChain,\n        returnIntermediateSteps: true,\n      );\n\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n        Document(pageContent: 'Hello 3!'),\n      ];\n      final res = await reduceChain(docs);\n      expect(res[MapReduceDocumentsChain.defaultOutputKey], 'Hello 123!');\n      expect(res[MapReduceDocumentsChain.intermediateStepsOutputKey], [\n        '1',\n        '2',\n        '3',\n      ]);\n    }\n\n    test('Test MapReduceDocumentsChain with LLM', () async {\n      final model = FakeLLM(\n        responses: [\n          // Summarize this content: Hello 1!\n          '1',\n          // Summarize this content: Hello 2!\n          '2',\n          // Summarize this content: Hello 3!\n          '3',\n          // Combine these summaries: 123\n          'Hello 123!',\n        ],\n      );\n      await testMapReduceDocumentsChain(model);\n    });\n\n    test('Test MapReduceDocumentsChain with Chat model', () async {\n      final model = FakeChatModel(\n        responses: [\n          // Summarize this content: Hello 1!\n          '1',\n          // Summarize this content: Hello 
2!\n          '2',\n          // Summarize this content: Hello 3!\n          '3',\n          // Combine these summaries: 123\n          'Hello 123!',\n        ],\n      );\n      await testMapReduceDocumentsChain(model);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/combine_documents/reduce_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ReduceDocumentsChain tests', () {\n    test('Test reduce', () async {\n      final llm = FakeLLM(\n        responses: [\n          // Summarize this content: Hello 1!\\n\\nHello 2!\\n\\nHello 3!\\n\\nHello 4!\n          'Hello 1234!',\n        ],\n      );\n\n      final finalPrompt = PromptTemplate.fromTemplate(\n        'Summarize this content: {context}',\n      );\n      final finalLlmChain = LLMChain(prompt: finalPrompt, llm: llm);\n      final combineDocsChain = StuffDocumentsChain(llmChain: finalLlmChain);\n\n      final reduceChain = ReduceDocumentsChain(\n        combineDocumentsChain: combineDocsChain,\n      );\n\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n        Document(pageContent: 'Hello 3!'),\n        Document(pageContent: 'Hello 4!'),\n      ];\n      final res = await reduceChain.run(docs);\n      expect(res, 'Hello 1234!');\n    });\n\n    test('Test reduce and collapse', () async {\n      final llm = FakeLLM(\n        responses: [\n          // Collapse this content: Hello 1!\\n\\nHello 2!\\n\\nHello 3!\n          'Hello 123!',\n          // Collapse this content: Hello 4!\n          'Hello 4!',\n          // Summarize this content: Hello 123!\\n\\nHello 4!\n          'Hello 1234!',\n        ],\n      );\n\n      final finalPrompt = PromptTemplate.fromTemplate(\n        'Summarize this content: {context}',\n      );\n      final finalLlmChain = LLMChain(prompt: finalPrompt, llm: llm);\n      final combineDocsChain = StuffDocumentsChain(llmChain: finalLlmChain);\n\n      final collapsePrompt = PromptTemplate.fromTemplate(\n        'Collapse this content: {context}',\n      );\n      final collapseLlmChain = LLMChain(prompt: collapsePrompt, llm: llm);\n      final collapseDocsChain = StuffDocumentsChain(llmChain: collapseLlmChain);\n\n      final reduceChain = 
ReduceDocumentsChain(\n        combineDocumentsChain: combineDocsChain,\n        collapseDocumentsChain: collapseDocsChain,\n        tokenMax: 7,\n      );\n\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n        Document(pageContent: 'Hello 3!'),\n        Document(pageContent: 'Hello 4!'),\n      ];\n      final res = await reduceChain.run(docs);\n      expect(res, 'Hello 1234!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/combine_documents/stuff_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('StuffDocumentsChain tests', () {\n    Future<void> testStuffDocumentsChain(final BaseLanguageModel model) async {\n      final prompt = PromptTemplate.fromTemplate(\n        'Print {foo}. Context: {context}',\n      );\n      final llmChain = LLMChain(prompt: prompt, llm: model);\n      final stuffChain = StuffDocumentsChain(llmChain: llmChain);\n\n      const foo = 'Hello world!';\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n      ];\n      final res = await stuffChain.call({'foo': foo, 'input_documents': docs});\n      expect(res['foo'], foo);\n      expect(res[StuffDocumentsChain.defaultInputKey], docs);\n      expect(\n        res[StuffDocumentsChain.defaultOutputKey],\n        'Print Hello world!. Context: Hello 1!\\n\\nHello 2!',\n      );\n    }\n\n    test('Test StuffDocumentsChain with LLM', () async {\n      const model = FakeEchoLLM();\n      await testStuffDocumentsChain(model);\n    });\n\n    test('Test StuffDocumentsChain with Chat model', () async {\n      const model = FakeEchoChatModel();\n      await testStuffDocumentsChain(model);\n    });\n\n    test('Test promptLength', () async {\n      const model = FakeEchoLLM();\n      final prompt = PromptTemplate.fromTemplate(\n        'Print {foo}. Context: {context}',\n      );\n      final llmChain = LLMChain(prompt: prompt, llm: model);\n      final stuffChain = StuffDocumentsChain(llmChain: llmChain);\n\n      const foo = 'Hello world!';\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n      ];\n\n      final tokens = await stuffChain.promptLength(docs, inputs: {'foo': foo});\n      expect(tokens, 7);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/conversation_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ConversationChain tests', () {\n    test('Test conversation chain works', () async {\n      const llm = FakeEchoLLM();\n      final chain = ConversationChain(llm: llm);\n\n      final prompt = chain.prompt as PromptTemplate;\n      final template = prompt.template;\n\n      const userInput1 = 'Hello';\n      final expectedRes1 = template\n          .replaceAll('{history}', '')\n          .replaceAll('{input}', userInput1);\n      final res1 = await chain.run(userInput1);\n      expect(res1, expectedRes1);\n\n      const userInput2 = 'World';\n      final expectedRes2 = template\n          .replaceAll('{history}', 'Human: $userInput1\\nAI: $expectedRes1')\n          .replaceAll('{input}', userInput2);\n      final res2 = await chain.run(userInput2);\n      expect(res2, expectedRes2);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/retrieval_qa_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RetrievalQAChain tests', () {\n    test('Test RetrievalQAChain', () async {\n      final documents = [\n        const Document(pageContent: 'hello'),\n        const Document(pageContent: 'hi'),\n        const Document(pageContent: 'bye'),\n        const Document(pageContent: \"what's this\"),\n      ];\n      final embeddings = _FakeEmbeddings();\n      final vectorStore = MemoryVectorStore(embeddings: embeddings);\n      await vectorStore.addDocuments(documents: documents);\n      final retriever = VectorStoreRetriever(vectorStore: vectorStore);\n\n      const model = FakeEchoLLM();\n      final prompt = PromptTemplate.fromTemplate(\n        '{context}\\n\\nQuestion: {question}',\n      );\n      final llmChain = LLMChain(prompt: prompt, llm: model);\n      final stuffChain = StuffDocumentsChain(llmChain: llmChain);\n\n      final retrievalQA = RetrievalQAChain(\n        retriever: retriever,\n        combineDocumentsChain: stuffChain,\n      );\n\n      const query = 'What did I say?';\n      final res = await retrievalQA({'query': query});\n      expect(res['query'], query);\n      expect(\n        res['result'],\n        \"what's this\\n\\nbye\\n\\nhi\\n\\nhello\\n\\nQuestion: What did I say?\",\n      );\n    });\n\n    test('Test RetrievalQAChain.fromLlm', () async {\n      final documents = [\n        const Document(pageContent: 'hello'),\n        const Document(pageContent: 'hi'),\n        const Document(pageContent: 'bye'),\n        const Document(pageContent: \"what's this\"),\n      ];\n      final embeddings = _FakeEmbeddings();\n      final vectorStore = MemoryVectorStore(embeddings: embeddings);\n      await vectorStore.addDocuments(documents: documents);\n      final retriever = VectorStoreRetriever(vectorStore: vectorStore);\n\n      const llm = FakeEchoLLM();\n      final retrievalQA = RetrievalQAChain.fromLlm(\n        llm: 
llm,\n        retriever: retriever,\n      );\n\n      const query = 'What did I say?';\n      final res = await retrievalQA({'query': query});\n\n      const expectedRes = '''\nUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nwhat's this\n\nbye\n\nhi\n\nhello\n\nQuestion: What did I say?\nHelpful Answer:''';\n\n      expect(res['query'], query);\n      expect(res['result'], expectedRes);\n    });\n  });\n}\n\nclass _FakeEmbeddings extends Embeddings {\n  _FakeEmbeddings();\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    return [0, 1];\n  }\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    return List.generate(documents.length, (final i) => [0, 1 / i]);\n  }\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/sequential_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('SequentialChain tests', () {\n    test('Test sequential on single input chains', () async {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        chains: [chain1, chain2],\n        inputKeys: {'foo'},\n      );\n      final output = await chain({'foo': '123'});\n      final expectedOutput = {'baz': '123foofoo', 'foo': '123'};\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test sequential on multiple input chains', () async {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo', 'test'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar', 'foo'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        chains: [chain1, chain2],\n        inputKeys: {'foo', 'test'},\n      );\n      final output = await chain({'foo': '123', 'test': '456'});\n      final expectedOutput = {\n        'baz': '123 456foo 123foo',\n        'foo': '123',\n        'test': '456',\n      };\n      expect(output, equals(expectedOutput));\n    });\n\n    test(\n      'Test input and output keys are infer correctly if not provided',\n      () async {\n        final chain1 = _FakeChain(\n          inputVariables: {'foo', 'test'},\n          outputVariables: {'bar'},\n        );\n        final chain2 = _FakeChain(\n          inputVariables: {'bar', 'foo'},\n          outputVariables: {'baz'},\n        );\n        final chain = SequentialChain(chains: [chain1, chain2]);\n        final output = await chain({'foo': '123', 'test': '456'});\n        final expectedOutput = {\n          'baz': '123 456foo 123foo',\n          'foo': '123',\n       
   'test': '456',\n        };\n        expect(output, equals(expectedOutput));\n      },\n    );\n\n    test('Test sequential usage with memory', () async {\n      const memory = SimpleMemory(memories: {'zab': 'rab'});\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        memory: memory,\n        chains: [chain1, chain2],\n        inputKeys: {'foo'},\n      );\n      final output = await chain({'foo': '123'});\n      final expectedOutput = {'baz': '123foofoo', 'foo': '123', 'zab': 'rab'};\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test assert is raised when memory keys and input keys overlap', () {\n      const memory = SimpleMemory(memories: {'zab': 'rab', 'foo': 'rab'});\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SequentialChain(\n          memory: memory,\n          chains: [chain1, chain2],\n          inputKeys: {'foo'},\n        ),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test memory in one of the internal chains', () async {\n      final memory = ConversationBufferMemory(memoryKey: 'bla');\n      await memory.saveContext(\n        inputValues: {'input': 'yo'},\n        outputValues: {'output': 'ya'},\n      );\n\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n        memory: memory,\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        chains: [chain1, chain2],\n        inputKeys: {'foo'},\n      );\n      
final output = await chain({'foo': '123'});\n      final expectedOutput = {\n        'foo': '123',\n        'baz': '123 Human: yo\\nAI: yafoofoo',\n      };\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test sequential usage on multiple output chains', () async {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar', 'test'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar', 'foo'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        chains: [chain1, chain2],\n        inputKeys: {'foo'},\n      );\n      final output = await chain({'foo': '123'});\n      final expectedOutput = {'baz': '123foo 123foo', 'foo': '123'};\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test error is raised when input keys are missing', () {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar', 'test'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SequentialChain(chains: [chain1, chain2], inputKeys: {'foo'}),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test error is raised when bad outputs are specified', () {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SequentialChain(\n          chains: [chain1, chain2],\n          inputKeys: {'foo'},\n          outputKeys: {'test'},\n        ),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test returnOnlyOutputs', () async {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        
inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      final chain = SequentialChain(\n        chains: [chain1, chain2],\n        inputKeys: {'foo'},\n        outputKeys: {'bar', 'baz'},\n      );\n      final output = await chain({'foo': '123'}, returnOnlyOutputs: true);\n      final expectedOutput = {'baz': '123foofoo', 'bar': '123foo'};\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test error is raised when input variables are overlapping', () {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar', 'test'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SequentialChain(\n          chains: [chain1, chain2],\n          inputKeys: {'foo', 'test'},\n        ),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n  });\n\n  group('SimpleSequentialChain tests', () {\n    test('Test simple sequential functionality', () async {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      final chain = SimpleSequentialChain(chains: [chain1, chain2]);\n      final output = await chain({'input': '123'});\n      final expectedOutput = {'output': '123foofoo', 'input': '123'};\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test error raised if multiple input variables are expected', () {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar', 'foo'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SimpleSequentialChain(chains: [chain1, chain2]),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test error raised 
if multiple output variables are expected', () {\n      final chain1 = _FakeChain(\n        inputVariables: {'foo'},\n        outputVariables: {'bar', 'grok'},\n      );\n      final chain2 = _FakeChain(\n        inputVariables: {'bar'},\n        outputVariables: {'baz'},\n      );\n      expect(\n        () => SimpleSequentialChain(chains: [chain1, chain2]),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n  });\n}\n\n/// Fake chain for testing purposes.\n///\n/// The output variable is the input values concatenated with space and with the\n/// string 'foo' appended to the end.\nclass _FakeChain extends BaseChain {\n  _FakeChain({\n    required this.inputVariables,\n    required this.outputVariables,\n    super.memory,\n  });\n\n  final Set<String> inputVariables;\n\n  final Set<String> outputVariables;\n\n  @override\n  String get chainType => 'fake_chain';\n\n  @override\n  Set<String> get inputKeys => inputVariables;\n\n  @override\n  Set<String> get outputKeys => outputVariables;\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    final outputs = <String, dynamic>{};\n    for (final variable in outputVariables) {\n      final variables = [\n        for (final k in {...inputVariables, ...?memory?.memoryKeys}) inputs[k],\n      ];\n      outputs[variable] = \"${variables.join(' ')}foo\";\n    }\n    return outputs;\n  }\n}\n"
  },
  {
    "path": "packages/langchain/test/chains/summarization/summarize_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('SummarizeChain tests', () {\n    test('Test SummarizeChain.stuff', () async {\n      final llm = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                'Write a concise summary of the following:\\n\\n\\n\"Hello 1!\\n\\nHello 2!\"\\n\\n\\nCONCISE SUMMARY:',\n              );\n              return 'Hello 12!';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n\n      final stuffSummarizeChain = SummarizeChain.stuff(llm: llm);\n\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n      ];\n      final res = await stuffSummarizeChain.call({\n        SummarizeChain.defaultInputKey: docs,\n      });\n\n      expect(res[SummarizeChain.defaultOutputKey], 'Hello 12!');\n    });\n\n    test('Test SummarizeChain.mapReduce', () async {\n      final llm = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                'Write a concise summary of the following:\\n\\n\\n\"Hello 1!\"\\n\\n\\nCONCISE SUMMARY:',\n              );\n              return '1';\n            case 2:\n              expect(\n                prompt,\n                'Write a concise summary of the following:\\n\\n\\n\"Hello 2!\"\\n\\n\\nCONCISE SUMMARY:',\n              );\n              return '2';\n            case 3:\n              expect(\n                prompt,\n                'Write a concise summary of the following:\\n\\n\\n\"Hello 3!\"\\n\\n\\nCONCISE SUMMARY:',\n              );\n              return '3';\n            case 4:\n              expect(\n                
prompt,\n                'Write a concise summary of the following:\\n\\n\\n\"1\\n\\n2\\n\\n3\"\\n\\n\\nCONCISE SUMMARY:',\n              );\n              return 'Hello 123!';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n\n      final mapReduceSummarizeChain = SummarizeChain.mapReduce(llm: llm);\n\n      const docs = [\n        Document(pageContent: 'Hello 1!'),\n        Document(pageContent: 'Hello 2!'),\n        Document(pageContent: 'Hello 3!'),\n      ];\n      final res = await mapReduceSummarizeChain.call({\n        SummarizeChain.defaultInputKey: docs,\n      });\n\n      expect(res[SummarizeChain.defaultOutputKey], 'Hello 123!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/chat_history/in_memory_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatMessageHistory tests', () {\n    test('Test addMessage and getMessages', () async {\n      final history = ChatMessageHistory();\n      final message = ChatMessage.humanText('This is a test');\n      await history.addChatMessage(message);\n      expect(await history.getChatMessages(), [message]);\n    });\n\n    test('Test addUserMessage', () async {\n      final history = ChatMessageHistory();\n      await history.addHumanChatMessage('This is a human msg');\n      final messages = await history.getChatMessages();\n      expect(messages.first, isA<HumanChatMessage>());\n      expect(messages.first.contentAsString, 'This is a human msg');\n    });\n\n    test('Test addAIChatMessage', () async {\n      final history = ChatMessageHistory();\n      await history.addAIChatMessage('This is an AI msg');\n      final messages = await history.getChatMessages();\n      expect(messages.first, isA<AIChatMessage>());\n      expect(messages.first.contentAsString, 'This is an AI msg');\n    });\n\n    test('Test removeLast', () async {\n      final history = ChatMessageHistory();\n      final message = ChatMessage.humanText('This is a test');\n      final message2 = ChatMessage.ai('This is an AI msg');\n      await history.addChatMessage(message);\n      await history.addChatMessage(message2);\n      final oldestMessage = await history.removeLast();\n      expect(oldestMessage, isA<AIChatMessage>());\n      expect(oldestMessage.contentAsString, 'This is an AI msg');\n      final messages = await history.getChatMessages();\n      expect(messages.length, 1);\n      expect(messages.first, isA<HumanChatMessage>());\n      expect(messages.first.contentAsString, 'This is a test');\n    });\n\n    test('Test removeFirst', () async {\n      final history = ChatMessageHistory();\n      final message = ChatMessage.humanText('This is a test');\n      final message2 = 
ChatMessage.ai('This is an AI msg');\n      await history.addChatMessage(message);\n      await history.addChatMessage(message2);\n      final oldestMessage = await history.removeFirst();\n      expect(oldestMessage, isA<HumanChatMessage>());\n      expect(oldestMessage.contentAsString, 'This is a test');\n      final messages = await history.getChatMessages();\n      expect(messages.length, 1);\n      expect(messages.first, isA<AIChatMessage>());\n      expect(messages.first.contentAsString, 'This is an AI msg');\n    });\n\n    test('Test clear', () async {\n      final history = ChatMessageHistory();\n      final message = ChatMessage.humanText('This is a test');\n      await history.addChatMessage(message);\n      await history.clear();\n      expect(await history.getChatMessages(), <ChatMessage>[]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/embeddings/cache.dart",
    "content": "import 'dart:convert';\nimport 'dart:typed_data';\n\nimport 'package:crypto/crypto.dart';\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\nimport 'package:uuid/uuid.dart';\n\nvoid main() {\n  group('CacheBackedEmbeddings', () {\n    late InMemoryStore<String, List<double>> store;\n    late CacheBackedEmbeddings cacheBackedEmbeddings;\n\n    setUp(() {\n      store = InMemoryStore();\n      cacheBackedEmbeddings = CacheBackedEmbeddings(\n        underlyingEmbeddings: FakeEmbeddings(deterministic: false),\n        documentEmbeddingsStore: store,\n      );\n    });\n\n    test(\n      'embedDocuments returns correct embeddings, and fills missing embeddings',\n      () async {\n        final preStoreRes = await store.get(['testDoc']);\n        expect(preStoreRes.first, isNull);\n        final res1 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'testDoc'),\n        ]);\n        final storeRes1 = await store.get(['testDoc']);\n        expect(res1, storeRes1);\n        final res2 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'testDoc'),\n        ]);\n        expect(res2, storeRes1);\n        final newDocStoreRes = await store.get(['newDoc']);\n        expect(newDocStoreRes.first, isNull);\n        final res3 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'newDoc'),\n        ]);\n        final storeRes3 = await store.get(['newDoc']);\n        expect(res3, storeRes3);\n      },\n    );\n\n    test('embedQuery is not cached', () async {\n      final result = await cacheBackedEmbeddings.embedQuery('testQuery');\n      final storeResult = await store.get(['testQuery']);\n      expect(result.first, isNotNull);\n      expect(storeResult.first, isNull);\n    });\n  });\n\n  group('CacheBackedEmbeddings.fromByteStore', () {\n    late InMemoryStore<String, Uint8List> store;\n    late CacheBackedEmbeddings 
cacheBackedEmbeddings;\n\n    setUp(() {\n      store = InMemoryStore();\n      cacheBackedEmbeddings = CacheBackedEmbeddings.fromByteStore(\n        underlyingEmbeddings: FakeEmbeddings(),\n        documentEmbeddingsStore: store,\n      );\n    });\n\n    test(\n      'embedDocuments returns correct embeddings, and fills missing embeddings',\n      () async {\n        final res1 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'testDoc'),\n        ]);\n        final res2 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'testDoc'),\n        ]);\n        expect(res1, res2);\n        final res3 = await cacheBackedEmbeddings.embedDocuments([\n          const Document(pageContent: 'newDoc'),\n        ]);\n        expect(res3, isNot(res2));\n      },\n    );\n\n    test('embedQuery is not cached', () async {\n      final result = await cacheBackedEmbeddings.embedQuery('testQuery');\n      final storeResult = await store.get(['testQuery']);\n      expect(result.first, isNotNull);\n      expect(storeResult.first, isNull);\n    });\n  });\n\n  group('EmbeddingsByteStoreEncoder tests', () {\n    const namespace = 'test';\n    const uuid = Uuid();\n    const key = 'key';\n    final keyHash = sha1.convert(utf8.encode(key)).toString();\n    final expectedEncodedKey = uuid.v5(Namespace.url.value, keyHash);\n    final value = [0.1, 0.2, 0.3];\n    final expectedEncodedValue = Uint8List.fromList(\n      utf8.encode(json.encode(value)),\n    );\n\n    const encoder = EmbeddingsByteStoreEncoder(namespace: namespace);\n\n    test('encodeKey returns encoded key', () {\n      final result = encoder.encodeKey(key);\n      expect(result, expectedEncodedKey);\n    });\n\n    test('encodeValue returns encoded value', () {\n      final result = encoder.encodeValue(value);\n      expect(result, expectedEncodedValue);\n    });\n\n    test('decodeKey throws UnimplementedError', () {\n      expect(\n        () => 
encoder.decodeKey('anyKey'),\n        throwsA(isA<UnimplementedError>()),\n      );\n    });\n\n    test('decodeValue returns decoded value', () {\n      final result = encoder.decodeValue(expectedEncodedValue);\n      expect(result, value);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/buffer_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ConversationBufferMemory tests', () {\n    test('Test buffer memory', () async {\n      final memory = ConversationBufferMemory();\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: ''});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedString});\n    });\n\n    test('Test buffer memory return messages', () async {\n      final memory = ConversationBufferMemory(returnMessages: true);\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: <ChatMessage>[]});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final expectedResult = [\n        ChatMessage.humanText('bar'),\n        ChatMessage.ai('foo'),\n      ];\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedResult});\n    });\n\n    test('Test chat message as input and output', () async {\n      final memory = ConversationBufferMemory(returnMessages: true);\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: <ChatMessage>[]});\n\n      await memory.saveContext(\n        inputValues: {\n          'foo': ChatMessage.tool(toolCallId: 'foo', content: 'bar'),\n        },\n        outputValues: {'bar': ChatMessage.ai('baz')},\n      );\n      final expectedResult = [\n        ChatMessage.tool(toolCallId: 'foo', content: 'bar'),\n        ChatMessage.ai('baz'),\n      ];\n      final result2 = await 
memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedResult});\n    });\n\n    test('Test buffer memory with pre-loaded history', () async {\n      final pastMessages = [\n        ChatMessage.humanText(\"My name's Jonas\"),\n        ChatMessage.ai('Nice to meet you, Jonas!'),\n      ];\n      final memory = ConversationBufferMemory(\n        returnMessages: true,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final result = await memory.loadMemoryVariables();\n      expect(result, {BaseMemory.defaultMemoryKey: pastMessages});\n    });\n\n    test('Test clear memory', () async {\n      final memory = ConversationBufferMemory();\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: expectedString});\n\n      await memory.clear();\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: ''});\n    });\n\n    test(\n      'Test reserved keys are ignored when selecting prompt input keys',\n      () async {\n        final memory = ConversationBufferMemory(returnMessages: true);\n        await memory.saveContext(\n          inputValues: {\n            'foo': 'bar',\n            'stop': 'stop',\n            BaseActionAgent.agentScratchpadInputKey: 'baz',\n          },\n          outputValues: {'bar': 'foo'},\n        );\n        final expectedResult = [\n          ChatMessage.humanText('bar'),\n          ChatMessage.ai('foo'),\n        ];\n        final result1 = await memory.loadMemoryVariables();\n        expect(result1, {BaseMemory.defaultMemoryKey: expectedResult});\n      },\n    );\n\n    test('Test multiple input values with inputKey specified', () async {\n      final memory = ConversationBufferMemory(\n        
returnMessages: true,\n        inputKey: 'foo2',\n      );\n      await memory.saveContext(\n        inputValues: {\n          'foo1': 'bar1',\n          'foo2': 'bar2',\n          BaseActionAgent.agentScratchpadInputKey: 'baz',\n        },\n        outputValues: {'bar': 'foo'},\n      );\n      final expectedResult = [\n        ChatMessage.humanText('bar2'),\n        ChatMessage.ai('foo'),\n      ];\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: expectedResult});\n    });\n\n    test('Test error is thrown if inputKey not specified when using with '\n        'multiple input values', () {\n      final memory = ConversationBufferMemory(returnMessages: true);\n\n      // expect throws exception if no input keys are selected\n      expect(\n        () async => memory.saveContext(\n          inputValues: {'foo1': 'bar1', 'foo2': 'bar2'},\n          outputValues: {'bar': 'foo'},\n        ),\n        throwsException,\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/buffer_window_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ConversationBufferWindowMemory tests', () {\n    test('Test buffer memory', () async {\n      final memory = ConversationBufferWindowMemory();\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {'history': ''});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedString});\n    });\n\n    test('Test buffer memory return messages', () async {\n      final memory = ConversationBufferWindowMemory(k: 1, returnMessages: true);\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: <ChatMessage>[]});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final expectedResult = [\n        ChatMessage.humanText('bar'),\n        ChatMessage.ai('foo'),\n      ];\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedResult});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar1'},\n        outputValues: {'bar': 'foo1'},\n      );\n\n      final expectedResult2 = [\n        ChatMessage.humanText('bar1'),\n        ChatMessage.ai('foo1'),\n      ];\n      final result3 = await memory.loadMemoryVariables();\n      expect(result3, {BaseMemory.defaultMemoryKey: expectedResult2});\n    });\n\n    test('Test buffer memory with pre-loaded history', () async {\n      final pastMessages = [\n        ChatMessage.humanText(\"My name's Jonas\"),\n        ChatMessage.ai('Nice to meet you, Jonas!'),\n      ];\n      final memory = ConversationBufferWindowMemory(\n        returnMessages: 
true,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final result = await memory.loadMemoryVariables();\n      expect(result, {BaseMemory.defaultMemoryKey: pastMessages});\n    });\n\n    test('Test k limit', () async {\n      final m1 = ChatMessage.humanText(\"My name's Jonas\");\n      final m2 = ChatMessage.ai('Nice to meet you, Jonas!');\n      final m3 = ChatMessage.humanText('What is your name?');\n      final m4 = ChatMessage.ai(\"My name's GPT-3\");\n      final pastMessages = [m1, m2, m3, m4];\n      // k = 0\n      final memory0 = ConversationBufferWindowMemory(\n        returnMessages: true,\n        k: 0,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final res0 = await memory0.loadMemoryVariables();\n      expect(res0[BaseMemory.defaultMemoryKey], isEmpty);\n      // k = 1\n      final memory1 = ConversationBufferWindowMemory(\n        returnMessages: true,\n        k: 1,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final res1 = await memory1.loadMemoryVariables();\n      expect(res1[BaseMemory.defaultMemoryKey], equals([m3, m4]));\n      // k = 2\n      final memory2 = ConversationBufferWindowMemory(\n        returnMessages: true,\n        k: 2,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final res2 = await memory2.loadMemoryVariables();\n      expect(res2[BaseMemory.defaultMemoryKey], equals([m1, m2, m3, m4]));\n      // k = 3\n      final memory3 = ConversationBufferWindowMemory(\n        returnMessages: true,\n        k: 3,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final res3 = await memory3.loadMemoryVariables();\n      expect(res3[BaseMemory.defaultMemoryKey], equals([m1, m2, m3, m4]));\n    });\n\n    test('Test clear memory', () async {\n      final memory = ConversationBufferWindowMemory();\n      await memory.saveContext(\n        inputValues: {'foo': 
'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: expectedString});\n\n      await memory.clear();\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: ''});\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/simple_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('SimpleMemory tests', () {\n    test('Test simple memory', () async {\n      const memories = {'foo': 'bar', 'bar': 'foo'};\n      const memory = SimpleMemory(memories: memories);\n      expect(await memory.loadMemoryVariables(), memories);\n      await memory.saveContext(\n        inputValues: {'foo': 'bar2', 'bar': 'foo2'},\n        outputValues: {'foo': 'bar'},\n      );\n      expect(await memory.loadMemoryVariables(), memories);\n      await memory.clear();\n      expect(await memory.loadMemoryVariables(), memories);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/summary_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ConversationSummaryMemory tests', () {\n    test('Test summary memory', () async {\n      final model = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                'Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.\\n\\n'\n                'EXAMPLE\\n'\n                'Current summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.\\n\\n'\n                'New lines of conversation:\\n'\n                'Human: Why do you think artificial intelligence is a force for good?\\n'\n                'AI: Because artificial intelligence will help humans reach their full potential.\\n\\n'\n                'New summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\\n'\n                'END OF EXAMPLE\\n\\n'\n                'Current summary:\\n\\n'\n                '\\n'\n                'New lines of conversation:\\n'\n                'Human: bar\\nAI: foo\\n\\n'\n                'New summary:',\n              );\n              return 'Human said foo';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n      final memory = ConversationSummaryMemory(llm: model);\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: ''});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: 'System: Human said foo'});\n    });\n\n    test('Test summary memory return system messages', () async {\n      final model = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                'Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.\\n\\n'\n                'EXAMPLE\\n'\n                'Current summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.\\n\\n'\n                'New lines of conversation:\\n'\n                'Human: Why do you think artificial intelligence is a force for good?\\n'\n                'AI: Because artificial intelligence will help humans reach their full potential.\\n\\n'\n                'New summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\\n'\n                'END OF EXAMPLE\\n\\n'\n                'Current summary:\\n\\n'\n                '\\n'\n                'New lines of conversation:\\n'\n                'Human: bar\\nAI: foo\\n\\n'\n                'New summary:',\n              );\n              return 'Human said foo';\n            case 2:\n              expect(\n                prompt,\n                'Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.\\n\\n'\n                'EXAMPLE\\n'\n                'Current summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.\\n\\n'\n                'New lines of conversation:\\n'\n                'Human: Why do you think artificial intelligence is a force for good?\\n'\n                'AI: Because artificial intelligence will help humans reach their full potential.\\n\\n'\n                'New summary:\\n'\n                'The human asks what the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\\n'\n                'END OF EXAMPLE\\n\\n'\n                'Current summary:\\nHuman said foo\\n'\n                '\\n'\n                'New lines of conversation:\\n'\n                'Human: bar1\\nAI: foo1\\n\\n'\n                'New summary:',\n              );\n              return 'Human said bar';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n      final memory = ConversationSummaryMemory(\n        llm: model,\n        returnMessages: true,\n      );\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: <ChatMessage>[]});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final expectedResult = [ChatMessage.system('Human said foo')];\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedResult});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar1'},\n        outputValues: {'bar': 'foo1'},\n      );\n\n      final expectedResult2 = [ChatMessage.system('Human said bar')];\n      final result3 = await memory.loadMemoryVariables();\n      expect(result3, {BaseMemory.defaultMemoryKey: expectedResult2});\n    });\n\n    test('Test buffer memory with pre-loaded history', () async {\n      const prompt = PromptTemplate(\n        inputVariables: {'summary', 'new_lines'},\n        template: 'Please summary {summary} with {new_lines}',\n      );\n      final model = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                \"Please summary  with Human: My name's Jonas\\nAI: Nice to meet you, Jonas!\",\n           
   );\n              return 'Human said foo';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n      final pastMessages = [\n        ChatMessage.humanText(\"My name's Jonas\"),\n        ChatMessage.ai('Nice to meet you, Jonas!'),\n      ];\n      final memory = await ConversationSummaryMemory.fromMessages(\n        llm: model,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n        summaryPromptTemplate: prompt,\n      );\n      final result = await memory.loadMemoryVariables();\n      expect(result, {BaseMemory.defaultMemoryKey: 'System: Human said foo'});\n    });\n\n    test('Test clear memory', () async {\n      const prompt = PromptTemplate(\n        inputVariables: {'summary', 'new_lines'},\n        template: 'Please summary {summary} with {new_lines}',\n      );\n      final model = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                'Please summary Human said bar with Human: bar\\nAI: foo',\n              );\n              return 'Human said foo';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n      final memory = ConversationSummaryMemory(\n        llm: model,\n        summaryPromptTemplate: prompt,\n        initialSummary: 'Human said bar',\n      );\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'System: Human said foo';\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: expectedString});\n\n      await memory.clear();\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: ''});\n    });\n\n    test('Test 
summaryMessageBuilder', () async {\n      const prompt = PromptTemplate(\n        inputVariables: {'summary', 'new_lines'},\n        template: 'Please summary {summary} with {new_lines}',\n      );\n      final model = FakeHandlerLLM(\n        handler: (final prompt, final options, final callCount) {\n          switch (callCount) {\n            case 1:\n              expect(\n                prompt,\n                \"Please summary  with Human: My name's Jonas\\nAI: Nice to meet you, Jonas!\",\n              );\n              return 'Human said foo';\n            default:\n              throw TestFailure('Unexpected call count: $callCount');\n          }\n        },\n      );\n      final memory = ConversationSummaryMemory(\n        llm: model,\n        summaryPromptTemplate: prompt,\n        summaryMessageBuilder: ChatMessage.ai,\n      );\n      await memory.saveContext(\n        inputValues: {'foo': \"My name's Jonas\"},\n        outputValues: {'bar': 'Nice to meet you, Jonas!'},\n      );\n      final result = await memory.loadMemoryVariables();\n      expect(result, {BaseMemory.defaultMemoryKey: 'AI: Human said foo'});\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/token_buffer_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ConversationTokenBufferMemory tests', () {\n    test('Test buffer memory', () async {\n      const model = FakeEchoLLM();\n      final memory = ConversationTokenBufferMemory(llm: model);\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {'history': ''});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedString});\n    });\n\n    test('Test buffer memory return messages', () async {\n      const model = FakeEchoLLM();\n      final memory = ConversationTokenBufferMemory(\n        llm: model,\n        returnMessages: true,\n        maxTokenLimit: 4,\n      );\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: <ChatMessage>[]});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final expectedResult = [\n        ChatMessage.humanText('bar'),\n        ChatMessage.ai('foo'),\n      ];\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: expectedResult});\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar1'},\n        outputValues: {'bar': 'foo1'},\n      );\n\n      final expectedResult2 = [\n        ChatMessage.ai('foo'),\n        ChatMessage.humanText('bar1'),\n        ChatMessage.ai('foo1'),\n      ];\n      final result3 = await memory.loadMemoryVariables();\n      expect(result3, {BaseMemory.defaultMemoryKey: expectedResult2});\n    });\n\n    test('Test buffer memory with pre-loaded history', () async {\n      final pastMessages = [\n        
ChatMessage.humanText(\"My name's Jonas\"),\n        ChatMessage.ai('Nice to meet you, Jonas!'),\n      ];\n      const model = FakeEchoLLM();\n      final memory = ConversationTokenBufferMemory(\n        llm: model,\n        maxTokenLimit: 3,\n        returnMessages: true,\n        chatHistory: ChatMessageHistory(messages: pastMessages),\n      );\n      final result = await memory.loadMemoryVariables();\n      expect(result, {BaseMemory.defaultMemoryKey: pastMessages});\n    });\n\n    test('Test clear memory', () async {\n      final memory = ConversationBufferMemory();\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      const expectedString = 'Human: bar\\nAI: foo';\n      final result1 = await memory.loadMemoryVariables();\n      expect(result1, {BaseMemory.defaultMemoryKey: expectedString});\n\n      await memory.clear();\n      final result2 = await memory.loadMemoryVariables();\n      expect(result2, {BaseMemory.defaultMemoryKey: ''});\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/memory/vector_store_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('VectorStoreMemory tests', () {\n    test('Test vector store memory', () async {\n      final embeddings = _FakeEmbeddings();\n      final vectorStore = MemoryVectorStore(embeddings: embeddings);\n      final memory = VectorStoreMemory(vectorStore: vectorStore);\n\n      final result1 = await memory.loadMemoryVariables({'input': 'foo'});\n      expect(result1[VectorStoreMemory.defaultMemoryKey], '');\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final result2 = await memory.loadMemoryVariables({'input': 'foo'});\n      expect(result2[VectorStoreMemory.defaultMemoryKey], 'foo: bar\\nbar: foo');\n    });\n\n    test('Test returnDocs', () async {\n      final embeddings = _FakeEmbeddings();\n      final vectorStore = MemoryVectorStore(embeddings: embeddings);\n      final memory = VectorStoreMemory(\n        vectorStore: vectorStore,\n        returnDocs: true,\n      );\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final result = await memory.loadMemoryVariables({'input': 'foo'});\n      const expectedDoc = Document(pageContent: 'foo: bar\\nbar: foo');\n      expect(result[VectorStoreMemory.defaultMemoryKey], [expectedDoc]);\n    });\n\n    test('Test excludeInputKeys', () async {\n      final embeddings = _FakeEmbeddings();\n      final vectorStore = MemoryVectorStore(embeddings: embeddings);\n      final memory = VectorStoreMemory(\n        vectorStore: vectorStore,\n        excludeInputKeys: {'foo'},\n      );\n\n      final result1 = await memory.loadMemoryVariables({'input': 'foo'});\n      expect(result1[VectorStoreMemory.defaultMemoryKey], '');\n\n      await memory.saveContext(\n        inputValues: {'foo': 'bar'},\n        outputValues: {'bar': 'foo'},\n      );\n      final 
result2 = await memory.loadMemoryVariables({'input': 'foo'});\n      expect(result2[VectorStoreMemory.defaultMemoryKey], 'bar: foo');\n    });\n  });\n}\n\nclass _FakeEmbeddings extends Embeddings {\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    return documents\n        .map((final doc) => _embed(doc.pageContent))\n        .toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    return _embed(query);\n  }\n\n  List<double> _embed(final String text) {\n    return switch (text) {\n      'foo' => [1.0, 1.0],\n      'bar' => [-1.0, -1.0],\n      'foo: bar\\nbar: foo' => [1.0, -1.0],\n      'bar: foo' => [-1.0, 1.0],\n      _ => throw Exception('Unknown text: $text'),\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain/test/stores/encoder_backed.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('EncoderBackedStore tests', () {\n    test(\n      'EncoderBackedStore should encode and decode key-value pairs using InMemoryStore',\n      () async {\n        final encoderBackedStore = EncoderBackedStore(\n          store: InMemoryStore<String, String>(),\n          encoder: SampleEncoder(),\n        );\n\n        final keyValuePairs = [(1, 'One'), (2, 'Two'), (3, 'Three')];\n\n        await encoderBackedStore.set(keyValuePairs);\n\n        // Verify if the key-value pairs are properly encoded and stored\n        final keys = keyValuePairs.map((final pair) => pair.$1).toList();\n        final values = await encoderBackedStore.get(keys);\n\n        for (var i = 0; i < keyValuePairs.length; i++) {\n          expect(values[i], equals(keyValuePairs[i].$2));\n        }\n\n        // Test for delete function\n        await encoderBackedStore.delete([keyValuePairs[0].$1]);\n\n        // Check if the first key is deleted\n        final deletedValues = await encoderBackedStore.get(keys);\n        expect(deletedValues[0], equals(null));\n\n        // Test yieldKeys\n        final stream = encoderBackedStore.yieldKeys();\n        await for (final key in stream) {\n          expect(key, isNot(equals(keyValuePairs[0].$1)));\n        }\n      },\n    );\n  });\n}\n\nclass SampleEncoder implements StoreEncoder<int, String, String, String> {\n  @override\n  String encodeKey(final int key) => '$key';\n\n  @override\n  String encodeValue(final String value) => \"'$value'\";\n\n  @override\n  int decodeKey(final String encodedKey) => int.parse(encodedKey);\n\n  @override\n  String decodeValue(final String encodedValue) =>\n      encodedValue.substring(1, encodedValue.length - 1);\n}\n"
  },
  {
    "path": "packages/langchain/test/stores/file_system.dart",
    "content": "import 'dart:io';\nimport 'dart:typed_data';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  late String tempDirPath;\n  late Directory tempDir;\n\n  setUp(() {\n    tempDir = Directory.systemTemp.createTempSync();\n    tempDirPath = tempDir.path;\n  });\n\n  tearDown(() {\n    tempDir.deleteSync(recursive: true);\n  });\n\n  test('should create, retrieve and delete a file', () async {\n    final store = LocalFileStore(tempDirPath);\n    final data = Uint8List.fromList([1, 2, 3]);\n\n    await store.set([('testKey', data)]);\n    expect(await File('$tempDirPath/testKey').readAsBytes(), data);\n\n    final readData = await store.get(['testKey']);\n    expect(readData.length, 1);\n    expect(readData.first, data);\n\n    await store.delete(['testKey']);\n    expect(File('$tempDirPath/testKey').existsSync(), false);\n  });\n\n  test('should yield the correct keys', () async {\n    final store = LocalFileStore(tempDirPath);\n    final data = Uint8List.fromList([1, 2, 3]);\n\n    await store.set([('keyA', data), ('keyB', data), ('prefixKeyC', data)]);\n\n    final keys = await store.yieldKeys().toList();\n    expect(keys, containsAll(<String>['keyA', 'keyB', 'prefixKeyC']));\n\n    final prefixedKeys = await store.yieldKeys(prefix: 'prefix').toList();\n    expect(prefixedKeys, contains('prefixKeyC'));\n  });\n\n  test('should handle a missing key gracefully', () async {\n    final store = LocalFileStore(tempDirPath);\n\n    // No exception should be thrown here.\n    await store.delete(['missingKey']);\n    final readData = await store.get(['missingKey']);\n\n    // The returned data should be a list of one item being null.\n    expect(readData.length, 1);\n    expect(readData.first, isNull);\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/stores/in_memory.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('InMemoryStore tests', () {\n    final store = InMemoryStore<String, int>();\n\n    test('set and get', () async {\n      final pairs = [('key1', 1), ('key2', 2)];\n      await store.set(pairs);\n      final values = await store.get(['key1', 'key2']);\n      expect(values, equals([1, 2]));\n    });\n\n    test('delete', () async {\n      await store.set([('key3', 3)]);\n      await store.delete(['key3']);\n      final values = await store.get(['key3']);\n      expect(values, equals([null]));\n    });\n\n    test('yieldKeys', () async {\n      await store.set([('key4', 4)]);\n      await store.set([('prefixKey5', 5)]);\n      final List<String> keys = await store\n          .yieldKeys(prefix: 'prefix')\n          .toList();\n      expect(keys, equals(['prefixKey5']));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/text_splitters/character_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('CharacterTextSplitter tests', () {\n    test('Test splitting by character count', () {\n      const text = 'foo bar baz 123';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo bar', 'bar baz', 'baz 123'];\n      expect(output, expectedOutput);\n    });\n\n    test(\n      'Test splitting by character count does not create empty documents',\n      () {\n        const text = 'foo  bar';\n        const splitter = CharacterTextSplitter(\n          separator: ' ',\n          chunkSize: 2,\n          chunkOverlap: 0,\n        );\n        final output = splitter.splitText(text);\n        final expectedOutput = ['foo', 'bar'];\n        expect(output, expectedOutput);\n      },\n    );\n\n    test('Test edge cases are separators', () {\n      const text = 'f b';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 2,\n        chunkOverlap: 0,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['f', 'b'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by character count on long words', () {\n      const text = 'foo bar baz a a';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 1,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo', 'bar', 'baz', 'a a'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by character count when shorter words are first', () {\n      const text = 'a a foo bar baz';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 1,\n      );\n      final output = 
splitter.splitText(text);\n      final expectedOutput = ['a a', 'foo', 'bar', 'baz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by characters when splits not found easily', () {\n      const text = 'foo bar baz 123';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 1,\n        chunkOverlap: 1,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo', 'bar', 'baz', '123'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test merging splits with a given separator', () {\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 9,\n        chunkOverlap: 2,\n      );\n      final splits = ['foo', 'bar', 'baz'];\n      final expectedOutput = ['foo bar', 'baz'];\n      final output = splitter.mergeSplits(splits, ' ');\n      expect(output, expectedOutput);\n    });\n\n    test('Test split documents method', () {\n      const docs = [\n        Document(id: '1', pageContent: 'foo bar', metadata: {'m': '1'}),\n        Document(id: '2', pageContent: 'baz', metadata: {'m': '2'}),\n      ];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final splitDocs = splitter.splitDocuments(docs);\n      final expectedDocs = [\n        const Document(id: '1', pageContent: 'foo', metadata: {'m': '1'}),\n        const Document(id: '1', pageContent: 'bar', metadata: {'m': '1'}),\n        const Document(id: '2', pageContent: 'baz', metadata: {'m': '2'}),\n      ];\n      expect(splitDocs, equals(expectedDocs));\n    });\n\n    test('Test create documents method', () {\n      final texts = ['foo bar', 'baz'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final docs = splitter.createDocuments(texts);\n      final expectedDocs = [\n        const 
Document(pageContent: 'foo'),\n        const Document(pageContent: 'bar'),\n        const Document(pageContent: 'baz'),\n      ];\n      expect(docs, equals(expectedDocs));\n    });\n\n    test('Test create documents with metadata method', () {\n      final texts = ['foo bar', 'baz'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final docs = splitter.createDocuments(\n        texts,\n        metadatas: [\n          {'source': '1'},\n          {'source': '2'},\n        ],\n      );\n      final expectedDocs = [\n        const Document(pageContent: 'foo', metadata: {'source': '1'}),\n        const Document(pageContent: 'bar', metadata: {'source': '1'}),\n        const Document(pageContent: 'baz', metadata: {'source': '2'}),\n      ];\n      expect(docs, expectedDocs);\n    });\n\n    test('Test create documents method with start index', () {\n      final texts = ['foo bar baz 123'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n        addStartIndex: true,\n      );\n      final docs = splitter.createDocuments(texts);\n      final expectedDocs = [\n        const Document(pageContent: 'foo bar', metadata: {'start_index': 0}),\n        const Document(pageContent: 'bar baz', metadata: {'start_index': 4}),\n        const Document(pageContent: 'baz 123', metadata: {'start_index': 8}),\n      ];\n      expect(docs, equals(expectedDocs));\n    });\n\n    test('Test that metadatas are not shallow', () {\n      final texts = ['foo bar'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final docs = splitter.createDocuments(\n        texts,\n        metadatas: [\n          {'source': '1'},\n        ],\n      );\n      final expectedDocs = [\n        const Document(pageContent: 'foo', metadata: {'source': '1'}),\n        
const Document(pageContent: 'bar', metadata: {'source': '1'}),\n      ];\n      expect(docs, equals(expectedDocs));\n      docs[0].metadata['foo'] = 1;\n      expect(docs[0].metadata, {'source': '1', 'foo': 1});\n      expect(docs[1].metadata, {'source': '1'});\n    });\n\n    test('Test splitting with keepSeparator enabled', () {\n      const text = 'fooXbarXbaz';\n      const splitter = CharacterTextSplitter(\n        separator: 'X',\n        chunkSize: 7,\n        chunkOverlap: 3,\n        keepSeparator: true,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = ['fooXbar', 'Xbaz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting where text length is equal to chunk size', () {\n      const text = 'foobar';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 6,\n        chunkOverlap: 3,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foobar'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting where chunkSize is greater than text length', () {\n      const text = 'foo bar baz';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 50,\n        chunkOverlap: 3,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo bar baz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting where text has trailing whitespaces', () {\n      const text = 'foo bar baz     ';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo bar', 'bar baz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting where text has special characters', () {\n      const text = r'foo $$$$ bar baz';\n      const splitter = CharacterTextSplitter(\n  
      separator: ' ',\n        chunkSize: 9,\n        chunkOverlap: 3,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = [r'foo $$$$', 'bar baz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test chunkSize smaller than length of a single word', () {\n      const text = 'foo bar';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 2,\n        chunkOverlap: 0,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo', 'bar'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test multiple documents creation', () {\n      final texts = ['foo bar baz', '123 456 789'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 0,\n        addStartIndex: true,\n      );\n      final docs = splitter.createDocuments(texts);\n      final expectedDocs = [\n        const Document(pageContent: 'foo bar', metadata: {'start_index': 0}),\n        const Document(pageContent: 'baz', metadata: {'start_index': 8}),\n        const Document(pageContent: '123 456', metadata: {'start_index': 0}),\n        const Document(pageContent: '789', metadata: {'start_index': 8}),\n      ];\n      expect(docs, equals(expectedDocs));\n    });\n\n    test('Test splitting with an empty string', () {\n      const text = '';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n\n      final output = splitter.splitText(text);\n      final expectedOutput = <String>[]; // no chunks should be generated\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by character count.', () {\n      const text = 'foo bar baz 123';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n      final output = splitter.splitText(text);\n      
final expectedOutput = ['foo bar', 'bar baz', 'baz 123'];\n      expect(output, expectedOutput);\n    });\n\n    test(\n      \"Test splitting by character count doesn't create empty documents.\",\n      () {\n        const text = 'foo  bar';\n        const splitter = CharacterTextSplitter(\n          separator: ' ',\n          chunkSize: 2,\n          chunkOverlap: 0,\n        );\n        final output = splitter.splitText(text);\n        final expectedOutput = ['foo', 'bar'];\n        expect(output, expectedOutput);\n      },\n    );\n\n    test('Test splitting by character count on long words.', () {\n      const text = 'foo bar baz a a';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 1,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo', 'bar', 'baz', 'a a'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by character count when shorter words are first.', () {\n      const text = 'a a foo bar baz';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 1,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['a a', 'foo', 'bar', 'baz'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test splitting by characters when splits not found easily.', () {\n      const text = 'foo bar baz 123';\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 1,\n        chunkOverlap: 0,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = ['foo', 'bar', 'baz', '123'];\n      expect(output, expectedOutput);\n    });\n\n    test('Test invalid arguments.', () {\n      expect(\n        () => CharacterTextSplitter(chunkSize: 2, chunkOverlap: 4),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n\n    test('Test create documents method.', () {\n      
final texts = ['foo bar', 'baz'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final docs = splitter.createDocuments(texts);\n      const expectedDocs = [\n        Document(pageContent: 'foo'),\n        Document(pageContent: 'bar'),\n        Document(pageContent: 'baz'),\n      ];\n      expect(docs, equals(expectedDocs));\n    });\n\n    test('Test create documents with metadata method.', () {\n      final texts = ['foo bar', 'baz'];\n      const splitter = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 3,\n        chunkOverlap: 0,\n      );\n      final docs = splitter.createDocuments(\n        texts,\n        metadatas: [\n          {'source': '1'},\n          {'source': '2'},\n        ],\n      );\n      const expectedDocs = [\n        Document(pageContent: 'foo', metadata: {'source': '1'}),\n        Document(pageContent: 'bar', metadata: {'source': '1'}),\n        Document(pageContent: 'baz', metadata: {'source': '2'}),\n      ];\n      expect(docs, equals(expectedDocs));\n    });\n  });\n\n  group('Runnable tests', () {\n    test('DocumentTransformer as Runnable', () async {\n      const run = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n      final res = await run.invoke([\n        const Document(pageContent: 'foo bar baz 123'),\n      ]);\n      expect(\n        res,\n        equals([\n          const Document(pageContent: 'foo bar'),\n          const Document(pageContent: 'bar baz'),\n          const Document(pageContent: 'baz 123'),\n        ]),\n      );\n    });\n\n    test('Streaming DocumentTransformer', () async {\n      const run = CharacterTextSplitter(\n        separator: ' ',\n        chunkSize: 7,\n        chunkOverlap: 3,\n      );\n      final stream = run.stream([\n        const Document(pageContent: 'foo bar baz 123'),\n      ]);\n\n      final streamList = await 
stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<List<Document>>());\n\n      final item = streamList.first;\n      expect(item, [\n        const Document(pageContent: 'foo bar'),\n        const Document(pageContent: 'bar baz'),\n        const Document(pageContent: 'baz 123'),\n      ]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/text_splitters/code_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('CodeTextSplitter tests', () {\n    const chunkSize = 16;\n\n    test('Test C++ code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.cpp,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\n#include <iostream>\n\nint main() {\n    std::cout << \"Hello, World!\" << std::endl;\n    return 0;\n}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        '#include',\n        '<iostream>',\n        'int main() {',\n        'std::cout',\n        '<< \"Hello,',\n        'World!\" <<',\n        'std::endl;',\n        'return 0;\\n}',\n      ]);\n    });\n\n    test('Test Dart code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.dart,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nvoid main() {\n  print(\"Hello, World!\");\n}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, ['void main() {', 'print(\"Hello,', 'World!\");', '}']);\n    });\n\n    test('Test Golang code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.go,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\npackage main\n\nimport \"fmt\"\n\nfunc helloWorld() {\n    fmt.Println(\"Hello, World!\")\n}\n\nfunc main() {\n    helloWorld()\n}''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'package main',\n        'import \"fmt\"',\n        'func',\n        'helloWorld() {',\n        'fmt.Println(\"He',\n        'llo,',\n        'World!\")',\n        '}',\n        'func main() {',\n        'helloWorld()',\n        '}',\n      ]);\n    });\n\n    test('Test HTML code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.html,\n        
chunkSize: 60,\n        chunkOverlap: 0,\n      );\n      const code = '''\n<h1>Sample Document</h1>\n    <h2>Section</h2>\n        <p id=\"1234\">Reference content.</p>\n\n    <h2>Lists</h2>\n        <ul>\n            <li>Item 1</li>\n            <li>Item 2</li>\n            <li>Item 3</li>\n        </ul>\n\n        <h3>A block</h3>\n            <div class=\"amazing\">\n                <p>Some text</p>\n                <p>Some more text</p>\n            </div>''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        '<h1>Sample Document</h1>\\n    <h2>Section</h2>',\n        '<p id=\"1234\">Reference content.</p>',\n        '<h2>Lists</h2>\\n        <ul>',\n        '<li>Item 1</li>\\n            <li>Item 2</li>',\n        '<li>Item 3</li>\\n        </ul>',\n        '<h3>A block</h3>',\n        '<div class=\"amazing\">',\n        '<p>Some text</p>',\n        '<p>Some more text</p>\\n            </div>',\n      ]);\n    });\n\n    test('Test Java code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.java,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\npublic class HelloWorld {\n    public static void main(String[] args) {\n        System.out.println(\"Hello, World!\");\n    }\n}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'public class',\n        'HelloWorld {',\n        'public',\n        'static void',\n        'main(String[]',\n        'args) {',\n        'System.out.prin',\n        'tln(\"Hello,',\n        'World!\");',\n        '}\\n}',\n      ]);\n    });\n\n    test('Test JavaScript code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.js,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nfunction helloWorld() {\n  console.log(\"Hello, World!\");\n}\n\n// Call the function\nhelloWorld();\n''';\n      final chunks = 
splitter.splitText(code);\n\n      expect(chunks, [\n        'function',\n        'helloWorld() {',\n        'console.log(\"He',\n        'llo,',\n        'World!\");',\n        '}',\n        '// Call the',\n        'function',\n        'helloWorld();',\n      ]);\n    });\n\n    test('Test LaTeX code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.latex,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = r'''\n\\documentclass{article}\n\n\\title{Sections and Chapters}\n\\author{Overleaf}\n\\date{\\today}\n\n\\begin{document}\n\\maketitle\n\\section{Introduction}\n\nThis is a section.\n\\end{document}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        r'\\documentclass{a',\n        r'rticle}',\n        r'\\title{Sections',\n        r'and Chapters}',\n        r'\\author{Overlea',\n        r'f}',\n        r'\\date{\\today}',\n        r'\\begin{document',\n        r'}',\n        r'\\maketitle',\n        r'\\section{Introd',\n        r'uction}',\n        r'This is a',\n        r'section.',\n        r'\\end{document}',\n      ]);\n    });\n\n    test('Test Markdown code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.markdown,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\n# Sample Document\n\n## Section\n\nThis is the content of the section.\n\n## Lists\n\n- Item 1\n- Item 2\n- Item 3\n\n### Horizontal lines\n\n***********\n____________\n-------------------\n\n#### Code blocks\n```\nThis is a code block\n```\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        '# Sample',\n        'Document',\n        '## Section',\n        'This is the',\n        'content of the',\n        'section.',\n        '## Lists',\n        '- Item 1',\n        '- Item 2',\n        '- Item 3',\n        '### Horizontal',\n        'lines',\n        
'***********',\n        '____________',\n        '---------------',\n        '----',\n        '#### Code',\n        'blocks',\n        '```',\n        'This is a code',\n        'block',\n        '```',\n      ]);\n    });\n\n    test('Test PHP code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.php,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\n<?php\nfunction hello_world() {\n    echo \"Hello, World!\";\n}\n\nhello_world();\n?>''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        '<?php',\n        'function',\n        'hello_world() {',\n        'echo',\n        '\"Hello,',\n        'World!\";',\n        '}',\n        'hello_world();',\n        '?>',\n      ]);\n    });\n\n    test('Test proto file', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.proto,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nsyntax = \"proto3\";\n\npackage example;\n\nmessage Person {\n    string name = 1;\n    int32 age = 2;\n    repeated string hobbies = 3;\n}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'syntax =',\n        '\"proto3\";',\n        'package',\n        'example;',\n        'message Person',\n        '{',\n        'string name',\n        '= 1;',\n        'int32 age =',\n        '2;',\n        'repeated',\n        'string hobbies',\n        '= 3;',\n        '}',\n      ]);\n    });\n\n    test('Test Python code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.python,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\ndef hello_world():\n    print(\"Hello, World!\")\n\n# Call the function\nhello_world()''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'def',\n        'hello_world():',\n        'print(\"Hello,',\n        
'World!\")',\n        '# Call the',\n        'function',\n        'hello_world()',\n      ]);\n    });\n\n    test('Test RST code', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.rst,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nSample Document\n===============\n\nSection\n-------\n\nThis is the content of the section.\n\nLists\n-----\n\n- Item 1\n- Item 2\n- Item 3\n\nComment\n*******\nNot a comment\n\n.. This is a comment\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'Sample Document',\n        '===============',\n        'Section',\n        '-------',\n        'This is the',\n        'content of the',\n        'section.',\n        'Lists',\n        '-----',\n        '- Item 1',\n        '- Item 2',\n        '- Item 3',\n        'Comment',\n        '*******',\n        'Not a comment',\n        '.. This is a',\n        'comment',\n      ]);\n    });\n\n    test('Test Ruby code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.ruby,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\ndef hello_world\n  puts \"Hello, World!\"\nend\n\nhello_world''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'def hello_world',\n        'puts \"Hello,',\n        'World!\"',\n        'end',\n        'hello_world',\n      ]);\n    });\n\n    test('Test Rust code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.rust,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nfn main() {\n    println!(\"Hello, World!\");\n}''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, ['fn main() {', 'println!(\"Hello', ',', 'World!\");', '}']);\n    });\n\n    test('Test Scala code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: 
CodeLanguage.scala,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nobject HelloWorld {\n  def main(args: Array[String]): Unit = {\n    println(\"Hello, World!\")\n  }\n}''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'object',\n        'HelloWorld {',\n        'def',\n        'main(args:',\n        'Array[String]):',\n        'Unit = {',\n        'println(\"Hello,',\n        'World!\")',\n        '}\\n}',\n      ]);\n    });\n\n    test('Test Swift code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.swift,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\nfunc helloWorld() {\n    print(\"Hello, World!\")\n}\n\nhelloWorld()''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'func',\n        'helloWorld() {',\n        'print(\"Hello,',\n        'World!\")',\n        '}',\n        'helloWorld()',\n      ]);\n    });\n\n    test('Test Solidity code splitter', () {\n      final splitter = CodeTextSplitter(\n        language: CodeLanguage.solidity,\n        chunkSize: chunkSize,\n        chunkOverlap: 0,\n      );\n      const code = '''\npragma solidity ^0.8.20;\ncontract HelloWorld {\n    function add(uint a, uint b) pure public returns(uint) {\n      return  a + b;\n    }\n}\n''';\n      final chunks = splitter.splitText(code);\n\n      expect(chunks, [\n        'pragma solidity',\n        '^0.8.20;',\n        'contract',\n        'HelloWorld {',\n        'function',\n        'add(uint a,',\n        'uint b) pure',\n        'public',\n        'returns(uint) {',\n        'return  a',\n        '+ b;',\n        '}\\n}',\n      ]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/text_splitters/markdown_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('MarkdownHeaderTextSplitter', () {\n    test('Test markdown splitter by header: Case 1', () {\n      const markdownDocument =\n          '# Foo\\n\\n'\n          '    ## Bar\\n\\n'\n          'Hi this is Jim\\n\\n'\n          'Hi this is Joe\\n\\n'\n          ' ## Baz\\n\\n'\n          ' Hi this is Molly';\n\n      final headersToSplitOn = [('#', 'Header 1'), ('##', 'Header 2')];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: 'Hi this is Jim  \\nHi this is Joe',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar'},\n        ),\n        const Document(\n          pageContent: 'Hi this is Molly',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Baz'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Case 2', () {\n      const markdownDocument =\n          '# Foo\\n\\n'\n          '    ## Bar\\n\\n'\n          'Hi this is Jim\\n\\n'\n          'Hi this is Joe\\n\\n'\n          ' ### Boo \\n\\n'\n          ' Hi this is Lance \\n\\n'\n          ' ## Baz\\n\\n'\n          ' Hi this is Molly';\n\n      final headersToSplitOn = [\n        ('#', 'Header 1'),\n        ('##', 'Header 2'),\n        ('###', 'Header 3'),\n      ];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: 'Hi this is Jim  \\nHi this is Joe',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar'},\n        ),\n        const Document(\n          pageContent: 'Hi 
this is Lance',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'},\n        ),\n        const Document(\n          pageContent: 'Hi this is Molly',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Baz'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Case 3', () {\n      const markdownDocument =\n          '# Foo\\n\\n'\n          '    ## Bar\\n\\n'\n          'Hi this is Jim\\n\\n'\n          'Hi this is Joe\\n\\n'\n          ' ### Boo \\n\\n'\n          ' Hi this is Lance \\n\\n'\n          ' #### Bim \\n\\n'\n          ' Hi this is John \\n\\n'\n          ' ## Baz\\n\\n'\n          ' Hi this is Molly';\n\n      final headersToSplitOn = [\n        ('#', 'Header 1'),\n        ('##', 'Header 2'),\n        ('###', 'Header 3'),\n        ('####', 'Header 4'),\n      ];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: 'Hi this is Jim  \\nHi this is Joe',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar'},\n        ),\n        const Document(\n          pageContent: 'Hi this is Lance',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'},\n        ),\n        const Document(\n          pageContent: 'Hi this is John',\n          metadata: {\n            'Header 1': 'Foo',\n            'Header 2': 'Bar',\n            'Header 3': 'Boo',\n            'Header 4': 'Bim',\n          },\n        ),\n        const Document(\n          pageContent: 'Hi this is Molly',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Baz'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Preserve Headers 1', () {\n      const markdownDocument =\n       
   '# Foo\\n\\n'\n          '    ## Bat\\n\\n'\n          'Hi this is Jim\\n\\n'\n          'Hi Joe\\n\\n'\n          '## Baz\\n\\n'\n          '# Bar\\n\\n'\n          'This is Alice\\n\\n'\n          'This is Bob';\n\n      final headersToSplitOn = [('#', 'Header 1')];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n        stripHeaders: false,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: '# Foo  \\n## Bat  \\nHi this is Jim  \\nHi Joe  \\n## Baz',\n          metadata: {'Header 1': 'Foo'},\n        ),\n        const Document(\n          pageContent: '# Bar  \\nThis is Alice  \\nThis is Bob',\n          metadata: {'Header 1': 'Bar'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Preserve Headers 2', () {\n      const markdownDocument =\n          '# Foo\\n\\n'\n          '    ## Bar\\n\\n'\n          'Hi this is Jim\\n\\n'\n          'Hi this is Joe\\n\\n'\n          '### Boo \\n\\n'\n          'Hi this is Lance\\n\\n'\n          '## Baz\\n\\n'\n          'Hi this is Molly\\n'\n          '    ## Buz\\n'\n          '# Bop';\n\n      final headersToSplitOn = [\n        ('#', 'Header 1'),\n        ('##', 'Header 2'),\n        ('###', 'Header 3'),\n      ];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n        stripHeaders: false,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: '# Foo  \\n## Bar  \\nHi this is Jim  \\nHi this is Joe',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar'},\n        ),\n        const Document(\n          pageContent: '### Boo  \\nHi this is Lance',\n          metadata: {'Header 1': 'Foo', 'Header 2': 
'Bar', 'Header 3': 'Boo'},\n        ),\n        const Document(\n          pageContent: '## Baz  \\nHi this is Molly',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Baz'},\n        ),\n        const Document(\n          pageContent: '## Buz',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Buz'},\n        ),\n        const Document(pageContent: '# Bop', metadata: {'Header 1': 'Bop'}),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Fenced code block', () {\n      const markdownDocument =\n          '# This is a Header\\n\\n'\n          '```\\n'\n          'foo()\\n'\n          '# Not a header\\n'\n          'bar()\\n'\n          '```';\n\n      final headersToSplitOn = [('#', 'Header 1'), ('##', 'Header 2')];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: '```\\nfoo()\\n# Not a header\\nbar()\\n```',\n          metadata: {'Header 1': 'This is a Header'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: Interleaved fenced code block', () {\n      const markdownDocument =\n          '# This is a Header\\n\\n'\n          '```\\n'\n          'foo\\n'\n          '# Not a header\\n'\n          '~~~\\n'\n          '# Not a header\\n'\n          '```';\n\n      final headersToSplitOn = [('#', 'Header 1'), ('##', 'Header 2')];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(\n          pageContent: '```\\nfoo\\n# Not a header\\n~~~\\n# Not a header\\n```',\n          metadata: {'Header 1': 'This is a 
Header'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n\n    test('Test markdown splitter by header: With invisible characters', () {\n      const markdownDocument = '\\uFEFF# Foo\\n\\nfoo()\\n\\uFEFF## Bar\\n\\nbar()';\n\n      final headersToSplitOn = [('#', 'Header 1'), ('##', 'Header 2')];\n\n      final markdownSplitter = MarkdownHeaderTextSplitter(\n        headersToSplitOn: headersToSplitOn,\n      );\n\n      final output = markdownSplitter.splitText(markdownDocument);\n\n      final expectedOutput = [\n        const Document(pageContent: 'foo()', metadata: {'Header 1': 'Foo'}),\n        const Document(\n          pageContent: 'bar()',\n          metadata: {'Header 1': 'Foo', 'Header 2': 'Bar'},\n        ),\n      ];\n\n      expect(output, equals(expectedOutput));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/text_splitters/recursive_character_test.dart",
    "content": "import 'dart:math';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RecursiveCharacterTextSplitter tests', () {\n    List<String> testIterativeTextSplitter({\n      required final int chunkSize,\n      required final bool keepSeparator,\n    }) {\n      final finalChunkSize = chunkSize + (keepSeparator ? 1 : 0);\n\n      final splitter = RecursiveCharacterTextSplitter(\n        chunkSize: finalChunkSize,\n        chunkOverlap: 0,\n        separators: ['X', 'Y'],\n        keepSeparator: keepSeparator,\n      );\n\n      const text = '....5X..3Y...4X....5Y...';\n      final output = splitter.splitText(text);\n\n      for (final chunk in output) {\n        expect(\n          chunk.length <= finalChunkSize,\n          isTrue,\n          reason: 'Chunk is larger than $finalChunkSize',\n        );\n      }\n      return output;\n    }\n\n    test('Test iterative text splitter keep separator', () {\n      const chunkSize = 5;\n      final output = testIterativeTextSplitter(\n        chunkSize: chunkSize,\n        keepSeparator: true,\n      );\n\n      expect(output, ['....5', 'X..3', 'Y...4', 'X....5', 'Y...']);\n    });\n\n    test('Test iterative text splitter discard separator', () {\n      const chunkSize = 5;\n      final output = testIterativeTextSplitter(\n        chunkSize: chunkSize,\n        keepSeparator: false,\n      );\n\n      expect(output, ['....5', '..3', '...4', '....5', '...']);\n    });\n\n    test('Test chunk overlap', () {\n      const text = 'abcdefghijklmnopqrstuvwxyz';\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 10,\n        chunkOverlap: 4,\n      );\n      final output = splitter.splitText(text);\n\n      // With chunk size 10 and overlap 4, the expected chunks would be:\n      // 1. \"abcdefghij\" (first 10 chars)\n      // 2. \"ghijklmnop\" (starts at index 6, giving overlap of 4 chars \"ghij\")\n      // 3. 
\"mnopqrstuv\" (starts at index 12, giving overlap of 4 chars \"mnop\")\n      // 4. \"stuvwxyz\" (remaining chars, with overlap of 4 chars \"stuv\")\n      final expectedOutput = [\n        'abcdefghij',\n        'ghijklmnop',\n        'mnopqrstuv',\n        'stuvwxyz',\n      ];\n\n      expect(output, expectedOutput);\n\n      // Verify overlaps between consecutive chunks\n      for (var i = 0; i < output.length - 1; i++) {\n        final currentChunk = output[i];\n        final nextChunk = output[i + 1];\n\n        final overlap = _findOverlap(currentChunk, nextChunk);\n\n        // Last chunk might have less overlap if text length doesn't align perfectly\n        if (i < output.length - 2 ||\n            nextChunk.length >= splitter.chunkOverlap) {\n          expect(\n            overlap.length,\n            splitter.chunkOverlap,\n            reason:\n                'Overlap between chunks $i and ${i + 1}, '\n                'should be ${splitter.chunkOverlap} characters',\n          );\n        }\n      }\n    });\n\n    test('Test chunk overlap with document metadata', () {\n      const text = 'abcdefghijklmnopqrstuvwxyz';\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 10,\n        chunkOverlap: 4,\n        addStartIndex: true,\n      );\n\n      final docs = splitter.createDocuments([text]);\n\n      // Verify correct number of documents\n      expect(docs.length, 4);\n\n      // Verify document content matches expected chunks\n      expect(docs.map((d) => d.pageContent).toList(), [\n        'abcdefghij',\n        'ghijklmnop',\n        'mnopqrstuv',\n        'stuvwxyz',\n      ]);\n\n      // Verify start indexes are correct in metadata\n      expect(docs[0].metadata['start_index'], 0);\n      expect(docs[1].metadata['start_index'], 6);\n      expect(docs[2].metadata['start_index'], 12);\n      expect(docs[3].metadata['start_index'], 18);\n\n      // Verify that each start_index points to the correct position in the original 
text\n      for (final doc in docs) {\n        final startIndex = doc.metadata['start_index'] as int;\n        final content = doc.pageContent;\n\n        expect(\n          text.substring(startIndex, startIndex + content.length),\n          content,\n          reason: 'Content at start_index does not match document content',\n        );\n      }\n    });\n\n    test('Test chunk overlap with repeating patterns', () {\n      // Text with repeating patterns to highlight the bug fixed\n      const text = 'AAABBBAAABBBAAABBB';\n\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 6,\n        chunkOverlap: 3,\n        addStartIndex: true,\n      );\n\n      final docs = splitter.createDocuments([text]);\n\n      // Expected chunks with proper overlap positioning\n      final chunks = docs.map((d) => d.pageContent).toList();\n      expect(chunks.length, 5);\n      expect(chunks[0], 'AAABBB');\n      expect(chunks[1], 'BBBAAA');\n      expect(chunks[2], 'AAABBB');\n      expect(chunks[3], 'BBBAAA');\n      expect(chunks[4], 'AAABBB');\n\n      // Verify start indexes (these values depend on correct overlap behavior)\n      expect(docs[0].metadata['start_index'], 0);\n      expect(docs[1].metadata['start_index'], 3);\n      expect(docs[2].metadata['start_index'], 6);\n      expect(docs[3].metadata['start_index'], 9);\n      expect(docs[4].metadata['start_index'], 12);\n\n      // Verify that each chunk starts at the right position in the original text\n      for (final doc in docs) {\n        final startIndex = doc.metadata['start_index'] as int;\n        final content = doc.pageContent;\n\n        expect(\n          text.substring(startIndex, startIndex + content.length),\n          content,\n          reason: 'Content at start_index does not match document content',\n        );\n      }\n\n      // Verify proper progression through the text\n      for (var i = 0; i < docs.length - 1; i++) {\n        final currentDoc = docs[i];\n        final 
nextDoc = docs[i + 1];\n\n        final currentStartIndex = currentDoc.metadata['start_index'] as int;\n        final nextStartIndex = nextDoc.metadata['start_index'] as int;\n\n        // With the fix, each chunk should start at exactly 3 characters after the previous one\n        // (with chunkSize=6 and chunkOverlap=3, we move forward by 3 characters each time)\n        expect(\n          nextStartIndex,\n          currentStartIndex +\n              currentDoc.pageContent.length -\n              splitter.chunkOverlap,\n          reason:\n              \"Next chunk's start_index should reflect the proper overlap calculation\",\n        );\n\n        // Verify specifically that we advance by exactly the right amount\n        expect(\n          nextStartIndex - currentStartIndex,\n          3,\n          reason: 'Should advance by (chunkSize - chunkOverlap) positions',\n        );\n      }\n    });\n\n    test('Test start_index calculation with pattern repetition', () {\n      const text = 'ABABCABACABABCABAB';\n\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 5,\n        chunkOverlap: 2,\n        addStartIndex: true,\n      );\n\n      final docs = splitter.createDocuments([text]);\n\n      // Verify that chunks have correct content\n      final chunks = docs.map((d) => d.pageContent).toList();\n      expect(chunks[0], 'ABABC'); // First chunk starts at index 0\n      expect(chunks[1], 'BCABA'); // Second chunk starts at index 3\n      expect(chunks[2], 'BACAB'); // Third chunk starts at index 6\n\n      // Verify start indexes are correct and properly respect the overlap\n      expect(docs[0].metadata['start_index'], 0);\n      expect(docs[1].metadata['start_index'], 3);\n      expect(docs[2].metadata['start_index'], 6);\n\n      // For each consecutive chunk pair, verify the correct spacing\n      for (var i = 0; i < docs.length - 1; i++) {\n        final currentDoc = docs[i];\n        final nextDoc = docs[i + 1];\n\n        final 
currentStartIndex = currentDoc.metadata['start_index'] as int;\n        final nextStartIndex = nextDoc.metadata['start_index'] as int;\n        final chunkLength = currentDoc.pageContent.length;\n\n        // Each chunk should advance by exactly (chunkLength - overlap) positions\n        final expectedAdvance = chunkLength - splitter.chunkOverlap;\n        final actualAdvance = nextStartIndex - currentStartIndex;\n\n        expect(\n          actualAdvance,\n          expectedAdvance,\n          reason:\n              'Chunks should advance by exactly (chunkLength - overlap) positions',\n        );\n\n        // Directly calculate the expected next position\n        final expectedNextPosition =\n            currentStartIndex + chunkLength - splitter.chunkOverlap;\n\n        expect(\n          nextStartIndex,\n          expectedNextPosition,\n          reason:\n              'Next chunk should start at exactly the correct overlapping position',\n        );\n\n        // Verify that text at calculated position matches next chunk content\n        final nextChunkContent = nextDoc.pageContent;\n        final textAtNextPosition = text.substring(\n          nextStartIndex,\n          nextStartIndex + nextChunkContent.length,\n        );\n\n        expect(\n          textAtNextPosition,\n          nextChunkContent,\n          reason:\n              'Text at calculated next position should match next chunk content',\n        );\n      }\n    });\n\n    test('Test correct start_index with repetitive pattern', () {\n      const text = 'ABCDEFG ABCDEFG ABCDEFG';\n\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 10,\n        chunkOverlap: 3,\n        addStartIndex: true,\n      );\n\n      final docs = splitter.createDocuments([text]);\n\n      // Verify each chunk's content matches the text at its start_index\n      for (final doc in docs) {\n        final startIndex = doc.metadata['start_index'] as int;\n        final content = doc.pageContent;\n\n 
       expect(\n          text.substring(startIndex, startIndex + content.length),\n          content,\n          reason: 'Content at start_index should match the chunk content',\n        );\n      }\n\n      // Verify proper progression from one chunk to the next\n      for (var i = 0; i < docs.length - 1; i++) {\n        final nextDoc = docs[i + 1];\n\n        final nextStartIndex = nextDoc.metadata['start_index'] as int;\n\n        // Due to the pattern being repeated, the algorithm might find the next occurrence\n        // which could be further ahead, so we verify the content is correct rather than\n        // the exact position calculation\n        final expectedText = text.substring(\n          nextStartIndex,\n          nextStartIndex + nextDoc.pageContent.length,\n        );\n\n        expect(\n          nextDoc.pageContent,\n          expectedText,\n          reason: 'Next chunk content must match text at its start_index',\n        );\n      }\n    });\n\n    test('Test iterative text splitter.', () {\n      const text = '''\nHi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.\nThis is a weird text to write, but gotta test the splittingggg some how.\n\nBye!\n\n-H.''';\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkSize: 10,\n        chunkOverlap: 1,\n      );\n      final output = splitter.splitText(text);\n      final expectedOutput = [\n        'Hi.',\n        \"I'm\",\n        'Harrison.',\n        'How? 
Are?',\n        'You?',\n        'Okay then',\n        'f f f f.',\n        'This is a',\n        'weird',\n        'text to',\n        'write,',\n        'but gotta',\n        'test the',\n        'splitting',\n        'gggg',\n        'some how.',\n        'Bye!',\n        '-H.',\n      ];\n      expect(output, expectedOutput);\n    });\n  });\n}\n\n/// Helper function to find overlapping text between two strings\nString _findOverlap(String first, String second) {\n  if (first.isEmpty || second.isEmpty) return '';\n\n  // Find the maximum overlapping suffix of first that is a prefix of second\n  for (int i = min(first.length, second.length); i > 0; i--) {\n    final suffix = first.substring(first.length - i);\n    final prefix = second.substring(0, i);\n\n    if (suffix == prefix) {\n      return suffix;\n    }\n  }\n\n  return '';\n}\n"
  },
  {
    "path": "packages/langchain/test/text_splitters/utils_test.dart",
    "content": "import 'package:langchain/src/text_splitters/utils.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('TextSplitter utils tests', () {\n    test('Test splitTextWithRegex keepSeparator=false', () {\n      const text = 'This is a weird text to write';\n      final output = splitTextWithRegex(text, ' ', false);\n      expect(output, ['This', 'is', 'a', 'weird', 'text', 'to', 'write']);\n    });\n\n    test('Test splitTextWithRegex keepSeparator=true', () {\n      const text = 'This is a weird text to write';\n      final output = splitTextWithRegex(text, ' ', true);\n      expect(output, ['This', ' is', ' a', ' weird', ' text', ' to', ' write']);\n    });\n\n    test('Test splitTextWithRegex empty string separator', () {\n      const text = 'splittingggg';\n      final output = splitTextWithRegex(text, '', true);\n      expect(output, [\n        's',\n        'p',\n        'l',\n        'i',\n        't',\n        't',\n        'i',\n        'n',\n        'g',\n        'g',\n        'g',\n        'g',\n      ]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain/test/vector_stores/memory_test.dart",
    "content": "import 'package:langchain/langchain.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('MemoryVectorStore tests', () {\n    test('Test MemoryVectorStore.fromDocuments', () async {\n      const embeddings = _FakeEmbeddings();\n      final store = await MemoryVectorStore.fromDocuments(\n        documents: [\n          const Document(id: '1', pageContent: 'hello'),\n          const Document(id: '2', pageContent: 'hi'),\n          const Document(id: '3', pageContent: 'bye'),\n          const Document(id: '4', pageContent: \"what's this\"),\n        ],\n        embeddings: embeddings,\n      );\n\n      final results = await store.similaritySearch(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(k: 1),\n      );\n\n      expect(results.length, 1);\n      expect(results.first.id, '3');\n      expect(results.first.pageContent, 'bye');\n    });\n\n    test('Test MemoryVectorStore.fromText', () async {\n      const embeddings = _FakeEmbeddings();\n      final store = await MemoryVectorStore.fromText(\n        ids: const ['1', '2', '3', '4'],\n        texts: const ['hello', 'hi', 'bye', \"what's this\"],\n        embeddings: embeddings,\n      );\n\n      final results = await store.similaritySearch(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(k: 1),\n      );\n\n      expect(results.length, 1);\n      expect(results.first.id, '3');\n      expect(results.first.pageContent, 'bye');\n    });\n\n    test('Test MemoryVectorStore with initialMemoryVectors', () async {\n      const embeddings = _FakeEmbeddings();\n      final store = MemoryVectorStore(\n        embeddings: embeddings,\n        initialMemoryVectors: [\n          MemoryVector(\n            document: const Document(id: '1', pageContent: 'hello'),\n            embedding: _helloVector,\n          ),\n          MemoryVector(\n            document: const Document(id: '2', pageContent: 'hi'),\n            embedding: _hiVector,\n      
    ),\n          MemoryVector(\n            document: const Document(id: '3', pageContent: 'bye'),\n            embedding: _byeVector,\n          ),\n          MemoryVector(\n            document: const Document(id: '4', pageContent: \"what's this\"),\n            embedding: _whatsThisVector,\n          ),\n        ],\n      );\n\n      final results = await store.similaritySearch(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(k: 1),\n      );\n\n      expect(results.length, 1);\n      expect(results.first.id, '3');\n      expect(results.first.pageContent, 'bye');\n    });\n\n    test('Test delete entry', () async {\n      const embeddings = _FakeEmbeddings();\n      final store = await MemoryVectorStore.fromDocuments(\n        documents: [\n          const Document(id: '1', pageContent: 'hello'),\n          const Document(id: '2', pageContent: 'hi'),\n          const Document(id: '3', pageContent: 'bye'),\n          const Document(id: '4', pageContent: \"what's this\"),\n        ],\n        embeddings: embeddings,\n      );\n      await store.delete(ids: ['3']);\n\n      final results = await store.similaritySearch(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(k: 1),\n      );\n\n      expect(results.length, 1);\n      expect(results.first.id, '2');\n      expect(results.first.pageContent, 'hi');\n    });\n\n    test('Test scoreThreshold filter', () async {\n      const embeddings = _FakeEmbeddings();\n      final store = await MemoryVectorStore.fromText(\n        ids: const ['1', '2', '3', '4'],\n        texts: const ['hello', 'hi', 'bye', \"what's this\"],\n        embeddings: embeddings,\n      );\n\n      final res = await store.similaritySearchWithScores(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(scoreThreshold: 0.85),\n      );\n\n      for (final (_, score) in res) {\n        expect(score, greaterThan(0.85));\n      }\n    });\n\n    test('Test filtering', () async 
{\n      const embeddings = _FakeEmbeddings();\n      final store = await MemoryVectorStore.fromDocuments(\n        documents: [\n          const Document(\n            id: '1',\n            pageContent: 'hello',\n            metadata: {'type': 'a'},\n          ),\n          const Document(id: '2', pageContent: 'hi', metadata: {'type': 'a'}),\n          const Document(id: '3', pageContent: 'bye', metadata: {'type': 'b'}),\n          const Document(\n            id: '4',\n            pageContent: \"what's this\",\n            metadata: {'type': 'b'},\n          ),\n        ],\n        embeddings: embeddings,\n      );\n\n      final results = await store.similaritySearch(\n        query: 'chao',\n        config: const VectorStoreSimilaritySearch(filter: {'type': 'b'}),\n      );\n\n      for (final doc in results) {\n        expect(doc.metadata['type'], 'b');\n      }\n    });\n\n    test('Test toMap and fromMap', () {\n      const embeddings = _FakeEmbeddings();\n      final store = MemoryVectorStore(\n        embeddings: embeddings,\n        initialMemoryVectors: const [\n          MemoryVector(\n            document: Document(id: '1', pageContent: 'hello'),\n            embedding: [1, 2, 3],\n          ),\n          MemoryVector(\n            document: Document(id: '2', pageContent: 'hi'),\n            embedding: [4, 5, 6],\n          ),\n        ],\n      );\n\n      final map = store.memoryVectors.map((final v) => v.toMap());\n      final expectedMap = [\n        {\n          'document': {\n            'id': '1',\n            'pageContent': 'hello',\n            'metadata': <String, dynamic>{},\n          },\n          'embedding': [1.0, 2.0, 3.0],\n        },\n        {\n          'document': {\n            'id': '2',\n            'pageContent': 'hi',\n            'metadata': <String, dynamic>{},\n          },\n          'embedding': [4.0, 5.0, 6.0],\n        },\n      ];\n      expect(map, expectedMap);\n\n      final newMap = expectedMap\n          
.map(MemoryVector.fromMap)\n          .toList(growable: false);\n      expect(newMap, store.memoryVectors);\n    });\n  });\n}\n\nclass _FakeEmbeddings extends Embeddings {\n  const _FakeEmbeddings();\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    return embedText(query);\n  }\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    return [for (final document in documents) embedText(document.pageContent)];\n  }\n\n  List<double> embedText(final String text) {\n    return switch (text) {\n      'hello' => _helloVector,\n      'hi' => _hiVector,\n      'bye' => _byeVector,\n      \"what's this\" => _whatsThisVector,\n      'chao' => _chaoVector,\n      _ => throw Exception('Unknown text: $text'),\n    };\n  }\n}\n\nfinal List<double> _helloVector = [\n  -0.02502486,\n  -0.019432934,\n  -0.027708454,\n  -0.031039815,\n  -0.024667928,\n  0.027417623,\n  -0.0124992095,\n  -0.008513476,\n  -0.017463202,\n  -0.008453987,\n  0.03252042,\n  0.004293092,\n  -0.024496071,\n  -0.00063867593,\n  0.01414506,\n  -0.0015045651,\n  0.039421093,\n  0.002020959,\n  0.026875615,\n  -0.012585138,\n  -0.020992856,\n  0.008896846,\n  0.008427547,\n  -0.0030620089,\n  -0.00541346,\n  -0.009491732,\n  0.011091313,\n  -0.0017053391,\n  0.0034767764,\n  -0.023200544,\n  0.0067255134,\n  -0.00793181,\n  -0.023887966,\n  -0.008943115,\n  0.0068477956,\n  -0.013682372,\n  0.009485122,\n  -0.01413184,\n  0.021706719,\n  -0.010569137,\n  0.003407373,\n  -0.01458131,\n  0.0052250796,\n  -0.014925022,\n  -0.031832997,\n  0.016220551,\n  -0.005416765,\n  -0.0066726347,\n  -0.009663588,\n  0.034714885,\n  0.026333608,\n  0.009220729,\n  -0.01960479,\n  -0.008004518,\n  -0.0056448043,\n  -0.0057736966,\n  -0.013603053,\n  0.006160372,\n  0.00090224337,\n  -0.038601473,\n  -0.00040691835,\n  0.018983465,\n  -0.01540093,\n  0.01874551,\n  -0.0004096036,\n  0.01082692,\n  0.019036343,\n  0.0040683574,\n  
-0.004699597,\n  0.02209009,\n  0.025143836,\n  0.017331004,\n  0.006940333,\n  -0.018031647,\n  0.019089222,\n  -0.010793871,\n  -0.0073038745,\n  -0.005377106,\n  -0.01455487,\n  0.0013773257,\n  0.01579752,\n  -0.024826564,\n  -0.008434158,\n  0.013398148,\n  0.018877707,\n  0.015176196,\n  -0.021733157,\n  0.023848308,\n  -0.009789175,\n  -0.028871788,\n  -0.001676421,\n  0.017991988,\n  0.008824138,\n  0.024866223,\n  -0.021442326,\n  0.0028620614,\n  -0.014911802,\n  0.03159504,\n  -0.0018210113,\n  -0.00832179,\n  -0.001288919,\n  0.013669152,\n  -0.00067503005,\n  -0.004921027,\n  -0.03254686,\n  -0.009452073,\n  -0.011924154,\n  -0.007330314,\n  0.02372933,\n  0.009914762,\n  -0.005281263,\n  0.026611222,\n  -0.0024258117,\n  -0.044365253,\n  -0.014964681,\n  -0.0065569626,\n  -0.0051788106,\n  -0.003744475,\n  -0.012234816,\n  -0.020688804,\n  0.016749337,\n  0.0022407363,\n  0.026822736,\n  -0.0035164356,\n  0.021971112,\n  0.004497997,\n  -0.017357443,\n  -0.016947633,\n  0.0044318987,\n  -0.0063950215,\n  0.03371019,\n  0.003704816,\n  -0.0038039638,\n  -0.0043988493,\n  -0.020477287,\n  0.019353615,\n  -0.021402666,\n  0.0249852,\n  -0.039685488,\n  -0.02671698,\n  -0.014343356,\n  0.032652617,\n  -0.0003639544,\n  -0.00354618,\n  -0.02333274,\n  0.008526695,\n  0.013232903,\n  -0.0010823616,\n  0.0066957693,\n  -0.014449113,\n  0.00090306957,\n  0.0012756994,\n  0.012267865,\n  -0.004455033,\n  0.0020507032,\n  0.0136427125,\n  -0.010086617,\n  0.008440767,\n  -0.006755258,\n  -0.013120535,\n  -0.011943983,\n  0.0031148877,\n  0.012690895,\n  -0.008691941,\n  -0.0104105,\n  0.023041908,\n  0.017661495,\n  0.016617142,\n  -0.0014607749,\n  -0.003065314,\n  0.011408586,\n  0.011124363,\n  -0.035217233,\n  0.021349788,\n  0.0025943627,\n  0.009888323,\n  0.018137405,\n  0.015929718,\n  -0.02420524,\n  -0.04153624,\n  -0.016180892,\n  0.008513476,\n  0.019287517,\n  0.038442835,\n  -0.0125454785,\n  0.009280217,\n  0.017212028,\n  0.009643759,\n  
0.0026174972,\n  -0.02083422,\n  0.008731601,\n  0.022394143,\n  0.033472236,\n  -0.01333205,\n  -0.7051908,\n  -0.0057869162,\n  0.026518684,\n  0.00916785,\n  0.013576614,\n  0.0483576,\n  0.025090957,\n  0.013087486,\n  -0.022420581,\n  0.037358824,\n  0.012552089,\n  0.01747642,\n  0.0070989695,\n  -0.009392585,\n  0.008923286,\n  0.00091463677,\n  -0.003420593,\n  -0.0031710714,\n  0.005952162,\n  0.002078795,\n  -0.013265952,\n  0.006900674,\n  -0.01997494,\n  0.015242294,\n  0.004534351,\n  0.0030752288,\n  0.016960854,\n  -0.0074823406,\n  -0.012704115,\n  0.021204371,\n  -0.013404758,\n  0.005925723,\n  0.007204727,\n  -0.0041080164,\n  0.038892306,\n  -0.0031561991,\n  0.007693855,\n  0.022711415,\n  0.009326486,\n  0.039711926,\n  -0.010655064,\n  -0.016722899,\n  0.0034899963,\n  -0.0024208543,\n  0.012214987,\n  0.006163677,\n  -0.0012319091,\n  -0.0009567745,\n  -0.00031541337,\n  -0.0076806354,\n  0.01500434,\n  0.000087632034,\n  -0.022169407,\n  0.0048119645,\n  0.005244909,\n  -0.009908152,\n  0.016180892,\n  -0.018031647,\n  0.0037808293,\n  0.023570694,\n  0.009485122,\n  -0.0025745332,\n  -0.016035475,\n  -0.006913894,\n  -0.014634188,\n  0.0020143492,\n  -0.013563395,\n  -0.015136536,\n  -0.005747257,\n  0.0031495893,\n  -0.0038072686,\n  0.014277257,\n  -0.012168718,\n  0.0032156878,\n  0.012889191,\n  0.026783077,\n  0.018930586,\n  -0.015295173,\n  -0.006834576,\n  0.009385975,\n  0.004412069,\n  -0.007198117,\n  -0.015744641,\n  -0.013550174,\n  0.03532299,\n  -0.0149779,\n  -0.02420524,\n  0.003794049,\n  0.003043832,\n  0.010542697,\n  0.027682016,\n  0.028660271,\n  0.0073964125,\n  -0.024786904,\n  -0.003784134,\n  0.0009724729,\n  -0.02749694,\n  0.009564441,\n  -0.0011377189,\n  -0.027946409,\n  -0.0027017726,\n  -0.00048127907,\n  -0.012836312,\n  -0.011739078,\n  0.034291856,\n  0.009941202,\n  -0.024443192,\n  0.03910382,\n  0.026055995,\n  -0.018811608,\n  -0.003873367,\n  0.0064016315,\n  -0.008639063,\n  0.012459551,\n  
0.00916124,\n  -0.026822736,\n  -0.004336056,\n  0.015625665,\n  0.02207687,\n  -0.026730198,\n  -0.0063851066,\n  0.0005552267,\n  0.018930586,\n  -0.003873367,\n  -0.0014723422,\n  0.025910579,\n  0.000886545,\n  -0.0075219995,\n  -0.01788623,\n  0.009544611,\n  0.01667002,\n  -0.0020953198,\n  0.008024347,\n  -0.014396234,\n  0.015493468,\n  -0.024271337,\n  -0.002667071,\n  -0.022605658,\n  -0.0022027297,\n  -0.005337447,\n  0.0045971447,\n  0.003628803,\n  -0.007409632,\n  -0.005403545,\n  0.004527741,\n  -0.011858055,\n  -0.0062000314,\n  -0.0062463,\n  -0.0014624274,\n  0.0031363696,\n  0.010489818,\n  0.0031760288,\n  -0.003965905,\n  0.017621838,\n  0.022235505,\n  -0.016828656,\n  0.007753344,\n  -0.0127503835,\n  -0.023663232,\n  -0.008797699,\n  -0.007720295,\n  0.014422674,\n  -0.023398839,\n  -0.0021531559,\n  -0.007978079,\n  -0.027206106,\n  0.0036750718,\n  0.010080008,\n  0.0057670865,\n  -0.04576654,\n  0.025249593,\n  -0.020226113,\n  -0.021746378,\n  0.0141715,\n  0.018851267,\n  0.0049408562,\n  -0.011613491,\n  0.0046467185,\n  -0.0031727238,\n  -0.01331883,\n  0.008169764,\n  -0.0045607905,\n  -0.008619233,\n  0.005991821,\n  0.026769858,\n  -0.0012087747,\n  0.0059224176,\n  0.02253956,\n  -0.021561302,\n  0.018613312,\n  0.012135669,\n  0.013999644,\n  -0.014660628,\n  -0.000008397855,\n  0.013841008,\n  0.0021829002,\n  0.0007828531,\n  0.009445463,\n  0.0060942736,\n  0.017317785,\n  0.036169052,\n  -0.01414506,\n  0.018401798,\n  -0.022235505,\n  -0.016167672,\n  -0.026743418,\n  -0.003628803,\n  -0.015691763,\n  0.02037153,\n  0.025632964,\n  -0.0061504575,\n  -0.014224378,\n  0.0030206975,\n  0.0030867958,\n  0.025038078,\n  0.03577246,\n  -0.010628625,\n  0.007832662,\n  -0.0142375985,\n  -0.0072443862,\n  -0.0015450504,\n  -0.010344402,\n  0.008308571,\n  -0.0039824294,\n  0.007621147,\n  0.02287005,\n  0.014911802,\n  0.032414664,\n  0.00541346,\n  -0.0016970767,\n  -0.0142375985,\n  0.011051655,\n  0.009802395,\n  0.010483208,\n  
-0.00024539037,\n  -0.01163993,\n  0.013027998,\n  -0.021521643,\n  0.031515725,\n  -0.02002782,\n  -0.0011104534,\n  0.03204451,\n  0.03410678,\n  -0.03408034,\n  0.001022873,\n  0.016617142,\n  0.0081565445,\n  0.0046962923,\n  0.023636792,\n  0.026386486,\n  0.0011162369,\n  0.003870062,\n  0.0020804475,\n  -0.008592794,\n  0.0018358835,\n  -0.020490509,\n  -0.011884495,\n  0.00998747,\n  0.015876839,\n  0.025289252,\n  0.0017681326,\n  0.011078094,\n  0.0062991786,\n  -0.00093611877,\n  0.0100072995,\n  0.0075550484,\n  -0.00791859,\n  -0.0077665634,\n  -0.0018689326,\n  0.006537133,\n  0.008097055,\n  -0.0029463368,\n  0.032203145,\n  -0.030960497,\n  0.023557475,\n  0.00748895,\n  -0.00395599,\n  0.0050267843,\n  -0.016313089,\n  -0.018150624,\n  -0.023134444,\n  -0.026968153,\n  0.004418679,\n  0.01083353,\n  0.01002052,\n  -0.008116885,\n  -0.053592592,\n  0.00092207285,\n  0.01038406,\n  0.016736118,\n  0.0072708256,\n  0.013153584,\n  0.028237242,\n  -0.021640621,\n  -0.004626889,\n  0.006414851,\n  0.03495284,\n  -0.0016557652,\n  0.0032355173,\n  0.011071485,\n  -0.004375715,\n  0.00045070855,\n  0.015638884,\n  -0.019009903,\n  -0.004504607,\n  0.0050267843,\n  -0.0112169,\n  -0.011282999,\n  0.00353296,\n  0.0030041728,\n  -0.0051127123,\n  0.0149779,\n  -0.01126978,\n  -0.002100277,\n  0.0027414316,\n  0.0022803952,\n  -0.020609485,\n  -0.008546525,\n  0.020596266,\n  -0.023412058,\n  -0.018044867,\n  -0.009339706,\n  -0.0010980599,\n  0.010608795,\n  0.055681303,\n  0.024760466,\n  0.0021548083,\n  -0.013417978,\n  0.0010278303,\n  0.018917365,\n  -0.020464068,\n  -0.012975118,\n  -0.01500434,\n  -0.018428238,\n  0.002268828,\n  -0.009359535,\n  -0.020900318,\n  0.030828299,\n  0.017331004,\n  0.0024786906,\n  0.011124363,\n  -0.0029281597,\n  -0.0033330126,\n  0.0030901008,\n  -0.0042666527,\n  -0.012942069,\n  0.005116017,\n  0.017687935,\n  0.022195848,\n  -0.007046091,\n  0.01751608,\n  0.022737853,\n  -0.00047838726,\n  -0.014951461,\n  
-0.0036056684,\n  -0.0000073263377,\n  0.01207618,\n  0.008645672,\n  0.012446331,\n  0.024429973,\n  -0.013801348,\n  0.014224378,\n  0.0084077185,\n  -0.002850494,\n  0.012201767,\n  0.012439721,\n  0.009974251,\n  -0.021481983,\n  0.0011368927,\n  -0.010146107,\n  -0.013827788,\n  0.024244897,\n  -0.006788307,\n  -0.015043999,\n  0.03455625,\n  0.002721602,\n  -0.017198807,\n  -0.017026952,\n  -0.0036321077,\n  0.006289264,\n  -0.01413184,\n  -0.0069271135,\n  -0.006913894,\n  -0.0022837003,\n  0.0004709512,\n  -0.02000138,\n  -0.00058207917,\n  0.0057869162,\n  -0.029030424,\n  -0.03965905,\n  -0.026161753,\n  0.00007110743,\n  -0.012532259,\n  0.008804308,\n  -0.013292391,\n  -0.006090969,\n  -0.008969555,\n  -0.011567222,\n  0.00094768597,\n  0.013603053,\n  0.0006960989,\n  -0.011633321,\n  0.020252554,\n  0.005991821,\n  -0.0016706374,\n  -0.032229587,\n  -0.0017367358,\n  -0.023676451,\n  -0.00499704,\n  -0.003068619,\n  -0.0068081366,\n  -0.004412069,\n  -0.01084675,\n  0.01663036,\n  0.025606526,\n  0.016498163,\n  -0.0026406315,\n  -0.0010955812,\n  0.009809004,\n  -0.0024737332,\n  0.013312221,\n  0.0060942736,\n  -0.012016691,\n  -0.011560612,\n  0.011950593,\n  -0.005122627,\n  -0.006580097,\n  -0.009240558,\n  0.017991988,\n  0.0021101919,\n  0.012598357,\n  0.025884138,\n  -0.004015479,\n  -0.010284913,\n  0.03503216,\n  -0.012770213,\n  0.016921194,\n  -0.0021151493,\n  -0.0044913874,\n  0.0068081366,\n  0.006295874,\n  0.029241938,\n  -0.026293948,\n  -0.014753166,\n  0.0031148877,\n  -0.05821948,\n  0.017317785,\n  0.007165068,\n  0.006005041,\n  0.0011195418,\n  0.019300736,\n  -0.012988338,\n  0.0023134444,\n  0.0016466767,\n  -0.0071452386,\n  0.015691763,\n  -0.008189593,\n  -0.017331004,\n  -0.03167436,\n  -0.0059025884,\n  -0.012195157,\n  0.0027563039,\n  -0.009194289,\n  -0.036512762,\n  -0.03003512,\n  -0.014938242,\n  0.013080876,\n  -0.027391182,\n  -0.018124186,\n  -0.047141388,\n  -0.0051821154,\n  0.005800136,\n  -0.00045649215,\n  
0.013655932,\n  -0.0065701823,\n  0.010192376,\n  -0.027391182,\n  -0.027946409,\n  -0.008506866,\n  -0.024033383,\n  -0.0022803952,\n  -0.004712817,\n  0.038416397,\n  0.028898226,\n  0.015731422,\n  0.0121224485,\n  0.008843968,\n  -0.0056117554,\n  0.003701511,\n  0.018243162,\n  -0.00055687915,\n  0.0041807247,\n  -0.010377451,\n  0.01455487,\n  0.02581804,\n  0.03172724,\n  0.004828489,\n  -0.012148889,\n  0.008182984,\n  0.013933546,\n  -0.006900674,\n  -0.00018517884,\n  -0.0057241227,\n  -0.019393275,\n  -0.013140365,\n  0.0018011817,\n  -0.011877885,\n  0.004739256,\n  0.01291563,\n  -0.009214119,\n  0.026029555,\n  0.027708454,\n  0.031013375,\n  0.0014467291,\n  0.0299558,\n  -0.011203681,\n  0.030748982,\n  0.010172546,\n  0.018005207,\n  0.019234639,\n  -0.0142375985,\n  -0.016841875,\n  -0.018586874,\n  0.009762736,\n  -0.0011724206,\n  0.02039797,\n  -0.015242294,\n  -0.007627757,\n  -0.017317785,\n  0.02958565,\n  -0.012723944,\n  -0.008520085,\n  -0.006593317,\n  -0.0103510115,\n  -0.012842922,\n  -0.03873367,\n  -0.014620969,\n  -0.008718381,\n  0.00459384,\n  0.00026625267,\n  -0.0166568,\n  0.03320784,\n  -0.021918233,\n  -0.01830926,\n  0.0023960674,\n  -0.0062793493,\n  0.021058954,\n  -0.0011897715,\n  0.014819264,\n  0.011124363,\n  -0.022936149,\n  -0.028686712,\n  -0.00042282327,\n  -0.011606881,\n  -0.014515212,\n  0.020754902,\n  0.0011302829,\n  0.0011220205,\n  -0.018018428,\n  0.00036808554,\n  -0.010298133,\n  -0.01744998,\n  -0.009399194,\n  0.012585138,\n  0.017648276,\n  -0.008936506,\n  -0.019697327,\n  -0.0042038593,\n  -0.021270469,\n  0.026479024,\n  0.010265084,\n  -0.010648455,\n  -0.0086588925,\n  0.0046665478,\n  -0.014118621,\n  0.019472592,\n  -0.01252565,\n  0.009775955,\n  0.02333274,\n  -0.017225247,\n  -0.017145928,\n  0.0058992836,\n  -0.01873229,\n  0.014462333,\n  -0.0073964125,\n  0.01500434,\n  -0.012016691,\n  0.0018243162,\n  0.014541651,\n  -0.007667416,\n  -0.03127777,\n  -0.0121224485,\n  -0.0020920148,\n  
0.026492244,\n  -0.024932321,\n  0.015678544,\n  -0.0038965014,\n  0.00014221486,\n  0.01458131,\n  0.00082251214,\n  -0.012552089,\n  -0.028528076,\n  -0.020107137,\n  -0.0012286042,\n  0.026029555,\n  0.010701333,\n  -0.0025513987,\n  -0.0033644093,\n  -0.0028934581,\n  -0.026082434,\n  -0.033948146,\n  0.00874482,\n  0.0026373267,\n  -0.0007097317,\n  -0.012307525,\n  -0.013827788,\n  -0.0032074256,\n  0.009015824,\n  0.014250818,\n  -0.026412927,\n  0.02497198,\n  0.0154141495,\n  -0.032441102,\n  -0.0047822203,\n  -0.0049078073,\n  -0.009088532,\n  -0.029876484,\n  0.0129222395,\n  -0.0052845683,\n  -0.019406494,\n  0.01001391,\n  -0.017555738,\n  -0.02002782,\n  0.014885362,\n  0.0125454785,\n  0.011481294,\n  0.013867447,\n  0.0077665634,\n  0.017317785,\n  0.018031647,\n  0.006090969,\n  -0.008103666,\n  -0.019750206,\n  0.012452941,\n  -0.01877195,\n  0.012426502,\n  -0.0072443862,\n  -0.011745688,\n  0.021151492,\n  -0.016009036,\n  0.019221418,\n  0.006880845,\n  -0.023147665,\n  -0.0021151493,\n  -0.0005064791,\n  -0.00066718087,\n  -0.01791267,\n  -0.0065007787,\n  -0.0026224544,\n  -0.011567222,\n  -0.02334596,\n  0.007528609,\n  0.011408586,\n  -0.011943983,\n  0.01582396,\n  -0.0044847773,\n  -0.002293615,\n  0.0029083302,\n  -0.0036486324,\n  -0.0006188464,\n  -0.008189593,\n  -0.011983642,\n  -0.0058992836,\n  -0.009438854,\n  0.00040051507,\n  0.03212383,\n  0.010800481,\n  -0.024297778,\n  -0.018917365,\n  0.008890237,\n  -0.01919498,\n  -0.008520085,\n  -0.0059786015,\n  0.0012897453,\n  0.019380055,\n  0.017767254,\n  0.0058497097,\n  0.017172368,\n  -0.008771259,\n  0.020530168,\n  -0.020212894,\n  -0.006999822,\n  0.014634188,\n  0.00914802,\n  0.013431198,\n  -0.007250996,\n  -0.018586874,\n  -0.033128526,\n  0.008044177,\n  0.0013451027,\n  -0.010959117,\n  0.019485813,\n  -0.012043131,\n  -0.018481117,\n  -0.013801348,\n  0.008249082,\n  -0.028422318,\n  -0.007819442,\n  0.031066254,\n  -0.024667928,\n  0.0003176855,\n  0.01041711,\n  
-0.020781342,\n  -0.0018276211,\n  -0.017106269,\n  0.0016004079,\n  -0.0016499816,\n  0.016498163,\n  -0.0054861684,\n  -0.0048582335,\n  0.022314824,\n  -0.031489283,\n  0.020860659,\n  0.008830748,\n  -0.017291345,\n  0.031039815,\n  -0.0032008158,\n  0.006788307,\n  -0.016868316,\n  0.004954076,\n  -0.01413184,\n  0.0004742561,\n  0.014211159,\n  -0.017674716,\n  -0.025183495,\n  -0.030907618,\n  -0.02161418,\n  -0.021138273,\n  0.012036521,\n  -0.030484589,\n  -0.0019614703,\n  -0.0021234115,\n  0.018018428,\n  0.0010063483,\n  -0.008929895,\n  -0.013213073,\n  -0.01038406,\n  -0.017754033,\n  0.0103047425,\n  -0.009656978,\n  0.014766386,\n  -0.0011757255,\n  0.000121972225,\n  0.0113226585,\n  -0.018229943,\n  -0.018190283,\n  0.0075153895,\n  -0.0010294828,\n  0.015731422,\n  0.02626751,\n  0.20929402,\n  -0.0133915385,\n  -0.0062231654,\n  0.047749493,\n  0.016960854,\n  0.012776824,\n  0.0138938865,\n  0.0133915385,\n  -0.0048119645,\n  0.03497928,\n  -0.015731422,\n  0.011891104,\n  -0.016207332,\n  0.006632976,\n  0.014885362,\n  0.004111321,\n  -0.02501164,\n  -0.01834892,\n  -0.023226982,\n  -0.0015987554,\n  -0.0071187993,\n  -0.0103510115,\n  -0.021495204,\n  -0.015229074,\n  0.036169052,\n  0.00056803325,\n  -0.009974251,\n  0.00013457224,\n  0.008863797,\n  0.0029893008,\n  -0.020146796,\n  -0.037728973,\n  0.007872321,\n  0.010093228,\n  -0.005819965,\n  -0.0026042776,\n  0.007759954,\n  0.002749694,\n  0.03080186,\n  0.0050631384,\n  0.0020688802,\n  -0.0061868113,\n  0.0034701666,\n  -0.0052713486,\n  -0.014806044,\n  -0.0032834387,\n  -0.005459729,\n  -0.016934413,\n  -0.001425247,\n  0.021376226,\n  -0.0333136,\n  0.0066395854,\n  0.02456217,\n  0.034688447,\n  0.005201945,\n  -0.00015584767,\n  0.005585316,\n  0.005694378,\n  -0.010337791,\n  0.007568268,\n  -0.01788623,\n  0.033657312,\n  -0.0052746534,\n  0.0144887725,\n  -0.029056862,\n  0.008143324,\n  -0.003946075,\n  0.00010617058,\n  0.008910066,\n  -0.008916676,\n  -0.018547215,\n  
-0.014938242,\n  0.006418156,\n  -0.0037378652,\n  -0.01540093,\n  -0.033948146,\n  0.016207332,\n  0.016339527,\n  0.02368967,\n  0.015850399,\n  0.00458723,\n  0.0006630497,\n  -0.0071584582,\n  -0.022565998,\n  -0.01123012,\n  -0.021006076,\n  0.002749694,\n  0.019380055,\n  0.0017367358,\n  -0.0046665478,\n  -0.00177309,\n  -0.015665324,\n  -0.0065503526,\n  0.011329268,\n  0.022526339,\n  0.012988338,\n  0.003542875,\n  0.018428238,\n  -0.01918176,\n  0.024932321,\n  -0.025368571,\n  -0.00750217,\n  -0.0067651724,\n  0.013867447,\n  -0.008262302,\n  0.01959157,\n  0.0034371174,\n  0.0002048018,\n  0.001364106,\n  -0.017238466,\n  -0.019803084,\n  -0.016326308,\n  -0.00208871,\n  -0.006090969,\n  0.019380055,\n  0.008797699,\n  0.012148889,\n  -0.009075312,\n  0.0012542173,\n  0.009742906,\n  0.012862751,\n  -0.005700988,\n  0.0024836478,\n  -0.0010460074,\n  -0.008361449,\n  -0.015942937,\n  -0.018824829,\n  -0.0041939444,\n  0.007865711,\n  -0.030299513,\n  -0.005585316,\n  -0.011335878,\n  0.026148532,\n  -0.0056381947,\n  -0.013213073,\n  -0.005165591,\n  0.0011261518,\n  0.00049945613,\n  -0.005330837,\n  0.016788997,\n  0.00052506925,\n  -0.020860659,\n  0.0028058777,\n  0.017555738,\n  -0.0076409765,\n  -0.025632964,\n  0.025104178,\n  -0.020953197,\n  -0.011487904,\n  0.0016169325,\n  -0.020331873,\n  0.0030289597,\n  -0.007627757,\n  0.001105496,\n  0.030590346,\n  -0.0041873343,\n  -0.031489283,\n  -0.03326072,\n  -0.0010336139,\n  -0.007323704,\n  -0.005502693,\n  -0.0020110442,\n  0.018851267,\n  0.0030917532,\n  -0.021640621,\n  -0.025223155,\n  -0.17259617,\n  0.048859946,\n  0.0058166604,\n  -0.02667732,\n  0.020490509,\n  0.017846571,\n  0.023385618,\n  -0.0012244731,\n  0.00033586257,\n  0.013312221,\n  0.015691763,\n  0.000437902,\n  -0.034794204,\n  -0.0038204882,\n  -0.012591748,\n  -0.007720295,\n  0.017132709,\n  0.006900674,\n  0.004322836,\n  0.034397613,\n  0.03757034,\n  -0.024773685,\n  0.0030372222,\n  -0.0022275166,\n  
0.008249082,\n  -0.004630194,\n  0.007409632,\n  0.01877195,\n  0.011864665,\n  0.0023811953,\n  -0.02129691,\n  -0.014409454,\n  0.018904146,\n  0.011302829,\n  0.011646541,\n  0.003456947,\n  0.0031760288,\n  -0.024496071,\n  -0.012981729,\n  0.022830391,\n  0.026069215,\n  0.030828299,\n  0.0079384195,\n  -0.017317785,\n  -0.014290477,\n  0.014660628,\n  0.016987292,\n  0.010522868,\n  0.015295173,\n  -0.017621838,\n  0.009914762,\n  -0.009141411,\n  0.0050697485,\n  0.00031004287,\n  0.021984331,\n  0.0037147307,\n  0.003101668,\n  0.015929718,\n  0.016881535,\n  0.00015698373,\n  -0.012036521,\n  -0.004835099,\n  0.018877707,\n  -0.0059389425,\n  0.01080709,\n  -0.015242294,\n  -0.011620101,\n  0.018507555,\n  -0.0140657425,\n  0.028422318,\n  0.007865711,\n  -0.03838996,\n  0.00353957,\n  -0.040505107,\n  0.021058954,\n  0.018692631,\n  -0.023226982,\n  0.007198117,\n  -0.016471725,\n  -0.006537133,\n  -0.021389447,\n  0.01165315,\n  -0.00959088,\n  0.00706592,\n  0.0064908643,\n  0.0017813522,\n  0.0107740415,\n  -0.002746389,\n  -0.020173235,\n  -0.037120868,\n  0.021891795,\n  -0.013173413,\n  -0.0043856297,\n  -0.020662364,\n  0.01750286,\n  0.006176897,\n  0.010093228,\n  0.014277257,\n  -0.01579752,\n  -0.0041807247,\n  0.008453987,\n  -0.0027116875,\n  -0.018467898,\n  0.016445285,\n  0.024879443,\n  -0.019432934,\n  0.018904146,\n  0.021984331,\n  0.008923286,\n  -0.020331873,\n  -0.005258129,\n  0.0077070748,\n  0.012419892,\n  0.033181403,\n  0.0019449458,\n  0.012618187,\n  -0.0011889453,\n  -0.007634367,\n  0.0037709144,\n  -0.016762558,\n  0.041932832,\n  -0.013265952,\n  -0.007422852,\n  -0.010575746,\n  -0.00458723,\n  0.0058232704,\n  -0.12056351,\n  -0.03659208,\n  0.0016210636,\n  0.015057218,\n  -0.022010772,\n  0.0333136,\n  0.010549307,\n  0.007455901,\n  -0.013629492,\n  0.040161397,\n  -0.011877885,\n  -0.025659405,\n  -0.029479893,\n  0.0027513464,\n  0.020239335,\n  -0.013576614,\n  -0.015202635,\n  -0.008361449,\n  -0.005241604,\n  
0.0048780628,\n  0.0014888668,\n  -0.0022754378,\n  0.0075219995,\n  -0.007330314,\n  -0.004633499,\n  -0.0014244209,\n  0.0023167494,\n  0.022552779,\n  0.017066611,\n  -0.013550174,\n  0.009247168,\n  -0.033102084,\n  0.013285781,\n  -0.014343356,\n  -0.0055059977,\n  0.0025613136,\n  -0.049415175,\n  0.021931453,\n  0.018031647,\n  -0.026505463,\n  0.019776646,\n  0.012737164,\n  0.0037213406,\n  -0.04248806,\n  0.011084704,\n  -0.021442326,\n  -0.010694724,\n  0.034794204,\n  0.0092075085,\n  -0.0015227422,\n  -0.030537467,\n  -0.015890058,\n  -0.034344736,\n  -0.0083284,\n  0.027417623,\n  -0.001956513,\n  0.01418472,\n  0.019525472,\n  -0.012849531,\n  0.021838916,\n  0.010873189,\n  0.0053143124,\n  -0.0048483186,\n  0.028633833,\n  -0.007297265,\n  -0.0216274,\n  -0.013001557,\n  0.0050631384,\n  0.00047590857,\n  -0.010218815,\n  -0.008566354,\n  0.00003459839,\n  -0.014858923,\n  0.012565308,\n  -0.02626751,\n  0.013100705,\n  -0.025117397,\n  -0.019208198,\n  0.0040286984,\n  -0.0258577,\n  -0.02829012,\n  -0.016299868,\n  0.016987292,\n  -0.002553051,\n  0.013497296,\n  -0.000829122,\n  -0.0028604087,\n  -0.011983642,\n  -0.0034272028,\n  -0.032837693,\n  -0.0032784813,\n  -0.003919636,\n  0.009405804,\n  -0.004461643,\n  0.012274476,\n  0.020226113,\n  -0.005915808,\n  -0.008923286,\n  0.016114794,\n  0.009650368,\n  -0.018626533,\n  -0.008645672,\n  -0.062291145,\n  0.037702534,\n  -0.0040981015,\n  0.00562167,\n  0.010291523,\n  0.00007792383,\n  0.020781342,\n  -0.010456769,\n  -0.009438854,\n  -0.016022256,\n  -0.02125725,\n  -0.0006593317,\n  -0.006834576,\n  0.0056481096,\n  -0.032996327,\n  -0.006699074,\n  0.016207332,\n  0.0021845526,\n  0.020873878,\n  0.011117753,\n  0.007462511,\n  -0.020305432,\n  0.013576614,\n  -0.007746734,\n  -0.0032685664,\n  0.016445285,\n  -0.017463202,\n  0.014832484,\n  0.01664358,\n  -0.01124334,\n  0.0040088687,\n  -0.029374136,\n  0.017727595,\n  0.015678544,\n  0.0048086597,\n  -0.002336579,\n  0.0070593106,\n 
 0.01538771,\n  -0.0047756103,\n  0.01664358,\n  -0.027311863,\n  -0.025897358,\n  0.008910066,\n  -0.0026356743,\n  0.0083284,\n  0.010225425,\n  -0.00061760703,\n  -0.005241604,\n  0.01252565,\n  0.006841186,\n  0.004759086,\n  0.008843968,\n  -0.022989027,\n  -0.008896846,\n  -0.017793693,\n  0.00012661978,\n  -0.0028736284,\n  0.009465293,\n  -0.0018408408,\n  -0.011064874,\n  0.018031647,\n  0.007674026,\n  0.013199853,\n  -0.0005915808,\n  0.005859624,\n  0.0042699575,\n  -0.019274298,\n  -0.010397281,\n  0.009055482,\n  -0.030193755,\n  -0.022843612,\n  -0.007574878,\n  -0.0108137,\n  -0.0008617581,\n  0.024628269,\n  0.009670198,\n  -0.002817445,\n  0.023002248,\n  -0.018031647,\n  0.028448757,\n  0.013973204,\n  0.0077335145,\n  -0.03984412,\n  -0.0021201067,\n  0.021058954,\n  0.028580954,\n  -0.010893019,\n  0.001489693,\n  -0.006613146,\n  0.01875873,\n  -0.022737853,\n  -0.016207332,\n  0.0077269045,\n  0.011421806,\n  -0.010793871,\n  -0.0096305385,\n  -0.0021085395,\n  -0.002100277,\n  0.026386486,\n  0.022711415,\n  0.014620969,\n  0.0049672956,\n  -0.008169764,\n  -0.018600093,\n  0.0029810385,\n  -0.0025166972,\n  -0.021984331,\n  -0.04462965,\n  -0.010238644,\n  0.013490686,\n  0.01955191,\n  0.007383193,\n  0.012380233,\n  0.01874551,\n  -0.0140657425,\n  0.005261434,\n  -0.011567222,\n  -0.0092075085,\n  -0.03296989,\n  0.032679055,\n  0.004970601,\n  -0.0030306121,\n  0.02165384,\n  -0.015876839,\n  0.026307167,\n  -0.006666025,\n  -0.006322313,\n  -0.0055522667,\n  0.0108533595,\n  -0.002412592,\n  0.009703247,\n  -0.010760821,\n  -0.010655064,\n  -0.019499032,\n  -0.021164712,\n  0.009954421,\n  -0.00028029858,\n  0.027391182,\n  -0.02076812,\n  0.06710311,\n  -0.011170632,\n  -0.0065239132,\n  0.0108996285,\n  0.011468074,\n  0.02045085,\n  0.007713685,\n  0.0050201747,\n  -0.01459453,\n  -0.006715599,\n  -0.004911112,\n  -0.014197939,\n  -0.013523735,\n  0.00019974115,\n  -0.018838048,\n  0.007799613,\n  -0.011144193,\n  0.015850399,\n  
-0.02129691,\n  0.0043162266,\n  0.012664456,\n  -0.012776824,\n  0.013265952,\n  -0.0071452386,\n  -0.007363363,\n  -0.010628625,\n  0.00040175443,\n  0.0016970767,\n  -0.031462844,\n  -0.015096878,\n  0.00417742,\n  0.00012971813,\n  -0.01788623,\n  -0.011382147,\n  0.020477287,\n  -0.019313956,\n  -0.004382325,\n  -0.023134444,\n  -0.007614537,\n  0.005492778,\n  0.011798567,\n  0.005162286,\n  -0.021006076,\n  -0.023425277,\n  0.029003983,\n  0.0023811953,\n  -0.016299868,\n  -0.0050565284,\n  -0.006120713,\n];\nfinal List<double> _hiVector = [\n  -0.035099167,\n  -0.020636523,\n  -0.015421565,\n  -0.03990691,\n  -0.027375247,\n  0.021122552,\n  -0.022002658,\n  -0.019467426,\n  -0.009484131,\n  -0.013129348,\n  0.029608354,\n  -0.00469609,\n  -0.015145711,\n  -0.014134245,\n  0.009057214,\n  0.015171982,\n  0.038356874,\n  -0.0058257785,\n  0.023920503,\n  -0.01276811,\n  -0.014961808,\n  -0.003100076,\n  -0.006856948,\n  -0.008439826,\n  -0.02280395,\n  -0.00014428982,\n  0.013543129,\n  -0.016984738,\n  0.0045417426,\n  -0.0223836,\n  0.014620274,\n  -0.00089570525,\n  -0.044951104,\n  -0.009654898,\n  -0.009799393,\n  -0.015736828,\n  0.009858505,\n  -0.02107001,\n  0.015158847,\n  -0.005609036,\n  0.0082296515,\n  -0.0050343396,\n  0.0069423313,\n  -0.013286979,\n  -0.01842969,\n  -0.0084858015,\n  -0.0023890946,\n  0.0063150916,\n  -0.010114655,\n  0.020781018,\n  0.021766212,\n  0.0051624146,\n  -0.028084587,\n  -0.010915946,\n  -0.016367352,\n  -0.006640205,\n  -0.014318148,\n  0.014436372,\n  -0.011172096,\n  -0.018613592,\n  -0.010193471,\n  0.003914503,\n  -0.0026879366,\n  0.012163858,\n  0.0061508925,\n  0.001247912,\n  0.022278512,\n  -0.00041029212,\n  0.0007828185,\n  0.0068897875,\n  0.030054975,\n  0.030054975,\n  -0.0047190776,\n  -0.013608809,\n  0.02431458,\n  -0.01157931,\n  -0.00028057495,\n  0.004364408,\n  -0.006702601,\n  -0.0081705395,\n  0.02640319,\n  -0.026744723,\n  -0.014830449,\n  0.015605467,\n  0.002666591,\n  0.014423235,\n  
0.0005213317,\n  0.019979728,\n  -0.012374032,\n  -0.031946547,\n  0.0062724,\n  0.025746394,\n  0.01405543,\n  0.010351102,\n  -0.011310023,\n  0.02093865,\n  -0.0029654328,\n  0.013162187,\n  -0.0090178065,\n  0.005136143,\n  0.0015262292,\n  0.007953797,\n  -0.0072050495,\n  -0.006134473,\n  -0.036806837,\n  -0.015618604,\n  -0.014699089,\n  -0.0077370545,\n  0.017102962,\n  -0.009562947,\n  -0.006955467,\n  0.03675429,\n  -0.0025483677,\n  -0.032314353,\n  -0.010778018,\n  -0.007060555,\n  0.009779689,\n  0.000030171555,\n  -0.019454291,\n  -0.022488687,\n  0.006810972,\n  -0.004676386,\n  0.008741952,\n  0.0019490415,\n  0.017562719,\n  0.008111428,\n  -0.02955581,\n  -0.0070999623,\n  0.0082887625,\n  0.0016469155,\n  0.04316462,\n  0.012282081,\n  0.0061279046,\n  -0.008065452,\n  -0.03257707,\n  0.030002432,\n  -0.024262035,\n  0.027716782,\n  -0.021687396,\n  -0.032918606,\n  -0.01064666,\n  0.037174642,\n  -0.0021953399,\n  -0.021372134,\n  -0.013818983,\n  -0.010850267,\n  0.017746622,\n  -0.002655097,\n  -0.0038356874,\n  -0.0022774395,\n  0.0027519744,\n  -0.0007926704,\n  0.02286963,\n  -0.0016034027,\n  0.028820198,\n  0.022580639,\n  -0.008065452,\n  -0.010672932,\n  -0.009497267,\n  -0.0062888195,\n  0.00076804054,\n  -0.0042954446,\n  0.02315862,\n  -0.006397191,\n  -0.0050277715,\n  0.02036067,\n  0.020820426,\n  0.029529538,\n  0.011881435,\n  -0.00075695716,\n  0.01064666,\n  0.02202893,\n  -0.03058041,\n  0.0049391044,\n  -0.0033447326,\n  0.0149224,\n  0.010429917,\n  0.015828779,\n  -0.031631283,\n  -0.042954445,\n  -0.016459303,\n  0.014515187,\n  0.035703417,\n  0.03888231,\n  -0.0069094915,\n  0.010640091,\n  0.03139484,\n  0.005132859,\n  0.018561048,\n  -0.025680715,\n  0.016327944,\n  0.026587093,\n  0.012492255,\n  -0.008991534,\n  -0.6990409,\n  -0.026639637,\n  -0.007809302,\n  -0.0050047836,\n  0.015027488,\n  0.049995296,\n  0.02968717,\n  0.022685727,\n  -0.023644648,\n  0.018101292,\n  -0.010863402,\n  0.0062132883,\n  
0.014068565,\n  -0.013247571,\n  0.000043230502,\n  0.0036485007,\n  0.0019868072,\n  -0.0036911923,\n  -0.011283752,\n  0.008439826,\n  -0.0052313786,\n  -0.0051000193,\n  -0.025588764,\n  0.026363783,\n  0.009904481,\n  -0.0056878515,\n  0.0005443195,\n  -0.01064666,\n  -0.024459075,\n  0.013897799,\n  -0.009687738,\n  0.008006341,\n  0.003881663,\n  -0.015500381,\n  0.056011543,\n  0.0019096337,\n  -0.009740282,\n  0.022265377,\n  0.00065638527,\n  0.05648444,\n  0.0014211419,\n  -0.016170312,\n  0.00007896943,\n  0.0017848426,\n  0.005326614,\n  0.010009567,\n  -0.009287092,\n  -0.0016657982,\n  0.011467654,\n  -0.0033233867,\n  0.0076910784,\n  -0.009287092,\n  -0.004531891,\n  0.015395293,\n  0.004167369,\n  -0.0027339123,\n  0.015447836,\n  -0.022633182,\n  -0.0027536163,\n  0.02273827,\n  0.0011592446,\n  0.0028537777,\n  -0.015080031,\n  -0.01951997,\n  -0.014016022,\n  0.016262263,\n  -0.020886106,\n  -0.014830449,\n  0.0039703306,\n  -0.008183676,\n  -0.010232878,\n  0.018482232,\n  -0.02997616,\n  -0.011388839,\n  0.000016471207,\n  0.024406532,\n  0.021306455,\n  -0.00092772406,\n  -0.0053397496,\n  0.0079143895,\n  -0.00342519,\n  -0.007316705,\n  0.0021427963,\n  -0.009589219,\n  0.02524723,\n  -0.022015795,\n  -0.033023693,\n  0.016551254,\n  -0.0036058088,\n  -0.0063118073,\n  0.01193398,\n  0.028636295,\n  0.013661352,\n  -0.021910707,\n  0.0030721622,\n  0.0072772973,\n  -0.014646546,\n  0.006702601,\n  -0.0006354499,\n  -0.018403418,\n  -0.0045515946,\n  0.0034777336,\n  -0.0038553912,\n  -0.008906151,\n  0.018705543,\n  -0.0044005318,\n  -0.013661352,\n  0.028715111,\n  0.026928626,\n  -0.017877981,\n  0.002625541,\n  -0.005287206,\n  -0.0035335612,\n  0.010948786,\n  0.009457859,\n  -0.03210418,\n  -0.013385498,\n  0.032524526,\n  0.023776008,\n  -0.016616933,\n  0.003256065,\n  -0.004206777,\n  0.020032272,\n  -0.012295217,\n  -0.002027857,\n  0.021569174,\n  -0.009201709,\n  -0.0036189447,\n  -0.025404861,\n  0.006200152,\n  0.008301899,\n  
-0.003257707,\n  0.021779347,\n  -0.020400077,\n  0.013365794,\n  -0.007999772,\n  0.00342519,\n  -0.015552924,\n  -0.0035302774,\n  -0.011585877,\n  -0.0078552775,\n  -0.0009154091,\n  0.0047913254,\n  -0.005566344,\n  0.0073889527,\n  -0.02811086,\n  -0.030081246,\n  -0.012557935,\n  -0.0071196663,\n  0.0036025248,\n  0.0026271832,\n  -0.0036320807,\n  -0.00961549,\n  0.020268718,\n  0.031946547,\n  -0.017286865,\n  -0.001231492,\n  -0.021713668,\n  -0.0091557335,\n  -0.0000045924394,\n  0.001249554,\n  0.025102735,\n  -0.021923844,\n  0.0018948559,\n  0.003599241,\n  -0.030028703,\n  -0.004377544,\n  0.021135688,\n  0.0013735242,\n  -0.039512835,\n  0.017063554,\n  -0.017523311,\n  -0.0052182423,\n  0.002953939,\n  0.021135688,\n  0.00060343114,\n  -0.006249412,\n  0.006177164,\n  0.0027289866,\n  -0.03270843,\n  0.004978512,\n  0.0033792143,\n  -0.009195141,\n  0.009759985,\n  0.01994032,\n  0.009589219,\n  0.016879652,\n  0.027716782,\n  -0.014554595,\n  0.0061804485,\n  0.0081508355,\n  0.014173653,\n  -0.016367352,\n  -0.0039276388,\n  0.0016231065,\n  0.011559606,\n  0.0028291477,\n  0.008380714,\n  0.005178835,\n  0.0223836,\n  0.034678817,\n  0.0083281705,\n  0.039223842,\n  -0.005641876,\n  -0.0026830107,\n  -0.031972818,\n  0.0036123767,\n  -0.009306796,\n  0.02116196,\n  0.020189902,\n  0.00029863682,\n  -0.018941991,\n  0.0051098713,\n  0.0031903854,\n  0.018994534,\n  0.018114427,\n  -0.008111428,\n  0.013050532,\n  -0.019795824,\n  0.006587662,\n  0.009063782,\n  -0.014370692,\n  0.012131018,\n  -0.0023595388,\n  0.01157931,\n  0.017339408,\n  0.01842969,\n  0.031289753,\n  0.013464313,\n  -0.0033660783,\n  -0.007290433,\n  -0.003272485,\n  0.003114854,\n  0.0011961893,\n  -0.010935649,\n  0.0032232252,\n  0.0065613897,\n  -0.022593774,\n  0.027191345,\n  -0.008157403,\n  0.0036255126,\n  0.028032044,\n  0.03286606,\n  -0.024459075,\n  0.005727259,\n  0.024853153,\n  0.014265604,\n  0.00591773,\n  0.020255582,\n  0.024695521,\n  0.005421849,\n  
0.011290319,\n  -0.005438269,\n  0.011342864,\n  0.012643319,\n  -0.012052203,\n  -0.01694533,\n  -0.0034580298,\n  0.009346204,\n  0.018547913,\n  0.00006952799,\n  0.01987464,\n  0.019625058,\n  -0.023119211,\n  0.015789371,\n  -0.0031526198,\n  -0.0028275058,\n  -0.0014268889,\n  0.00080005935,\n  0.0012405231,\n  -0.0039768983,\n  -0.0027749622,\n  0.0256019,\n  -0.020255582,\n  0.028610025,\n  0.015145711,\n  -0.005198539,\n  -0.00082058425,\n  -0.005457973,\n  -0.0043414203,\n  -0.02218656,\n  -0.035388157,\n  0.016932195,\n  0.01842969,\n  -0.004702658,\n  -0.02511587,\n  -0.03386439,\n  0.007822438,\n  0.0038159834,\n  0.0072050495,\n  -0.0064891423,\n  0.01714237,\n  0.031316023,\n  -0.023079803,\n  -0.0031542617,\n  0.0039867503,\n  0.042218834,\n  -0.008728816,\n  0.002825864,\n  0.010397078,\n  -0.0021148825,\n  -0.00027626473,\n  0.001712595,\n  -0.005011352,\n  -0.006134473,\n  0.021595445,\n  -0.008354442,\n  -0.022764541,\n  -0.0072970013,\n  0.023723463,\n  -0.0012610479,\n  0.026232423,\n  -0.013884663,\n  0.010377374,\n  0.0014490557,\n  0.009957024,\n  -0.0062691155,\n  -0.00090473617,\n  0.023618376,\n  -0.011736941,\n  -0.020754747,\n  -0.0011748434,\n  -0.014436372,\n  0.0002672338,\n  0.053489447,\n  0.021188231,\n  0.0015689209,\n  -0.0024580583,\n  0.0086828405,\n  0.020150494,\n  0.005389009,\n  -0.020846698,\n  -0.014436372,\n  -0.013950342,\n  0.009201709,\n  -0.013608809,\n  -0.005763383,\n  -0.00095892185,\n  0.009516971,\n  -0.0008784644,\n  0.022436144,\n  0.00067691016,\n  0.0074217925,\n  -0.00742836,\n  -0.0039900346,\n  0.011237776,\n  0.010725475,\n  0.023552697,\n  0.011566173,\n  0.01585505,\n  0.024301443,\n  0.024406532,\n  0.007008011,\n  -0.027296433,\n  -0.0050573274,\n  0.002651813,\n  0.014186789,\n  0.009825665,\n  -0.0048668566,\n  0.028688839,\n  -0.010876538,\n  0.0097862575,\n  0.017904254,\n  -0.006535118,\n  0.0038291195,\n  0.015369021,\n  0.007336409,\n  -0.014252468,\n  0.021542901,\n  -0.020978058,\n  
-0.016511846,\n  0.03055414,\n  -0.0065843775,\n  -0.010764883,\n  0.028662568,\n  0.0005328256,\n  -0.010344533,\n  -0.011099849,\n  -0.006019533,\n  0.01061382,\n  -0.018508505,\n  -0.0052018226,\n  -0.005300342,\n  0.014515187,\n  -0.015171982,\n  -0.016551254,\n  0.012045635,\n  -0.008157403,\n  -0.020045407,\n  -0.031263478,\n  -0.02981853,\n  -0.012400304,\n  -0.016012682,\n  0.003451462,\n  -0.014830449,\n  -0.00074176874,\n  -0.018245786,\n  -0.010889674,\n  0.0061607445,\n  0.011401975,\n  0.0046041384,\n  -0.008439826,\n  0.02389423,\n  0.008630296,\n  -0.013129348,\n  -0.032025363,\n  -0.0018915718,\n  -0.019086486,\n  0.013845255,\n  0.012071907,\n  -0.011067009,\n  -0.0082099475,\n  -0.0034744497,\n  0.0015130932,\n  0.026232423,\n  0.010232878,\n  0.0031345577,\n  -0.0074349283,\n  -0.0011378987,\n  -0.011421679,\n  0.016038952,\n  0.0055959,\n  -0.012860062,\n  -0.013135916,\n  0.0068241083,\n  -0.00044333717,\n  0.0043282844,\n  -0.0089981025,\n  0.015763098,\n  0.0005008068,\n  0.008203379,\n  0.016643206,\n  -0.018272059,\n  -0.013779575,\n  0.031552467,\n  -0.010915946,\n  0.009306796,\n  -0.0029490131,\n  -0.0003255244,\n  0.01714237,\n  0.0003099255,\n  0.019927183,\n  0.0009236191,\n  -0.03058041,\n  0.004659966,\n  -0.039854366,\n  0.03399575,\n  -0.009227981,\n  0.003999886,\n  0.0018915718,\n  0.013063668,\n  -0.016827108,\n  -0.00742836,\n  -0.0009679528,\n  -0.008091724,\n  0.023237435,\n  0.011152392,\n  -0.012229538,\n  -0.022882765,\n  -0.012787814,\n  -0.022501823,\n  0.0067847003,\n  -0.00004271738,\n  -0.032025363,\n  -0.018272059,\n  -0.0067321565,\n  0.0018899299,\n  -0.0067715645,\n  -0.023066668,\n  -0.04508246,\n  -0.014094837,\n  0.015369021,\n  -0.013142483,\n  0.006968603,\n  -0.0050507598,\n  0.0019326216,\n  -0.025102735,\n  -0.035072893,\n  -0.012531663,\n  -0.031815186,\n  -0.015802506,\n  -0.0004962913,\n  0.035125438,\n  0.026849812,\n  0.02167426,\n  0.0056648636,\n  0.007145938,\n  0.0050770314,\n  0.013963479,\n  
0.013221299,\n  -0.0068044043,\n  0.00974685,\n  -0.014974943,\n  0.024787473,\n  0.026284967,\n  0.017917389,\n  0.004820881,\n  0.0026994306,\n  0.018600456,\n  0.012689294,\n  -0.0050244876,\n  -0.015999544,\n  -0.008420122,\n  -0.029240549,\n  -0.008413554,\n  -0.012623615,\n  -0.01605209,\n  0.011021033,\n  0.006567958,\n  -0.01266959,\n  0.032524526,\n  0.017930524,\n  0.039644193,\n  0.0055302205,\n  0.02209461,\n  0.00590131,\n  0.02617988,\n  0.006334795,\n  0.010200039,\n  0.015973274,\n  -0.005737111,\n  -0.0055499244,\n  -0.003756872,\n  0.016879652,\n  -0.0001526024,\n  0.006292104,\n  -0.013405202,\n  0.0072772973,\n  -0.015960138,\n  0.016643206,\n  -0.004187073,\n  -0.018114427,\n  -0.013017693,\n  -0.0072444575,\n  -0.016643206,\n  -0.014344419,\n  -0.0010533362,\n  -0.02473493,\n  0.012078474,\n  -0.004945672,\n  -0.01849537,\n  0.02209461,\n  -0.016695749,\n  -0.03142111,\n  0.0041148257,\n  -0.014515187,\n  0.030606683,\n  -0.010055543,\n  0.016144041,\n  0.012708998,\n  -0.026482007,\n  -0.026219288,\n  -0.005336466,\n  0.0027421224,\n  -0.00038012056,\n  0.017523311,\n  0.0018242503,\n  -0.004344704,\n  -0.02167426,\n  0.008617161,\n  0.0005118902,\n  0.007152506,\n  -0.01672202,\n  0.008242787,\n  0.012085042,\n  -0.0087813595,\n  -0.02360524,\n  -0.003265917,\n  -0.024340851,\n  0.010797722,\n  0.008709112,\n  0.0050441916,\n  -0.009372476,\n  -0.012610479,\n  -0.004988364,\n  0.022252241,\n  -0.0298448,\n  0.012485688,\n  0.022462416,\n  -0.016511846,\n  -0.011408542,\n  0.011250911,\n  -0.023106076,\n  0.022475552,\n  0.009050646,\n  0.011743508,\n  -0.019178437,\n  0.023697192,\n  0.011152392,\n  -0.002157574,\n  -0.022081474,\n  -0.0010730401,\n  0.0003431758,\n  0.029345635,\n  -0.0075334474,\n  0.00006573089,\n  -0.0018554481,\n  0.012229538,\n  0.006659909,\n  -0.0016304955,\n  -0.010725475,\n  -0.02273827,\n  -0.025326045,\n  -0.0017322989,\n  0.039223842,\n  0.019808961,\n  -0.017891116,\n  -0.019533107,\n  -0.016669476,\n  
-0.024879424,\n  -0.026455734,\n  0.0040064543,\n  0.001709311,\n  0.0026386771,\n  -0.02231792,\n  -0.025864618,\n  0.0035007214,\n  0.017405089,\n  0.017851708,\n  -0.009589219,\n  -0.011296887,\n  0.01727373,\n  -0.02074161,\n  0.010863402,\n  -0.0066040815,\n  -0.0018324602,\n  -0.04363751,\n  0.018810632,\n  -0.0052576503,\n  -0.018508505,\n  0.0081705395,\n  -0.008512073,\n  -0.03483645,\n  -0.0076451027,\n  0.0100818155,\n  0.014961808,\n  0.023434473,\n  -0.008045748,\n  0.021950115,\n  0.008610592,\n  -0.0035040055,\n  -0.003428474,\n  -0.022212833,\n  0.009149165,\n  -0.024406532,\n  0.006715737,\n  -0.008656569,\n  0.00038935675,\n  0.018823767,\n  -0.014974943,\n  0.024078133,\n  -0.0030541003,\n  -0.020400077,\n  -0.020255582,\n  -0.006988307,\n  0.021556037,\n  -0.011723804,\n  0.017444495,\n  -0.0034843015,\n  -0.006663193,\n  -0.022278512,\n  0.010935649,\n  0.0049128323,\n  -0.008636865,\n  0.016393622,\n  0.003740452,\n  -0.011927411,\n  0.005927582,\n  -0.0050671794,\n  -0.01778603,\n  0.0013374004,\n  -0.029240549,\n  -0.005632024,\n  0.0026222572,\n  -0.005789655,\n  0.01885004,\n  0.013910934,\n  -0.033338953,\n  -0.021871299,\n  0.0069357636,\n  -0.022396736,\n  -0.00685038,\n  0.0019079917,\n  0.015513516,\n  0.029765984,\n  0.0063315113,\n  0.006232992,\n  0.019966591,\n  0.0070671225,\n  0.016866516,\n  -0.0037667237,\n  -0.017129233,\n  0.007178778,\n  -0.0049325363,\n  0.01556606,\n  0.0030672364,\n  -0.035177983,\n  -0.020006,\n  0.019638194,\n  -0.0034843015,\n  -0.012938877,\n  0.023421338,\n  -0.006968603,\n  -0.002356255,\n  -0.015605467,\n  0.01054814,\n  0.0007011295,\n  0.0004942388,\n  0.008446394,\n  -0.00987164,\n  -0.0072444575,\n  0.02218656,\n  -0.024051862,\n  -0.00021715311,\n  -0.008249355,\n  0.000620672,\n  -0.006282252,\n  0.0033086087,\n  -0.027480336,\n  -0.0067912685,\n  0.015999544,\n  -0.025299773,\n  0.013832119,\n  0.0055959,\n  -0.02666591,\n  0.038383145,\n  -0.017194914,\n  -0.0031017181,\n  -0.026744723,\n  
0.0072313217,\n  -0.009477563,\n  -0.015789371,\n  0.021477222,\n  -0.008991534,\n  -0.009195141,\n  -0.00993732,\n  -0.005441553,\n  -0.03315505,\n  -0.0007163179,\n  -0.033260137,\n  -0.00075121014,\n  -0.0194937,\n  0.032655887,\n  -0.0024564161,\n  0.0026764427,\n  0.0025303056,\n  -0.008301899,\n  -0.017194914,\n  -0.0084660975,\n  -0.00742836,\n  0.020268718,\n  0.013241003,\n  -0.01058098,\n  -0.0010853551,\n  -0.0036977602,\n  -0.026219288,\n  -0.006837244,\n  0.018272059,\n  -0.00071057095,\n  0.02022931,\n  0.19672348,\n  -0.0039506266,\n  0.0016715452,\n  0.04332225,\n  0.012196697,\n  0.0021033885,\n  0.01605209,\n  0.02231792,\n  -0.016774565,\n  0.02216029,\n  -0.026232423,\n  0.010003,\n  -0.020111086,\n  0.003720748,\n  0.01463341,\n  0.0014318147,\n  -0.034205925,\n  -0.0064694383,\n  -0.026613366,\n  -0.0072444575,\n  -0.011599014,\n  -0.006988307,\n  -0.014396964,\n  -0.0032002374,\n  0.030606683,\n  0.007323273,\n  0.005737111,\n  0.0064267465,\n  0.015894458,\n  0.005632024,\n  -0.011907708,\n  -0.03139484,\n  0.016511846,\n  0.0087813595,\n  -0.000110731664,\n  0.000021923124,\n  -0.008242787,\n  0.0039900346,\n  0.015907593,\n  -0.004325,\n  -0.0041509494,\n  0.010429917,\n  0.014869857,\n  -0.014515187,\n  -0.017050419,\n  0.0050737476,\n  0.015369021,\n  -0.029765984,\n  0.0019884491,\n  0.012203266,\n  -0.018692408,\n  0.0025762815,\n  0.02753288,\n  0.027165074,\n  -0.008630296,\n  -0.00278974,\n  0.008183676,\n  -0.008630296,\n  -0.010344533,\n  0.00017825847,\n  -0.0073955203,\n  0.023434473,\n  -0.0093133645,\n  0.0088601755,\n  -0.012925741,\n  0.013004556,\n  -0.011165529,\n  -0.0072641615,\n  -0.002346403,\n  -0.022882765,\n  -0.007375817,\n  -0.044268034,\n  0.011651557,\n  -0.013595672,\n  -0.027454063,\n  -0.0028915433,\n  0.03515171,\n  0.021332728,\n  0.01643303,\n  0.016472438,\n  0.017733486,\n  0.0050343396,\n  -0.006111485,\n  -0.027243888,\n  -0.011750077,\n  -0.027348977,\n  0.015382157,\n  0.0023858107,\n  
-0.011395407,\n  0.0061935843,\n  -0.0063380795,\n  -0.026324375,\n  -0.008406986,\n  -0.00688322,\n  0.0023726746,\n  0.022790814,\n  -0.00012704893,\n  0.02158231,\n  0.0011773064,\n  0.015080031,\n  -0.014882992,\n  -0.016235992,\n  -0.0056977035,\n  0.018863175,\n  -0.004797893,\n  0.02473493,\n  0.0020771166,\n  0.011021033,\n  -0.009214845,\n  -0.02145095,\n  -0.0006465333,\n  -0.03215672,\n  0.022239106,\n  -0.0064563025,\n  0.023421338,\n  0.013845255,\n  0.0003680109,\n  -0.006492426,\n  0.014094837,\n  -0.0000065519225,\n  0.0086434325,\n  -0.015671147,\n  0.008374146,\n  0.014081702,\n  -0.010489029,\n  -0.021148823,\n  -0.032471985,\n  -0.0010081815,\n  0.0033578684,\n  -0.024301443,\n  0.007809302,\n  -0.009300228,\n  0.02158231,\n  -0.009851936,\n  -0.016406758,\n  0.0011411826,\n  0.0070539867,\n  -0.0005283101,\n  -0.012347761,\n  0.014738497,\n  -0.003421906,\n  -0.019112756,\n  0.013556265,\n  0.008282195,\n  0.002630467,\n  -0.029293092,\n  0.032603342,\n  -0.027348977,\n  0.004091838,\n  -0.009044078,\n  -0.025417997,\n  -0.007651671,\n  -0.0052313786,\n  0.0083478745,\n  0.03299742,\n  -0.01308994,\n  -0.049758848,\n  -0.022633182,\n  -0.0019933751,\n  0.0021083145,\n  -0.02122764,\n  0.007040851,\n  0.024656113,\n  -0.013293547,\n  0.0016863232,\n  -0.023106076,\n  -0.17087199,\n  0.018048748,\n  0.00784871,\n  -0.015789371,\n  0.016393622,\n  -0.004650114,\n  0.033522855,\n  -0.016800836,\n  0.00026661804,\n  0.015894458,\n  0.019414883,\n  0.0021198085,\n  -0.034022022,\n  -0.017168641,\n  -0.0008883163,\n  -0.010383941,\n  -0.010318262,\n  0.0081311315,\n  0.01534275,\n  0.04093151,\n  0.03270843,\n  -0.02595657,\n  0.019283524,\n  -0.01951997,\n  0.025851483,\n  0.0031838175,\n  -0.0044202358,\n  0.02813713,\n  0.01994032,\n  -0.00562874,\n  0.005944002,\n  -0.018771224,\n  0.023263706,\n  0.01276811,\n  -0.0007959544,\n  0.011750077,\n  -0.009530107,\n  -0.031263478,\n  -0.0063512153,\n  0.022002658,\n  0.026284967,\n  0.016262263,\n  
0.0045811506,\n  -0.005441553,\n  -0.011789484,\n  0.028610025,\n  0.029503267,\n  0.01183546,\n  0.008334738,\n  -0.024669249,\n  0.0072050495,\n  -0.020203039,\n  0.020426348,\n  0.0014539816,\n  0.014909265,\n  0.01205877,\n  -0.003287263,\n  0.015408429,\n  0.0077173505,\n  -0.013260706,\n  -0.007500608,\n  -0.020899242,\n  0.021253912,\n  0.00016686718,\n  0.015001216,\n  -0.040379804,\n  -0.015684282,\n  0.017733486,\n  -0.029949887,\n  0.015316478,\n  0.007113098,\n  -0.035466973,\n  -0.0005964527,\n  -0.022396736,\n  0.020465756,\n  0.0067847003,\n  -0.01643303,\n  0.00246791,\n  -0.011973387,\n  0.0045745824,\n  -0.017391952,\n  0.023789143,\n  -0.010600684,\n  0.002648529,\n  0.007842141,\n  -0.006091781,\n  -0.0032478552,\n  -0.009339636,\n  -0.0044235196,\n  -0.01283379,\n  0.02007168,\n  -0.018705543,\n  -0.019375475,\n  -0.018350873,\n  0.022856493,\n  0.02286963,\n  0.0015984768,\n  -0.0027306285,\n  0.006955467,\n  0.006682897,\n  -0.0059505696,\n  0.003740452,\n  -0.02286963,\n  0.007888117,\n  0.016669476,\n  -0.02273827,\n  0.012183562,\n  0.023224298,\n  0.022817085,\n  -0.020203039,\n  -0.012873197,\n  0.00836101,\n  -0.007303569,\n  0.016735157,\n  0.0069620353,\n  0.019730145,\n  -0.0081377,\n  -0.013595672,\n  0.023933638,\n  -0.018482232,\n  0.03659666,\n  -0.013818983,\n  -0.01405543,\n  -0.008196811,\n  -0.009957024,\n  -0.014002886,\n  -0.11296888,\n  -0.027138801,\n  0.01022631,\n  0.01122464,\n  -0.008761656,\n  0.021661125,\n  0.006955467,\n  -0.0024038726,\n  -0.03244571,\n  0.016984738,\n  -0.012242673,\n  -0.028032044,\n  -0.028767655,\n  -0.007375817,\n  0.016708884,\n  -0.0066566253,\n  -0.018127564,\n  -0.0056681475,\n  -0.010502164,\n  0.028373579,\n  -0.01376644,\n  -0.00075983064,\n  -0.0050376235,\n  -0.030895673,\n  -0.013569401,\n  -0.0070736906,\n  -0.011638422,\n  0.035834778,\n  0.027348977,\n  0.0030803722,\n  -0.009076918,\n  -0.022659454,\n  0.016472438,\n  -0.021017466,\n  0.007671375,\n  0.021267047,\n  
-0.03386439,\n  0.0039637624,\n  0.0035204254,\n  -0.030895673,\n  0.0075794235,\n  0.025063327,\n  0.00070974993,\n  -0.039092485,\n  -0.0019276956,\n  -0.0090178065,\n  -0.007809302,\n  0.022462416,\n  0.012997989,\n  -0.011526766,\n  -0.03701701,\n  -0.015316478,\n  -0.033916935,\n  -0.009602354,\n  0.0228959,\n  0.008794496,\n  0.010331398,\n  0.026140472,\n  -0.009819097,\n  0.019625058,\n  -0.0024301445,\n  -0.0042691724,\n  -0.01607836,\n  0.030212605,\n  -0.01109328,\n  -0.015868185,\n  -0.003083656,\n  0.0046041384,\n  -0.012071907,\n  -0.0076779425,\n  -0.011264048,\n  0.0042330488,\n  -0.019598786,\n  -0.0016945332,\n  -0.026390055,\n  0.02244928,\n  -0.028583752,\n  -0.016314806,\n  0.010370805,\n  -0.02229165,\n  -0.022593774,\n  -0.013648217,\n  0.012505392,\n  -0.015145711,\n  0.023447609,\n  -0.0028669136,\n  0.002014721,\n  0.002625541,\n  0.010594116,\n  -0.013011124,\n  -0.013595672,\n  0.011303456,\n  0.014948672,\n  -0.014252468,\n  0.02007168,\n  0.011172096,\n  -0.013661352,\n  0.0044891993,\n  0.016984738,\n  0.00961549,\n  -0.004502335,\n  -0.030107519,\n  -0.056011543,\n  0.020794155,\n  -0.01302426,\n  -0.002510602,\n  0.02145095,\n  0.000038484126,\n  0.012919173,\n  -0.008597457,\n  -0.008617161,\n  -0.013477449,\n  -0.004062282,\n  -0.008439826,\n  -0.0050573274,\n  0.0045581628,\n  -0.031815186,\n  -0.020951785,\n  0.015881322,\n  0.022790814,\n  0.00900467,\n  0.020689066,\n  0.01398975,\n  -0.005300342,\n  0.013674488,\n  -0.0048570046,\n  -0.010318262,\n  0.007008011,\n  -0.013543129,\n  -0.0014112899,\n  0.0007659881,\n  -0.015671147,\n  0.0071393703,\n  -0.044399396,\n  0.002331625,\n  0.012091611,\n  0.0101015195,\n  -0.009149165,\n  0.0067715645,\n  0.036386486,\n  0.00033455534,\n  0.017733486,\n  -0.01816697,\n  -0.012387169,\n  -0.011388839,\n  -0.019204708,\n  -0.0092739565,\n  0.011605581,\n  -0.022252241,\n  0.014541458,\n  0.027059985,\n  -0.00064694387,\n  0.018350873,\n  0.015986409,\n  -0.012702431,\n  -0.00535617,\n  
-0.016879652,\n  -0.01498808,\n  -0.013976614,\n  0.008847039,\n  -0.008085156,\n  -0.012892901,\n  0.021148823,\n  0.0074349283,\n  0.01829833,\n  -0.0032051634,\n  0.018731816,\n  -0.015658012,\n  -0.015868185,\n  0.019756418,\n  0.0027109245,\n  -0.028032044,\n  -0.010587548,\n  0.0078749815,\n  -0.013201595,\n  -0.0015262292,\n  0.007585991,\n  0.013648217,\n  -0.008814199,\n  0.0052773543,\n  -0.019533107,\n  0.02910919,\n  0.012912605,\n  -0.012938877,\n  -0.039670464,\n  0.007566287,\n  0.026350647,\n  0.019598786,\n  -0.007303569,\n  0.0091360295,\n  0.010666364,\n  0.017378816,\n  -0.0046107066,\n  -0.015421565,\n  0.0017503607,\n  0.0031854594,\n  0.0059932615,\n  0.016091496,\n  0.001255301,\n  -0.0013029187,\n  0.016025817,\n  0.022633182,\n  0.008952127,\n  -0.0052018226,\n  0.006374203,\n  -0.012091611,\n  0.0013981541,\n  0.009582651,\n  -0.015027488,\n  -0.033785574,\n  -0.006830676,\n  0.01456773,\n  0.012676159,\n  0.0059932615,\n  0.023697192,\n  0.02151663,\n  -0.01592073,\n  0.022002658,\n  -0.02280395,\n  -0.011625285,\n  -0.031788915,\n  0.039223842,\n  -0.008833903,\n  0.022265377,\n  0.028032044,\n  -0.011809188,\n  0.01807502,\n  0.0016116126,\n  -0.0062132883,\n  -0.017838573,\n  0.020570844,\n  0.005733827,\n  0.0075400155,\n  -0.014239333,\n  -0.013234435,\n  -0.01376644,\n  -0.029056646,\n  0.0027536163,\n  -0.0089783985,\n  0.033470314,\n  -0.025759531,\n  0.05627426,\n  0.0012273871,\n  -0.01054814,\n  0.026705317,\n  0.018547913,\n  0.013569401,\n  0.010830563,\n  -0.007947229,\n  -0.041246776,\n  -0.00068881456,\n  -0.00010241909,\n  -0.01109328,\n  0.002343119,\n  -0.013188459,\n  -0.016695749,\n  0.0044465074,\n  -0.011901139,\n  0.013503721,\n  -0.018403418,\n  -0.0015910878,\n  0.007671375,\n  0.0015221242,\n  0.005280638,\n  -0.002957223,\n  -0.02682354,\n  -0.023881095,\n  0.019414883,\n  -0.000051209547,\n  -0.021753076,\n  -0.016997876,\n  -0.0013571043,\n  0.015552924,\n  -0.0223836,\n  -0.011053873,\n  0.01885004,\n  
-0.0306855,\n  -0.0046369783,\n  -0.0086828405,\n  -0.007027715,\n  0.01176978,\n  0.015053759,\n  0.013181891,\n  -0.033181325,\n  -0.015290205,\n  0.023749735,\n  0.014291876,\n  -0.011592446,\n  -0.000933471,\n  0.004673102,\n];\nfinal List<double> _byeVector = [\n  -0.0084679825,\n  -0.018047791,\n  0.0074078683,\n  -0.03914664,\n  -0.02015509,\n  0.01239816,\n  -0.014182254,\n  -0.026761409,\n  -0.017000604,\n  -0.003285383,\n  0.041241013,\n  0.0033225517,\n  0.0005603574,\n  -0.0024498971,\n  0.0044473065,\n  -0.005316729,\n  0.03451834,\n  0.00036663614,\n  0.03413049,\n  -0.022624379,\n  -0.013820264,\n  0.010045225,\n  -0.012566227,\n  -0.019599177,\n  -0.015462147,\n  -0.0031900376,\n  0.0016984445,\n  -0.025041956,\n  0.002327079,\n  -0.025507372,\n  0.01631541,\n  0.008325771,\n  -0.030562304,\n  0.0021137635,\n  -0.0049838275,\n  -0.031338,\n  -0.00298157,\n  -0.005840322,\n  0.02218482,\n  -0.008461518,\n  0.012805399,\n  -0.0066451035,\n  -0.0053555137,\n  -0.001552194,\n  -0.033742648,\n  0.0031851893,\n  0.014802808,\n  0.00008080136,\n  -0.015294081,\n  0.030820869,\n  0.02545566,\n  0.007582399,\n  -0.024486043,\n  -0.025248807,\n  -0.0029169288,\n  0.021990897,\n  -0.005552669,\n  0.020168018,\n  -0.013044571,\n  -0.022352887,\n  0.008552016,\n  -0.011506113,\n  -0.004473163,\n  0.029734898,\n  0.001489169,\n  -0.0044376105,\n  0.008829972,\n  0.0027310858,\n  -0.020193875,\n  0.005972836,\n  0.03226883,\n  0.019754315,\n  0.0063994676,\n  -0.020206803,\n  0.01993531,\n  -0.01609563,\n  -0.02399477,\n  0.0065287496,\n  -0.03508718,\n  0.0011594994,\n  0.005468636,\n  -0.0375694,\n  -0.032294687,\n  0.017892651,\n  0.022740733,\n  -0.0055203484,\n  -0.006690352,\n  0.006147367,\n  -0.015552645,\n  -0.01965089,\n  0.00500322,\n  0.030795014,\n  -0.015138942,\n  0.016845467,\n  0.003451834,\n  0.01084031,\n  -0.010976057,\n  0.027433677,\n  -0.0042598476,\n  -0.017711658,\n  -0.0017081407,\n  0.0043244883,\n  -0.0067743855,\n  0.0030817636,\n  
-0.0012346447,\n  -0.0036813098,\n  0.0051777507,\n  -0.019754315,\n  0.03930178,\n  -0.020749789,\n  -0.019547464,\n  0.047860257,\n  -0.003936642,\n  -0.03648343,\n  -0.011519041,\n  -0.011480257,\n  -0.014350321,\n  -0.008073672,\n  -0.011150587,\n  -0.020840285,\n  0.02045244,\n  0.015100157,\n  0.022391671,\n  -0.031441424,\n  0.007970246,\n  0.0070394143,\n  -0.0012669653,\n  -0.02261145,\n  0.02131863,\n  0.002567867,\n  0.0343632,\n  0.0033839608,\n  0.022016753,\n  -0.012249486,\n  -0.010452463,\n  0.010704564,\n  -0.024046483,\n  0.0074078683,\n  -0.010349038,\n  -0.022598524,\n  0.0077181454,\n  0.034544196,\n  -0.0023545516,\n  -0.01192628,\n  0.006506125,\n  0.00714284,\n  0.020129234,\n  0.007097591,\n  -0.0013849352,\n  0.007705217,\n  0.006496429,\n  -0.0027246217,\n  0.023257863,\n  0.00014816949,\n  0.01565607,\n  0.004696175,\n  -0.010142186,\n  -0.0046121413,\n  -0.021693548,\n  0.010400751,\n  0.009101464,\n  -0.0015562341,\n  0.03379436,\n  -0.021525482,\n  0.0024062644,\n  0.029890038,\n  0.016418835,\n  0.02399477,\n  0.011887495,\n  -0.010711027,\n  -0.0010649619,\n  0.020491224,\n  -0.03878465,\n  0.0047931364,\n  -0.0056787194,\n  0.028959205,\n  0.0008597264,\n  0.014350321,\n  -0.022314101,\n  -0.023038082,\n  -0.019107904,\n  0.026334777,\n  0.020840285,\n  0.028674785,\n  -0.0015117934,\n  0.020620506,\n  0.019586248,\n  0.010232683,\n  0.0024773695,\n  -0.020620506,\n  0.006509357,\n  0.021279845,\n  0.021564266,\n  -0.019159617,\n  -0.66895765,\n  -0.012230094,\n  0.0019182242,\n  -0.001672588,\n  -0.00018907517,\n  0.00431156,\n  0.016470548,\n  0.0088041155,\n  -0.023555212,\n  0.0130833555,\n  -0.0003403757,\n  0.007382012,\n  0.0032966954,\n  -0.015759496,\n  -0.0021105313,\n  0.010206827,\n  0.0051034135,\n  -0.012462801,\n  -0.0004932923,\n  0.015862921,\n  -0.010601138,\n  0.025688367,\n  0.0047349595,\n  0.020775644,\n  0.0008043775,\n  -0.012630868,\n  0.0022543578,\n  0.007873284,\n  -0.015849994,\n  -0.00021331558,\n  
-0.020413654,\n  0.008306379,\n  0.0049030264,\n  -0.013910761,\n  0.050859604,\n  0.014363249,\n  -0.0015319937,\n  0.013678053,\n  0.0032433665,\n  0.052850552,\n  0.006625711,\n  -0.050083913,\n  -0.009760804,\n  0.006638639,\n  -0.0014019036,\n  0.0031173164,\n  0.007330299,\n  0.0055106524,\n  0.004537804,\n  -0.012882968,\n  0.012689045,\n  -0.00074579654,\n  0.004957971,\n  0.007937925,\n  0.002991266,\n  0.001612795,\n  0.030717444,\n  -0.01261794,\n  0.01478988,\n  0.018577848,\n  0.013548771,\n  0.0070588063,\n  -0.009734947,\n  -0.0008500303,\n  -0.006373611,\n  0.009179034,\n  -0.0065966225,\n  -0.0059146592,\n  0.015565573,\n  -0.010924343,\n  -0.010484784,\n  0.025636654,\n  -0.022094322,\n  0.0012815095,\n  0.015759496,\n  0.028623072,\n  0.019418182,\n  -0.0048674736,\n  -0.013871977,\n  0.009851301,\n  0.020349013,\n  -0.00024604012,\n  -0.014880378,\n  -0.011538434,\n  0.036095582,\n  -0.018358069,\n  -0.02820937,\n  0.0014972491,\n  -0.006483501,\n  -0.018306356,\n  0.003396889,\n  0.024395546,\n  -0.006916596,\n  -0.01892691,\n  0.005025844,\n  -0.0050452366,\n  -0.010336109,\n  0.011131194,\n  0.004360041,\n  -0.012630868,\n  -0.016832538,\n  -0.0087653315,\n  0.017414307,\n  -0.010161579,\n  -0.007453117,\n  -0.0032045818,\n  -0.020478295,\n  0.018215857,\n  0.027537102,\n  -0.0123076625,\n  0.017931437,\n  0.00057368964,\n  0.0063348264,\n  0.008817044,\n  0.025843505,\n  -0.029372908,\n  -0.0019473126,\n  0.0018551991,\n  0.026166711,\n  -0.031493135,\n  0.006441484,\n  -0.009217818,\n  0.013949546,\n  -0.009237211,\n  0.012411088,\n  0.029372908,\n  0.0039948192,\n  -0.024951458,\n  -0.039560344,\n  0.00262766,\n  -0.0032886153,\n  0.0006258065,\n  0.027252682,\n  0.0028215833,\n  -0.00031936736,\n  -0.0057272003,\n  0.00391725,\n  -0.01478988,\n  0.00523916,\n  0.016199056,\n  -0.005126038,\n  -0.00045450762,\n  0.002931473,\n  0.007692289,\n  0.0064673405,\n  -0.010633458,\n  -0.015281152,\n  -0.013432417,\n  -0.01936647,\n  
0.021512553,\n  0.00953456,\n  -0.0092630675,\n  0.010284397,\n  0.029476335,\n  0.019043263,\n  -0.020969568,\n  -0.022107251,\n  -0.014880378,\n  -0.03263082,\n  -0.006554606,\n  0.020336086,\n  0.036948845,\n  -0.022870015,\n  0.016354194,\n  -0.008920469,\n  -0.03131214,\n  0.01084031,\n  0.028623072,\n  -0.003636061,\n  -0.03524232,\n  0.0041531897,\n  -0.030252028,\n  0.01287004,\n  0.01181639,\n  0.018383924,\n  -0.01062053,\n  0.0036748457,\n  0.00068761955,\n  -0.008112456,\n  -0.0044957874,\n  0.0002446261,\n  -0.0008459902,\n  -0.00881058,\n  -0.006625711,\n  0.023400072,\n  0.001670972,\n  0.0045119477,\n  0.021887472,\n  -0.02102128,\n  0.019263044,\n  0.0014423042,\n  0.028648928,\n  -0.01776337,\n  -0.0019796332,\n  0.0055365087,\n  0.00020664946,\n  0.008642513,\n  0.011118267,\n  0.017634088,\n  0.010484784,\n  0.024524827,\n  -0.013180317,\n  0.027976662,\n  0.004906258,\n  0.004647694,\n  -0.029450478,\n  0.009831909,\n  -0.030381309,\n  0.003393657,\n  0.006305738,\n  -0.013070427,\n  -0.016069774,\n  -0.017556518,\n  0.011952137,\n  0.015061373,\n  0.02646406,\n  -0.009657378,\n  0.0026939171,\n  -0.033820216,\n  -0.013445346,\n  0.0024127285,\n  -0.011861639,\n  0.0017695497,\n  -0.0001308982,\n  0.001552194,\n  0.012999322,\n  0.014751095,\n  0.02015509,\n  0.010943736,\n  -0.017608231,\n  -0.020555865,\n  0.0046153734,\n  -0.00027310857,\n  0.0010205212,\n  -0.009166106,\n  -0.018138288,\n  0.025145382,\n  -0.014156397,\n  0.037905533,\n  -0.037802108,\n  -0.00022341574,\n  0.024977315,\n  0.03387193,\n  -0.02443433,\n  -0.011221692,\n  0.0047446555,\n  0.009890086,\n  -0.007511294,\n  0.013794407,\n  0.020142162,\n  -0.020129234,\n  0.013652197,\n  -0.0213962,\n  0.00689074,\n  0.005294105,\n  -0.01688425,\n  -0.017104032,\n  0.026011573,\n  0.031570707,\n  0.028235225,\n  0.007931461,\n  0.0038655368,\n  -0.0006855995,\n  0.013484131,\n  0.0013146381,\n  -0.0065352134,\n  0.0026292761,\n  0.007931461,\n  -0.009922407,\n  -0.0038461445,\n  
-0.031338,\n  -0.022249462,\n  0.013962475,\n  -0.012889432,\n  0.030950151,\n  0.020982496,\n  0.01762116,\n  0.00049975637,\n  -0.02756296,\n  -0.016625687,\n  -0.034621764,\n  -0.021137634,\n  0.02204261,\n  0.015643142,\n  -0.0037782714,\n  -0.027252682,\n  -0.023451785,\n  0.01058821,\n  -0.006638639,\n  0.011577218,\n  -0.007828035,\n  0.03407878,\n  -0.007369084,\n  -0.0058694105,\n  -0.01790558,\n  0.002739166,\n  0.022546811,\n  0.005911427,\n  0.00060075807,\n  0.02508074,\n  -0.014156397,\n  -0.009127321,\n  0.0054427795,\n  -0.027743954,\n  0.010853238,\n  0.0032401343,\n  -0.0048771696,\n  -0.026864834,\n  -0.023193222,\n  0.004182278,\n  -0.022740733,\n  -0.005436315,\n  -0.019314757,\n  -0.015914636,\n  0.0071880887,\n  0.010303789,\n  -0.018681273,\n  -0.013031643,\n  0.029269483,\n  -0.016302481,\n  -0.018823484,\n  -0.002210725,\n  0.0033613364,\n  -0.011525505,\n  0.070432924,\n  0.039172497,\n  0.008099528,\n  0.021370342,\n  0.0078345,\n  0.025041956,\n  -0.013445346,\n  -0.023917202,\n  0.008713618,\n  0.002031346,\n  -0.0026712928,\n  -0.010174506,\n  -0.012223629,\n  0.0038946252,\n  0.021939185,\n  0.0015287617,\n  0.000108071814,\n  -0.0056819515,\n  0.008875221,\n  -0.00061328226,\n  -0.001976401,\n  -0.011784069,\n  -0.0015764345,\n  0.039715484,\n  0.011984457,\n  0.020025808,\n  -0.0018147985,\n  0.036276575,\n  0.007162232,\n  -0.006690352,\n  0.0058144657,\n  0.000418147,\n  0.014233966,\n  0.002323847,\n  -0.017013533,\n  0.02261145,\n  0.008073672,\n  -0.011738821,\n  0.0013017098,\n  -0.022314101,\n  0.013018714,\n  0.019948239,\n  -0.002275366,\n  -0.02536516,\n  0.021525482,\n  -0.02704583,\n  -0.008739474,\n  0.0148933055,\n  -0.0062863454,\n  -0.014402034,\n  0.033897784,\n  0.009605665,\n  0.00026482643,\n  -0.010568817,\n  0.0020992192,\n  0.0071105193,\n  -0.018577848,\n  -0.012436945,\n  0.00964445,\n  -0.0021961809,\n  -0.017491877,\n  -0.02566251,\n  -0.000070196176,\n  -0.0068390267,\n  -0.0020555865,\n  -0.028881637,\n 
 -0.021628907,\n  -0.011260477,\n  -0.02589522,\n  0.0070070936,\n  0.0051034135,\n  -0.012314127,\n  -0.012721365,\n  0.0008815428,\n  -0.00063509867,\n  0.020517081,\n  0.0013307984,\n  -0.011247548,\n  0.011603075,\n  0.012837719,\n  -0.009159641,\n  -0.015578501,\n  -0.010930807,\n  -0.009961192,\n  -0.0010261772,\n  0.009075608,\n  -0.003985123,\n  -0.00536521,\n  -0.022197748,\n  0.019301828,\n  0.030252028,\n  0.0057498245,\n  0.011221692,\n  -0.011784069,\n  0.009870694,\n  -0.012062026,\n  0.0087653315,\n  0.007905604,\n  -0.018047791,\n  0.008377485,\n  0.014389105,\n  -0.010917879,\n  -0.0077246097,\n  -0.012333519,\n  0.017142816,\n  0.015358722,\n  0.02204261,\n  0.041241013,\n  -0.004948275,\n  -0.010601138,\n  0.026632126,\n  -0.0379831,\n  0.0022802143,\n  0.0042727757,\n  -0.00069287163,\n  0.016535189,\n  0.008920469,\n  0.04312853,\n  -0.009140249,\n  -0.013484131,\n  0.0049288827,\n  -0.05822869,\n  0.02378792,\n  0.00045127558,\n  0.0079896385,\n  0.006021317,\n  0.004954739,\n  -0.022818303,\n  -0.0146347415,\n  -0.0034712262,\n  -0.009987048,\n  0.013975402,\n  -0.0034679943,\n  -0.040465318,\n  -0.04589517,\n  -0.020038737,\n  -0.020258516,\n  0.023232006,\n  -0.018797627,\n  -0.016005132,\n  -0.023645708,\n  0.0077698585,\n  0.001608755,\n  -0.01464767,\n  -0.019107904,\n  -0.054195084,\n  -0.0069553806,\n  0.014402034,\n  -0.004534572,\n  0.0072074807,\n  -0.0151130855,\n  0.0037136304,\n  -0.010898487,\n  -0.008299915,\n  0.015035517,\n  -0.022857087,\n  -0.011796998,\n  0.0007021638,\n  0.018965695,\n  0.034544196,\n  0.03154485,\n  -0.0011659636,\n  0.015849994,\n  -0.000030628762,\n  0.0035584916,\n  0.008371021,\n  -0.008222346,\n  0.009637985,\n  0.00451518,\n  0.0049159545,\n  0.015992204,\n  0.014091756,\n  0.0014406882,\n  -0.002325463,\n  0.004010979,\n  0.02204261,\n  -0.008442125,\n  -0.007963781,\n  -0.017440164,\n  -0.017491877,\n  -0.0065255174,\n  -0.019482823,\n  -0.0049385787,\n  0.0027197737,\n  0.00067711534,\n  
0.00016927884,\n  0.027795667,\n  0.012333519,\n  -0.0021590122,\n  -0.016573975,\n  0.030846726,\n  -0.011557826,\n  0.015513861,\n  -0.0067162085,\n  -0.010575281,\n  0.0004718799,\n  -0.0057465923,\n  0.015281152,\n  -0.0068842755,\n  0.006877811,\n  -0.0011481872,\n  0.01378148,\n  -0.01891398,\n  -0.014130541,\n  0.0009292156,\n  0.012624403,\n  -0.010749812,\n  -0.027950805,\n  0.018009007,\n  -0.0085908,\n  -0.0017921741,\n  -0.034337346,\n  -0.025326377,\n  -0.014970875,\n  -0.0047963685,\n  0.002322231,\n  -0.011628931,\n  0.009372957,\n  -0.0085908,\n  -0.0023610156,\n  -0.017996078,\n  -0.00044279144,\n  0.020116305,\n  -0.016496405,\n  0.032941096,\n  0.014583029,\n  -0.006137671,\n  -0.011460864,\n  0.008060743,\n  -0.009870694,\n  -0.0030656033,\n  0.011667715,\n  0.005294105,\n  0.004692943,\n  0.006496429,\n  -0.013639268,\n  0.014389105,\n  -0.016496405,\n  -0.017892651,\n  0.004414986,\n  0.019508678,\n  -0.011906887,\n  -0.012359375,\n  -0.011363903,\n  -0.015707783,\n  0.01603099,\n  -0.011447936,\n  0.008868757,\n  -0.018500278,\n  -0.011803462,\n  -0.010381358,\n  0.042663116,\n  -0.034906186,\n  0.02204261,\n  0.011331582,\n  -0.020775644,\n  -0.016354194,\n  0.0024094963,\n  -0.01762116,\n  0.004298632,\n  -0.014660598,\n  0.033251375,\n  -0.021499624,\n  0.0069359886,\n  0.01428568,\n  -0.0065966225,\n  -0.022831231,\n  -0.009036823,\n  0.0027763345,\n  0.025753008,\n  -0.017802155,\n  -0.004395594,\n  0.002550091,\n  -0.0071234475,\n  0.0050937175,\n  -0.008377485,\n  -0.009612129,\n  -0.03110529,\n  -0.014802808,\n  0.0011304109,\n  0.0045539644,\n  0.012786007,\n  -0.015384578,\n  -0.026257208,\n  -0.01217838,\n  -0.026321849,\n  -0.0128247915,\n  0.013639268,\n  0.00067024725,\n  -0.010646387,\n  -0.007912069,\n  -0.010323181,\n  -0.00030219706,\n  0.03167413,\n  0.0012596932,\n  -0.021990897,\n  0.00511311,\n  0.012430481,\n  -0.012992858,\n  0.02094371,\n  -0.010329645,\n  0.000017927801,\n  -0.022301175,\n  -0.013458273,\n  
-0.014414961,\n  -0.021939185,\n  0.015630214,\n  0.00017968201,\n  -0.019948239,\n  0.0029928822,\n  -0.005420155,\n  0.015617286,\n  0.020801501,\n  -0.005662559,\n  0.010659315,\n  0.019999951,\n  0.0006625711,\n  -0.0054169227,\n  -0.030614018,\n  0.007828035,\n  -0.0041241013,\n  0.015772425,\n  -0.0010067848,\n  -0.013419489,\n  0.007265658,\n  0.0032546786,\n  0.009385886,\n  -0.011635395,\n  -0.024046483,\n  0.00005070285,\n  -0.0035358672,\n  0.012514514,\n  -0.009127321,\n  -0.030872582,\n  -0.009444063,\n  -0.026451131,\n  -0.0059793005,\n  0.03873294,\n  0.018901054,\n  -0.016276626,\n  0.0018875196,\n  0.018435637,\n  -0.0088429,\n  0.021939185,\n  0.000892855,\n  -0.011609538,\n  0.01250805,\n  0.0025145381,\n  -0.009269532,\n  0.0028797602,\n  0.008836436,\n  0.020219732,\n  0.00703295,\n  -0.024033556,\n  -0.0059889965,\n  0.0087847235,\n  -0.012424016,\n  0.003390425,\n  -0.0059534437,\n  0.024317976,\n  0.00786682,\n  0.0068002422,\n  0.0066515678,\n  0.03467348,\n  -0.0021008353,\n  0.012611476,\n  -0.019883597,\n  -0.008157705,\n  0.0038558408,\n  0.024951458,\n  0.029760756,\n  0.003458298,\n  -0.027252682,\n  -0.015811209,\n  0.015345793,\n  -0.0055946857,\n  -0.015229439,\n  0.008868757,\n  -0.00036017204,\n  -0.009987048,\n  -0.016328339,\n  0.006147367,\n  -0.01688425,\n  0.007737538,\n  0.028157657,\n  -0.02523588,\n  0.0176858,\n  0.011544897,\n  -0.0123076625,\n  0.0020507383,\n  0.0018923677,\n  0.017789226,\n  -0.0055203484,\n  0.0012281806,\n  -0.0056787194,\n  0.0068842755,\n  0.007950854,\n  -0.035500884,\n  -0.003881697,\n  0.01515187,\n  -0.019586248,\n  0.046024453,\n  -0.016703255,\n  -0.015164798,\n  -0.012605011,\n  -0.0072979783,\n  -0.011098874,\n  -0.0050096842,\n  0.020077521,\n  -0.01087263,\n  -0.0059857643,\n  -0.02261145,\n  -0.012074955,\n  -0.047756832,\n  0.0020151858,\n  -0.03363922,\n  -0.017375523,\n  -0.020387799,\n  0.004754352,\n  0.017104032,\n  -0.015578501,\n  -0.011350974,\n  0.0030833797,\n  
-0.006221704,\n  -0.011990922,\n  -0.009082072,\n  0.019314757,\n  -0.012249486,\n  -0.015811209,\n  0.014324464,\n  -0.0057659848,\n  -0.016729113,\n  0.0110342335,\n  0.010762741,\n  0.0021687085,\n  0.010284397,\n  0.21843515,\n  -0.012598547,\n  0.002550091,\n  0.041034162,\n  0.009198426,\n  0.019172546,\n  -0.0024062644,\n  0.010445999,\n  0.011325118,\n  0.024524827,\n  -0.018164145,\n  0.01740138,\n  -0.015604358,\n  -0.003691006,\n  0.019663818,\n  -0.007582399,\n  -0.06412396,\n  -0.023826703,\n  -0.029553903,\n  -0.014014187,\n  -0.0063962354,\n  -0.011881031,\n  -0.0075500784,\n  -0.004457003,\n  0.03834509,\n  0.007511294,\n  -0.0047608158,\n  0.0009195194,\n  0.0028700642,\n  0.0022365814,\n  -0.024111124,\n  -0.04661915,\n  0.002202645,\n  0.021874543,\n  -0.015707783,\n  0.0013202941,\n  0.0148933055,\n  -0.0054039946,\n  0.042688973,\n  0.024783392,\n  -0.0006411588,\n  0.00052520883,\n  -0.012734294,\n  -0.0123270545,\n  -0.020801501,\n  0.010439535,\n  0.008138313,\n  -0.031027721,\n  0.015604358,\n  0.0058015375,\n  -0.04212013,\n  0.014686454,\n  0.021615978,\n  0.052178282,\n  0.0030963079,\n  0.016405907,\n  0.0070394143,\n  0.0117065,\n  -0.023322504,\n  -0.0071880887,\n  -0.0062895776,\n  0.024524827,\n  -0.017750442,\n  0.01675497,\n  -0.02435676,\n  0.007886212,\n  -0.014699383,\n  0.005135734,\n  0.021564266,\n  -0.017789226,\n  -0.0010698099,\n  -0.019870669,\n  0.0053555137,\n  0.0070523424,\n  -0.010581746,\n  -0.022908801,\n  0.019883597,\n  0.010892022,\n  0.011758213,\n  0.005782145,\n  -0.0053813704,\n  0.0040174434,\n  -0.011363903,\n  -0.012546835,\n  -0.010575281,\n  -0.04183571,\n  0.031415567,\n  0.017013533,\n  0.01250805,\n  -0.007899141,\n  0.012055562,\n  -0.040930733,\n  -0.010685171,\n  -0.0027924948,\n  0.009954727,\n  0.011945672,\n  0.017310882,\n  0.025675438,\n  -0.030096889,\n  0.02784738,\n  -0.00989655,\n  -0.027976662,\n  0.00549126,\n  0.015164798,\n  -0.02212018,\n  -0.009127321,\n  0.0044957874,\n  
0.03154485,\n  0.0021800206,\n  -0.019637961,\n  0.0046864785,\n  -0.022301175,\n  0.00786682,\n  0.0018051023,\n  0.043800797,\n  0.0071880887,\n  0.008306379,\n  0.0020523544,\n  -0.00024583814,\n  -0.018603705,\n  0.024020627,\n  0.0026712928,\n  0.012482193,\n  0.006244329,\n  -0.0074272607,\n  0.0011215229,\n  -0.021577194,\n  -0.008293451,\n  0.021512553,\n  -0.037310835,\n  0.01370391,\n  -0.0024256567,\n  0.02298637,\n  -0.013962475,\n  -0.009204891,\n  -0.008532623,\n  0.0047414238,\n  -0.010646387,\n  -0.014040044,\n  0.025197094,\n  -0.013206174,\n  -0.00022482977,\n  0.009075608,\n  0.00076114875,\n  -0.0060342452,\n  -0.031777557,\n  0.0146347415,\n  -0.007640576,\n  -0.024990244,\n  -0.010284397,\n  -0.023232006,\n  -0.009689699,\n  -0.01965089,\n  0.011842247,\n  0.0271234,\n  -0.001018097,\n  -0.022947585,\n  -0.030174458,\n  -0.00346153,\n  -0.004350345,\n  -0.026295993,\n  -0.01696182,\n  0.021292774,\n  -0.002685837,\n  -0.04095659,\n  -0.016199056,\n  -0.16630858,\n  0.046826,\n  0.022107251,\n  -0.013380704,\n  0.0271234,\n  0.006916596,\n  0.033587508,\n  0.012223629,\n  0.002503226,\n  -0.009553952,\n  0.027873235,\n  0.008461518,\n  -0.024305047,\n  -0.0005595494,\n  0.000106758795,\n  0.010601138,\n  -0.0037039341,\n  0.008552016,\n  0.021874543,\n  0.013497058,\n  0.02218482,\n  -0.032217115,\n  0.018177073,\n  -0.005956676,\n  0.016573975,\n  0.00084356614,\n  0.017672872,\n  0.0098189805,\n  -0.011596611,\n  -0.0074725095,\n  -0.031208716,\n  -0.005313497,\n  0.03653514,\n  0.02009045,\n  0.0077181454,\n  0.0039948192,\n  0.008358092,\n  -0.014272751,\n  0.00848091,\n  0.011719429,\n  0.025171239,\n  0.022469241,\n  -0.00689074,\n  0.011195836,\n  -0.015255296,\n  0.007873284,\n  0.006066566,\n  0.019922383,\n  0.007466045,\n  -0.009075608,\n  0.009237211,\n  -0.020749789,\n  -0.0021751726,\n  -0.0067420653,\n  -0.0007490286,\n  0.00514543,\n  -0.015565573,\n  0.0049224184,\n  -0.0021477,\n  -0.008661905,\n  -0.012676117,\n  
-0.017297953,\n  0.0021250756,\n  0.010943736,\n  -0.0032611426,\n  -0.012184844,\n  -0.006441484,\n  0.014466675,\n  -0.026528701,\n  0.00986423,\n  0.014479603,\n  -0.025584942,\n  0.011796998,\n  -0.015617286,\n  0.024124052,\n  0.020504152,\n  -0.019547464,\n  0.02212018,\n  0.0012023242,\n  0.0032643748,\n  -0.025701296,\n  0.047653407,\n  -0.03283767,\n  0.0071816244,\n  0.022714878,\n  0.008610193,\n  -0.0027052294,\n  -0.00942467,\n  -0.021861615,\n  -0.03211369,\n  0.020633435,\n  -0.024951458,\n  -0.014324464,\n  -0.014751095,\n  0.015190654,\n  0.017375523,\n  0.004056228,\n  0.014182254,\n  -0.0051551266,\n  -0.002621196,\n  0.008358092,\n  -0.00811892,\n  -0.029786611,\n  -0.00030866117,\n  0.03873294,\n  -0.024563612,\n  0.0054169227,\n  0.020103376,\n  0.026063286,\n  -0.018797627,\n  -0.022714878,\n  0.00084841426,\n  0.021085922,\n  0.01944404,\n  -0.008306379,\n  0.015061373,\n  -0.0072204093,\n  -0.013264351,\n  0.018435637,\n  -0.017569447,\n  0.040982448,\n  -0.0047317273,\n  -0.016263697,\n  -0.0036651494,\n  -0.0065869265,\n  -0.012488658,\n  -0.073432274,\n  -0.033535793,\n  0.0075371503,\n  0.016367123,\n  -0.024744608,\n  -0.0037168623,\n  -0.0039043215,\n  0.017453093,\n  -0.009411742,\n  0.011208764,\n  -0.005074325,\n  -0.021939185,\n  -0.026037429,\n  -0.015061373,\n  0.020904927,\n  -0.010956664,\n  -0.015488003,\n  -0.03573359,\n  -0.008758867,\n  0.0112734055,\n  0.010601138,\n  -0.012579155,\n  -0.009321244,\n  -0.00047026388,\n  -0.010413678,\n  -0.008209418,\n  -0.015785353,\n  0.03283767,\n  0.0151130855,\n  -0.0067291367,\n  0.0027116935,\n  -0.021047138,\n  0.022081394,\n  -0.0197931,\n  -0.0029444015,\n  -0.013742695,\n  -0.03943106,\n  0.009625058,\n  0.020219732,\n  -0.01870713,\n  0.008720082,\n  0.010032296,\n  -0.001247573,\n  -0.040646315,\n  0.007621184,\n  -0.006189384,\n  -0.021163492,\n  0.012120203,\n  0.00689074,\n  -0.026140854,\n  -0.028235225,\n  -0.021952113,\n  -0.032346398,\n  -0.02081443,\n  0.0405946,\n  
0.0017420772,\n  0.0022963744,\n  0.016173199,\n  -0.022469241,\n  0.017440164,\n  0.009541024,\n  0.0014043276,\n  -0.01943111,\n  0.030769156,\n  0.009592737,\n  -0.004870706,\n  -0.010168043,\n  0.008752403,\n  0.002676141,\n  -0.0014835129,\n  0.00689074,\n  0.031622417,\n  0.011318654,\n  0.010400751,\n  -0.014001259,\n  0.016263697,\n  -0.0065416778,\n  -0.024964387,\n  0.01265026,\n  -0.024602396,\n  -0.009586273,\n  -0.017789226,\n  0.010458928,\n  -0.008170633,\n  0.014996732,\n  0.0047349595,\n  0.026942404,\n  0.003522939,\n  0.013471202,\n  -0.023244934,\n  0.009838373,\n  0.013206174,\n  0.015035517,\n  -0.023697421,\n  0.0038849292,\n  0.004828689,\n  -0.017802155,\n  -0.0061182785,\n  0.013031643,\n  0.008429198,\n  -0.008409805,\n  -0.017750442,\n  -0.045662463,\n  0.022895873,\n  -0.0148933055,\n  0.007886212,\n  0.023955986,\n  -0.013820264,\n  0.0076535046,\n  -0.0056076143,\n  -0.01675497,\n  -0.00071024394,\n  -0.00089770305,\n  -0.008959254,\n  -0.009793124,\n  -0.008552016,\n  -0.037181552,\n  -0.021215204,\n  0.035785303,\n  0.017659944,\n  0.028907493,\n  0.018939838,\n  0.012721365,\n  0.0015392659,\n  0.012954073,\n  0.002858752,\n  -0.020271445,\n  -0.0024563612,\n  -0.009915942,\n  0.018112432,\n  0.000893663,\n  -0.0474207,\n  0.014389105,\n  -0.04387837,\n  0.0040788525,\n  0.022469241,\n  0.012029706,\n  -0.0064770365,\n  -0.0069812373,\n  0.0146347415,\n  0.015966348,\n  0.0052747126,\n  -0.0213962,\n  -0.020038737,\n  0.0031771094,\n  -0.015358722,\n  -0.005361978,\n  0.0015618902,\n  -0.014751095,\n  0.016108558,\n  0.0012225246,\n  0.010387822,\n  -0.0009623442,\n  0.023568138,\n  0.005232696,\n  -0.0066709598,\n  -0.008907542,\n  -0.0032676067,\n  0.00645118,\n  0.0016281473,\n  -0.010801525,\n  -0.016354194,\n  0.02196504,\n  0.030950151,\n  0.01573364,\n  0.011628931,\n  0.0019505448,\n  0.0055688294,\n  -0.025132453,\n  0.002150932,\n  0.0062152403,\n  -0.0385778,\n  -0.012773078,\n  0.0057465923,\n  0.010736885,\n  
0.00008569994,\n  0.01515187,\n  0.00608919,\n  -0.014492531,\n  0.007149304,\n  -0.039353494,\n  0.0166774,\n  0.02268902,\n  -0.020555865,\n  -0.027821522,\n  0.007369084,\n  0.032966953,\n  0.028105944,\n  0.003749183,\n  0.0075500784,\n  0.0017113728,\n  0.0141176125,\n  -0.031622417,\n  -0.002317383,\n  0.030691586,\n  0.0098189805,\n  -0.005290873,\n  0.017918508,\n  -0.0020151858,\n  -0.000026942202,\n  0.010614066,\n  0.024537755,\n  -0.005982532,\n  -0.0056528626,\n  -0.018396853,\n  -0.020633435,\n  0.0019618568,\n  0.026373563,\n  -0.008655441,\n  -0.031027721,\n  0.0059922286,\n  0.01109241,\n  0.011700036,\n  0.016173199,\n  0.022701949,\n  0.029347053,\n  -0.0040432997,\n  0.015475076,\n  -0.011616003,\n  -0.017828012,\n  -0.020672219,\n  0.054919064,\n  0.0060536377,\n  0.006961845,\n  0.023038082,\n  0.00068438746,\n  0.027019974,\n  -0.0010641539,\n  -0.01862956,\n  -0.025688367,\n  0.0102973245,\n  -0.015669,\n  0.021124706,\n  -0.02610207,\n  -0.0171816,\n  -0.031932697,\n  -0.010904951,\n  -0.006570766,\n  0.0062314006,\n  0.037672825,\n  -0.032889385,\n  0.050782036,\n  0.013497058,\n  -0.014583029,\n  0.0016297633,\n  -0.0020588185,\n  0.02893335,\n  -0.0018341908,\n  0.0010673859,\n  -0.024188694,\n  -0.015488003,\n  0.0187847,\n  -0.012798935,\n  -0.0029411693,\n  -0.021590123,\n  -0.011997385,\n  -0.021809902,\n  -0.019107904,\n  0.010820918,\n  -0.0031092362,\n  0.023464713,\n  0.021447912,\n  0.015669,\n  0.024589468,\n  0.008926934,\n  -0.022443384,\n  0.010665779,\n  0.015992204,\n  -0.0046800147,\n  -0.0044279145,\n  -0.0562636,\n  0.007750466,\n  0.007575935,\n  -0.010704564,\n  -0.0013744311,\n  0.028519647,\n  -0.017297953,\n  -0.016367123,\n  -0.012520978,\n  -0.010245612,\n  0.01652226,\n  0.015979277,\n  -0.0067162085,\n  -0.025507372,\n  -0.03488033,\n  0.031208716,\n  0.0074337246,\n  -0.026218424,\n  0.007129912,\n  -0.025584942,\n];\nfinal List<double> _whatsThisVector = [\n  -0.01297786,\n  -0.0028681739,\n  0.010790355,\n  
0.008147955,\n  -0.020644162,\n  -0.00613438,\n  -0.02576841,\n  -0.014275646,\n  -0.015881155,\n  -0.022022223,\n  0.02437697,\n  0.0021824879,\n  -0.028096396,\n  -0.008562711,\n  0.009077812,\n  0.01051608,\n  0.027159851,\n  -0.005559073,\n  0.0036324628,\n  -0.03293968,\n  0.001190752,\n  0.00011487329,\n  0.0020319715,\n  -0.0049503176,\n  0.009766843,\n  0.0067230663,\n  0.007378649,\n  -0.02559448,\n  -0.0100009795,\n  -0.011151594,\n  0.021594089,\n  -0.018316176,\n  0.0003518321,\n  -0.02049699,\n  -0.01733949,\n  -0.011512833,\n  -0.0030972932,\n  -0.03031735,\n  0.008923951,\n  0.0058667953,\n  0.0127905505,\n  0.010649872,\n  -0.012061382,\n  -0.010288633,\n  -0.012262071,\n  -0.0077532674,\n  0.011084697,\n  -0.03176231,\n  -0.009546085,\n  0.009619671,\n  0.0088169165,\n  0.0051275925,\n  -0.030370867,\n  -0.016041705,\n  -0.010676631,\n  -0.004752974,\n  -0.015399502,\n  0.01052277,\n  0.0014483024,\n  -0.00047621722,\n  -0.0026607956,\n  0.0113857305,\n  -0.021861672,\n  0.0004212369,\n  0.020992022,\n  0.0016046722,\n  -0.004696112,\n  0.00074756483,\n  -0.00929188,\n  -0.0032578441,\n  0.02295877,\n  0.038960338,\n  0.00285145,\n  -0.0001723622,\n  0.020028718,\n  -0.007699751,\n  -0.032511543,\n  -0.017620455,\n  0.011559661,\n  -0.0012367432,\n  0.03138769,\n  -0.017834522,\n  -0.012603241,\n  0.020403337,\n  0.00011957693,\n  -0.009385535,\n  -0.008823606,\n  0.029568113,\n  -0.018958379,\n  -0.016938115,\n  0.001041908,\n  -0.0065457914,\n  0.013011307,\n  0.015881155,\n  -0.012422621,\n  -0.0010302012,\n  0.013807372,\n  0.035695802,\n  -0.0014683713,\n  -0.011245249,\n  -0.024992414,\n  0.012817308,\n  0.0014416127,\n  -0.011305455,\n  -0.015827637,\n  -0.011833935,\n  -0.0071244435,\n  -0.0022042291,\n  0.017138802,\n  0.0108371815,\n  -0.01210152,\n  0.027373917,\n  0.012757102,\n  -0.058172923,\n  0.021861672,\n  -0.013874268,\n  0.027066195,\n  -0.00007494463,\n  -0.0021941948,\n  -0.024136143,\n  0.019841408,\n  0.035695802,\n  
0.025741652,\n  -0.013051446,\n  0.02191519,\n  0.010021049,\n  -0.014677023,\n  -0.0028414153,\n  -0.00648893,\n  -0.01909217,\n  0.039415233,\n  -0.0046860776,\n  0.011619867,\n  -0.018704172,\n  -0.0020771264,\n  0.007151202,\n  -0.01156635,\n  0.01228214,\n  -0.014275646,\n  -0.0043683206,\n  0.0151185375,\n  0.031039828,\n  -0.03401002,\n  -0.018329553,\n  -0.008729951,\n  0.02017589,\n  0.017673971,\n  0.0058634505,\n  0.00079564645,\n  -0.009398914,\n  0.014717161,\n  -0.0012869153,\n  0.00622469,\n  0.009505948,\n  -0.002963501,\n  0.026062753,\n  -0.005733003,\n  0.00042416362,\n  -0.009907325,\n  0.0112118,\n  -0.026062753,\n  0.01593467,\n  0.0073920283,\n  -0.008107818,\n  0.011158284,\n  0.03326078,\n  0.0025688135,\n  0.0040572537,\n  0.002083816,\n  -0.013626752,\n  -0.0092785,\n  0.0056828307,\n  -0.034839533,\n  0.020737818,\n  -0.0036424971,\n  0.007231477,\n  -0.00822823,\n  0.00063049654,\n  -0.02540717,\n  -0.008174714,\n  -0.013178548,\n  0.01875769,\n  0.014703781,\n  0.017781006,\n  -0.0057731406,\n  -0.010663251,\n  0.011532902,\n  0.008254989,\n  0.012656758,\n  0.007077616,\n  0.018490106,\n  0.017446525,\n  -0.0069170655,\n  0.0026758474,\n  -0.67345726,\n  -0.015279088,\n  -0.0025270034,\n  0.0014140181,\n  0.00822823,\n  0.0031156898,\n  0.0073385113,\n  -0.0012200192,\n  -0.016790941,\n  0.0059169675,\n  0.030130042,\n  0.0030370867,\n  0.0011021146,\n  -0.0032929645,\n  -0.007646234,\n  -0.019145688,\n  0.0010920801,\n  -0.016844459,\n  -0.0039870124,\n  0.0056293136,\n  -0.027909087,\n  0.006221345,\n  -0.0053884876,\n  0.0041642874,\n  0.034491673,\n  0.014998124,\n  0.012783861,\n  -0.0037294622,\n  -0.022383463,\n  0.004485389,\n  -0.057423685,\n  0.010007669,\n  0.017419767,\n  -0.0058935536,\n  0.049369384,\n  -0.005579142,\n  -0.014302404,\n  0.02788233,\n  -0.00040534907,\n  0.001988489,\n  -0.013392616,\n  -0.010984354,\n  0.039789848,\n  -0.0033799296,\n  -0.0016749132,\n  -0.0032177065,\n  0.014289024,\n  0.01875769,\n  
0.035321184,\n  -0.020108992,\n  0.004873387,\n  -0.004013771,\n  0.0023748146,\n  0.029648388,\n  0.017085286,\n  0.0024667967,\n  0.030130042,\n  0.0068836175,\n  0.013004618,\n  0.020818092,\n  -0.026209924,\n  0.025367033,\n  -0.012436001,\n  -0.01033546,\n  -0.0056192796,\n  0.014248887,\n  -0.029487837,\n  0.01629591,\n  0.02365449,\n  -0.022370083,\n  0.022022223,\n  0.029514596,\n  0.0004707819,\n  0.0036358077,\n  0.011947658,\n  0.0364718,\n  0.002831381,\n  -0.0022627634,\n  -0.0030989656,\n  0.007311753,\n  0.0047730426,\n  0.0026373821,\n  -0.015011503,\n  -0.00893064,\n  0.03676614,\n  -0.021808155,\n  -0.030023007,\n  0.010388977,\n  0.007325132,\n  0.016041705,\n  -0.0057029,\n  -0.0057029,\n  -0.020590644,\n  -0.0034886359,\n  0.0048265597,\n  0.033421334,\n  0.0016866201,\n  0.0039535644,\n  0.01281062,\n  0.02051037,\n  -0.010964285,\n  0.008000784,\n  0.019306239,\n  -0.0013028032,\n  0.015988188,\n  0.007612786,\n  -0.025982477,\n  0.020309681,\n  0.030156799,\n  -0.052553643,\n  0.010435805,\n  -0.00024291677,\n  -0.01754018,\n  -0.007311753,\n  -0.028123155,\n  -0.031227138,\n  0.020256164,\n  0.014489713,\n  0.019078791,\n  -0.02507269,\n  0.017285973,\n  -0.00640531,\n  -0.0036424971,\n  -0.007345201,\n  -0.012081451,\n  0.020403337,\n  -0.0037829792,\n  -0.015867775,\n  -0.046907604,\n  -0.012415932,\n  0.010041117,\n  0.0021674363,\n  0.040378537,\n  -0.018878102,\n  0.002087161,\n  0.010944216,\n  0.016402945,\n  0.01594805,\n  0.0064788954,\n  0.0033331024,\n  -0.029942732,\n  0.00420108,\n  0.01628253,\n  -0.015265709,\n  -0.021540571,\n  -0.018904861,\n  0.009679878,\n  -0.01245607,\n  -0.011445937,\n  -0.0026474164,\n  -0.012221932,\n  -0.008703194,\n  -0.02349394,\n  0.021607468,\n  0.00026674854,\n  -0.016349427,\n  -0.0014976383,\n  -0.030023007,\n  -0.0057463823,\n  -0.029675147,\n  -0.018597139,\n  0.017647212,\n  0.010308702,\n  -0.01069001,\n  -0.016857838,\n  -0.031628516,\n  -0.01576074,\n  0.013245445,\n  -0.0148108145,\n  
-0.03577608,\n  0.0114726955,\n  -0.0007283322,\n  -0.027320402,\n  0.022182774,\n  0.0055624177,\n  0.0141953705,\n  -0.0017643869,\n  0.0016606977,\n  -0.02368125,\n  -0.019761132,\n  -0.01664377,\n  0.003662566,\n  -0.0036692556,\n  -0.005910278,\n  0.0025520893,\n  0.002384849,\n  0.018142246,\n  -0.004712836,\n  -0.020443473,\n  -0.01733949,\n  0.0032812578,\n  -0.0106967,\n  -0.0034919807,\n  0.009592913,\n  -0.037301313,\n  0.0055390038,\n  0.0062113106,\n  0.019172447,\n  0.01735287,\n  0.0228116,\n  0.035829596,\n  0.009385535,\n  0.009626361,\n  -0.0017050165,\n  0.0028865703,\n  -0.044205,\n  0.011947658,\n  -0.044365548,\n  0.02472483,\n  0.0020905058,\n  0.01822252,\n  -0.029568113,\n  -0.001438268,\n  -0.0072649256,\n  0.009740084,\n  0.027748536,\n  -0.01648322,\n  -0.012001175,\n  -0.025206482,\n  -0.002418297,\n  0.0002621494,\n  0.00858278,\n  0.0025270034,\n  -0.010743527,\n  -0.03120038,\n  0.022329945,\n  0.029675147,\n  0.022691185,\n  0.0016899648,\n  -0.045141544,\n  0.0024952276,\n  0.006990651,\n  0.025474066,\n  0.026076132,\n  -0.005100834,\n  0.00067481527,\n  0.008776779,\n  0.0017066889,\n  0.02295877,\n  -0.007994094,\n  -0.0036826348,\n  0.0068969964,\n  0.023119321,\n  -0.026276821,\n  0.028979426,\n  0.004237873,\n  0.033100232,\n  0.018289417,\n  0.012837377,\n  0.023600973,\n  -0.012061382,\n  0.0012133295,\n  -0.03154824,\n  0.017031768,\n  0.015386122,\n  -0.029327286,\n  -0.017754247,\n  0.00041266583,\n  0.016964871,\n  0.019761132,\n  0.019921683,\n  -0.0079606455,\n  0.021875052,\n  -0.014797436,\n  -0.004257942,\n  -0.0106164245,\n  -0.007291684,\n  0.0069705825,\n  -0.005789865,\n  -0.003930151,\n  -0.010917457,\n  -0.027721778,\n  -0.0005874321,\n  -0.002351401,\n  -0.004167632,\n  -0.00587014,\n  0.015653707,\n  0.015680466,\n  -0.0026223303,\n  0.005856761,\n  -0.007311753,\n  -0.029782182,\n  0.0035321184,\n  0.011057939,\n  -0.02382842,\n  -0.031815823,\n  -0.0116265565,\n  0.00034848732,\n  -0.010308702,\n  
0.016764184,\n  -0.010797044,\n  0.0050707306,\n  -0.0151185375,\n  -0.009191535,\n  0.003339792,\n  -0.0069705825,\n  0.025152965,\n  0.0020052132,\n  0.00089640886,\n  -0.008027542,\n  0.005328281,\n  0.0036157386,\n  0.023266492,\n  -0.00035287737,\n  0.02524662,\n  0.0072114086,\n  0.012041313,\n  -0.021152573,\n  0.0043917345,\n  0.0106164245,\n  -0.020831471,\n  0.0078937495,\n  0.0023614352,\n  -0.010094634,\n  -0.008040921,\n  0.0077733365,\n  0.0008324394,\n  0.0044084582,\n  0.029434321,\n  0.0126366895,\n  -0.004940283,\n  -0.02666482,\n  -0.021540571,\n  -0.016670529,\n  0.08899868,\n  0.025875444,\n  0.0032544993,\n  0.018436588,\n  -0.009606292,\n  -0.028176673,\n  -0.034919806,\n  -0.0259691,\n  0.011017801,\n  0.011238559,\n  0.0016941458,\n  -0.018650657,\n  -0.00007578083,\n  0.001331234,\n  0.0062180003,\n  0.014302404,\n  -0.014436197,\n  0.0021774708,\n  0.0068267556,\n  -0.021982085,\n  0.0036057043,\n  -0.012469448,\n  0.03173555,\n  0.015546673,\n  0.0014483024,\n  -0.0011012785,\n  0.007853612,\n  0.0067397906,\n  0.0041776667,\n  -0.018797828,\n  -0.013559856,\n  -0.015800878,\n  -0.016550116,\n  -0.024631174,\n  -0.00057823386,\n  0.009780222,\n  -0.0093654655,\n  0.016322669,\n  0.012576482,\n  -0.0058801747,\n  0.010609735,\n  0.023253113,\n  0.026758473,\n  0.0020737818,\n  0.00023100089,\n  0.0015862758,\n  0.000036557718,\n  0.022998909,\n  0.0074789934,\n  -0.003277913,\n  0.022490498,\n  0.0054821423,\n  -0.045061268,\n  -0.008870434,\n  0.014449576,\n  -0.018904861,\n  -0.00059872086,\n  -0.00011142396,\n  0.00042938988,\n  -0.038157582,\n  0.010295322,\n  -0.005799899,\n  -0.011813866,\n  -0.0063718613,\n  -0.023895316,\n  -0.024644554,\n  0.001029365,\n  -0.016028326,\n  -0.0095193265,\n  0.00569621,\n  -0.0038565649,\n  -0.004191046,\n  -0.02823019,\n  -0.000525135,\n  0.01877107,\n  0.010723459,\n  0.000826586,\n  0.015198813,\n  -0.028685084,\n  0.036338005,\n  0.010810424,\n  -0.016683908,\n  -0.014342542,\n  -0.01577412,\n  
0.005227937,\n  0.01105125,\n  0.009559465,\n  0.010890699,\n  0.004960352,\n  0.00787368,\n  0.008649676,\n  -0.005973829,\n  0.010094634,\n  -0.019480169,\n  0.024831863,\n  -0.00201692,\n  0.006672894,\n  0.0038264617,\n  0.004699457,\n  -0.019185826,\n  -0.0068100314,\n  -0.016376186,\n  -0.0031641894,\n  -0.0054252804,\n  0.012589862,\n  0.021982085,\n  0.027480952,\n  0.021085678,\n  -0.014610127,\n  -0.0055289697,\n  -0.004401769,\n  -0.027066195,\n  0.0030120006,\n  0.002777864,\n  0.027066195,\n  0.008248299,\n  -0.0032461374,\n  0.026771853,\n  -0.038478684,\n  0.008114507,\n  0.003170879,\n  -0.023373526,\n  -0.003418395,\n  0.011245249,\n  -0.011713522,\n  -0.008622918,\n  0.008469057,\n  -0.041582666,\n  -0.022677805,\n  -0.011680074,\n  -0.0033531713,\n  0.012148347,\n  -0.021112435,\n  0.00946581,\n  -0.021634225,\n  -0.001541957,\n  -0.002070437,\n  0.025821926,\n  -0.0007676337,\n  -0.011499454,\n  -0.006642791,\n  -0.004803146,\n  -0.0058835195,\n  0.011900831,\n  -0.012375794,\n  -0.03628449,\n  -0.0029283804,\n  0.01086394,\n  -0.0009181501,\n  0.013459512,\n  -0.013446133,\n  -0.0025387101,\n  -0.012509586,\n  -0.0367929,\n  -0.0047596633,\n  -0.018958379,\n  -0.006381896,\n  0.0039000474,\n  0.029300528,\n  0.00911795,\n  0.027253505,\n  0.01961396,\n  0.015225572,\n  0.02103216,\n  -0.041127775,\n  -0.011987796,\n  0.00587014,\n  0.019332998,\n  -0.008395471,\n  0.013238755,\n  0.0054453495,\n  0.014556609,\n  0.0024450554,\n  0.0077198194,\n  -0.015586811,\n  0.007111064,\n  0.018490106,\n  -0.008656366,\n  -0.0060708285,\n  -0.036177456,\n  0.016028326,\n  0.0031658618,\n  0.004050564,\n  -0.010395667,\n  -0.011205111,\n  -0.01297117,\n  -0.0030471212,\n  0.008917261,\n  0.005207868,\n  -0.014757298,\n  0.030424384,\n  -0.039763093,\n  0.024631174,\n  0.014396058,\n  0.001806197,\n  -0.0069170655,\n  -0.007151202,\n  -0.0035454978,\n  -0.011559661,\n  0.009131329,\n  -0.007204719,\n  0.023520699,\n  -0.013165168,\n  0.007064237,\n  
0.0238418,\n  0.003455188,\n  -0.023226354,\n  0.0044753547,\n  0.0131451,\n  -0.010850561,\n  0.0025972442,\n  -0.0066160327,\n  -0.02666482,\n  -0.031441208,\n  0.017647212,\n  0.0071913395,\n  -0.033367816,\n  0.016603632,\n  0.0052480055,\n  -0.0032511544,\n  0.0024316763,\n  0.020764574,\n  0.00552228,\n  0.00613438,\n  0.021313123,\n  0.014623506,\n  0.0021055574,\n  -0.02769502,\n  0.0073719593,\n  0.0064722057,\n  0.003612394,\n  0.02630358,\n  0.013024687,\n  0.0059704846,\n  -0.0012384156,\n  0.023641111,\n  0.011673384,\n  -0.026985921,\n  -0.03783648,\n  -0.006820066,\n  0.0318961,\n  -0.0020152475,\n  -0.010027738,\n  -0.023386905,\n  -0.0110780075,\n  0.017205698,\n  -0.007044168,\n  0.00082993077,\n  -0.008087749,\n  -0.020604024,\n  -0.02523324,\n  0.008107818,\n  -0.015667086,\n  0.016724046,\n  0.001025184,\n  -0.0016623702,\n  -0.014315783,\n  -0.030023007,\n  -0.002996949,\n  0.0012049675,\n  0.014128474,\n  0.03243127,\n  -0.015640328,\n  0.014543231,\n  0.011907521,\n  -0.004522182,\n  -0.0238418,\n  0.010930836,\n  0.006080863,\n  0.025755031,\n  -0.017125422,\n  -0.012375794,\n  0.002951794,\n  0.007913819,\n  0.008489125,\n  0.003423412,\n  -0.0022811599,\n  -0.009077812,\n  -0.012703585,\n  -0.005408556,\n  0.014449576,\n  0.010756906,\n  -0.03890682,\n  0.001401475,\n  -0.011305455,\n  -0.0053583845,\n  -0.014784057,\n  -0.0032461374,\n  0.00964643,\n  -0.012308897,\n  -0.012462759,\n  -0.0018781103,\n  0.01209483,\n  0.018075349,\n  -0.007646234,\n  -0.0116265565,\n  0.004344907,\n  0.035562012,\n  -0.014275646,\n  -0.0031976376,\n  -0.004030495,\n  0.022731323,\n  -0.018664036,\n  0.014141853,\n  0.009559465,\n  -0.035454977,\n  0.013553167,\n  -0.011445937,\n  -0.020389957,\n  -0.023373526,\n  0.023801662,\n  0.016938115,\n  0.008141265,\n  -0.01210152,\n  0.020189269,\n  0.020951884,\n  -0.004676043,\n  0.013914406,\n  -0.02611627,\n  0.018316176,\n  0.0011330541,\n  -0.004401769,\n  0.0119209,\n  -0.007432166,\n  0.007024099,\n  
0.021446917,\n  0.0025370377,\n  0.018864723,\n  -0.018985137,\n  -0.015038262,\n  -0.005903588,\n  0.0060340357,\n  -0.01664377,\n  -0.009318639,\n  -0.018102108,\n  -0.011091387,\n  -0.016429702,\n  -0.0050974893,\n  -0.013593305,\n  -0.008027542,\n  -0.018115487,\n  0.021808155,\n  -0.020015338,\n  0.018650657,\n  -0.014436197,\n  0.0007325132,\n  -0.010910767,\n  -0.014677023,\n  -0.02683875,\n  -0.031949617,\n  -0.019212583,\n  0.020282922,\n  0.012850757,\n  -0.02332001,\n  -0.017821142,\n  0.006234724,\n  -0.0036424971,\n  -0.00022431127,\n  0.008341954,\n  -0.00928519,\n  0.014436197,\n  0.008890503,\n  -0.014717161,\n  0.017807763,\n  0.014382679,\n  -0.008040921,\n  -0.007940577,\n  -0.0029049667,\n  -0.00605076,\n  -0.012750412,\n  0.009492569,\n  0.018637277,\n  -0.02138002,\n  -0.016188877,\n  0.008783469,\n  -0.016911356,\n  0.030718727,\n  0.025728272,\n  0.0015369398,\n  -0.010736837,\n  0.002789571,\n  0.0062982757,\n  -0.008676435,\n  0.027454194,\n  0.010101324,\n  -0.04222487,\n  -0.008281748,\n  -0.007726509,\n  0.024082625,\n  -0.011512833,\n  0.014302404,\n  0.0009173139,\n  -0.014556609,\n  0.01576074,\n  0.0055390038,\n  -0.013566546,\n  0.0062782066,\n  0.017834522,\n  0.02579517,\n  0.0036759453,\n  -0.001806197,\n  0.041636184,\n  0.008435609,\n  -0.029354045,\n  -0.00578652,\n  -0.0057229684,\n  -0.023708008,\n  0.0127236545,\n  0.005963795,\n  -0.041555908,\n  -0.025380412,\n  -0.012436001,\n  0.011639936,\n  -0.01368027,\n  -0.0080476105,\n  0.0022627634,\n  -0.0066528255,\n  -0.019132309,\n  0.008422229,\n  0.00052388076,\n  -0.009840429,\n  -0.009492569,\n  -0.008642986,\n  -0.02067092,\n  0.001235907,\n  -0.008321885,\n  0.018811207,\n  0.0019767822,\n  -0.024082625,\n  0.029166736,\n  -0.006261483,\n  0.0014583368,\n  -0.0101347715,\n  0.010201668,\n  -0.021875052,\n  -0.0073719593,\n  0.21299745,\n  0.007980715,\n  0.010496011,\n  0.016001567,\n  -0.0021557296,\n  0.012931032,\n  0.017580317,\n  -0.009325328,\n  -0.008609539,\n  
0.017660592,\n  0.015412881,\n  0.02333339,\n  -0.020938504,\n  0.012837377,\n  -0.016670529,\n  -0.027989363,\n  -0.030558176,\n  -0.012262071,\n  -0.021821534,\n  -0.025567722,\n  -0.006749825,\n  -0.012549724,\n  -0.005375108,\n  -0.0038599097,\n  0.029835697,\n  -0.0037762895,\n  0.011706832,\n  -0.007639544,\n  0.021781398,\n  0.011305455,\n  -0.0037629104,\n  -0.014034819,\n  -0.0009457448,\n  0.0073050633,\n  -0.007519131,\n  0.0031658618,\n  0.004017116,\n  -0.02101878,\n  0.034090295,\n  0.037595656,\n  -0.0036324628,\n  0.0012242001,\n  -0.010027738,\n  0.02016251,\n  -0.00037879962,\n  -0.0032762405,\n  -0.0056059,\n  -0.027226746,\n  -0.003277913,\n  0.01017491,\n  -0.021607468,\n  0.0055089006,\n  -0.0005723805,\n  0.0416897,\n  -0.0020486957,\n  0.000101755366,\n  -0.008188093,\n  -0.008295127,\n  -0.011231869,\n  -0.002426659,\n  -0.011365661,\n  0.029220253,\n  -0.009853808,\n  0.045328856,\n  -0.021072298,\n  0.029755423,\n  -0.046693537,\n  0.011158284,\n  -0.016269151,\n  0.0070909956,\n  -0.0000098449755,\n  -0.011887452,\n  -0.003963599,\n  -0.010542839,\n  -0.02207574,\n  -0.024925519,\n  0.019841408,\n  0.028069638,\n  0.035294425,\n  0.0054286253,\n  -0.019720994,\n  -0.015078399,\n  -0.021701122,\n  -0.015252329,\n  -0.017981693,\n  -0.02456428,\n  0.015573432,\n  -0.005227937,\n  -0.015841017,\n  0.008957399,\n  -0.019734373,\n  -0.0062815514,\n  -0.008977468,\n  -0.01998858,\n  0.008843675,\n  0.03141445,\n  -0.005318247,\n  0.019038653,\n  -0.005906933,\n  0.020229407,\n  -0.026865507,\n  -0.007980715,\n  0.010736837,\n  0.0094725,\n  -0.014797436,\n  0.00042458173,\n  -0.02017589,\n  0.022209533,\n  -0.0012526311,\n  -0.027641503,\n  -0.026397234,\n  -0.027802054,\n  -0.004104081,\n  -0.010248495,\n  -0.0012049675,\n  0.02121947,\n  -0.008562711,\n  -0.010148151,\n  0.0090577435,\n  -0.017954936,\n  0.011733591,\n  -0.012168416,\n  -0.019520307,\n  -0.0032578441,\n  -0.008188093,\n  -0.011231869,\n  -0.0037261173,\n  0.011954348,\n  
0.0041274945,\n  -0.03315375,\n  0.02034982,\n  -0.021540571,\n  -0.007398718,\n  0.015439639,\n  -0.014556609,\n  -0.009786911,\n  -0.004224494,\n  0.014315783,\n  -0.028096396,\n  0.01683108,\n  -0.010669941,\n  0.00005236717,\n  -0.0053550396,\n  -0.02192857,\n  -0.007967335,\n  -0.03384947,\n  0.025928961,\n  -0.0006660351,\n  -0.012964481,\n  -0.013345788,\n  -0.03138769,\n  -0.0221025,\n  0.010589666,\n  -0.035669044,\n  0.029862456,\n  -0.010656562,\n  -0.0046492848,\n  -0.02471145,\n  -0.00012145838,\n  0.019600581,\n  -0.019894924,\n  0.01033546,\n  0.046827327,\n  -0.0009833738,\n  -0.008923951,\n  0.014917849,\n  -0.17318083,\n  0.016603632,\n  0.011312145,\n  -0.029862456,\n  0.035107117,\n  0.037916757,\n  0.011365661,\n  0.007198029,\n  0.0039033922,\n  0.012716965,\n  0.013620063,\n  -0.01333241,\n  -0.01998858,\n  0.0017292664,\n  0.026557785,\n  -0.016148739,\n  -0.02052375,\n  -0.002603934,\n  0.0077064405,\n  0.009405604,\n  0.006679584,\n  0.0035521872,\n  0.020992022,\n  -0.0018630587,\n  0.0077934056,\n  -0.0063517927,\n  0.0070174094,\n  0.021834914,\n  -0.0019951786,\n  -0.011552971,\n  0.0046927673,\n  -0.015586811,\n  -0.0073585804,\n  0.033742435,\n  0.012857446,\n  -0.013238755,\n  0.0052647297,\n  0.0070575476,\n  -0.0072649256,\n  0.0047262153,\n  0.03644504,\n  -0.0017593696,\n  0.020978643,\n  -0.0086630555,\n  0.014917849,\n  0.037006967,\n  -0.0074923723,\n  0.0012635017,\n  0.02262429,\n  -0.02084485,\n  0.01577412,\n  -0.012034623,\n  -0.0046225265,\n  0.011874072,\n  0.019761132,\n  0.009940773,\n  -0.010475942,\n  0.031227138,\n  -0.0014892763,\n  -0.0075258208,\n  -0.0023296596,\n  -0.020256164,\n  0.0050439723,\n  -0.015225572,\n  0.009124639,\n  -0.015172054,\n  -0.020430094,\n  0.0054955212,\n  -0.026704956,\n  0.020590644,\n  0.013011307,\n  -0.009526016,\n  0.009238363,\n  -0.0013638459,\n  -0.0012910963,\n  0.0032662062,\n  -0.041315082,\n  0.036043663,\n  0.014596747,\n  0.008375403,\n  0.0080543,\n  0.026357096,\n  
-0.00210054,\n  -0.0003024962,\n  0.03384947,\n  0.0027845537,\n  0.007084306,\n  0.005023903,\n  0.020644162,\n  0.0075325104,\n  0.017045148,\n  -0.015345984,\n  0.009526016,\n  -0.019868167,\n  0.007064237,\n  0.0060106223,\n  0.010328771,\n  0.013967923,\n  -0.016710667,\n  -0.019774511,\n  0.0009883911,\n  0.005318247,\n  -0.025647996,\n  0.014650264,\n  0.031146863,\n  0.004575699,\n  0.01227545,\n  0.013626752,\n  0.04597106,\n  -0.009385535,\n  -0.01297786,\n  -0.0015160347,\n  0.017406387,\n  0.03748862,\n  0.0059303464,\n  0.010455874,\n  0.007499062,\n  0.0043081143,\n  0.03609718,\n  -0.01680432,\n  0.02595572,\n  0.016229015,\n  -0.032324236,\n  -0.00085292634,\n  -0.0099876,\n  -0.015024883,\n  -0.09713326,\n  -0.020256164,\n  0.010455874,\n  0.030558176,\n  -0.008743331,\n  0.02508607,\n  0.0009833738,\n  0.0007329313,\n  -0.004492079,\n  0.01735287,\n  -0.00025148783,\n  -0.012429311,\n  -0.021540571,\n  -0.0062982757,\n  0.00078603014,\n  -0.0016548444,\n  -0.017259216,\n  0.005291488,\n  -0.0016933096,\n  0.028765358,\n  -0.009158088,\n  -0.015894534,\n  0.0009833738,\n  -0.016509978,\n  0.00029078935,\n  -0.011767039,\n  -0.025326895,\n  0.019306239,\n  0.028283706,\n  0.01787466,\n  -0.002261091,\n  -0.023761524,\n  0.0071779606,\n  -0.022156015,\n  -0.008214851,\n  -0.0027410712,\n  -0.023413664,\n  0.01350634,\n  0.03417057,\n  -0.024631174,\n  0.008917261,\n  -0.011158284,\n  0.017901419,\n  -0.01594805,\n  0.009599602,\n  -0.023239734,\n  -0.004492079,\n  0.016750805,\n  0.0063852407,\n  -0.007137823,\n  -0.024430485,\n  -0.022771461,\n  -0.024470624,\n  0.009151398,\n  0.020122372,\n  -0.021527192,\n  -0.01280393,\n  0.027828813,\n  -0.023988971,\n  -0.008448988,\n  -0.012148347,\n  0.003306344,\n  -0.020095613,\n  0.0364718,\n  0.02330663,\n  0.007432166,\n  -0.008101128,\n  -0.01350634,\n  0.04522182,\n  -0.021165952,\n  -0.0064220335,\n  0.015386122,\n  -0.031307414,\n  0.011813866,\n  -0.02156733,\n  0.017393008,\n  -0.015305847,\n  
0.008475746,\n  0.0063183447,\n  -0.0119209,\n  -0.009773532,\n  -0.01577412,\n  0.009204915,\n  -0.00045573025,\n  0.014476334,\n  0.0012877515,\n  0.028658325,\n  -0.014556609,\n  -0.008482436,\n  -0.047416016,\n  -0.0032963094,\n  0.024617795,\n  0.014382679,\n  -0.024992414,\n  -0.01017491,\n  0.010897389,\n  0.014409438,\n  -0.0023279872,\n  -0.00030855864,\n  0.0038799786,\n  0.007733199,\n  -0.006064139,\n  -0.049877793,\n  0.0015938015,\n  -0.0026691577,\n  -0.02259753,\n  0.0035454978,\n  0.0010853906,\n  0.013051446,\n  -0.022704564,\n  0.018610518,\n  0.02753447,\n  -0.030825762,\n  0.012522966,\n  0.010884009,\n  -0.007111064,\n  -0.02560786,\n  -0.004167632,\n  0.023279872,\n  -0.00929188,\n  0.019386513,\n  0.011091387,\n  0.02032306,\n  -0.0061042765,\n  0.037729446,\n  0.018302796,\n  -0.008254989,\n  0.012121588,\n  -0.03152148,\n  0.009512637,\n  -0.043669827,\n  -0.007238167,\n  0.015158675,\n  -0.038987096,\n  -0.01909217,\n  0.021099057,\n  -0.005411901,\n  -0.0043382174,\n  0.018610518,\n  0.01593467,\n  0.011680074,\n  -0.025005793,\n  -0.014155232,\n  -0.027828813,\n  0.014891091,\n  -0.01121849,\n  -0.034652222,\n  0.008823606,\n  -0.032190442,\n  0.014396058,\n  0.037408344,\n  -0.0011924244,\n  0.014155232,\n  0.032003134,\n  -0.015586811,\n  -0.013432753,\n  0.0011129852,\n  -0.016402945,\n  0.010295322,\n  -0.0077198194,\n  0.0078937495,\n  -0.035321184,\n  0.011285386,\n  0.03470574,\n  0.021647604,\n  -0.01034884,\n  0.0139545435,\n  0.019707616,\n  0.0035990146,\n  0.00066478085,\n  0.032056652,\n  -0.026932403,\n  -0.005324936,\n  0.0029083116,\n  0.012650068,\n  0.0036257731,\n  0.009793601,\n  0.0010979336,\n  0.0075592687,\n  -0.0071779606,\n  -0.02420304,\n  0.035347942,\n  0.021607468,\n  -0.0182359,\n  -0.012288829,\n  0.00013504666,\n  0.023012288,\n  -0.017647212,\n  -0.00059788465,\n  0.010797044,\n  -0.005662762,\n  0.01929286,\n  -0.03384947,\n  0.015131917,\n  -0.0013429408,\n  -0.015827637,\n  0.021139193,\n  
-0.0016013274,\n  0.010950905,\n  -0.014128474,\n  0.005241316,\n  0.026397234,\n  0.018704172,\n  -0.015439639,\n  -0.035160635,\n  -0.021339882,\n  -0.0014140181,\n  -0.0031775688,\n  -0.010235116,\n  -0.022196153,\n  0.009579534,\n  -0.0013053118,\n  0.0013446133,\n  -0.0122420015,\n  -0.0013772252,\n  0.010542839,\n  -0.007947267,\n  0.0068836175,\n  -0.013352478,\n  -0.022744702,\n  -0.026597923,\n  0.0037261173,\n  0.0033916365,\n  0.012589862,\n  0.023039045,\n  -0.01244269,\n  0.022329945,\n  0.018784449,\n  0.0058032437,\n  -0.010255185,\n  0.008676435,\n  0.009358776,\n  -0.02119271,\n  0.017326111,\n  -0.00447201,\n  -0.004702802,\n  -0.0045623197,\n  -0.0023597628,\n  -0.014115095,\n  0.008375403,\n  0.0026842093,\n  0.049851038,\n  -0.008984158,\n  0.0009950807,\n  0.0025437274,\n  -0.021995464,\n  0.013399306,\n  0.013593305,\n  -0.0005794882,\n  -0.003906737,\n  -0.027427435,\n  0.027092954,\n  -0.010268564,\n  0.022329945,\n  -0.003306344,\n  -0.011559661,\n  0.024617795,\n  -0.024176281,\n  0.015573432,\n  -0.0100009795,\n  -0.007499062,\n  0.014850953,\n  -0.00402715,\n  0.016764184,\n  -0.0066160327,\n  -0.016683908,\n  -0.0066528255,\n  0.0245509,\n  -0.017767627,\n  -0.021139193,\n  -0.01893162,\n  0.008796848,\n  0.018557,\n  -0.021674363,\n  -0.025888823,\n  0.013245445,\n  -0.025045931,\n  0.00005748891,\n  -0.006137725,\n  0.0175268,\n  0.028016122,\n  0.01733949,\n  0.03928144,\n  -0.029166736,\n  -0.022851735,\n  0.031976376,\n  -0.028123155,\n  -0.013459512,\n  -0.014369301,\n  -0.01910555,\n];\nfinal List<double> _chaoVector = [\n  0.014674897,\n  -0.012181209,\n  0.013539028,\n  -0.023239605,\n  -0.018304454,\n  -0.00029049508,\n  -0.015523534,\n  -0.04358079,\n  -0.017886663,\n  -0.008740964,\n  -0.0019469698,\n  0.009680993,\n  0.0063745715,\n  0.003120374,\n  0.005698926,\n  -0.0020106176,\n  0.021150652,\n  -0.021150652,\n  0.044651378,\n  -0.018239174,\n  -0.008969443,\n  0.012572887,\n  0.010359903,\n  -0.014100435,\n  
-0.008427621,\n  -0.007572456,\n  0.012142041,\n  -0.02035424,\n  0.005992685,\n  -0.03407605,\n  0.041857403,\n  0.004749105,\n  -0.029062564,\n  -0.020589245,\n  -0.01650273,\n  -0.039428994,\n  -0.0128927585,\n  0.005911085,\n  0.0099421125,\n  0.0044847215,\n  -0.001860474,\n  -0.00749412,\n  0.003410869,\n  0.003450037,\n  -0.0137740355,\n  0.0025197999,\n  -0.0032803095,\n  -0.01547131,\n  -0.029924257,\n  0.033814933,\n  0.015027408,\n  0.0021183293,\n  -0.03389327,\n  -0.012050649,\n  0.0060938685,\n  0.015249359,\n  -0.019309763,\n  0.016189389,\n  -0.014962128,\n  -0.03433717,\n  -0.003789492,\n  -0.010131423,\n  -0.011652443,\n  0.016750794,\n  0.008375397,\n  0.0031987098,\n  0.014857681,\n  0.0054182224,\n  -0.0016238348,\n  0.0025948717,\n  0.012357464,\n  0.0076703755,\n  0.007259113,\n  -0.015627982,\n  0.012481496,\n  0.014949072,\n  -0.021085372,\n  0.00499064,\n  -0.0010379486,\n  -0.013721812,\n  0.00920445,\n  -0.039402883,\n  -0.0005516142,\n  0.019492546,\n  0.015171023,\n  0.01376098,\n  0.0055096145,\n  0.0054998226,\n  -0.022103738,\n  -0.020497855,\n  0.002456152,\n  0.013539028,\n  0.027809191,\n  0.029193122,\n  -0.00007185877,\n  0.009321954,\n  0.008923748,\n  0.030968733,\n  0.0070241056,\n  -0.022404024,\n  0.0025654957,\n  -0.007703015,\n  -0.022900151,\n  -0.014492113,\n  -0.013995987,\n  -0.02134649,\n  0.026255531,\n  -0.011456603,\n  0.03391938,\n  0.002208089,\n  -0.017338313,\n  0.046923112,\n  0.010581854,\n  -0.024101298,\n  -0.02419269,\n  -0.005630382,\n  0.017586377,\n  -0.0048927204,\n  0.0066977064,\n  -0.033997715,\n  0.0016825866,\n  0.0072525847,\n  0.023944627,\n  -0.014701009,\n  0.02105926,\n  0.0032052377,\n  0.009426402,\n  -0.01579771,\n  0.0030485662,\n  0.021007037,\n  0.012324825,\n  0.03587777,\n  0.03297935,\n  -0.01033379,\n  -0.02603358,\n  0.009583074,\n  -0.022913206,\n  0.0070241056,\n  -0.0009139171,\n  -0.009517794,\n  0.020132288,\n  0.036739465,\n  -0.0052843993,\n  -0.01669857,\n  0.008871524,\n 
 0.027417513,\n  0.012559832,\n  0.007259113,\n  -0.008662628,\n  0.01566715,\n  0.029114787,\n  -0.017129418,\n  -0.0033260053,\n  0.00008578173,\n  0.016006604,\n  0.004664241,\n  0.0031921817,\n  0.023200437,\n  -0.0067499303,\n  -0.01253372,\n  0.014962128,\n  -0.00066258985,\n  0.0022962166,\n  -0.019792832,\n  0.018565573,\n  0.029637026,\n  0.014048211,\n  0.010353375,\n  -0.00035128687,\n  -0.022717368,\n  0.004703409,\n  0.016946634,\n  -0.027443623,\n  -0.015915213,\n  0.0016254667,\n  0.015562702,\n  -0.0029065828,\n  0.015575758,\n  -0.029532578,\n  -0.019923393,\n  -0.012847063,\n  0.000943293,\n  0.029741473,\n  0.0053986385,\n  -0.019531714,\n  0.027861414,\n  0.03159542,\n  0.011835226,\n  0.00862346,\n  0.010686302,\n  0.019270593,\n  0.006195052,\n  0.027600296,\n  -0.010869085,\n  -0.68684787,\n  0.0022113528,\n  -0.0024218801,\n  -0.01056227,\n  0.002521432,\n  0.02997648,\n  0.014217938,\n  -0.0113391,\n  -0.00034904288,\n  -0.016124109,\n  0.006841322,\n  -0.0025883438,\n  0.012834007,\n  -0.0022831606,\n  0.0098572485,\n  -0.02752196,\n  0.019858113,\n  -0.015184079,\n  0.012422744,\n  0.029271457,\n  -0.012259545,\n  -0.0060873404,\n  -0.0064431154,\n  0.0054704463,\n  0.005548782,\n  0.0048829284,\n  -0.005297455,\n  -0.023357108,\n  -0.019910336,\n  -0.012076762,\n  -0.046087533,\n  0.013160406,\n  -0.0027368553,\n  -0.02461048,\n  0.045173615,\n  -0.0021199612,\n  0.022208184,\n  0.01805639,\n  -0.0054671825,\n  0.043633014,\n  -0.007990247,\n  -0.017756103,\n  0.0131799895,\n  -0.007957607,\n  0.005290927,\n  -0.0056956615,\n  0.02438853,\n  -0.0042105466,\n  -0.0051440476,\n  -0.009641825,\n  -0.0085190125,\n  -0.010758109,\n  0.012709975,\n  -0.016803019,\n  -0.020223679,\n  0.011613275,\n  0.034102164,\n  -0.02887978,\n  0.0022423607,\n  0.021777337,\n  -0.007879271,\n  0.007213417,\n  -0.03773172,\n  -0.010327263,\n  -0.019453378,\n  0.0019339138,\n  -0.021020092,\n  0.012285656,\n  0.010255455,\n  -0.017612487,\n  -0.0043182583,\n  
0.009217506,\n  -0.0363739,\n  0.0054084305,\n  0.006919658,\n  0.02105926,\n  0.03556443,\n  0.009106531,\n  -0.009439458,\n  0.010901725,\n  -0.010170591,\n  0.008257894,\n  0.008101222,\n  0.010059616,\n  0.019531714,\n  -0.0108233895,\n  -0.026947498,\n  -0.023187382,\n  -0.012422744,\n  -0.0024365683,\n  -0.0014981712,\n  0.034728847,\n  -0.00794455,\n  0.010431711,\n  0.010261983,\n  0.011554523,\n  -0.022377912,\n  -0.011117148,\n  0.0122987125,\n  -0.016111052,\n  0.0023158006,\n  -0.014583506,\n  -0.0065736747,\n  -0.0028249829,\n  -0.015014352,\n  0.034572177,\n  -0.025641901,\n  0.015627982,\n  0.039063428,\n  -0.01056227,\n  0.0113325715,\n  0.0012231801,\n  0.0067172903,\n  -0.00820567,\n  0.016111052,\n  -0.032065433,\n  -0.013721812,\n  0.006195052,\n  0.02105926,\n  -0.01682913,\n  0.02445381,\n  -0.004732785,\n  0.022978486,\n  0.005313775,\n  0.015301583,\n  0.013969875,\n  0.017403591,\n  -0.015849933,\n  -0.025406895,\n  -0.009478626,\n  0.0087736035,\n  0.01395682,\n  0.010738526,\n  -0.00746148,\n  0.016450508,\n  -0.023618229,\n  0.007761767,\n  -0.017194698,\n  -0.0027221672,\n  0.0039265794,\n  0.009047779,\n  0.0029588065,\n  -0.003727476,\n  -0.011234652,\n  0.0070959134,\n  -0.019701442,\n  0.0067368746,\n  0.006828266,\n  -0.025080496,\n  0.022665143,\n  0.022273464,\n  -0.026020525,\n  0.0001858434,\n  0.023004599,\n  -0.0023109047,\n  -0.026999721,\n  -0.008401509,\n  -0.012540248,\n  -0.020954812,\n  -0.010575326,\n  0.020693693,\n  0.024271026,\n  -0.004954736,\n  0.005819693,\n  0.0032819414,\n  -0.037000585,\n  -0.004683825,\n  0.036974475,\n  -0.00930237,\n  -0.026947498,\n  0.017782215,\n  -0.031856537,\n  0.004758897,\n  0.009478626,\n  0.0032444056,\n  0.013408469,\n  -0.0060449084,\n  -0.0013904596,\n  0.00042268666,\n  -0.011267292,\n  -0.00094818894,\n  -0.018552516,\n  -0.015353806,\n  -0.002208089,\n  0.029245347,\n  -0.011221596,\n  0.011600219,\n  0.028018085,\n  -0.007448424,\n  0.021007037,\n  0.014270162,\n  
0.009667937,\n  -0.040160127,\n  -0.010092256,\n  0.014100435,\n  -0.000120971614,\n  0.01331055,\n  0.0144007215,\n  0.019636162,\n  0.012481496,\n  0.008767076,\n  0.013284437,\n  0.026307756,\n  -0.005898029,\n  0.013813204,\n  -0.034102164,\n  -0.0010020448,\n  -0.005052656,\n  -0.0020563134,\n  0.013153878,\n  -0.008669157,\n  -0.010993117,\n  -0.0057185097,\n  -0.0017625544,\n  0.012651224,\n  0.025315503,\n  -0.0065214513,\n  0.004093043,\n  0.0045695854,\n  -0.0023288566,\n  -0.0032672535,\n  -0.01172425,\n  -0.0073766164,\n  0.008133862,\n  -0.02984592,\n  0.006273388,\n  0.020432575,\n  0.037287816,\n  0.003720948,\n  0.0052223834,\n  -0.010353375,\n  0.0029947103,\n  0.0039167874,\n  0.007983718,\n  0.0013251797,\n  -0.0035381645,\n  0.03326658,\n  -0.0077682952,\n  0.029454242,\n  -0.0064072115,\n  -0.0010501887,\n  0.026895273,\n  0.019401154,\n  -0.009863776,\n  0.0025279599,\n  0.023892403,\n  0.0028967906,\n  -0.009054307,\n  0.012481496,\n  0.026242476,\n  -0.020993982,\n  0.0054508625,\n  -0.011064924,\n  -0.012187737,\n  0.02609886,\n  -0.019257538,\n  -0.005532462,\n  0.019832,\n  0.008003302,\n  0.008310118,\n  0.0069849375,\n  0.006211372,\n  0.009896416,\n  0.0025867117,\n  0.010137952,\n  -0.0010526367,\n  0.0061297724,\n  -0.013969875,\n  0.0005222383,\n  -0.011933146,\n  -0.00625054,\n  -0.020262847,\n  -0.015980493,\n  -0.00097593287,\n  0.025250223,\n  -0.007454952,\n  -0.005715246,\n  -0.005920877,\n  0.0023109047,\n  0.004037555,\n  -0.001888218,\n  -0.019492546,\n  0.021685947,\n  0.0034010771,\n  0.0042758263,\n  -0.009622241,\n  -0.018970307,\n  0.014217938,\n  -0.020523965,\n  -0.014962128,\n  -0.008133862,\n  0.014466002,\n  0.018852804,\n  -0.0035544846,\n  -0.0313343,\n  0.01560187,\n  0.03908954,\n  -0.0038645635,\n  0.00926973,\n  0.011561051,\n  -0.008701796,\n  0.0006523899,\n  -0.01363042,\n  -0.03459829,\n  -0.01711636,\n  0.0035349007,\n  -0.02070675,\n  -0.011495771,\n  0.008616933,\n  0.0023908722,\n  0.00038800677,\n  
-0.02064147,\n  -0.029558688,\n  -0.017051082,\n  -0.002787447,\n  -0.0020350975,\n  -0.024976049,\n  -0.0023827124,\n  0.032326553,\n  -0.0054247505,\n  -0.00035842686,\n  -0.007761767,\n  -0.019596994,\n  -0.008838884,\n  0.013656532,\n  0.026190251,\n  -0.005114672,\n  0.0053529427,\n  0.0106210215,\n  0.012664279,\n  -0.0036034444,\n  -0.026542762,\n  0.00010373367,\n  0.005627118,\n  -0.016842186,\n  -0.020824254,\n  0.0025426478,\n  0.011959258,\n  0.027443623,\n  0.023500724,\n  0.013656532,\n  -0.01831751,\n  0.0015030672,\n  -0.0038221318,\n  -0.0045728493,\n  -0.0070959134,\n  0.0031889179,\n  0.036478348,\n  -0.0035577486,\n  0.02684305,\n  -0.0015822189,\n  0.013747924,\n  0.0013814836,\n  -0.021633722,\n  -0.010555742,\n  0.012285656,\n  0.010040032,\n  0.0051179356,\n  -0.024440754,\n  0.0005450863,\n  -0.010699358,\n  0.0067238184,\n  -0.018461125,\n  -0.0036524043,\n  0.014727121,\n  0.0155365905,\n  -0.0057739974,\n  -0.017821383,\n  0.012599,\n  -0.018565573,\n  -0.026686378,\n  0.019871168,\n  -0.020197567,\n  -0.0158891,\n  0.039011203,\n  0.015993549,\n  -0.023566004,\n  -0.019466434,\n  0.0066781226,\n  0.009184866,\n  0.0047099367,\n  -0.012873175,\n  0.016124109,\n  0.0057772617,\n  -0.01505352,\n  -0.025654959,\n  -0.0023696565,\n  -0.010418654,\n  -0.010934365,\n  -0.009576545,\n  -0.029375905,\n  -0.007931494,\n  -0.028226981,\n  0.004119155,\n  -0.015236303,\n  -0.00374706,\n  -0.017808327,\n  -0.010020448,\n  0.028279206,\n  0.0064496435,\n  0.005927405,\n  -0.011149788,\n  0.016306892,\n  0.012161625,\n  0.012997206,\n  -0.037783943,\n  -0.009210979,\n  -0.021294268,\n  0.006860906,\n  0.00943293,\n  0.024976049,\n  -0.013839316,\n  -0.006893546,\n  0.021620667,\n  0.02997648,\n  -0.010359903,\n  0.0049025123,\n  -0.0045630573,\n  0.0009041251,\n  0.003427189,\n  0.0003776028,\n  0.0055814222,\n  -0.0003439429,\n  0.012938455,\n  -0.004625073,\n  -0.021516219,\n  -0.026790826,\n  0.00045124657,\n  0.016920522,\n  -0.00041228268,\n  
0.016581066,\n  0.026451372,\n  -0.0038417156,\n  0.001564267,\n  0.012422744,\n  -0.027809191,\n  0.023944627,\n  -0.000006802103,\n  -0.00061118207,\n  0.012102873,\n  0.011900506,\n  0.021568444,\n  -0.011482716,\n  -0.0006552459,\n  -0.0070632733,\n  -0.0128078945,\n  0.0014239154,\n  0.011143261,\n  0.0101118395,\n  0.011228124,\n  0.008845411,\n  -0.002475736,\n  -0.008864996,\n  -0.008616933,\n  0.0029506464,\n  0.01967533,\n  -0.008662628,\n  -0.019701442,\n  -0.028357541,\n  -0.0029359586,\n  0.0011195485,\n  0.021907898,\n  -0.027809191,\n  -0.0047001448,\n  -0.041883513,\n  -0.004634865,\n  -0.020249791,\n  -0.017455816,\n  -0.03587777,\n  -0.036974475,\n  0.000617302,\n  0.0035708046,\n  0.01062755,\n  0.02199929,\n  0.009060835,\n  0.010346847,\n  -0.026738603,\n  0.007259113,\n  -0.0040114434,\n  -0.022234296,\n  -0.013460693,\n  -0.020954812,\n  0.030759837,\n  0.009348066,\n  0.03284879,\n  -0.016124109,\n  0.01505352,\n  0.011685083,\n  0.009694049,\n  0.0003461869,\n  -0.02393157,\n  -0.0237749,\n  -0.020262847,\n  0.011097564,\n  0.010438238,\n  0.023657396,\n  0.0033782292,\n  -0.017808327,\n  -0.016672458,\n  0.0017135945,\n  -0.0030045023,\n  -0.0002735631,\n  0.0049449443,\n  -0.018944195,\n  -0.017273033,\n  -0.015027408,\n  -0.0038972036,\n  0.012938455,\n  0.021646779,\n  0.0011334204,\n  0.03300546,\n  0.019923393,\n  0.02051091,\n  -0.0029555424,\n  0.03013315,\n  0.00083435734,\n  0.0026666794,\n  -0.013800148,\n  -0.00015391751,\n  -0.00746148,\n  0.009491681,\n  0.01192009,\n  -0.0022619448,\n  0.019166147,\n  -0.003397813,\n  0.03869786,\n  0.0049025123,\n  -0.012592471,\n  0.0037699079,\n  0.0073831445,\n  0.005532462,\n  -0.020262847,\n  -0.0012860119,\n  -0.0223518,\n  -0.02064147,\n  -0.03734004,\n  -0.0011880922,\n  -0.06366085,\n  0.010647134,\n  0.0035251088,\n  -0.015314639,\n  0.025707182,\n  -0.006221164,\n  -0.00868874,\n  0.01702497,\n  0.016907467,\n  0.035459984,\n  -0.022195129,\n  0.003436981,\n  0.018173894,\n  
-0.0114174355,\n  -0.0013406837,\n  0.023370165,\n  0.0076964875,\n  -0.0176386,\n  0.0101118395,\n  0.02820087,\n  -0.0027303272,\n  -0.002836407,\n  0.016816074,\n  0.0190617,\n  -0.012292185,\n  -0.023409333,\n  0.0012876439,\n  0.012122457,\n  0.0032036058,\n  -0.0034696208,\n  0.0012623479,\n  -0.017521096,\n  0.012057178,\n  -0.012279129,\n  0.0048568165,\n  -0.019805888,\n  -0.0073048086,\n  -0.015680205,\n  0.033710483,\n  -0.01240316,\n  0.019388098,\n  0.020654526,\n  -0.008904164,\n  -0.011991898,\n  0.0057772617,\n  -0.013930707,\n  -0.0042921463,\n  0.00566955,\n  0.05057878,\n  -0.009726689,\n  0.022155961,\n  0.0070698014,\n  0.018147781,\n  -0.013108182,\n  -0.0032395096,\n  -0.011195485,\n  0.018160839,\n  -0.009876832,\n  0.0013210998,\n  -0.03026371,\n  -0.024036018,\n  0.005672814,\n  0.0021052733,\n  -0.0018131462,\n  -0.022599863,\n  -0.023814067,\n  -0.0038156037,\n  0.017599432,\n  0.0031562778,\n  -0.023122102,\n  -0.032457113,\n  -0.03159542,\n  -0.032901015,\n  -0.010092256,\n  0.0051114075,\n  0.0013072278,\n  0.029062564,\n  -0.012938455,\n  -0.019270593,\n  -0.00620158,\n  0.01540603,\n  0.007761767,\n  -0.009224035,\n  0.0073178643,\n  0.006795626,\n  -0.016776906,\n  0.017756103,\n  -0.024009908,\n  -0.005640174,\n  -0.027417513,\n  0.004690353,\n  -0.0045891693,\n  -0.014857681,\n  -0.0014279954,\n  -0.006580203,\n  -0.014348498,\n  -0.020119231,\n  0.01853946,\n  0.014048211,\n  0.014217938,\n  -0.0055357264,\n  0.009772385,\n  0.022142906,\n  0.0010991485,\n  -0.006514923,\n  -0.022926262,\n  0.011423963,\n  -0.013930707,\n  0.012990679,\n  0.0060416446,\n  0.009374178,\n  0.0063680434,\n  0.014622673,\n  0.021215932,\n  0.009047779,\n  -0.027626408,\n  0.012736087,\n  -0.0019453377,\n  0.010079199,\n  -0.022586808,\n  -0.021111485,\n  -0.0328749,\n  -0.010255455,\n  -0.015876045,\n  0.0013096757,\n  0.0035610127,\n  -0.01175689,\n  0.0011032284,\n  0.029375905,\n  -0.0017233865,\n  -0.00797719,\n  0.004895984,\n  -0.0057837893,\n 
 0.0062962356,\n  -0.000310487,\n  -0.022939319,\n  -0.011319515,\n  0.00095879694,\n  0.017912775,\n  0.020628413,\n  -0.01812167,\n  -0.01327791,\n  0.0046479213,\n  -0.005917613,\n  0.0137740355,\n  -0.020171456,\n  0.0030958941,\n  0.00009649169,\n  0.0005614062,\n  -0.006204844,\n  0.03553832,\n  0.0014157555,\n  0.019910336,\n  -0.026216364,\n  0.019009475,\n  0.012664279,\n  0.009132642,\n  0.013930707,\n  -0.008114278,\n  -0.025459118,\n  -0.011482716,\n  0.012918871,\n  0.012677335,\n  -0.009583074,\n  0.014844624,\n  -0.010588382,\n  -0.005385583,\n  -0.005274607,\n  0.007187305,\n  -0.000634846,\n  -0.0029131107,\n  0.029924257,\n  -0.008904164,\n  -0.0023696565,\n  -0.009341538,\n  -0.027417513,\n  0.0101967035,\n  -0.01498824,\n  -0.018291397,\n  0.0011268924,\n  0.011143261,\n  -0.015915213,\n  -0.016868297,\n  0.018291397,\n  -0.020158399,\n  0.01579771,\n  -0.012794839,\n  0.004687089,\n  0.026947498,\n  0.005313775,\n  -0.031073181,\n  -0.019662272,\n  -0.011391323,\n  -0.0012076761,\n  0.011084508,\n  0.012168153,\n  -0.0044716657,\n  0.0019518657,\n  -0.002849463,\n  0.025798574,\n  -0.015549646,\n  0.0033406932,\n  -0.014557393,\n  -0.0051016156,\n  -0.008003302,\n  0.025889965,\n  0.016737739,\n  0.0057576774,\n  0.009563489,\n  -0.0054965583,\n  0.000012864709,\n  -0.022534585,\n  -0.020053951,\n  -0.012174681,\n  -0.02467576,\n  0.0032688854,\n  -0.0000045517354,\n  -0.018800579,\n  -0.010784222,\n  -0.0037046282,\n  -0.00095308496,\n  0.0002170553,\n  0.0013602676,\n  0.20221068,\n  -0.010692829,\n  -0.0123835765,\n  0.037392262,\n  0.007135081,\n  0.016933577,\n  0.01230524,\n  0.009152227,\n  -0.0011415803,\n  0.015510478,\n  0.0018425221,\n  0.013362773,\n  -0.0047621606,\n  0.0037046282,\n  -0.0004912304,\n  -0.0070502176,\n  -0.032039322,\n  -0.010229344,\n  0.0026487275,\n  -0.043058548,\n  -0.0009930688,\n  -0.023761844,\n  0.0013961715,\n  0.0033553813,\n  0.022952374,\n  0.015027408,\n  -0.011861338,\n  -0.0010648766,\n  
0.03324047,\n  0.01085603,\n  -0.011202012,\n  -0.0014998032,\n  -0.0027629673,\n  0.018774468,\n  -0.021490106,\n  0.00020297935,\n  0.017481929,\n  -0.013317077,\n  0.01246844,\n  0.020053951,\n  -0.015419086,\n  -0.005943725,\n  0.005633646,\n  -0.020523965,\n  -0.010320735,\n  0.026268588,\n  -0.0041158907,\n  -0.022665143,\n  0.0151971355,\n  0.032770455,\n  -0.02303071,\n  -0.003773172,\n  0.009106531,\n  0.056192845,\n  -0.0081795575,\n  -0.0019012738,\n  0.0027433832,\n  0.0035218447,\n  -0.04269298,\n  -0.0035544846,\n  0.0008143654,\n  0.03365826,\n  -0.004592433,\n  0.006341932,\n  -0.015118799,\n  0.0000065343543,\n  -0.009002083,\n  -0.0012272601,\n  0.0011831962,\n  -0.016058829,\n  -0.016776906,\n  -0.010686302,\n  -0.0006944138,\n  -0.017364424,\n  -0.03188265,\n  -0.014257106,\n  0.021085372,\n  0.013591252,\n  0.0028478308,\n  0.01560187,\n  -0.013982931,\n  -0.0051636314,\n  0.014714065,\n  -0.005630382,\n  -0.009224035,\n  -0.024714928,\n  0.0076638474,\n  0.008362341,\n  -0.019179203,\n  -0.00018502741,\n  0.005548782,\n  -0.02412741,\n  -0.014204882,\n  0.0062864437,\n  0.014753233,\n  0.025106607,\n  0.022991542,\n  0.011783002,\n  -0.0055389903,\n  0.012690391,\n  -0.031438746,\n  -0.03136041,\n  0.01844807,\n  0.010053088,\n  -0.009628769,\n  -0.006821738,\n  -0.0061558844,\n  0.01799111,\n  0.009870305,\n  -0.009687521,\n  -0.032744344,\n  -0.020693693,\n  0.01961005,\n  -0.0016034349,\n  0.029114787,\n  -0.004674033,\n  -0.0076050954,\n  -0.0063810996,\n  0.01812167,\n  -0.00072623766,\n  0.013108182,\n  -0.013153878,\n  -0.009831136,\n  0.010131423,\n  -0.0056662858,\n  -0.012651224,\n  -0.021411771,\n  0.02038035,\n  0.004660977,\n  -0.011391323,\n  0.012266072,\n  -0.026203308,\n  0.0005483502,\n  0.01844807,\n  -0.020171456,\n  -0.00025928317,\n  0.007193833,\n  -0.0006805418,\n  -0.01792583,\n  -0.00018390542,\n  -0.020093119,\n  -0.014296274,\n  0.0029637024,\n  -0.007108969,\n  0.012664279,\n  -0.0029212707,\n  0.013323605,\n  
-0.004625073,\n  -0.03159542,\n  -0.0009506369,\n  -0.017233865,\n  0.0007184857,\n  -0.018800579,\n  0.001213388,\n  0.017403591,\n  -0.018356677,\n  -0.030394271,\n  -0.033867154,\n  -0.0024430961,\n  0.0010632445,\n  -0.027756967,\n  -0.013297494,\n  0.018839747,\n  -0.0013504757,\n  -0.028932003,\n  -0.007676903,\n  -0.16857854,\n  0.022808759,\n  0.006596523,\n  -0.0136957,\n  0.0033080534,\n  -0.0017413384,\n  0.040316798,\n  -0.010967005,\n  -0.016163277,\n  0.00618526,\n  0.030342046,\n  0.0024496242,\n  -0.033632148,\n  -0.0011440284,\n  0.009067363,\n  0.0013610836,\n  -0.005209327,\n  0.002834775,\n  0.004037555,\n  0.021281213,\n  0.0039592194,\n  -0.019166147,\n  0.036086667,\n  -0.0075985678,\n  0.008473317,\n  0.00044675855,\n  0.028044198,\n  0.019518657,\n  0.0026405675,\n  -0.013251797,\n  -0.036008332,\n  -0.021790395,\n  0.006527979,\n  0.0061003966,\n  0.031438746,\n  -0.0005675262,\n  0.017899718,\n  -0.03720948,\n  -0.0026813673,\n  0.028540324,\n  0.016111052,\n  0.040812925,\n  -0.009426402,\n  0.0024137204,\n  -0.011521883,\n  0.021085372,\n  0.0020269374,\n  -0.002181977,\n  0.016659403,\n  -0.004093043,\n  0.0058001094,\n  -0.011019229,\n  0.020693693,\n  -0.005917613,\n  0.031151516,\n  0.01647662,\n  -0.011097564,\n  0.021437883,\n  0.0035871244,\n  0.012696919,\n  -0.0029620705,\n  -0.019518657,\n  -0.023801012,\n  -0.01233788,\n  0.003472885,\n  -0.017442761,\n  -0.01695969,\n  -0.0035675406,\n  -0.008388453,\n  0.011508827,\n  0.013800148,\n  -0.014883792,\n  0.016711626,\n  -0.003737268,\n  0.022874039,\n  0.0153668625,\n  -0.027548071,\n  0.014387666,\n  0.0060710204,\n  -0.009752801,\n  -0.014505169,\n  0.014022099,\n  -0.017481929,\n  -0.018513348,\n  0.0049188323,\n  0.02861866,\n  0.016319947,\n  -0.00079478143,\n  0.01014448,\n  -0.015105744,\n  0.010477406,\n  -0.014531282,\n  -0.010033504,\n  -0.035094414,\n  0.012612056,\n  0.010967005,\n  -0.003066518,\n  0.008845411,\n  -0.0023631284,\n  -0.02261292,\n  0.00019594138,\n  
-0.00023011127,\n  -0.0020710013,\n  -0.013095126,\n  0.008727908,\n  -0.004608753,\n  0.0069849375,\n  0.020863421,\n  0.039193988,\n  -0.014635729,\n  -0.0071154973,\n  0.034702737,\n  0.034546066,\n  0.034728847,\n  -0.0055291983,\n  0.026111916,\n  -0.024532145,\n  -0.027313065,\n  -0.0008771972,\n  -0.0029539105,\n  0.058542915,\n  -0.019192258,\n  0.007892326,\n  0.014518226,\n  0.004347634,\n  -0.013160406,\n  -0.083505906,\n  -0.011978841,\n  -0.000059618807,\n  0.014635729,\n  -0.012664279,\n  0.007859686,\n  0.010033504,\n  0.022495415,\n  -0.024362419,\n  0.014544337,\n  -0.005914349,\n  -0.029323682,\n  -0.027078057,\n  -0.02035424,\n  0.024218803,\n  -0.009524321,\n  -0.009733217,\n  -0.009583074,\n  0.001558555,\n  0.002771127,\n  0.005068976,\n  -0.004964528,\n  0.0055291983,\n  -0.018957252,\n  -0.028226981,\n  -0.0013219158,\n  -0.014426834,\n  0.025446063,\n  0.010947421,\n  0.005865389,\n  -0.0041452665,\n  -0.020471742,\n  0.01853946,\n  -0.0027368553,\n  0.0042301305,\n  0.002539384,\n  -0.01628078,\n  -0.007840103,\n  0.012918871,\n  -0.0127556715,\n  0.0144007215,\n  -0.018108614,\n  -0.005046128,\n  -0.037914503,\n  0.025315503,\n  0.006550827,\n  -0.0030028704,\n  -0.0014206514,\n  0.007141609,\n  -0.05311164,\n  -0.03475496,\n  -0.0033782292,\n  -0.020876477,\n  -0.0020856892,\n  0.025981357,\n  0.0049090404,\n  0.008251365,\n  0.010483935,\n  -0.020850366,\n  0.009687521,\n  0.019570882,\n  0.002134649,\n  -0.03159542,\n  0.021228988,\n  0.00494168,\n  -0.00684785,\n  -0.017938886,\n  -0.0041974904,\n  0.021790395,\n  -0.011058397,\n  -0.0075332876,\n  0.030629277,\n  -0.009700577,\n  0.015458254,\n  -0.035773326,\n  -0.011358684,\n  -0.015706317,\n  -0.016816074,\n  0.021777337,\n  -0.015641037,\n  -0.021281213,\n  -0.027025834,\n  0.013656532,\n  -0.012847063,\n  0.035799436,\n  0.013721812,\n  0.020341182,\n  -0.010346847,\n  0.011489243,\n  -0.023344053,\n  0.0024382002,\n  0.028696995,\n  -0.008251365,\n  -0.018395845,\n  
0.0054704463,\n  0.018891972,\n  0.0033390613,\n  -0.0036230283,\n  0.020080063,\n  0.0039690114,\n  -0.002828247,\n  -0.008982499,\n  -0.059639618,\n  0.025198,\n  -0.009511266,\n  -0.0067303465,\n  0.013173462,\n  -0.017299145,\n  0.017064137,\n  -0.013813204,\n  -0.005943725,\n  -0.033632148,\n  -0.006873962,\n  0.011032284,\n  -0.0014883792,\n  0.011626331,\n  -0.035668876,\n  -0.021150652,\n  0.04222297,\n  0.03136041,\n  0.013512917,\n  0.008571236,\n  0.008408037,\n  0.0022766327,\n  0.029402018,\n  -0.006926186,\n  -0.004716465,\n  0.0041420027,\n  -0.016254667,\n  0.009413346,\n  -0.008035942,\n  -0.029245347,\n  0.01014448,\n  -0.040186238,\n  0.0036262923,\n  0.02261292,\n  0.017220808,\n  -0.007219945,\n  -0.011352155,\n  0.029349795,\n  0.025041327,\n  0.018278342,\n  -0.037000585,\n  -0.025459118,\n  0.0030159263,\n  -0.024401587,\n  -0.023357108,\n  0.00943293,\n  -0.031987097,\n  -0.0061101886,\n  0.019322818,\n  0.0074288403,\n  0.009818081,\n  0.008727908,\n  -0.00006777878,\n  0.005330095,\n  0.0015283631,\n  -0.025916077,\n  0.013003734,\n  0.0060220608,\n  -0.01547131,\n  -0.022730423,\n  0.029036451,\n  0.024714928,\n  0.009106531,\n  -0.0063223476,\n  -0.004951472,\n  0.0011040445,\n  0.0018245701,\n  -0.016946634,\n  0.001860474,\n  -0.06261638,\n  -0.038488965,\n  0.014949072,\n  0.0019371778,\n  0.026738603,\n  -0.00061077403,\n  0.010053088,\n  0.00006946178,\n  -0.005202799,\n  -0.032639895,\n  0.03240489,\n  0.0035512205,\n  -0.014152658,\n  -0.020902589,\n  0.019805888,\n  0.016554955,\n  0.005339887,\n  -0.00923709,\n  -0.004008179,\n  -0.0054378067,\n  0.009511266,\n  -0.045304175,\n  -0.0038482437,\n  0.007905383,\n  -0.008780132,\n  -0.011893977,\n  0.0034924687,\n  0.0061819963,\n  -0.0019420738,\n  0.017899718,\n  0.015836878,\n  -0.014661841,\n  0.0016091468,\n  -0.014361554,\n  0.0064006834,\n  -0.014322386,\n  0.019388098,\n  -0.001853946,\n  -0.008812772,\n  0.013539028,\n  0.014674897,\n  0.022795703,\n  0.010790749,\n  
0.0019371778,\n  0.021072317,\n  -0.008923748,\n  0.006002477,\n  -0.0057837893,\n  -0.010914781,\n  -0.019414209,\n  0.03653057,\n  0.015876045,\n  0.040917374,\n  -0.009791968,\n  0.004964528,\n  0.0037699079,\n  -0.0049710562,\n  -0.0021020093,\n  -0.038384516,\n  0.011698139,\n  -0.013042903,\n  0.00694577,\n  -0.014661841,\n  -0.016672458,\n  -0.027156392,\n  -0.016084941,\n  -0.0031840217,\n  -0.0018588421,\n  0.01641134,\n  -0.028827555,\n  0.088258274,\n  0.012696919,\n  0.0033651732,\n  0.0106210215,\n  0.031543195,\n  0.018591685,\n  0.015719373,\n  0.008440677,\n  -0.013995987,\n  -0.013591252,\n  0.011880922,\n  -0.015210191,\n  -0.0011285244,\n  -0.014479058,\n  -0.009935584,\n  0.013460693,\n  -0.01702497,\n  0.0005169343,\n  -0.009250146,\n  0.0037046282,\n  0.029323682,\n  0.0059731007,\n  0.015118799,\n  0.007422312,\n  -0.010477406,\n  0.0026356715,\n  -0.0017641863,\n  -0.0011505563,\n  -0.035747215,\n  -0.020798141,\n  0.009785441,\n  -0.0023370164,\n  -0.025641901,\n  -0.00429541,\n  0.005209327,\n  -0.0030273504,\n  -0.011051869,\n  -0.0016581067,\n  -0.023122102,\n  0.0038776195,\n  -0.0035708046,\n  0.014139603,\n  -0.0075920396,\n  -0.02642526,\n  0.04522584,\n  -0.0033553813,\n  -0.0019796097,\n  -0.0054214867,\n  -0.00085516524,\n];\n"
  },
  {
    "path": "packages/langchain_amazon/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_amazon/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_amazon/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_amazon/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nAmazon module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_amazon/example/langchain_amazon_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_amazon/lib/langchain_amazon.dart",
    "content": "/// Amazon module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_amazon/pubspec.yaml",
    "content": "name: langchain_amazon\ndescription: Amazon module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_amazon\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_amazon\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_anthropic/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_anthropic/CHANGELOG.md",
    "content": "## 0.3.1\n\n - **FIX**(langchain_anthropic): Handle CitationsBlockDelta in streaming responses ([#881](https://github.com/davidmigloz/langchain_dart/issues/881)). ([445ddda1](https://github.com/davidmigloz/langchain_dart/commit/445ddda1fa7a61f6a34e4faae3e024c7e625b86b))\n - **FIX**(langchain_anthropic): Handle SignatureBlockDelta in streaming responses ([#879](https://github.com/davidmigloz/langchain_dart/issues/879)). ([a9fe5285](https://github.com/davidmigloz/langchain_dart/commit/a9fe5285ef00df5f80e934a480b1b79caeb0e1f0))\n - **FIX**(langchain_anthropic): Update mappers for anthropic_sdk_dart schema changes ([#877](https://github.com/davidmigloz/langchain_dart/issues/877)). ([c77d454d](https://github.com/davidmigloz/langchain_dart/commit/c77d454d8effd26aaff59a39c438e4731f7ad773))\n - **FEAT**(langchain_anthropic): Add listModels() support ([#882](https://github.com/davidmigloz/langchain_dart/issues/882)). ([c1ba2592](https://github.com/davidmigloz/langchain_dart/commit/c1ba25920277f625fb6b3629f3d0c312ebd75240))\n\n## 0.3.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.3.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Add extended thinking support to langchain_anthropic ([#804](https://github.com/davidmigloz/langchain_dart/issues/804)). ([0e58fd31](https://github.com/davidmigloz/langchain_dart/commit/0e58fd316191091fb014287b24063fca61b4a2e5))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). 
([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.2.1+3\n\n - Update a dependency to the latest release.\n\n## 0.2.1+2\n\n - Update a dependency to the latest release.\n\n## 0.2.1+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.2.1\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.2.0+1\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.2.0\n\n - **FEAT**: Update ChatAnthropic default model to claude-3-5-sonnet-20241022 ([#584](https://github.com/davidmigloz/langchain_dart/issues/584)). ([4f0d9cfb](https://github.com/davidmigloz/langchain_dart/commit/4f0d9cfb0a71c567d1b37842cd44dac1f7308001))\n\n## 0.1.1+2\n\n - Update a dependency to the latest release.\n\n## 0.1.1+1\n\n - Update a dependency to the latest release.\n\n## 0.1.1\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). 
([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.1.0\n\n- **FEAT**: Add ChatAnthropic integration ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_anthropic/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_anthropic/MIGRATION.md",
    "content": "# langchain_anthropic Migration Guide\n\n## Image URL support\n\nPreviously, only base64-encoded images were supported when sending images\nto Anthropic models. Now, image URLs are also supported:\n\n```dart\nfinal message = ChatMessage.human(\n  ChatMessageContent.image(\n    data: 'https://example.com/image.png',\n  ),\n);\n```\n\nBase64-encoded images continue to work as before.\n\n## PDF document support\n\nPDF documents can now be sent to Claude models via base64 encoding.\nUse the `application/pdf` MIME type:\n\n```dart\nfinal message = ChatMessage.human(\n  ChatMessageContent.image(\n    data: base64EncodedPdfData,\n    mimeType: 'application/pdf',\n  ),\n);\n```\n\n## Base URL change\n\nThe default base URL has changed from `https://api.anthropic.com/v1` to\n`https://api.anthropic.com`. This is a **breaking change** if you relied on\nthe old default or appended paths to it.\n\nIf you were manually constructing URLs by appending to the base URL, update\nyour code accordingly.\n\n## Updated model list\n\nThe model list in `ChatAnthropicOptions` has been updated to include:\n- `claude-sonnet-4-5-20250514`\n- `claude-sonnet-4-5-latest`\n- `claude-haiku-3-5-latest`\n- `claude-haiku-3-5-20250414`\n- `claude-3-5-haiku-latest`\n- `claude-3-5-haiku-20241022`\n"
  },
  {
    "path": "packages/langchain_anthropic/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Anthropic\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_anthropic](https://img.shields.io/pub/v/langchain_anthropic.svg)](https://pub.dev/packages/langchain_anthropic)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\n[Anthropic](https://anthropic.com) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Chat models:\n    * `ChatAnthropic`: wrapper around [Anthropic Messages](https://docs.anthropic.com/en/api/messages) API (Claude).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_anthropic/example/langchain_anthropic_example.dart",
    "content": "// ignore_for_file: avoid_print, unused_element\nimport 'dart:io';\n\nimport 'package:langchain_anthropic/langchain_anthropic.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\n\n/// Check the docs for more examples:\n/// https://langchaindart.dev\nvoid main() async {\n  // Uncomment the example you want to run:\n  await _example1();\n  // await _example2();\n}\n\n/// The most basic example of LangChain is calling a model on some input\nFuture<void> _example1() async {\n  final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY'];\n  final llm = ChatAnthropic(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatAnthropicOptions(temperature: 1),\n  );\n  final ChatResult res = await llm.invoke(PromptValue.string('Tell me a joke'));\n  print(res);\n}\n\n/// Instead of waiting for the full response from the model, you can stream it\n/// while it's being generated\nFuture<void> _example2() async {\n  final openAiApiKey = Platform.environment['ANTHROPIC_API_KEY'];\n  final llm = ChatAnthropic(\n    apiKey: openAiApiKey,\n    defaultOptions: const ChatAnthropicOptions(temperature: 1),\n  );\n  final Stream<ChatResult> stream = llm.stream(\n    PromptValue.string('Tell me a joke'),\n  );\n  await stream.forEach((final chunk) => stdout.write(chunk.output.content));\n}\n"
  },
  {
    "path": "packages/langchain_anthropic/lib/langchain_anthropic.dart",
    "content": "/// Anthropic module for LangChain.dart.\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\n"
  },
  {
    "path": "packages/langchain_anthropic/lib/src/chat_models/chat_anthropic.dart",
    "content": "import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a;\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Anthropic Messages API](https://docs.anthropic.com/en/api/messages)\n/// (aka Claude API).\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatAnthropic(apiKey: '...');\n/// final messages = [\n///   ChatMessage.system('You are a helpful assistant that translates English to French.'),\n///   ChatMessage.humanText('I love programming.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await llm.invoke(prompt);\n/// ```\n///\n/// - Docs: https://docs.anthropic.com\n///\n/// ### Authentication\n///\n/// The Anthropic API uses API keys for authentication. Visit your\n/// [API Keys](https://console.anthropic.com/settings/keys) page to retrieve\n/// the API key you'll use in your requests.\n///\n/// ### Available models\n///\n/// The following models are available:\n/// - `claude-sonnet-4-5`\n/// - `claude-haiku-4-5`\n/// - `claude-opus-4-5`\n///\n/// Mind that the list may not be up-to-date.\n/// See https://platform.claude.com/docs/en/about-claude/models for the updated list.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatAnthropic(\n///   apiKey: anthropicApiKey,\n///   defaultOptions: const ChatAnthropicOptions(\n///     temperature: 0.9,\n///     maxTokens: 100,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatAnthropicOptions(temperature: 0.5),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatAnthropic(apiKey: anthropicApiKey);\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-sonnet-4-5')) | outputParser,\n///   'q2': prompt2 | chatModel.bind(const ChatAnthropicOptions(model: 'claude-haiku-4-5')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Extended Thinking\n///\n/// Claude's extended thinking feature enables the model to show its internal\n/// reasoning process before providing the final answer. 
This is particularly\n/// useful for complex reasoning tasks.\n///\n/// ```dart\n/// final chatModel = ChatAnthropic(\n///   apiKey: anthropicApiKey,\n///   defaultOptions: ChatAnthropicOptions(\n///     model: 'claude-sonnet-4-5',\n///     maxTokens: 8192,\n///     thinking: ChatAnthropicThinking.enabled(budgetTokens: 4096),\n///   ),\n/// );\n///\n/// final prompt = PromptValue.string('Solve this complex problem: ...');\n/// final res = await chatModel.invoke(prompt);\n/// // The response will include thinking blocks showing Claude's reasoning\n/// ```\n///\n/// The thinking blocks will appear in the response content and can be\n/// accessed through the message's content blocks. The `budgetTokens` parameter\n/// controls how many tokens Claude can use for thinking (minimum 1024), and\n/// counts towards your `maxTokens` limit.\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = ChatAnthropic(\n///   apiKey: 'ANTHROPIC_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = ChatAnthropic(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass ChatAnthropic extends BaseChatModel<ChatAnthropicOptions> {\n  /// Create a new [ChatAnthropic] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Anthropic API key. 
You can find your API key in the\n  ///   [Anthropic dashboard](https://console.anthropic.com/settings/keys).\n  /// - [ChatAnthropic.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Anthropic's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  ChatAnthropic({\n    final String? apiKey,\n    final String baseUrl = 'https://api.anthropic.com',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const ChatAnthropicOptions(\n      model: defaultModel,\n      maxTokens: defaultMaxTokens,\n    ),\n  }) : _client = a.AnthropicClient(\n         config: a.AnthropicConfig(\n           authProvider: apiKey != null && apiKey.isNotEmpty\n               ? a.ApiKeyProvider(apiKey)\n               : null,\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? 
const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Anthropic API.\n  final a.AnthropicClient _client;\n\n  @override\n  String get modelType => 'anthropic-chat';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'claude-sonnet-4-5';\n\n  /// The default max tokens to use unless another is specified.\n  static const defaultMaxTokens = 1024;\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatAnthropicOptions? options,\n  }) async {\n    final completion = await _client.messages.create(\n      createMessageRequest(\n        input.toChatMessages(),\n        options: options,\n        defaultOptions: defaultOptions,\n      ),\n    );\n    return completion.toChatResult();\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatAnthropicOptions? options,\n  }) {\n    return _client.messages\n        .createStream(\n          createMessageRequest(\n            input.toChatMessages(),\n            options: options,\n            defaultOptions: defaultOptions,\n            stream: true,\n          ),\n        )\n        .transform(MessageStreamEventTransformer());\n  }\n\n  /// Counts the number of tokens in the given prompt using the Anthropic\n  /// token counting API.\n  ///\n  /// - [promptValue] The prompt to count tokens for.\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final ChatAnthropicOptions? 
options,\n  }) async {\n    final request = createMessageRequest(\n      promptValue.toChatMessages(),\n      options: options,\n      defaultOptions: defaultOptions,\n    );\n    final response = await _client.messages.countTokens(\n      a.TokenCountRequest.fromMessageCreateRequest(request),\n    );\n    return response.inputTokens;\n  }\n\n  /// Anthropic does not expose individual token IDs.\n  ///\n  /// Use [countTokens] instead to get the token count for a prompt.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatAnthropicOptions? options,\n  }) {\n    throw UnsupportedError(\n      'Anthropic does not expose token IDs. '\n      'Use countTokens() to get the token count for a prompt.',\n    );\n  }\n\n  /// Lists the models available to the Anthropic API.\n  ///\n  /// This returns all models available for use with the Anthropic Messages API.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatAnthropic(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName}');\n  /// }\n  /// ```\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data.map((final m) {\n      return ModelInfo(\n        id: m.id,\n        displayName: m.displayName,\n        created: m.createdAt.millisecondsSinceEpoch ~/ 1000,\n      );\n    }).toList();\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_anthropic/lib/src/chat_models/chat_models.dart",
    "content": "export 'chat_anthropic.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_anthropic/lib/src/chat_models/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:async';\nimport 'dart:convert';\n\nimport 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a;\nimport 'package:collection/collection.dart' show IterableExtension;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:rxdart/rxdart.dart' show WhereNotNullExtension;\n\nimport 'chat_anthropic.dart';\nimport 'types.dart';\n\n/// Creates a [MessageCreateRequest] from the given input.\na.MessageCreateRequest createMessageRequest(\n  final List<ChatMessage> messages, {\n  required final ChatAnthropicOptions? options,\n  required final ChatAnthropicOptions defaultOptions,\n  final bool stream = false,\n}) {\n  final systemMsg = messages.firstOrNull is SystemChatMessage\n      ? messages.firstOrNull?.contentAsString\n      : null;\n\n  final messagesDtos = messages.toInputMessages();\n  final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice;\n  // When ChatToolChoiceNone is used, omit both tools and tool_choice\n  // to avoid sending tool_choice without a tools array.\n  final toolChoiceDto = toolChoice is ChatToolChoiceNone\n      ? null\n      : toolChoice?.toToolChoice();\n  final toolsDtos = (options?.tools ?? defaultOptions.tools)?.toToolDefinitions(\n    toolChoice,\n  );\n  final thinking = options?.thinking ?? defaultOptions.thinking;\n  final thinkingDto = thinking?.toThinkingConfig();\n  final userId = options?.userId ?? defaultOptions.userId;\n\n  return a.MessageCreateRequest(\n    model: options?.model ?? defaultOptions.model ?? ChatAnthropic.defaultModel,\n    messages: messagesDtos,\n    maxTokens:\n        options?.maxTokens ??\n        defaultOptions.maxTokens ??\n        ChatAnthropic.defaultMaxTokens,\n    stopSequences: options?.stopSequences ?? defaultOptions.stopSequences,\n    system: systemMsg != null ? 
a.SystemPrompt.text(systemMsg) : null,\n    temperature: options?.temperature ?? defaultOptions.temperature,\n    topK: options?.topK ?? defaultOptions.topK,\n    topP: options?.topP ?? defaultOptions.topP,\n    metadata: userId != null ? a.Metadata(userId: userId) : null,\n    tools: toolsDtos,\n    toolChoice: toolChoiceDto,\n    thinking: thinkingDto,\n    stream: stream,\n  );\n}\n\nextension ChatMessageListMapper on List<ChatMessage> {\n  List<a.InputMessage> toInputMessages() {\n    final List<a.InputMessage> result = [];\n    final List<ToolChatMessage> consecutiveToolMessages = [];\n\n    void flushToolMessages() {\n      if (consecutiveToolMessages.isNotEmpty) {\n        result.add(_mapToolChatMessages(consecutiveToolMessages));\n        consecutiveToolMessages.clear();\n      }\n    }\n\n    for (final message in this) {\n      switch (message) {\n        case SystemChatMessage():\n          flushToolMessages();\n          continue; // System message set in request params\n        case final HumanChatMessage msg:\n          flushToolMessages();\n          final res = _mapHumanChatMessage(msg);\n          result.add(res);\n        case final AIChatMessage msg:\n          flushToolMessages();\n          final res = _mapAIChatMessage(msg);\n          result.add(res);\n        case final ToolChatMessage msg:\n          consecutiveToolMessages.add(msg);\n        case CustomChatMessage():\n          throw UnsupportedError('Anthropic does not support custom messages');\n      }\n    }\n\n    flushToolMessages(); // Flush any remaining tool messages\n    return result;\n  }\n\n  a.InputMessage _mapHumanChatMessage(final HumanChatMessage msg) {\n    return switch (msg.content) {\n      final ChatMessageContentText t => a.InputMessage.user(t.text),\n      final ChatMessageContentImage i => a.InputMessage.userBlocks([\n        _mapHumanChatMessageContentImage(i),\n      ]),\n      final ChatMessageContentMultiModal mm => a.InputMessage.userBlocks(\n        
mm.parts\n            .map(\n              (final part) => switch (part) {\n                final ChatMessageContentText t => a.InputContentBlock.text(\n                  t.text,\n                ),\n                final ChatMessageContentImage i =>\n                  _mapHumanChatMessageContentImage(i),\n                ChatMessageContentMultiModal() => throw ArgumentError(\n                  'Cannot have multimodal content in multimodal content',\n                ),\n              },\n            )\n            .toList(growable: false),\n      ),\n    };\n  }\n\n  a.InputContentBlock _mapHumanChatMessageContentImage(\n    ChatMessageContentImage i,\n  ) {\n    final imageData = i.data.trim();\n    if (i.mimeType == 'application/pdf') {\n      if (imageData.startsWith('http')) {\n        throw ArgumentError(\n          'Anthropic does not support PDF URLs. '\n          'Provide the PDF as base64-encoded data instead.',\n        );\n      }\n      return a.InputContentBlock.document(\n        a.DocumentSource.base64Pdf(imageData),\n      );\n    }\n    if (imageData.startsWith('http')) {\n      return a.InputContentBlock.image(a.ImageSource.url(imageData));\n    }\n    if (i.mimeType == null) {\n      throw ArgumentError('mimeType is required for base64-encoded images');\n    }\n    return a.InputContentBlock.image(\n      a.ImageSource.base64(\n        data: imageData,\n        mediaType: a.ImageMediaType.fromMimeType(i.mimeType!),\n      ),\n    );\n  }\n\n  a.InputMessage _mapAIChatMessage(final AIChatMessage msg) {\n    if (msg.toolCalls.isEmpty) {\n      return a.InputMessage.assistant(msg.content);\n    } else {\n      return a.InputMessage.assistantBlocks([\n        if (msg.content.isNotEmpty) a.InputContentBlock.text(msg.content),\n        ...msg.toolCalls.map(\n          (final toolCall) => a.InputContentBlock.toolUse(\n            id: toolCall.id,\n            name: toolCall.name,\n            input: toolCall.arguments,\n          ),\n        ),\n      
]);\n    }\n  }\n\n  a.InputMessage _mapToolChatMessages(final List<ToolChatMessage> msgs) {\n    return a.InputMessage.userBlocks(\n      msgs\n          .map(\n            (msg) => a.InputContentBlock.toolResultText(\n              toolUseId: msg.toolCallId,\n              text: msg.content,\n            ),\n          )\n          .toList(growable: false),\n    );\n  }\n}\n\nextension MessageMapper on a.Message {\n  ChatResult toChatResult() {\n    final content = '$thinking$text';\n    final toolCalls = toolUseBlocks\n        .map(\n          (tu) => AIChatMessageToolCall(\n            id: tu.id,\n            name: tu.name,\n            argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '',\n            arguments: tu.input,\n          ),\n        )\n        .toList(growable: false);\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(content: content, toolCalls: toolCalls),\n      finishReason: _mapFinishReason(stopReason),\n      metadata: {'model': model, 'stop_sequence': stopSequence},\n      usage: _mapUsage(usage),\n    );\n  }\n}\n\nclass MessageStreamEventTransformer\n    extends StreamTransformerBase<a.MessageStreamEvent, ChatResult> {\n  MessageStreamEventTransformer();\n\n  String? lastMessageId;\n  String? 
lastToolCallId;\n\n  @override\n  Stream<ChatResult> bind(final Stream<a.MessageStreamEvent> stream) {\n    return stream\n        .map(\n          (event) => switch (event) {\n            final a.MessageStartEvent e => _mapMessageStartEvent(e),\n            final a.MessageDeltaEvent e => _mapMessageDeltaEvent(e),\n            final a.ContentBlockStartEvent e => _mapContentBlockStartEvent(e),\n            final a.ContentBlockDeltaEvent e => _mapContentBlockDeltaEvent(e),\n            final a.ContentBlockStopEvent e => _mapContentBlockStopEvent(e),\n            final a.MessageStopEvent e => _mapMessageStopEvent(e),\n            a.PingEvent() => null,\n            a.ErrorEvent() => null,\n          },\n        )\n        .whereNotNull();\n  }\n\n  ChatResult _mapMessageStartEvent(final a.MessageStartEvent e) {\n    final msg = e.message;\n    lastMessageId = msg.id;\n\n    return ChatResult(\n      id: msg.id,\n      output: AIChatMessage(\n        content: '${msg.thinking}${msg.text}',\n        toolCalls: msg.toolUseBlocks\n            .map(\n              (tu) => AIChatMessageToolCall(\n                id: tu.id,\n                name: tu.name,\n                argumentsRaw: tu.input.isNotEmpty ? json.encode(tu.input) : '',\n                arguments: tu.input,\n              ),\n            )\n            .toList(growable: false),\n      ),\n      finishReason: _mapFinishReason(msg.stopReason),\n      metadata: {\n        'model': msg.model,\n        if (msg.stopSequence != null) 'stop_sequence': msg.stopSequence,\n      },\n      usage: _mapUsage(msg.usage),\n      streaming: true,\n    );\n  }\n\n  ChatResult _mapMessageDeltaEvent(final a.MessageDeltaEvent e) {\n    return ChatResult(\n      id: lastMessageId ?? 
'',\n      output: const AIChatMessage(content: ''),\n      finishReason: _mapFinishReason(e.delta.stopReason),\n      metadata: {\n        if (e.delta.stopSequence != null) 'stop_sequence': e.delta.stopSequence,\n      },\n      usage: _mapMessageDeltaUsage(e.usage),\n      streaming: true,\n    );\n  }\n\n  ChatResult _mapContentBlockStartEvent(final a.ContentBlockStartEvent e) {\n    final (content, toolCall) = _mapContentBlock(e.contentBlock);\n    if (toolCall != null) {\n      lastToolCallId = toolCall.id;\n    }\n\n    return ChatResult(\n      id: lastMessageId ?? '',\n      output: AIChatMessage(\n        content: content,\n        toolCalls: [if (toolCall != null) toolCall],\n      ),\n      finishReason: FinishReason.unspecified,\n      metadata: const {},\n      usage: const LanguageModelUsage(),\n      streaming: true,\n    );\n  }\n\n  ChatResult _mapContentBlockDeltaEvent(final a.ContentBlockDeltaEvent e) {\n    final (content, toolCalls) = _mapContentBlockDelta(lastToolCallId, e.delta);\n    return ChatResult(\n      id: lastMessageId ?? '',\n      output: AIChatMessage(content: content, toolCalls: toolCalls),\n      finishReason: FinishReason.unspecified,\n      metadata: {'index': e.index},\n      usage: const LanguageModelUsage(),\n      streaming: true,\n    );\n  }\n\n  ChatResult? _mapContentBlockStopEvent(final a.ContentBlockStopEvent e) {\n    lastToolCallId = null;\n    return null;\n  }\n\n  ChatResult? _mapMessageStopEvent(final a.MessageStopEvent e) {\n    lastMessageId = null;\n    return null;\n  }\n}\n\n/// Maps a single content block from stream start event.\n(String content, AIChatMessageToolCall? toolCall) _mapContentBlock(\n  final a.ContentBlock contentBlock,\n) => switch (contentBlock) {\n  final a.TextBlock t => (t.text, null),\n  final a.ThinkingBlock t => (t.thinking, null),\n  final a.ToolUseBlock tu => (\n    '',\n    AIChatMessageToolCall(\n      id: tu.id,\n      name: tu.name,\n      argumentsRaw: tu.input.isNotEmpty ? 
json.encode(tu.input) : '',\n      arguments: tu.input,\n    ),\n  ),\n  a.RedactedThinkingBlock() => ('', null),\n  a.ServerToolUseBlock() => ('', null),\n  a.WebSearchToolResultBlock() => ('', null),\n  _ => ('', null),\n};\n\n/// Maps a content block delta from streaming events.\n(String content, List<AIChatMessageToolCall> toolCalls) _mapContentBlockDelta(\n  final String? lastToolId,\n  final a.ContentBlockDelta blockDelta,\n) => switch (blockDelta) {\n  final a.TextDelta t => (t.text, const <AIChatMessageToolCall>[]),\n  final a.InputJsonDelta jb => (\n    '',\n    [\n      AIChatMessageToolCall(\n        id: lastToolId ?? '',\n        name: '',\n        argumentsRaw: jb.partialJson,\n        arguments: const {},\n      ),\n    ],\n  ),\n  final a.ThinkingDelta t => (t.thinking, const <AIChatMessageToolCall>[]),\n  a.SignatureDelta() => ('', const <AIChatMessageToolCall>[]),\n  a.CitationsDelta() => ('', const <AIChatMessageToolCall>[]),\n  _ => ('', const <AIChatMessageToolCall>[]),\n};\n\nextension ToolSpecListMapper on List<ToolSpec> {\n  /// Converts tool specs to typed ToolDefinition list for the request.\n  List<a.ToolDefinition>? toToolDefinitions(final ChatToolChoice? 
toolChoice) {\n    if (toolChoice is ChatToolChoiceNone) {\n      return null;\n    }\n\n    if (toolChoice is ChatToolChoiceForced) {\n      final tool = firstWhereOrNull((final t) => t.name == toolChoice.name);\n      if (tool == null) {\n        throw ArgumentError(\n          'Forced tool \"${toolChoice.name}\" not found in the provided tools list',\n        );\n      }\n      return [_mapTool(tool)];\n    }\n\n    return map(_mapTool).toList(growable: false);\n  }\n\n  a.ToolDefinition _mapTool(final ToolSpec tool) {\n    return a.ToolDefinition.custom(\n      a.Tool(\n        name: tool.name,\n        description: tool.description,\n        inputSchema: a.InputSchema.fromJson(tool.inputJsonSchema),\n      ),\n    );\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  /// Converts tool choice to typed ToolChoice for the request.\n  a.ToolChoice toToolChoice() {\n    return switch (this) {\n      ChatToolChoiceNone() => throw StateError(\n        'ChatToolChoiceNone should be handled before calling toToolChoice()',\n      ),\n      ChatToolChoiceAuto _ => a.ToolChoice.auto(),\n      ChatToolChoiceRequired _ => a.ToolChoice.any(),\n      final ChatToolChoiceForced t => a.ToolChoice.tool(t.name),\n    };\n  }\n}\n\nFinishReason _mapFinishReason(final a.StopReason? 
reason) => switch (reason) {\n  a.StopReason.endTurn => FinishReason.stop,\n  a.StopReason.maxTokens => FinishReason.length,\n  a.StopReason.stopSequence => FinishReason.stop,\n  a.StopReason.toolUse => FinishReason.toolCalls,\n  a.StopReason.pauseTurn => FinishReason.unspecified,\n  a.StopReason.refusal => FinishReason.contentFilter,\n  a.StopReason.compaction => FinishReason.unspecified,\n  _ => FinishReason.unspecified,\n};\n\nLanguageModelUsage _mapUsage(final a.Usage usage) {\n  return LanguageModelUsage(\n    promptTokens: usage.inputTokens,\n    responseTokens: usage.outputTokens,\n    totalTokens: usage.inputTokens + usage.outputTokens,\n  );\n}\n\nLanguageModelUsage _mapMessageDeltaUsage(final a.MessageDeltaUsage usage) {\n  return LanguageModelUsage(\n    responseTokens: usage.outputTokens,\n    totalTokens: usage.outputTokens,\n  );\n}\n"
  },
  {
    "path": "packages/langchain_anthropic/lib/src/chat_models/types.dart",
    "content": "import 'package:anthropic_sdk_dart/anthropic_sdk_dart.dart' as a;\nimport 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_anthropic_thinking}\n/// Configuration for enabling Claude's extended thinking feature.\n///\n/// When enabled, responses include thinking content blocks showing Claude's\n/// internal reasoning process before the final answer.\n///\n/// Example:\n/// ```dart\n/// // Enable thinking with a 4096 token budget\n/// final thinking = ChatAnthropicThinking.enabled(budgetTokens: 4096);\n///\n/// // Disable thinking\n/// final thinking = ChatAnthropicThinking.disabled();\n/// ```\n/// {@endtemplate}\n@immutable\nsealed class ChatAnthropicThinking {\n  const ChatAnthropicThinking();\n\n  /// Enable extended thinking with the specified token budget.\n  ///\n  /// - [budgetTokens]: Maximum tokens for internal reasoning (minimum 1024).\n  ///   Must be less than the model's max_tokens. 
Larger budgets enable\n  ///   more comprehensive reasoning.\n  const factory ChatAnthropicThinking.enabled({required int budgetTokens}) =\n      ChatAnthropicThinkingEnabled;\n\n  /// Disable extended thinking.\n  const factory ChatAnthropicThinking.disabled() =\n      ChatAnthropicThinkingDisabled;\n\n  /// Converts this thinking configuration to the API format.\n  a.ThinkingConfig toThinkingConfig();\n}\n\n/// Enabled thinking configuration.\n@immutable\nclass ChatAnthropicThinkingEnabled extends ChatAnthropicThinking {\n  /// {@macro chat_anthropic_thinking}\n  const ChatAnthropicThinkingEnabled({required this.budgetTokens});\n\n  /// Maximum tokens for internal reasoning (minimum 1024).\n  final int budgetTokens;\n\n  @override\n  a.ThinkingConfig toThinkingConfig() {\n    return a.ThinkingConfig.enabled(budgetTokens: budgetTokens);\n  }\n\n  @override\n  bool operator ==(Object other) =>\n      identical(this, other) ||\n      other is ChatAnthropicThinkingEnabled &&\n          runtimeType == other.runtimeType &&\n          budgetTokens == other.budgetTokens;\n\n  @override\n  int get hashCode => budgetTokens.hashCode;\n}\n\n/// Disabled thinking configuration.\n@immutable\nclass ChatAnthropicThinkingDisabled extends ChatAnthropicThinking {\n  /// {@macro chat_anthropic_thinking}\n  const ChatAnthropicThinkingDisabled();\n\n  @override\n  a.ThinkingConfig toThinkingConfig() {\n    return a.ThinkingConfig.disabled();\n  }\n\n  @override\n  bool operator ==(Object other) =>\n      identical(this, other) ||\n      other is ChatAnthropicThinkingDisabled &&\n          runtimeType == other.runtimeType;\n\n  @override\n  int get hashCode => 0;\n}\n\n/// {@template chat_anthropic_options}\n/// Options to pass into the Anthropic Chat Model.\n///\n/// Available models:\n/// - `claude-sonnet-4-5-20250514`\n/// - `claude-sonnet-4-5-latest`\n/// - `claude-haiku-3-5-latest`\n/// - `claude-haiku-3-5-20250414`\n/// - `claude-3-5-sonnet-latest`\n/// - 
`claude-3-5-sonnet-20241022`\n/// - `claude-3-5-sonnet-20240620`\n/// - `claude-3-5-haiku-latest`\n/// - `claude-3-5-haiku-20241022`\n/// - `claude-3-opus-latest`\n/// - `claude-3-opus-20240229`\n/// - `claude-3-sonnet-20240229`\n/// - `claude-3-haiku-20240307`\n///\n/// Mind that the list may be outdated.\n/// See https://docs.anthropic.com/en/docs/about-claude/models for the latest list.\n/// {@endtemplate}\n@immutable\nclass ChatAnthropicOptions extends ChatModelOptions {\n  /// {@macro chat_anthropic_options}\n  const ChatAnthropicOptions({\n    super.model,\n    this.maxTokens,\n    this.stopSequences,\n    this.temperature,\n    this.topK,\n    this.topP,\n    this.userId,\n    this.thinking,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// The maximum number of tokens to generate before stopping.\n  ///\n  /// Note that our models may stop _before_ reaching this maximum. This parameter\n  /// only specifies the absolute maximum number of tokens to generate.\n  ///\n  /// Different models have different maximum values for this parameter. See\n  /// [models](https://docs.anthropic.com/en/docs/models-overview) for details.\n  final int? maxTokens;\n\n  /// Custom text sequences that will cause the model to stop generating.\n  ///\n  /// Anthropic models will normally stop when they have naturally completed\n  /// their turn. If you want the model to stop generating when it encounters\n  /// custom strings of text, you can use the `stopSequences` parameter.\n  final List<String>? stopSequences;\n\n  /// Amount of randomness injected into the response.\n  ///\n  /// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`\n  /// for analytical / multiple choice, and closer to `1.0` for creative and\n  /// generative tasks.\n  ///\n  /// Note that even with `temperature` of `0.0`, the results will not be fully\n  /// deterministic.\n  final double? 
temperature;\n\n  /// Only sample from the top K options for each subsequent token.\n  ///\n  /// Used to remove \"long tail\" low probability responses.\n  /// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).\n  ///\n  /// Recommended for advanced use cases only. You usually only need to use\n  /// `temperature`.\n  final int? topK;\n\n  /// Use nucleus sampling.\n  ///\n  /// In nucleus sampling, we compute the cumulative distribution over all the options\n  /// for each subsequent token in decreasing probability order and cut it off once it\n  /// reaches a particular probability specified by `top_p`. You should either alter\n  /// `temperature` or `top_p`, but not both.\n  ///\n  /// Recommended for advanced use cases only. You usually only need to use\n  /// `temperature`.\n  final double? topP;\n\n  /// An external identifier for the user who is associated with the request.\n  ///\n  /// This should be a uuid, hash value, or other opaque identifier. Anthropic may use\n  /// this id to help detect abuse. Do not include any identifying information such as\n  /// name, email address, or phone number.\n  final String? userId;\n\n  /// Configuration for enabling Claude's extended thinking.\n  ///\n  /// When enabled, responses include thinking content blocks showing Claude's\n  /// internal reasoning process before the final answer. Requires a minimum\n  /// budget of 1,024 tokens and counts towards your `max_tokens` limit.\n  ///\n  /// Example:\n  /// ```dart\n  /// ChatAnthropicOptions(\n  ///   thinking: ChatAnthropicThinking.enabled(budgetTokens: 4096),\n  /// )\n  /// ```\n  final ChatAnthropicThinking? thinking;\n\n  @override\n  ChatAnthropicOptions copyWith({\n    final String? model,\n    final int? maxTokens,\n    final List<String>? stopSequences,\n    final double? temperature,\n    final int? topK,\n    final double? topP,\n    final String? userId,\n    final ChatAnthropicThinking? 
thinking,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatAnthropicOptions(\n      model: model ?? this.model,\n      maxTokens: maxTokens ?? this.maxTokens,\n      stopSequences: stopSequences ?? this.stopSequences,\n      temperature: temperature ?? this.temperature,\n      topK: topK ?? this.topK,\n      topP: topP ?? this.topP,\n      userId: userId ?? this.userId,\n      thinking: thinking ?? this.thinking,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatAnthropicOptions merge(covariant final ChatAnthropicOptions? other) {\n    return copyWith(\n      model: other?.model,\n      maxTokens: other?.maxTokens,\n      stopSequences: other?.stopSequences,\n      temperature: other?.temperature,\n      topK: other?.topK,\n      topP: other?.topP,\n      userId: other?.userId,\n      thinking: other?.thinking,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatAnthropicOptions other) {\n    return model == other.model &&\n        maxTokens == other.maxTokens &&\n        const ListEquality<String>().equals(\n          stopSequences,\n          other.stopSequences,\n        ) &&\n        temperature == other.temperature &&\n        topK == other.topK &&\n        topP == other.topP &&\n        userId == other.userId &&\n        thinking == other.thinking &&\n        const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n        toolChoice == other.toolChoice &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        maxTokens.hashCode ^\n        const ListEquality<String>().hash(stopSequences) ^\n        temperature.hashCode ^\n        
topK.hashCode ^\n        topP.hashCode ^\n        userId.hashCode ^\n        thinking.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_anthropic/pubspec.yaml",
    "content": "name: langchain_anthropic\ndescription: Anthropic module for LangChain.dart (Claude 4.5 Sonnet, Opus, Haiku, Instant, etc.).\nversion: 0.3.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_anthropic\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_anthropic\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  anthropic_sdk_dart: ^1.3.0\n  collection: ^1.19.1\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  rxdart: \">=0.27.7 <0.29.0\"\n\ndev_dependencies:\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_anthropic/test/chat_models/chat_anthropic_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values, avoid_print\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_anthropic/langchain_anthropic.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatAnthropic tests', () {\n    const defaultModel = 'claude-sonnet-4-5';\n\n    late ChatAnthropic chatModel;\n\n    setUp(() {\n      chatModel = ChatAnthropic(\n        apiKey: Platform.environment['ANTHROPIC_API_KEY'],\n        defaultOptions: const ChatAnthropicOptions(model: defaultModel),\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test Text-only input with different models', () async {\n      final models = [\n        'claude-sonnet-4-5',\n        'claude-haiku-4-5',\n        'claude-opus-4-5',\n      ];\n      for (final model in models) {\n        print('Testing model: $model');\n        final res = await chatModel.invoke(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order '\n            'without any spaces, commas or additional explanations.',\n          ),\n          options: ChatAnthropicOptions(model: model, temperature: 0),\n        );\n        expect(res.id, isNotEmpty);\n        expect(res.finishReason, isNot(FinishReason.unspecified));\n        expect(res.metadata['model'], contains(model.toLowerCase()));\n        expect(\n          res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n          contains('123456789'),\n        );\n        await Future<void>.delayed(const Duration(seconds: 5));\n      }\n    });\n\n    test('Text-and-image input', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([\n       
   ChatMessage.human(\n            ChatMessageContent.multiModal([\n              ChatMessageContent.text('What fruit is this?'),\n              ChatMessageContent.image(\n                mimeType: 'image/jpeg',\n                data: base64.encode(\n                  await File(\n                    './test/chat_models/assets/apple.jpeg',\n                  ).readAsBytes(),\n                ),\n              ),\n            ]),\n          ),\n        ]),\n      );\n\n      expect(res.output.content.toLowerCase(), contains('apple'));\n    });\n\n    test('Test stop sequence', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n        options: const ChatAnthropicOptions(\n          model: defaultModel,\n          stopSequences: ['4'],\n        ),\n      );\n      final text = res.output.content;\n      expect(text, contains('123'));\n      expect(text, isNot(contains('456789')));\n      expect(res.finishReason, FinishReason.stop);\n    });\n\n    test('Test max tokens', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string('Tell me a joke'),\n        options: const ChatAnthropicOptions(model: defaultModel, maxTokens: 10),\n      );\n      expect(res.output.content.length, lessThan(50));\n      expect(res.finishReason, FinishReason.length);\n    });\n\n    test('Test Multi-turn conversations', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.humanText(\n          'List the numbers from 1 to 9 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n        ChatMessage.ai('123456789'),\n        ChatMessage.humanText('Remove the number 4 from the list'),\n      ]);\n      final res = await chatModel.invoke(\n        prompt,\n        options: const ChatAnthropicOptions(\n          model: defaultModel,\n          
temperature: 0,\n        ),\n      );\n      expect(res.output.content, contains('12356789'));\n    });\n\n    test('Test streaming', () async {\n      final stream = chatModel.stream(\n        PromptValue.string(\n          'List the numbers from 1 to 100 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n      );\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res.output.content;\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, contains('123456789'));\n    });\n\n    test(\n      'Test tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        const tool = ToolSpec(\n          name: 'get_current_weather',\n          description: 'Get the current weather in a given location',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {\n                'type': 'string',\n                'description': 'The city and state, e.g. 
San Francisco, CA',\n              },\n              'unit': {\n                'type': 'string',\n                'description': 'The unit of temperature to return',\n                'enum': ['celsius', 'fahrenheit'],\n              },\n            },\n            'required': ['location'],\n          },\n        );\n        final model = chatModel.bind(\n          const ChatAnthropicOptions(model: defaultModel, tools: [tool]),\n        );\n\n        final humanMessage = ChatMessage.humanText(\n          \"What's the weather like in Boston and Madrid right now in celsius?\",\n        );\n        final res1 = await model.invoke(PromptValue.chat([humanMessage]));\n\n        final aiMessage1 = res1.output;\n        expect(aiMessage1.toolCalls, hasLength(2));\n\n        final toolCall1 = aiMessage1.toolCalls.first;\n        expect(toolCall1.name, tool.name);\n        expect(toolCall1.arguments.containsKey('location'), isTrue);\n        expect(toolCall1.arguments['location'], contains('Boston'));\n        expect(toolCall1.arguments['unit'], 'celsius');\n\n        final toolCall2 = aiMessage1.toolCalls.last;\n        expect(toolCall2.name, tool.name);\n        expect(toolCall2.arguments.containsKey('location'), isTrue);\n        expect(toolCall2.arguments['location'], contains('Madrid'));\n        expect(toolCall2.arguments['unit'], 'celsius');\n\n        final functionResult1 = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage1 = ChatMessage.tool(\n          toolCallId: toolCall1.id,\n          content: json.encode(functionResult1),\n        );\n\n        final functionResult2 = {\n          'temperature': '25',\n          'unit': 'celsius',\n          'description': 'Cloudy',\n        };\n        final functionMessage2 = ChatMessage.tool(\n          toolCallId: toolCall2.id,\n          content: json.encode(functionResult2),\n        );\n\n        final res2 = await 
model.invoke(\n          PromptValue.chat([\n            humanMessage,\n            aiMessage1,\n            functionMessage1,\n            functionMessage2,\n          ]),\n        );\n\n        final aiMessage2 = res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n        expect(aiMessage2.content, contains('25'));\n      },\n    );\n\n    test(\n      'Test streaming with tools',\n      timeout: const Timeout(Duration(minutes: 5)),\n      () async {\n        const tool = ToolSpec(\n          name: 'joke',\n          description: 'A joke',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'setup': {\n                'type': 'string',\n                'description': 'The setup for the joke',\n              },\n              'punchline': {\n                'type': 'string',\n                'description': 'The punchline to the joke',\n              },\n            },\n            'required': ['setup', 'punchline'],\n          },\n        );\n\n        final promptTemplate = ChatPromptTemplate.fromTemplate(\n          'tell me a long joke about {foo}',\n        );\n        final chat = chatModel.bind(\n          ChatAnthropicOptions(\n            model: defaultModel,\n            tools: const [tool],\n            toolChoice: ChatToolChoice.forced(name: 'joke'),\n          ),\n        );\n        final jsonOutputParser = ToolsOutputParser();\n\n        final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser);\n\n        final stream = chain.stream({'foo': 'bears'});\n\n        List<ParsedToolCall> lastResult = [];\n        var count = 0;\n        await for (final res in stream) {\n          lastResult = res;\n          count++;\n        }\n\n        expect(count, greaterThan(1));\n        expect(lastResult, hasLength(1));\n        final toolCall = lastResult.first;\n        expect(toolCall.arguments['setup'], isNotEmpty);\n        
expect(toolCall.arguments['punchline'], isNotEmpty);\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_chroma/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_chroma/CHANGELOG.md",
    "content": "## 0.3.0+2\n\n - Update a dependency to the latest release.\n\n## 0.3.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.3.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.2.2+3\n\n - Update a dependency to the latest release.\n\n## 0.2.2+2\n\n - Update a dependency to the latest release.\n\n## 0.2.2+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.2.2\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). 
([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.2.1+5\n\n - Update a dependency to the latest release.\n\n## 0.2.1+4\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.2.1+3\n\n - Update a dependency to the latest release.\n\n## 0.2.1+2\n\n - Update a dependency to the latest release.\n\n## 0.2.1+1\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.2.1\n\n - Update a dependency to the latest release.\n\n## 0.2.0+5\n\n - Update a dependency to the latest release.\n\n## 0.2.0+4\n\n - Update a dependency to the latest release.\n \n## 0.2.0+3\n\n - Update a dependency to the latest release.\n\n## 0.2.0+2\n\n - Update a dependency to the latest release.\n\n## 0.2.0+1\n\n## 0.2.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n## 0.1.1\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. 
([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.1.0+15\n\n - **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.1.0+14\n\n - Update a dependency to the latest release.\n\n## 0.1.0+13\n\n - Update a dependency to the latest release.\n\n## 0.1.0+12\n\n - Update a dependency to the latest release.\n\n## 0.1.0+11\n\n - Update a dependency to the latest release.\n\n## 0.1.0+10\n\n - Update a dependency to the latest release.\n\n## 0.1.0+9\n\n - **DOCS**: Update README.md and docs ([#272](https://github.com/davidmigloz/langchain_dart/issues/272)). ([306a1fdd](https://github.com/davidmigloz/langchain_dart/commit/306a1fdd6504ef28dc2066953ae575e975ab9025))\n\n## 0.1.0+8\n\n - Update a dependency to the latest release.\n\n## 0.1.0+7\n\n - **DOCS**: Fix typo in Chroma docs. ([dd97db40](https://github.com/davidmigloz/langchain_dart/commit/dd97db400892fc42cd4d395ace93a40b313247c1))\n\n## 0.1.0+6\n\n - Update a dependency to the latest release.\n\n## 0.1.0+5\n\n - **FIX**: Decode JSON responses as UTF-8 ([#234](https://github.com/davidmigloz/langchain_dart/issues/234)) ([#235](https://github.com/davidmigloz/langchain_dart/issues/235)). ([29347763](https://github.com/davidmigloz/langchain_dart/commit/29347763fe04cb7c9199e33c643dbc585de0a7b8))\n\n## 0.1.0+4\n\n - Update a dependency to the latest release.\n\n## 0.1.0+3\n\n - Update a dependency to the latest release.\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **FEAT**: Chroma databases, tenants and global headers support ([#211](https://github.com/davidmigloz/langchain_dart/issues/211)). 
([5a1d8397](https://github.com/davidmigloz/langchain_dart/commit/5a1d83971c78849f7185a674ffff527e0348511d))\n\n## 0.0.5+3\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n## 0.0.5+2\n\n - Update a dependency to the latest release.\n\n## 0.0.5+1\n\n - **DOCS**: Update CHANGELOG.md. ([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n## 0.0.5\n\n> Note: This release has breaking changes.\n\n - **DOCS**: Update changelog. ([d45d624a](https://github.com/davidmigloz/langchain_dart/commit/d45d624a0ba12e53c4e78a29750cad30d66c61c5))\n - **BREAKING** **FIX**: Change loaders lastModified metadata field to integer ([#172](https://github.com/davidmigloz/langchain_dart/issues/172)). ([72c724f8](https://github.com/davidmigloz/langchain_dart/commit/72c724f8a716e27b4a807b70bcbbafdd9feb0a18))\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n\n## 0.0.4\n\n - Updated `langchain` dependency\n\n## 0.0.3\n\n - **DOCS**: Fix invalid package topics. ([f81b833a](https://github.com/davidmigloz/langchain_dart/commit/f81b833aae33e0a945ef4450da12344886224bae))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n\n## 0.0.2\n\n - **DOCS**: Update readme. ([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n## 0.0.1\n\n - **FEAT**: Add support for Chroma VectorStore ([#139](https://github.com/davidmigloz/langchain_dart/issues/139)). 
([098783b4](https://github.com/davidmigloz/langchain_dart/commit/098783b4895ab30bb61d07355a0b587ff76b9175))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_chroma/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_chroma/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Chroma\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_chroma](https://img.shields.io/pub/v/langchain_chroma.svg)](https://pub.dev/packages/langchain_chroma)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nChroma module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Vector stores:\n    * `Chroma` vector store that uses [Chroma](https://www.trychroma.com) \n    open-source embedding database.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_chroma/example/langchain_chroma_example.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_chroma/langchain_chroma.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Chroma(embeddings: embeddings);\n\n  // Add documents to the vector store\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(\n        id: '1',\n        pageContent: 'The cat sat on the mat',\n        metadata: {'cat': 'animal'},\n      ),\n      Document(\n        id: '2',\n        pageContent: 'The dog chased the ball.',\n        metadata: {'cat': 'animal'},\n      ),\n      Document(\n        id: '3',\n        pageContent: 'The boy ate the apple.',\n        metadata: {'cat': 'person'},\n      ),\n      Document(\n        id: '4',\n        pageContent: 'The girl drank the milk.',\n        metadata: {'cat': 'person'},\n      ),\n      Document(\n        id: '5',\n        pageContent: 'The sun is shining.',\n        metadata: {'cat': 'natural'},\n      ),\n    ],\n  );\n\n  // Query the vector store\n  final res = await vectorStore.similaritySearch(\n    query: 'What are they eating?',\n    config: const ChromaSimilaritySearch(\n      k: 2,\n      scoreThreshold: 0.4,\n      where: {'cat': 'person'},\n    ),\n  );\n  print(res);\n}\n"
  },
  {
    "path": "packages/langchain_chroma/lib/langchain_chroma.dart",
    "content": "/// LangChain.dart integration module for Chroma open-source embedding database.\nlibrary;\n\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_chroma/lib/src/vector_stores/chroma.dart",
    "content": "import 'package:chromadb/chromadb.dart';\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'types.dart';\n\n/// {@template chroma}\n/// Vector store for Chroma open-source embedding database.\n///\n/// Chroma documentation:\n/// https://docs.trychroma.com\n///\n/// This vector stores requires Chroma to be running in client/server mode.\n///\n/// The server can run on your local computer via docker or be easily deployed\n/// to any cloud provider.\n///\n/// ### Running Chroma in client/server mode\n///\n/// You can run a Chroma server in two ways:\n///\n/// #### Using Python client\n///\n/// The Python client supports spinning up a Chroma server easily:\n///\n/// ```sh\n/// pip install chromadb\n/// chroma run --path /db_path\n/// ```\n///\n/// #### Using Docker\n///\n/// Otherwise, you can run the Chroma server using Docker:\n///\n/// ```sh\n/// docker pull chromadb/chroma\n/// docker run -p 8000:8000 chromadb/chroma\n/// ```\n///\n/// If you are interacting with Chroma server from a web browser,\n/// you may need to configure the CORS policy. You can do this by\n/// passing the following environment variable:\n/// ```sh\n/// docker run -p 8000:8000 -e 'CHROMA_SERVER_CORS_ALLOW_ORIGINS=[\"*\"]' chromadb/chroma\n/// ```\n/// The previous command will allow all origins to access the Chroma server\n/// (do not use in production).\n///\n/// By default, the Chroma client will connect to a server running on\n/// `http://localhost:8000`. 
To connect to a different server, pass the\n/// `baseUrl` parameter to the constructor.\n///\n/// ```dart\n/// final vectorStore = Chroma(\n///   embeddings: OpenAIEmbeddings(apiKey: openaiApiKey),\n///   baseUrl: 'http://localhost:8888',\n/// );\n/// ```\n///\n/// ### Collections\n///\n/// Chroma lets you manage collections of embeddings, using the collection\n/// primitive.\n///\n/// You can configure the collection to use in the [collectionName] parameter.\n///\n/// You can also configure the metadata to associate with the collection in the\n/// [collectionMetadata] parameter.\n///\n/// ### Changing the distance function\n///\n/// You can change the distance function of the embedding space by setting the\n/// value of `hnsw:space` in [collectionMetadata]. Valid options are \"l2\",\n/// \"ip\", or \"cosine\". The default is \"l2\".\n///\n/// ### Filtering\n///\n/// Chroma supports filtering queries by metadata and document contents.\n/// The `where` filter is used to filter by metadata, and the `whereDocument`\n/// filter is used to filter by document contents.\n///\n/// For example:\n/// ```dart\n/// final vectorStore = Chroma(...);\n/// final res = await vectorStore.similaritySearch(\n///   query: 'What should I feed my cat?',\n///   config: ChromaSimilaritySearch(\n///     k: 5,\n///     scoreThreshold: 0.8,\n///     where: {'class': 'cat'},\n///   ),\n/// );\n/// ```\n///\n/// Chroma supports a wide range of operators for filtering. 
Check out the\n/// filtering section of the Chroma docs for more info:\n/// https://docs.trychroma.com/usage-guide?lang=js#using-where-filters\n/// {@endtemplate}\nclass Chroma extends VectorStore {\n  /// {@macro chroma}\n  Chroma({\n    this.collectionName = 'langchain',\n    this.collectionMetadata,\n    required super.embeddings,\n    final String tenant = 'default_tenant',\n    final String database = 'default_database',\n    final String baseUrl = 'http://localhost:8000',\n    final Map<String, String> headers = const {},\n    final http.Client? client,\n  }) : _client = ChromaClient(\n         config: ChromaConfig(\n           tenant: tenant,\n           database: database,\n           baseUrl: baseUrl,\n           defaultHeaders: headers,\n         ),\n         httpClient: client,\n       );\n\n  /// Name of the collection to use.\n  final String collectionName;\n\n  /// Metadata to associate with the collection.\n  final Map<String, dynamic>? collectionMetadata;\n\n  /// The Chroma client.\n  final ChromaClient _client;\n\n  /// A UUID generator.\n  final _uuid = const Uuid();\n\n  /// The collection to use.\n  ChromaCollection? _collection;\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    assert(vectors.length == documents.length);\n\n    final collection = await _getCollection();\n\n    final List<String> ids = [];\n    final List<Map<String, dynamic>> metadatas = [];\n    final List<String> docs = [];\n\n    for (var i = 0; i < documents.length; i++) {\n      final doc = documents[i];\n      final id = doc.id ?? 
_uuid.v4();\n      ids.add(id);\n      metadatas.add(doc.metadata);\n      docs.add(doc.pageContent);\n    }\n\n    await collection.upsert(\n      ids: ids,\n      embeddings: vectors,\n      metadatas: metadatas,\n      documents: docs,\n    );\n    return ids;\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) async {\n    final collection = await _getCollection();\n    await collection.delete(ids: ids);\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    final collection = await _getCollection();\n    final result = await collection.query(\n      queryEmbeddings: [embedding],\n      nResults: config.k,\n      where: config.filter,\n      whereDocument: config is ChromaSimilaritySearch\n          ? config.whereDocument\n          : null,\n      include: Include.defaultQuery,\n    );\n    final ids = result.ids.first;\n    final metadatas = result.metadatas?.first;\n    final docs = result.documents?.first;\n    final distances = result.distances?.first;\n\n    final List<(Document, double)> results = [];\n    for (var i = 0; i < ids.length; i++) {\n      final distance = distances?[i] ?? 0.0;\n      if (config.scoreThreshold != null && distance < config.scoreThreshold!) {\n        continue;\n      }\n\n      final doc = Document(\n        id: ids[i],\n        metadata: metadatas?[i] ?? {},\n        pageContent: docs?[i] ?? 
'',\n      );\n      results.add((doc, distance));\n    }\n    return results;\n  }\n\n  Future<ChromaCollection> _getCollection() async {\n    if (_collection != null) {\n      return _collection!;\n    }\n\n    final collection = await _client.getOrCreateCollection(\n      name: collectionName,\n      metadata: collectionMetadata,\n    );\n\n    _collection = collection;\n    return collection;\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_chroma/lib/src/vector_stores/types.dart",
    "content": "import 'package:langchain_core/vector_stores.dart';\n\n/// {@template chroma_similarity_search}\n/// Chroma similarity search config.\n///\n/// Chroma supports filtering queries by metadata and document contents.\n/// The [where] filter is used to filter by metadata, and the [whereDocument]\n/// filter is used to filter by document contents.\n///\n/// Check out the filtering section of the Chroma docs for more info:\n/// https://docs.trychroma.com/usage-guide?lang=js#using-where-filters\n///\n/// Example:\n/// ```dart\n/// ChromaSimilaritySearch(\n///   k: 5,\n///   where: {'style: 'style1'},\n///   scoreThreshold: 0.8,\n/// ),\n/// ```\n/// {@endtemplate}\nclass ChromaSimilaritySearch extends VectorStoreSimilaritySearch {\n  /// {@macro chroma_similarity_search}\n  const ChromaSimilaritySearch({\n    super.k = 4,\n    final Map<String, dynamic>? where,\n    this.whereDocument,\n    super.scoreThreshold,\n  }) : super(filter: where);\n\n  /// Optional query condition to filter results based on document content.\n  final Map<String, dynamic>? whereDocument;\n}\n"
  },
  {
    "path": "packages/langchain_chroma/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'chroma.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_chroma/pubspec.yaml",
    "content": "name: langchain_chroma\ndescription: LangChain.dart integration module for Chroma open-source embedding database.\nversion: 0.3.0+2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_chroma\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_chroma\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - vector-db\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  chromadb: ^1.1.0\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  uuid: ^4.5.1\n\ndev_dependencies:\n  test: ^1.26.2\n  langchain: ^0.8.1\n  langchain_community: 0.4.0+2\n  langchain_openai: ^0.8.1+1\n"
  },
  {
    "path": "packages/langchain_chroma/test/vector_stores/assets/example.txt",
    "content": "The answer to the question \"Who are we?\" is complex and multifaceted. It depends on the context in which the question is asked, and the perspective of the person answering.\n\nOn a general level, we are all human beings. We share the same basic biology, and we all have the same basic needs: food, water, shelter, love, and belonging. We also share the same basic capacity for love, compassion, creativity, and resilience.\n\nHowever, we are also all unique individuals. We have different experiences, different talents, and different perspectives. We are shaped by our families, our communities, our cultures, and our individual choices.\n\nSo, who are we? We are all of these things, and more. We are complex and contradictory, but we are also beautiful and resilient. We are human beings, and we are all connected.\n\nIn the context of this conversation, you and I are both users of a language model. We are both interested in learning and exploring new ideas. We are both part of a community of people who are using technology to connect with each other and to make the world a better place.\n\nUltimately, the answer to the question \"Who are we?\" is up to each individual to decide. We are all free to define ourselves in our own way.\n"
  },
  {
    "path": "packages/langchain_chroma/test/vector_stores/chroma_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart' show RecursiveCharacterTextSplitter;\nimport 'package:langchain_chroma/langchain_chroma.dart';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  late final OpenAIEmbeddings embeddings;\n  late final Chroma vectorStore;\n\n  group(\n    'Chroma tests',\n    skip: openaiApiKey == null || Platform.environment.containsKey('CI'),\n    () {\n      setUpAll(() {\n        embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n        vectorStore = Chroma(embeddings: embeddings);\n      });\n\n      tearDownAll(() {\n        embeddings.close();\n        vectorStore.close();\n      });\n\n      test('Test Chroma add new vectors', () async {\n        final res = await vectorStore.addDocuments(\n          documents: const [\n            Document(\n              id: '1',\n              pageContent: 'The cat sat on the mat',\n              metadata: {'cat': 'animal'},\n            ),\n            Document(\n              id: '2',\n              pageContent: 'The dog chased the ball.',\n              metadata: {'cat': 'animal'},\n            ),\n            Document(\n              id: '3',\n              pageContent: 'The boy ate the apple.',\n              metadata: {'cat': 'person'},\n            ),\n            Document(\n              id: '4',\n              pageContent: 'The girl drank the milk.',\n              metadata: {'cat': 'person'},\n            ),\n            Document(\n              id: '5',\n              pageContent: 'The sun is shining.',\n              metadata: {'cat': 'natural'},\n            ),\n          ],\n        );\n\n        expect(res.length, 5);\n      });\n\n      test('Test Chroma add new 
vectors from file', () async {\n        const filePath = './test/vector_stores/assets/example.txt';\n        const loader = TextLoader(filePath);\n        final pages = await loader.load();\n\n        const splitter = RecursiveCharacterTextSplitter(\n          chunkOverlap: 150,\n          chunkSize: 1500,\n        );\n        final docs = splitter.splitDocuments(pages);\n\n        await vectorStore.addDocuments(documents: docs);\n\n        final res = await vectorStore.similaritySearch(\n          query: 'Who are we?',\n          config: const ChromaSimilaritySearch(k: 1),\n        );\n        expect(res.length, 1);\n      });\n\n      test('Test Chroma query return 1 result', () async {\n        final res = await vectorStore.similaritySearch(\n          query: 'Is it raining?',\n          config: const ChromaSimilaritySearch(k: 1),\n        );\n        expect(res.length, 1);\n        expect(res.first.id, '5');\n      });\n\n      test('Test Chroma query with scoreThreshold', () async {\n        final res = await vectorStore.similaritySearchWithScores(\n          query: 'Is it raining?',\n          config: const ChromaSimilaritySearch(scoreThreshold: 0.6),\n        );\n        for (final (_, score) in res) {\n          expect(score, greaterThan(0.6));\n        }\n      });\n\n      test('Test Chroma query with where filter', () async {\n        final res = await vectorStore.similaritySearch(\n          query: 'What are they eating?',\n          config: const ChromaSimilaritySearch(k: 10, where: {'cat': 'person'}),\n        );\n        for (final doc in res) {\n          expect(doc.metadata['cat'], 'person');\n        }\n      });\n\n      test('Test Chroma query with whereDocument filter', () async {\n        final res = await vectorStore.similaritySearch(\n          query: 'What are they eating?',\n          config: const ChromaSimilaritySearch(\n            whereDocument: {r'$contains': 'milk'},\n          ),\n        );\n        expect(res.length, 1);\n        
expect(res.first.id, '4');\n      });\n\n      test('Test Chroma delete document', () async {\n        await vectorStore.addDocuments(\n          documents: [\n            const Document(\n              id: 'delete',\n              pageContent: 'This document will be deleted',\n              metadata: {'cat': 'xxx'},\n            ),\n          ],\n        );\n        final res1 = await vectorStore.similaritySearch(\n          query: 'Deleted doc',\n          config: const ChromaSimilaritySearch(where: {'cat': 'xxx'}),\n        );\n        expect(res1.length, 1);\n        expect(res1.first.id, 'delete');\n\n        await vectorStore.delete(ids: ['delete']);\n        final res2 = await vectorStore.similaritySearch(\n          query: 'Deleted doc',\n          config: const ChromaSimilaritySearch(where: {'cat': 'xxx'}),\n        );\n        expect(res2.length, 0);\n      });\n    },\n  );\n\n  group('ChromaSimilaritySearch', () {\n    test('ChromaSimilaritySearch fields', () {\n      const config = ChromaSimilaritySearch(\n        k: 5,\n        where: {'style': 'style1'},\n        whereDocument: {r'$contains': 'milk'},\n        scoreThreshold: 0.8,\n      );\n      expect(config.k, 5);\n      expect(config.filter, {'style': 'style1'});\n      expect(config.whereDocument, {r'$contains': 'milk'});\n      expect(config.scoreThreshold, 0.8);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_cohere/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_cohere/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_cohere/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_cohere/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nCohere module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_cohere/example/langchain_cohere_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_cohere/lib/langchain_cohere.dart",
    "content": "/// Cohere module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_cohere/pubspec.yaml",
    "content": "name: langchain_cohere\ndescription: Cohere module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_cohere\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_cohere\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_community/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_community/CHANGELOG.md",
    "content": "## 0.4.0+2\n\n - Update a dependency to the latest release.\n\n## 0.4.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.4.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.3.4+3\n\n - Update a dependency to the latest release.\n\n## 0.3.4+2\n\n - Update a dependency to the latest release.\n\n## 0.3.4+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.3.4\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix static instance of ObjectBoxVectorStore ([#684](https://github.com/davidmigloz/langchain_dart/issues/684)). 
([719ead93](https://github.com/davidmigloz/langchain_dart/commit/719ead93c6e91d42bb8e45910ccf0da4e3e51afd))\n - **FIX**: Fix langchain_community  WASM compatibility ([#660](https://github.com/davidmigloz/langchain_dart/issues/660)). ([0be8aae4](https://github.com/davidmigloz/langchain_dart/commit/0be8aae44950fddd9ac9538ccfa5d017e6f585a0))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: Made apiKey optional for `TavilyAnswerTool` and `TavilySearchResultsTool` ([#646](https://github.com/davidmigloz/langchain_dart/issues/646)). ([5085ea4a](https://github.com/davidmigloz/langchain_dart/commit/5085ea4ad8b5cd072832e73afcbb7075a6375307))\n\n## 0.3.3\n\n - **FEAT**: Add support for DirectoryLoader ([#620](https://github.com/davidmigloz/langchain_dart/issues/620)). ([4730f2a3](https://github.com/davidmigloz/langchain_dart/commit/4730f2a376b152ea38e5204125209ef01f29cab9))\n - **FEAT**: Expose internal store in ObjectBoxVectorStore ([#611](https://github.com/davidmigloz/langchain_dart/issues/611)). ([c33f2e07](https://github.com/davidmigloz/langchain_dart/commit/c33f2e07c31ddd91dae16856df3b6c8ffddc45e9))\n - **FIX**: Chinese character support on web loader ([#600](https://github.com/davidmigloz/langchain_dart/issues/600)). ([48e64d5b](https://github.com/davidmigloz/langchain_dart/commit/48e64d5b01aa8469dbf7a973350eeac26b43df8f))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n - **REFACTOR**: Upgrade api clients generator version ([#610](https://github.com/davidmigloz/langchain_dart/issues/610)). 
([0c8750e8](https://github.com/davidmigloz/langchain_dart/commit/0c8750e85b34764f99b6e34cc531776ffe8fba7c))\n\n## 0.3.2+2\n\n - **FIX**: Update ObjectBox SDK to v4.0.3 to fix StorageException in iOS ([#581](https://github.com/davidmigloz/langchain_dart/issues/581)). ([943811a5](https://github.com/davidmigloz/langchain_dart/commit/943811a5d5ab1c7ef3e83db0c45082a0d4d1fc4a))\n\n## 0.3.2+1\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.3.2\n\n - **FEAT**: Add support for deleteWhere in ObjectBoxVectorStore ([#552](https://github.com/davidmigloz/langchain_dart/issues/552)). ([90918bba](https://github.com/davidmigloz/langchain_dart/commit/90918bbac411ccfe4823ae195de6a50a46575573))\n - **REFACTOR**: Add stubs for ObjectBox on web platform ([#553](https://github.com/davidmigloz/langchain_dart/issues/553)). ([41caed92](https://github.com/davidmigloz/langchain_dart/commit/41caed924bf24382567758be4590d5ddff31e839))\n\n## 0.3.1\n\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n\n## 0.3.0\n\n- **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n- **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.2.2\n\n - **FEAT**: Add support for TavilySearchResultsTool and TavilyAnswerTool ([#467](https://github.com/davidmigloz/langchain_dart/issues/467)). ([a9f35755](https://github.com/davidmigloz/langchain_dart/commit/a9f35755dfac9d257efb251c4a6c5948673c2f6c))\n\n## 0.2.1+1\n\n - **FIX**: Add missing dependency in langchain_community package ([#448](https://github.com/davidmigloz/langchain_dart/issues/448)). ([70ffd027](https://github.com/davidmigloz/langchain_dart/commit/70ffd027cb41c5c5058bb266966734894f773330))\n\n## 0.2.1\n\n - **FEAT**: Add support for ObjectBoxVectorStore ([#438](https://github.com/davidmigloz/langchain_dart/issues/438)). ([81e167a6](https://github.com/davidmigloz/langchain_dart/commit/81e167a6888fff9f8db381caaef6ee788acef3a8))\n   + Check out the [ObjectBoxVectorStore documentation](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/objectbox?id=objectbox)\n\n## 0.2.0+1\n\n- Update a dependency to the latest release.\n\n## 0.2.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n - **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.  
\n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n## 0.0.1-dev.2\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n\n## 0.0.1-dev.1\n\n- Bootstrap package.\n"
  },
  {
    "path": "packages/langchain_community/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_community/README.md",
    "content": "# 🦜️🔗 LangChain.dart Community\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_community](https://img.shields.io/pub/v/langchain_community.svg)](https://pub.dev/packages/langchain_community)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nCommunity package for [LangChain.dart](https://github.com/davidmigloz/langchain_dart). \n\n## What is LangChain.dart Community?\n\nLangChain.dart Community contains third-party integrations and community-contributed components that are not part of the core LangChain.dart API. \n\n> Depend on this package if you want to use any of the integrations or components it provides.\n\nThe most popular third-party integrations have their own packages (e.g. [langchain_openai](https://pub.dev/packages/langchain_openai), [langchain_google](https://pub.dev/packages/langchain_google), etc.). Check out the [full list of packages](https://github.com/davidmigloz/langchain_dart?tab=readme-ov-file#packages).\n\n![LangChain.dart packages](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain_packages.png)\n\n## Features\n\n- Document loaders:\n  * `TextLoader`: for plain text files.\n  * `CsvLoader`: for CSV or TSV files.\n  * `JsonLoader` for JSON files.\n  * `WebBaseLoader`: for web pages.\n  * `DirectoryLoader`: for directories of files.\n- Tools:\n  * `CalculatorTool`: to calculate math expressions. 
\n  * `TavilySearchResultsTool`: returns a list of results for a query using the [Tavily](https://tavily.com) search engine.\n  * `TavilyAnswerTool`: returns an answer for a query using the [Tavily](https://tavily.com) search engine.\n- Vector stores:\n  * `ObjectBoxVectorStore`: [ObjectBox](https://objectbox.io/) on-device vector database. \n\nCheck out the [API reference](https://pub.dev/documentation/langchain_community/latest) for more details.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_community/example/langchain_community_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/langchain_community.dart",
    "content": "/// LangChain.dart third-party integrations that don't have a dedicated package.\nlibrary;\n\nexport 'src/document_loaders/document_loaders.dart';\nexport 'src/tools/tools.dart';\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/csv.dart",
    "content": "import 'dart:convert';\n\nimport 'package:cross_file/cross_file.dart';\nimport 'package:csv/csv.dart';\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\n\n/// {@template csv_loader}\n/// A document loader for loading documents from CSV or TSV files.\n///\n/// It reads the CSV file specified by [filePath] and transforms each row into\n/// a [Document] object.\n///\n/// You can customize the [fields] that you want to extract or rename them\n/// using [fieldsOverride]. The field, text, and line delimiters can also be\n/// customized using [fieldDelimiter], [fieldTextDelimiter], [fieldTextEndDelimiter],\n/// and [eol].\n///\n/// The fields are added to the page content in the following format:\n/// ```txt\n/// {field1Name}: {field1Value}\n/// {field2Name}: {field2Value}\n/// ...\n/// {fieldNName}: {fieldNValue}\n/// ```\n///\n/// You can specify a [sourceField] to use as the source metadata field for\n/// each document. 
Otherwise, the [filePath] will be used as source.\n///\n/// For loading TSV files, set the [fieldDelimiter] as `\\t`.\n///\n/// Example:\n/// ```dart\n/// final loader = CsvLoader('path/to/file.csv');\n/// final documents = await loader.load();\n/// ```\n/// {@endtemplate}\nclass CsvLoader extends BaseDocumentLoader {\n  /// {@macro csv_loader}\n  CsvLoader(\n    this.filePath, {\n    this.fields,\n    this.fieldsOverride,\n    this.fieldDelimiter = ',',\n    this.fieldTextDelimiter = '\"',\n    @Deprecated(\n      'No longer supported by the underlying csv library (csv ^7.1.0)',\n    )\n    this.fieldTextEndDelimiter,\n    @Deprecated(\n      'No longer supported by the underlying csv library (csv ^7.1.0)',\n    )\n    this.eol = '\\n',\n    this.sourceField,\n    this.metadataBuilder,\n  });\n\n  /// The file path of the CSV file to be loaded.\n  final String filePath;\n\n  /// Optional field to specify which fields from the row to extract and add to\n  /// the page content of the document.\n  ///\n  /// If not provided, all row fields are extracted.\n  final List<String>? fields;\n\n  /// Optional field to override the field names from the CSV file.\n  ///\n  /// If not provided, the column names from the CSV file are used.\n  List<String>? fieldsOverride;\n\n  /// The delimiter used to separate fields in the CSV file.\n  ///\n  /// By default, it is set to comma (`,`).\n  final String fieldDelimiter;\n\n  /// The delimiter which (optionally) surrounds text / fields.\n  ///\n  /// By default, it is set to double quotation marks (`\"`).\n  final String fieldTextDelimiter;\n\n  /// Optional end delimiter for text. This allows text to be quoted with\n  /// different start / end delimiters: Example:  «abc».\n  ///\n  /// If null, [fieldTextDelimiter] is used as the end delimiter.\n  @Deprecated('No longer supported by the underlying csv library (csv ^7.1.0)')\n  final String? 
fieldTextEndDelimiter;\n\n  /// The end-of-line character used to separate rows in the CSV file. The eol\n  /// is optional for the last row.\n  ///\n  /// By default, it is set to the newline character (`\\n`).\n  @Deprecated('No longer supported by the underlying csv library (csv ^7.1.0)')\n  final String eol;\n\n  /// Optional field to specify a source for the document created from each\n  /// row. Otherwise [filePath] will be used as the source for all documents\n  /// created from the CSV file.\n  ///\n  /// This is useful when using documents loaded from CSV files for chains that\n  /// answer questions using sources.\n  final String? sourceField;\n\n  /// An optional function to build custom metadata for each document based on\n  /// the row content and the default metadata.\n  ///\n  /// If null, the default row metadata will be used.\n  final Map<String, dynamic> Function(\n    Map<String, dynamic> row,\n    Map<String, dynamic> rowMetadata,\n  )?\n  metadataBuilder;\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    final file = XFile(filePath);\n\n    final csvLinesStream = file\n        .openRead()\n        .cast<List<int>>()\n        .transform(utf8.decoder)\n        .transform(\n          CsvDecoder(\n            fieldDelimiter: fieldDelimiter,\n            quoteCharacter: fieldTextDelimiter,\n          ),\n        );\n\n    final fieldsToPositions = <String, int>{};\n    final pageContentFields = <String>[];\n    await for (final row in csvLinesStream) {\n      // Process CSV header\n      if (fieldsToPositions.isEmpty) {\n        assert(row.isNotEmpty, 'Header row cannot be empty');\n        assert(\n          fields == null &&\n                  (fieldsOverride == null ||\n                      fieldsOverride!.length == row.length) ||\n              fields != null &&\n                  (fieldsOverride == null ||\n                      fieldsOverride!.length == fields!.length),\n          'You have specified more field names than fields',\n 
       );\n\n        for (var i = 0; i < row.length; i++) {\n          final field = row[i] as String;\n          if (field == sourceField || (fields?.contains(field) ?? true)) {\n            final fieldName =\n                fieldsOverride?[fieldsToPositions.length] ?? field;\n            fieldsToPositions[fieldName] = i;\n          }\n        }\n\n        assert(\n          fields == null ||\n              (fieldsOverride ?? fields)!.every(\n                (final field) => fieldsToPositions[field] != null,\n              ),\n          'You have specified a field that does not exist in your csv file.',\n        );\n\n        pageContentFields.addAll(\n          fields == null || sourceField == null || fields!.contains(sourceField)\n              ? fieldsToPositions.keys\n              : fields!.where((final field) => field != sourceField),\n        );\n        continue;\n      }\n\n      // Process rows\n      final rowMetadata = <String, dynamic>{\n        if (sourceField == null)\n          'source': filePath\n        else\n          'source': row[fieldsToPositions[sourceField!]!],\n      };\n      final rowContent = {\n        for (final field in pageContentFields)\n          field: row[fieldsToPositions[field]!],\n      };\n\n      final doc = Document(\n        pageContent: rowContent.entries\n            .map((final field) => '${field.key}: ${field.value}')\n            .join('\\n'),\n        metadata: metadataBuilder?.call(rowContent, rowMetadata) ?? rowMetadata,\n      );\n\n      yield doc;\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/directory/directory.dart",
    "content": "export 'directory_io.dart' if (dart.library.js_interop) 'directory_stub.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/directory/directory_io.dart",
    "content": "import 'dart:async';\nimport 'dart:io';\nimport 'dart:math';\nimport 'package:cross_file/cross_file.dart' show XFile;\nimport 'package:glob/glob.dart';\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:path/path.dart' as path;\n\nimport '../csv.dart';\nimport '../json.dart';\nimport '../text.dart';\n\n/// {@template directory_loader}\n/// A versatile document loader that loads [Document]s from a directory.\n///\n/// This loader can:\n/// - Load files from a specified directory\n/// - Apply glob patterns to filter files\n/// - Recursively search subdirectories\n/// - Exclude specific files or patterns\n/// - Use custom loaders for different file types\n/// - Sample files randomly or by a specific count\n/// - Build custom metadata for loaded documents\n///\n/// ## Default Supported File Types\n///\n/// By default, the DirectoryLoader supports the following file types:\n/// - `.txt`: Text files (loaded using [TextLoader])\n///   - Loads the entire file content as a single document\n/// - `.json`: JSON files (loaded using [JsonLoader] with root schema)\n///   - Extracts all JSON objects or values at the root level\n/// - `.csv` and `.tsv`: CSV/TSV files (loaded using [CsvLoader])\n///   - Converts each row into a separate document\n///\n/// Example usage:\n/// ```dart\n/// // Load all text and JSON files from a directory recursively\n/// final loader = DirectoryLoader(\n///   '/path/to/documents',\n///   glob: '*.{txt,json}',\n///   recursive: true,\n/// );\n/// final documents = await loader.load();\n///\n/// // Load a random sample of 10 CSV files, excluding hidden files\n/// final sampleLoader = DirectoryLoader(\n///   '/path/to/csvs',\n///   glob: '*.csv',\n///   loadHidden: false,\n///   sampleSize: 10,\n///   randomizeSample: true,\n/// );\n/// final sampleDocuments = await sampleLoader.load();\n/// ```\n///\n/// The loader supports customization through various 
parameters:\n/// - [filePath]: The directory path to load documents from\n/// - [glob]: Glob pattern to match files (defaults to all files)\n/// - [recursive]: Whether to search recursively in subdirectories\n/// - [exclude]: Patterns to exclude from loading\n/// - [loaderMap]: Map of file extensions to specific loaders\n/// - [loadHidden]: Whether to load hidden files\n/// - [sampleSize]: Maximum number of files to load\n/// - [randomizeSample]: Whether to randomize the file sample\n/// - [sampleSeed]: Seed for reproducible random sampling\n/// - [metadataBuilder]: Custom metadata building function\n///\n/// You can extend the default loader support by providing a custom [loaderMap].\n/// {@endtemplate}\nclass DirectoryLoader extends BaseDocumentLoader {\n  /// {@macro directory_loader}\n  const DirectoryLoader(\n    this.filePath, {\n    this.glob = '*',\n    this.recursive = true,\n    this.exclude = const [],\n    this.loaderMap = const {},\n    this.loadHidden = false,\n    this.sampleSize = 0,\n    this.randomizeSample = false,\n    this.sampleSeed,\n    this.metadataBuilder,\n  });\n\n  /// The path to the directory to load documents from.\n  final String filePath;\n\n  /// Glob pattern to match files.\n  /// Use '*' to match all files.\n  final String glob;\n\n  /// Whether to search recursively in subdirectories.\n  final bool recursive;\n\n  /// Patterns to exclude from loading.\n  final List<String> exclude;\n\n  /// Map of file extensions to specific loaders.\n  ///\n  /// This map allows customization of how different file types are loaded:\n  /// - Keys are file extensions (including the dot, e.g., '.txt', '.json').\n  /// - Values are functions that create a [BaseDocumentLoader] for a given\n  ///   file path.\n  ///\n  /// If not provided, [defaultLoaderMap] will be used, which supports:\n  /// - `.txt`: [TextLoader]\n  /// - `.json`: [JsonLoader] (with root schema)\n  /// - `.csv` and `.tsv`: [CsvLoader]\n  ///\n  /// Example of extending or 
customizing loaders:\n  /// ```dart\n  /// final loader = DirectoryLoader(\n  ///   '/path/to/docs',\n  ///   loaderMap: {\n  ///     // Add a custom loader for XML files\n  ///     '.xml': (path) => CustomXmlLoader(path),\n  ///\n  ///     // Override default JSON loader with a custom implementation\n  ///     '.json': (path) => CustomJsonLoader(path),\n  ///\n  ///     // Combine with default loaders\n  ///     ...DirectoryLoader.defaultLoaderMap,\n  ///   },\n  /// );\n  /// ```\n  ///\n  /// If no loader is found for a file type, [TextLoader] will be used as a\n  /// fallback.\n  final Map<String, BaseDocumentLoader Function(String)> loaderMap;\n\n  /// Whether to load hidden files (starting with '.').\n  final bool loadHidden;\n\n  /// Maximum number of files to load.\n  /// Use 0 to load all files.\n  final int sampleSize;\n\n  /// Whether to randomize the sample of files.\n  final bool randomizeSample;\n\n  /// Seed for random sampling to ensure reproducibility.\n  final int? sampleSeed;\n\n  /// Optional function to build custom metadata for each document.\n  final Map<String, dynamic> Function(\n    XFile file,\n    Map<String, dynamic> defaultMetadata,\n  )?\n  metadataBuilder;\n\n  /// Default loader map with common file type loaders.\n  ///\n  /// Provides out-of-the-box support for:\n  /// - Plain text files (`.txt`)\n  /// - JSON files (`.json`) - uses root schema\n  /// - CSV and TSV files (`.csv`, `.tsv`)\n  ///\n  /// Can be extended or overridden when creating a [DirectoryLoader].\n  static Map<String, BaseDocumentLoader Function(String)> defaultLoaderMap = {\n    '.txt': TextLoader.new,\n    '.json': (path) => JsonLoader(path, jpSchema: r'$'),\n    '.csv': CsvLoader.new,\n    '.tsv': CsvLoader.new,\n  };\n\n  bool _shouldLoadFile(File file) {\n    if (!loadHidden && path.basename(file.path).startsWith('.')) {\n      return false;\n    }\n\n    final globMatcher = Glob(glob);\n    if (!globMatcher.matches(path.basename(file.path))) {\n      return 
false;\n    }\n\n    for (final excludePattern in exclude) {\n      if (Glob(excludePattern).matches(path.basename(file.path))) {\n        return false;\n      }\n    }\n\n    return true;\n  }\n\n  Future<Map<String, dynamic>> _buildDefaultMetadata(XFile file) async {\n    return {\n      'source': file.path,\n      'name': path.basename(file.path),\n      'extension': path.extension(file.path),\n      'size': await file.length(),\n      'lastModified': (await file.lastModified()).millisecondsSinceEpoch,\n    };\n  }\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    if (glob.isEmpty) {\n      throw ArgumentError('Glob pattern must not be empty');\n    }\n\n    final directory = Directory(filePath);\n\n    var files = directory\n        .listSync(recursive: recursive)\n        .whereType<File>()\n        .where(_shouldLoadFile)\n        .map((e) => XFile(e.path));\n\n    if (sampleSize > 0) {\n      if (randomizeSample) {\n        final seed = sampleSeed ?? DateTime.now().millisecondsSinceEpoch;\n        files = files.toList(growable: false)..shuffle(Random(seed));\n      }\n      files = files.take(sampleSize);\n    }\n\n    for (final file in files) {\n      final ext = path.extension(file.path).toLowerCase();\n\n      final loaders = loaderMap.isNotEmpty ? loaderMap : defaultLoaderMap;\n      final loader = loaders[ext]?.call(file.path) ?? TextLoader(file.path);\n\n      final defaultMetadata = await _buildDefaultMetadata(file);\n      final metadata =\n          metadataBuilder?.call(file, defaultMetadata) ?? defaultMetadata;\n\n      await for (final doc in loader.lazyLoad()) {\n        final finalDoc = Document(\n          pageContent: doc.pageContent,\n          metadata: doc.metadata['source'] == file.path\n              ? metadata\n              : {...metadata, ...doc.metadata},\n        );\n        yield finalDoc;\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/directory/directory_stub.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:async';\nimport 'package:cross_file/cross_file.dart' show XFile;\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\n\n// This is a stub class\n// DirectoryLoader is not supported on web platform\nclass DirectoryLoader extends BaseDocumentLoader {\n  const DirectoryLoader(\n    this.filePath, {\n    this.glob = '*',\n    this.recursive = true,\n    this.exclude = const [],\n    this.loaderMap = const {},\n    this.loadHidden = false,\n    this.sampleSize = 0,\n    this.randomizeSample = false,\n    this.sampleSeed,\n    this.metadataBuilder,\n  });\n\n  final String filePath;\n  final String glob;\n  final bool recursive;\n  final List<String> exclude;\n  final Map<String, BaseDocumentLoader Function(String)> loaderMap;\n  final bool loadHidden;\n  final int sampleSize;\n  final bool randomizeSample;\n  final int? sampleSeed;\n  final Map<String, dynamic> Function(\n    XFile file,\n    Map<String, dynamic> defaultMetadata,\n  )?\n  metadataBuilder;\n  static Map<String, BaseDocumentLoader Function(String)> defaultLoaderMap = {};\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    throw UnsupportedError('DirectoryLoader is not supported on web platform.');\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/document_loaders.dart",
    "content": "export 'csv.dart';\nexport 'directory/directory.dart';\nexport 'json.dart';\nexport 'text.dart';\nexport 'web.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/json.dart",
    "content": "import 'dart:convert';\n\nimport 'package:cross_file/cross_file.dart';\nimport 'package:json_path/json_path.dart';\n\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\n\n/// {@template json_loader}\n/// A document loader that loads [Document]s from a JSON file.\n///\n/// This loader reads a JSON file located at [filePath] and extracts\n/// [Document]s based on the provided JSON path schema [jpSchema].\n/// Each [Document] represents a matching JSON object or value found in the\n/// file.\n///\n/// If you want to customize the metadata for each [Document], you can provide\n/// a [metadataBuilder] function that takes in the JSON object extracted by the\n/// [jpSchema] and the default file metadata and returns a map of the updated\n/// metadata.\n///\n/// Example usage:\n/// ```dart\n/// final loader = JsonLoader(\n///   'path/to/file.json',\n///   jpSchema: '$..yourJsonPath',\n/// );\n/// final documents = await loader.load();\n/// ```\n///\n/// Documentation related to JSON path schemas:\n/// - https://pub.dev/packages/json_path\n/// - https://goessner.net/articles/JsonPath\n/// {@endtemplate}\nclass JsonLoader extends BaseDocumentLoader {\n  /// {@macro json_loader}\n  const JsonLoader(\n    this.filePath, {\n    required this.jpSchema,\n    this.metadataBuilder,\n  });\n\n  /// The path to the JSON file.\n  final String filePath;\n\n  /// The JSON path schema to use to extract the data or text from the JSON.\n  final String jpSchema;\n\n  /// A function that takes in the JSON object extracted by the [jpSchema] and\n  /// the default file metadata and returns a map of the updated metadata.\n  /// If this function is not provided, the default file metadata will be used.\n  final Map<String, dynamic> Function(\n    Object? 
matchValue,\n    Map<String, dynamic> fileMetadata,\n  )?\n  metadataBuilder;\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    final file = XFile(filePath);\n\n    final fileName = file.name;\n    final fileSize = await file.length();\n    final fileLastModified = await file.lastModified();\n    final basePath = JsonPath(jpSchema);\n    final fileMetadata = <String, dynamic>{\n      'source': filePath,\n      'name': fileName,\n      'size': fileSize,\n      'lastModified': fileLastModified.millisecondsSinceEpoch,\n    };\n\n    final fileContent = await file.readAsString();\n    final jsonData = json.decode(fileContent);\n\n    for (final value in basePath.readValues(jsonData)) {\n      final doc = Document(\n        pageContent: value.toString(),\n        metadata: metadataBuilder?.call(value, fileMetadata) ?? fileMetadata,\n      );\n      yield doc;\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/text.dart",
    "content": "import 'package:cross_file/cross_file.dart';\n\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\n\n/// {@template text_loader}\n/// A document loader that loads a [Document] from a text file.\n///\n/// Example usage:\n/// ```dart\n/// final loader = TextLoader('path/to/file.txt');\n/// final documents = await loader.load();\n/// ```\n/// {@endtemplate}\nclass TextLoader extends BaseDocumentLoader {\n  /// {@macro text_loader}\n  const TextLoader(this.filePath);\n\n  /// The path to the text file.\n  final String filePath;\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    final file = XFile(filePath);\n\n    final fileName = file.name;\n    final fileSize = await file.length();\n    final fileLastModified = await file.lastModified();\n    final fileContent = await file.readAsString();\n\n    final doc = Document(\n      pageContent: fileContent,\n      metadata: {\n        'source': filePath,\n        'name': fileName,\n        'size': fileSize,\n        'lastModified': fileLastModified.millisecondsSinceEpoch,\n      },\n    );\n    yield doc;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/document_loaders/web.dart",
    "content": "import 'dart:convert';\n\nimport 'package:beautiful_soup_dart/beautiful_soup.dart';\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/document_loaders.dart';\nimport 'package:langchain_core/documents.dart';\n\n/// {@template web_base_loader}\n/// A document loader that loads [Document]s from web pages.\n///\n/// It uses [http](https://pub.dev/packages/http) to fetch the web page content.\n/// And [beautiful_soup_dart](https://pub.dev/packages/beautiful_soup_dart) to\n/// parse the HTML content.\n///\n/// Example usage:\n/// ```dart\n/// final loader = WebBaseLoader('https://en.wikipedia.org/wiki/Wikipedia');\n/// final documents = await loader.load();\n/// ```\n/// {@endtemplate}\nclass WebBaseLoader extends BaseDocumentLoader {\n  /// {@macro web_base_loader}\n  const WebBaseLoader(this.urls, {this.requestHeaders});\n\n  /// The URLs to the web pages to load.\n  final List<String> urls;\n\n  /// The request headers to use when fetching the web page content.\n  final Map<String, String>? requestHeaders;\n\n  @override\n  Stream<Document> lazyLoad() async* {\n    for (final url in urls) {\n      final doc = await _scrape(url);\n      yield doc;\n    }\n  }\n\n  /// 1. Fetch the HTML content from the [url].\n  /// 2. Parse the HTML content using [BeautifulSoup].\n  /// 3. Extract the text from the HTML content.\n  /// 4. 
Build the [Document] with the extracted text and metadata.\n  Future<Document> _scrape(final String url) async {\n    final html = await _fetchUrl(url);\n    final soup = BeautifulSoup(html);\n    final body = soup.body!;\n    body.findAll('style').forEach((final element) => element.extract());\n    body.findAll('script').forEach((final element) => element.extract());\n    final content = body.getText(strip: true);\n    return Document(pageContent: content, metadata: _buildMetadata(url, soup));\n  }\n\n  Future<String> _fetchUrl(final String url) async {\n    final response = await http.get(Uri.parse(url), headers: requestHeaders);\n    return utf8.decode(response.bodyBytes);\n  }\n\n  Map<String, dynamic> _buildMetadata(\n    final String url,\n    final BeautifulSoup soup,\n  ) {\n    final title = soup.title;\n    final description = soup\n        .find('meta', attrs: {'name': 'description'})\n        ?.getAttrValue('content');\n    final language = soup.find('html')?.getAttrValue('lang');\n    return {\n      'source': url,\n      if (title != null) 'title': title.text,\n      if (description != null) 'description': description.trim(),\n      if (language != null) 'language': language,\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/calculator.dart",
    "content": "import 'dart:async';\n\nimport 'package:langchain_core/tools.dart';\nimport 'package:math_expressions/math_expressions.dart';\n\n/// {@template calculator_tool}\n/// A tool that can be used to calculate the result of a math expression.\n///\n/// Example:\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final llm = ChatOpenAI(\n///   apiKey: openaiApiKey,\n///   temperature: 0,\n/// );\n/// final tool = CalculatorTool();\n/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: [tool]);\n/// final executor = AgentExecutor(agent: agent);\n/// final res = await executor.run('What is 40 raised to the 0.43 power? ');\n/// print(res); // -> '40 raised to the power of 0.43 is approximately 4.8852'\n/// ```\n/// {@endtemplate}\nfinal class CalculatorTool extends StringTool<ToolOptions> {\n  /// {@macro calculator_tool}\n  CalculatorTool()\n    : super(\n        name: 'calculator',\n        description:\n            'Useful for getting the result of a math expression '\n            'that could be executed by a simple calculator.',\n        inputDescription:\n            'A valid mathematical expression to evaluate. '\n            'For example: \"(x^2 + cos(y)) / 3\".',\n      );\n\n  final _parser = GrammarParser();\n  final _evaluator = RealEvaluator();\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final ToolOptions? options,\n  }) async {\n    try {\n      final exp = _parser.parse(toolInput);\n      return _evaluator.evaluate(exp).toString();\n    } catch (e) {\n      return \"I don't know how to do that.\";\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tavily/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:tavily_dart/tavily_dart.dart';\n\nimport 'types.dart';\n\nextension TavilySearchDepthX on TavilySearchDepth {\n  SearchRequestSearchDepth toSearchRequestSearchDepth() => switch (this) {\n    TavilySearchDepth.basic => SearchRequestSearchDepth.basic,\n    TavilySearchDepth.advanced => SearchRequestSearchDepth.advanced,\n  };\n}\n\nextension TavilySearchResultX on SearchResult {\n  TavilySearchResult toTavilySearchResult() => TavilySearchResult(\n    title: title,\n    url: url,\n    content: content,\n    rawContent: rawContent,\n    score: score,\n  );\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tavily/tavily.dart",
    "content": "export 'tavily_answer.dart';\nexport 'tavily_search_results.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tavily/tavily_answer.dart",
    "content": "import 'dart:async';\n\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/tools.dart';\nimport 'package:tavily_dart/tavily_dart.dart';\n\nimport 'mappers.dart';\nimport 'tavily_search_results.dart';\nimport 'types.dart';\n\n/// Tool that queries the [Tavily Search API](https://tavily.com) and\n/// gets an answer to the search query.\n///\n/// The Tavily API uses API keys for authentication. Visit the\n/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll\n/// use in your requests.\n///\n/// If you want to get a list of search results, use the\n/// [TavilySearchResultsTool] instead.\n///\n/// Example:\n/// ```dart\n/// final tool = TavilyAnswerTool(\n///   apiKey: Platform.environment['TAVILY_API_KEY']!,\n/// );\n/// final res = await tool.invoke('What is the weather like in New York?');\n/// print(res);\n/// // The current weather in New York is clear with a temperature of 22.8°C (73.0°F)...\n/// ```\nfinal class TavilyAnswerTool extends StringTool<TavilyAnswerToolOptions> {\n  /// Creates a [TavilyAnswerTool] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Tavily API key. You can find your API key in the\n  ///   [Tavily console](https://app.tavily.com/).\n  ///\n  /// Advanced configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters (e.g. the Azure OpenAI API\n  ///   requires attaching a `version` query parameter to every request).\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. 
to use a Socks5 proxy).\n  TavilyAnswerTool({\n    this.apiKey,\n    final String? baseUrl,\n    final Map<String, String> headers = const {},\n    final Map<String, dynamic> queryParams = const {},\n    final http.Client? client,\n    super.defaultOptions = const TavilyAnswerToolOptions(),\n  }) : _client = TavilyClient(\n         baseUrl: baseUrl,\n         headers: headers,\n         queryParams: queryParams,\n         client: client,\n       ),\n       super(\n         name: 'tavily_answer',\n         description:\n             'A search engine optimized for comprehensive, accurate, and trusted answers. '\n             'Useful for when you need to answer questions about current events. '\n             'The tool returns an answer to the search query - not the search results.',\n         inputDescription:\n             'The search query to get an answer to. '\n             'Eg: \"What is the weather like in New York?\"',\n       );\n\n  /// A client for interacting with Tavily API.\n  final TavilyClient _client;\n\n  /// Your Tavily API key.\n  String? apiKey;\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final TavilyAnswerToolOptions? options,\n  }) async {\n    final res = await _client.search(\n      request: SearchRequest(\n        apiKey: apiKey,\n        query: toolInput,\n        includeAnswer: true,\n        searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth)\n            .toSearchRequestSearchDepth(),\n        maxResults: options?.maxResults ?? defaultOptions.maxResults,\n        includeDomains:\n            options?.includeDomains ?? defaultOptions.includeDomains,\n        excludeDomains:\n            options?.excludeDomains ?? defaultOptions.excludeDomains,\n      ),\n    );\n    return res.answer ?? '';\n  }\n\n  @override\n  void close() {\n    _client.endSession();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tavily/tavily_search_results.dart",
    "content": "import 'dart:async';\n\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/tools.dart';\nimport 'package:tavily_dart/tavily_dart.dart';\n\nimport 'mappers.dart';\nimport 'tavily_answer.dart';\nimport 'types.dart';\n\n/// Tool that queries the [Tavily  Search API](https://tavily.com) and\n/// gets back a list of search results.\n///\n/// The Tavily API uses API keys for authentication. Visit the\n/// [Tavily console](https://app.tavily.com/) to retrieve the API key you'll\n/// use in your requests.\n///\n/// If you want to get directly an answer to a search query, use the\n/// [TavilyAnswerTool] instead.\n///\n/// Example:\n/// ```dart\n/// final tool = TavilySearchResultsTool(\n///   apiKey: Platform.environment['TAVILY_API_KEY']!,\n/// );\n/// final res = await tool.invoke('What is the weather like in New York?');\n/// print(res);\n/// // [\n/// //   {\n/// //     \"title\": \"Weather in New York\",\n/// //     \"url\": \"https://www.weatherapi.com/\",\n/// //     \"content\": \"{'location': {'lat': 40.71, 'lon': -74.01}, 'current': {'last_updated': '2024-06-20 17:00', 'temp_c': 31.1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png'}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 161, 'wind_dir': 'SSE', 'pressure_mb': 1025.0, 'pressure_in': 30.26, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 48, 'cloud': 0, 'feelslike_c': 33.1, 'feelslike_f': 91.6, 'windchill_c': 29.5, 'windchill_f': 85.0, 'heatindex_c': 30.6, 'heatindex_f': 87.0, 'dewpoint_c': 17.7, 'dewpoint_f': 63.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 16.4, 'gust_kph': 26.4}}\",\n/// //     \"score\": 0.98855\n/// //   },\n/// //   ...\n/// // ]\n/// ```\nfinal class TavilySearchResultsTool\n    extends Tool<String, TavilySearchResultsToolOptions, TavilySearchResults> {\n  /// Creates a [TavilySearchResultsTool] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Tavily API key. 
You can find your API key in the\n  ///   [Tavily console](https://app.tavily.com/).\n  ///\n  /// Advanced configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Tavily's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters (e.g. Azure OpenAI API\n  ///   requires attaching a `version` query parameter to every request).\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  TavilySearchResultsTool({\n    this.apiKey,\n    final String? baseUrl,\n    final Map<String, String> headers = const {},\n    final Map<String, dynamic> queryParams = const {},\n    final http.Client? client,\n    super.defaultOptions = const TavilySearchResultsToolOptions(),\n  }) : _client = TavilyClient(\n         baseUrl: baseUrl,\n         headers: headers,\n         queryParams: queryParams,\n         client: client,\n       ),\n       super(\n         name: 'tavily_search_results',\n         description:\n             'A search engine optimized for comprehensive, accurate, and trusted results. '\n             'Useful for when you need to answer questions about current events. '\n             'The tool returns a JSON object with search results.',\n         inputJsonSchema: {\n           'type': 'object',\n           'properties': {\n             'query': {\n               'type': 'string',\n               'description':\n                   'The search query to look up. 
'\n                   'Eg: \"What is the weather like in New York?\"',\n             },\n           },\n           'required': ['query'],\n         },\n       );\n\n  /// A client for interacting with Tavily API.\n  final TavilyClient _client;\n\n  /// Your Tavily API key.\n  String? apiKey;\n\n  @override\n  Future<TavilySearchResults> invokeInternal(\n    final String input, {\n    final TavilySearchResultsToolOptions? options,\n  }) async {\n    final res = await _client.search(\n      request: SearchRequest(\n        apiKey: apiKey,\n        query: input,\n        searchDepth: (options?.searchDepth ?? defaultOptions.searchDepth)\n            .toSearchRequestSearchDepth(),\n        maxResults: options?.maxResults ?? defaultOptions.maxResults,\n        includeRawContent:\n            options?.includeRawContent ?? defaultOptions.includeRawContent,\n        includeDomains:\n            options?.includeDomains ?? defaultOptions.includeDomains,\n        excludeDomains:\n            options?.excludeDomains ?? defaultOptions.excludeDomains,\n      ),\n    );\n    return TavilySearchResults(\n      results: res.results\n          .map((r) => r.toTavilySearchResult())\n          .toList(growable: false),\n    );\n  }\n\n  @override\n  String getInputFromJson(final Map<String, dynamic> json) {\n    return json['query'] as String;\n  }\n\n  @override\n  void close() {\n    _client.endSession();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tavily/types.dart",
    "content": "import 'dart:convert';\n\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\nimport 'tavily_answer.dart';\nimport 'tavily_search_results.dart';\n\n/// The depth of the search.\nenum TavilySearchDepth {\n  /// Basic search depth.\n  basic,\n\n  /// Advanced search depth.\n  advanced,\n}\n\n/// {@template tavily_search_results}\n/// A search results from the Tavily search engine.\n/// {@endtemplate}\n@immutable\nclass TavilySearchResults {\n  /// {@macro tavily_search_results}\n  const TavilySearchResults({required this.results});\n\n  /// The search results.\n  final List<TavilySearchResult> results;\n\n  @override\n  String toString() {\n    return json.encode(\n      results\n          .map(\n            (result) => {\n              'title': result.title,\n              'url': result.url,\n              'content': result.content,\n              'rawContent': result.rawContent,\n              'score': result.score,\n            },\n          )\n          .toList(growable: false),\n    );\n  }\n}\n\n/// {@template tavily_search_result}\n/// A search result from the Tavily search engine.\n/// {@endtemplate}\n@immutable\nclass TavilySearchResult {\n  /// {@macro tavily_search_result}\n  const TavilySearchResult({\n    required this.title,\n    required this.url,\n    required this.content,\n    this.rawContent,\n    required this.score,\n  });\n\n  /// The title of the search result url.\n  final String title;\n\n  /// The url of the search result.\n  final String url;\n\n  /// The most query related content from the scraped url.\n  final String content;\n\n  /// The parsed and cleaned HTML of the site. For now includes parsed text only.\n  final String? 
rawContent;\n\n  /// The relevance score of the search result.\n  final double score;\n}\n\n/// {@template tavily_search_results_tool_options}\n/// Generation options to pass into the [TavilySearchResultsTool].\n/// {@endtemplate}\nclass TavilySearchResultsToolOptions extends ToolOptions {\n  /// {@macro tavily_search_results_tool_options}\n  const TavilySearchResultsToolOptions({\n    this.maxResults = 5,\n    this.searchDepth = TavilySearchDepth.basic,\n    this.includeRawContent = false,\n    this.includeDomains,\n    this.excludeDomains,\n  });\n\n  /// The number of maximum search results to return.\n  final int maxResults;\n\n  /// The depth of the search.\n  final TavilySearchDepth searchDepth;\n\n  /// Include raw content in the search results.\n  final bool includeRawContent;\n\n  /// A list of domains to specifically include in the search results.\n  final List<String>? includeDomains;\n\n  /// A list of domains to specifically exclude from the search results.\n  final List<String>? excludeDomains;\n}\n\n/// {@template tavily_answer_tool_options}\n/// Generation options to pass into the [TavilyAnswerTool].\n/// {@endtemplate}\n@immutable\nclass TavilyAnswerToolOptions extends ToolOptions {\n  /// {@macro tavily_answer_tool_options}\n  const TavilyAnswerToolOptions({\n    this.maxResults = 5,\n    this.searchDepth = TavilySearchDepth.basic,\n    this.includeDomains,\n    this.excludeDomains,\n    super.concurrencyLimit,\n  });\n\n  /// The number of maximum search results to return.\n  final int maxResults;\n\n  /// The depth of the search.\n  final TavilySearchDepth searchDepth;\n\n  /// A list of domains to specifically include in the search results.\n  final List<String>? includeDomains;\n\n  /// A list of domains to specifically exclude from the search results.\n  final List<String>? excludeDomains;\n\n  @override\n  TavilyAnswerToolOptions copyWith({\n    final int? maxResults,\n    final TavilySearchDepth? searchDepth,\n    final List<String>? 
includeDomains,\n    final List<String>? excludeDomains,\n    final int? concurrencyLimit,\n  }) {\n    return TavilyAnswerToolOptions(\n      maxResults: maxResults ?? this.maxResults,\n      searchDepth: searchDepth ?? this.searchDepth,\n      includeDomains: includeDomains ?? this.includeDomains,\n      excludeDomains: excludeDomains ?? this.excludeDomains,\n      concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit,\n    );\n  }\n\n  @override\n  TavilyAnswerToolOptions merge(\n    covariant final TavilyAnswerToolOptions? other,\n  ) {\n    return copyWith(\n      maxResults: other?.maxResults,\n      searchDepth: other?.searchDepth,\n      includeDomains: other?.includeDomains,\n      excludeDomains: other?.excludeDomains,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final TavilyAnswerToolOptions other) {\n    return maxResults == other.maxResults &&\n        searchDepth == other.searchDepth &&\n        includeDomains == other.includeDomains &&\n        excludeDomains == other.excludeDomains &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return maxResults.hashCode ^\n        searchDepth.hashCode ^\n        includeDomains.hashCode ^\n        excludeDomains.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/tools/tools.dart",
    "content": "export 'calculator.dart';\nexport 'tavily/tavily.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox.dart",
    "content": "// ignore_for_file: unsafe_variance\n\nimport 'dart:convert';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:objectbox/objectbox.dart'\n    show\n        Box,\n        Condition,\n        ObjectWithScore,\n        QueryHnswProperty,\n        QueryStringProperty;\nimport 'package:uuid/uuid.dart';\n\n/// {@template base_object_box_vector_store}\n/// Base class for ObjectBox vector store.\n///\n/// The [ObjectBoxVectorStore] class is a pre-configured version of this class,\n/// but it can only be used if you don't use ObjectBox for anything else.\n///\n/// If you need more control over the ObjectBox store, use this class instead.\n/// For example, if you are using ObjectBox to store other entities, or if you\n/// need to customize the Document entity class.\n///\n/// Here is an example of how to use this class:\n///\n/// First, you can define our own Document entity class instead of using the\n/// one provided by the [ObjectBoxVectorStore]. In this way, you can customize\n/// the entity to your needs. You will need to define the mapping logic between\n/// the entity and the LangChain [Document] model.\n///\n/// ```dart\n/// @Entity()\n/// class MyDocumentEntity {\n///   MyDocumentEntity({\n///     required this.id,\n///     required this.content,\n///     required this.metadata,\n///     required this.embedding,\n///   });\n///\n///   @Id()\n///   int internalId = 0;\n///\n///   @Unique(onConflict: ConflictStrategy.replace)\n///   String id;\n///\n///   String content;\n///\n///   String metadata;\n///\n///   @HnswIndex(\n///     dimensions: 768,\n///     distanceType: VectorDistanceType.cosine,\n///   )\n///   @Property(type: PropertyType.floatVector)\n///   List<double> embedding;\n///\n///   factory MyDocumentEntity.fromModel(\n///     Document doc, List<double> embedding,\n///   ) => MyDocumentEntity(\n///         id: doc.id ?? 
'',\n///         content: doc.pageContent,\n///         metadata: jsonEncode(doc.metadata),\n///         embedding: embedding,\n///       );\n///\n///   Document toModel() => Document(\n///         id: id,\n///         pageContent: content,\n///         metadata: jsonDecode(metadata),\n///       );\n/// }\n/// ```\n///\n/// After defining the entity class, you will need to run the ObjectBox\n/// generator:\n///\n/// ```sh\n/// dart run build_runner build --delete-conflicting-outputs\n/// ```\n///\n/// Then, you just need to create your custom vector store class that\n/// extends [BaseObjectBoxVectorStore] and wire everything up:\n///\n/// ```dart\n/// class MyCustomVectorStore extends BaseObjectBoxVectorStore<MyDocumentEntity> {\n///   MyCustomVectorStore({\n///     required super.embeddings,\n///     required Store store,\n///   }) : super(\n///           box: store.box<MyDocumentEntity>(),\n///           createEntity: (\n///             String id,\n///             String content,\n///             String metadata,\n///             List<double> embedding,\n///           ) =>\n///               MyDocumentEntity(\n///             id: id,\n///             content: content,\n///             metadata: metadata,\n///             embedding: embedding,\n///           ),\n///           createDocument: (MyDocumentEntity docDto) => docDto.toModel(),\n///           getIdProperty: () => MyDocumentEntity_.id,\n///           getEmbeddingProperty: () => MyDocumentEntity_.embedding,\n///         );\n/// }\n/// ```\n///\n/// Now you can use the [MyCustomVectorStore] class to store and search documents.\n/// {@endtemplate}\nclass BaseObjectBoxVectorStore<T> extends VectorStore {\n  /// {@macro base_object_box_vector_store}\n  BaseObjectBoxVectorStore({\n    required super.embeddings,\n    required final Box<T> box,\n    required final T Function(\n      String id,\n      String content,\n      String metadata,\n      List<double> embedding,\n    )\n    createEntity,\n    required 
final Document Function(T) createDocument,\n    required final QueryStringProperty<T> Function() getIdProperty,\n    required final QueryHnswProperty<T> Function() getEmbeddingProperty,\n  }) : _box = box,\n       _createEntity = createEntity,\n       _createDocument = createDocument,\n       _getIdProperty = getIdProperty,\n       _getEmbeddingProperty = getEmbeddingProperty;\n\n  /// The [Box] to store the entities in.\n  final Box<T> _box;\n\n  /// The function to create an entity [T] from the given data.\n  final T Function(\n    String id,\n    String content,\n    String metadata,\n    List<double> embedding,\n  )\n  _createEntity;\n\n  /// The function to create a [Document] from the given entity [T].\n  final Document Function(T) _createDocument;\n\n  /// A getter for the ID query property.\n  final QueryStringProperty<T> Function() _getIdProperty;\n\n  /// A getter for the embedding query property.\n  final QueryHnswProperty<T> Function() _getEmbeddingProperty;\n\n  /// UUID generator.\n  final _uuid = const Uuid();\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    assert(vectors.length == documents.length);\n\n    final List<String> ids = [];\n    final List<T> records = [];\n    for (var i = 0; i < documents.length; i++) {\n      final doc = documents[i];\n      final id = doc.id ?? 
_uuid.v4();\n      final entity = _createEntity(\n        id,\n        doc.pageContent,\n        jsonEncode(doc.metadata),\n        vectors[i],\n      );\n      ids.add(id);\n      records.add(entity);\n    }\n\n    _box.putMany(records);\n    return ids;\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) {\n    return _box.query(_getIdProperty().oneOf(ids)).build().removeAsync();\n  }\n\n  /// Delete by condition.\n  ///\n  /// - [condition] is the condition to delete by.\n  Future<void> deleteWhere(final Condition<T> condition) {\n    return _box.query(condition).build().removeAsync();\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    var filter = _getEmbeddingProperty().nearestNeighborsF32(\n      embedding,\n      config.k,\n    );\n\n    final filterCondition = config.filter?.values.firstOrNull;\n    if (filterCondition != null && filterCondition is Condition<T>) {\n      filter = filter.and(filterCondition);\n    }\n\n    final query = _box.query(filter).build();\n\n    Iterable<ObjectWithScore<T>> results = query.findWithScores();\n\n    if (config.scoreThreshold != null) {\n      results = results.where((final r) => r.score >= config.scoreThreshold!);\n    }\n\n    return results\n        .map((r) => (_createDocument(r.object), r.score))\n        .toList(growable: false);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/base_objectbox_stub.dart",
    "content": "// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\n\n// This is a stub class\nclass BaseObjectBoxVectorStore<T> extends VectorStore {\n  BaseObjectBoxVectorStore({\n    required super.embeddings,\n    required final Object? box,\n    required final Object? createEntity,\n    required final Object? createDocument,\n    required final Object? getIdProperty,\n    required final Object? getEmbeddingProperty,\n  });\n\n  @override\n  Future<List<String>> addVectors({\n    required List<List<double>> vectors,\n    required List<Document> documents,\n  }) {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n\n  @override\n  Future<void> delete({required List<String> ids}) {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n\n  Future<void> deleteWhere(final Object condition) {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required List<double> embedding,\n    VectorStoreSimilaritySearch config = const VectorStoreSimilaritySearch(),\n  }) {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/ob.dart",
    "content": "export 'ob_io.dart'\n    if (dart.library.js_interop) 'ob_stub.dart'\n    show\n        BaseObjectBoxVectorStore,\n        ObjectBoxDocument,\n        ObjectBoxDocumentProps,\n        ObjectBoxSimilaritySearch,\n        ObjectBoxVectorStore;\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/ob_io.dart",
    "content": "export 'base_objectbox.dart';\nexport 'objectbox.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/ob_stub.dart",
    "content": "export 'base_objectbox_stub.dart';\nexport 'objectbox_stub.dart';\nexport 'types_stub.dart';\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/objectbox-model.json",
    "content": "{\n  \"_note1\": \"KEEP THIS FILE! Check it into a version control system (VCS) like git.\",\n  \"_note2\": \"ObjectBox manages crucial IDs for your object model. See docs for details.\",\n  \"_note3\": \"If you have VCS merge conflicts, you must resolve them according to ObjectBox docs.\",\n  \"entities\": [\n    {\n      \"id\": \"1:4662034750769022750\",\n      \"lastPropertyId\": \"5:5762998900965066008\",\n      \"name\": \"ObjectBoxDocument\",\n      \"properties\": [\n        {\n          \"id\": \"1:328437667364158177\",\n          \"name\": \"internalId\",\n          \"type\": 6,\n          \"flags\": 1\n        },\n        {\n          \"id\": \"2:3766173764062654800\",\n          \"name\": \"id\",\n          \"type\": 9,\n          \"flags\": 34848,\n          \"indexId\": \"1:8818474670164842374\"\n        },\n        {\n          \"id\": \"3:7972539540824041325\",\n          \"name\": \"content\",\n          \"type\": 9\n        },\n        {\n          \"id\": \"4:866532944790310363\",\n          \"name\": \"metadata\",\n          \"type\": 9\n        },\n        {\n          \"id\": \"5:5762998900965066008\",\n          \"name\": \"embedding\",\n          \"type\": 28,\n          \"flags\": 8,\n          \"indexId\": \"2:3016727589204567263\"\n        }\n      ],\n      \"relations\": []\n    }\n  ],\n  \"lastEntityId\": \"1:4662034750769022750\",\n  \"lastIndexId\": \"2:3016727589204567263\",\n  \"lastRelationId\": \"0:0\",\n  \"lastSequenceId\": \"0:0\",\n  \"modelVersion\": 5,\n  \"modelVersionParserMinimum\": 5,\n  \"retiredEntityUids\": [],\n  \"retiredIndexUids\": [],\n  \"retiredPropertyUids\": [],\n  \"retiredRelationUids\": [],\n  \"version\": 1\n}"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.dart",
    "content": "import 'dart:convert';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:objectbox/objectbox.dart'\n    show\n        Condition,\n        ConflictStrategy,\n        Entity,\n        HnswIndex,\n        Id,\n        Property,\n        PropertyType,\n        Store,\n        Unique;\n\nimport 'base_objectbox.dart';\nimport 'objectbox.g.dart' as obxg;\nimport 'types.dart';\n\n/// Vector store for the [ObjectBox](https://objectbox.io/) on-device database.\n///\n/// ```dart\n/// final embeddings = OllamaEmbeddings(model: 'llama3.2');\n/// final vectorStore = ObjectBoxVectorStore.open(embeddings: embeddings);\n/// ```\n///\n/// This vector stores creates a [Store] with an [ObjectBoxDocument] entity\n/// that persists LangChain [Document]s along with their embeddings. If you\n/// need more control over the entity or the storeo, you can use the\n/// [BaseObjectBoxVectorStore] class instead.\n///\n/// See documentation for more details:\n/// - [LangChain.dart ObjectBox docs](https://langchaindart.com/#/modules/retrieval/vector_stores/integrations/objectbox)\n/// - [ObjectBox Vector Search docs](https://docs.objectbox.io/ann-vector-search)\n///\n/// ### Filtering\n///\n/// You can use the [ObjectBoxSimilaritySearch] class to pass ObjectBox-specific\n/// filtering options.\n///\n/// [ObjectBoxVectorStore] supports filtering queries by id, content or metadata\n/// using ObjectBox's [Condition]. You can define the filter condition in the\n/// [ObjectBoxSimilaritySearch] `filterCondition` parameter. 
Use the\n/// [ObjectBoxDocumentProps] class to reference the entity fields to use in the\n/// query.\n///\n/// For example:\n/// ```dart\n/// final vectorStore = ObjectBoxVectorStore.open(...);\n/// final res = await vectorStore.similaritySearch(\n///   query: 'What should I feed my cat?',\n///   config: ObjectBoxSimilaritySearch(\n///     k: 5,\n///     scoreThreshold: 0.8,\n///     filterCondition: ObjectBoxDocumentProps.id.equals('my-id')\n///         .or(ObjectBoxDocumentProps.metadata.contains('some-text')),\n///   ),\n/// );\n/// ```\nclass ObjectBoxVectorStore extends BaseObjectBoxVectorStore<ObjectBoxDocument> {\n  ObjectBoxVectorStore._(Store store, {required super.embeddings})\n    : _store = store,\n      super(\n        box: store.box<ObjectBoxDocument>(),\n        createEntity: _createObjectBoxDocument,\n        createDocument: _createDoc,\n        getIdProperty: () => obxg.ObjectBoxDocument_.id,\n        getEmbeddingProperty: () => obxg.ObjectBoxDocument_.embedding,\n      );\n\n  Store? _store;\n\n  /// The ObjectBox store.\n  Store? get store => _store;\n\n  /// Creates an [ObjectBoxVectorStore] instance and opens the ObjectBox store.\n  ///\n  /// Main configuration options:\n  /// - [embeddings]  The embeddings model to use.\n  /// - [dimensions]  The number of dimensions of the embeddings (vector size).\n  ///\n  /// ObjectBox-specific options:\n  /// - Check the ObjectBox's [Store] documentation for more details on the\n  ///   different options.\n  factory ObjectBoxVectorStore.open({\n    required Embeddings embeddings,\n    required final int dimensions,\n    final String? directory,\n    final int? maxDBSizeInKB,\n    final int? maxDataSizeInKB,\n    final int? fileMode,\n    final int? maxReaders,\n    final bool queriesCaseSensitiveDefault = true,\n    final String? 
macosApplicationGroup,\n  }) {\n    final store = obxg.openStore(\n      dimensions: dimensions,\n      directory: directory,\n      maxDBSizeInKB: maxDBSizeInKB,\n      maxDataSizeInKB: maxDataSizeInKB,\n      fileMode: fileMode,\n      maxReaders: maxReaders,\n      queriesCaseSensitiveDefault: queriesCaseSensitiveDefault,\n      macosApplicationGroup: macosApplicationGroup,\n    );\n\n    return ObjectBoxVectorStore._(store, embeddings: embeddings);\n  }\n\n  /// Creates an [ObjectBoxDocument] entity.\n  static ObjectBoxDocument _createObjectBoxDocument(\n    String id,\n    String content,\n    String metadata,\n    List<double> embedding,\n  ) => ObjectBoxDocument(0, id, content, metadata, embedding);\n\n  /// Creates a [Document] from an [ObjectBoxDocument] entity.\n  static Document _createDoc(ObjectBoxDocument entity) {\n    var metadata = const <String, dynamic>{};\n    try {\n      metadata = jsonDecode(entity.metadata);\n    } catch (_) {}\n    return Document(\n      id: entity.id,\n      pageContent: entity.content,\n      metadata: metadata,\n    );\n  }\n\n  /// Closes the ObjectBox store;\n  ///\n  /// Don't try to call any other methods after the store is closed.\n  void close() {\n    _store?.close();\n    _store = null;\n  }\n}\n\n/// {@template objectbox_document}\n/// The ObjectBox entity representing a LangChain [Document].\n/// {@endtemplate}\n@Entity()\nclass ObjectBoxDocument {\n  /// {@macro objectbox_document}\n  ObjectBoxDocument(\n    this.internalId,\n    this.id,\n    this.content,\n    this.metadata,\n    this.embedding,\n  );\n\n  /// The internal ID used by ObjectBox.\n  @Id()\n  int internalId = 0;\n\n  /// The ID of the document.\n  @Unique(onConflict: ConflictStrategy.replace)\n  String id;\n\n  /// The content of the document.\n  String content;\n\n  /// The metadata of the document.\n  String metadata;\n\n  /// The embedding of the document.\n  @HnswIndex(dimensions: 0) // Set dynamically in the ObjectBoxVectorStore\n  
@Property(type: PropertyType.floatVector)\n  List<double> embedding;\n}\n\n/// [ObjectBoxDocument] entity fields to define ObjectBox queries.\n///\n/// Example:\n/// ```dart\n/// final filterCondition = ObjectBoxDocumentProps.metadata\n///     .contains('animal')\n///     .or(ObjectBoxDocumentProps.metadata.contains('natural'));\n/// ```\ntypedef ObjectBoxDocumentProps = obxg.ObjectBoxDocument_;\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/objectbox.g.dart",
    "content": "// GENERATED CODE - DO NOT MODIFY BY HAND\n// This code was generated by ObjectBox. To update it run the generator again\n// with `dart run build_runner build`.\n// See also https://docs.objectbox.io/getting-started#generate-objectbox-code\n\n// ignore_for_file: camel_case_types, depend_on_referenced_packages, avoid_js_rounded_ints, require_trailing_commas, cascade_invocations, strict_raw_type\n// coverage:ignore-file\n\nimport 'dart:typed_data';\n\nimport 'package:flat_buffers/flat_buffers.dart' as fb;\nimport 'package:objectbox/internal.dart'\n    as obx_int; // generated code can access \"internal\" functionality\nimport 'package:objectbox/objectbox.dart' as obx;\n\nimport '../../../src/vector_stores/objectbox/objectbox.dart';\n\nexport 'package:objectbox/objectbox.dart'; // so that callers only have to import this file\n\nList<obx_int.ModelEntity>? _entities;\n\nList<obx_int.ModelEntity> _getEntities(int dimensions) {\n  if (_entities != null) {\n    final objectBoxDocumentEntity = _entities![0];\n    final embeddingProperty = objectBoxDocumentEntity.properties[4];\n\n    if (embeddingProperty.hnswParams?.dimensions != dimensions) {\n      _entities = null;\n    } else {\n      return _entities!;\n    }\n  }\n\n  return _entities ??= <obx_int.ModelEntity>[\n    obx_int.ModelEntity(\n      id: const obx_int.IdUid(1, 4662034750769022750),\n      name: 'ObjectBoxDocument',\n      lastPropertyId: const obx_int.IdUid(5, 5762998900965066008),\n      flags: 0,\n      properties: <obx_int.ModelProperty>[\n        obx_int.ModelProperty(\n          id: const obx_int.IdUid(1, 328437667364158177),\n          name: 'internalId',\n          type: 6,\n          flags: 1,\n        ),\n        obx_int.ModelProperty(\n          id: const obx_int.IdUid(2, 3766173764062654800),\n          name: 'id',\n          type: 9,\n          flags: 34848,\n          indexId: const obx_int.IdUid(1, 8818474670164842374),\n        ),\n        obx_int.ModelProperty(\n          
id: const obx_int.IdUid(3, 7972539540824041325),\n          name: 'content',\n          type: 9,\n          flags: 0,\n        ),\n        obx_int.ModelProperty(\n          id: const obx_int.IdUid(4, 866532944790310363),\n          name: 'metadata',\n          type: 9,\n          flags: 0,\n        ),\n        obx_int.ModelProperty(\n          id: const obx_int.IdUid(5, 5762998900965066008),\n          name: 'embedding',\n          type: 28,\n          flags: 8,\n          indexId: const obx_int.IdUid(2, 3016727589204567263),\n          hnswParams: obx_int.ModelHnswParams(dimensions: dimensions),\n        ),\n      ],\n      relations: <obx_int.ModelRelation>[],\n      backlinks: <obx_int.ModelBacklink>[],\n    ),\n  ];\n}\n\n/// Shortcut for [obx.Store.new] that passes [getObjectBoxModel] and for Flutter\n/// apps by default a [directory] using `defaultStoreDirectory()` from the\n/// ObjectBox Flutter library.\n///\n/// Note: for desktop apps it is recommended to specify a unique [directory].\n///\n/// See [obx.Store.new] for an explanation of all parameters.\n///\n/// For Flutter apps, also calls `loadObjectBoxLibraryAndroidCompat()` from\n/// the ObjectBox Flutter library to fix loading the native ObjectBox library\n/// on Android 6 and older.\nobx.Store openStore({\n  required int dimensions,\n  String? directory,\n  int? maxDBSizeInKB,\n  int? maxDataSizeInKB,\n  int? fileMode,\n  int? maxReaders,\n  bool queriesCaseSensitiveDefault = true,\n  String? 
macosApplicationGroup,\n}) {\n  return obx.Store(\n    getObjectBoxModel(dimensions),\n    directory: directory,\n    maxDBSizeInKB: maxDBSizeInKB,\n    maxDataSizeInKB: maxDataSizeInKB,\n    fileMode: fileMode,\n    maxReaders: maxReaders,\n    queriesCaseSensitiveDefault: queriesCaseSensitiveDefault,\n    macosApplicationGroup: macosApplicationGroup,\n  );\n}\n\n/// Returns the ObjectBox model definition for this project for use with\n/// [obx.Store.new].\nobx_int.ModelDefinition getObjectBoxModel(int dimensions) {\n  final entities = _getEntities(dimensions);\n  final model = obx_int.ModelInfo(\n    generatorVersion: obx_int.GeneratorVersion.v2025_12_16,\n    entities: entities,\n    lastEntityId: const obx_int.IdUid(1, 4662034750769022750),\n    lastIndexId: const obx_int.IdUid(2, 3016727589204567263),\n    lastRelationId: const obx_int.IdUid(0, 0),\n    lastSequenceId: const obx_int.IdUid(0, 0),\n    retiredEntityUids: const [],\n    retiredIndexUids: const [],\n    retiredPropertyUids: const [],\n    retiredRelationUids: const [],\n    modelVersion: 5,\n    modelVersionParserMinimum: 5,\n    version: 1,\n  );\n\n  final bindings = <Type, obx_int.EntityDefinition>{\n    ObjectBoxDocument: obx_int.EntityDefinition<ObjectBoxDocument>(\n      model: entities[0],\n      toOneRelations: (ObjectBoxDocument object) => [],\n      toManyRelations: (ObjectBoxDocument object) => {},\n      getId: (ObjectBoxDocument object) => object.internalId,\n      setId: (ObjectBoxDocument object, int id) {\n        object.internalId = id;\n      },\n      objectToFB: (ObjectBoxDocument object, fb.Builder fbb) {\n        final idOffset = fbb.writeString(object.id);\n        final contentOffset = fbb.writeString(object.content);\n        final metadataOffset = fbb.writeString(object.metadata);\n        final embeddingOffset = fbb.writeListFloat32(object.embedding);\n        fbb.startTable(6);\n        fbb.addInt64(0, object.internalId);\n        fbb.addOffset(1, idOffset);\n        
fbb.addOffset(2, contentOffset);\n        fbb.addOffset(3, metadataOffset);\n        fbb.addOffset(4, embeddingOffset);\n        fbb.finish(fbb.endTable());\n        return object.internalId;\n      },\n      objectFromFB: (obx.Store store, ByteData fbData) {\n        final buffer = fb.BufferContext(fbData);\n        final rootOffset = buffer.derefObject(0);\n        final internalIdParam = const fb.Int64Reader().vTableGet(\n          buffer,\n          rootOffset,\n          4,\n          0,\n        );\n        final idParam = const fb.StringReader(\n          asciiOptimization: true,\n        ).vTableGet(buffer, rootOffset, 6, '');\n        final contentParam = const fb.StringReader(\n          asciiOptimization: true,\n        ).vTableGet(buffer, rootOffset, 8, '');\n        final metadataParam = const fb.StringReader(\n          asciiOptimization: true,\n        ).vTableGet(buffer, rootOffset, 10, '');\n        final embeddingParam = const fb.ListReader<double>(\n          fb.Float32Reader(),\n          lazy: false,\n        ).vTableGet(buffer, rootOffset, 12, []);\n        final object = ObjectBoxDocument(\n          internalIdParam,\n          idParam,\n          contentParam,\n          metadataParam,\n          embeddingParam,\n        );\n\n        return object;\n      },\n    ),\n  };\n\n  return obx_int.ModelDefinition(model, bindings);\n}\n\n/// [ObjectBoxDocument] entity fields to define ObjectBox queries.\nclass ObjectBoxDocument_ {\n  /// See [ObjectBoxDocument.internalId].\n  static final internalId = obx.QueryIntegerProperty<ObjectBoxDocument>(\n    _entities![0].properties[0],\n  );\n\n  /// See [ObjectBoxDocument.id].\n  static final id = obx.QueryStringProperty<ObjectBoxDocument>(\n    _entities![0].properties[1],\n  );\n\n  /// See [ObjectBoxDocument.content].\n  static final content = obx.QueryStringProperty<ObjectBoxDocument>(\n    _entities![0].properties[2],\n  );\n\n  /// See [ObjectBoxDocument.metadata].\n  static final metadata = 
obx.QueryStringProperty<ObjectBoxDocument>(\n    _entities![0].properties[3],\n  );\n\n  /// See [ObjectBoxDocument.embedding].\n  static final embedding = obx.QueryHnswProperty<ObjectBoxDocument>(\n    _entities![0].properties[4],\n  );\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/objectbox_stub.dart",
    "content": "// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters, unused_element\nimport 'package:langchain_core/embeddings.dart' show Embeddings;\n\nimport 'base_objectbox_stub.dart';\n\n// This is a stub class\nclass ObjectBoxVectorStore extends BaseObjectBoxVectorStore<ObjectBoxDocument> {\n  ObjectBoxVectorStore._({required super.embeddings})\n    : super(\n        box: null,\n        createEntity: null,\n        createDocument: null,\n        getIdProperty: null,\n        getEmbeddingProperty: null,\n      );\n\n  factory ObjectBoxVectorStore.open({\n    required Embeddings embeddings,\n    required final int dimensions,\n    final String? directory,\n    final int? maxDBSizeInKB,\n    final int? maxDataSizeInKB,\n    final int? fileMode,\n    final int? maxReaders,\n    final bool queriesCaseSensitiveDefault = true,\n    final String? macosApplicationGroup,\n  }) {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n\n  void close() {\n    throw UnsupportedError('ObjectBox is not supported on web platform.');\n  }\n}\n\n// This is a stub class\nclass ObjectBoxDocument {\n  ObjectBoxDocument(\n    this.internalId,\n    this.id,\n    this.content,\n    this.metadata,\n    this.embedding,\n  );\n\n  int internalId = 0;\n  String id;\n  String content;\n  String metadata;\n  List<double> embedding;\n}\n\n// This is a stub class\nclass ObjectBoxDocumentProps {\n  static const internalId = null;\n  static const id = null;\n  static const content = null;\n  static const metadata = null;\n  static const embedding = null;\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/types.dart",
    "content": "import 'package:langchain_core/vector_stores.dart';\nimport 'package:objectbox/objectbox.dart' show Condition;\n\n/// {@template objectbox_similarity_search}\n/// ObjectBox similarity search config.\n///\n/// ObjectBox supports filtering queries by id, content or metadata using\n/// [Condition]. You can define the filter condition in the [filterCondition]\n/// parameter.\n///\n/// Example:\n/// ```dart\n/// ObjectBoxSimilaritySearch(\n///   k: 10,\n///   scoreThreshold: 1.3,\n///   filterCondition: ObjectBoxDocumentProps.metadata.contains('cat'),\n/// );\n/// ```\n/// {@endtemplate}\nclass ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch {\n  /// {@macro objectbox_similarity_search}\n  ObjectBoxSimilaritySearch({\n    super.k = 4,\n    super.scoreThreshold,\n    final Condition<Object>? filterCondition,\n  }) : super(\n         filter: filterCondition != null ? {'filter': filterCondition} : null,\n       );\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/objectbox/types_stub.dart",
    "content": "// ignore_for_file: public_member_api_docs, avoid_unused_constructor_parameters\nimport 'package:langchain_core/vector_stores.dart';\n\n// This is a stub class\nclass ObjectBoxSimilaritySearch extends VectorStoreSimilaritySearch {\n  ObjectBoxSimilaritySearch({\n    super.k = 0,\n    super.scoreThreshold,\n    Object? filterCondition,\n  }) : super(filter: null);\n}\n"
  },
  {
    "path": "packages/langchain_community/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'objectbox/ob.dart';\n"
  },
  {
    "path": "packages/langchain_community/pubspec.yaml",
    "content": "name: langchain_community\ndescription: LangChain.dart third-party integrations that don't have a dedicated package.\nversion: 0.4.0+2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_community\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_community\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  beautiful_soup_dart: ^0.3.0\n  cross_file: ^0.3.4+2\n  csv: ^7.1.0\n  flat_buffers: ^25.9.23\n  glob: ^2.1.3\n  http: ^1.5.0\n  json_path: ^0.9.0\n  langchain_core: 0.4.1\n  math_expressions: ^3.0.0\n  meta: ^1.16.0\n  objectbox: ^5.1.0\n  path: ^1.9.1\n  tavily_dart: ^0.2.2\n  uuid: ^4.5.1\n\ndev_dependencies:\n  build_runner: ^2.5.4\n  langchain_openai: ^0.8.1+1\n  objectbox_generator: ^5.1.0\n  test: ^1.26.2\n\nobjectbox:\n  output_dir: src/vector_stores/objectbox\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example.txt",
    "content": "Foo\nBar\nBaz\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example_1.csv",
    "content": "Foo,Bar,Baz\nFoo,Bar,Baz\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example_1.json",
    "content": "{\n  \"text\": \"Foo\\nBar\\nBaz\\n\"\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example_2.csv",
    "content": "category,author,title,price\nreference,\"Nigel Rees\",\"Sayings of the Century\",8.95\nfiction,\"Evelyn Waugh\",\"Sword of Honour\",12.99\nfiction,\"Herman Melville\",\"Moby Dick\",8.99\nfiction,\"J. R. R. Tolkien\",\"The Lord of the Rings\",22.99"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example_2.json",
    "content": "{\n  \"store\": {\n    \"book\": [\n      {\n        \"category\": \"reference\",\n        \"author\": \"Nigel Rees\",\n        \"title\": \"Sayings of the Century\",\n        \"price\": 8.95\n      },\n      {\n        \"category\": \"fiction\",\n        \"author\": \"Evelyn Waugh\",\n        \"title\": \"Sword of Honour\",\n        \"price\": 12.99\n      },\n      {\n        \"category\": \"fiction\",\n        \"author\": \"Herman Melville\",\n        \"title\": \"Moby Dick\",\n        \"isbn\": \"0-553-21311-3\",\n        \"price\": 8.99\n      },\n      {\n        \"category\": \"fiction\",\n        \"author\": \"J. R. R. Tolkien\",\n        \"title\": \"The Lord of the Rings\",\n        \"isbn\": \"0-395-19395-8\",\n        \"price\": 22.99\n      }\n    ],\n    \"bicycle\": {\n      \"color\": \"red\",\n      \"price\": 19.95\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/assets/example_2.tsv",
    "content": "category\tauthor\ttitle\tprice\nreference\tNigel Rees\tSayings of the Century\t8.95\nfiction\tEvelyn Waugh\tSword of Honour\t12.99\nfiction\tHerman Melville\tMoby Dick\t8.99\nfiction\tJ. R. R. Tolkien\tThe Lord of the Rings\t22.99\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/csv_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('CsvLoader tests', () {\n    test('Test that a csv file can be loaded', () {\n      const filePath = './test/document_loaders/assets/example_1.csv';\n\n      const expectedDoc = Document(\n        pageContent: 'Foo: Foo\\nBar: Bar\\nBaz: Baz',\n        metadata: {'source': filePath},\n      );\n\n      final loader = CsvLoader(filePath);\n\n      expect(loader.lazyLoad(), emitsInOrder([expectedDoc, emitsDone]));\n    });\n\n    test('Test field names override', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(\n        filePath,\n        fieldsOverride: ['cat', 'auth', 'tit', 'cost'],\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          const Document(\n            pageContent:\n                'cat: reference\\n'\n                'auth: Nigel Rees\\n'\n                'tit: Sayings of the Century\\n'\n                'cost: 8.95',\n            metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'cat: fiction\\n'\n                'auth: Evelyn Waugh\\n'\n                'tit: Sword of Honour\\n'\n                'cost: 12.99',\n            metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'cat: fiction\\n'\n                'auth: Herman Melville\\n'\n                'tit: Moby Dick\\n'\n                'cost: 8.99',\n            metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'cat: fiction\\n'\n                'auth: J. R. R. 
Tolkien\\n'\n                'tit: The Lord of the Rings\\n'\n                'cost: 22.99',\n            metadata: {'source': filePath},\n          ),\n          emitsDone,\n        ]),\n      );\n    });\n\n    test('Test extracting field', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(filePath, fields: ['category']);\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.pageContent == 'category: reference',\n          (final Document doc) => doc.pageContent == 'category: fiction',\n          (final Document doc) => doc.pageContent == 'category: fiction',\n          (final Document doc) => doc.pageContent == 'category: fiction',\n        ]),\n      );\n    });\n\n    test('Test extracting field and override name', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(\n        filePath,\n        fields: ['category'],\n        fieldsOverride: ['cat'],\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.pageContent == 'cat: reference',\n          (final Document doc) => doc.pageContent == 'cat: fiction',\n          (final Document doc) => doc.pageContent == 'cat: fiction',\n          (final Document doc) => doc.pageContent == 'cat: fiction',\n        ]),\n      );\n    });\n\n    test('test invalid column throws exception', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(filePath, fields: ['isbn']);\n\n      expect(() async => loader.load(), throwsA(isA<AssertionError>()));\n    });\n\n    test('test custom source metadata', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(\n        filePath,\n        fields: ['category'],\n        sourceField: 'title',\n      );\n\n      expect(\n        
loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) =>\n              doc.pageContent == 'category: reference' &&\n              doc.metadata['source'] == 'Sayings of the Century',\n          (final Document doc) =>\n              doc.pageContent == 'category: fiction' &&\n              doc.metadata['source'] == 'Sword of Honour',\n          (final Document doc) =>\n              doc.pageContent == 'category: fiction' &&\n              doc.metadata['source'] == 'Moby Dick',\n          (final Document doc) =>\n              doc.pageContent == 'category: fiction' &&\n              doc.metadata['source'] == 'The Lord of the Rings',\n        ]),\n      );\n    });\n\n    test('Test metadata builder', () {\n      const filePath = './test/document_loaders/assets/example_2.csv';\n\n      final loader = CsvLoader(\n        filePath,\n        metadataBuilder: (final Map<String, dynamic> row, final rowMetadata) {\n          return {'source': '${row['title']} | ${row['price']}'};\n        },\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) =>\n              doc.metadata['source'] == 'Sayings of the Century | 8.95',\n          (final Document doc) =>\n              doc.metadata['source'] == 'Sword of Honour | 12.99',\n          (final Document doc) => doc.metadata['source'] == 'Moby Dick | 8.99',\n          (final Document doc) =>\n              doc.metadata['source'] == 'The Lord of the Rings | 22.99',\n        ]),\n      );\n    });\n\n    test('Test TSV file', () {\n      const filePath = './test/document_loaders/assets/example_2.tsv';\n\n      final loader = CsvLoader(filePath, fieldDelimiter: '\\t');\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          const Document(\n            pageContent:\n                'category: reference\\n'\n                'author: Nigel Rees\\n'\n                'title: Sayings of the Century\\n'\n                'price: 8.95',\n    
        metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'category: fiction\\n'\n                'author: Evelyn Waugh\\n'\n                'title: Sword of Honour\\n'\n                'price: 12.99',\n            metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'category: fiction\\n'\n                'author: Herman Melville\\n'\n                'title: Moby Dick\\n'\n                'price: 8.99',\n            metadata: {'source': filePath},\n          ),\n          const Document(\n            pageContent:\n                'category: fiction\\n'\n                'author: J. R. R. Tolkien\\n'\n                'title: The Lord of the Rings\\n'\n                'price: 22.99',\n            metadata: {'source': filePath},\n          ),\n          emitsDone,\n        ]),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/directory_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'dart:io';\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('DirectoryLoader tests', () {\n    test(\n      'Test loading directory with multiple file types and multiple documents per file',\n      () async {\n        const filePath = './test/document_loaders/assets';\n        const loader = DirectoryLoader(filePath, glob: '*.{txt,json,csv,tsv}');\n\n        final List<Document> docs = await loader.lazyLoad().toList();\n\n        expect(docs, isNotEmpty);\n\n        final Set<String> processedFiles = docs\n            .map((doc) => doc.metadata['source'] as String)\n            .toSet();\n\n        final directory = Directory(filePath);\n\n        final Set<String> expectedFiles = directory\n            .listSync()\n            .where((entity) {\n              return entity is File &&\n                  RegExp(r'\\.(txt|json|csv|tsv)$').hasMatch(entity.path);\n            })\n            .map((file) => file.path)\n            .toSet();\n\n        expect(processedFiles, equals(expectedFiles));\n\n        final textDocs = docs\n            .where((doc) => doc.metadata['name'] == 'example.txt')\n            .toList();\n\n        expect(textDocs.length, greaterThanOrEqualTo(1));\n\n        expect(\n          textDocs.any((doc) => doc.pageContent.contains('Foo\\nBar\\nBaz\\n')),\n          isTrue,\n          reason: 'Text content should match for example.txt',\n        );\n\n        final jsonDocs = docs\n            .where((doc) => doc.metadata['name'] == 'example_2.json')\n            .toList();\n\n        expect(jsonDocs.length, greaterThanOrEqualTo(1));\n\n        expect(\n          jsonDocs.any(\n            (doc) => doc.pageContent.contains('Sayings of the Century'),\n          ),\n          isTrue,\n          reason: 'JSON content should match for example_2.json',\n        );\n  
    },\n    );\n\n    test('Test directory loader with specific loader map', () {\n      const filePath = './test/document_loaders/assets';\n\n      final loader = DirectoryLoader(\n        filePath,\n        glob: '*.json',\n        loaderMap: {'.json': (path) => JsonLoader(path, jpSchema: r'$..text')},\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.pageContent == 'Foo\\nBar\\nBaz\\n',\n          emitsDone,\n        ]),\n      );\n    });\n\n    test('Test directory loader with sample size', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(\n        filePath,\n        glob: '*.{txt,json,csv}',\n        sampleSize: 2,\n        randomizeSample: true,\n        sampleSeed: 42,\n      );\n\n      final loadedDocs = await loader.load();\n\n      expect(loadedDocs, hasLength(2));\n    });\n\n    test('Test directory loader with custom metadata builder', () {\n      const filePath = './test/document_loaders/assets';\n\n      final loader = DirectoryLoader(\n        filePath,\n        glob: '*.txt',\n        metadataBuilder: (file, defaultMetadata) {\n          return {...defaultMetadata, 'custom_info': 'custom_value'};\n        },\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) {\n            expect(doc.pageContent, 'Foo\\nBar\\nBaz\\n');\n            expect(doc.metadata['custom_info'], 'custom_value');\n            return true;\n          },\n          emitsDone,\n        ]),\n      );\n    });\n\n    test('Test directory loader with non-existent directory', () {\n      const filePath = './non_existent_directory';\n\n      const loader = DirectoryLoader(filePath);\n\n      expect(\n        () async => loader.lazyLoad().toList(),\n        throwsA(isA<FileSystemException>()),\n      );\n    });\n\n    test('Test directory loader with empty directory', () async {\n      final tempDir 
= await Directory.systemTemp.createTemp('empty_test_dir');\n\n      try {\n        final loader = DirectoryLoader(tempDir.path, glob: '*.txt');\n\n        final loadedDocs = await loader.load();\n\n        expect(loadedDocs, isEmpty);\n      } finally {\n        await tempDir.delete();\n      }\n    });\n\n    test('Test directory loader with exclude patterns', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(\n        filePath,\n        glob: '*.txt',\n        exclude: ['example.txt'],\n      );\n\n      final loadedDocs = await loader.load();\n\n      expect(\n        loadedDocs.any((doc) => doc.metadata['name'] == 'example.txt'),\n        isFalse,\n      );\n    });\n\n    test('Test directory loader non-recursive mode', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(filePath, glob: '*.txt', recursive: false);\n\n      final loadedDocs = await loader.load();\n\n      expect(loadedDocs, isNotEmpty);\n    });\n\n    test('Sample size of 0 loads all files', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(\n        filePath,\n        glob: '*.{txt,json}',\n        sampleSize: 0,\n      );\n\n      final loadedDocs = await loader.load();\n\n      final allFiles = Directory(filePath)\n          .listSync()\n          .where(\n            (entity) =>\n                entity is File &&\n                RegExp(r'\\.(txt|json)$').hasMatch(entity.path),\n          )\n          .length;\n\n      expect(loadedDocs, hasLength(allFiles));\n    });\n\n    test('Sample size larger than total files loads all files', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(\n        filePath,\n        glob: '*.{txt,json}',\n        sampleSize: 1000,\n      );\n\n      final loadedDocs = await loader.load();\n\n      final allFiles = 
Directory(filePath)\n          .listSync()\n          .where(\n            (entity) =>\n                entity is File &&\n                RegExp(r'\\.(txt|json)$').hasMatch(entity.path),\n          )\n          .length;\n\n      expect(loadedDocs, hasLength(allFiles));\n    });\n\n    test('Reproducible random sampling', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader1 = DirectoryLoader(\n        filePath,\n        glob: '*.{txt,json}',\n        sampleSize: 2,\n        randomizeSample: true,\n        sampleSeed: 42,\n      );\n\n      const loader2 = DirectoryLoader(\n        filePath,\n        glob: '*.{txt,json}',\n        sampleSize: 2,\n        randomizeSample: true,\n        sampleSeed: 42,\n      );\n\n      final docs1 = await loader1.load();\n\n      final docs2 = await loader2.load();\n\n      expect(\n        docs1.map((doc) => doc.metadata['source']),\n        equals(docs2.map((doc) => doc.metadata['source'])),\n      );\n    });\n\n    test('Loader map with no matching loader uses fallback', () async {\n      const filePath = './test/document_loaders/assets';\n\n      const loader = DirectoryLoader(\n        filePath,\n        glob: '*.md',\n        loaderMap: {'.xml': TextLoader.new},\n      );\n\n      final loadedDocs = await loader.load();\n\n      expect(loadedDocs, isEmpty);\n    });\n\n    test('Mixed hidden and non-hidden files', () async {\n      final tempDir = await Directory.systemTemp.createTemp('mixed_files_test');\n\n      try {\n        await File(\n          '${tempDir.path}/.hidden.txt',\n        ).writeAsString('hidden content');\n\n        await File(\n          '${tempDir.path}/visible.txt',\n        ).writeAsString('visible content');\n\n        final loader1 = DirectoryLoader(\n          tempDir.path,\n          glob: '*.txt',\n          loadHidden: false,\n        );\n\n        final docs1 = await loader1.load();\n\n        expect(\n          docs1.any((doc) => doc.metadata['name'] == 
'.hidden.txt'),\n          isFalse,\n        );\n\n        final loader2 = DirectoryLoader(\n          tempDir.path,\n          glob: '*.txt',\n          loadHidden: true,\n        );\n\n        final docs2 = await loader2.load();\n\n        expect(\n          docs2.any((doc) => doc.metadata['name'] == '.hidden.txt'),\n          isTrue,\n        );\n      } finally {\n        await tempDir.delete(recursive: true);\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/json_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('JsonLoader tests', () {\n    test('Test that a json file can be loaded', () {\n      const filePath = './test/document_loaders/assets/example_1.json';\n\n      const loader = JsonLoader(filePath, jpSchema: r'$..text');\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) {\n            expect(doc.pageContent, 'Foo\\nBar\\nBaz\\n');\n            expect(doc.metadata['source'], filePath);\n            expect(doc.metadata['name'], 'example_1.json');\n            expect(doc.metadata['size'], 32);\n            expect(doc.metadata['lastModified'], isA<int>());\n            return true;\n          },\n          emitsDone,\n        ]),\n      );\n    });\n\n    test('test extracting numeric field', () {\n      const filePath = './test/document_loaders/assets/example_2.json';\n\n      const loader = JsonLoader(filePath, jpSchema: r'$..price');\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.pageContent == '8.95',\n          (final Document doc) => doc.pageContent == '12.99',\n          (final Document doc) => doc.pageContent == '8.99',\n          (final Document doc) => doc.pageContent == '22.99',\n          (final Document doc) => doc.pageContent == '19.95',\n        ]),\n      );\n    });\n\n    test('test extracting string field', () {\n      const filePath = './test/document_loaders/assets/example_2.json';\n\n      const loader = JsonLoader(filePath, jpSchema: r'$..category');\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.pageContent == 'reference',\n          (final Document doc) => doc.pageContent == 'fiction',\n          (final Document doc) => doc.pageContent == 'fiction',\n 
         (final Document doc) => doc.pageContent == 'fiction',\n        ]),\n      );\n    });\n\n    test('test invalid jpSchema throws exception', () {\n      const filePath = './test/document_loaders/assets/example_2.json';\n\n      const loader = JsonLoader(filePath, jpSchema: r'.[]');\n\n      expect(() async => loader.load(), throwsException);\n    });\n\n    test('test custom metadata', () {\n      const filePath = './test/document_loaders/assets/example_2.json';\n\n      final loader = JsonLoader(\n        filePath,\n        jpSchema: r'$..category',\n        metadataBuilder: (final matchValue, final fileMetadata) {\n          return {'length': matchValue.toString().length};\n        },\n      );\n\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) => doc.metadata['length'] == 9,\n          (final Document doc) => doc.metadata['length'] == 7,\n          (final Document doc) => doc.metadata['length'] == 7,\n          (final Document doc) => doc.metadata['length'] == 7,\n        ]),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/text_test.dart",
    "content": "import 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('TextLoader tests', () {\n    test('Test that a text file can be loaded', () {\n      const filePath = './test/document_loaders/assets/example.txt';\n      const loader = TextLoader(filePath);\n      expect(\n        loader.lazyLoad(),\n        emitsInOrder([\n          (final Document doc) {\n            expect(doc.pageContent, 'Foo\\nBar\\nBaz\\n');\n            expect(doc.metadata['source'], filePath);\n            expect(doc.metadata['name'], 'example.txt');\n            expect(doc.metadata['size'], 12);\n            expect(doc.metadata['lastModified'], isA<int>());\n            return true;\n          },\n          emitsDone,\n        ]),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/document_loaders/web_test.dart",
    "content": "import 'package:langchain_community/langchain_community.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('WebBaseLoader tests', () {\n    test('Test that a web page can be loaded', () async {\n      const url = 'https://en.wikipedia.org/wiki/Wikipedia';\n\n      const loader = WebBaseLoader([url]);\n      final loadedDocs = await loader.load();\n\n      expect(loadedDocs, hasLength(1));\n\n      final loadedDoc = loadedDocs.first;\n      expect(loadedDoc.pageContent, isNot(contains('<html')));\n      expect(loadedDoc.pageContent, isNot(contains('<head')));\n      expect(loadedDoc.pageContent, isNot(contains('<body')));\n      expect(loadedDoc.pageContent, isNot(contains('<style')));\n      expect(loadedDoc.pageContent, isNot(contains('<script')));\n\n      final metadata = loadedDoc.metadata;\n      expect(metadata['source'], url);\n      expect(metadata['title'], contains('Wikipedia'));\n      expect(metadata['language'], 'en');\n    });\n\n    test('Test that non-ASCII characters are handled correctly', () async {\n      const url = 'http://web.tccf.org.tw/lib/addon.php?act=post&id=4975';\n\n      const loader = WebBaseLoader([url]);\n      final loadedDocs = await loader.load();\n\n      expect(loadedDocs, hasLength(1));\n\n      final loadedDoc = loadedDocs.first;\n      expect(loadedDoc.pageContent, contains('子宮內膜癌的介紹及治療現況'));\n\n      final metadata = loadedDoc.metadata;\n      expect(metadata['source'], url);\n      expect(metadata['title'], contains('子宮內膜癌的介紹及治療現況'));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/tools/calculator_test.dart",
    "content": "import 'package:langchain_community/langchain_community.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('CalculatorTool tests', () {\n    test('Calculate expressions', () async {\n      final calculator = CalculatorTool();\n\n      expect(await calculator.invoke('1 + 1'), '2.0');\n      expect(await calculator.invoke('1 - 1'), '0.0');\n      expect(await calculator.invoke('10*1 - (-5)'), '15.0');\n      expect(\n        double.parse(await calculator.invoke('(2^2 + cos(3.14)) / 3')),\n        closeTo(1.0, 0.000001),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/tools/tavily_test.dart",
    "content": "import 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('TavilySearchResultsTool tests', () {\n    test('Invoke TavilySearchResultsTool', () async {\n      final tool = TavilySearchResultsTool(\n        apiKey: Platform.environment['TAVILY_API_KEY'],\n      );\n      final res = await tool.invoke('What is the weather like in New York?');\n      expect(res.results, isNotEmpty);\n      final jsonString = res.toString();\n      expect(() => json.decode(jsonString), returnsNormally);\n      tool.close();\n    });\n  });\n\n  group('TavilyAnswerTool tests', () {\n    test('Invoke TavilyAnswerTool', () async {\n      final tool = TavilyAnswerTool(\n        apiKey: Platform.environment['TAVILY_API_KEY'],\n      );\n      final res = await tool.invoke('What is the weather like in New York?');\n      expect(res, isNotEmpty);\n      tool.close();\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_community/test/vector_stores/objectbox/objectbox_test.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:objectbox/objectbox.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  late final OpenAIEmbeddings embeddings;\n  late final ObjectBoxVectorStore vectorStore;\n\n  setUpAll(() {\n    embeddings = OpenAIEmbeddings(\n      apiKey: Platform.environment['OPENAI_API_KEY'],\n    );\n    vectorStore = ObjectBoxVectorStore.open(\n      embeddings: embeddings,\n      dimensions: 1536,\n      directory: 'test/vector_stores/objectbox',\n    );\n  });\n\n  group('ObjectBoxVectorStore tests', () {\n    test('Test add new vectors', () async {\n      final res = await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: '1',\n            pageContent: 'The cat sat on the mat',\n            metadata: {'cat': 'animal'},\n          ),\n          const Document(\n            id: '2',\n            pageContent: 'The dog chased the ball.',\n            metadata: {'cat': 'animal'},\n          ),\n          const Document(\n            id: '3',\n            pageContent: 'The boy ate the apple.',\n            metadata: {'cat': 'person'},\n          ),\n          const Document(\n            id: '4',\n            pageContent: 'The girl drank the milk.',\n            metadata: {'cat': 'person'},\n          ),\n          const Document(\n            id: '5',\n            pageContent: 'The sun is shining.',\n            metadata: {'cat': 'natural'},\n          ),\n        ],\n      );\n\n      expect(res.length, 5);\n    });\n\n    test('Test query return 1 result', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'Is it raining?',\n        config: ObjectBoxSimilaritySearch(k: 1),\n      );\n      expect(res.length, 1);\n      expect(res.first.id, '5');\n    });\n\n    test('Test query with 
scoreThreshold', () async {\n      final res = await vectorStore.similaritySearchWithScores(\n        query: 'Is it raining?',\n        config: ObjectBoxSimilaritySearch(scoreThreshold: 0.3),\n      );\n      for (final (_, score) in res) {\n        expect(score, greaterThan(0.3));\n      }\n    });\n\n    test('Test query with equality filter', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What are they eating?',\n        config: ObjectBoxSimilaritySearch(\n          k: 10,\n          scoreThreshold: 1.3,\n          filterCondition: ObjectBoxDocumentProps.metadata.contains('person'),\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['cat'], 'person');\n      }\n    });\n\n    test('Test query with filter with multiple operators', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What are they eating?',\n        config: ObjectBoxSimilaritySearch(\n          k: 10,\n          filterCondition: ObjectBoxDocumentProps.metadata\n              .contains('animal')\n              .or(ObjectBoxDocumentProps.metadata.contains('natural')),\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['cat'], isNot('person'));\n      }\n    });\n\n    test('Test delete document', () async {\n      await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: '9999',\n            pageContent: 'This document will be deleted',\n            metadata: {'cat': 'xxx'},\n          ),\n        ],\n      );\n      final res1 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: ObjectBoxSimilaritySearch(\n          filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'),\n        ),\n      );\n      expect(res1.length, 1);\n      expect(res1.first.id, '9999');\n\n      await vectorStore.delete(ids: ['9999']);\n      final res2 = await vectorStore.similaritySearch(\n        query: 'Deleted 
doc',\n        config: ObjectBoxSimilaritySearch(\n          filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'),\n        ),\n      );\n      expect(res2.length, 0);\n    });\n\n    test('Test delete where', () async {\n      await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: '9999',\n            pageContent: 'This document will be deleted',\n            metadata: {'cat': 'xxx'},\n          ),\n        ],\n      );\n      final res1 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: ObjectBoxSimilaritySearch(\n          filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'),\n        ),\n      );\n      expect(res1.length, 1);\n      expect(res1.first.id, '9999');\n\n      await vectorStore.deleteWhere(\n        ObjectBoxDocumentProps.metadata.contains('xxx'),\n      );\n      final res2 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: ObjectBoxSimilaritySearch(\n          filterCondition: ObjectBoxDocumentProps.metadata.contains('xxx'),\n        ),\n      );\n      expect(res2.length, 0);\n    });\n  });\n\n  group('ObjectBoxSimilaritySearch', () {\n    test('ObjectBoxSimilaritySearch fields', () {\n      final config = ObjectBoxSimilaritySearch(\n        k: 5,\n        scoreThreshold: 0.8,\n        filterCondition: ObjectBoxDocumentProps.metadata.contains('style1'),\n      );\n      expect(config.k, 5);\n      expect(config.scoreThreshold, 0.8);\n      expect(config.filter?['filter'], isA<Condition<ObjectBoxDocument>>());\n    });\n  });\n\n  tearDownAll(() async {\n    embeddings.close();\n    vectorStore.close();\n    await File('test/vector_stores/objectbox/data.mdb').delete();\n    await File('test/vector_stores/objectbox/lock.mdb').delete();\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_core/CHANGELOG.md",
    "content": "## 0.4.1\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n## 0.4.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.4.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.3.9\n\n - Update a dependency to the latest release.\n\n## 0.3.8\n\n - **FEAT**: Make CreateChatCompletionStreamResponse.choices field nullable to support Groq's OpenAI-compatible API ([#742](https://github.com/davidmigloz/langchain_dart/issues/742)). ([76fbbdc6](https://github.com/davidmigloz/langchain_dart/commit/76fbbdc6f78e83f1f622ed73ff4b27b37a4f744b))\n - **FIX**: Add multi-LLM compatibility for Tool.fromFunction getInputFromJson ([#738](https://github.com/davidmigloz/langchain_dart/issues/738)). ([291a0efc](https://github.com/davidmigloz/langchain_dart/commit/291a0efcebe1696f609ecbd0b803cc9324474db5))\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). 
([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.3.7\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **FEAT**: Add to/fromMap serialization to ChatMessage, PromptValue & ChatHistory ([#681](https://github.com/davidmigloz/langchain_dart/issues/681)). ([d239c7c7](https://github.com/davidmigloz/langchain_dart/commit/d239c7c7b4a1504559e475466be7f176521a0473))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **FIX**: RunnableMap doesn't invoke multiple Runnables in parallel ([#649](https://github.com/davidmigloz/langchain_dart/issues/649)). ([fc722d85](https://github.com/davidmigloz/langchain_dart/commit/fc722d85eef6644f7593dd26c7fd55a56615595b))\n\n## 0.3.6+1\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.3.6\n\n - **FEAT**: Add retry support for Runnables ([#540](https://github.com/davidmigloz/langchain_dart/issues/540)). ([1099725d](https://github.com/davidmigloz/langchain_dart/commit/1099725d88de4103381edad533209a9a098bdb7f))\n\n## 0.3.5\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n - **FEAT**: Support OpenAI's strict mode for tool calling in ChatOpenAI ([#536](https://github.com/davidmigloz/langchain_dart/issues/536)). 
([71623f49](https://github.com/davidmigloz/langchain_dart/commit/71623f490289e63252165167305e00038d800be1))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n\n## 0.3.4\n\n - **FEAT**: Add Fallback support for Runnables ([#501](https://github.com/davidmigloz/langchain_dart/issues/501)). ([5887858d](https://github.com/davidmigloz/langchain_dart/commit/5887858d667d43c49978291ea98a92cab0069971))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n\n## 0.3.3\n\n - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n - **FEAT**: Update ChatResult.id concat logic ([#477](https://github.com/davidmigloz/langchain_dart/issues/477)). ([44c7fafd](https://github.com/davidmigloz/langchain_dart/commit/44c7fafd934bf6517e285830b1ca98282127cb7d))\n\n## 0.3.2\n\n - **REFACTOR**: Migrate to langchaindart.dev domain ([#434](https://github.com/davidmigloz/langchain_dart/issues/434)). ([358f79d6](https://github.com/davidmigloz/langchain_dart/commit/358f79d6e0bae2ecd657aeed2eae7fad16d97c18))\n - **FIX**: Stream errors are not propagated by StringOutputParser ([#440](https://github.com/davidmigloz/langchain_dart/issues/440)). 
([496b11cc](https://github.com/davidmigloz/langchain_dart/commit/496b11cca9bbf9892c425e49138562537398bc70))\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n\n## 0.3.1\n\n - **FEAT**: Add equals to ChatToolChoiceForced ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n - **FIX**: Fix finishReason null check ([#406](https://github.com/davidmigloz/langchain_dart/issues/406)). ([5e2b0ecc](https://github.com/davidmigloz/langchain_dart/commit/5e2b0eccd54c6c1dc15af8ff6d62c395f12fbd90))\n\n## 0.3.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n - **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n## 0.2.0+1\n\n - **FIX**: Allow async functions in Runnable.mapInput ([#396](https://github.com/davidmigloz/langchain_dart/issues/396)). ([e4c35092](https://github.com/davidmigloz/langchain_dart/commit/e4c3509267b7be28e2b0fa334a9255baadabfb6a))\n\n## 0.2.0\n\n> Note: This release has breaking changes.  
\n> If you are using `Runnable.fromFunction` check the [migration guide](https://github.com/davidmigloz/langchain_dart/issues/394).\n\n - **FEAT** Add support for RunnableRouter ([#386](https://github.com/davidmigloz/langchain_dart/issues/386)). ([827e262](https://github.com/davidmigloz/langchain_dart/commit/827e2627535941d702e8fbe300ca1426ddf50efe))\n - **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n - **FEAT**: Add support for JsonOutputParser ([#392](https://github.com/davidmigloz/langchain_dart/issues/392)). ([c6508f0f](https://github.com/davidmigloz/langchain_dart/commit/c6508f0fadde3fd4d93accbcae5cea37b7beca20))\n - **FEAT**: Reduce input stream for PromptTemplate, LLM, ChatModel, Retriever and Tool ([#388](https://github.com/davidmigloz/langchain_dart/issues/388)). ([b59bcd40](https://github.com/davidmigloz/langchain_dart/commit/b59bcd409f4904fb2e16f928b3c7206a186ab3f4))\n - **BREAKING** **FEAT**: Support different logic for streaming in RunnableFunction ([#394](https://github.com/davidmigloz/langchain_dart/issues/394)). ([8bb2b8ed](https://github.com/davidmigloz/langchain_dart/commit/8bb2b8ede18bfe3a4f266b78ca32f1dfb83db1b1))\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). 
([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n - **FEAT**: Add reduceOutputStream option to StringOutputParser ([#368](https://github.com/davidmigloz/langchain_dart/issues/368)). ([7f9a9fae](https://github.com/davidmigloz/langchain_dart/commit/7f9a9faeef93685ff810a88bbfe866da4b843369))\n\n## 0.0.1-dev.2\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n\n## 0.0.1-dev.1\n\n- Bootstrap package.\n"
  },
  {
    "path": "packages/langchain_core/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_core/README.md",
    "content": "# 🦜️🔗 LangChain.dart Core\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_core](https://img.shields.io/pub/v/langchain_core.svg)](https://pub.dev/packages/langchain_core)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nCore package for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## What is LangChain.dart Core?\n\nLangChain.dart Core contains the core abstractions that power the rest of the [LangChain.dart](https://github.com/davidmigloz/langchain_dart) ecosystem as well as LangChain Expression Language (LCEL) as a way to compose them together.\n\nThese abstractions are designed to be as modular and simple as possible. 
Examples of these abstractions include those for language models, document loaders, embedding models, vector stores, retrievers, etc.\n\nLangChain Expression Language (LCEL) is a _declarative language_ for composing components (Runnables) into sequences (or DAGs), covering the most common patterns when building with LLMs.\n\n> Depend on this package to build frameworks on top of LangChain.dart or to interoperate with it.\n\nFor full documentation see the [API reference](https://pub.dev/documentation/langchain_core/latest/).\n\n![LangChain.dart packages](https://raw.githubusercontent.com/davidmigloz/langchain_dart/main/docs/img/langchain_packages.png)\n\n## Why build on top of LangChain Core?\n\nThe whole LangChain.dart ecosystem is built on top of LangChain Core, so you're in good company when building on top of it. Some of the benefits:\n\n- **Modularity**: LangChain Core is designed around abstractions that are independent of each other, and not tied to any specific model provider.\n- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.\n- **Battle-tested**: LangChain Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.\n- **Community**: LangChain Core is developed in the open, and we welcome contributions from the community.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_core/example/langchain_core_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/agents.dart",
    "content": "/// Contains core abstractions related to agents.\nlibrary;\n\nexport 'src/agents/agents.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/chains.dart",
    "content": "/// Contains core abstractions related to chains.\nlibrary;\n\nexport 'src/chains/chains.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/chat_history.dart",
    "content": "/// Contains core abstractions related to chat history.\nlibrary;\n\nexport 'src/chat_history/chat_history.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/chat_models.dart",
    "content": "/// Contains core abstractions related to chat models.\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/document_loaders.dart",
    "content": "/// Contains core abstractions related to document loaders.\nlibrary;\n\nexport 'src/document_loaders/document_loaders.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/documents.dart",
    "content": "/// Contains core abstractions related to documents.\nlibrary;\n\nexport 'src/documents/documents.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/embeddings.dart",
    "content": "/// Contains core abstractions related to embeddings.\nlibrary;\n\nexport 'src/embeddings/embeddings.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/exceptions.dart",
    "content": "/// Contains core abstractions related to exceptions.\nlibrary;\n\nexport 'src/exceptions/exceptions.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/langchain.dart",
    "content": "/// Contains core abstractions related to LangChain.\nlibrary;\n\nexport 'src/langchain/langchain.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/language_models.dart",
    "content": "/// Contains core abstractions related to language models.\nlibrary;\n\nexport 'src/language_models/language_models.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/llms.dart",
    "content": "/// Contains core abstractions related to LLMs.\nlibrary;\n\nexport 'src/llms/llms.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/memory.dart",
    "content": "/// Contains core abstractions related to memory.\nlibrary;\n\nexport 'src/memory/memory.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/output_parsers.dart",
    "content": "/// Contains core abstractions related to output parsers.\nlibrary;\n\nexport 'src/output_parsers/output_parsers.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/prompts.dart",
    "content": "/// Contains core abstractions related to prompts.\nlibrary;\n\nexport 'src/prompts/prompts.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/retrievers.dart",
    "content": "/// Contains core abstractions related to retrievers.\nlibrary;\n\nexport 'src/retrievers/retrievers.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/runnables.dart",
    "content": "/// Contains core abstractions related to runnables.\nlibrary;\n\nexport 'src/runnables/runnables.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/agents/agents.dart",
    "content": "export 'base.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/agents/base.dart",
    "content": "import '../exceptions/base.dart';\nimport '../runnables/runnables.dart';\nimport '../tools/base.dart';\nimport 'types.dart';\n\n/// {@template agent}\n/// An agent is a component that takes an input and a set of tools and decides\n/// what to do to get to the desired output.\n/// {@endtemplate}\nabstract class Agent {\n  /// {@macro agent}\n  const Agent();\n\n  /// Creates an agent from a [Runnable].\n  static BaseMultiActionAgent fromRunnable(\n    final Runnable<AgentPlanInput, RunnableOptions, List<BaseAgentAction>>\n    runnable, {\n    required final List<Tool> tools,\n  }) {\n    return RunnableAgent(runnable, tools: tools);\n  }\n}\n\n/// {@template base_action_agent}\n/// Base class for action agents.\n/// {@endtemplate}\nabstract class BaseActionAgent extends Agent {\n  /// {@macro base_action_agent}\n  const BaseActionAgent({required this.tools});\n\n  /// The key for the scratchpad (intermediate steps) of the agent.\n  static const agentScratchpadInputKey = 'agent_scratchpad';\n\n  /// Return key for the agent's output.\n  static const agentReturnKey = 'output';\n\n  /// Returns the input keys.\n  Set<String> get inputKeys;\n\n  /// Returns values of the agent.\n  Set<String> get returnValues {\n    return const {agentReturnKey};\n  }\n\n  /// Returns the type of agent.\n  String get agentType;\n\n  /// The tools this agent can use.\n  final List<Tool> tools;\n\n  /// Given the input and previous steps, returns the next action to take.\n  ///\n  /// Returns either [AgentAction] specifying what tool to use or\n  /// [AgentFinish] specifying the agent's final return value.\n  /// The list should always have length 1.\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input);\n\n  /// Return response when agent has been stopped due to max iterations.\n  AgentFinish returnStoppedResponse(\n    final AgentEarlyStoppingMethod earlyStoppingMethod,\n    final List<AgentStep> intermediateSteps,\n  ) {\n    return switch 
(earlyStoppingMethod) {\n      AgentEarlyStoppingMethod.force => const AgentFinish(\n        returnValues: {\n          agentReturnKey: 'Agent stopped due to iteration limit or time limit.',\n        },\n      ),\n      _ => throw LangChainException(\n        message: 'Got unsupported early stopping method: $earlyStoppingMethod.',\n      ),\n    };\n  }\n}\n\n/// {@template base_single_action_agent}\n/// Base class for single action agents.\n/// {@endtemplate}\nabstract class BaseSingleActionAgent extends BaseActionAgent {\n  /// {@macro base_single_action_agent}\n  const BaseSingleActionAgent({required super.tools});\n}\n\n/// {@template base_multi_action_agent}\n/// Base class for multi action agents.\n/// {@endtemplate}\nabstract class BaseMultiActionAgent extends BaseActionAgent {\n  /// {@macro base_multi_action_agent}\n  const BaseMultiActionAgent({required super.tools});\n}\n\n/// {@template runnable_agent}\n/// An agent implemented from a [Runnable].\n/// {@endtemplate}\nclass RunnableAgent extends BaseMultiActionAgent {\n  /// {@macro runnable_agent}\n  const RunnableAgent(this.runnable, {required super.tools});\n\n  /// The runnable that implements the agent.\n  final Runnable<AgentPlanInput, RunnableOptions, List<BaseAgentAction>>\n  runnable;\n\n  @override\n  String get agentType => 'runnable-agent';\n\n  @override\n  Set<String> get inputKeys => const {};\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) {\n    return runnable.invoke(input);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/agents/types.dart",
    "content": "import '../chat_models/types.dart';\n\n/// {@template agent_plan_input}\n/// Input to the agent's plan method.\n/// {@endtemplate}\nclass AgentPlanInput {\n  /// {@macro agent_plan_input}\n  const AgentPlanInput(this.inputs, this.intermediateSteps);\n\n  /// The input values to the agent.\n  final Map<String, dynamic> inputs;\n\n  /// The actions the agent has taken to date, along with their observations.\n  final List<AgentStep> intermediateSteps;\n}\n\n/// {@template base_agent_action}\n/// Base class for agent actions.\n/// {@endtemplate}\nsealed class BaseAgentAction {\n  /// {@macro base_agent_action}\n  const BaseAgentAction();\n}\n\n/// {@template agent_action}\n/// Agent's action to take.\n/// {@endtemplate}\nclass AgentAction extends BaseAgentAction {\n  /// {@macro agent_action}\n  const AgentAction({\n    required this.id,\n    required this.tool,\n    required this.toolInput,\n    this.log = '',\n    this.messageLog = const [],\n  });\n\n  /// The id for the action.\n  final String id;\n\n  /// The name of the tool to use.\n  final String tool;\n\n  /// The input to the tool.\n  final Map<String, dynamic> toolInput;\n\n  /// The log of the action.\n  final String log;\n\n  /// The log of the messages.\n  final List<ChatMessage> messageLog;\n}\n\n/// {@template agent_finish}\n/// Agent's return value.\n/// {@endtemplate}\nclass AgentFinish extends BaseAgentAction {\n  /// {@macro agent_finish}\n  const AgentFinish({required this.returnValues, this.log = ''});\n\n  /// The return values of the agent.\n  final Map<String, dynamic> returnValues;\n\n  /// The log of the action.\n  final String log;\n}\n\n/// {@template agent_step}\n/// A action taken by the agent along with the observation of the action.\n/// {@endtemplate}\nclass AgentStep {\n  /// {@macro agent_step}\n  const AgentStep({required this.action, required this.observation});\n\n  /// The action taken by the agent.\n  final AgentAction action;\n\n  /// The observation of the 
action.\n  final String observation;\n}\n\n/// {@template agent_early_stopping_method}\n/// Method to use for early stopping if the agent never returns [AgentFinish].\n/// {@endtemplate}\nenum AgentEarlyStoppingMethod {\n  /// The executor returns a string saying that it stopped because it met a\n  /// time or iteration limit.\n  force,\n\n  /// Calls the agent's LLM Chain one final time to generate final answer based\n  /// on the previous steps.\n  generate,\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chains/base.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../../exceptions.dart';\nimport '../../langchain.dart';\nimport '../../memory.dart';\nimport 'types.dart';\n\n/// {@template base_chain}\n/// Base class for creating structured sequences of calls to components.\n///\n/// Chains should be used to encode a sequence of calls to components like\n/// models, document retrievers, other chains, etc., and provide a simple\n/// interface to this sequence.\n///\n/// The [BaseChain] interface makes it easy to create apps that are:\n/// - Stateful: add Memory to any Chain to give it state.\n/// - Observable: pass Callbacks to a Chain to execute additional functionality,\n///   like logging, outside the main sequence of component calls.\n/// - Composable: the Chain API is flexible enough that it is easy to combine\n///   Chains with other components, including other Chains.\n///\n/// The main methods exposed by chains are:\n///\n/// - [call] Chains are callable. The [call] method is the primary way to\n///   execute a Chain. This takes inputs as a dictionary and returns a\n///   dictionary output.\n/// - [run] A convenience method that takes inputs and returns the output as a\n///   string. This method can only be used if the Chain has a single string\n///   output.\n/// {@endtemplate}\nabstract class BaseChain<MemoryType extends BaseMemory>\n    extends BaseLangChain<ChainValues, ChainOptions, ChainValues> {\n  /// {@macro base_chain}\n  const BaseChain({this.memory, final ChainOptions? defaultOptions})\n    : super(defaultOptions: defaultOptions ?? const ChainOptions());\n\n  /// Memory to use for this chain.\n  final MemoryType? 
memory;\n\n  /// Return the string type key uniquely identifying this class of chain.\n  String get chainType;\n\n  /// Input keys for this chain.\n  Set<String> get inputKeys;\n\n  /// Output keys for this chain.\n  Set<String> get outputKeys;\n\n  /// Output key from where the [run] method needs to take the return value.\n  String get runOutputKey {\n    if (outputKeys.length != 1) {\n      throw LangChainException(\n        message:\n            '`chain.run` method can only be used with chains that return '\n            'a single string output. $chainType chain returns '\n            '${outputKeys.length} output: $outputKeys. '\n            'Use `chain.call` method instead.',\n      );\n    }\n    return outputKeys.first;\n  }\n\n  /// Default input key for the input of the chain.\n  static const defaultInputKey = 'input';\n\n  /// Default output key for the output of the chain.\n  static const defaultOutputKey = 'output';\n\n  /// Runs the core logic of this chain with the given input.\n  ///\n  /// - [input] is the input to this chain.\n  /// - [options] not used.\n  @override\n  Future<ChainValues> invoke(\n    final ChainValues input, {\n    final ChainOptions? options,\n  }) {\n    return call(input);\n  }\n\n  /// Runs the core logic of this chain with the given values.\n  /// If [memory] is not null, it will be used to load and save values.\n  ///\n  /// - [inputs] are the inputs to this chain. Assumed to contain all inputs\n  /// specified in [inputKeys], including any inputs added by [memory].\n  /// - [returnOnlyOutputs] if true the chain will only return the outputs of\n  /// this chain. If false, the chain will return all inputs and outputs.\n  ///\n  /// Returns a dictionary of outputs. 
It should contain all outputs specified\n  /// in [outputKeys].\n  Future<ChainValues> call(\n    final dynamic input, {\n    final bool returnOnlyOutputs = false,\n  }) async {\n    ChainValues chainValues;\n\n    if (input is Map) {\n      chainValues = input.cast();\n    } else {\n      chainValues = {inputKeys.firstOrNull ?? defaultInputKey: input};\n    }\n\n    if (inputKeys.isNotEmpty) {\n      if (chainValues.length < inputKeys.length) {\n        throw ArgumentError(\n          'This chain ($chainType) requires ${inputKeys.length} input values '\n          'but only ${chainValues.length} were provided.',\n        );\n      }\n      final chainValuesKeys = chainValues.keys.toSet();\n      final inputKeysDiff = inputKeys.difference(chainValuesKeys);\n      if (inputKeysDiff.isNotEmpty) {\n        throw ArgumentError(\n          'This chain ($chainType) also requires $inputKeysDiff input values.',\n        );\n      }\n    }\n\n    final memory = this.memory;\n    if (memory != null) {\n      final newValues = await memory.loadMemoryVariables(chainValues);\n      chainValues.addAll(newValues);\n    }\n\n    final outputValues = await callInternal(chainValues);\n\n    if (memory != null) {\n      await memory.saveContext(\n        inputValues: chainValues,\n        outputValues: outputValues,\n      );\n    }\n\n    if (outputKeys.isNotEmpty) {\n      if (outputValues.length < outputKeys.length) {\n        throw ArgumentError(\n          'This chain ($chainType) expects ${outputKeys.length} output values '\n          'but only ${outputValues.length} were returned.',\n        );\n      }\n      final outputValuesKeys = outputValues.keys.toSet();\n      final outputKeysDiff = outputKeys.difference(outputValuesKeys);\n      if (outputKeysDiff.isNotEmpty) {\n        throw ArgumentError(\n          'This chain ($chainType) also expects $outputKeysDiff output values.',\n        );\n      }\n    }\n\n    if (returnOnlyOutputs) {\n      return outputValues;\n    }\n\n  
  return {...chainValues, ...outputValues};\n  }\n\n  /// Call method to be implemented by subclasses (called by [call]).\n  /// This is where the core logic of the chain should be implemented.\n  @protected\n  Future<ChainValues> callInternal(final ChainValues inputs);\n\n  /// Call the chain on all inputs in the list.\n  Future<List<ChainValues>> apply(final List<ChainValues> inputs) {\n    return Future.wait(inputs.map(call));\n  }\n\n  /// Convenience method for executing chain when there's a single string\n  /// output.\n  ///\n  /// The main difference between this method and [call] is that this method\n  /// can only be used for chains that return a single string output. If a\n  /// Chain has more outputs, the output is not a string, or you want to return\n  /// the inputs/run info along with the outputs, use [call].\n  ///\n  /// If [run] is called on a chain that does not return a string,\n  /// [Object.toString] will be called on the output.\n  ///\n  /// The input can be:\n  /// - A single value, if the chain has a single input key.\n  ///   Eg: `chain.run('Hello world!')`\n  /// - A map of key->values, if the chain has multiple input keys.\n  ///   Eg: `chain.run({'foo': 'Hello', 'bar': 'world!'})`\n  Future<String> run(final dynamic input) async {\n    final outputKey = runOutputKey;\n    final returnValues = await call(input, returnOnlyOutputs: true);\n    return returnValues[outputKey].toString();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chains/chains.dart",
    "content": "export 'base.dart';\nexport 'llm_chain.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chains/llm_chain.dart",
    "content": "import '../../language_models.dart';\nimport '../../memory.dart';\nimport '../../output_parsers.dart';\nimport '../../prompts.dart';\n\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template llm_chain}\n/// > NOTE: Chains are the legacy way of using LangChain and will eventually be\n/// > removed. Use LangChain Expression Language (LCEL) instead, a more flexible\n/// > and powerful way to combine different components.\n///\n/// Chain to run queries against LLMs.\n///\n/// It takes in a prompt template, formats it with the user input and returns\n/// the response from an LLM.\n///\n/// Example:\n/// ```dart\n/// final prompt = PromptTemplate.fromTemplate('Tell me a {adjective} joke');\n/// final chain = LLMChain(llm: OpenAI(apiKey: '...'), prompt: prompt);\n/// final res = await chain.run('bad');\n/// ```\n/// {@endtemplate}\nclass LLMChain<\n  LLMType extends BaseLanguageModel,\n  LLMOptions extends LanguageModelOptions,\n  MemoryType extends BaseMemory\n>\n    extends BaseChain<MemoryType> {\n  /// {@macro llm_chain}\n  const LLMChain({\n    required this.llm,\n    this.llmOptions,\n    required this.prompt,\n    super.memory,\n    this.outputParser,\n    this.outputKey = defaultOutputKey,\n    this.returnFinalOnly = true,\n  }) : super(defaultOptions: const ChainOptions());\n\n  /// Language model to call.\n  final LLMType llm;\n\n  /// Options to pass to the language model.\n  final LLMOptions? llmOptions;\n\n  /// Prompt object to use.\n  final BasePromptTemplate prompt;\n\n  /// OutputParser to use.\n  ///\n  /// Defaults to one that takes the most likely string but does not change it\n  /// otherwise.\n  final BaseOutputParser? 
outputParser;\n\n  /// Key to use for output.\n  final String outputKey;\n\n  /// Whether to return only the final parsed result.\n  /// If false, it will return a bunch of extra information about the\n  /// generation.\n  final bool returnFinalOnly;\n\n  /// Default output key.\n  static const defaultOutputKey = 'output';\n\n  /// Output key to use for returning the full generation.\n  static const fullGenerationOutputKey = 'full_generation';\n\n  @override\n  String get chainType => 'llm';\n\n  @override\n  Set<String> get inputKeys =>\n      prompt.inputVariables.difference(memory?.memoryKeys ?? {});\n\n  @override\n  Set<String> get outputKeys =>\n      returnFinalOnly ? {outputKey} : {outputKey, fullGenerationOutputKey};\n\n  @override\n  String get runOutputKey => outputKey;\n\n  @override\n  Future<ChainValues> callInternal(final ChainValues inputs) async {\n    final promptValue = prompt.formatPrompt(inputs);\n\n    final response = await llm.invoke(promptValue, options: llmOptions);\n\n    final res = outputParser == null\n        ? response.output\n        : await outputParser!.invoke(response);\n\n    return {\n      outputKey: res,\n      if (!returnFinalOnly) fullGenerationOutputKey: response,\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chains/types.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../langchain/types.dart';\n\n/// Values to be used in the chain.\ntypedef ChainValues = Map<String, dynamic>;\n\n/// {@template chain_options}\n/// Options to pass to the chain.\n/// {@endtemplate}\n@immutable\nclass ChainOptions extends BaseLangChainOptions {\n  /// {@macro chain_options}\n  const ChainOptions({super.concurrencyLimit});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_history/base.dart",
    "content": "import '../chat_models/types.dart';\n\n/// {@template base_chat_message_history}\n/// Base interface for chat message history.\n///\n/// See [ChatMessageHistory] for an in-memory implementation.\n/// {@endtemplate}\nabstract base class BaseChatMessageHistory {\n  /// {@macro base_chat_message_history}\n  const BaseChatMessageHistory();\n\n  /// Return a list of messages.\n  Future<List<ChatMessage>> getChatMessages();\n\n  /// Add [ChatMessage] to the history.\n  Future<void> addChatMessage(final ChatMessage message);\n\n  /// Add a human message to the history.\n  Future<void> addHumanChatMessage(final String message) {\n    return addChatMessage(ChatMessage.humanText(message));\n  }\n\n  /// Add an AI message to the history.\n  Future<void> addAIChatMessage(final String message) {\n    return addChatMessage(ChatMessage.ai(message));\n  }\n\n  /// Add a Tool response message to the history.\n  Future<void> addToolChatMessage({\n    required final String toolCallId,\n    required final String content,\n  }) {\n    return addChatMessage(\n      ChatMessage.tool(toolCallId: toolCallId, content: content),\n    );\n  }\n\n  /// Removes and returns the first (oldest) element of the history.\n  ///\n  /// The history must not be empty when this method is called.\n  Future<ChatMessage> removeFirst();\n\n  /// Removes and returns the last (newest) element of the history.\n  ///\n  /// The history must not be empty when this method is called.\n  Future<ChatMessage> removeLast();\n\n  /// Clear the history.\n  Future<void> clear();\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_history/chat_history.dart",
    "content": "export 'base.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_models/base.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../language_models/language_models.dart';\nimport '../prompts/types.dart';\nimport '../utils/reduce.dart';\nimport 'types.dart';\n\n/// {@template base_chat_model}\n/// Chat models base class.\n/// It should take in chat messages and return a chat message.\n/// {@endtemplate}\nabstract class BaseChatModel<Options extends ChatModelOptions>\n    extends BaseLanguageModel<List<ChatMessage>, Options, ChatResult> {\n  /// {@macro base_chat_model}\n  const BaseChatModel({required super.defaultOptions});\n\n  @override\n  Stream<ChatResult> streamFromInputStream(\n    final Stream<PromptValue> inputStream, {\n    final Options? options,\n  }) async* {\n    final input = await inputStream.toList();\n    final reduced = reduce<PromptValue>(input);\n    yield* stream(reduced, options: options);\n  }\n\n  /// Runs the chat model on the given messages and returns a chat message.\n  ///\n  /// - [messages] The messages to pass into the model.\n  /// - [options] Generation options to pass into the Chat Model.\n  ///\n  /// Example:\n  /// ```dart\n  /// final result = await chat([ChatMessage.humanText('say hi!')]);\n  /// ```\n  Future<AIChatMessage> call(\n    final List<ChatMessage> messages, {\n    final Options? options,\n  }) async {\n    final result = await invoke(PromptValue.chat(messages), options: options);\n    return result.output;\n  }\n}\n\n/// {@template simple_chat_model}\n/// [SimpleChatModel] provides a simplified interface for working with chat\n/// models, rather than expecting the user to implement the full\n/// [SimpleChatModel.invoke] method.\n/// {@endtemplate}\nabstract class SimpleChatModel<Options extends ChatModelOptions>\n    extends BaseChatModel<Options> {\n  /// {@macro simple_chat_model}\n  const SimpleChatModel({required super.defaultOptions});\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final Options? 
options,\n  }) async {\n    final text = await callInternal(input.toChatMessages(), options: options);\n    final message = AIChatMessage(content: text);\n    return ChatResult(\n      id: '1',\n      output: message,\n      finishReason: FinishReason.unspecified,\n      metadata: const {},\n      usage: const LanguageModelUsage(),\n    );\n  }\n\n  /// Method which should be implemented by subclasses to run the model.\n  @visibleForOverriding\n  Future<String> callInternal(\n    final List<ChatMessage> messages, {\n    final Options? options,\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_models/chat_models.dart",
    "content": "export 'base.dart';\nexport 'fake.dart';\nexport 'types.dart';\nexport 'utils.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_models/fake.dart",
    "content": "import 'package:collection/collection.dart';\n\nimport '../../language_models.dart';\nimport '../prompts/types.dart';\nimport '../tools/base.dart';\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template fake_chat_model}\n/// Fake Chat Model for testing.\n/// You can pass in a list of responses to return in order when called.\n/// {@endtemplate}\nclass FakeChatModel extends BaseChatModel<FakeChatModelOptions> {\n  /// {@macro fake_list_llm}\n  FakeChatModel({\n    required this.responses,\n    super.defaultOptions = const FakeChatModelOptions(),\n  });\n\n  /// Responses to return in order when called.\n  final List<String> responses;\n\n  var _i = 0;\n\n  @override\n  String get modelType => 'fake-chat-model';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final FakeChatModelOptions? options,\n  }) async {\n    final text = responses[_i++ % responses.length];\n    final message = AIChatMessage(content: text);\n    return ChatResult(\n      id: '1',\n      output: message,\n      finishReason: FinishReason.unspecified,\n      metadata: {\n        'model': options?.model ?? defaultOptions.model,\n        ...?options?.metadata ?? defaultOptions.metadata,\n      },\n      usage: const LanguageModelUsage(),\n    );\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final FakeChatModelOptions? options,\n  }) {\n    final res = responses[_i++ % responses.length].split('');\n    return Stream.fromIterable(res).map(\n      (final char) => ChatResult(\n        id: 'fake-chat-model',\n        output: AIChatMessage(content: char),\n        finishReason: FinishReason.stop,\n        metadata: {\n          'model': options?.model ?? defaultOptions.model,\n          ...?options?.metadata ?? 
defaultOptions.metadata,\n        },\n        usage: const LanguageModelUsage(),\n        streaming: true,\n      ),\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatModelOptions? options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n\n/// {@template fake_chat_model_options}\n/// Fake Chat Model Options for testing.\n/// {@endtemplate}\nclass FakeChatModelOptions extends ChatModelOptions {\n  /// {@macro fake_chat_model_options}\n  const FakeChatModelOptions({\n    super.model,\n    this.metadata,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// Metadata.\n  final Map<String, dynamic>? metadata;\n\n  @override\n  FakeChatModelOptions copyWith({\n    final String? model,\n    final Map<String, dynamic>? metadata,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return FakeChatModelOptions(\n      model: model ?? this.model,\n      metadata: metadata ?? this.metadata,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  FakeChatModelOptions merge(covariant final FakeChatModelOptions? 
other) {\n    return copyWith(\n      model: other?.model,\n      metadata: other?.metadata,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final FakeChatModelOptions other) {\n    return model == other.model &&\n        const MapEquality<String, dynamic>().equals(metadata, other.metadata) &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        const MapEquality<String, dynamic>().hash(metadata) ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// {@template fake_echo_chat_model}\n/// Fake Chat Model for testing.\n/// It just returns the content of the last message of the prompt\n/// or streams it char by char.\n/// {@endtemplate}\nclass FakeEchoChatModel extends BaseChatModel<FakeEchoChatModelOptions> {\n  /// {@macro fake_echo_chat_model}\n  const FakeEchoChatModel({\n    super.defaultOptions = const FakeEchoChatModelOptions(),\n  });\n\n  @override\n  String get modelType => 'fake-echo-chat-model';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final FakeEchoChatModelOptions? options,\n  }) async {\n    final throwError =\n        options?.throwRandomError ?? defaultOptions.throwRandomError;\n    if (throwError) {\n      throw Exception('Random error');\n    }\n\n    final text = input.toChatMessages().last.contentAsString;\n    final message = AIChatMessage(content: text);\n    return ChatResult(\n      id: '1',\n      output: message,\n      finishReason: FinishReason.unspecified,\n      metadata: {\n        'model': options?.model ?? defaultOptions.model,\n        ...?options?.metadata ?? defaultOptions.metadata,\n      },\n      usage: const LanguageModelUsage(),\n    );\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final FakeEchoChatModelOptions? 
options,\n  }) {\n    final prompt = input.toChatMessages().first.contentAsString.split('');\n    final throwError =\n        options?.throwRandomError ?? defaultOptions.throwRandomError;\n\n    var index = 0;\n    return Stream.fromIterable(prompt).map((final char) {\n      if (throwError && index == prompt.length ~/ 2) {\n        throw Exception('Random error');\n      }\n\n      return ChatResult(\n        id: 'fake-echo-chat-model',\n        output: AIChatMessage(content: char),\n        finishReason: FinishReason.stop,\n        metadata: {\n          'model': options?.model ?? defaultOptions.model,\n          ...?options?.metadata ?? defaultOptions.metadata,\n          'index': index++,\n        },\n        usage: const LanguageModelUsage(),\n        streaming: true,\n      );\n    });\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatModelOptions? options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n\n/// {@template fake_chat_model_options}\n/// Fake Echo Chat Model Options for testing.\n/// {@endtemplate}\nclass FakeEchoChatModelOptions extends ChatModelOptions {\n  /// {@macro fake_chat_model_options}\n  const FakeEchoChatModelOptions({\n    super.model,\n    this.metadata,\n    this.throwRandomError = false,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// Metadata.\n  final Map<String, dynamic>? metadata;\n\n  /// If true, throws a random error.\n  final bool throwRandomError;\n\n  @override\n  FakeEchoChatModelOptions copyWith({\n    final String? model,\n    final Map<String, dynamic>? metadata,\n    final bool? throwRandomError,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return FakeEchoChatModelOptions(\n      model: model ?? 
this.model,\n      metadata: metadata ?? this.metadata,\n      throwRandomError: throwRandomError ?? this.throwRandomError,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  FakeEchoChatModelOptions merge(\n    covariant final FakeEchoChatModelOptions? other,\n  ) {\n    return copyWith(\n      model: other?.model,\n      metadata: other?.metadata,\n      throwRandomError: other?.throwRandomError,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final FakeEchoChatModelOptions other) {\n    return model == other.model &&\n        const MapEquality<String, dynamic>().equals(metadata, other.metadata) &&\n        throwRandomError == other.throwRandomError &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        const MapEquality<String, dynamic>().hash(metadata) ^\n        throwRandomError.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_models/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\nimport '../language_models/language_models.dart';\nimport '../tools/base.dart';\n\n/// {@template chat_model_options}\n/// Generation options to pass into the Chat Model.\n/// {@endtemplate}\n@immutable\nabstract class ChatModelOptions extends LanguageModelOptions {\n  /// {@macro chat_model_options}\n  const ChatModelOptions({\n    super.model,\n    this.tools,\n    this.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// A list of tools the model may call.\n  final List<ToolSpec>? tools;\n\n  /// Controls which (if any) tool is called by the model.\n  final ChatToolChoice? toolChoice;\n\n  @override\n  ChatModelOptions copyWith({\n    final String? model,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  });\n}\n\n/// {@template chat_result}\n/// Result returned by the Chat Model.\n/// {@endtemplate}\nclass ChatResult extends LanguageModelResult<AIChatMessage> {\n  /// {@macro chat_result}\n  const ChatResult({\n    required super.id,\n    required super.output,\n    required super.finishReason,\n    required super.metadata,\n    required super.usage,\n    super.streaming = false,\n  });\n\n  @override\n  String get outputAsString => output.content;\n\n  @override\n  ChatResult concat(final LanguageModelResult<AIChatMessage> other) {\n    return ChatResult(\n      id: other.id.isNotEmpty ? other.id : id,\n      output: output.concat(other.output),\n      finishReason:\n          finishReason != FinishReason.unspecified &&\n              other.finishReason == FinishReason.unspecified\n          ? 
finishReason\n          : other.finishReason,\n      metadata: {...metadata, ...other.metadata},\n      usage: usage.concat(other.usage),\n      streaming: other.streaming,\n    );\n  }\n\n  @override\n  String toString() {\n    return '''\nChatResult{\n  id: $id, \n  output: $output,\n  finishReason: $finishReason,\n  metadata: $metadata,\n  usage: $usage,\n  streaming: $streaming\n}''';\n  }\n}\n\n/// {@template chat_message}\n/// A message that is part of a chat conversation.\n/// {@endtemplate}\n@immutable\nsealed class ChatMessage {\n  /// {@macro chat_message}\n  const ChatMessage();\n\n  /// Converts this ChatMessage to a map along with a type hint for deserialization.\n  Map<String, dynamic> toMap() => {};\n\n  /// Converts a map to a [ChatMessage]. Requires at least a type hint.\n  factory ChatMessage.fromMap(Map<String, dynamic> map) =>\n      switch (map['type']) {\n        'system' => SystemChatMessage.fromMap(map),\n        'human' => HumanChatMessage.fromMap(map),\n        'ai' => AIChatMessage.fromMap(map),\n        'tool' => ToolChatMessage.fromMap(map),\n        'custom' => CustomChatMessage.fromMap(map),\n        null => throw ArgumentError('Type is required'),\n        _ => throw UnimplementedError('Unknown type: ${map['type']}'),\n      };\n\n  /// Type of message that is a system message.\n  factory ChatMessage.system(final String content) =>\n      SystemChatMessage(content: content);\n\n  /// Type of message that is spoken by the human.\n  factory ChatMessage.human(final ChatMessageContent content) =>\n      HumanChatMessage(content: content);\n\n  /// This is a convenience method for [ChatMessage.human] with\n  /// [ChatMessageContent.text].\n  factory ChatMessage.humanText(final String text) =>\n      HumanChatMessage(content: ChatMessageContent.text(text));\n\n  /// Type of message that is spoken by the AI.\n  factory ChatMessage.ai(\n    final String content, {\n    final List<AIChatMessageToolCall> toolCalls = const [],\n  }) => 
AIChatMessage(content: content, toolCalls: toolCalls);\n\n  /// Type of message that is the response of calling a tool.\n  factory ChatMessage.tool({\n    required final String toolCallId,\n    required final String content,\n  }) => ToolChatMessage(toolCallId: toolCallId, content: content);\n\n  /// Chat message with custom role.\n  factory ChatMessage.custom(\n    final String content, {\n    required final String role,\n  }) => CustomChatMessage(content: content, role: role);\n\n  /// Returns to content of the message as a string.\n  String get contentAsString => switch (this) {\n    final SystemChatMessage system => system.content,\n    final HumanChatMessage human => switch (human.content) {\n      final ChatMessageContentText text => text.text,\n      final ChatMessageContentImage image => image.data,\n      final ChatMessageContentMultiModal multiModal =>\n        multiModal.parts\n            .map(\n              (final p) => switch (p) {\n                final ChatMessageContentText text => text.text,\n                final ChatMessageContentImage image => image.data,\n                ChatMessageContentMultiModal _ => '',\n              },\n            )\n            .join('\\n'),\n    },\n    final AIChatMessage ai => ai.content,\n    final ToolChatMessage tool => tool.content,\n    final CustomChatMessage custom => custom.content,\n  };\n\n  /// Merges this message with another by concatenating the content.\n  ChatMessage concat(final ChatMessage other);\n}\n\n/// {@template system_chat_message}\n/// Type of message that is a system message.\n/// {@endtemplate}\n@immutable\nclass SystemChatMessage extends ChatMessage {\n  /// {@macro system_chat_message}\n  const SystemChatMessage({required this.content});\n\n  /// Converts a map to a [SystemChatMessage].\n  factory SystemChatMessage.fromMap(Map<String, dynamic> map) =>\n      SystemChatMessage(content: map['content'] as String);\n\n  /// Converts this ChatMessage to a map along with a type hint for 
deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'content': content,\n    'type': 'system',\n  };\n\n  /// The content of the message.\n  final String content;\n\n  /// Default prefix for [SystemChatMessage].\n  static const defaultPrefix = 'System';\n\n  @override\n  bool operator ==(covariant final SystemChatMessage other) =>\n      identical(this, other) || content == other.content;\n\n  @override\n  int get hashCode => content.hashCode;\n\n  @override\n  SystemChatMessage concat(final ChatMessage other) {\n    if (other is! SystemChatMessage) {\n      return this;\n    }\n    return SystemChatMessage(content: content + other.content);\n  }\n\n  @override\n  String toString() {\n    return '''\nSystemChatMessage{\n  content: $content,\n}''';\n  }\n}\n\n/// {@template human_chat_message}\n/// Type of message that is spoken by the human.\n/// {@endtemplate}\n@immutable\nclass HumanChatMessage extends ChatMessage {\n  /// {@macro human_chat_message}\n  const HumanChatMessage({required this.content});\n\n  /// Converts a map to a [HumanChatMessage].\n  factory HumanChatMessage.fromMap(Map<String, dynamic> map) =>\n      HumanChatMessage(content: ChatMessageContent.fromMap(map['content']));\n\n  /// Converts this ChatMessage to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'content': content.toMap(),\n    'type': 'human',\n  };\n\n  /// The content of the message.\n  final ChatMessageContent content;\n\n  /// Default prefix for [HumanChatMessage].\n  static const defaultPrefix = 'Human';\n\n  @override\n  bool operator ==(covariant final HumanChatMessage other) =>\n      identical(this, other) || content == other.content;\n\n  @override\n  int get hashCode => content.hashCode;\n\n  @override\n  HumanChatMessage concat(final ChatMessage other) {\n    if (other is! 
HumanChatMessage) {\n      return this;\n    }\n\n    final thisContent = content;\n    final otherContent = other.content;\n\n    if (thisContent is ChatMessageContentText) {\n      return switch (otherContent) {\n        ChatMessageContentText(text: final text) => HumanChatMessage(\n          content: ChatMessageContent.text(thisContent.text + text),\n        ),\n        final ChatMessageContentImage image => HumanChatMessage(\n          content: ChatMessageContentMultiModal(parts: [thisContent, image]),\n        ),\n        final ChatMessageContentMultiModal multiModal => HumanChatMessage(\n          content: ChatMessageContentMultiModal(\n            parts: [thisContent, ...multiModal.parts],\n          ),\n        ),\n      };\n    } else if (thisContent is ChatMessageContentImage) {\n      return switch (otherContent) {\n        final ChatMessageContentText text => HumanChatMessage(\n          content: ChatMessageContentMultiModal(parts: [thisContent, text]),\n        ),\n        final ChatMessageContentImage image => HumanChatMessage(\n          content: ChatMessageContentMultiModal(parts: [thisContent, image]),\n        ),\n        final ChatMessageContentMultiModal multiModal => HumanChatMessage(\n          content: ChatMessageContentMultiModal(\n            parts: [thisContent, ...multiModal.parts],\n          ),\n        ),\n      };\n    } else if (thisContent is ChatMessageContentMultiModal) {\n      return switch (otherContent) {\n        final ChatMessageContentText text => HumanChatMessage(\n          content: ChatMessageContentMultiModal(\n            parts: [...thisContent.parts, text],\n          ),\n        ),\n        final ChatMessageContentImage image => HumanChatMessage(\n          content: ChatMessageContentMultiModal(\n            parts: [...thisContent.parts, image],\n          ),\n        ),\n        final ChatMessageContentMultiModal multiModal => HumanChatMessage(\n          content: ChatMessageContentMultiModal(\n            parts: 
[...thisContent.parts, ...multiModal.parts],\n          ),\n        ),\n      };\n    } else {\n      throw ArgumentError('Unknown ChatMessageContent type: $thisContent');\n    }\n  }\n\n  @override\n  String toString() {\n    return '''\nHumanChatMessage{\n  content: $content,\n}''';\n  }\n}\n\n/// {@template ai_chat_message}\n/// Type of message that is spoken by the AI.\n/// {@endtemplate}\n@immutable\nclass AIChatMessage extends ChatMessage {\n  /// {@macro ai_chat_message}\n  const AIChatMessage({required this.content, this.toolCalls = const []});\n\n  /// Converts a map to a [AIChatMessage].\n  factory AIChatMessage.fromMap(Map<String, dynamic> map) => AIChatMessage(\n    content: map['content'] as String,\n    toolCalls: (map['toolCalls'] as List<dynamic>)\n        .map((i) => i as Map<String, dynamic>)\n        .map(AIChatMessageToolCall.fromMap)\n        .toList(growable: false),\n  );\n\n  /// Converts this ChatMessage to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'content': content,\n    'toolCalls': toolCalls.map((t) => t.toMap()).toList(growable: false),\n    'type': 'ai',\n  };\n\n  /// The content of the message.\n  final String content;\n\n  /// The list of tool that the model wants to call.\n  /// If the model does not want to call any tool, this list will be empty.\n  final List<AIChatMessageToolCall> toolCalls;\n\n  /// Default prefix for [AIChatMessage].\n  static const defaultPrefix = 'AI';\n\n  @override\n  bool operator ==(covariant final AIChatMessage other) {\n    final listEquals = const DeepCollectionEquality().equals;\n    return identical(this, other) ||\n        content == other.content && listEquals(toolCalls, other.toolCalls);\n  }\n\n  @override\n  int get hashCode => content.hashCode ^ toolCalls.hashCode;\n\n  @override\n  AIChatMessage concat(final ChatMessage other) {\n    if (other is! 
AIChatMessage) {\n      return this;\n    }\n\n    final toolCalls = <AIChatMessageToolCall>[];\n    if (this.toolCalls.isNotEmpty || other.toolCalls.isNotEmpty) {\n      final thisToolCallsById = {\n        for (final toolCall in this.toolCalls) toolCall.id: toolCall,\n      };\n      final otherToolCallsById = {\n        for (final toolCall in other.toolCalls)\n          (toolCall.id.isNotEmpty\n                  ? toolCall.id\n                  : (this.toolCalls.lastOrNull?.id ?? '')):\n              toolCall,\n      };\n      final toolCallsIds = {\n        ...thisToolCallsById.keys,\n        ...otherToolCallsById.keys,\n      };\n\n      for (final id in toolCallsIds) {\n        final thisToolCall = thisToolCallsById[id];\n        final otherToolCall = otherToolCallsById[id];\n        toolCalls.add(\n          AIChatMessageToolCall(\n            id: id,\n            name: (thisToolCall?.name ?? '') + (otherToolCall?.name ?? ''),\n            argumentsRaw:\n                (thisToolCall?.argumentsRaw ?? '') +\n                (otherToolCall?.argumentsRaw ?? 
''),\n            arguments: {\n              ...?thisToolCall?.arguments,\n              ...?otherToolCall?.arguments,\n            },\n          ),\n        );\n      }\n    }\n\n    return AIChatMessage(\n      content: content + other.content,\n      toolCalls: toolCalls,\n    );\n  }\n\n  @override\n  String toString() {\n    return '''\nAIChatMessage{\n  content: $content,\n  toolCalls: $toolCalls,\n}''';\n  }\n}\n\n/// {@template ai_chat_message_tool_call}\n/// A tool that the model wants to call.\n/// {@endtemplate}\n@immutable\nclass AIChatMessageToolCall {\n  /// {@macro ai_chat_message_tool_call}\n  const AIChatMessageToolCall({\n    required this.id,\n    required this.name,\n    required this.argumentsRaw,\n    required this.arguments,\n  });\n\n  /// The id of the tool to call.\n  ///\n  /// This is used to match up the tool results later.\n  final String id;\n\n  /// The name of the tool to call.\n  final String name;\n\n  /// The raw arguments JSON string (needed to parse streaming responses).\n  final String argumentsRaw;\n\n  /// The arguments to pass to the tool in JSON Map format.\n  ///\n  /// Note that the model does not always generate a valid JSON, in that case,\n  /// [arguments] will be empty but you can still see the raw response in\n  /// [argumentsRaw].\n  ///\n  /// The model may also hallucinate parameters not defined by your tool schema.\n  /// Validate the arguments in your code before calling your tool.\n  final Map<String, dynamic> arguments;\n\n  /// Converts the [AIChatMessageToolCall] to a [Map].\n  Map<String, dynamic> toMap() {\n    return {\n      'id': id,\n      'name': name,\n      'argumentsRaw': argumentsRaw,\n      'arguments': arguments,\n    };\n  }\n\n  /// Converts a map to a [AIChatMessageToolCall].\n  factory AIChatMessageToolCall.fromMap(Map<String, dynamic> map) =>\n      AIChatMessageToolCall(\n        id: map['id'] as String,\n        name: map['name'] as String,\n        argumentsRaw: map['argumentsRaw'] as 
String,\n        arguments: (map['arguments'] as Map<String, dynamic>?) ?? {},\n      );\n\n  @override\n  bool operator ==(covariant final AIChatMessageToolCall other) {\n    final mapEquals = const DeepCollectionEquality().equals;\n    return identical(this, other) ||\n        id == other.id &&\n            name == other.name &&\n            argumentsRaw == other.argumentsRaw &&\n            mapEquals(arguments, other.arguments);\n  }\n\n  @override\n  int get hashCode =>\n      id.hashCode ^ name.hashCode ^ argumentsRaw.hashCode ^ arguments.hashCode;\n\n  @override\n  String toString() {\n    return '''\nAIChatMessageToolCall{\n  id: $id,\n  name: $name,\n  argumentsRaw: $argumentsRaw,\n  arguments: $arguments,\n}''';\n  }\n}\n\n/// {@template tool_chat_message}\n/// Type of message that is the response of calling a tool.\n/// {@endtemplate}\n@immutable\nclass ToolChatMessage extends ChatMessage {\n  /// {@macro tool_chat_message}\n  const ToolChatMessage({required this.toolCallId, required this.content});\n\n  /// Converts a map to a [ToolChatMessage].\n  factory ToolChatMessage.fromMap(Map<String, dynamic> map) => ToolChatMessage(\n    toolCallId: map['toolCallId'] as String,\n    content: map['content'] as String,\n  );\n\n  /// Converts this ChatMessage to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'content': content,\n    'toolCallId': toolCallId,\n    'type': 'tool',\n  };\n\n  /// The id of the tool that was called.\n  final String toolCallId;\n\n  /// The response of the tool call.\n  final String content;\n\n  /// Default prefix for [ToolChatMessage].\n  static const defaultPrefix = 'Tool';\n\n  @override\n  bool operator ==(covariant final ToolChatMessage other) =>\n      identical(this, other) ||\n      toolCallId == other.toolCallId && content == other.content;\n\n  @override\n  int get hashCode => toolCallId.hashCode ^ content.hashCode;\n\n  @override\n  
ToolChatMessage concat(final ChatMessage other) {\n    if (other is! ToolChatMessage) {\n      return this;\n    }\n\n    return ToolChatMessage(\n      toolCallId: toolCallId,\n      content: content + other.content,\n    );\n  }\n\n  @override\n  String toString() {\n    return '''\nToolChatMessage{\n  toolCallId: $toolCallId,\n  content: $content,\n}''';\n  }\n}\n\n/// {@template custom_chat_message}\n/// Chat message with custom role.\n/// {@endtemplate}\n@immutable\nclass CustomChatMessage extends ChatMessage {\n  /// {@macro custom_chat_message}\n  const CustomChatMessage({required this.content, required this.role});\n\n  /// Converts a map to a [CustomChatMessage].\n  factory CustomChatMessage.fromMap(Map<String, dynamic> map) =>\n      CustomChatMessage(\n        content: map['content'] as String,\n        role: map['role'] as String,\n      );\n\n  /// Converts this ChatMessage to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'content': content,\n    'role': role,\n    'type': 'custom',\n  };\n\n  /// The content of the message.\n  final String content;\n\n  /// The role of the author of this message.\n  final String role;\n\n  @override\n  bool operator ==(covariant final CustomChatMessage other) =>\n      identical(this, other) || content == other.content && role == other.role;\n\n  @override\n  int get hashCode => content.hashCode ^ role.hashCode;\n\n  @override\n  CustomChatMessage concat(final ChatMessage other) {\n    if (other is! 
CustomChatMessage) {\n      return this;\n    }\n    return CustomChatMessage(role: role, content: content + other.content);\n  }\n\n  @override\n  String toString() {\n    return '''\nCustomChatMessage{\n  content: $content,\n  role: $role,\n}''';\n  }\n}\n\n/// Role of a chat message\nenum ChatMessageRole {\n  /// A system message.\n  system,\n\n  /// A human (user) message.\n  human,\n\n  /// An AI message.\n  ai,\n\n  /// A message with a custom role.\n  custom,\n}\n\n/// {@template chat_message_content}\n/// The content of a message.\n/// {@endtemplate}\n@immutable\nsealed class ChatMessageContent {\n  const ChatMessageContent();\n\n  /// Converts this ChatMessageContent to a map\n  Map<String, dynamic> toMap() => {};\n\n  factory ChatMessageContent.fromMap(Map<String, dynamic> map) =>\n      switch (map['type']) {\n        'text' => ChatMessageContentText.fromMap(map),\n        'image' => ChatMessageContentImage.fromMap(map),\n        'multi_modal' => ChatMessageContentMultiModal.fromMap(map),\n        null => throw ArgumentError('Type is required'),\n        _ => throw UnimplementedError('Unknown type: ${map['type']}'),\n      };\n\n  /// The content of a message that is text.\n  factory ChatMessageContent.text(final String text) =>\n      ChatMessageContentText(text: text);\n\n  /// The content of a message that is an image.\n  ///\n  /// More info about the possible values:\n  /// - [ChatMessageContentImage.data]\n  /// - [ChatMessageContentImage.mimeType]\n  /// - [ChatMessageContentImage.detail]\n  factory ChatMessageContent.image({\n    required final String data,\n    final String? 
mimeType,\n    final ChatMessageContentImageDetail imageDetail =\n        ChatMessageContentImageDetail.auto,\n  }) => ChatMessageContentImage(\n    data: data,\n    mimeType: mimeType,\n    detail: imageDetail,\n  );\n\n  /// The content of a message that is multi-modal.\n  factory ChatMessageContent.multiModal(final List<ChatMessageContent> parts) =>\n      ChatMessageContentMultiModal(parts: parts);\n}\n\n/// {@template chat_message_content_text}\n/// The content of a message that is text.\n/// {@endtemplate}\nclass ChatMessageContentText extends ChatMessageContent {\n  /// {@macro chat_message_content_text}\n  const ChatMessageContentText({required this.text});\n\n  /// The text content.\n  final String text;\n\n  /// Converts a map to a [ChatMessageContentText].\n  factory ChatMessageContentText.fromMap(Map<String, dynamic> map) =>\n      ChatMessageContentText(text: map['content'] as String);\n\n  @override\n  /// Converts this ChatMessageContent to a map along with a type hint for deserialization.\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'type': 'text',\n    'content': text,\n  };\n\n  @override\n  bool operator ==(covariant final ChatMessageContentText other) =>\n      identical(this, other) || text == other.text;\n\n  @override\n  int get hashCode => text.hashCode;\n\n  @override\n  String toString() {\n    return '''\nChatMessageContentText{\n  text: $text,\n}''';\n  }\n}\n\n/// {@template chat_message_content_image}\n/// The content of a message that is an image.\n/// {@endtemplate}\nclass ChatMessageContentImage extends ChatMessageContent {\n  /// {@macro chat_message_content_image}\n  const ChatMessageContentImage({\n    required this.data,\n    this.mimeType,\n    this.detail = ChatMessageContentImageDetail.auto,\n  });\n\n  /// Depending on the model, this can be either:\n  /// - The base64 encoded image data\n  /// - A URL of the image (only supported by some providers)\n  final String data;\n\n  /// The IANA standard MIME 
type of the source data.\n  /// The accepted types vary per model.\n  ///\n  /// Examples of MIME types:\n  /// - `image/png`\n  /// - `image/jpeg`\n  /// - `image/heic`\n  /// - `image/heif`\n  /// - `image/webp`\n  final String? mimeType;\n\n  /// Specifies the detail level of the image.\n  final ChatMessageContentImageDetail detail;\n\n  /// Converts a map to a [ChatMessageContentImage].\n  factory ChatMessageContentImage.fromMap(Map<String, dynamic> map) =>\n      ChatMessageContentImage(\n        data: map['data'] as String,\n        mimeType: map['mimeType'] as String?,\n        detail: ChatMessageContentImageDetail.values.firstWhere(\n          (i) => map['detail'] == i.name,\n          orElse: () => ChatMessageContentImageDetail.auto,\n        ),\n      );\n\n  /// Converts this ChatMessageContent to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'type': 'image',\n    'data': data,\n    'mimeType': mimeType,\n    'detail': detail.name,\n  };\n\n  @override\n  bool operator ==(covariant final ChatMessageContentImage other) =>\n      identical(this, other) ||\n      data == other.data &&\n          mimeType == other.mimeType &&\n          detail == other.detail;\n\n  @override\n  int get hashCode => data.hashCode ^ mimeType.hashCode ^ detail.hashCode;\n\n  @override\n  String toString() {\n    return '''\nChatMessageContentImage{\n  url: $data,\n  mimeType: $mimeType,\n  imageDetail: $detail,\n}''';\n  }\n}\n\n/// {@template chat_message_content_multi_modal}\n/// The content of a message that is multi-modal.\n/// {@endtemplate\n@immutable\nclass ChatMessageContentMultiModal extends ChatMessageContent {\n  /// {@macro chat_message_content_multi_modal}\n  ChatMessageContentMultiModal({required this.parts})\n    : assert(\n        !parts.any((final p) => p is ChatMessageContentMultiModal),\n        'Multi-modal messages cannot contain other multi-modal messages.',\n      );\n\n  /// 
Converts a map to a [ChatMessageContentMultiModal].\n  factory ChatMessageContentMultiModal.fromMap(Map<String, dynamic> map) =>\n      ChatMessageContentMultiModal(\n        parts: (map['parts'] as List<dynamic>)\n            .whereType<Map<String, dynamic>>()\n            .map(ChatMessageContent.fromMap)\n            .toList(growable: false),\n      );\n\n  /// Converts [ChatMessageContentMultiModal] to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'type': 'multi_modal',\n    'parts': parts.map((p) => p.toMap()).toList(growable: false),\n  };\n\n  /// The parts of the multi-modal message.\n  final List<ChatMessageContent> parts;\n\n  @override\n  bool operator ==(covariant final ChatMessageContentMultiModal other) =>\n      identical(this, other) || parts == other.parts;\n\n  @override\n  int get hashCode => parts.hashCode;\n\n  @override\n  String toString() {\n    return '''\nChatMessageContentMultiModal{\n  parts: $parts,\n}''';\n  }\n}\n\n/// Specifies the detail level of the image.\nenum ChatMessageContentImageDetail {\n  /// Automatically select the detail level.\n  auto,\n\n  /// Low detail level (faster response time and lower token usage).\n  low,\n\n  /// Medium detail level (slower response time and higher token usage).\n  high,\n}\n\n/// {@template chat_tool_choice}\n/// Controls how the model responds to tool calls.\n/// {@endtemplate}\nsealed class ChatToolChoice {\n  /// {@macro chat_tool_choice}\n  const ChatToolChoice();\n\n  /// The model does not call a tool, and responds to the end-user.\n  static const none = ChatToolChoiceNone();\n\n  /// The model can pick between responding to the end-user or calling a tool.\n  static const auto = ChatToolChoiceAuto();\n\n  /// The model must call at least one tool, but doesn't force a particular tool.\n  static const required = ChatToolChoiceRequired();\n\n  /// The model is forced to to call the specified tool.\n  factory 
ChatToolChoice.forced({required final String name}) =>\n      ChatToolChoiceForced(name: name);\n\n  /// Converts this ChatToolChoice to a map along with a type hint for deserialization.\n  Map<String, dynamic> toMap() => {};\n\n  /// Converts a map to a [ChatToolChoice]. Requires at least a type hint.\n  factory ChatToolChoice.fromMap(Map<String, dynamic> map) =>\n      switch (map['type']) {\n        'none' => ChatToolChoiceNone.fromMap(map),\n        'auto' => ChatToolChoiceAuto.fromMap(map),\n        'required' => ChatToolChoiceRequired.fromMap(map),\n        'forced' => ChatToolChoiceForced.fromMap(map),\n        null => throw ArgumentError('Type is required'),\n        _ => throw UnimplementedError('Unknown type: ${map['type']}'),\n      };\n}\n\n/// {@template chat_tool_choice_none}\n/// The model does not call a tool, and responds to the end-user.\n/// {@endtemplate}\nfinal class ChatToolChoiceNone extends ChatToolChoice {\n  /// {@macro chat_tool_choice_none}\n  const ChatToolChoiceNone();\n\n  /// Converts this ChatToolChoice to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {...super.toMap(), 'type': 'none'};\n\n  /// Converts a map to a [ChatToolChoiceNone].\n  // ignore: avoid_unused_constructor_parameters\n  factory ChatToolChoiceNone.fromMap(Map<String, dynamic> map) =>\n      const ChatToolChoiceNone();\n}\n\n/// {@template chat_tool_choice_auto}\n/// The model can pick between responding to the end-user or calling a tool.\n/// {@endtemplate}\nfinal class ChatToolChoiceAuto extends ChatToolChoice {\n  /// {@macro chat_tool_choice_auto}\n  const ChatToolChoiceAuto();\n\n  /// Converts this ChatToolChoice to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {...super.toMap(), 'type': 'auto'};\n\n  /// Converts a map to a [ChatToolChoiceAuto].\n  // ignore: avoid_unused_constructor_parameters\n  factory ChatToolChoiceAuto.fromMap(Map<String, dynamic> map) 
=>\n      const ChatToolChoiceAuto();\n}\n\n/// {@template chat_tool_choice_required}\n/// The model must call at least one tool, but doesn't force a particular tool.\n/// {@endtemplate}\nfinal class ChatToolChoiceRequired extends ChatToolChoice {\n  /// {@macro chat_tool_choice_none}\n  const ChatToolChoiceRequired();\n\n  /// Converts this ChatToolChoice to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {...super.toMap(), 'type': 'required'};\n\n  /// Converts a map to a [ChatToolChoiceRequired].\n  // ignore: avoid_unused_constructor_parameters\n  factory ChatToolChoiceRequired.fromMap(Map<String, dynamic> map) =>\n      const ChatToolChoiceRequired();\n}\n\n/// {@template chat_tool_choice_forced}\n/// The model is forced to to call the specified tool.\n/// {@endtemplate}\n@immutable\nfinal class ChatToolChoiceForced extends ChatToolChoice {\n  /// {@macro chat_tool_choice_forced}\n  const ChatToolChoiceForced({required this.name});\n\n  /// The name of the tool to call.\n  final String name;\n\n  /// Converts this ChatToolChoice to a map along with a type hint for deserialization.\n  @override\n  Map<String, dynamic> toMap() => {\n    ...super.toMap(),\n    'type': 'forced',\n    'name': name,\n  };\n\n  /// Converts a map to a [ChatToolChoiceForced].\n  factory ChatToolChoiceForced.fromMap(Map<String, dynamic> map) =>\n      ChatToolChoiceForced(name: map['name'] as String);\n\n  @override\n  bool operator ==(covariant final ChatToolChoiceForced other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType && name == other.name;\n\n  @override\n  int get hashCode => name.hashCode;\n}\n\n/// {@template chat_example}\n/// An example of a conversation between the end-user and the model.\n/// {@endtemplate}\nclass ChatExample {\n  /// {@macro chat_example}\n  const ChatExample({required this.input, required this.output});\n\n  /// An example of an input message from the user.\n  final 
ChatMessage input;\n\n  /// An example of what the model should output given the input.\n  final ChatMessage output;\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/chat_models/utils.dart",
    "content": "import 'types.dart';\n\n/// Extensions on `List<ChatMessage>`.\nextension ChatMessagesX on List<ChatMessage> {\n  /// This function is to get a string representation of the chat messages\n  /// based on the message content and role.\n  String toBufferString({\n    final String systemPrefix = SystemChatMessage.defaultPrefix,\n    final String humanPrefix = HumanChatMessage.defaultPrefix,\n    final String aiPrefix = AIChatMessage.defaultPrefix,\n    final String toolPrefix = ToolChatMessage.defaultPrefix,\n  }) {\n    return map((final m) {\n      return switch (m) {\n        SystemChatMessage _ => '$systemPrefix: ${m.contentAsString}',\n        HumanChatMessage _ => '$humanPrefix: ${m.contentAsString}',\n        AIChatMessage _ =>\n          m.toolCalls.isEmpty\n              ? '$aiPrefix: ${m.contentAsString}'\n              : m.toolCalls\n                    .map((c) => '$aiPrefix: ${c.id} ${c.name}(${c.arguments})')\n                    .join('\\n'),\n        ToolChatMessage(toolCallId: final id, content: final c) =>\n          '$toolPrefix: $id=$c',\n        final CustomChatMessage m => '${m.role}: ${m.contentAsString}',\n      };\n    }).join('\\n');\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/document_loaders/base.dart",
    "content": "import '../documents/document.dart';\n\n/// @{template base_document_loader}\n/// Interface for loading documents.\n/// @{endtemplate}\nabstract class BaseDocumentLoader {\n  /// @{macro base_document_loader}\n  const BaseDocumentLoader();\n\n  /// Loads documents lazily.\n  ///\n  /// This is useful for loading large amounts of data, as it allows you to\n  /// process each [Document] as it is loaded, rather than waiting for the\n  /// entire data set to be loaded in memory.\n  Stream<Document> lazyLoad();\n\n  /// Loads a list of documents.\n  ///\n  /// Under the hood, this method calls [lazyLoad] and collects all the results\n  /// into a list. Use this method only with small data sets, as it will load\n  /// all the data into memory at once.\n  Future<List<Document>> load() {\n    return lazyLoad().toList();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/document_loaders/document_loaders.dart",
    "content": "export 'base.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/documents/document.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template document}\n/// Interface for interacting with a document.\n/// {@endtemplate}\n@immutable\nclass Document {\n  /// {@macro document}\n  const Document({\n    this.id,\n    required this.pageContent,\n    this.metadata = const {},\n  });\n\n  /// Optional ID for the document.\n  ///\n  /// It can be used to identify the document in the vector store.\n  final String? id;\n\n  /// The text content of the document.\n  final String pageContent;\n\n  /// The metadata of the document.\n  final Map<String, dynamic> metadata;\n\n  /// Creates a document from a map.\n  factory Document.fromMap(final Map<String, dynamic> map) {\n    return Document(\n      id: map['id'] as String?,\n      pageContent: map['pageContent'] as String,\n      metadata: map['metadata'] as Map<String, dynamic>,\n    );\n  }\n\n  /// Converts the document to a map.\n  Map<String, dynamic> toMap() {\n    return {'id': id, 'pageContent': pageContent, 'metadata': metadata};\n  }\n\n  /// Creates a copy of the document.\n  Document copyWith({\n    final String? id,\n    final String? pageContent,\n    final Map<String, dynamic>? metadata,\n  }) {\n    return Document(\n      id: id ?? this.id,\n      pageContent: pageContent ?? this.pageContent,\n      metadata: metadata ?? this.metadata,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final Document other) {\n    final mapEquals = const MapEquality<String, dynamic>().equals;\n    return identical(this, other) ||\n        id == other.id &&\n            pageContent == other.pageContent &&\n            mapEquals(metadata, other.metadata);\n  }\n\n  @override\n  int get hashCode => id.hashCode ^ pageContent.hashCode ^ metadata.hashCode;\n\n  /// Concatenates the current document with another document.\n  Document concat(final Document other) {\n    return Document(\n      id: id ?? 
other.id,\n      pageContent: '$pageContent${other.pageContent}',\n      metadata: {...metadata, ...other.metadata},\n    );\n  }\n\n  @override\n  String toString() {\n    return 'Document{'\n        'id: $id, '\n        'pageContent: $pageContent, '\n        'metadata: $metadata}';\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/documents/documents.dart",
    "content": "export 'document.dart';\nexport 'transformer.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/documents/transformer.dart",
    "content": "import '../langchain/types.dart';\nimport '../runnables/runnable.dart';\nimport 'document.dart';\n\n/// {@template base_document_transformer}\n/// Base interface for transforming documents.\n/// {@endtemplate}\nabstract class BaseDocumentTransformer\n    extends Runnable<List<Document>, BaseLangChainOptions, List<Document>> {\n  /// {@macro base_document_transformer}\n  const BaseDocumentTransformer()\n    : super(defaultOptions: const BaseLangChainOptions());\n\n  /// Transform a list of documents.\n  ///\n  /// - [input] - The documents to transform.\n  /// - [options] - Not used.\n  @override\n  Future<List<Document>> invoke(\n    final List<Document> input, {\n    final BaseLangChainOptions? options,\n  }) {\n    return transformDocuments(input);\n  }\n\n  /// Transform a list of documents.\n  Future<List<Document>> transformDocuments(final List<Document> documents);\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/embeddings/base.dart",
    "content": "import '../documents/document.dart';\nimport '../language_models/types.dart';\n\n/// {@template embeddings}\n/// Interface for embedding models.\n/// {@endtemplate}\nabstract class Embeddings {\n  /// {@macro embeddings}\n  const Embeddings();\n\n  /// Embed search docs.\n  Future<List<List<double>>> embedDocuments(final List<Document> documents);\n\n  /// Embed query text.\n  Future<List<double>> embedQuery(final String query);\n\n  /// Returns a list of available embedding models from this provider.\n  ///\n  /// This method allows you to programmatically discover which embedding\n  /// models are available from the provider. The returned list contains\n  /// [ModelInfo] objects with metadata about each model.\n  ///\n  /// By default, this returns an empty list. Providers that support\n  /// model listing will override this method to return the actual\n  /// available models.\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = OpenAIEmbeddings(apiKey: '...');\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName ?? \"\"}');\n  /// }\n  /// ```\n  ///\n  /// Note: Not all providers support model listing. For providers that\n  /// don't support this feature, this method returns an empty list.\n  Future<List<ModelInfo>> listModels() async => [];\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/embeddings/embeddings.dart",
    "content": "export 'base.dart';\nexport 'fake.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/embeddings/fake.dart",
    "content": "import 'dart:convert';\nimport 'dart:math';\n\nimport 'package:crypto/crypto.dart';\n\nimport '../documents/document.dart';\nimport 'base.dart';\n\n/// {@template fake_embeddings}\n/// Fake embeddings model for testing.\n///\n/// By default, the embeddings are deterministic (the same text will always\n/// have the same embedding vector). You can change this behavior by setting\n/// [deterministic] to false.\n/// {@endtemplate}\nclass FakeEmbeddings extends Embeddings {\n  /// {@macro fake_embeddings}\n  FakeEmbeddings({this.size = 10, this.deterministic = true});\n\n  /// The size of the embedding vector.\n  final int size;\n\n  /// Whether the embedding vector is deterministic.\n  final bool deterministic;\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    return documents\n        .map((final d) => _getEmbeddings(d.pageContent))\n        .toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    return _getEmbeddings(query);\n  }\n\n  List<double> _getEmbeddings(final String text) {\n    final random = Random(deterministic ? _getSeed(text) : null);\n    return List.generate(size, (final i) => random.nextDouble());\n  }\n\n  int _getSeed(final String text) {\n    final bytes = utf8.encode(text);\n    final digest = sha256.convert(bytes);\n    // We restrict the seed to 32 bits to avoid overflow\n    return (BigInt.parse(digest.toString(), radix: 16) %\n            BigInt.from(pow(2, 32)))\n        .toInt();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/exceptions/base.dart",
    "content": "import 'package:meta/meta.dart';\n\n/// {@template lang_chain_exception}\n/// A base class for all exceptions thrown by LangChain.\n/// {@endtemplate}\n@immutable\nbase class LangChainException implements Exception {\n  /// {@macro lang_chain_exception}\n  const LangChainException({this.message, final String? code, this.stackTrace})\n    : code = code ?? 'exception';\n\n  /// The long form message of the exception.\n  final String? message;\n\n  /// The optional code to accommodate the message.\n  final String code;\n\n  /// The stack trace which provides information to the user about the call\n  /// sequence that triggered an exception\n  final StackTrace? stackTrace;\n\n  @override\n  bool operator ==(covariant final LangChainException other) {\n    if (identical(this, other)) return true;\n    return other.hashCode == hashCode;\n  }\n\n  @override\n  int get hashCode => Object.hash(code, message);\n\n  @override\n  String toString() {\n    var output = '[$code] $message';\n\n    if (stackTrace != null) {\n      output += '\\n\\n$stackTrace';\n    }\n\n    return output;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/exceptions/exceptions.dart",
    "content": "export 'base.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/langchain/base.dart",
    "content": "import '../runnables/runnable.dart';\nimport '../runnables/types.dart';\n\n/// {@template base_lang_chain}\n/// Base class for LangChain components (language models, chains, tools, etc.).\n/// {@endtemplate}\nabstract class BaseLangChain<\n  RunInput extends Object,\n  CallOptions extends RunnableOptions,\n  RunOutput extends Object\n>\n    extends Runnable<RunInput, CallOptions, RunOutput> {\n  /// {@macro base_lang_chain}\n  const BaseLangChain({required super.defaultOptions});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/langchain/langchain.dart",
    "content": "export 'base.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/langchain/types.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../runnables/types.dart';\n\n/// {@template base_lang_chain_options}\n/// Base options class for LangChain components.\n/// {@endtemplate}\n@immutable\nclass BaseLangChainOptions extends RunnableOptions {\n  /// {@macro base_lang_chain_options}\n  const BaseLangChainOptions({super.concurrencyLimit});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/language_models/base.dart",
    "content": "import '../langchain/base.dart';\nimport '../prompts/types.dart';\nimport 'types.dart';\n\n/// {@template base_language_model}\n/// Base class for all language models.\n///\n/// There are two different sub-types of Language Models:\n/// - LLMs: these wrap APIs which take text in and return text.\n/// - ChatModels: these wrap models which take chat messages in and return a\n///   chat message.\n/// {@endtemplate}\nabstract class BaseLanguageModel<\n  Input extends Object,\n  Options extends LanguageModelOptions,\n  Output extends LanguageModelResult\n>\n    extends BaseLangChain<PromptValue, Options, Output> {\n  /// {@macro base_language_model}\n  const BaseLanguageModel({required super.defaultOptions});\n\n  /// Return type of language model.\n  String get modelType;\n\n  /// Tokenizes the given prompt using the encoding used by the language\n  /// model.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final Options? options,\n  });\n\n  /// Returns the number of tokens resulting from [tokenize] the given prompt.\n  ///\n  /// Knowing how many tokens are in a text string can tell you:\n  /// - Whether the string is too long for a text model to process.\n  /// - How much the API call can costs (as usage is usually priced by token).\n  ///\n  /// In message-based models the exact way that tokens are counted from\n  /// messages may change from model to model. Consider the result from this\n  /// method an estimate, not a timeless guarantee.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  ///\n  /// Note: subclasses can override this method to provide a more accurate\n  /// implementation.\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final Options? 
options,\n  }) async {\n    final tokens = await tokenize(promptValue, options: options);\n    return tokens.length;\n  }\n\n  @override\n  String toString() => modelType;\n\n  /// Returns a list of available models from this provider.\n  ///\n  /// This method allows you to programmatically discover which models\n  /// are available from the provider. The returned list contains\n  /// [ModelInfo] objects with metadata about each model.\n  ///\n  /// By default, this returns an empty list. Providers that support\n  /// model listing will override this method to return the actual\n  /// available models.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatOpenAI(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName ?? \"\"}');\n  /// }\n  /// ```\n  ///\n  /// Note: Not all providers support model listing. For providers that\n  /// don't support this feature, this method returns an empty list.\n  Future<List<ModelInfo>> listModels() async => [];\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/language_models/language_models.dart",
    "content": "export 'base.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/language_models/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\nimport '../langchain/types.dart';\n\n/// {@template language_model_options}\n/// Options to pass into the language model.\n/// {@endtemplate}\n@immutable\nabstract class LanguageModelOptions extends BaseLangChainOptions {\n  /// {@macro language_model_options}\n  const LanguageModelOptions({this.model, super.concurrencyLimit});\n\n  /// ID of the language model to use.\n  /// Check the provider's documentation for available models.\n  final String? model;\n\n  @override\n  LanguageModelOptions copyWith({\n    final String? model,\n    final int? concurrencyLimit,\n  });\n}\n\n/// {@template language_model}\n/// Result returned by the model.\n/// {@endtemplate}\n@immutable\nabstract class LanguageModelResult<O extends Object> {\n  /// {@macro language_model}\n  const LanguageModelResult({\n    required this.id,\n    required this.output,\n    required this.finishReason,\n    required this.metadata,\n    required this.usage,\n    this.streaming = false,\n  });\n\n  /// Result id.\n  final String id;\n\n  /// Generated output.\n  final O output;\n\n  /// The reason the model stopped generating tokens.\n  final FinishReason finishReason;\n\n  /// Other metadata about the generation.\n  final Map<String, dynamic> metadata;\n\n  ///  Usage stats for the generation.\n  final LanguageModelUsage usage;\n\n  /// Whether the result of the language model is being streamed.\n  final bool streaming;\n\n  /// Returns the output as a string.\n  ///\n  /// This is a convenience method for getting the first output as a string:\n  /// - If you are using an `LLM`, this will be the output String.\n  /// - If you are using a `ChatModel`, this will be the content of the output `ChatMessage`.\n  String get outputAsString;\n\n  @override\n  bool operator ==(covariant final LanguageModelResult<O> other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType &&\n          id 
== other.id &&\n          output == other.output &&\n          finishReason == other.finishReason &&\n          const MapEquality<String, dynamic>().equals(\n            metadata,\n            other.metadata,\n          ) &&\n          usage == other.usage &&\n          streaming == other.streaming;\n\n  @override\n  int get hashCode =>\n      id.hashCode ^\n      output.hashCode ^\n      finishReason.hashCode ^\n      const MapEquality<String, dynamic>().hash(metadata) ^\n      usage.hashCode ^\n      streaming.hashCode;\n\n  /// Merges this result with another by concatenating the outputs.\n  LanguageModelResult<O> concat(final LanguageModelResult<O> other);\n}\n\n/// {@template language_model_usage}\n/// Usage stats for the generation.\n///\n/// You can use this information to determine how much the model call costed\n/// (as usage is usually priced by token).\n///\n/// This is only available for some models.\n/// {@endtemplate}\n@immutable\nclass LanguageModelUsage {\n  /// {@macro language_model_usage}\n  const LanguageModelUsage({\n    this.promptTokens,\n    this.promptBillableCharacters,\n    this.responseTokens,\n    this.responseBillableCharacters,\n    this.totalTokens,\n  });\n\n  /// The number of tokens in the prompt.\n  ///\n  /// Some providers call this \"input_tokens\".\n  final int? promptTokens;\n\n  /// The total number of billable characters in the prompt if applicable.\n  final int? promptBillableCharacters;\n\n  /// The number of tokens in the completion.\n  ///\n  /// Some providers call this \"output_tokens\".\n  final int? responseTokens;\n\n  /// The total number of billable characters in the completion if applicable.\n  final int? responseBillableCharacters;\n\n  /// The total number of tokens in the prompt and completion.\n  final int? 
totalTokens;\n\n  @override\n  bool operator ==(covariant final LanguageModelUsage other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType &&\n          promptTokens == other.promptTokens &&\n          promptBillableCharacters == other.promptBillableCharacters &&\n          responseTokens == other.responseTokens &&\n          responseBillableCharacters == other.responseBillableCharacters &&\n          totalTokens == other.totalTokens;\n\n  @override\n  int get hashCode =>\n      promptTokens.hashCode ^\n      promptBillableCharacters.hashCode ^\n      responseTokens.hashCode ^\n      responseBillableCharacters.hashCode ^\n      totalTokens.hashCode;\n\n  /// Merges this usage with another by summing the values.\n  LanguageModelUsage concat(final LanguageModelUsage other) {\n    return LanguageModelUsage(\n      promptTokens: promptTokens == null && other.promptTokens == null\n          ? null\n          : (promptTokens ?? 0) + (other.promptTokens ?? 0),\n      promptBillableCharacters:\n          promptBillableCharacters == null &&\n              other.promptBillableCharacters == null\n          ? null\n          : (promptBillableCharacters ?? 0) +\n                (other.promptBillableCharacters ?? 0),\n      responseTokens: responseTokens == null && other.responseTokens == null\n          ? null\n          : (responseTokens ?? 0) + (other.responseTokens ?? 0),\n      responseBillableCharacters:\n          responseBillableCharacters == null &&\n              other.responseBillableCharacters == null\n          ? null\n          : (responseBillableCharacters ?? 0) +\n                (other.responseBillableCharacters ?? 0),\n      totalTokens: totalTokens == null && other.totalTokens == null\n          ? null\n          : (totalTokens ?? 0) + (other.totalTokens ?? 
0),\n    );\n  }\n\n  @override\n  String toString() {\n    return '''\nLanguageModelUsage{\n  promptTokens: $promptTokens, \n  promptBillableCharacters: $promptBillableCharacters, \n  responseTokens: $responseTokens, \n  responseBillableCharacters: $responseBillableCharacters, \n  totalTokens: $totalTokens}\n''';\n  }\n}\n\n/// The reason the model stopped generating tokens.\nenum FinishReason {\n  /// The model hit a natural stop point or a provided stop sequence.\n  ///\n  /// Some providers call this \"end_turn\".\n  stop,\n\n  /// The maximum number of tokens specified in the request was reached.\n  ///\n  /// Some providers call this \"max_tokens\".\n  length,\n\n  /// The content was flagged for content filter reasons.\n  contentFilter,\n\n  /// The content content was flagged for recitation reasons.\n  recitation,\n\n  /// The model called a tool.\n  ///\n  /// Some providers call this \"tool_use\".\n  toolCalls,\n\n  /// The finish reason is unspecified.\n  unspecified,\n}\n\n/// {@template model_info}\n/// Information about an available model from a provider.\n///\n/// This class provides a standardized way to represent model metadata\n/// across different providers. Not all providers supply all fields.\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatOpenAI(apiKey: '...');\n/// final models = await chatModel.listModels();\n/// for (final model in models) {\n///   print('${model.id} - ${model.displayName ?? 
\"\"}');\n/// }\n/// ```\n/// {@endtemplate}\n@immutable\nclass ModelInfo {\n  /// {@macro model_info}\n  const ModelInfo({\n    required this.id,\n    this.displayName,\n    this.description,\n    this.ownedBy,\n    this.created,\n    this.inputTokenLimit,\n    this.outputTokenLimit,\n  });\n\n  /// The model identifier (e.g., \"gpt-4\", \"llama3.2\", \"claude-3-sonnet\").\n  ///\n  /// This is the value you would pass to the model's `options` when invoking.\n  final String id;\n\n  /// Human-readable display name for the model.\n  ///\n  /// Not all providers supply this field.\n  final String? displayName;\n\n  /// Description of the model's capabilities or purpose.\n  ///\n  /// Not all providers supply this field.\n  final String? description;\n\n  /// The organization or owner of the model.\n  ///\n  /// For example: \"openai\", \"system\", \"user\".\n  final String? ownedBy;\n\n  /// Unix timestamp (in seconds) when the model was created.\n  ///\n  /// Not all providers supply this field.\n  final int? created;\n\n  /// Maximum number of input tokens the model can process.\n  ///\n  /// Not all providers supply this field.\n  final int? inputTokenLimit;\n\n  /// Maximum number of output tokens the model can generate.\n  ///\n  /// Not all providers supply this field.\n  final int? 
outputTokenLimit;\n\n  @override\n  bool operator ==(Object other) =>\n      identical(this, other) ||\n      other is ModelInfo &&\n          runtimeType == other.runtimeType &&\n          id == other.id &&\n          displayName == other.displayName &&\n          description == other.description &&\n          ownedBy == other.ownedBy &&\n          created == other.created &&\n          inputTokenLimit == other.inputTokenLimit &&\n          outputTokenLimit == other.outputTokenLimit;\n\n  @override\n  int get hashCode =>\n      id.hashCode ^\n      displayName.hashCode ^\n      description.hashCode ^\n      ownedBy.hashCode ^\n      created.hashCode ^\n      inputTokenLimit.hashCode ^\n      outputTokenLimit.hashCode;\n\n  @override\n  String toString() {\n    return '''\nModelInfo{\n  id: $id,\n  displayName: $displayName,\n  description: $description,\n  ownedBy: $ownedBy,\n  created: $created,\n  inputTokenLimit: $inputTokenLimit,\n  outputTokenLimit: $outputTokenLimit,\n}''';\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/llms/base.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../language_models/language_models.dart';\nimport '../prompts/types.dart';\nimport 'types.dart';\n\n/// {@template base_llm}\n/// Large Language Models base class.\n///\n/// LLMs take in a String and returns a String.\n/// {@endtemplate}\nabstract class BaseLLM<Options extends LLMOptions>\n    extends BaseLanguageModel<String, Options, LLMResult> {\n  /// {@macro base_llm}\n  const BaseLLM({required super.defaultOptions});\n\n  /// Runs the LLM on the given String prompt and returns a String with the\n  /// generated text.\n  ///\n  /// - [prompt] The prompt to pass into the model.\n  /// - [options] Generation options to pass into the LLM.\n  ///\n  /// Example:\n  /// ```dart\n  /// final result = await openai('Tell me a joke.');\n  /// ```\n  Future<String> call(final String prompt, {final Options? options}) async {\n    final result = await invoke(PromptValue.string(prompt), options: options);\n    return result.output;\n  }\n}\n\n/// {@template simple_llm}\n/// [SimpleLLM] provides a simplified interface for working with LLMs.\n/// Rather than expecting the user to implement the full [SimpleLLM.invoke]\n/// method, the user only needs to implement [SimpleLLM.callInternal].\n/// {@endtemplate}\nabstract class SimpleLLM<Options extends LLMOptions> extends BaseLLM<Options> {\n  /// {@macro simple_llm}\n  const SimpleLLM({required super.defaultOptions});\n\n  @override\n  Future<LLMResult> invoke(\n    final PromptValue input, {\n    final Options? options,\n  }) async {\n    final output = await callInternal(input.toString(), options: options);\n    return LLMResult(\n      id: '1',\n      output: output,\n      finishReason: FinishReason.unspecified,\n      metadata: const {},\n      usage: const LanguageModelUsage(),\n    );\n  }\n\n  /// Method which should be implemented by subclasses to run the model.\n  @visibleForOverriding\n  Future<String> callInternal(final String prompt, {final Options? 
options});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/llms/fake.dart",
    "content": "import '../../language_models.dart';\nimport '../prompts/types.dart';\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template fake_list_llm}\n/// Fake LLM for testing.\n/// You can pass in a list of responses to return in order when called.\n/// {@endtemplate}\nclass FakeLLM extends SimpleLLM<FakeLLMOptions> {\n  /// {@macro fake_list_llm}\n  FakeLLM({required this.responses})\n    : super(defaultOptions: const FakeLLMOptions());\n\n  /// Responses to return in order when called.\n  final List<String> responses;\n\n  var _i = 0;\n\n  @override\n  String get modelType => 'fake-list';\n\n  @override\n  Future<String> callInternal(\n    final String prompt, {\n    final LLMOptions? options,\n  }) {\n    return Future<String>.value(responses[_i++ % responses.length]);\n  }\n\n  @override\n  Stream<LLMResult> stream(\n    final PromptValue input, {\n    final LLMOptions? options,\n  }) {\n    final res = responses[_i++ % responses.length].split('');\n    return Stream.fromIterable(res).map(\n      (final item) => LLMResult(\n        id: 'fake-echo',\n        output: item,\n        finishReason: FinishReason.unspecified,\n        metadata: const {},\n        usage: const LanguageModelUsage(),\n        streaming: true,\n      ),\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final LLMOptions? options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n\n/// {@template fake_llm_options}\n/// Fake LLM options for testing.\n/// {@endtemplate}\nclass FakeLLMOptions extends LLMOptions {\n  /// {@macro fake_llm_options}\n  const FakeLLMOptions({super.model, super.concurrencyLimit});\n\n  @override\n  FakeLLMOptions copyWith({final String? model, final int? concurrencyLimit}) {\n    return FakeLLMOptions(\n      model: model ?? this.model,\n      concurrencyLimit: concurrencyLimit ?? 
this.concurrencyLimit,\n    );\n  }\n}\n\n/// {@template fake_echo_llm}\n/// Fake LLM for testing.\n/// It just returns the prompt or streams it char by char.\n/// {@endtemplate}\nclass FakeEchoLLM extends BaseLLM<FakeLLMOptions> {\n  /// {@macro fake_echo_llm}\n  const FakeEchoLLM() : super(defaultOptions: const FakeLLMOptions());\n\n  @override\n  String get modelType => 'fake-echo';\n\n  @override\n  Future<LLMResult> invoke(\n    final PromptValue input, {\n    final LLMOptions? options,\n  }) {\n    return Future<LLMResult>.value(\n      LLMResult(\n        id: 'fake-echo',\n        output: input.toString(),\n        finishReason: FinishReason.stop,\n        metadata: const {},\n        usage: const LanguageModelUsage(),\n      ),\n    );\n  }\n\n  @override\n  Stream<LLMResult> stream(\n    final PromptValue input, {\n    final LLMOptions? options,\n  }) {\n    final promptChars = input.toString().split('');\n    return Stream.fromIterable(promptChars).map(\n      (final item) => LLMResult(\n        id: 'fake-echo',\n        output: item,\n        finishReason: FinishReason.unspecified,\n        metadata: const {},\n        usage: const LanguageModelUsage(),\n        streaming: true,\n      ),\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final LLMOptions? options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n\n/// {@template fake_handler_llm}\n/// Fake LLM for testing.\n/// It returns the string returned by the [handler] function.\n/// {@endtemplate}\nclass FakeHandlerLLM extends SimpleLLM<FakeLLMOptions> {\n  /// {@macro fake_handler_llm}\n  FakeHandlerLLM({required this.handler})\n    : super(defaultOptions: const FakeLLMOptions());\n\n  /// Function called to generate the response.\n  final String Function(String prompt, LLMOptions? 
options, int callCount)\n  handler;\n\n  var _callCount = 0;\n\n  @override\n  String get modelType => 'fake-handler';\n\n  @override\n  Future<String> callInternal(\n    final String prompt, {\n    final LLMOptions? options,\n  }) {\n    return Future.value(handler(prompt, options, ++_callCount));\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final LLMOptions? options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/llms/llms.dart",
    "content": "export 'base.dart';\nexport 'fake.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/llms/types.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../language_models/types.dart';\n\n/// {@template llm_options}\n/// Options to pass into the LLM.\n/// {@endtemplate}\n@immutable\nabstract class LLMOptions extends LanguageModelOptions {\n  /// {@macro llm_options}\n  const LLMOptions({super.model, super.concurrencyLimit});\n}\n\n/// {@template llm_result}\n/// Result returned by the LLM.\n/// {@endtemplate}\nclass LLMResult extends LanguageModelResult<String> {\n  /// {@macro llm_result}\n  const LLMResult({\n    required super.id,\n    required super.output,\n    required super.finishReason,\n    required super.metadata,\n    required super.usage,\n    super.streaming = false,\n  });\n\n  @override\n  String get outputAsString => output;\n\n  @override\n  LLMResult concat(final LanguageModelResult<String> other) {\n    return LLMResult(\n      id: other.id,\n      output: output + other.output,\n      finishReason:\n          finishReason != FinishReason.unspecified &&\n              other.finishReason == FinishReason.unspecified\n          ? finishReason\n          : other.finishReason,\n      metadata: {...metadata, ...other.metadata},\n      usage: usage.concat(other.usage),\n      streaming: other.streaming,\n    );\n  }\n\n  @override\n  String toString() {\n    return '''\nLLMResult{\n  id: $id, \n  output: $output,\n  finishReason: $finishReason,\n  metadata: $metadata,\n  usage: $usage,\n  streaming: $streaming\n}''';\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/memory/base.dart",
    "content": "import 'types.dart';\n\n/// {@template base_memory}\n/// Base interface for memory in chains.\n///\n/// Memory refers to state in Chains. Memory can be used to store information\n/// about past executions of a Chain and inject that information into the\n/// inputs of future executions of the Chain. For example, for conversational\n/// Chains Memory can be used to store conversations and automatically add them\n/// to future model prompts so that the model has the necessary context to\n/// respond coherently to the latest input.\n/// {@endtemplate}\nabstract interface class BaseMemory {\n  /// {@macro base_memory}\n  const BaseMemory();\n\n  /// Default memory key.\n  static const defaultMemoryKey = 'history';\n\n  /// Input keys this memory class will load dynamically to the prompt.\n  Set<String> get memoryKeys;\n\n  /// Returns key-value pairs given the [MemoryInputValues].\n  Future<MemoryVariables> loadMemoryVariables([\n    final MemoryInputValues values = const {},\n  ]);\n\n  /// Save the context of this model run to memory.\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  });\n\n  /// Clear memory contents.\n  Future<void> clear();\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/memory/chat.dart",
    "content": "import 'dart:async';\n\nimport '../chat_history/base.dart';\nimport '../chat_models/types.dart';\nimport '../exceptions/base.dart';\nimport 'base.dart';\nimport 'types.dart';\nimport 'utils.dart';\n\n/// {@template base_chat_memory}\n/// Base interface for chat memory.\n/// {@endtemplate}\nabstract base class BaseChatMemory implements BaseMemory {\n  /// {@macro base_chat_memory}\n  BaseChatMemory({\n    required this.chatHistory,\n    this.inputKey,\n    this.outputKey,\n    this.returnMessages = false,\n  });\n\n  /// The chat history.\n  final BaseChatMessageHistory chatHistory;\n\n  /// The input key to use for the chat history.\n  ///\n  /// If null, the input key is inferred from the prompt (the input key hat\n  /// was filled in by the user (i.e. not a memory key)).\n  final String? inputKey;\n\n  /// The output key to use for the chat history.\n  final String? outputKey;\n\n  /// If true, when [loadMemoryVariables] is called, it will return\n  /// [ChatMessage] objects. 
If false, it will return a String representation\n  /// of the messages.\n  ///\n  /// Set this to true when you are using a Chat model like `ChatOpenAI`.\n  /// Set this to false when you are use a text LLM like `OpenAI`.\n  final bool returnMessages;\n\n  @override\n  Future<void> saveContext({\n    required final MemoryInputValues inputValues,\n    required final MemoryOutputValues outputValues,\n  }) async {\n    // this is purposefully done in sequence so they're saved in order\n    final (input, output) = _getInputOutputValues(inputValues, outputValues);\n\n    if (input is ChatMessage) {\n      await chatHistory.addChatMessage(input);\n    } else {\n      await chatHistory.addHumanChatMessage(input.toString());\n    }\n\n    if (output is ChatMessage) {\n      await chatHistory.addChatMessage(output);\n    } else {\n      await chatHistory.addAIChatMessage(output.toString());\n    }\n  }\n\n  (dynamic input, dynamic output) _getInputOutputValues(\n    final MemoryInputValues inputValues,\n    final MemoryOutputValues outputValues,\n  ) {\n    final promptInputKey =\n        inputKey ?? getPromptInputKey(inputValues, memoryKeys);\n    String outputKey;\n    if (this.outputKey == null) {\n      if (outputValues.isEmpty) {\n        outputKey = '';\n      } else if (outputValues.length == 1) {\n        outputKey = outputValues.keys.first;\n      } else if (outputValues.containsKey('output')) {\n        outputKey = 'output';\n      } else {\n        throw LangChainException(\n          message: 'One output key expected, got ${outputValues.keys}',\n        );\n      }\n    } else {\n      outputKey = this.outputKey!;\n    }\n    return (inputValues[promptInputKey], outputValues[outputKey]);\n  }\n\n  @override\n  Future<void> clear() async {\n    await chatHistory.clear();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/memory/memory.dart",
    "content": "export 'base.dart';\nexport 'chat.dart';\nexport 'types.dart';\nexport 'utils.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/memory/types.dart",
    "content": "/// Input values to load from memory.\ntypedef MemoryInputValues = Map<String, dynamic>;\n\n/// Output values to save to memory.\ntypedef MemoryOutputValues = Map<String, dynamic>;\n\n/// Key-value pairs loaded from memory.\ntypedef MemoryVariables = Map<String, dynamic>;\n"
  },
  {
    "path": "packages/langchain_core/lib/src/memory/utils.dart",
    "content": "import '../agents/base.dart';\nimport '../exceptions/base.dart';\nimport 'types.dart';\n\n/// This function is used by memory classes to select the input value to use\n/// for the memory.\n///\n/// Given the [inputValues] and [memoryKeys], it returns the input key\n/// from the prompt that was filled in by the user (i.e. not a memory key).\nString getPromptInputKey(\n  final MemoryInputValues inputValues,\n  final Set<String> memoryKeys,\n) {\n  // Reserved keys can be passed as input but is not used to format the prompt\n  final promptInputKeys = inputValues.keys.toSet().difference({\n    ...memoryKeys,\n    'stop',\n    BaseActionAgent.agentScratchpadInputKey,\n  });\n  if (promptInputKeys.length != 1) {\n    throw LangChainException(\n      message:\n          'One input key expected got $promptInputKeys. '\n          'If you have multiple input keys in your prompt you need to specify '\n          'the input key to use for the memory using the `inputKey` parameter.',\n    );\n  }\n  return promptInputKeys.first;\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/base.dart",
    "content": "import '../runnables/runnable.dart';\nimport 'types.dart';\n\n/// {@template base_llm_output_parser}\n/// Class to parse the output of a [Runnable] invocation.\n/// {@endtemplate}\nabstract class BaseOutputParser<\n  ParserInput extends Object?,\n  CallOptions extends OutputParserOptions,\n  ParserOutput extends Object?\n>\n    extends Runnable<ParserInput, CallOptions, ParserOutput> {\n  /// {@macro base_llm_output_parser}\n  const BaseOutputParser({required super.defaultOptions});\n\n  /// Invokes the output parser on the given input.\n  ///\n  /// - [input] - The result of an LLM call.\n  /// - [options] - Not used.\n  @override\n  Future<ParserOutput> invoke(\n    final ParserInput input, {\n    final CallOptions? options,\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/exceptions.dart",
    "content": "import '../exceptions/base.dart';\n\n/// {@template output_parser_exception}\n/// Exception that output parsers should raise to signify a parsing error.\n///\n/// This exists to differentiate parsing errors from other code or execution\n/// errors that also may arise inside the output parser. OutputParserExceptions\n/// will be available to catch and handle in ways to fix the parsing error,\n/// while other errors will be raised.\n/// {@endtemplate}\nfinal class OutputParserException extends LangChainException {\n  /// {@macro output_parser_exception}\n  const OutputParserException({super.message = ''})\n    : super(code: 'output_parser');\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/json.dart",
    "content": "import 'package:collection/collection.dart' show DeepCollectionEquality;\nimport 'package:rxdart/rxdart.dart' show DoExtensions;\n\nimport '../runnables/runnable.dart';\nimport 'base.dart';\nimport 'string.dart';\nimport 'types.dart';\nimport 'utils.dart';\n\n/// {@template json_output_parser}\n/// Output parser that returns the output of the previous [Runnable] as a\n/// JSON [Map].\n///\n/// - [ParserInput] - The type of the input to the parser.\n///\n/// Example:\n/// ```dart\n/// final model = ChatOpenAI(\n///   apiKey: openAiApiKey,\n///   defaultOptions: ChatOpenAIOptions(\n///     responseFormat: ChatOpenAIResponseFormat(\n///       type: ChatOpenAIResponseFormatType.jsonObject,\n///     ),\n///   ),\n/// );\n/// final parser = JsonOutputParser<ChatResult>();\n/// final chain = model.pipe(parser);\n/// final stream = chain.stream(\n///   PromptValue.string(\n///     'Output a list of the countries france, spain and japan and their '\n///     'populations in JSON format. Use a dict with an outer key of '\n///     '\"countries\" which contains a list of countries. 
'\n///     'Each country should have the key \"name\" and \"population\"',\n///   ),\n/// );\n/// await stream.forEach((final chunk) => print('$chunk|'));\n/// // {}|\n/// // {countries: []}|\n/// // {countries: [{name: France}]}|\n/// // {countries: [{name: France, population: 67076000}, {}]}|\n/// // {countries: [{name: France, population: 67076000}, {name: Spain}]}|\n/// // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}]}|\n/// // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan}]}|\n/// // {countries: [{name: France, population: 67076000}, {name: Spain, population: 46723749}, {name: Japan, population: 126476461}]}|\n/// ```\n/// {@endtemplate}\nclass JsonOutputParser<ParserInput extends Object?>\n    extends\n        BaseOutputParser<\n          ParserInput,\n          OutputParserOptions,\n          Map<String, dynamic>\n        > {\n  /// {@macro json_output_parser}\n  JsonOutputParser({this.reduceOutputStream = false})\n    : _stringOutputParser = StringOutputParser<ParserInput>(),\n      super(defaultOptions: const OutputParserOptions());\n\n  /// When invoking this parser with [Runnable.stream], every item from the\n  /// input stream will be parsed and emitted by default.\n  ///\n  /// If [reduceOutputStream] is set to `true`, the parser will reduce the\n  /// output stream into a single String and emit it as a single item.\n  ///\n  /// Visual example:\n  /// - reduceOutputStream = false\n  /// 'A', 'B', 'C' -> 'A', 'B', 'C'\n  /// - reduceOutputStream = true\n  /// 'A', 'B', 'C' -> 'ABC'\n  final bool reduceOutputStream;\n\n  final StringOutputParser<ParserInput> _stringOutputParser;\n\n  var _lastInputStr = '';\n  Map<String, dynamic> _lastOutputMap = {};\n\n  @override\n  Future<Map<String, dynamic>> invoke(\n    final ParserInput input, {\n    final OutputParserOptions? 
options,\n  }) {\n    return _parseInvoke(input, options: options);\n  }\n\n  @override\n  Stream<Map<String, dynamic>> stream(\n    final ParserInput input, {\n    final OutputParserOptions? options,\n  }) async* {\n    yield await _parseStream(input, options: options);\n  }\n\n  @override\n  Stream<Map<String, dynamic>> streamFromInputStream(\n    final Stream<ParserInput> inputStream, {\n    final OutputParserOptions? options,\n  }) async* {\n    if (reduceOutputStream) {\n      await inputStream.forEach(\n        (final input) => _parseStream(input, options: options),\n      );\n      yield _lastOutputMap;\n      _clear();\n    } else {\n      yield* super\n          .streamFromInputStream(inputStream, options: options)\n          .distinct(const DeepCollectionEquality().equals)\n          .doOnCancel(_clear);\n    }\n  }\n\n  Future<Map<String, dynamic>> _parseInvoke(\n    final ParserInput input, {\n    final OutputParserOptions? options,\n  }) async {\n    final inputStr = await _stringOutputParser.invoke(input, options: options);\n    return _parse(inputStr);\n  }\n\n  Future<Map<String, dynamic>> _parseStream(\n    final ParserInput input, {\n    final OutputParserOptions? options,\n  }) async {\n    final inputStr = await _stringOutputParser.invoke(input, options: options);\n    _lastInputStr = '$_lastInputStr$inputStr';\n    return _lastOutputMap = _parse(_lastInputStr, fallback: _lastOutputMap);\n  }\n\n  Map<String, dynamic> _parse(\n    final String input, {\n    Map<String, dynamic> fallback = const {},\n  }) {\n    final result = parsePartialJson(input);\n    return result ?? fallback;\n  }\n\n  void _clear() {\n    _lastInputStr = '';\n    _lastOutputMap = {};\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/output_parsers.dart",
    "content": "export 'base.dart';\nexport 'exceptions.dart';\nexport 'json.dart';\nexport 'string.dart';\nexport 'tools.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/string.dart",
    "content": "import '../../llms.dart';\nimport '../chat_models/types.dart';\nimport '../documents/document.dart';\nimport '../language_models/types.dart';\nimport '../runnables/runnable.dart';\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template string_output_parser}\n/// Output parser that returns the output of the previous [Runnable] as a\n/// `String`.\n///\n/// - [ParserInput] - The type of the input to the parser.\n///\n/// If the input is:\n/// - `null`, the parser returns an empty String.\n/// - A [LLMResult], the parser returns the output String.\n/// - A [ChatResult], the parser returns the content of the output message as a String.\n/// - A [ChatMessage], the parser returns the content of the message as a String.\n/// - A [Document], the parser returns the page content as a String.\n/// - Anything else, the parser returns the String representation of the input.\n///\n/// Example:\n/// ```dart\n/// final model = ChatOpenAI(apiKey: openAiApiKey);\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'Tell me a joke about {topic}',\n/// );\n/// final chain = promptTemplate | model | StringOutputParser();\n/// final res = await chain.invoke({'topic': 'bears'});\n/// print(res);\n/// // Why don't bears wear shoes? 
Because they have bear feet!\n/// ```\n/// {@endtemplate}\nclass StringOutputParser<ParserInput extends Object?>\n    extends BaseOutputParser<ParserInput, OutputParserOptions, String> {\n  /// {@macro string_output_parser}\n  const StringOutputParser({this.reduceOutputStream = false})\n    : super(defaultOptions: const OutputParserOptions());\n\n  /// When invoking this parser with [Runnable.stream], every item from the\n  /// input stream will be parsed and emitted by default.\n  ///\n  /// If [reduceOutputStream] is set to `true`, the parser will reduce the\n  /// output stream into a single String and emit it as a single item.\n  ///\n  /// Visual example:\n  /// - reduceOutputStream = false\n  /// 'A', 'B', 'C' -> 'A', 'B', 'C'\n  /// - reduceOutputStream = true\n  /// 'A', 'B', 'C' -> 'ABC'\n  final bool reduceOutputStream;\n\n  @override\n  Future<String> invoke(\n    final ParserInput input, {\n    final OutputParserOptions? options,\n  }) {\n    return Future.value(_parse(input));\n  }\n\n  @override\n  Stream<String> streamFromInputStream(\n    final Stream<ParserInput> inputStream, {\n    final OutputParserOptions? options,\n  }) async* {\n    if (reduceOutputStream) {\n      yield await inputStream.map(_parse).reduce((final a, final b) => '$a$b');\n    } else {\n      yield* inputStream.map(_parse);\n    }\n  }\n\n  String _parse(final ParserInput input) {\n    final output = switch (input) {\n      null => '',\n      final LanguageModelResult res => res.outputAsString,\n      final ChatMessage res => res.contentAsString,\n      final Document res => res.pageContent,\n      _ => input.toString(),\n    };\n    return output;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/tools.dart",
    "content": "import 'package:collection/collection.dart' show DeepCollectionEquality;\nimport 'package:rxdart/rxdart.dart' show DoExtensions;\n\nimport '../chat_models/types.dart';\nimport 'base.dart';\nimport 'types.dart';\nimport 'utils.dart';\n\n/// {@template tools_output_parser}\n/// A parser that returns the list of tool calls returned by the model.\n///\n/// When streaming, the parser attempts to “auto-complete” the partial json\n/// from each chunk into a valid state.\n///\n/// Example:\n/// ```dart\n/// const tool = ToolSpec(\n///   name: 'joke',\n///   description: 'A joke',\n///   inputJsonSchema: {\n///     'type': 'object',\n///     'properties': {\n///       'setup': {\n///         'type': 'string',\n///         'description': 'The setup for the joke',\n///       },\n///       'punchline': {\n///         'type': 'string',\n///         'description': 'The punchline to the joke',\n///       },\n///     },\n///     'required': ['location', 'punchline'],\n///   },\n/// );\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'tell me a long joke about {foo}',\n/// );\n/// final chat = ChatOpenAI(\n///   apiKey: openaiApiKey,\n///   defaultOptions: ChatOpenAIOptions(\n///     temperature: 0,\n///   ),\n/// ).bind(\n///   ChatOpenAIOptions(\n///     tools: [tool],\n///     toolChoice: ChatToolChoice.forced(name: 'joke'),\n///   ),\n/// );\n/// final outputParser = ToolsOutputParser();\n/// final chain = promptTemplate.pipe(chat).pipe(outputParser);\n/// final res = await chain.invoke({'foo': 'bears'});\n/// print(res);\n/// // [ParsedToolCall{\n/// //   id: call_5TU1iYgYO3Z81eAuTe7J23f7,\n/// //   name: joke,\n/// //   arguments: {\n/// //     setup: Why don't bears like fast food restaurants?,\n/// //     punchline: Because they can't bear the wait!\n/// //   },\n/// // }]\n/// ```\n/// {@endtemplate}\nclass ToolsOutputParser\n    extends\n        BaseOutputParser<\n          ChatResult,\n          OutputParserOptions,\n          
List<ParsedToolCall>\n        > {\n  /// {@macro tools_output_parser}\n  ToolsOutputParser({this.reduceOutputStream = false})\n    : super(defaultOptions: const OutputParserOptions());\n\n  /// When invoking this parser with [Runnable.stream], every item from the\n  /// input stream will be parsed and emitted by default.\n  ///\n  /// If [reduceOutputStream] is set to `true`, the parser will reduce the\n  /// output stream into a single String and emit it as a single item.\n  ///\n  /// Visual example:\n  /// - reduceOutputStream = false\n  /// 'A', 'B', 'C' -> 'A', 'B', 'C'\n  /// - reduceOutputStream = true\n  /// 'A', 'B', 'C' -> 'ABC'\n  final bool reduceOutputStream;\n\n  ChatResult? _lastResult;\n  List<ParsedToolCall> _lastOutput = [];\n\n  @override\n  Future<List<ParsedToolCall>> invoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) {\n    return _parseInvoke(input, options: options);\n  }\n\n  @override\n  Stream<List<ParsedToolCall>> stream(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) async* {\n    yield await _parseStream(input, options: options);\n  }\n\n  @override\n  Stream<List<ParsedToolCall>> streamFromInputStream(\n    final Stream<ChatResult> inputStream, {\n    final OutputParserOptions? options,\n  }) async* {\n    if (reduceOutputStream) {\n      await inputStream.forEach(\n        (final input) => _parseStream(input, options: options),\n      );\n      yield _lastOutput;\n      _clear();\n    } else {\n      yield* super\n          .streamFromInputStream(inputStream, options: options)\n          .distinct(const DeepCollectionEquality().equals)\n          .doOnCancel(_clear);\n    }\n  }\n\n  Future<List<ParsedToolCall>> _parseInvoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) async {\n    return _parse(input.output.toolCalls);\n  }\n\n  Future<List<ParsedToolCall>> _parseStream(\n    final ChatResult input, {\n    final OutputParserOptions? 
options,\n  }) async {\n    final mergedResult = _lastResult?.concat(input) ?? input;\n    _lastResult = mergedResult;\n    return _lastOutput = _parse(\n      mergedResult.output.toolCalls,\n      fallback: _lastOutput,\n    );\n  }\n\n  List<ParsedToolCall> _parse(\n    final List<AIChatMessageToolCall>? toolCalls, {\n    List<ParsedToolCall> fallback = const [],\n  }) {\n    final List<ParsedToolCall> output = [];\n    for (var i = 0; i < (toolCalls?.length ?? 0); i++) {\n      final toolCall = toolCalls![i];\n      final arguments = toolCall.arguments.isNotEmpty\n          ? toolCall.arguments\n          : parsePartialJson(toolCall.argumentsRaw) ??\n                (i < fallback.length\n                    ? fallback[i].arguments\n                    : const <String, dynamic>{});\n      output.add(\n        ParsedToolCall(\n          id: toolCall.id,\n          name: toolCall.name,\n          arguments: arguments,\n        ),\n      );\n    }\n    return output;\n  }\n\n  void _clear() {\n    _lastResult = null;\n    _lastOutput = [];\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/types.dart",
    "content": "import 'package:collection/collection.dart' show DeepCollectionEquality;\nimport 'package:meta/meta.dart' show immutable;\n\nimport '../langchain/types.dart';\n\n/// {@template output_parser_options}\n/// Options to pass to an output parser.\n/// {@endtemplate}\n@immutable\nclass OutputParserOptions extends BaseLangChainOptions {\n  /// {@macro output_parser_options}\n  const OutputParserOptions({super.concurrencyLimit});\n}\n\n/// {@template parsed_tool_call}\n/// A parsed tool call from the model.\n/// {@endtemplate}\n@immutable\nclass ParsedToolCall {\n  /// {@macro parsed_tool_call}\n  const ParsedToolCall({\n    required this.id,\n    required this.name,\n    required this.arguments,\n  });\n\n  /// The id of the tool to call.\n  final String id;\n\n  /// The name of the tool to call.\n  final String name;\n\n  /// The arguments to pass to the tool in JSON Map format.\n  ///\n  /// Note that the model does not always generate a valid JSON, in that case,\n  /// [arguments] will be empty.\n  ///\n  /// The model may also hallucinate parameters not defined by your tool schema.\n  /// Validate the arguments in your code before calling your tool.\n  final Map<String, dynamic> arguments;\n\n  /// Converts the [ParsedToolCall] to a [Map].\n  Map<String, dynamic> toMap() {\n    return {'id': id, 'name': name, 'arguments': arguments};\n  }\n\n  @override\n  bool operator ==(covariant final ParsedToolCall other) {\n    final mapEquals = const DeepCollectionEquality().equals;\n    return identical(this, other) ||\n        id == other.id &&\n            name == other.name &&\n            mapEquals(arguments, other.arguments);\n  }\n\n  @override\n  int get hashCode {\n    return id.hashCode ^ name.hashCode ^ arguments.hashCode;\n  }\n\n  @override\n  String toString() {\n    return '''\nParsedToolCall{\n  id: $id,\n  name: $name,\n  arguments: $arguments,\n}''';\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/output_parsers/utils.dart",
    "content": "import 'dart:convert' show jsonDecode;\n\n/// Parses a JSON string that may be missing some closing characters.\n///\n/// This function is useful for parsing an incomplete JSON string that is\n/// being streamed from an API.\n///\n/// For example:\n/// ```dart\n/// parsePartialJson('{\"foo\":\"bar\"'); // Returns {\"foo\":\"bar\"}\n/// ```\ndynamic parsePartialJson(final String s) {\n  // Attempt to parse the string as-is\n  try {\n    return jsonDecode(s);\n  } on Exception {\n    // Pass\n  }\n\n  // Initialize variables\n  var newStr = '';\n  final stack = <String>[];\n  var isInsideString = false;\n  var escaped = false;\n\n  // Process each character in the string one at a time\n  for (var i = 0; i < s.length; i++) {\n    var char = s[i];\n    if (isInsideString) {\n      if (char == '\"' && !escaped) {\n        isInsideString = false;\n      } else if (char == '\\n' && !escaped) {\n        char = r'\\n'; // Replace the newline character with the escape sequence\n      } else if (char == r'\\') {\n        escaped = !escaped;\n      } else {\n        escaped = false;\n      }\n    } else {\n      if (char == '\"') {\n        isInsideString = true;\n        escaped = false;\n      } else if (char == '{') {\n        stack.add('}');\n      } else if (char == '[') {\n        stack.add(']');\n      } else if (char == '}' || char == ']') {\n        if (stack.isNotEmpty && stack.last == char) {\n          stack.removeLast();\n        } else {\n          // Mismatched closing character; the input is malformed\n          return null;\n        }\n      }\n    }\n\n    // Append the processed character to the new string\n    newStr += char;\n  }\n\n  // If we're still inside a string at the end of processing,\n  // we need to close the string\n  if (isInsideString) {\n    newStr += '\"';\n  }\n\n  // Close any remaining open structures in the reverse order\n  // that they were opened\n  while (stack.isNotEmpty) {\n    newStr += stack.removeLast();\n  }\n\n 
 // Attempt to parse the modified string as JSON\n  try {\n    return jsonDecode(newStr);\n  } on Exception {\n    // If we still can't parse the string as JSON,\n    // return null to indicate failure\n    return null;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/base_chat_message_prompt.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../chat_models/types.dart';\nimport '../langchain/types.dart';\nimport '../runnables/runnable.dart';\nimport 'base_prompt.dart';\nimport 'chat_prompt.dart';\nimport 'prompt.dart';\nimport 'types.dart';\n\n/// {@template chat_message_prompt_template}\n/// Base class for all message templates in a [ChatPromptTemplate].\n/// {@endtemplate}\n@immutable\nabstract base class ChatMessagePromptTemplate\n    extends Runnable<InputValues, BaseLangChainOptions, List<ChatMessage>> {\n  /// {@macro chat_message_prompt_template}\n  const ChatMessagePromptTemplate({required this.prompt})\n    : super(defaultOptions: const BaseLangChainOptions());\n\n  /// The prompt template for the message.\n  final BasePromptTemplate prompt;\n\n  /// Input variables of all the messages in the prompt template.\n  Set<String> get inputVariables;\n\n  /// Partial variables.\n  PartialValues? get partialVariables;\n\n  /// Creates a [SystemChatMessagePromptTemplate] from a string template.\n  ///\n  /// This is a convenience factory equivalent to [SystemChatMessagePromptTemplate.fromTemplate].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.system(\"Here's some context: {context}\");\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatMessagePromptTemplate.system(\n    final String template, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return SystemChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [HumanChatMessagePromptTemplate] from a string template.\n  ///\n  /// This is a convenience factory equivalent to [HumanChatMessagePromptTemplate.fromTemplate].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.human(\n  ///   \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n  /// );\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatMessagePromptTemplate.human(\n    final String template, {\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return HumanChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [AIChatMessagePromptTemplate] from a string template.\n  ///\n  /// This is a convenience factory equivalent to [AIChatMessagePromptTemplate.fromTemplate].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.ai(\"I'm an AI. I'm {foo}. I'm {bar}.\");\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatMessagePromptTemplate.ai(\n    final String template, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return AIChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [CustomChatMessagePromptTemplate] from a string template.\n  ///\n  /// This is a convenience factory equivalent to [CustomChatMessagePromptTemplate.fromTemplate].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.custom(\n  ///   \"I'm an assistant. I'm {foo}. I'm {bar}.\",\n  ///   role: 'assistant',\n  /// );\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [role] the role of the message.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatMessagePromptTemplate.custom(\n    final String template, {\n    required final String role,\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return CustomChatMessagePromptTemplate.fromTemplate(\n      template,\n      role: role,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [MessagePlaceholder], a prompt template that assumes the variable is a [ChatMessage].\n  ///\n  /// This is useful when you want to use a single [ChatMessage] in the prompt.\n  /// For example, when you decide the type of message at runtime (e.g.\n  /// [HumanChatMessage] or [FunctionChatMessage]).\n  ///\n  /// This is a convenience factory equivalent to [MessagePlaceholder] constructor.\n  ///\n  /// If you need to add multiple messages, use [ChatMessagePromptTemplate.messagesPlaceholder].\n  ///\n  /// Example:\n  /// ```dart\n  /// final promptTemplate = ChatPromptTemplate.fromPromptMessages([\n  ///   ChatMessagePromptTemplate.system('You are a helpful AI assistant'),\n  ///   
ChatMessagePromptTemplate.messagesPlaceholder('history'),\n  ///   ChatMessagePromptTemplate.messagePlaceholder('input'),\n  /// ]);\n  /// ```\n  ///\n  /// - [variableName] the name of the placeholder variable.\n  factory ChatMessagePromptTemplate.messagePlaceholder(\n    final String variableName,\n  ) {\n    return MessagePlaceholder(variableName: variableName);\n  }\n\n  /// Creates a [MessagesPlaceholder], a prompt template that assumes the variable is a list of [ChatMessage].\n  ///\n  /// This is useful for when you want to use a list of messages in the prompt.\n  /// For example, after retrieving them from memory.\n  ///\n  /// This is a convenience factory equivalent to [MessagesPlaceholder] constructor.\n  ///\n  /// If you need to add a single message, use [ChatMessagePromptTemplate.messagePlaceholder].\n  ///\n  /// Example:\n  /// ```dart\n  /// final promptTemplate = ChatPromptTemplate.fromPromptMessages([\n  ///   ChatMessagePromptTemplate.system('You are a helpful AI assistant'),\n  ///   ChatMessagePromptTemplate.messagesPlaceholder('history'),\n  ///   ChatMessagePromptTemplate.messagePlaceholder('input'),\n  /// ]);\n  /// ```\n  ///\n  /// - [variableName] the name of the placeholder variable.\n  factory ChatMessagePromptTemplate.messagesPlaceholder(\n    final String variableName,\n  ) {\n    return MessagesPlaceholder(variableName: variableName);\n  }\n\n  /// Format the prompt with the inputs returning a list of messages.\n  ///\n  /// - [input] - Any arguments to be passed to the prompt template.\n  @override\n  Future<List<ChatMessage>> invoke(\n    final InputValues input, {\n    final BaseLangChainOptions? 
options,\n  }) {\n    return Future.value(formatMessages(input));\n  }\n\n  /// Format the prompt with the inputs returning a list of messages.\n  ///\n  /// - [values] - Any arguments to be passed to the prompt template.\n  List<ChatMessage> formatMessages(final InputValues values);\n\n  @override\n  bool operator ==(covariant final ChatMessagePromptTemplate other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType && prompt == other.prompt;\n\n  @override\n  int get hashCode => prompt.hashCode;\n\n  @override\n  String toString() {\n    return '''\nBaseChatMessagePromptTemplate{\n  prompt: $prompt, \n  inputVariables: $inputVariables, \n  partialVariables: $partialVariables,\n}''';\n  }\n\n  /// Return a new [ChatMessagePromptTemplate] instance with the given\n  /// values.\n  ChatMessagePromptTemplate copyWith({final BasePromptTemplate? prompt});\n}\n\n/// {@template string_message_prompt_template}\n/// Base class for all string message templates in a [ChatPromptTemplate].\n/// {@endtemplate}\n@immutable\nabstract base class StringMessagePromptTemplate\n    extends ChatMessagePromptTemplate {\n  /// {@macro string_message_prompt_template}\n  const StringMessagePromptTemplate({required final PromptTemplate prompt})\n    : super(prompt: prompt);\n\n  @override\n  PromptTemplate get prompt => super.prompt as PromptTemplate;\n\n  @override\n  Set<String> get inputVariables => prompt.inputVariables;\n\n  @override\n  PartialValues? get partialVariables => prompt.partialVariables;\n\n  @override\n  List<ChatMessage> formatMessages(final InputValues values) {\n    return [format(values)];\n  }\n\n  /// Format the prompt with the inputs.\n  ///\n  /// - [values] - Any arguments to be passed to the prompt template.\n  ChatMessage format([final InputValues values = const {}]);\n\n  /// Return a new [StringMessagePromptTemplate] instance with the given\n  /// values.\n  @override\n  StringMessagePromptTemplate copyWith({final BasePromptTemplate? 
prompt});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/base_chat_prompt.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../chat_models/types.dart';\nimport 'base_prompt.dart';\nimport 'types.dart';\n\n/// {@template base_chat_prompt_template}\n/// Base class for chat prompt templates.\n///\n/// It exposes two methods:\n/// - [format]: returns a [String] prompt given a set of input values.\n/// - [formatPrompt]: returns a [PromptValue] given a set of input values.\n/// - [formatMessages]: returns a list of [ChatMessage] given a set of input values.\n/// {@endtemplate}\n@immutable\nabstract base class BaseChatPromptTemplate extends BasePromptTemplate {\n  /// {@macro base_chat_prompt_template}\n  const BaseChatPromptTemplate({\n    required super.inputVariables,\n    super.partialVariables,\n  });\n\n  @override\n  String format(final InputValues values) {\n    return formatPrompt(values).toString();\n  }\n\n  @override\n  PromptValue formatPrompt(final InputValues values) {\n    return PromptValue.chat(formatMessages(values));\n  }\n\n  /// Format input values into a list of messages.\n  List<ChatMessage> formatMessages(final InputValues values);\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/base_prompt.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\nimport '../langchain/types.dart';\nimport '../runnables/runnable.dart';\nimport '../utils/reduce.dart';\nimport 'template.dart';\nimport 'types.dart';\n\n/// {@template base_prompt_template}\n/// Base class for prompt templates.\n///\n/// It exposes two methods:\n/// - [format] returns a [String] prompt given a set of input values.\n/// - [formatPrompt] returns a [PromptValue] given a set of input values.\n/// {@endtemplate}\n@immutable\nabstract base class BasePromptTemplate\n    extends Runnable<InputValues, BaseLangChainOptions, PromptValue> {\n  /// {@macro base_prompt_template}\n  const BasePromptTemplate({\n    required this.inputVariables,\n    this.partialVariables,\n  }) : super(defaultOptions: const BaseLangChainOptions());\n\n  /// A set of the names of the variables the prompt template expects.\n  final Set<String> inputVariables;\n\n  /// Partial variables.\n  final PartialValues? 
partialVariables;\n\n  /// The type of the prompt template.\n  String get type;\n\n  /// Return a partial of the prompt template.\n  BasePromptTemplate partial(final PartialValues values) {\n    final newInputVariables = inputVariables\n        .where((final variable) => !values.keys.contains(variable))\n        .toSet();\n    final newPartialVariables = {...?partialVariables, ...values};\n    return copyWith(\n      inputVariables: newInputVariables,\n      partialVariables: newPartialVariables,\n    );\n  }\n\n  /// Validate the integrity of the prompt template, checking that all the\n  /// variables are present and that the right format is used.\n  ///\n  /// Throws a [TemplateValidationException] if the template is not valid.\n  void validateTemplate() {\n    throw UnimplementedError(\n      'This method should be implemented by subclasses.',\n    );\n  }\n\n  /// Format the prompt given the input values and return a formatted prompt\n  /// value.\n  ///\n  /// - [input] - Any arguments to be passed to the prompt template.\n  @override\n  Future<PromptValue> invoke(\n    final InputValues input, {\n    final BaseLangChainOptions? options,\n  }) {\n    return Future.value(formatPrompt(input));\n  }\n\n  @override\n  Stream<PromptValue> streamFromInputStream(\n    final Stream<InputValues> inputStream, {\n    final BaseLangChainOptions? options,\n  }) async* {\n    final List<InputValues> input = await inputStream.toList();\n    final InputValues reduced = input.isEmpty\n        ? 
const {}\n        : reduce<InputValues>(input);\n    yield* stream(reduced, options: options);\n  }\n\n  /// Format the prompt given the input values and return a formatted string.\n  ///\n  /// - [values] - Any arguments to be passed to the prompt template.\n  String format(final InputValues values);\n\n  /// Format the prompt given the input values and return a formatted prompt\n  /// value.\n  ///\n  /// - [values] - Any arguments to be passed to the prompt template.\n  PromptValue formatPrompt(final InputValues values);\n\n  /// Merge the partial variables with the user variables.\n  @protected\n  Map<String, Object> mergePartialAndUserVariables(\n    final Map<String, dynamic> userVariables,\n  ) {\n    return {...?partialVariables, ...userVariables};\n  }\n\n  @override\n  bool operator ==(covariant final BasePromptTemplate other) {\n    const setEqualityInputVariables = SetEquality<String>();\n    const mapEqualityPartialVariables = MapEquality<String, dynamic>();\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            setEqualityInputVariables.equals(\n              inputVariables,\n              other.inputVariables,\n            ) &&\n            mapEqualityPartialVariables.equals(\n              partialVariables,\n              other.partialVariables,\n            );\n  }\n\n  @override\n  int get hashCode => inputVariables.hashCode ^ partialVariables.hashCode;\n\n  @override\n  String toString() {\n    return '''\nBasePromptTemplate{\n  inputVariables: $inputVariables, \n  partialVariables: $partialVariables,\n}\n  ''';\n  }\n\n  /// Copy the prompt template with the given parameters.\n  BasePromptTemplate copyWith({\n    final Set<String>? inputVariables,\n    final Map<String, dynamic>? partialVariables,\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/chat_prompt.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:cross_file/cross_file.dart';\nimport 'package:meta/meta.dart';\n\nimport '../chat_models/chat_models.dart';\nimport 'base_chat_message_prompt.dart';\nimport 'base_chat_prompt.dart';\nimport 'base_prompt.dart';\nimport 'prompt.dart';\nimport 'template.dart';\nimport 'types.dart';\n\n/// {@template chat_prompt_template}\n/// A prompt template for chat models.\n///\n/// Whereas LLMs take a string as prompt, Chat models take a list of messages.\n/// [ChatPromptTemplate] uses a list of template messages to generate the final prompt.\n///\n/// Each template message can be:\n/// - [SystemChatMessagePromptTemplate] (for system messages)\n/// - [HumanChatMessagePromptTemplate] (for human messages)\n/// - [AIChatMessagePromptTemplate] (for AI messages)\n/// - [CustomChatMessagePromptTemplate] (for custom role messages)\n/// - [MessagePlaceholder] (for a single message placeholder)\n/// - [MessagesPlaceholder] (for a list of messages placeholder)\n///\n/// Example:\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromPromptMessages([\n///   ChatMessagePromptTemplate.system(\"Here's some context: {context}\"),\n///   ChatMessagePromptTemplate.human(\"Hello {foo}, I'm {bar}. Thanks for the {context}\"),\n///   ChatMessagePromptTemplate.ai(\"I'm an AI. I'm {foo}. I'm {bar}.\"),\n/// ]);\n/// final prompt = promptTemplate.formatPrompt({\n///     'foo': 'GPT-4',\n///     'bar': 'Gemini',\n///     'context': 'competition',\n/// });\n/// final res = await chatModel.invoke(prompt);\n/// ```\n///\n/// If your prompt template only contains one message, you can use the convenient factory constructor\n/// [ChatPromptTemplate.fromTemplate].\n///\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\"Hello {foo}, I'm {bar}. 
Thanks for the {context}\");\n/// ```\n///\n/// If your prompt template contains multiple messages, you can use the convenient factory constructor\n/// [ChatPromptTemplate.fromTemplates].\n///\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, 'You are a helpful assistant that translates {input_language} to {output_language}.'),\n///   (ChatMessageType.human, '{text}'),\n/// ]);\n/// ```\n///\n/// If you need a placeholder for a single message or a list of messages,\n/// you can use [MessagePlaceholder] or [MessagesPlaceholder].\n///\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, \"You are a helpful AI assistant.\"),\n///   (ChatMessageType.messagesPlaceholder, 'history'),\n///   (ChatMessageType.messagePlaceholder, 'input'),\n/// ]);\n/// ```\n///\n/// In general, prefer using [ChatPromptTemplate.fromTemplate] and [ChatPromptTemplate.fromTemplates] to create\n/// a [ChatPromptTemplate] as the resulting code is more readable. Use the main [ChatPromptTemplate] constructor or\n/// [ChatPromptTemplate.fromPromptMessages] for advanced use cases.\n/// {@endtemplate}\n@immutable\nfinal class ChatPromptTemplate extends BaseChatPromptTemplate {\n  /// {@macro chat_prompt_template}\n  const ChatPromptTemplate({\n    required super.inputVariables,\n    super.partialVariables,\n    required this.promptMessages,\n  });\n\n  /// Creates a chat prompt template with a single message from a string\n  /// template.\n  ///\n  /// Example:\n  /// ```dart\n  /// final promptTemplate = ChatPromptTemplate.fromTemplate(\n  ///   \"Hello {foo}, I'm {bar}. 
Thanks for the {context}\",\n  ///   partialVariables: {'foo': 'GPT-4', 'bar': 'Gemini'},\n  /// );\n  /// final prompt = promptTemplate.formatPrompt({'context': 'competition'});\n  /// final res = await chatModel.invoke(prompt);\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [type] the type of chat message prompt template ([HumanChatMessagePromptTemplate] by default).\n  /// - [customRole] the role of the message if [type] is [ChatMessageType.custom].\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatPromptTemplate.fromTemplate(\n    final String template, {\n    final ChatMessageType type = ChatMessageType.human,\n    final String? customRole,\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return ChatPromptTemplate.fromTemplates(\n      [(type, template)],\n      customRole: customRole,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [ChatPromptTemplate] from a list of pairs of chat message prompt template type and template.\n  ///\n  /// Example:\n  /// ```dart\n  /// final promptTemplate = ChatPromptTemplate.fromTemplates([\n  ///   (ChatMessageType.system, 'You are an assistant that translates {input_language} to {output_language}.'),\n  ///   (ChatMessageType.human, '{text}'),\n  /// ]);\n  /// final prompt = promptTemplate.formatPrompt({\n  ///     'input_language': 'English',\n  ///     'output_language': 'French',\n  ///     'text': 'I love programming.',\n  /// });\n  /// final res = await chatModel.invoke(prompt);\n  /// ```\n  ///\n  /// - [messages] the list of pairs of type and templates.\n  /// - [customRole] the role of the message if [type] is [ChatMessageType.custom].\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  
factory ChatPromptTemplate.fromTemplates(\n    final List<(ChatMessageType, String)> messages, {\n    final String? customRole,\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    final chatMessagePromptTemplates = messages\n        .map((final message) {\n          final (role, template) = message;\n          return switch (role) {\n            ChatMessageType.human =>\n              HumanChatMessagePromptTemplate.fromTemplate(\n                template,\n                partialVariables: partialVariables,\n                validateTemplate: false,\n              ),\n            ChatMessageType.ai => AIChatMessagePromptTemplate.fromTemplate(\n              template,\n              partialVariables: partialVariables,\n              validateTemplate: false,\n            ),\n            ChatMessageType.system =>\n              SystemChatMessagePromptTemplate.fromTemplate(\n                template,\n                partialVariables: partialVariables,\n                validateTemplate: false,\n              ),\n            ChatMessageType.custom =>\n              CustomChatMessagePromptTemplate.fromTemplate(\n                template,\n                role: ArgumentError.checkNotNull(customRole, 'customRole'),\n                partialVariables: partialVariables,\n                validateTemplate: false,\n              ),\n            ChatMessageType.messagePlaceholder => MessagePlaceholder(\n              variableName: template,\n            ),\n            ChatMessageType.messagesPlaceholder => MessagesPlaceholder(\n              variableName: template,\n            ),\n          };\n        })\n        .toList(growable: false);\n    return ChatPromptTemplate.fromPromptMessages(\n      chatMessagePromptTemplates,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// Creates a [ChatPromptTemplate] with a list of template messages.\n  ///\n  /// - [promptMessages] the list of template messages. 
The list can contain:\n  ///   * [SystemChatMessagePromptTemplate] (for system messages)\n  ///   * [HumanChatMessagePromptTemplate] (for human messages)\n  ///   * [AIChatMessagePromptTemplate] (for AI messages)\n  ///   * [CustomChatMessagePromptTemplate] (for custom role messages)\n  /// - [validateTemplate] whether to validate the template.\n  factory ChatPromptTemplate.fromPromptMessages(\n    final List<ChatMessagePromptTemplate> promptMessages, {\n    final bool validateTemplate = true,\n  }) {\n    final inputVariables = promptMessages\n        .map((final m) => m.inputVariables)\n        .expand((final i) => i)\n        .toSet();\n    final partialVariables = {\n      for (final m in promptMessages) ...?m.partialVariables,\n    };\n    final t = ChatPromptTemplate(\n      inputVariables: inputVariables,\n      partialVariables: partialVariables.isEmpty ? null : partialVariables,\n      promptMessages: promptMessages,\n    );\n    if (validateTemplate) {\n      t.validateTemplate();\n    }\n    return t;\n  }\n\n  /// Creates a [ChatPromptTemplate] with a single message from a file.\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [type] the type of chat message prompt template ([HumanChatMessagePromptTemplate] by default).\n  /// - [customRole] the role of the message if [type] is [ChatMessageType.custom].\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<ChatPromptTemplate> fromTemplateFile(\n    final String templateFile, {\n    final ChatMessageType type = ChatMessageType.human,\n    final String? customRole,\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return ChatPromptTemplate.fromTemplate(\n      template,\n      type: type,\n      customRole: customRole,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// The list of messages that make up the prompt template.\n  final List<ChatMessagePromptTemplate> promptMessages;\n\n  @override\n  String get type => 'chat';\n\n  @override\n  BasePromptTemplate partial(final PartialValues values) {\n    final newPromptMessages = promptMessages\n        .map(\n          (final ChatMessagePromptTemplate m) =>\n              m.copyWith(prompt: m.prompt.partial(values)),\n        )\n        .toList(growable: false);\n    return ChatPromptTemplate.fromPromptMessages(newPromptMessages);\n  }\n\n  @override\n  void validateTemplate() {\n    checkValidChatPromptTemplate(\n      promptMessages: promptMessages,\n      inputVariables: inputVariables,\n      partialVariables: partialVariables?.keys,\n    );\n  }\n\n  @override\n  List<ChatMessage> formatMessages([final InputValues values = const {}]) {\n    final allValues = mergePartialAndUserVariables(values);\n    return promptMessages\n        .map(\n          (final m) => m.formatMessages({\n            for (final inputVariable in m.inputVariables)\n              inputVariable: ArgumentError.checkNotNull(\n                allValues[inputVariable],\n                inputVariable,\n              ),\n          }),\n        )\n        .expand((final i) => i)\n        .toList(growable: false);\n  }\n\n  @override\n  bool operator ==(covariant final ChatPromptTemplate other) {\n    const setEqualsInputVariables = SetEquality<String>();\n    const mapEqualsPartialVariables = MapEquality<String, dynamic>();\n    const listEqualsPromptMessages = ListEquality<ChatMessagePromptTemplate>();\n    return identical(this, other) 
||\n        runtimeType == other.runtimeType &&\n            setEqualsInputVariables.equals(\n              inputVariables,\n              other.inputVariables,\n            ) &&\n            mapEqualsPartialVariables.equals(\n              partialVariables,\n              other.partialVariables,\n            ) &&\n            listEqualsPromptMessages.equals(\n              promptMessages,\n              other.promptMessages,\n            );\n  }\n\n  @override\n  int get hashCode =>\n      inputVariables.hashCode ^\n      partialVariables.hashCode ^\n      promptMessages.hashCode;\n\n  @override\n  String toString() {\n    return '''\nChatPromptTemplate{\n  promptMessages: $promptMessages,\n}''';\n  }\n\n  /// Creates a copy of this [ChatPromptTemplate] with the given fields.\n  @override\n  ChatPromptTemplate copyWith({\n    final Set<String>? inputVariables,\n    final PartialValues? partialVariables,\n    final List<ChatMessagePromptTemplate>? promptMessages,\n  }) {\n    return ChatPromptTemplate(\n      inputVariables: inputVariables ?? this.inputVariables,\n      partialVariables: partialVariables ?? this.partialVariables,\n      promptMessages: promptMessages ?? 
this.promptMessages,\n    );\n  }\n}\n\n/// The type of a chat message prompt template.\nenum ChatMessageType {\n  /// A [SystemChatMessagePromptTemplate].\n  system,\n\n  /// A [HumanChatMessagePromptTemplate].\n  human,\n\n  /// A [AIChatMessagePromptTemplate].\n  ai,\n\n  /// A [CustomChatMessagePromptTemplate].\n  custom,\n\n  /// A [MessagePlaceholder].\n  messagePlaceholder,\n\n  /// A [MessagesPlaceholder].\n  messagesPlaceholder,\n}\n\n/// {@template system_chat_message_prompt_template}\n/// A chat message prompt template for a [SystemChatMessage] ([ChatMessageType.system]).\n/// {@endtemplate}\n@immutable\nfinal class SystemChatMessagePromptTemplate\n    extends StringMessagePromptTemplate {\n  /// {@macro system_chat_message_prompt_template}\n  const SystemChatMessagePromptTemplate({required super.prompt});\n\n  /// Creates a [SystemChatMessagePromptTemplate] from a string template.\n  /// It considers the prompt a [SystemChatMessage].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = SystemChatMessagePromptTemplate.fromTemplate(\"Here's some context: {context}\");\n  /// ```\n  ///\n  /// Alternatively, you can use [ChatMessagePromptTemplate.system] to achieve the same result.\n  ///\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.system(\"Here's some context: {context}\");\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory SystemChatMessagePromptTemplate.fromTemplate(\n    final String template, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return SystemChatMessagePromptTemplate(\n      prompt: PromptTemplate.fromTemplate(\n        template,\n        partialVariables: partialVariables,\n        validateTemplate: validateTemplate,\n      ),\n    );\n  }\n\n  /// Load a prompt from a file.\n  /// It considers the prompt a [SystemChatMessage].\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<SystemChatMessagePromptTemplate> fromTemplateFile(\n    final String templateFile, {\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return SystemChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  @override\n  ChatMessage format([final InputValues values = const {}]) {\n    return ChatMessage.system(prompt.format(values));\n  }\n\n  @override\n  String toString() {\n    return '''\nSystemChatMessagePromptTemplate{\n  prompt: $prompt,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  StringMessagePromptTemplate copyWith({final BasePromptTemplate? prompt}) {\n    return SystemChatMessagePromptTemplate(\n      prompt: prompt as PromptTemplate? ?? 
this.prompt,\n    );\n  }\n}\n\n/// {@template human_chat_message_prompt_template}\n/// A chat message prompt template for a [HumanChatMessage] ([ChatMessageType.human]).\n/// {@endtemplate}\n@immutable\nfinal class HumanChatMessagePromptTemplate extends StringMessagePromptTemplate {\n  /// {@macro human_chat_message_prompt_template}\n  const HumanChatMessagePromptTemplate({required super.prompt});\n\n  /// Creates a [HumanChatMessagePromptTemplate] from a string template.\n  /// It considers the prompt a [HumanChatMessage].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = HumanChatMessagePromptTemplate.fromTemplate(\n  ///   \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n  /// );\n  /// ```\n  ///\n  /// Alternatively, you can use [ChatMessagePromptTemplate.human] to achieve the same result.\n  ///\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.human(\n  ///   \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n  /// );\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory HumanChatMessagePromptTemplate.fromTemplate(\n    final String template, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return HumanChatMessagePromptTemplate(\n      prompt: PromptTemplate.fromTemplate(\n        template,\n        partialVariables: partialVariables,\n        validateTemplate: validateTemplate,\n      ),\n    );\n  }\n\n  /// Load a prompt from a file.\n  /// It considers the prompt a [HumanChatMessage].\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<HumanChatMessagePromptTemplate> fromTemplateFile(\n    final String templateFile, {\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return HumanChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  @override\n  ChatMessage format([final InputValues values = const {}]) {\n    return ChatMessage.humanText(prompt.format(values));\n  }\n\n  @override\n  String toString() {\n    return '''\nHumanChatMessagePromptTemplate{\n  prompt: $prompt,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  StringMessagePromptTemplate copyWith({final BasePromptTemplate? prompt}) {\n    return HumanChatMessagePromptTemplate(\n      prompt: prompt as PromptTemplate? ?? 
this.prompt,\n    );\n  }\n}\n\n/// {@template ai_chat_message_prompt_template}\n/// A chat message prompt template for a [AIChatMessage] ([ChatMessageType.ai]).\n/// {@endtemplate}\n@immutable\nfinal class AIChatMessagePromptTemplate extends StringMessagePromptTemplate {\n  /// {@macro ai_chat_message_prompt_template}\n  const AIChatMessagePromptTemplate({required super.prompt});\n\n  /// Creates a [AIChatMessagePromptTemplate] from a string template.\n  /// It considers the prompt a [AIChatMessage].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = AIChatMessagePromptTemplate.fromTemplate(\"I'm an AI. I'm {foo}. I'm {bar}.\");\n  /// ```\n  ///\n  /// Alternatively, you can use [ChatMessagePromptTemplate.ai] to achieve the same result.\n  ///\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.ai(\"I'm an AI. I'm {foo}. I'm {bar}.\");\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory AIChatMessagePromptTemplate.fromTemplate(\n    final String template, {\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return AIChatMessagePromptTemplate(\n      prompt: PromptTemplate.fromTemplate(\n        template,\n        partialVariables: partialVariables,\n        validateTemplate: validateTemplate,\n      ),\n    );\n  }\n\n  /// Load a prompt from a file.\n  /// It considers the prompt a [AIChatMessage].\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<AIChatMessagePromptTemplate> fromTemplateFile(\n    final String templateFile, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return AIChatMessagePromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  @override\n  ChatMessage format([final InputValues values = const {}]) {\n    return ChatMessage.ai(prompt.format(values));\n  }\n\n  @override\n  String toString() {\n    return '''\nAIChatMessagePromptTemplate{\n  prompt: $prompt,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  StringMessagePromptTemplate copyWith({final BasePromptTemplate? prompt}) {\n    return AIChatMessagePromptTemplate(\n      prompt: prompt as PromptTemplate? ?? this.prompt,\n    );\n  }\n}\n\n/// {@template custom_chat_message_prompt_template}\n/// A template for a [CustomChatMessage] ([ChatMessageType.custom]).\n/// {@endtemplate}\n@immutable\nfinal class CustomChatMessagePromptTemplate\n    extends StringMessagePromptTemplate {\n  /// {@macro custom_chat_message_prompt_template}\n  const CustomChatMessagePromptTemplate({\n    required super.prompt,\n    required this.role,\n  });\n\n  /// Creates a [CustomChatMessagePromptTemplate] from a string template.\n  /// It considers the prompt a [CustomChatMessage].\n  ///\n  /// Example:\n  /// ```dart\n  /// final msgTemplate = CustomChatMessagePromptTemplate.fromTemplate(\n  ///   \"I'm an assistant. I'm {foo}. I'm {bar}.\",\n  ///   role: 'assistant',\n  /// );\n  /// ```\n  ///\n  /// Alternatively, you can use [ChatMessagePromptTemplate.custom] to achieve the same result.\n  ///\n  /// ```dart\n  /// final msgTemplate = ChatMessagePromptTemplate.custom(\n  ///   \"I'm an assistant. I'm {foo}. 
I'm {bar}.\",\n  ///   role: 'assistant',\n  /// );\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [role] the role of the message.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory CustomChatMessagePromptTemplate.fromTemplate(\n    final String template, {\n    required final String role,\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    return CustomChatMessagePromptTemplate(\n      prompt: PromptTemplate.fromTemplate(\n        template,\n        partialVariables: partialVariables,\n        validateTemplate: validateTemplate,\n      ),\n      role: role,\n    );\n  }\n\n  /// Load a prompt from a file.\n  /// It considers the prompt a [CustomChatMessage].\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<CustomChatMessagePromptTemplate> fromTemplateFile(\n    final String templateFile, {\n    required final String role,\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return CustomChatMessagePromptTemplate.fromTemplate(\n      template,\n      role: role,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// The role of the message.\n  final String role;\n\n  @override\n  ChatMessage format([final InputValues values = const {}]) {\n    return ChatMessage.custom(prompt.format(values), role: role);\n  }\n\n  @override\n  String toString() {\n    return '''\nCustomChatMessagePromptTemplate{\n  prompt: $prompt,\n  role: $role,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  StringMessagePromptTemplate copyWith({final BasePromptTemplate? prompt}) {\n    return CustomChatMessagePromptTemplate(\n      prompt: prompt as PromptTemplate? ?? this.prompt,\n      role: role,\n    );\n  }\n}\n\n/// {@template message_placeholder}\n/// Prompt template that assumes the variable is a [ChatMessage] ([ChatMessageType.messagePlaceholder]).\n///\n/// This is useful when you want to use a single [ChatMessage] in the prompt.\n/// For example, when you decide the type of message at runtime (e.g.\n/// [HumanChatMessage] or [FunctionChatMessage]).\n///\n/// If you need to add multiple messages, use [MessagesPlaceholder].\n///\n/// Example:\n/// ```dart\n/// ChatPromptTemplate.fromPromptMessages([\n///   ChatMessagePromptTemplate.system('You are a helpful AI assistant'),\n///   ChatMessagePromptTemplate.messagesPlaceholder('history'),\n///   ChatMessagePromptTemplate.messagePlaceholder('input'),\n/// ]);\n/// ```\n///\n/// Alternatively, you can use [ChatPromptTemplate.fromTemplates] to achieve the same result.\n///\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, 'You are a helpful AI assistant'),\n///   
(ChatMessageType.messagesPlaceholder, 'history'),\n///   (ChatMessageType.messagePlaceholder, 'input'),\n/// ]);\n/// ```\n/// {@endtemplate}\n@immutable\nfinal class MessagePlaceholder extends ChatMessagePromptTemplate {\n  /// {@macro message_placeholder}\n  const MessagePlaceholder({required this.variableName})\n    : super(\n        prompt: const PromptTemplate(inputVariables: {}, template: ''),\n      );\n\n  /// The name of the placeholder variable.\n  final String variableName;\n\n  @override\n  Set<String> get inputVariables => {variableName};\n\n  @override\n  PartialValues? get partialVariables => null;\n\n  @override\n  List<ChatMessage> formatMessages([\n    final Map<String, dynamic> values = const {},\n  ]) {\n    final message = values[variableName] as ChatMessage?;\n    return [if (message != null) message];\n  }\n\n  @override\n  String toString() {\n    return '''\nMessagePlaceholder{\n  variableName: $variableName,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  ChatMessagePromptTemplate copyWith({\n    final BasePromptTemplate? prompt,\n    final String? variableName,\n  }) {\n    return MessagePlaceholder(variableName: variableName ?? 
this.variableName);\n  }\n}\n\n/// {@template messages_placeholder}\n/// Prompt template that assumes the variable is a list of [ChatMessage] ([ChatMessageType.messagesPlaceholder]).\n///\n/// This is useful when you want to use a list of messages in the prompt.\n/// For example, after retrieving them from memory.\n///\n/// If you need to add a single message, use [MessagePlaceholder].\n///\n/// Example:\n/// ```dart\n/// ChatPromptTemplate.fromPromptMessages([\n///   ChatMessagePromptTemplate.system('You are a helpful AI assistant'),\n///   ChatMessagePromptTemplate.messagesPlaceholder('history'),\n///   ChatMessagePromptTemplate.messagePlaceholder('input'),\n/// ]);\n/// ```\n///\n/// Alternatively, you can use [ChatPromptTemplate.fromTemplates] to achieve the same result.\n///\n/// ```dart\n/// final promptTemplate = ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, 'You are a helpful AI assistant'),\n///   (ChatMessageType.messagesPlaceholder, 'history'),\n///   (ChatMessageType.messagePlaceholder, 'input'),\n/// ]);\n/// ```\n/// {@endtemplate}\n@immutable\nfinal class MessagesPlaceholder extends ChatMessagePromptTemplate {\n  /// {@macro messages_placeholder}\n  const MessagesPlaceholder({required this.variableName})\n    : super(\n        prompt: const PromptTemplate(inputVariables: {}, template: ''),\n      );\n\n  /// The name of the placeholder variable.\n  final String variableName;\n\n  @override\n  Set<String> get inputVariables => {variableName};\n\n  @override\n  PartialValues? get partialVariables => null;\n\n  @override\n  List<ChatMessage> formatMessages([\n    final Map<String, dynamic> values = const {},\n  ]) {\n    return values[variableName] as List<ChatMessage>? ?? 
const [];\n  }\n\n  @override\n  String toString() {\n    return '''\nMessagesPlaceholder{\n  variableName: $variableName,\n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  @override\n  ChatMessagePromptTemplate copyWith({\n    final BasePromptTemplate? prompt,\n    final String? variableName,\n  }) {\n    return MessagesPlaceholder(variableName: variableName ?? this.variableName);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/pipeline.dart",
    "content": "import 'base_prompt.dart';\nimport 'chat_prompt.dart';\nimport 'prompt.dart';\nimport 'types.dart';\n\n/// {@template pipeline_prompt_template}\n/// A prompt template for composing multiple prompts together.\n///\n/// This can be useful when you want to reuse parts of prompts.\n/// A [PipelinePromptTemplate] consists of two main parts:\n/// - [finalPrompt] This is the final prompt that is returned.\n/// - [pipelinePrompts] This is a list of records, consisting of a string\n///   (`name`) and a [BasePromptTemplate]. Each [BasePromptTemplate] will be\n///   formatted and then passed to future prompt templates as a variable with\n///   the same name as `name`.\n///\n/// Example:\n/// ```dart\n/// final promptA = PromptTemplate.fromTemplate('{foo}');\n/// final promptB = PromptTemplate.fromTemplate('{bar}');\n/// final pipelinePromptTemplate = PipelinePromptTemplate(\n///   finalPrompt: promptB,\n///   pipelinePrompts: [('bar', promptA)],\n/// );\n/// final prompt = pipelinePromptTemplate.formatPrompt({'foo': 'jim'});\n/// final res = await llm.invoke(prompt);\n/// ```\n/// {@endtemplate}\nfinal class PipelinePromptTemplate extends BasePromptTemplate {\n  /// {@macro pipeline_prompt_template}\n  PipelinePromptTemplate({\n    required this.finalPrompt,\n    required this.pipelinePrompts,\n  }) : super(inputVariables: _computeInputValues(finalPrompt, pipelinePrompts));\n\n  /// The final prompt that is returned.\n  final BasePromptTemplate finalPrompt;\n\n  /// Tuples of the name of the variable and the prompt template in the\n  /// pipeline.\n  final List<(String name, BasePromptTemplate)> pipelinePrompts;\n\n  static Set<String> _computeInputValues(\n    final BasePromptTemplate finalPrompt,\n    final List<(String name, BasePromptTemplate)> pipelinePrompts,\n  ) {\n    final intermediateValues = pipelinePrompts.map((final pipelinePrompt) {\n      final (name, _) = pipelinePrompt;\n      return name;\n    }).toSet();\n\n    return 
pipelinePrompts.expand((final pipelinePrompt) {\n      final (_, prompt) = pipelinePrompt;\n      return prompt.inputVariables.where(\n        (final inputValue) => !intermediateValues.contains(inputValue),\n      );\n    }).toSet();\n  }\n\n  @override\n  String format(final InputValues values) {\n    return finalPrompt.format(_formatPipelinePrompts(values));\n  }\n\n  @override\n  PromptValue formatPrompt(final InputValues values) {\n    return finalPrompt.formatPrompt(_formatPipelinePrompts(values));\n  }\n\n  Map<String, dynamic> _formatPipelinePrompts(final InputValues values) {\n    final allValues = mergePartialAndUserVariables(values);\n    for (final pipelinePrompt in pipelinePrompts) {\n      final (name, prompt) = pipelinePrompt;\n      final pipelinePromptInputValues = _extractRequiredInputValues(\n        allValues,\n        prompt.inputVariables,\n      );\n      allValues[name] = switch (prompt) {\n        final ChatPromptTemplate p => p.formatMessages(\n          pipelinePromptInputValues,\n        ),\n        final PromptTemplate p => p.format(pipelinePromptInputValues),\n        _ => throw UnsupportedError('Unsupported prompt type: $prompt'),\n      };\n    }\n    return _extractRequiredInputValues(allValues, finalPrompt.inputVariables);\n  }\n\n  Map<String, dynamic> _extractRequiredInputValues(\n    final Map<String, dynamic> allValues,\n    final Set<String> requiredValueNames,\n  ) {\n    return requiredValueNames.fold(<String, dynamic>{}, (\n      final requiredValues,\n      final valueName,\n    ) {\n      requiredValues[valueName] = allValues[valueName];\n      return requiredValues;\n    });\n  }\n\n  @override\n  PipelinePromptTemplate copyWith({\n    final BasePromptTemplate? finalPrompt,\n    final List<(String name, BasePromptTemplate)>? pipelinePrompts,\n    final Set<String>? inputVariables,\n    final Map<String, dynamic>? partialVariables,\n  }) {\n    return PipelinePromptTemplate(\n      finalPrompt: finalPrompt ?? 
this.finalPrompt,\n      pipelinePrompts: pipelinePrompts ?? this.pipelinePrompts,\n    );\n  }\n\n  @override\n  String get type => 'pipeline';\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/prompt.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:cross_file/cross_file.dart';\nimport 'package:meta/meta.dart';\n\nimport 'base_prompt.dart';\nimport 'template.dart';\nimport 'types.dart';\n\n/// {@template prompt_template}\n/// A prompt template for a language model.\n///\n/// A prompt template consists of a string template. It accepts a set of parameters\n/// from the user that can be used to generate a prompt for a language model.\n///\n/// Example:\n/// ```dart\n/// final promptTemplate = PromptTemplate(\n///   inputVariables: ['product'],\n///   template: 'What is a good name for a company that makes {product}?',\n/// );\n/// final prompt = promptTemplate.formatPrompt({'product': 'colorful socks'});\n/// final res = await llm.invoke(prompt);\n/// ```\n///\n/// Note: the default constructor does not validate the template. You can use\n/// [PromptTemplate.validateTemplate] to validate the template.\n///\n/// You can also use the following convenience factory constructors to create a prompt template:\n///\n/// - [PromptTemplate.fromTemplate] creates a prompt template from a string template automatically\n///   extracting the input variables.\n///\n/// ```dart\n/// final promptTemplate = PromptTemplate.fromTemplate(\n///   'What is a good name for a company that makes {product}?',\n/// );\n/// ```\n///\n/// - [PromptTemplate.fromExamples] to create prompt templates from a list of examples.\n/// - [PromptTemplate.fromFile] to create a prompt template from a file.\n/// {@endtemplate}\n@immutable\nfinal class PromptTemplate extends BasePromptTemplate {\n  /// {@macro prompt_template}\n  const PromptTemplate({\n    required super.inputVariables,\n    super.partialVariables,\n    required this.template,\n  });\n\n  /// Creates a prompt template from a string template automatically extracting the input variables.\n  ///\n  /// Example:\n  /// ```dart\n  /// final promptTemplate = PromptTemplate.fromTemplate(\n  ///   'What is a good name for 
a company that makes {product}?',\n  /// );\n  /// final prompt = promptTemplate.formatPrompt({'product': 'colorful socks'});\n  /// final res = await llm.invoke(prompt);\n  /// ```\n  ///\n  /// - [template] the template string.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  factory PromptTemplate.fromTemplate(\n    final String template, {\n    final PartialValues? partialVariables,\n    final bool validateTemplate = true,\n  }) {\n    final t = PromptTemplate(\n      inputVariables: parseFStringTemplate(template)\n          .whereType<ParsedFStringVariableNode>()\n          .map((final node) => node.name)\n          .toSet()\n          .difference(partialVariables?.keys.toSet() ?? {}),\n      partialVariables: partialVariables,\n      template: template,\n    );\n    if (validateTemplate) {\n      t.validateTemplate();\n    }\n    return t;\n  }\n\n  /// Take examples in list format with prefix and suffix to create a prompt.\n  ///\n  /// Intended to be used as a way to dynamically create a prompt from examples.\n  ///\n  /// - [examples] list of examples to use in the prompt.\n  /// - [suffix] string to go after the list of examples. Should generally\n  ///   set up the user's input.\n  /// - [inputVariables] list of variable names the final prompt\n  ///   template will expect.\n  /// - [exampleSeparator] the separator to use in between examples.\n  /// - [prefix] string that should go before any examples. 
Generally includes\n  ///   examples.\n  /// - [validateTemplate] whether to validate the template.\n  factory PromptTemplate.fromExamples({\n    required final List<String> examples,\n    required final String suffix,\n    required final Set<String> inputVariables,\n    final String exampleSeparator = '\\n\\n',\n    final String prefix = '',\n    final bool validateTemplate = true,\n  }) {\n    final template = [prefix, ...examples, suffix].join(exampleSeparator);\n    final t = PromptTemplate(\n      inputVariables: inputVariables,\n      template: template,\n    );\n    if (validateTemplate) {\n      t.validateTemplate();\n    }\n    return t;\n  }\n\n  /// Loads a prompt from a file.\n  ///\n  /// - [templateFile] the path to the file containing the prompt template.\n  /// - [partialVariables] the partial variables to use for the template.\n  /// - [validateTemplate] whether to validate the template.\n  static Future<PromptTemplate> fromFile(\n    final String templateFile, {\n    final PartialValues? 
partialVariables,\n    final bool validateTemplate = true,\n  }) async {\n    final file = XFile(templateFile);\n    final template = await file.readAsString();\n    return PromptTemplate.fromTemplate(\n      template,\n      partialVariables: partialVariables,\n      validateTemplate: validateTemplate,\n    );\n  }\n\n  /// The prompt template.\n  final String template;\n\n  @override\n  String get type => 'prompt';\n\n  @override\n  PromptTemplate partial(final PartialValues values) {\n    return super.partial(values) as PromptTemplate;\n  }\n\n  @override\n  void validateTemplate() {\n    checkValidPromptTemplate(\n      template: template,\n      inputVariables: inputVariables,\n      partialVariables: partialVariables?.keys,\n    );\n  }\n\n  @override\n  String format([final InputValues values = const {}]) {\n    final allValues = mergePartialAndUserVariables(values);\n    return renderTemplate(template: template, inputValues: allValues);\n  }\n\n  @override\n  PromptValue formatPrompt(final InputValues values) {\n    return PromptValue.string(format(values));\n  }\n\n  @override\n  bool operator ==(covariant final PromptTemplate other) {\n    const setEqualsInputVariables = SetEquality<String>();\n    const mapEqualsPartialVariables = MapEquality<String, dynamic>();\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            setEqualsInputVariables.equals(\n              inputVariables,\n              other.inputVariables,\n            ) &&\n            mapEqualsPartialVariables.equals(\n              partialVariables,\n              other.partialVariables,\n            ) &&\n            template == other.template;\n  }\n\n  @override\n  int get hashCode =>\n      inputVariables.hashCode ^ partialVariables.hashCode ^ template.hashCode;\n\n  @override\n  String toString() {\n    return '''\nPromptTemplate{\n  template: $template, \n  inputVariables: $inputVariables,\n  partialVariables: $partialVariables,\n}''';\n  }\n\n  
@override\n  PromptTemplate copyWith({\n    final Set<String>? inputVariables,\n    final Map<String, dynamic>? partialVariables,\n    final String? template,\n  }) {\n    return PromptTemplate(\n      inputVariables: inputVariables ?? this.inputVariables,\n      partialVariables: partialVariables ?? this.partialVariables,\n      template: template ?? this.template,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/prompt_selector.dart",
    "content": "import '../chat_models/base.dart';\nimport '../language_models/base.dart';\nimport '../llms/base.dart';\nimport 'base_prompt.dart';\n\n/// {@template base_prompt_selector}\n/// Base class for a prompt selector.\n/// {@endtemplate}\nabstract interface class BasePromptSelector {\n  /// {@macro base_prompt_selector}\n  const BasePromptSelector();\n\n  /// Get default prompt for a language model.\n  BasePromptTemplate getPrompt(final BaseLanguageModel llm);\n}\n\n/// {@template conditional_prompt_selector}\n/// Prompt collection that goes through conditionals to select the appropriate prompt template.\n///\n/// You can use this to select a prompt template based on the type of language model (LLM vs. ChatModel)\n/// or the specific model used (e.g. GPT-4 vs Gemini Pro).\n///\n/// Example: Selecting a prompt based on the type of language model.\n/// ```dart\n/// final prompt = PromptTemplate.fromTemplate('''\n/// Use the following pieces of context to answer the question at the end.\n/// {context}\n/// Question: {question}\n/// Helpful Answer:\n/// ''');\n/// final chatPrompt = ChatPromptTemplate.fromTemplates([\n///   (ChatMessageRole.system, 'Use the following pieces of context to answer the users question.\\n\\n{context}'),\n///   (ChatMessageRole.human, '{question}'),\n/// ]);\n/// final promptSelector = ConditionalPromptSelector(\n///   defaultPrompt: prompt,\n///   conditionals: [PromptCondition.isChatModel(chatPrompt)],\n/// );\n/// final selectedPrompt = promptSelector.getPrompt(llm);\n/// ```\n/// {@endtemplate}\nclass ConditionalPromptSelector implements BasePromptSelector {\n  /// {@macro conditional_prompt_selector}\n  const ConditionalPromptSelector({\n    required this.defaultPrompt,\n    this.conditionals = const [],\n  });\n\n  /// Default prompt to use if no conditionals match.\n  final BasePromptTemplate defaultPrompt;\n\n  /// Conditional prompts to use.\n  final List<PromptCondition> conditionals;\n\n  @override\n  BasePromptTemplate 
getPrompt(final BaseLanguageModel llm) {\n    for (final conditional in conditionals) {\n      if (conditional.condition(llm)) {\n        return conditional.prompt;\n      }\n    }\n    return defaultPrompt;\n  }\n}\n\n/// {@template prompt_condition}\n/// Condition for a prompt.\n///\n/// The following pre-defined conditions are available:\n/// - [PromptCondition.isLlm]: checks that the language model is an LLM.\n/// - [PromptCondition.isChatModel]: checks that the language model is a chat model.\n/// {@endtemplate}\nclass PromptCondition {\n  /// {@macro prompt_condition}\n  const PromptCondition({required this.condition, required this.prompt});\n\n  /// Condition for a prompt.\n  final bool Function(BaseLanguageModel llm) condition;\n\n  /// Prompt to use if the condition is met.\n  final BasePromptTemplate prompt;\n\n  /// A prompt for a language model that is an LLM.\n  factory PromptCondition.isLlm(final BasePromptTemplate prompt) {\n    return PromptCondition(\n      condition: (final BaseLanguageModel llm) => llm is BaseLLM,\n      prompt: prompt,\n    );\n  }\n\n  /// A prompt for a language model that is a chat model.\n  factory PromptCondition.isChatModel(final BasePromptTemplate prompt) {\n    return PromptCondition(\n      condition: (final BaseLanguageModel llm) => llm is BaseChatModel,\n      prompt: prompt,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/prompts.dart",
    "content": "export 'base_chat_message_prompt.dart';\nexport 'base_chat_prompt.dart';\nexport 'base_prompt.dart';\nexport 'chat_prompt.dart';\nexport 'pipeline.dart';\nexport 'prompt.dart';\nexport 'prompt_selector.dart';\nexport 'template.dart' show TemplateValidationException;\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/template.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../exceptions/base.dart';\nimport 'base_chat_message_prompt.dart';\nimport 'chat_prompt.dart';\nimport 'prompt.dart';\nimport 'types.dart';\n\n/// Checks if the template is a valid [PromptTemplate].\n///\n/// Throws a [TemplateValidationException] if it is not.\nvoid checkValidPromptTemplate({\n  required final String template,\n  required final Set<String> inputVariables,\n  required final Iterable<String>? partialVariables,\n}) {\n  try {\n    // Check reserved keywords\n    if (inputVariables.contains('stop') ||\n        (partialVariables?.contains('stop') ?? false)) {\n      throw const TemplateValidationException(\n        message:\n            'Cannot have a variable named `stop`, as it is used internally.',\n      );\n    }\n    // Check overlapping\n    if (partialVariables != null &&\n        inputVariables\n            .toSet()\n            .intersection(partialVariables.toSet())\n            .isNotEmpty) {\n      throw const TemplateValidationException(\n        message: 'Cannot have overlapping between input and partial variables',\n      );\n    }\n    // Check variables in text\n    final allVariables = [...inputVariables, ...?partialVariables];\n    final variablesNodes = parseFStringTemplate(\n      template,\n    ).whereType<ParsedFStringVariableNode>().toSet();\n    if (variablesNodes.length != allVariables.length) {\n      throw TemplateValidationException(\n        message:\n            '${variablesNodes.length} variables found, '\n            'but ${inputVariables.length} expected.',\n      );\n    }\n    // Try to render\n    final dummyInputs = allVariables.fold(<String, Object>{}, (\n      final acc,\n      final v,\n    ) {\n      acc[v] = 'foo';\n      return acc;\n    });\n    renderTemplate(template: template, inputValues: dummyInputs);\n  } on TemplateValidationException {\n    rethrow;\n  } catch (e) {\n    throw TemplateValidationException(message: '$e');\n  }\n}\n\n/// 
Checks if the template is a valid [ChatPromptTemplate].\n///\n/// Throws a [TemplateValidationException] if it is not.\nvoid checkValidChatPromptTemplate({\n  required final List<ChatMessagePromptTemplate> promptMessages,\n  required final Set<String> inputVariables,\n  required final Iterable<String>? partialVariables,\n}) {\n  try {\n    final inputVariablesMessages = promptMessages\n        .map((final promptMessage) => promptMessage.inputVariables)\n        .expand((final element) => element)\n        .toSet();\n    final inputVariablesInstance = inputVariables.toSet();\n    final inputVariablesDiff = inputVariablesMessages\n        .difference(inputVariablesInstance)\n        .union(inputVariablesInstance.difference(inputVariablesMessages));\n    if (inputVariablesDiff.isNotEmpty) {\n      throw TemplateValidationException(\n        message:\n            'Mismatch between input variables and prompt messages input '\n            'variables. Diff: $inputVariablesDiff',\n      );\n    }\n    final partialVariablesSet = {...?partialVariables};\n    final partialVariablesInstance = partialVariablesSet.toSet();\n    final partialVariablesDiff = partialVariablesSet\n        .difference(partialVariablesInstance)\n        .union(partialVariablesInstance.difference(partialVariablesSet));\n    if (partialVariablesDiff.isNotEmpty) {\n      throw TemplateValidationException(\n        message:\n            'Mismatch between partial variables and prompt messages input '\n            'variables. 
Diff: $partialVariablesDiff',\n      );\n    }\n    for (final promptMessage in promptMessages) {\n      promptMessage.prompt.validateTemplate();\n    }\n  } on TemplateValidationException {\n    rethrow;\n  } catch (e) {\n    throw TemplateValidationException(message: '$e');\n  }\n}\n\n/// {@template template_validation_exception}\n/// Exception thrown when a template validation fails.\n/// {@endtemplate}\nfinal class TemplateValidationException extends LangChainException {\n  /// {@macro template_validation_exception}\n  const TemplateValidationException({super.message = ''})\n    : super(code: 'template_validation');\n}\n\n/// Renders a template with the given values.\nString renderTemplate({\n  required final String template,\n  required final InputValues inputValues,\n}) {\n  return renderFStringTemplate(template, inputValues);\n}\n\n/// Render a template in fString format.\nString renderFStringTemplate(\n  final String template,\n  final InputValues inputValues,\n) {\n  return parseFStringTemplate(template)\n      .map(\n        (final node) => switch (node) {\n          ParsedFStringLiteralNode(text: final t) => t,\n          ParsedFStringVariableNode(name: final n) =>\n            ArgumentError.checkNotNull(\n              inputValues[n],\n              'Missing value for variable ${node.name}',\n            ),\n        },\n      )\n      .join();\n}\n\n/// Parses a template in fString format.\nList<ParsedFStringNode> parseFStringTemplate(final String template) {\n  final chars = template.split('');\n  final List<ParsedFStringNode> nodes = [];\n\n  int nextBracket(final String bracket, final int start) {\n    for (var i = start; i < chars.length; i += 1) {\n      if (bracket.contains(chars[i])) {\n        return i;\n      }\n    }\n    return -1;\n  }\n\n  var i = 0;\n  while (i < chars.length) {\n    if (chars[i] == '{' && i + 1 < chars.length && chars[i + 1] == '{') {\n      nodes.add(const ParsedFStringLiteralNode(text: '{'));\n      i += 2;\n    } else 
if (chars[i] == '}' && i + 1 < chars.length && chars[i + 1] == '}') {\n      nodes.add(const ParsedFStringLiteralNode(text: '}'));\n      i += 2;\n    } else if (chars[i] == '{') {\n      final j = nextBracket('}', i);\n      if (j < 0) {\n        throw const TemplateValidationException(\n          message: \"Unclosed '{' in template.\",\n        );\n      }\n      nodes.add(\n        ParsedFStringVariableNode(name: chars.sublist(i + 1, j).join()),\n      );\n      i = j + 1;\n    } else if (chars[i] == '}') {\n      throw const TemplateValidationException(\n        message: \"Single '}' in template.\",\n      );\n    } else {\n      final next = nextBracket('{}', i);\n      final text = (next < 0 ? chars.sublist(i) : chars.sublist(i, next))\n          .join();\n      nodes.add(ParsedFStringLiteralNode(text: text));\n      i = next < 0 ? chars.length : next;\n    }\n  }\n  return nodes;\n}\n\n/// {@template parsed_f_string_node}\n/// A node in a parsed f-string template.\n/// {@endtemplate}\n@immutable\nsealed class ParsedFStringNode {\n  /// {@macro parsed_f_string_node}\n  const ParsedFStringNode();\n}\n\n/// {@template parsed_f_string_literal_node}\n/// A literal node in a parsed f-string template.\n/// {@endtemplate}\n@immutable\nclass ParsedFStringLiteralNode extends ParsedFStringNode {\n  /// {@macro parsed_f_string_literal_node}\n  const ParsedFStringLiteralNode({required this.text});\n\n  /// The text of the literal.\n  final String text;\n\n  @override\n  bool operator ==(covariant final ParsedFStringLiteralNode other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType && text == other.text;\n\n  @override\n  int get hashCode => text.hashCode;\n}\n\n/// {@template parsed_f_string_variable_node}\n/// A variable node in a parsed f-string template.\n/// {@endtemplate}\n@immutable\nclass ParsedFStringVariableNode extends ParsedFStringNode {\n  /// {@macro parsed_f_string_variable_node}\n  const ParsedFStringVariableNode({required 
this.name});\n\n  /// The name of the variable.\n  final String name;\n\n  @override\n  bool operator ==(covariant final ParsedFStringVariableNode other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType && name == other.name;\n\n  @override\n  int get hashCode => name.hashCode;\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/prompts/types.dart",
    "content": "import 'dart:math';\n\nimport 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\nimport '../chat_models/types.dart';\nimport '../chat_models/utils.dart';\nimport '../exceptions/base.dart';\n\n/// {@template prompt_value}\n/// Represents the input to an LLM or Chat model.\n///\n/// When working with an LLM, the [toString] method will be used.\n/// When working with a Chat model, the [toChatMessages] method will be used.\n/// {@endtemplate}\n@immutable\nsealed class PromptValue {\n  /// {@macro prompt_value}\n  const PromptValue();\n\n  /// Returns a string representing the prompt.\n  @override\n  String toString();\n\n  /// Returns a list of messages representing the prompt.\n  List<ChatMessage> toChatMessages();\n\n  /// Convert the prompt value to a map.\n  Map<String, dynamic> toMap() => {};\n\n  /// Convert the prompt value from a map.\n  factory PromptValue.fromMap(Map<String, dynamic> map) =>\n      switch (map['type']) {\n        'string' => StringPromptValue.fromMap(map),\n        'chat' => ChatPromptValue.fromMap(map),\n        null => throw ArgumentError('Prompt value type is null'),\n        _ => throw ArgumentError('Unknown prompt value type ${map['type']}'),\n      };\n\n  /// {@macro string_prompt_template}\n  factory PromptValue.string(final String value) {\n    return StringPromptValue(value);\n  }\n\n  /// {@macro chat_prompt_template}\n  factory PromptValue.chat(final List<ChatMessage> messages) {\n    return ChatPromptValue(messages);\n  }\n\n  /// Merges this prompt value with another by concatenating the content.\n  PromptValue concat(final PromptValue other);\n}\n\n/// {@template string_prompt_template}\n/// Prompt value whose value is a string.\n///\n/// When [toString] is called, it returns the string value.\n/// When [toChatMessages] is called, it returns a [HumanChatMessage] with the\n/// value as content.\n/// {@endtemplate}\n@immutable\nclass StringPromptValue implements PromptValue {\n  /// {@macro 
string_prompt_template}\n  const StringPromptValue(this.value);\n\n  /// String value to use as the prompt.\n  final String value;\n\n  /// Convert the prompt value to a map.\n  @override\n  Map<String, dynamic> toMap() => {'value': value, 'type': 'string'};\n\n  /// Convert the prompt value from a map.\n  factory StringPromptValue.fromMap(Map<String, dynamic> map) =>\n      StringPromptValue(map['value'] as String);\n\n  @override\n  String toString() {\n    return value;\n  }\n\n  @override\n  List<ChatMessage> toChatMessages() {\n    return [ChatMessage.humanText(value)];\n  }\n\n  @override\n  PromptValue concat(final PromptValue other) => switch (other) {\n    final StringPromptValue other => StringPromptValue(value + other.value),\n    final ChatPromptValue other => ChatPromptValue([\n      ChatMessage.humanText(value),\n      ...other.messages,\n    ]),\n  };\n\n  @override\n  bool operator ==(covariant final StringPromptValue other) =>\n      identical(this, other) ||\n      runtimeType == other.runtimeType && value == other.value;\n\n  @override\n  int get hashCode => value.hashCode;\n}\n\n/// {@template chat_prompt_value}\n/// Prompt value whose value is a list of messages.\n///\n/// When [toString] is called, it returns the string representation of the\n/// messages using the following format:\n/// ```txt\n/// System: <system message>\n/// Human: <human message>\n/// AI: <AI message>\n/// Human: <human message>\n/// AI: <AI message>\n/// ...\n/// ```\n/// When [toChatMessages] is called, it returns the list of messages.\n/// {@endtemplate}\n@immutable\nclass ChatPromptValue implements PromptValue {\n  /// {@macro chat_prompt_value}\n  const ChatPromptValue(this.messages);\n\n  /// List of messages to use as the prompt.\n  final List<ChatMessage> messages;\n\n  @override\n  String toString() {\n    return messages.toBufferString();\n  }\n\n  @override\n  List<ChatMessage> toChatMessages() {\n    return messages;\n  }\n\n  /// Convert the prompt value to a 
map.\n  @override\n  Map<String, dynamic> toMap() => {\n    'value': messages.map((message) => message.toMap()).toList(),\n    'type': 'chat',\n  };\n\n  /// Convert the prompt value from a map.\n  factory ChatPromptValue.fromMap(Map<String, dynamic> map) => ChatPromptValue(\n    (map['value'] as List<dynamic>)\n        .whereType<Map<String, dynamic>>()\n        .map(ChatMessage.fromMap)\n        .toList(),\n  );\n\n  @override\n  PromptValue concat(final PromptValue other) => switch (other) {\n    final StringPromptValue other => ChatPromptValue([\n      ...messages,\n      ChatMessage.humanText(other.value),\n    ]),\n    final ChatPromptValue other => ChatPromptValue(\n      List.generate(\n            max(messages.length, other.messages.length),\n            (index) => (\n              index < messages.length ? messages[index] : null,\n              index < other.messages.length ? other.messages[index] : null,\n            ),\n          )\n          .map((final pair) {\n            final (message, otherMessage) = pair;\n            if (message == null) {\n              return otherMessage;\n            } else if (otherMessage == null) {\n              return message;\n            } else {\n              return message.concat(otherMessage);\n            }\n          })\n          .nonNulls\n          .toList(growable: false),\n    ),\n  };\n\n  @override\n  bool operator ==(covariant final ChatPromptValue other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            const ListEquality<ChatMessage>().equals(messages, other.messages);\n  }\n\n  @override\n  int get hashCode => const ListEquality<ChatMessage>().hash(messages);\n}\n\n/// Input values used to format a prompt.\n/// The keys are the names of the variables in the prompt template\n/// and the values the actual values to use.\ntypedef InputValues = Map<String, dynamic>;\n\n/// Partial input values used to format a prompt.\n/// The keys are the names of the 
variables in the prompt template\n/// and the values the actual values to use.\ntypedef PartialValues = Map<String, dynamic>;\n\n/// {@template prompt_exception}\n/// Exception thrown when some error occurs while working with a prompt.\n/// {@endtemplate}\nfinal class PromptException extends LangChainException {\n  /// {@macro prompt_exception}\n  const PromptException({super.message = ''}) : super(code: 'prompt');\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/retrievers/base.dart",
    "content": "import '../documents/document.dart';\nimport '../runnables/runnable.dart';\nimport '../utils/reduce.dart';\nimport 'types.dart';\n\n/// {@template base_retriever}\n/// Base Retriever class. All retrievers should extend this class.\n/// {@endtemplate}\nabstract class Retriever<Options extends RetrieverOptions>\n    extends Runnable<String, Options, List<Document>> {\n  /// {@macro base_retriever}\n  const Retriever({required super.defaultOptions});\n\n  /// Get the most relevant documents for a given query.\n  ///\n  /// - [input] - The query to search for.\n  /// - [options] - Retrieval options.\n  @override\n  Future<List<Document>> invoke(final String input, {final Options? options}) {\n    return getRelevantDocuments(input, options: options);\n  }\n\n  /// Streams the most relevant documents for the query resulting from\n  /// reducing the input stream.\n  ///\n  /// - [inputStream] - the input stream to reduce and use as the query.\n  /// - [options] - Retrieval options.\n  @override\n  Stream<List<Document>> streamFromInputStream(\n    final Stream<String> inputStream, {\n    final Options? options,\n  }) async* {\n    final input = await inputStream.toList();\n    final reduced = reduce<String>(input);\n    yield* stream(reduced, options: options);\n  }\n\n  /// Get the most relevant documents for a given query.\n  ///\n  /// - [query] - The query to search for.\n  /// - [options] - Retrieval options.\n  Future<List<Document>> getRelevantDocuments(\n    final String query, {\n    final Options? options,\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/retrievers/fake.dart",
    "content": "import '../documents/document.dart';\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template fake_retriever}\n/// A retriever that returns a fixed list of documents.\n/// This class is meant for testing purposes only.\n/// {@endtemplate}\nclass FakeRetriever extends Retriever<RetrieverOptions> {\n  /// {@macro fake_retriever}\n  const FakeRetriever(this.docs)\n    : super(defaultOptions: const RetrieverOptions());\n\n  /// The documents to return.\n  final List<Document> docs;\n\n  @override\n  Future<List<Document>> getRelevantDocuments(\n    final String query, {\n    final RetrieverOptions? options,\n  }) {\n    return Future.value(docs);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/retrievers/retrievers.dart",
    "content": "export 'base.dart';\nexport 'fake.dart';\nexport 'types.dart';\nexport 'vector_store.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/retrievers/types.dart",
    "content": "import 'package:meta/meta.dart';\n\nimport '../langchain/types.dart';\nimport '../vector_stores/types.dart';\n\n/// {@template retriever_options}\n/// Base class for [Retriever] options.\n/// {@endtemplate}\n@immutable\nclass RetrieverOptions extends BaseLangChainOptions {\n  /// {@macro retriever_options}\n  const RetrieverOptions({super.concurrencyLimit});\n}\n\n/// {@template vector_store_retriever_options}\n/// Options for [VectorStoreRetriever].\n/// {@endtemplate}\nclass VectorStoreRetrieverOptions extends RetrieverOptions {\n  /// {@macro vector_store_retriever_options}\n  const VectorStoreRetrieverOptions({\n    this.searchType = const VectorStoreSimilaritySearch(),\n    super.concurrencyLimit,\n  });\n\n  /// The type of search to perform, either:\n  /// - [VectorStoreSearchType.similarity] (default)\n  /// - [VectorStoreSearchType.mmr]\n  final VectorStoreSearchType searchType;\n\n  @override\n  VectorStoreRetrieverOptions copyWith({\n    final VectorStoreSearchType? searchType,\n    final int? concurrencyLimit,\n  }) {\n    return VectorStoreRetrieverOptions(\n      searchType: searchType ?? this.searchType,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/retrievers/vector_store.dart",
    "content": "import '../documents/document.dart';\nimport '../vector_stores/base.dart';\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template vector_store_retriever}\n/// A retriever that uses a vector store to retrieve documents.\n/// {@endtemplate}\nclass VectorStoreRetriever<V extends VectorStore>\n    extends Retriever<VectorStoreRetrieverOptions> {\n  /// {@macro vector_store_retriever}\n  const VectorStoreRetriever({\n    required this.vectorStore,\n    super.defaultOptions = const VectorStoreRetrieverOptions(),\n  });\n\n  /// The vector store to retrieve documents from.\n  final V vectorStore;\n\n  @override\n  Future<List<Document>> getRelevantDocuments(\n    final String query, {\n    final VectorStoreRetrieverOptions? options,\n  }) {\n    return vectorStore.search(\n      query: query,\n      searchType: options?.searchType ?? defaultOptions.searchType,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/binding.dart",
    "content": "import 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_binding}\n/// A [RunnableBinding] allows you to run a [Runnable] object with\n/// [CallOptions].\n///\n/// You can create a [RunnableBinding] using the [Runnable.bind] method.\n///\n/// When you call [invoke] on a [RunnableBinding], it will invoke the\n/// [Runnable] with the [CallOptions] passed to [bind].\n///\n/// Example: Attaching Stop Sequences\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final model = ChatOpenAI(apiKey: openaiApiKey);\n///\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'Tell me a joke about {foo}',\n/// );\n///\n/// final chain = promptTemplate | model.bind(ChatOpenAIOptions(stop: ['\\n']));\n///\n/// final res = await chain.invoke({'foo': 'bears'});\n/// print(res);\n/// // ChatResult{\n/// //   generations: [\n/// //     ChatGeneration{\n/// //       output: AIChatMessage{\n/// //         content: Why don't bears wear shoes?,\n/// //       },\n/// //     },\n/// //   ],\n/// //   usage: ...,\n/// //   modelOutput: ...,\n/// // }\n/// ```\n/// {@endtemplate}\nclass RunnableBinding<\n  RunInput extends Object?,\n  CallOptions extends RunnableOptions,\n  RunOutput extends Object?\n>\n    extends Runnable<RunInput, CallOptions, RunOutput> {\n  /// {@macro runnable_binding}\n  const RunnableBinding({required this.bound, required this.options})\n    : super(defaultOptions: options);\n\n  /// The [Runnable] to bind.\n  final Runnable<RunInput, CallOptions, RunOutput> bound;\n\n  /// The [CallOptions] to bind the [Runnable] with.\n  final CallOptions options;\n\n  /// Invokes the [RunnableBinding] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableBinding] on.\n  /// - [options] - the options to use when invoking the [RunnableBinding].\n  @override\n  Future<RunOutput> invoke(final RunInput input, {final CallOptions? 
options}) {\n    final finalOptions =\n        options?.merge(this.options) as CallOptions? ?? this.options;\n    return bound.invoke(input, options: finalOptions);\n  }\n\n  @override\n  Stream<RunOutput> stream(final RunInput input, {final CallOptions? options}) {\n    return bound.stream(input, options: options ?? this.options);\n  }\n\n  @override\n  void close() {\n    bound.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/fallbacks.dart",
    "content": "import 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_with_fallback}\n/// A [Runnable] that can fallback to other [Runnable]s if it fails.\n///\n/// This class allows for the creation of a [Runnable] chain where a main\n/// [Runnable] is attempted first, and if it fails, a sequence of fallback\n/// [Runnable]s are tried in order. This process continues until one of the\n/// [Runnable]s succeeds or all of them fail, in which case an exception is\n/// thrown.\n///\n/// You can create a [RunnableWithFallback] using the [Runnable.withFallbacks]\n/// method.\n///\n/// Example:\n/// ```dart\n/// final mainChatModel = ChatOpenAI(...);\n/// final fallbackChatModel = ChatOpenAI(...);\n/// final chatModel = mainChatModel.withFallbacks([fallbackChatModel]);\n/// final res = await chatModel.invoke(...);\n/// ```\n/// {@endtemplate}\nclass RunnableWithFallback<RunInput extends Object?, RunOutput extends Object?>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_with_fallback}\n  RunnableWithFallback({required this.mainRunnable, required this.fallbacks})\n    : super(defaultOptions: const RunnableOptions());\n\n  /// The Runnable to run first.\n  final Runnable<RunInput, RunnableOptions, RunOutput> mainRunnable;\n\n  /// A sequence of fallbacks to try if the [mainRunnable] fails.\n  final List<Runnable<RunInput, RunnableOptions, RunOutput>> fallbacks;\n\n  @override\n  Future<RunOutput> invoke(RunInput input, {RunnableOptions? options}) async {\n    Object? firstError;\n    for (final runnable in [mainRunnable, ...fallbacks]) {\n      try {\n        return await runnable.invoke(\n          input,\n          options: firstError == null\n              ? options\n              : runnable.getCompatibleOptions(options),\n        );\n      } catch (e) {\n        firstError ??= e;\n      }\n    }\n    throw Exception('All runnables failed. 
First error: $firstError');\n  }\n\n  @override\n  Future<List<RunOutput>> batch(\n    List<RunInput> inputs, {\n    List<RunnableOptions>? options,\n  }) async {\n    Object? firstError;\n    for (final runnable in [mainRunnable, ...fallbacks]) {\n      List<RunnableOptions>? currentOptions;\n      if (firstError == null) {\n        currentOptions = options;\n      } else {\n        final compatibleOptions = options\n            ?.map(runnable.getCompatibleOptions)\n            .toList(growable: false);\n        final hasNullOptions =\n            compatibleOptions?.any((o) => o == null) ?? false;\n        if (!hasNullOptions) {\n          currentOptions = compatibleOptions?.cast();\n        }\n      }\n\n      try {\n        return await runnable.batch(inputs, options: currentOptions);\n      } catch (e) {\n        firstError ??= e;\n      }\n    }\n    throw Exception('All runnables failed. First error: $firstError');\n  }\n\n  @override\n  Stream<RunOutput> stream(RunInput input, {RunnableOptions? options}) async* {\n    Object? firstError;\n    for (final runnable in [mainRunnable, ...fallbacks]) {\n      try {\n        final stream = runnable.stream(\n          input,\n          options: firstError == null\n              ? options\n              : runnable.getCompatibleOptions(options),\n        );\n        await for (final output in stream) {\n          yield output;\n        }\n        return;\n      } catch (e) {\n        firstError ??= e;\n      }\n    }\n    throw Exception('All runnables failed. First error: $firstError');\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/function.dart",
    "content": "// ignore_for_file: unsafe_variance\n\nimport 'dart:async';\n\nimport 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_function}\n/// A [RunnableFunction] allows you to run a Dart function as part of a chain.\n///\n/// You can create a [RunnableFunction] using the [Runnable.fromFunction]\n/// static method.\n///\n/// When you call [invoke] on a [RunnableFunction], it will invoke the\n/// function, passing the input to it. The output of the function is returned.\n///\n/// Example:\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final model = ChatOpenAI(apiKey: openaiApiKey);\n///\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'How much is {a} + {b}?',\n/// );\n///\n/// final chain = Runnable.fromMap({\n///       'a': Runnable.fromFunction((\n///         final Map<String, String> input,\n///         final options,\n///       ) async {\n///         final foo = input['foo'] ?? '';\n///         return '${foo.length}';\n///       }),\n///       'b': Runnable.fromFunction((\n///         final Map<String, String> input,\n///         final options,\n///       ) async {\n///         final foo = input['foo'] ?? '';\n///         final bar = input['bar'] ?? '';\n///         return '${bar.length * foo.length}';\n///       }),\n///     }) |\n///     promptTemplate |\n///     model |\n///     StringOutputParser();\n///\n/// final res = await chain.invoke({'foo': 'foo', 'bar': 'bar'});\n/// print(res);\n/// // 3 + 9 = 12\n/// ```\n/// {@endtemplate}\nclass RunnableFunction<RunInput extends Object, RunOutput extends Object>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_function}\n  const RunnableFunction({\n    final FutureOr<RunOutput> Function(\n      RunInput input,\n      RunnableOptions? options,\n    )?\n    invoke,\n    final Stream<RunOutput> Function(\n      Stream<RunInput> inputStream,\n      RunnableOptions? 
options,\n    )?\n    stream,\n    super.defaultOptions = const RunnableOptions(),\n  }) : _invokeFunc = invoke,\n       _streamFunc = stream,\n       assert(\n         invoke != null || stream != null,\n         'Either invoke or stream must be provided',\n       );\n\n  /// The function to run.\n  final FutureOr<RunOutput> Function(RunInput input, RunnableOptions? options)?\n  _invokeFunc;\n\n  /// The stream transformer to run.\n  final Stream<RunOutput> Function(\n    Stream<RunInput> inputStream,\n    RunnableOptions? options,\n  )?\n  _streamFunc;\n\n  /// Invokes the [RunnableFunction] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableFunction] on.\n  /// - [options] - the options to use when invoking the [RunnableFunction].\n  @override\n  Future<RunOutput> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) async {\n    if (_invokeFunc != null) {\n      return _invokeFunc(input, options);\n    } else {\n      return stream(input, options: options).first;\n    }\n  }\n\n  /// Streams the [input] through the [RunnableFunction].\n  ///\n  /// - [input] - the input to stream through the [RunnableFunction].\n  /// - [options] - the options to use when streaming the [input].\n  @override\n  Stream<RunOutput> stream(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return streamFromInputStream(Stream.value(input), options: options);\n  }\n\n  @override\n  Stream<RunOutput> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final RunnableOptions? options,\n  }) async* {\n    if (_streamFunc != null) {\n      yield* _streamFunc(inputStream, options);\n    } else {\n      yield* inputStream.asyncMap((final input) {\n        return invoke(input, options: options);\n      });\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/input_map.dart",
    "content": "// ignore_for_file: unsafe_variance\n\nimport 'dart:async';\n\nimport 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_map_input}\n/// A [RunnableMapInput] allows you to map the input to a different value.\n///\n/// You can create a [RunnableMapInput] using the [Runnable.mapInput] static\n/// method.\n///\n/// When you call [invoke] on a [RunnableMapInput], it will take the\n/// input it receives and returns the output returned by the given\n/// [inputMapper] function.\n///\n/// Example:\n///\n/// ```dart\n/// final agent = Agent.fromRunnable(\n///   Runnable.mapInput(\n///     (final AgentPlanInput planInput) => <String, dynamic>{\n///       'input': planInput.inputs['input'],\n///       'agent_scratchpad': buildScratchpad(planInput.intermediateSteps),\n///     },\n///   ).pipe(prompt).pipe(model).pipe(outputParser),\n///   tools: [tool],\n/// );\n/// ```\n/// {@endtemplate}\nclass RunnableMapInput<RunInput extends Object, RunOutput extends Object>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_map_input}\n  const RunnableMapInput(this.inputMapper)\n    : super(defaultOptions: const RunnableOptions());\n\n  /// A function that maps [RunInput] to [RunOutput].\n  final FutureOr<RunOutput> Function(RunInput input) inputMapper;\n\n  /// Invokes the [RunnableMapInput] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableMapInput] on.\n  /// - [options] - not used.\n  @override\n  Future<RunOutput> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) async {\n    return inputMapper(input);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/input_stream_map.dart",
    "content": "// ignore_for_file: unsafe_variance\n\nimport 'dart:async';\n\nimport 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_map_input_stream}\n/// A [RunnableMapInputStream] allows you to map the input stream to a\n/// different stream of values.\n///\n/// You can create a [RunnableMapInputStream] using the [Runnable.mapInputStream]\n/// static method.\n///\n/// When you call [stream] on a [RunnableMapInputStream], it will take the\n/// input stream it receives and returns the output stream returned by the given\n/// [inputStreamMapper] function.\n///\n/// Example:\n/// ```dart\n/// final model = ChatOpenAI(\n///   apiKey: openAiApiKey,\n///   defaultOptions: ChatOpenAIOptions(\n///     responseFormat: ChatOpenAIResponseFormat(\n///       type: ChatOpenAIResponseFormatType.jsonObject,\n///     ),\n///   ),\n/// );\n/// final parser = JsonOutputParser<ChatResult>();\n/// final mapper = Runnable.mapInputStream((Stream<Map<String, dynamic>> inputStream) async* {\n///   yield await inputStream.last;\n/// });\n///\n/// final chain = model.pipe(parser).pipe(mapper);\n///\n/// final stream = chain.stream(\n///   PromptValue.string(\n///     'Output a list of the countries france, spain and japan and their '\n///         'populations in JSON format. Use a dict with an outer key of '\n///         '\"countries\" which contains a list of countries. 
'\n///         'Each country should have the key \"name\" and \"population\"',\n///   ),\n/// );\n/// await stream.forEach((final chunk) => print('$chunk|'));\n/// // {countries: [{name: France, population: 65273511}, {name: Spain, population: 46754778}, {name: Japan, population: 126476461}]}|\n/// ```\n/// {@endtemplate}\nclass RunnableMapInputStream<RunInput extends Object, RunOutput extends Object>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_map_input_stream}\n  const RunnableMapInputStream(this.inputStreamMapper)\n    : super(defaultOptions: const RunnableOptions());\n\n  /// The stream transformer to run.\n  final Stream<RunOutput> Function(Stream<RunInput> inputStream)\n  inputStreamMapper;\n\n  /// Invokes the [RunnableMapInputStream] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableMapInputStream] on.\n  /// - [options] - not used.\n  @override\n  Future<RunOutput> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return streamFromInputStream(Stream.value(input), options: options).first;\n  }\n\n  /// Streams the [input] through the [RunnableMapInputStream].\n  ///\n  /// - [input] - the input to stream through the [RunnableMapInputStream].\n  /// - [options] - not used.\n  @override\n  Stream<RunOutput> stream(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return streamFromInputStream(Stream.value(input), options: options);\n  }\n\n  /// Streams the [inputStream] through the [RunnableMapInputStream].\n  ///\n  /// - [inputStream] - the input stream to stream through the [RunnableMapInputStream].\n  /// - [options] - not used.\n  @override\n  Stream<RunOutput> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final RunnableOptions? options,\n  }) {\n    return inputStreamMapper(inputStream);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/map.dart",
    "content": "import 'dart:async';\n\nimport 'package:async/async.dart' show StreamGroup;\nimport 'package:rxdart/subjects.dart' show ReplaySubject;\n\nimport 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_map}\n/// A [RunnableMap] allows you to run multiple [Runnable] objects in parallel\n/// on the same input returning a map of the results.\n///\n/// You can create a [RunnableMap] using the [Runnable.fromMap] static method.\n///\n/// When you call [invoke] on a [RunnableMap], it will invoke each [Runnable]\n/// in the map in parallel, passing the same input to each one. The output of\n/// each [Runnable] is returned in a map, where the keys are the names of the\n/// outputs.\n///\n/// Example:\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final model = ChatOpenAI(apiKey: openaiApiKey);\n///\n/// final promptTemplate1 = ChatPromptTemplate.fromTemplate(\n///   'What is the city {person} is from?',\n/// );\n/// final promptTemplate2 = ChatPromptTemplate.fromTemplate(\n///   'How old is {person}?',\n/// );\n/// final promptTemplate3 = ChatPromptTemplate.fromTemplate(\n///   'Is {city} a good city for a {age} years old person?',\n/// );\n/// const stringOutputParser = StringOutputParser<ChatResult>();\n///\n/// final chain = Runnable.fromMap({\n///   'city': promptTemplate1 | model | stringOutputParser,\n///   'age': promptTemplate2 | model | stringOutputParser,\n/// }) | promptTemplate3 | model | stringOutputParser;\n///\n/// final res = await chain.invoke({'person': 'Elon Musk'});\n/// print(res);\n/// // It is subjective to determine whether Pretoria, South Africa, is a good\n/// // city for a 50-year-old person as it depends on individual preferences and needs.\n/// ```\n/// {@endtemplate}\nclass RunnableMap<RunInput extends Object>\n    extends Runnable<RunInput, RunnableOptions, Map<String, dynamic>> {\n  /// {@macro runnable_map}\n  const RunnableMap(this.steps)\n    : super(defaultOptions: const 
RunnableOptions());\n\n  /// The map of [Runnable] objects to run in parallel.\n  final Map<String, Runnable<RunInput, RunnableOptions, Object>> steps;\n\n  /// Invokes the [RunnableMap] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableMap] on.\n  /// - [options] - the options to use when invoking the [RunnableMap].\n  @override\n  Future<Map<String, dynamic>> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) async {\n    final futures = steps.entries.map((entry) async {\n      final result = await entry.value.invoke(\n        input,\n        options: entry.value.getCompatibleOptions(options),\n      );\n      return MapEntry(entry.key, result);\n    });\n\n    final results = await Future.wait(futures);\n    return Map.fromEntries(results);\n  }\n\n  @override\n  Stream<Map<String, dynamic>> stream(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return streamFromInputStream(Stream.value(input), options: options);\n  }\n\n  @override\n  Stream<Map<String, dynamic>> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final RunnableOptions? options,\n  }) {\n    final subject = ReplaySubject<RunInput>();\n    inputStream.listen(\n      subject.add,\n      onError: subject.addError,\n      onDone: subject.close,\n    );\n\n    return StreamGroup.merge(\n      steps.entries.map((final entry) {\n        return entry.value\n            .streamFromInputStream(\n              subject.stream,\n              options: entry.value.getCompatibleOptions(options),\n            )\n            .map((final output) => {entry.key: output});\n      }),\n    ).asBroadcastStream();\n  }\n\n  @override\n  void close() {\n    for (final step in steps.values) {\n      step.close();\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/passthrough.dart",
    "content": "import 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_passthrough}\n/// A [RunnablePassthrough] takes the input it receives and passes it through\n/// as output.\n///\n/// You can create a [RunnablePassthrough] using the [Runnable.passthrough]\n/// static method.\n///\n/// When you call [invoke] on a [RunnablePassthrough], it will return the input\n/// it receives.\n///\n/// Example:\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final model = ChatOpenAI(apiKey: openaiApiKey);\n///\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'Tell me a joke about {foo}',\n/// );\n///\n/// final map = Runnable.fromMap({\n///   'foo': Runnable.passthrough(),\n/// });\n/// final chain = map | promptTemplate | model | StringOutputParser();\n///\n/// final res = await chain.invoke('bears');\n/// print(res);\n/// // Why don't bears wear shoes? Because they have bear feet!\n/// ```\n/// {@endtemplate}\nclass RunnablePassthrough<RunInput extends Object>\n    extends Runnable<RunInput, RunnableOptions, RunInput> {\n  /// {@macro runnable_passthrough}\n  const RunnablePassthrough() : super(defaultOptions: const RunnableOptions());\n\n  /// Invokes the [RunnablePassthrough] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnablePassthrough] on.\n  /// - [options] - not used.\n  @override\n  Future<RunInput> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return Future.value(input);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/retry.dart",
    "content": "import 'dart:async';\nimport '../utils/retry_client.dart';\nimport 'runnables.dart';\n\n/// {@template runnable_retry}\n/// A [Runnable] that automatically retries the operation if it fails.\n///\n/// You can create a [RunnableRetry] using [Runnable.withRetry], passing in the\n/// [RetryOptions].\n///\n/// When [invoke] or [batch] is called on the runnable, if the initial attempt\n/// fails, it will be retried according to the specified [RetryOptions].\n///\n/// Example usage:\n/// ```dart\n/// final model = ChatOpenAI(...);\n/// final modelWithRetry = model.withRetry(maxRetries: 2);\n/// final res = await modelWithRetry.invoke(...);\n/// ```\n/// {@endtemplate}\nclass RunnableRetry<RunInput extends Object?, RunOutput extends Object?>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_retry}\n  RunnableRetry({\n    required this.runnable,\n    required super.defaultOptions,\n    required this.retryOptions,\n  });\n\n  /// Runnable that will be retried on error.\n  final Runnable<RunInput, RunnableOptions, RunOutput> runnable;\n\n  /// Options to retry the runnable.\n  final RetryOptions retryOptions;\n\n  @override\n  Future<RunOutput> invoke(RunInput input, {RunnableOptions? options}) async {\n    return retryClient(\n      options: retryOptions,\n      fn: () => runnable.invoke(input, options: options),\n    );\n  }\n\n  @override\n  Future<List<RunOutput>> batch(\n    List<RunInput> inputs, {\n    List<RunnableOptions>? options,\n  }) async {\n    return retryClient(\n      options: retryOptions,\n      fn: () => runnable.batch(inputs, options: options),\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/router.dart",
    "content": "// ignore_for_file: unsafe_variance\n\nimport 'dart:async';\n\nimport '../utils/reduce.dart';\nimport 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_router}\n/// A [RunnableRouter] takes the input it receives and routes it to the runnable\n/// returned by the [router] function.\n///\n/// You can create a [RunnableRouter] using the [Runnable.fromRouter] static\n/// method.\n///\n/// When you call [invoke] on a [RunnableRouter], it will invoke the [router]\n/// function, passing the input to it. Then, the returned runnable will be\n/// invoked with the input.\n///\n/// Example:\n/// ```dart\n/// final router = Runnable.fromRouter((Map<String, dynamic> input, _) {\n///   return switch(input['topic'] as String) {\n///     'langchain' => langchainChain,\n///     'anthropic' => anthropicChain,\n///     _ => generalChain,\n///   };\n/// });\n///\n/// final fullChain = Runnable.fromMap({\n///       'topic': classificationChain,\n///       'question': Runnable.getItemFromMap('question'),\n///     }).pipe(router);\n///\n/// final res2 = await fullChain.invoke({\n///   'question': 'how do I use Anthropic?',\n/// });\n/// print(res2);\n/// // As Dario Amodei told me, using Anthropic is a straightforward process that...\n/// ```\n/// {@endtemplate}\nclass RunnableRouter<RunInput extends Object, RunOutput extends Object>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_router}\n  const RunnableRouter(this.router)\n    : super(defaultOptions: const RunnableOptions());\n\n  /// The function that will be called to determine the runnable to use.\n  final FutureOr<Runnable<RunInput, RunnableOptions, RunOutput>> Function(\n    RunInput input,\n    RunnableOptions? options,\n  )\n  router;\n\n  @override\n  Future<RunOutput> invoke(\n    final RunInput input, {\n    final RunnableOptions? 
options,\n  }) async {\n    final runnable = await router.call(input, options);\n    return runnable.invoke(input, options: options);\n  }\n\n  @override\n  Stream<RunOutput> stream(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) async* {\n    final runnable = await router.call(input, options);\n    yield* runnable.stream(input, options: options);\n  }\n\n  @override\n  Stream<RunOutput> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final RunnableOptions? options,\n  }) async* {\n    final input = await inputStream.toList();\n    final reduced = reduce<RunInput>(input);\n    yield* stream(reduced, options: options);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/runnable.dart",
    "content": "import 'dart:async';\n\nimport '../../utils.dart';\nimport 'binding.dart';\nimport 'fallbacks.dart';\nimport 'function.dart';\nimport 'input_map.dart';\nimport 'input_stream_map.dart';\nimport 'map.dart';\nimport 'passthrough.dart';\nimport 'retry.dart';\nimport 'router.dart';\nimport 'sequence.dart';\nimport 'types.dart';\n\n/// {@template runnable}\n/// A Runnable is a generic unit of work that can be invoked, batched,\n/// streamed, and/or transformed. It is the basic building block of the\n/// LangChain Expression Language (LCEL).\n///\n/// It is implemented by most of the LangChain components (prompt templates,\n/// models, retrievers, output parsers, etc.) which makes it easy to define\n/// custom chains as well as making it possible to invoke them in a standard\n/// way.\n///\n/// The standard interface exposed includes:\n/// - [stream] stream back chunks of the response.\n/// - [invoke] call the chain on an input.\n/// - [batch] call the chain on a list of inputs.\n///\n/// There are also several useful primitives for working with runnables:\n/// - [pipe] allows you to chain runnables together (alternatively, you can use\n///  the `|` operator or the [fromList] static method.\n/// - [fromMap] allows you to run multiple runnables concurrently on the same\n///  input returning a map of the results.\n/// - [passthrough] takes the input it receives and passes it through as output.\n/// - [mapInput] allows you to map the input to a different value.\n/// - [mapInputStream] allows you to map the input stream to a different stream\n///  of values.\n/// - [getItemFromMap] allows you to get a value from the input.\n/// - [getMapFromInput] allows you to output a map with the given key and the\n///  input as value.\n/// - [fromFunction] allows you to run a Dart function as part of a chain.\n/// - [fromRouter] takes the input it receives and routes it to the runnable\n///  returned by the router function.\n/// - [bind] allows you to bind the runnable to 
a set of options.\n/// {@endtemplate}\nabstract class Runnable<\n  RunInput extends Object?,\n  CallOptions extends RunnableOptions,\n  RunOutput extends Object?\n> {\n  /// {@macro runnable}\n  const Runnable({required this.defaultOptions});\n\n  /// The default options to use when invoking the [Runnable].\n  ///\n  /// This can be overridden by passing options to the [invoke], [batch], or\n  /// [stream] methods.\n  final CallOptions defaultOptions;\n\n  /// Creates a [RunnableSequence] from a list of [Runnable] objects.\n  ///\n  /// A [RunnableSequence] allows you to run multiple [Runnable] objects\n  /// sequentially, passing the output of the previous [Runnable] to the next one.\n  ///\n  /// - [runnables] - the list of [Runnable] objects to run in sequence.\n  static Runnable fromList(final List<Runnable> runnables) {\n    return RunnableSequence.from(runnables);\n  }\n\n  /// Creates a [RunnableMap] from a map of [Runnable] objects.\n  ///\n  /// A [RunnableMap] allows you to run multiple [Runnable] objects in parallel\n  /// on the same input returning a map of the results.\n  ///\n  /// - [steps] - the map of [Runnable] objects to run in parallel.\n  static Runnable<RunInput, RunnableOptions, Map<String, dynamic>> fromMap<\n    RunInput extends Object\n  >(final Map<String, Runnable<RunInput, RunnableOptions, Object>> steps) {\n    return RunnableMap<RunInput>(steps);\n  }\n\n  /// Creates a [RunnableFunction] from a Dart function.\n  ///\n  /// A [RunnableFunction] allows you to run a Dart function as part of a chain.\n  ///\n  /// - [function] - the function to run.\n  static Runnable<RunInput, RunnableOptions, RunOutput>\n  fromFunction<RunInput extends Object, RunOutput extends Object>({\n    final FutureOr<RunOutput> Function(\n      RunInput input,\n      RunnableOptions? options,\n    )?\n    invoke,\n    final Stream<RunOutput> Function(\n      Stream<RunInput> inputStream,\n      RunnableOptions? 
options,\n    )?\n    stream,\n  }) {\n    return RunnableFunction<RunInput, RunOutput>(\n      invoke: invoke,\n      stream: stream,\n    );\n  }\n\n  /// Creates a [RunnableRouter] from a Dart function.\n  ///\n  /// A [RunnableRouter] takes the input it receives and routes it to the runnable\n  /// returned by the [router] function.\n  ///\n  /// - [router] - the function that will be called to determine the runnable to use.\n  static Runnable<RunInput, RunnableOptions, RunOutput>\n  fromRouter<RunInput extends Object, RunOutput extends Object>(\n    final FutureOr<Runnable<RunInput, RunnableOptions, RunOutput>> Function(\n      RunInput input,\n      RunnableOptions? options,\n    )\n    router,\n  ) {\n    return RunnableRouter<RunInput, RunOutput>(router);\n  }\n\n  /// Creates a [RunnablePassthrough].\n  ///\n  /// A [RunnablePassthrough] takes the input it receives and passes it through\n  /// as output.\n  static Runnable<RunInput, RunnableOptions, RunInput>\n  passthrough<RunInput extends Object>() {\n    return RunnablePassthrough<RunInput>();\n  }\n\n  /// Creates a [RunnableMapInput] from a function.\n  ///\n  /// A [RunnableMapInput] allows you to map the input to a different value.\n  ///\n  /// - [inputMapper] - a function that maps [RunInput] to [RunOutput].\n  static Runnable<RunInput, RunnableOptions, RunOutput> mapInput<\n    RunInput extends Object,\n    RunOutput extends Object\n  >(final FutureOr<RunOutput> Function(RunInput input) inputMapper) {\n    return RunnableMapInput<RunInput, RunOutput>(inputMapper);\n  }\n\n  /// Creates a [RunnableMapInputStream] from an asynchronous generator.\n  ///\n  /// A [RunnableMapInputStream] allows you to map the input stream to a\n  /// different stream of values.\n  ///\n  /// - [inputStreamMapper] - the stream transformer to run.\n  static Runnable<RunInput, RunnableOptions, RunOutput>\n  mapInputStream<RunInput extends Object, RunOutput extends Object>(\n    final Stream<RunOutput> 
Function(Stream<RunInput> inputStream)\n    inputStreamMapper,\n  ) {\n    return RunnableMapInputStream<RunInput, RunOutput>(inputStreamMapper);\n  }\n\n  /// Convenience method to return a value from an input map.\n  ///\n  /// - [key] - the key of the item to get from the input map.\n  static Runnable<Map<String, dynamic>, RunnableOptions, RunOutput>\n  getItemFromMap<RunOutput extends Object>(final String key) {\n    return Runnable.mapInput<Map<String, dynamic>, RunOutput>(\n      (input) => input[key],\n    );\n  }\n\n  /// Convenience method to return a map with the given key and the input\n  /// as value.\n  ///\n  /// - [key] - the key where to place the input in the output map.\n  static Runnable<RunInput, RunnableOptions, Map<String, dynamic>>\n  getMapFromInput<RunInput extends Object>([final String key = 'input']) {\n    return Runnable.mapInput<RunInput, Map<String, dynamic>>(\n      (input) => {key: input},\n    );\n  }\n\n  /// Invokes the [Runnable] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [Runnable] on.\n  /// - [options] - the options to use when invoking the [Runnable].\n  Future<RunOutput> invoke(final RunInput input, {final CallOptions? options});\n\n  /// Batches the invocation of the [Runnable] on the given [inputs].\n  ///\n  /// If the underlying provider supports batching, this method will try to\n  /// batch the calls to the provider. Otherwise, it will just call [invoke] on\n  /// each input concurrently.\n  ///\n  /// You can configure the concurrency limit by setting the `concurrencyLimit`\n  /// field in the [options] parameter.\n  ///\n  /// - [inputs] - the inputs to invoke the [Runnable] on concurrently.\n  /// - [options] - the options to use when invoking the [Runnable]. 
It can be:\n  ///   * `null`: the default options are used.\n  ///   * List with 1 element: the same options are used for all inputs.\n  ///   * List with the same length as the inputs: each input gets its own options.\n  Future<List<RunOutput>> batch(\n    final List<RunInput> inputs, {\n    final List<CallOptions>? options,\n  }) async {\n    // By default, it just calls `.invoke` on each input concurrently\n    // Subclasses should override this method if they support batching\n    assert(\n      options == null || options.length == 1 || options.length == inputs.length,\n    );\n\n    final finalOptions = options?.first ?? defaultOptions;\n    final concurrencyLimit = finalOptions.concurrencyLimit;\n\n    var index = 0;\n    final results = <RunOutput>[];\n    for (final chunk in chunkList(inputs, chunkSize: concurrencyLimit)) {\n      final chunkResults = await Future.wait(\n        chunk.map(\n          (final input) => invoke(\n            input,\n            options: options?.length == 1 ? options![0] : options?[index++],\n          ),\n        ),\n      );\n      results.addAll(chunkResults);\n    }\n    return results;\n  }\n\n  /// Streams the output of invoking the [Runnable] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [Runnable] on.\n  /// - [options] - the options to use when invoking the [Runnable].\n  Stream<RunOutput> stream(\n    final RunInput input, {\n    final CallOptions? 
options,\n  }) async* {\n    // By default, it just emits the result of calling `.invoke`\n    // Subclasses should override this method if they support streaming output\n    yield await invoke(input, options: options);\n  }\n\n  /// Streams the output of invoking the [Runnable] on the given [inputStream].\n  ///\n  /// - [inputStream] - the input stream to invoke the [Runnable] on.\n  /// - [options] - the options to use when invoking the [Runnable].\n  Stream<RunOutput> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final CallOptions? options,\n  }) {\n    return inputStream.asyncExpand((final input) {\n      return stream(input, options: options);\n    });\n  }\n\n  /// Pipes the output of this [Runnable] into another [Runnable] using a\n  /// [RunnableSequence].\n  ///\n  /// A [RunnableSequence] allows you to run multiple [Runnable] objects\n  /// sequentially, passing the output of the previous [Runnable] to the next one.\n  ///\n  /// - [next] - the [Runnable] to pipe the output into.\n  RunnableSequence<RunInput, NewRunOutput> pipe<\n    NewRunOutput extends Object?,\n    NewCallOptions extends RunnableOptions\n  >(final Runnable<RunOutput, NewCallOptions, NewRunOutput> next) {\n    return RunnableSequence<RunInput, NewRunOutput>(first: this, last: next);\n  }\n\n  /// Binds the [Runnable] to the given [options].\n  ///\n  /// - [options] - the [CallOptions] to bind the [Runnable] with.\n  RunnableBinding<RunInput, CallOptions, RunOutput> bind(\n    final CallOptions options,\n  ) {\n    return RunnableBinding<RunInput, CallOptions, RunOutput>(\n      bound: this,\n      options: options,\n    );\n  }\n\n  /// Adds fallback runnables to be invoked if the primary runnable fails.\n  ///\n  /// This method creates a [RunnableWithFallback] instance that wraps the\n  /// current [Runnable]. If the initial invocation of the current [Runnable]\n  /// fails, the [fallbacks] runnables are attempted in the order they are\n  /// provided. 
This process continues until a runnable succeeds or all\n  /// fallbacks fail. The result of the first successful runnable is returned,\n  /// or an error is thrown if all runnables fail.\n  ///\n  /// - [fallbacks] - A list of [Runnable] instances to be used as fallbacks.\n  RunnableWithFallback<RunInput, RunOutput> withFallbacks(\n    List<Runnable<RunInput, RunnableOptions, RunOutput>> fallbacks,\n  ) {\n    return RunnableWithFallback<RunInput, RunOutput>(\n      mainRunnable: this,\n      fallbacks: fallbacks,\n    );\n  }\n\n  /// Adds retry logic to an existing runnable.\n  ///\n  /// This method creates a [RunnableRetry] instance. If the current [Runnable]\n  /// throws an exception during invocation, it will be retried based on the\n  /// configuration provided. By default the runnable will be retried 3 times\n  /// with exponential delay between each retry.\n  ///\n  ///  - [maxRetries] - max attempts to retry the runnable.\n  ///  - [retryIf] - evaluator function to check whether to retry based on the\n  ///    exception thrown.\n  ///  - [delayDurations] - by default runnable will be retried based on an\n  ///    exponential backoff strategy with base delay as 1 second. But you can\n  ///    override this behavior by providing an optional list of [Duration]s.\n  ///  - [addJitter] - whether to add jitter to the delay.\n  RunnableRetry<RunInput, RunOutput> withRetry({\n    final int maxRetries = 3,\n    final FutureOr<bool> Function(Object e)? retryIf,\n    final List<Duration?>? 
delayDurations,\n    final bool addJitter = false,\n  }) {\n    return RunnableRetry<RunInput, RunOutput>(\n      runnable: this,\n      defaultOptions: defaultOptions,\n      retryOptions: RetryOptions(\n        maxRetries: maxRetries,\n        retryIf: retryIf,\n        delayDurations: delayDurations,\n        addJitter: addJitter,\n      ),\n    );\n  }\n\n  /// Returns the given [options] if they are compatible with the [Runnable],\n  /// otherwise returns `null`.\n  CallOptions? getCompatibleOptions(final RunnableOptions? options) {\n    return options is CallOptions ? options : null;\n  }\n\n  /// Cleans up any resources associated with the [Runnable].\n  ///\n  /// For example, if the [Runnable] uses an HTTP client internally, it closes\n  /// it. If there is no resource to clean up, this method does nothing.\n  ///\n  /// Don't try to call the [Runnable] after calling this method.\n  void close() {\n    // Override this method if the Runnable needs to clean up resources\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/runnable_ext.dart",
    "content": "import 'runnable.dart';\nimport 'sequence.dart';\nimport 'types.dart';\n\n/// Extension methods for [Runnable]s.\nextension RunnableX<\n  RunInput extends Object,\n  CallOptions extends RunnableOptions,\n  RunOutput extends Object,\n  NewRunOutput extends Object\n>\n    on Runnable<RunInput, CallOptions, RunOutput> {\n  /// Pipes the output of this [Runnable] into another [Runnable].\n  ///\n  /// This is a convenience operator for [Runnable.pipe].\n  ///\n  /// Mind that this operator offers less type safety than [Runnable.pipe] as\n  /// Dart does not take into account [NewRunOutput] type when resolving the\n  /// generic type parameters of the extension; it will always resolve\n  /// [NewRunOutput] to [Object] (see https://github.com/dart-lang/language/issues/1044).\n  ///\n  /// - [next] - the [Runnable] to pipe the output into.\n  RunnableSequence<RunInput, NewRunOutput> operator |(\n    final Runnable<RunOutput, RunnableOptions, NewRunOutput> next,\n  ) {\n    return pipe(next);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/runnables.dart",
    "content": "export 'binding.dart';\nexport 'fallbacks.dart';\nexport 'function.dart';\nexport 'input_map.dart';\nexport 'input_stream_map.dart';\nexport 'map.dart';\nexport 'passthrough.dart';\nexport 'retry.dart';\nexport 'router.dart';\nexport 'runnable.dart';\nexport 'runnable_ext.dart';\nexport 'sequence.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/sequence.dart",
    "content": "import 'runnable.dart';\nimport 'types.dart';\n\n/// {@template runnable_sequence}\n/// A [RunnableSequence] allows you to run multiple [Runnable] objects\n/// sequentially, passing the output of the previous [Runnable] to the next one.\n///\n/// You can create a [RunnableSequence] in several ways:\n///\n/// - Calling [Runnable.pipe] method which takes another [Runnable] as an\n/// argument. E.g.:\n///\n/// ```dart\n/// final chain = promptTemplate.pipe(chatModel);\n/// ```\n///\n/// - Using the `|` operator. This is a convenience method that calls\n/// [Runnable.pipe] under the hood (note that it offers less type safety than\n/// [Runnable.pipe] because of Dart limitations). E.g.:\n///\n/// ```dart\n/// final chain = promptTemplate | chatModel;\n/// ```\n///\n/// - Using the [Runnable.fromList] static method with a list of [Runnable],\n/// which will run in sequence when invoked. E.g.:\n///\n/// ```dart\n/// final chain = Runnable.fromList([promptTemplate, chatModel]);\n/// ```\n///\n/// When you call [invoke] on a [RunnableSequence], it will invoke each\n/// [Runnable] in the sequence in order, passing the output of the previous\n/// [Runnable] to the next one. 
The output of the last [Runnable] in the\n/// sequence is returned.\n///\n/// You can think of [RunnableSequence] as the replacement for\n/// [SequentialChain].\n///\n/// Example:\n/// ```dart\n/// final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n/// final model = ChatOpenAI(apiKey: openaiApiKey);\n///\n/// final promptTemplate = ChatPromptTemplate.fromTemplate(\n///   'Tell me a joke about {topic}',\n/// );\n///\n/// // The following three chains are equivalent:\n/// final chain1 = promptTemplate | model | StringOutputParser();\n/// final chain2 = promptTemplate.pipe(model).pipe(StringOutputParser());\n/// final chain3 = Runnable.fromList(\n///   [promptTemplate, model, StringOutputParser()],\n/// );\n///\n/// final res = await chain1.invoke({'topic': 'bears'});\n/// print(res);\n/// // Why don't bears wear shoes? Because they have bear feet!\n/// ```\n/// {@endtemplate}\nclass RunnableSequence<RunInput extends Object?, RunOutput extends Object?>\n    extends Runnable<RunInput, RunnableOptions, RunOutput> {\n  /// {@macro runnable_sequence}\n  const RunnableSequence({\n    required this.first,\n    this.middle = const [],\n    required this.last,\n  }) : super(defaultOptions: const RunnableOptions());\n\n  /// The first [Runnable] in the [RunnableSequence].\n  final Runnable<RunInput, RunnableOptions, Object?> first;\n\n  /// The middle [Runnable]s in the [RunnableSequence].\n  final List<Runnable> middle;\n\n  /// The last [Runnable] in the [RunnableSequence].\n  final Runnable<Object?, RunnableOptions, RunOutput> last;\n\n  /// Returns a list of all the [Runnable]s in the [RunnableSequence].\n  List<Runnable> get steps => [first, ...middle, last];\n\n  /// Creates a [RunnableSequence] from a list of [Runnable]s.\n  ///\n  /// - [runnables] - the [Runnable]s to create the [RunnableSequence] from.\n  static RunnableSequence from(final List<Runnable> runnables) {\n    return RunnableSequence(\n      first: runnables.first,\n      middle: 
runnables.sublist(1, runnables.length - 1),\n      last: runnables.last,\n    );\n  }\n\n  /// Invokes the [RunnableSequence] on the given [input].\n  ///\n  /// - [input] - the input to invoke the [RunnableSequence] on.\n  /// - [options] - the options to use when invoking the [RunnableSequence].\n  @override\n  Future<RunOutput> invoke(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) async {\n    Object? nextStepInput = input;\n\n    for (final step in [first, ...middle]) {\n      nextStepInput = await step.invoke(\n        nextStepInput,\n        options: step.getCompatibleOptions(options),\n      );\n    }\n\n    return last.invoke(\n      nextStepInput,\n      options: last.getCompatibleOptions(options),\n    );\n  }\n\n  @override\n  Stream<RunOutput> stream(\n    final RunInput input, {\n    final RunnableOptions? options,\n  }) {\n    return streamFromInputStream(\n      Stream.value(input).asBroadcastStream(),\n      options: options,\n    );\n  }\n\n  @override\n  Stream<RunOutput> streamFromInputStream(\n    final Stream<RunInput> inputStream, {\n    final RunnableOptions? 
options,\n  }) async* {\n    Stream<Object?> nextStepStream;\n    try {\n      nextStepStream = first.streamFromInputStream(\n        inputStream,\n        options: first.getCompatibleOptions(options),\n      );\n    } on TypeError catch (e) {\n      _throwInvalidInputTypeStream(e, first);\n    }\n\n    for (final step in middle) {\n      try {\n        nextStepStream = step.streamFromInputStream(\n          nextStepStream,\n          options: step.getCompatibleOptions(options),\n        );\n      } on TypeError catch (e) {\n        _throwInvalidInputTypeStream(e, step);\n      }\n    }\n\n    try {\n      yield* last.streamFromInputStream(\n        nextStepStream,\n        options: last.getCompatibleOptions(options),\n      );\n    } on TypeError catch (e) {\n      _throwInvalidInputTypeStream(e, last);\n    }\n  }\n\n  /// Pipes the output of this [RunnableSequence] into another [Runnable].\n  ///\n  /// - [next] - the [Runnable] to pipe the output into.\n  @override\n  RunnableSequence<RunInput, NewRunOutput> pipe<\n    NewRunOutput extends Object?,\n    NewCallOptions extends RunnableOptions\n  >(final Runnable<RunOutput, NewCallOptions, NewRunOutput> next) {\n    if (next is RunnableSequence<RunOutput, NewRunOutput>) {\n      final nextSeq = next as RunnableSequence<RunOutput, NewRunOutput>;\n      return RunnableSequence(\n        first: first,\n        middle: [...middle, last, nextSeq.first, ...nextSeq.middle],\n        last: nextSeq.last,\n      );\n    } else {\n      return RunnableSequence(\n        first: first,\n        middle: [...middle, last],\n        last: next,\n      );\n    }\n  }\n\n  /// Provides a better error message for type errors when streaming.\n  Never _throwInvalidInputTypeStream(\n    final TypeError e,\n    final Runnable runnable,\n  ) {\n    // TypeError: type '_BroadcastStream<X>' is not a subtype of type 'Stream<Y>' of 'inputStream'\n    final pattern = RegExp(\n      
r'_(As)?BroadcastStream<(?<BroadcastType>[^>]+)>.*?Stream<(?<StreamType>[^>]+)>',\n    );\n    final error = e.toString();\n    final match = pattern.firstMatch(error);\n    final actualInputType = match?.namedGroup('BroadcastType') ?? 'Unknown';\n    final expectedInputType = match?.namedGroup('StreamType') ?? 'Unknown';\n    final errorMessage =\n        '''\n${runnable.runtimeType} runnable expects an input type of $expectedInputType, but received an instance of type $actualInputType instead.\n\nPlease ensure that the output of the previous runnable in the sequence matches the expected input type of the current runnable. \n    ''';\n    throw ArgumentError(errorMessage);\n  }\n\n  @override\n  void close() {\n    for (final step in steps) {\n      step.close();\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/runnables/types.dart",
    "content": "import 'package:meta/meta.dart';\n\n/// {@template runnable_options}\n/// Options to pass into a runnable.\n/// {@endtemplate}\n@immutable\nclass RunnableOptions {\n  /// {@macro runnable_options}\n  const RunnableOptions({this.concurrencyLimit = 1000});\n\n  /// The maximum number of concurrent calls that the runnable can make.\n  /// Defaults to 1000 (different Runnable types may have different defaults).\n  final int concurrencyLimit;\n\n  /// Creates a copy of this [RunnableOptions] with the given fields replaced\n  /// by the new values.\n  RunnableOptions copyWith({int? concurrencyLimit}) {\n    return RunnableOptions(\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  /// Merges this [RunnableOptions] with another [RunnableOptions].\n  RunnableOptions merge(RunnableOptions? other) {\n    return copyWith(concurrencyLimit: other?.concurrencyLimit);\n  }\n\n  @override\n  bool operator ==(covariant final RunnableOptions other) {\n    return concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/stores/base.dart",
    "content": "import 'dart:async';\n\n/// {@template base_store}\n/// Abstract interface for a key-value store.\n/// {@endtemplate}\nabstract interface class BaseStore<K, V> {\n  /// {@macro base_store}\n  const BaseStore();\n\n  /// Returns the values associated with the given keys.\n  ///\n  /// If a key is not found, the corresponding value will be `null`.\n  Future<List<V?>> get(final List<K> keys);\n\n  /// Sets the given key-value pairs.\n  Future<void> set(final List<(K, V)> keyValuePairs);\n\n  /// Deletes the given keys.\n  Future<void> delete(final List<K> keys);\n\n  /// Returns a stream that emits all the keys that match the given prefix.\n  Stream<K> yieldKeys({final String? prefix});\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/stores/stores.dart",
    "content": "export 'base.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/tools/base.dart",
    "content": "// ignore_for_file: avoid_equals_and_hash_code_on_mutable_classes, avoid_implementing_value_types, unsafe_variance\nimport 'dart:async';\n\nimport 'package:collection/collection.dart';\nimport 'package:meta/meta.dart';\n\nimport '../langchain/base.dart';\nimport '../utils/reduce.dart';\nimport 'string.dart';\nimport 'types.dart';\n\n/// {@template tool_spec}\n/// The specification of a LangChain tool without the actual implementation.\n/// {@endtemplate}\nclass ToolSpec {\n  /// {@macro tool_spec}\n  const ToolSpec({\n    required this.name,\n    required this.description,\n    required this.inputJsonSchema,\n    this.strict = false,\n  });\n\n  /// The unique name of the tool that clearly communicates its purpose.\n  final String name;\n\n  /// Used to tell the model how/when/why to use the tool.\n  /// You can provide few-shot examples as a part of the description.\n  final String description;\n\n  /// Schema to parse and validate tool's input arguments.\n  /// Following the [JSON Schema specification](https://json-schema.org).\n  ///\n  /// Example:\n  /// ```json\n  /// {\n  ///   'type': 'object',\n  ///   'properties': {\n  ///     'answer': {\n  ///       'type': 'string',\n  ///       'description': 'The answer to the question being asked',\n  ///     },\n  ///     'sources': {\n  ///       'type': 'array',\n  ///       'items': {'type': 'string'},\n  ///       'description': 'The sources used to answer the question',\n  ///     },\n  ///   },\n  ///   'required': ['answer', 'sources'],\n  /// },\n  /// ```\n  final Map<String, dynamic> inputJsonSchema;\n\n  /// Whether to enable strict schema adherence when generating the tool call.\n  /// If set to true, the model will follow the exact schema defined in the\n  /// [inputJsonSchema] field.\n  ///\n  /// This is only supported by some providers (e.g. OpenAI). Mind that when\n  /// enabled, only a subset of JSON Schema may be supported. 
Check out the\n  /// provider's tool calling documentation for more information.\n  final bool strict;\n\n  @override\n  bool operator ==(covariant final ToolSpec other) {\n    final mapEquals = const DeepCollectionEquality().equals;\n    return identical(this, other) ||\n        name == other.name &&\n            description == other.description &&\n            mapEquals(inputJsonSchema, other.inputJsonSchema) &&\n            strict == other.strict;\n  }\n\n  @override\n  int get hashCode =>\n      name.hashCode ^\n      description.hashCode ^\n      inputJsonSchema.hashCode ^\n      strict.hashCode;\n\n  @override\n  String toString() {\n    return '''\nToolSpec{\n  name: $name,\n  description: $description,\n  inputJsonSchema: $inputJsonSchema,\n  strict: $strict,\n}\n''';\n  }\n\n  /// Converts the tool spec to a JSON-serializable map.\n  Map<String, dynamic> toJson() {\n    return {\n      'name': name,\n      'description': description,\n      'inputJsonSchema': inputJsonSchema,\n      'strict': strict,\n    };\n  }\n}\n\n/// {@template tool}\n/// A LangChain tool.\n///\n/// The [Input] to the tool needs to be described by the [inputJsonSchema].\n///\n/// You can easily create a tool from a function using [Tool.fromFunction].\n///\n/// If you want to create a tool that accepts a single string input and returns\n/// a string output, you can use [StringTool] or [StringTool.fromFunction].\n/// {@endtemplate}\nabstract base class Tool<\n  Input extends Object,\n  Options extends ToolOptions,\n  Output extends Object\n>\n    extends BaseLangChain<Input, Options, Output>\n    implements ToolSpec {\n  /// {@macro tool}\n  Tool({\n    required this.name,\n    required this.description,\n    required this.inputJsonSchema,\n    this.strict = false,\n    this.returnDirect = false,\n    this.handleToolError,\n    final Options? 
defaultOptions,\n  }) : assert(name.isNotEmpty, 'Tool name cannot be empty.'),\n       assert(description.isNotEmpty, 'Tool description cannot be empty.'),\n       super(defaultOptions: defaultOptions ?? const ToolOptions() as Options);\n\n  @override\n  final String name;\n\n  @override\n  final String description;\n\n  @override\n  final Map<String, dynamic> inputJsonSchema;\n\n  @override\n  final bool strict;\n\n  /// Whether to return the tool's output directly.\n  /// Setting this to true means that after the tool is called,\n  /// the AgentExecutor will stop looping.\n  final bool returnDirect;\n\n  /// Handle the content of the [ToolException] thrown by the tool.\n  final Output Function(ToolException)? handleToolError;\n\n  /// Creates a [Tool] from a function.\n  ///\n  /// - [name] is the unique name of the tool that clearly communicates its\n  ///   purpose.\n  /// - [description] is used to tell the model how/when/why to use the tool.\n  ///   You can provide few-shot examples as a part of the description.\n  /// - [inputJsonSchema] is the schema to parse and validate tool's input.\n  /// - [strict] whether to enable strict schema adherence when generating the\n  ///   tool call (only supported by some providers).\n  /// - [func] is the function that will be called when the tool is run.\n  ///   arguments.\n  /// - [getInputFromJson] is a function that parses the input JSON to the\n  ///   tool's input type. By default, it assumes the input values is under\n  ///   the key 'input'. 
Define your own deserialization logic if the input\n  ///   is not a primitive type or is under a different key.\n  /// - [returnDirect] whether to return the tool's output directly.\n  ///   Setting this to true means that after the tool is called,\n  ///   the AgentExecutor will stop looping.\n  /// - [handleToolError] is a function that handles the content of the\n  ///   [ToolException] thrown by the tool.\n  static Tool fromFunction<Input extends Object, Output extends Object>({\n    required final String name,\n    required final String description,\n    required final Map<String, dynamic> inputJsonSchema,\n    final bool strict = false,\n    required final FutureOr<Output> Function(Input input) func,\n    Input Function(Map<String, dynamic> json)? getInputFromJson,\n    final bool returnDirect = false,\n    final Output Function(ToolException)? handleToolError,\n  }) {\n    return _ToolFunc<Input, Output>(\n      name: name,\n      description: description,\n      inputJsonSchema: inputJsonSchema,\n      strict: strict,\n      function: func,\n      getInputFromJson:\n          getInputFromJson ??\n          (json) {\n            if (json.containsKey('input')) {\n              return json['input'] as Input;\n            }\n            return json as Input;\n          },\n      returnDirect: returnDirect,\n      handleToolError: handleToolError,\n    );\n  }\n\n  /// Runs the tool.\n  ///\n  /// - [input] is the input to the tool.\n  /// - [options] is the options to pass to the tool.\n  @override\n  Future<Output> invoke(final Input input, {final Options? 
options}) async {\n    try {\n      return invokeInternal(input, options: options);\n    } on ToolException catch (e) {\n      if (handleToolError != null) {\n        return handleToolError!(e);\n      } else {\n        rethrow;\n      }\n    } catch (e) {\n      rethrow;\n    }\n  }\n\n  /// Actual implementation of [invoke] method logic.\n  @protected\n  Future<Output> invokeInternal(final Input input, {final Options? options});\n\n  /// Streams the tool's output for the input resulting from\n  /// reducing the input stream.\n  ///\n  /// - [inputStream] - the input stream to reduce and use as the input.\n  /// - [options] is the options to pass to the tool.\n  @override\n  Stream<Output> streamFromInputStream(\n    final Stream<Input> inputStream, {\n    final Options? options,\n  }) async* {\n    final input = await inputStream.toList();\n    final reduced = reduce<Input>(input);\n    yield* stream(reduced, options: options);\n  }\n\n  /// Parses the input JSON to the tool's input type.\n  Input getInputFromJson(final Map<String, dynamic> json);\n\n  @override\n  bool operator ==(covariant final ToolSpec other) {\n    final mapEquals = const DeepCollectionEquality().equals;\n    return identical(this, other) ||\n        name == other.name &&\n            description == other.description &&\n            mapEquals(inputJsonSchema, other.inputJsonSchema) &&\n            strict == other.strict;\n  }\n\n  @override\n  int get hashCode =>\n      name.hashCode ^\n      description.hashCode ^\n      inputJsonSchema.hashCode ^\n      strict.hashCode;\n\n  @override\n  Map<String, dynamic> toJson() {\n    return {\n      'name': name,\n      'description': description,\n      'inputJsonSchema': inputJsonSchema,\n      'strict': strict,\n    };\n  }\n}\n\n/// {@template tool_func}\n/// A tool that accepts a function as input.\n/// Used in [Tool.fromFunction].\n/// {@endtemplate}\nfinal class _ToolFunc<Input extends Object, Output extends Object>\n    extends Tool<Input, 
ToolOptions, Output> {\n  /// {@macro tool_func}\n  _ToolFunc({\n    required super.name,\n    required super.description,\n    required super.inputJsonSchema,\n    required super.strict,\n    required FutureOr<Output> Function(Input input) function,\n    required Input Function(Map<String, dynamic> json) getInputFromJson,\n    super.returnDirect = false,\n    super.handleToolError,\n    super.defaultOptions,\n  }) : _getInputFromJson = getInputFromJson,\n       _function = function;\n\n  /// The function to run when the tool is called.\n  final FutureOr<Output> Function(Input toolInput) _function;\n\n  /// The function to parse the input JSON to the tool's input type.\n  final Input Function(Map<String, dynamic> json) _getInputFromJson;\n\n  @override\n  Future<Output> invokeInternal(\n    final Input toolInput, {\n    final ToolOptions? options,\n  }) async {\n    return _function(toolInput);\n  }\n\n  @override\n  Input getInputFromJson(final Map<String, dynamic> json) {\n    return _getInputFromJson(json);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/tools/fake.dart",
    "content": "import 'dart:async';\n\nimport 'string.dart';\nimport 'types.dart';\n\n/// {@template fake_tool}\n/// Fake tool for testing.\n/// It just returns the input string as is.\n/// {@endtemplate}\nfinal class FakeTool extends StringTool<ToolOptions> {\n  /// {@macro fake_tool}\n  FakeTool()\n    : super(\n        name: 'take',\n        description: 'Returns the input string as is.',\n        inputDescription: 'A string',\n      );\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final ToolOptions? options,\n  }) async {\n    try {\n      return toolInput;\n    } catch (e) {\n      return \"I don't know how to do that.\";\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/tools/string.dart",
    "content": "import 'dart:async';\n\nimport 'base.dart';\nimport 'types.dart';\n\n/// {@template string_tool}\n/// Base class for tools that accept a single string input and returns a\n/// string output.\n/// {@endtemplate}\nabstract base class StringTool<Options extends ToolOptions>\n    extends Tool<String, Options, String> {\n  /// {@macro string_tool}\n  StringTool({\n    required super.name,\n    required super.description,\n    final String inputDescription = 'The input to the tool',\n    super.strict = false,\n    super.returnDirect = false,\n    super.handleToolError,\n    super.defaultOptions,\n  }) : super(\n         inputJsonSchema: {\n           'type': 'object',\n           'properties': {\n             'input': {'type': 'string', 'description': inputDescription},\n           },\n           'required': ['input'],\n         },\n       );\n\n  /// Creates a [StringTool] from a function.\n  ///\n  /// - [name] is the unique name of the tool that clearly communicates its\n  ///   purpose.\n  /// - [description] is used to tell the model how/when/why to use the tool.\n  ///   You can provide few-shot examples as a part of the description.\n  /// - [strict] whether to enable strict schema adherence when generating the\n  ///   tool call (only supported by some providers).\n  /// - [func] is the function that will be called when the tool is run.\n  /// - [returnDirect] whether to return the tool's output directly.\n  ///   Setting this to true means that after the tool is called,\n  ///   the AgentExecutor will stop looping.\n  /// - [handleToolError] is a function that handles the content of the\n  ///   [ToolException] thrown by the tool.\n  static StringTool fromFunction<Options extends ToolOptions>({\n    required final String name,\n    required final String description,\n    final String inputDescription = 'The input to the tool',\n    final bool strict = false,\n    required final FutureOr<String> Function(String input) func,\n    final bool 
returnDirect = false,\n    final String Function(ToolException)? handleToolError,\n  }) {\n    return _StringToolFunc<Options>(\n      name: name,\n      description: description,\n      inputDescription: inputDescription,\n      strict: strict,\n      func: func,\n      returnDirect: returnDirect,\n      handleToolError: handleToolError,\n    );\n  }\n\n  /// Actual implementation of [invoke] method logic with string input.\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final Options? options,\n  });\n\n  @override\n  String getInputFromJson(final Map<String, dynamic> json) {\n    return json['input'] as String;\n  }\n}\n\n/// {@template string_tool_func}\n/// Implementation of [StringTool] that accepts a function as input.\n/// Used in [StringTool.fromFunction].\n/// {@endtemplate}\nfinal class _StringToolFunc<Options extends ToolOptions>\n    extends StringTool<Options> {\n  /// {@macro string_tool_func}\n  _StringToolFunc({\n    required super.name,\n    required super.description,\n    super.inputDescription,\n    required super.strict,\n    required FutureOr<String> Function(String) func,\n    super.returnDirect = false,\n    super.handleToolError,\n    super.defaultOptions,\n  }) : _func = func;\n\n  final FutureOr<String> Function(String input) _func;\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final Options? options,\n  }) async {\n    return _func(toolInput);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/tools/tools.dart",
    "content": "export 'base.dart';\nexport 'fake.dart';\nexport 'string.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/tools/types.dart",
    "content": "import '../exceptions/base.dart';\nimport '../langchain/types.dart';\n\n/// {@template tool_options}\n/// Generation options to pass into the Tool.\n/// {@endtemplate}\nclass ToolOptions extends BaseLangChainOptions {\n  /// {@macro tool_options}\n  const ToolOptions({super.concurrencyLimit});\n}\n\n/// {@template tool_exception}\n/// An exception that a tool throws when execution error occurs.\n///\n/// When this exception is thrown, the agent will not stop working, but will\n/// handle the exception according to the [BaseTool.handleToolError] variable\n/// of the tool, and the processing result will be returned to the agent as\n/// observation, and printed in red on the console.\n/// {@endtemplate}\nfinal class ToolException extends LangChainException {\n  /// {@macro tool_exception}\n  const ToolException({super.message = ''}) : super(code: 'tool');\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/utils/chunk.dart",
    "content": "import 'package:collection/collection.dart';\n\n/// Chunk a list into smaller list of a specified size.\nList<List<T>> chunkList<T>(final List<T> arr, {required final int chunkSize}) {\n  return List<List<T>>.generate(\n        (arr.length / chunkSize).ceil(),\n        (final int index) => [],\n      )\n      .mapIndexed((final chunkIndex, final chunk) {\n        final start = chunkIndex * chunkSize;\n        final end = (start + chunkSize > arr.length)\n            ? arr.length\n            : start + chunkSize;\n        return arr.getRange(start, end).toList(growable: false);\n      })\n      .toList(growable: false);\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/utils/reduce.dart",
    "content": "import '../chat_models/types.dart';\nimport '../documents/document.dart';\nimport '../language_models/types.dart';\nimport '../prompts/types.dart';\n\n/// Reduces a list of objects to a single object by concatenating them.\n///\n/// E.g.:\n/// - A list of strings will be concatenated.\n/// - A list of chat messages will be concatenated into a single chat message.\n/// - A list of language model results will be concatenated into a single language model result.\n/// - A list of documents will be concatenated into a single document.\n/// - A list of lists will be reduced to a single list containing a single reduced item.\n/// - A list of maps will be reduced to a single map containing the reduced items.\n/// - If a type is not recognized, the last item in the list will be returned.\nT reduce<T>(final Iterable<T> input) {\n  if (input.isEmpty) {\n    throw Exception('Cannot reduce an empty list');\n  } else if (input.length == 1) {\n    return input.first;\n  }\n\n  final first = input.first;\n  return switch (first) {\n        String() => input.cast<String>().join(),\n        ChatMessage() => input.cast<ChatMessage>().reduce(\n          (final a, final b) => a.concat(b),\n        ),\n        PromptValue() => input.cast<PromptValue>().reduce(\n          (final a, final b) => a.concat(b),\n        ),\n        LanguageModelResult() => input.cast<LanguageModelResult>().reduce(\n          (final a, final b) => a.concat(b),\n        ),\n        Document() => input.cast<Document>().reduce(\n          (final a, final b) => a.concat(b),\n        ),\n        Iterable<String>() => _reduceIterable(input.cast<Iterable<String>>()),\n        Iterable<Object>() => _reduceIterable(input.cast<Iterable<Object>>()),\n        Iterable<dynamic>() => _reduceIterable(input.cast<Iterable<dynamic>>()),\n        Map<String, String>() => _reduceMap(input.cast<Map<String, String>>()),\n        Map<String, Object>() => _reduceMap(input.cast<Map<String, Object>>()),\n        
Map<String, dynamic>() => _reduceMap(\n          input.cast<Map<String, dynamic>>(),\n        ),\n        Map<dynamic, dynamic>() => _reduceMap(\n          input.cast<Map<dynamic, dynamic>>(),\n        ),\n        _ => input.last,\n      }\n      as T;\n}\n\nIterable<V> _reduceIterable<V>(final Iterable<Iterable<V>> input) {\n  return [reduce(input.expand((final e) => e).toList(growable: false))];\n}\n\nMap<K, V> _reduceMap<K, V>(final Iterable<Map<K, V>> input) {\n  return input.fold<Map<K, V>>({}, (final a, final b) {\n    final keys = a.keys.toSet()..addAll(b.keys);\n    return {\n      for (final key in keys)\n        key: reduce([\n          if (a[key] != null) a[key] as V,\n          if (b[key] != null) b[key] as V,\n        ]),\n    };\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/utils/retry_client.dart",
    "content": "import 'dart:async';\nimport 'dart:math';\n\n/// {@template retry_options}\n/// Options to pass into [retryClient] to control the retry behavior.\n/// {@endtemplate}\nclass RetryOptions {\n  /// {@macro retry_options}\n  RetryOptions({\n    required this.maxRetries,\n    required this.addJitter,\n    this.retryIf,\n    this.delayDurations,\n  });\n\n  /// The maximum number of attempts to retry.\n  final int maxRetries;\n\n  /// An evaluator function that can be used to decide if the function should\n  /// be retried based on the exception it throws.\n  ///\n  /// If you decide not to retry on a particular exception, [retryIf] can return\n  /// `false` and the retry won't happen. By default [retryIf] is `true` and\n  /// all exceptions are retried.\n  final FutureOr<bool> Function(Object e)? retryIf;\n\n  /// The function will be retried based on an exponential backoff strategy\n  /// with a base delay of 1 second.\n  ///\n  /// But you can override this behavior by providing an optional list of\n  /// [delayDurations]`. Each entry in the list corresponds to a specific\n  /// retry attempt, and the corresponding delay from the list will be used\n  /// instead of the default exponential delay.\n  ///\n  /// For example, if you provide a list of `[2, 4, 8]`, the delays between the\n  /// first three retries will be 2, 4, and 8 seconds, respectively.\n  final List<Duration?>? delayDurations;\n\n  /// Whether to add jitter to the exponential backoff.\n  ///\n  /// Jitter is a random value added to the delay to prevent multiple clients\n  /// from retrying at the same time.\n  final bool addJitter;\n}\n\n/// A client that handles retry logic for a given function.\n///\n/// This client takes [RetryOptions] and a function to execute. 
If the\n/// function fails, it will be retried according to the specified options.\n/// If it succeeds, the result of the function will be returned.\nFutureOr<T> retryClient<T>({\n  required RetryOptions options,\n  required FutureOr<T> Function() fn,\n}) async {\n  const defaultDelay = Duration(seconds: 1);\n\n  for (var attempt = 0; attempt < options.maxRetries; attempt++) {\n    try {\n      return await fn();\n    } catch (e) {\n      final isLastAttempt = attempt == options.maxRetries - 1;\n      final shouldRetry = await options.retryIf?.call(e) ?? true;\n\n      if (isLastAttempt || !shouldRetry) {\n        rethrow;\n      }\n\n      final duration =\n          options.delayDurations?[attempt] ?? defaultDelay * pow(2, attempt);\n      await _delay(duration, attempt, options.addJitter);\n    }\n  }\n\n  // This line should never be reached\n  throw StateError('Exhausted all retry attempts');\n}\n\nFuture<void> _delay(\n  final Duration duration,\n  final int attempt,\n  final bool addJitter,\n) async {\n  final Duration delay;\n  if (addJitter) {\n    final random = Random();\n    final jitter = random.nextInt(100);\n    delay = Duration(milliseconds: duration.inMilliseconds + jitter);\n  } else {\n    delay = duration;\n  }\n  await Future<void>.delayed(delay);\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/utils/similarity.dart",
    "content": "import 'dart:math';\n\n/// Measures the cosine of the angle between two vectors in a vector space.\n/// It ranges from -1 to 1, where 1 represents identical vectors, 0 represents\n/// orthogonal vectors, and -1 represents vectors that are diametrically\n/// opposed.\ndouble cosineSimilarity(final List<double> a, final List<double> b) {\n  double p = 0;\n  double p2 = 0;\n  double q2 = 0;\n  for (var i = 0; i < a.length; i++) {\n    p += a[i] * b[i];\n    p2 += a[i] * a[i];\n    q2 += b[i] * b[i];\n  }\n  return p / sqrt(p2 * q2);\n}\n\n/// Calculates the similarity between an embedding and a list of embeddings.\n///\n/// The similarity is calculated using the provided [similarityFunction].\n/// The default similarity function is [cosineSimilarity].\nList<double> calculateSimilarity(\n  final List<double> embedding,\n  final List<List<double>> embeddings, {\n  final double Function(List<double> a, List<double> b) similarityFunction =\n      cosineSimilarity,\n}) {\n  return embeddings\n      .map((final vector) => similarityFunction(vector, embedding))\n      .toList(growable: false);\n}\n\n/// Returns a sorted list of indexes of [embeddings] that are most similar to\n/// the provided [embedding] (in descending order, most similar first).\n///\n/// The similarity is calculated using the provided [similarityFunction].\n/// The default similarity function is [cosineSimilarity].\nList<int> getIndexesMostSimilarEmbeddings(\n  final List<double> embedding,\n  final List<List<double>> embeddings, {\n  final double Function(List<double> a, List<double> b) similarityFunction =\n      cosineSimilarity,\n}) {\n  final similarities = calculateSimilarity(\n    embedding,\n    embeddings,\n    similarityFunction: similarityFunction,\n  );\n  return List<int>.generate(embeddings.length, (i) => i)\n    ..sort((a, b) => similarities[b].compareTo(similarities[a]));\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/utils/utils.dart",
    "content": "export 'chunk.dart';\nexport 'reduce.dart';\nexport 'retry_client.dart';\nexport 'similarity.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/src/vector_stores/base.dart",
    "content": "// ignore_for_file: avoid_unused_constructor_parameters\nimport '../documents/document.dart';\nimport '../embeddings/base.dart';\nimport '../retrievers/types.dart';\nimport '../retrievers/vector_store.dart';\nimport 'types.dart';\n\n/// {@template vector_store}\n/// Interface for vector stores.\n/// {@endtemplate}\nabstract class VectorStore {\n  /// {@macro vector_store}\n  const VectorStore({required this.embeddings});\n\n  /// The embeddings model used to embed documents.\n  final Embeddings embeddings;\n\n  /// Runs more documents through the embeddings and add to the vector store.\n  ///\n  /// - [documents] is a list of documents to add to the vector store.\n  ///\n  /// Returns a list of ids from adding the documents into the vector store.\n  Future<List<String>> addDocuments({\n    required final List<Document> documents,\n  }) async {\n    return addVectors(\n      vectors: await embeddings.embedDocuments(documents),\n      documents: documents,\n    );\n  }\n\n  /// Runs more texts through the embeddings and add to the vector store.\n  ///\n  /// - [vectors] is a list of vectors to add to the vector store.\n  /// - [documents] is a list of documents to add to the vector store.\n  ///\n  /// Returns a list of ids from adding the vectors into the vector store.\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  });\n\n  /// Delete by vector ID.\n  ///\n  /// - [ids] is a list of ids to delete.\n  Future<void> delete({required final List<String> ids});\n\n  /// Returns docs most similar to query using specified search type.\n  ///\n  /// - [query] is the query to search for.\n  /// - [searchType] is the type of search to perform, either\n  ///   [VectorStoreSearchType.similarity] (default) or\n  ///   [VectorStoreSearchType.mmr].\n  Future<List<Document>> search({\n    required final String query,\n    required final VectorStoreSearchType searchType,\n  }) {\n   
 return switch (searchType) {\n      final VectorStoreSimilaritySearch config => similaritySearch(\n        query: query,\n        config: config,\n      ),\n      final VectorStoreMMRSearch config => maxMarginalRelevanceSearch(\n        query: query,\n        config: config,\n      ),\n    };\n  }\n\n  /// Returns docs most similar to query using similarity.\n  ///\n  /// - [query] the query to search for.\n  /// - [config] the configuration for the search.\n  Future<List<Document>> similaritySearch({\n    required final String query,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    final docsWithScores = await similaritySearchWithScores(\n      query: query,\n      config: config,\n    );\n    return docsWithScores\n        .map((final docWithScore) => docWithScore.$1)\n        .toList(growable: false);\n  }\n\n  /// Returns docs most similar to embedding vector using similarity.\n  ///\n  /// - [embedding] is the embedding vector to look up documents similar to.\n  /// - [config] the configuration for the search.\n  Future<List<Document>> similaritySearchByVector({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    final docsWithScores = await similaritySearchByVectorWithScores(\n      embedding: embedding,\n      config: config,\n    );\n    return docsWithScores\n        .map((final docWithScore) => docWithScore.$1)\n        .toList(growable: false);\n  }\n\n  /// Returns docs and relevance scores in the range `[0, 1]`.\n  /// 0 is dissimilar, 1 is most similar.\n  ///\n  /// - [query] is the query to search for.\n  /// - [config] the configuration for the search.\n  ///\n  /// Returns a list of tuples of documents and their similarity scores.\n  Future<List<(Document, double score)>> similaritySearchWithScores({\n    required final String query,\n    final VectorStoreSimilaritySearch config 
=\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    return similaritySearchByVectorWithScores(\n      embedding: await embeddings.embedQuery(query),\n      config: config,\n    );\n  }\n\n  /// Returns docs and relevance scores in the range `[0, 1]`,\n  /// 0 is dissimilar, 1 is most similar.\n  ///\n  /// - [embedding] is the embedding vector to look up documents similar to.\n  /// - [config] the configuration for the search.\n  ///\n  /// Returns a list of tuples of documents and their similarity scores.\n  Future<List<(Document, double scores)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  });\n\n  /// Returns docs selected using the maximal marginal relevance algorithm (MMR)\n  /// for the given query.\n  ///\n  /// Maximal marginal relevance optimizes for similarity to query\n  /// AND diversity among selected documents.\n  ///\n  /// - [query] is the query to search for.\n  /// - [config] the configuration for the search.\n  Future<List<Document>> maxMarginalRelevanceSearch({\n    required final String query,\n    final VectorStoreMMRSearch config = const VectorStoreMMRSearch(),\n  }) async {\n    return maxMarginalRelevanceSearchByVector(\n      embedding: await embeddings.embedQuery(query),\n      config: config,\n    );\n  }\n\n  /// Returns docs selected using the maximal marginal relevance algorithm (MMR)\n  /// for the given embedding vector.\n  ///\n  /// Maximal marginal relevance optimizes for similarity to query\n  /// AND diversity among selected documents.\n  ///\n  /// - [embedding] is the embedding vector to look up documents similar to.\n  /// - [config] the configuration for the search.\n  List<Document> maxMarginalRelevanceSearchByVector({\n    required final List<double> embedding,\n    final VectorStoreMMRSearch config = const VectorStoreMMRSearch(),\n  }) {\n    throw UnimplementedError('MMR not supported for this vector 
store');\n  }\n\n  /// Returns a [VectorStoreRetriever] that uses this vector store.\n  ///\n  /// - [defaultOptions] are the default options for the retriever.\n  VectorStoreRetriever asRetriever({\n    final VectorStoreRetrieverOptions defaultOptions =\n        const VectorStoreRetrieverOptions(),\n  }) {\n    return VectorStoreRetriever(\n      vectorStore: this,\n      defaultOptions: defaultOptions,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/vector_stores/types.dart",
    "content": "/// {@template vector_store_search_type}\n/// Vector store search type.\n///\n/// In general, we provide support two types of search:\n/// - Similarity search.\n/// - Maximal Marginal Relevance (MMR) search.\n///\n/// But it depends on the actual implementation of the vector store whether\n/// these are supported or not. Vector stores may also provide their own\n/// subclasses of this class to support additional configuration options.\n/// For example, [VertexAIMatchingEngine](https://pub.dev/documentation/langchain_google/latest/langchain_google/VertexAIMatchingEngine-class.html)\n/// provides `VertexAIMatchingEngineSimilaritySearch` which is a subclass of\n/// [VectorStoreSimilaritySearch]. Check the documentation of the vector store\n/// you are using for more information.\n/// {@endtemplate}\nsealed class VectorStoreSearchType {\n  /// {@macro vector_store_search_type}\n  const VectorStoreSearchType({required this.k, this.filter});\n\n  /// The number of documents to return.\n  final int k;\n\n  /// The filter to apply to the search.\n  final Map<String, dynamic>? filter;\n\n  /// Similarity search.\n  factory VectorStoreSearchType.similarity({\n    final int k = 4,\n    final Map<String, dynamic>? filter,\n    final double? scoreThreshold,\n  }) {\n    return VectorStoreSimilaritySearch(\n      k: k,\n      filter: filter,\n      scoreThreshold: scoreThreshold,\n    );\n  }\n\n  /// Maximal Marginal Relevance (MMR) search.\n  factory VectorStoreSearchType.mmr({\n    final int k = 4,\n    final Map<String, dynamic>? filter,\n    final int fetchK = 20,\n    final double lambdaMult = 0.5,\n  }) {\n    return VectorStoreMMRSearch(\n      k: k,\n      filter: filter,\n      fetchK: fetchK,\n      lambdaMult: lambdaMult,\n    );\n  }\n}\n\n/// {@template vector_store_similarity_search}\n/// Similarity search.\n/// Eg. 
using Cosine similarity.\n/// {@endtemplate}\nclass VectorStoreSimilaritySearch extends VectorStoreSearchType {\n  /// {@macro vector_store_similarity_search}\n  const VectorStoreSimilaritySearch({\n    super.k = 4,\n    super.filter,\n    this.scoreThreshold,\n  });\n\n  /// The minimum relevance score a document must have to be returned.\n  /// Range: `[0, 1]`.\n  final double? scoreThreshold;\n}\n\n/// {@template vector_store_mmr_search}\n/// Maximal Marginal Relevance (MMR) search.\n///\n/// Maximal marginal relevance optimizes for similarity to query\n/// AND diversity among selected documents.\n/// {@endtemplate}\nclass VectorStoreMMRSearch extends VectorStoreSearchType {\n  /// {@macro vector_store_mmr_search}\n  const VectorStoreMMRSearch({\n    super.k = 4,\n    super.filter,\n    this.fetchK = 20,\n    this.lambdaMult = 0.5,\n  });\n\n  /// The number of documents to pass to MMR algorithm.\n  final int fetchK;\n\n  /// Number between 0 and 1 that determines the degree of diversity among the\n  /// results with 0 corresponding to maximum diversity and 1 to minimum\n  /// diversity.\n  final double lambdaMult;\n}\n"
  },
  {
    "path": "packages/langchain_core/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'base.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/stores.dart",
    "content": "/// Contains core abstractions related to stores.\nlibrary;\n\nexport 'src/stores/stores.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/tools.dart",
    "content": "/// Contains core abstractions related to tools.\nlibrary;\n\nexport 'src/tools/tools.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/utils.dart",
    "content": "/// Contains core utilities.\nlibrary;\n\nexport 'src/utils/utils.dart';\n"
  },
  {
    "path": "packages/langchain_core/lib/vector_stores.dart",
    "content": "/// Contains core abstractions related to vector stores.\nlibrary;\n\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_core/pubspec.yaml",
    "content": "name: langchain_core\ndescription: Contains core abstractions of LangChain.dart and the LangChain Expression Language (LCEL).\nversion: 0.4.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_core\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_core\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  async: ^2.13.0\n  collection: ^1.19.1\n  cross_file: ^0.3.4+2\n  crypto: ^3.0.6\n  meta: ^1.16.0\n  rxdart: \">=0.27.7 <0.29.0\"\n\ndev_dependencies:\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_core/test/chains/llm_chain_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('LLMChain tests', () {\n    test('Test LLMChain call', () async {\n      final model = FakeLLM(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.call({'foo': 'Hello world!'});\n      expect(res[LLMChain.defaultOutputKey], 'Hello world!');\n      expect(res['foo'], 'Hello world!');\n    });\n\n    test('Test LLMChain call single value', () async {\n      final model = FakeLLM(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.call('Hello world!');\n      expect(res[LLMChain.defaultOutputKey], 'Hello world!');\n      expect(res['foo'], 'Hello world!');\n    });\n\n    test('Test LLMChain call returnOnlyOutputs true', () async {\n      final model = FakeLLM(responses: ['Hello world! again!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo} {bar}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.call({\n        'foo': 'Hello world!, ',\n        'bar': 'again!',\n      }, returnOnlyOutputs: true);\n      expect(res.length, 1);\n      expect(res[LLMChain.defaultOutputKey], 'Hello world! again!');\n    });\n\n    test('Test LLMChain outputKey', () async {\n      final model = FakeLLM(responses: ['Hello world! 
again!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo} {bar}');\n      final chain = LLMChain(prompt: prompt, llm: model, outputKey: 'xxx');\n      final res = await chain.call({\n        'foo': 'Hello world!, ',\n        'bar': 'again!',\n      }, returnOnlyOutputs: true);\n      expect(res.length, 1);\n      expect(res['xxx'], 'Hello world! again!');\n    });\n\n    test('Test LLMChain run single input value', () async {\n      final model = FakeLLM(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.run('Hello world!');\n      expect(res, 'Hello world!');\n    });\n\n    test('Test LLMChain run multiple input values', () async {\n      final model = FakeLLM(responses: ['Hello world! again!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo} {bar}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.run({'foo': 'Hello world!, ', 'bar': 'again!'});\n      expect(res, 'Hello world! again!');\n    });\n\n    test('Test LLMChain throws error with less input values', () {\n      final model = FakeLLM(responses: ['Hello world! again!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo} {bar} {baz}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      expect(\n        () async => chain.run({'foo': 'Hello world!, ', 'bar': 'again!'}),\n        throwsArgumentError,\n      );\n    });\n\n    test('Test LLMChain throws error with wrong input values', () {\n      final model = FakeLLM(responses: ['Hello world! 
again!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo} {bar}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      expect(\n        () async => chain.run({'foo': 'Hello world!, ', 'sun': 'again!'}),\n        throwsArgumentError,\n      );\n    });\n\n    test('Test LLMChain with chat model', () async {\n      final model = FakeChatModel(responses: ['Hello world!']);\n      final prompt = PromptTemplate.fromTemplate('Print {foo}');\n      final chain = LLMChain(prompt: prompt, llm: model);\n      final res = await chain.call({'foo': 'Hello world!'});\n      expect(res[LLMChain.defaultOutputKey], ChatMessage.ai('Hello world!'));\n      expect(res['foo'], 'Hello world!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/chat_models/fake_test.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('FakeChatModel tests', () {\n    test('Test model returns given responses', () async {\n      final chatModel = FakeChatModel(responses: ['foo', 'bar']);\n      final res1 = await chatModel.invoke(\n        PromptValue.chat([ChatMessage.humanText('Hello')]),\n      );\n      expect(res1.outputAsString, 'foo');\n      final res2 = await chatModel.invoke(\n        PromptValue.chat([ChatMessage.humanText('World')]),\n      );\n      expect(res2.outputAsString, 'bar');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/chat_models/types.dart",
    "content": "import 'dart:convert';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatModel Types Serialization', () {\n    test('ChatPromptValue', () {\n      final testMessages = <ChatMessage>[\n        ChatMessage.system('This is a system prompt'),\n        ChatMessage.human(\n          ChatMessageContent.multiModal([\n            // Tests all message content parts\n            ChatMessageContent.text('Basic Text'),\n            ChatMessageContent.image(\n              data: 'not-real-image-data',\n              imageDetail: ChatMessageContentImageDetail.low,\n              mimeType: 'any/fake',\n            ),\n          ]),\n        ),\n        ChatMessage.ai(\n          'Assistant Response',\n          toolCalls: const [\n            AIChatMessageToolCall(\n              id: 'some-id',\n              name: 'some name',\n              argumentsRaw: '{\"a\": 1, \"b\": 2}',\n              arguments: {'a': 1, 'b': 2},\n            ),\n            AIChatMessageToolCall(\n              id: 'some-id2',\n              name: 'some name',\n              argumentsRaw: '{\"c\": 3, \"d\": 4}',\n              arguments: {'c': 3, 'd': 4},\n            ),\n          ],\n        ),\n        ChatMessage.tool(toolCallId: 'some-id2', content: 'Tool Call content'),\n        ChatMessage.custom('huh?', role: 'internal-monologue'),\n      ];\n      final promptValue = PromptValue.chat(testMessages);\n\n      // Basic encode/decode re-encode check\n      expect(\n        jsonEncode(PromptValue.fromMap(promptValue.toMap()).toMap()),\n        jsonEncode(promptValue.toMap()),\n      );\n    });\n\n    test('StringPromptValue', () {\n      final promptValue = PromptValue.string('This is a test prompt');\n\n      // Basic encode/decode re-encode check\n      expect(\n        jsonEncode(PromptValue.fromMap(promptValue.toMap()).toMap()),\n        
jsonEncode(promptValue.toMap()),\n      );\n    });\n\n    test('ChatToolChoice', () {\n      // Test ChatToolChoiceNone\n      const noneChoice = ChatToolChoiceNone();\n      final noneMap = noneChoice.toMap();\n      final decodedNone = ChatToolChoice.fromMap(noneMap);\n      expect(decodedNone, isA<ChatToolChoiceNone>());\n\n      // Test ChatToolChoiceAuto\n      const autoChoice = ChatToolChoiceAuto();\n      final autoMap = autoChoice.toMap();\n      final decodedAuto = ChatToolChoice.fromMap(autoMap);\n      expect(decodedAuto, isA<ChatToolChoiceAuto>());\n\n      // Test ChatToolChoiceRequired\n      const requiredChoice = ChatToolChoiceRequired();\n      final requiredMap = requiredChoice.toMap();\n      final decodedRequired = ChatToolChoice.fromMap(requiredMap);\n      expect(decodedRequired, isA<ChatToolChoiceRequired>());\n\n      // Test ChatToolChoiceForced\n      const forcedChoice = ChatToolChoiceForced(name: 'testTool');\n      final forcedMap = forcedChoice.toMap();\n      final decodedForced = ChatToolChoice.fromMap(forcedMap);\n      expect(decodedForced, isA<ChatToolChoiceForced>());\n      expect((decodedForced as ChatToolChoiceForced).name, 'testTool');\n    });\n\n    test('ChatMessageContent', () {\n      // Test text content\n      final textContent = ChatMessageContent.text('Test text');\n      final textMap = textContent.toMap();\n      final decodedText = ChatMessageContent.fromMap(textMap);\n      expect(decodedText, isA<ChatMessageContentText>());\n      expect((decodedText as ChatMessageContentText).text, 'Test text');\n\n      // Test image content\n      final imageContent = ChatMessageContent.image(\n        data: 'image-data',\n        mimeType: 'image/png',\n        imageDetail: ChatMessageContentImageDetail.high,\n      );\n      final imageMap = imageContent.toMap();\n      final decodedImage = ChatMessageContent.fromMap(imageMap);\n      expect(decodedImage, isA<ChatMessageContentImage>());\n      expect((decodedImage as 
ChatMessageContentImage).data, 'image-data');\n      expect(decodedImage.mimeType, 'image/png');\n      expect(decodedImage.detail, ChatMessageContentImageDetail.high);\n\n      // Test multi-modal content\n      final multiModalContent = ChatMessageContent.multiModal([\n        ChatMessageContent.text('Text part'),\n        ChatMessageContent.image(data: 'image-part'),\n      ]);\n      final multiModalMap = multiModalContent.toMap();\n      final decodedMultiModal = ChatMessageContent.fromMap(multiModalMap);\n      expect(decodedMultiModal, isA<ChatMessageContentMultiModal>());\n      expect(\n        (decodedMultiModal as ChatMessageContentMultiModal).parts.length,\n        2,\n      );\n      expect(decodedMultiModal.parts[0], isA<ChatMessageContentText>());\n      expect(decodedMultiModal.parts[1], isA<ChatMessageContentImage>());\n    });\n\n    test('Error handling', () {\n      // Test missing type\n      expect(\n        () => PromptValue.fromMap(const <String, dynamic>{}),\n        throwsArgumentError,\n      );\n      expect(\n        () => ChatMessage.fromMap(const <String, dynamic>{}),\n        throwsArgumentError,\n      );\n      expect(\n        () => ChatMessageContent.fromMap(const <String, dynamic>{}),\n        throwsArgumentError,\n      );\n      expect(\n        () => ChatToolChoice.fromMap(const <String, dynamic>{}),\n        throwsArgumentError,\n      );\n\n      // Test unknown type\n      expect(\n        () => PromptValue.fromMap(const <String, dynamic>{'type': 'unknown'}),\n        throwsArgumentError,\n      );\n      expect(\n        () => ChatMessage.fromMap(const <String, dynamic>{'type': 'unknown'}),\n        throwsUnimplementedError,\n      );\n      expect(\n        () => ChatMessageContent.fromMap(const <String, dynamic>{\n          'type': 'unknown',\n        }),\n        throwsUnimplementedError,\n      );\n      expect(\n        () =>\n            ChatToolChoice.fromMap(const <String, dynamic>{'type': 'unknown'}),\n        
throwsUnimplementedError,\n      );\n    });\n\n    test('Complete serialization flow', () {\n      // Use different tool choices\n      final toolChoices = [\n        ChatToolChoice.none,\n        ChatToolChoice.auto,\n        ChatToolChoice.required,\n        ChatToolChoice.forced(name: 'calculator'),\n      ];\n\n      // Serialize and deserialize each\n      for (final choice in toolChoices) {\n        final choiceMap = choice.toMap();\n        final deserializedChoice = ChatToolChoice.fromMap(choiceMap);\n\n        // Check type is preserved\n        expect(deserializedChoice.runtimeType, choice.runtimeType);\n\n        // For forced choice, check name is preserved\n        if (choice is ChatToolChoiceForced) {\n          expect(\n            (deserializedChoice as ChatToolChoiceForced).name,\n            'calculator',\n          );\n        }\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/embeddings/fake.dart",
    "content": "import 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('FakeEmbeddings tests', () {\n    test('embedQuery: should return an embedding with correct size', () async {\n      final embeddings = FakeEmbeddings();\n      final actual = await embeddings.embedQuery('test');\n      expect(actual.length, equals(10));\n    });\n\n    test(\n      'Embeds a document with the same embedding vector for the same text',\n      () async {\n        final embeddings = FakeEmbeddings(size: 3);\n        const document1 = Document(pageContent: 'This is a document.');\n        const document2 = Document(pageContent: 'This is a document.');\n\n        final embedding1 = (await embeddings.embedDocuments([document1])).first;\n        final embedding2 = (await embeddings.embedDocuments([document2])).first;\n\n        expect(embedding1[0], embedding2[0]);\n        expect(embedding1[1], embedding2[1]);\n        expect(embedding1[2], embedding2[2]);\n      },\n    );\n\n    test(\n      'Embeds a query with the same embedding vector for the same text',\n      () async {\n        final embeddings = FakeEmbeddings(size: 3);\n        const query1 = 'This is a query.';\n        const query2 = 'This is a query.';\n\n        final embedding1 = await embeddings.embedQuery(query1);\n        final embedding2 = await embeddings.embedQuery(query2);\n\n        expect(embedding1[0], embedding2[0]);\n        expect(embedding1[1], embedding2[1]);\n        expect(embedding1[2], embedding2[2]);\n      },\n    );\n\n    test(\n      'Embeds a query with different embedding vector for different text',\n      () async {\n        final embeddings = FakeEmbeddings(size: 3);\n        const query1 = 'This is a query.';\n        const query2 = 'Totally different text.';\n\n        final embedding1 = await embeddings.embedQuery(query1);\n        final embedding2 = await 
embeddings.embedQuery(query2);\n\n        expect(embedding1[0], isNot(embedding2[0]));\n        expect(embedding1[1], isNot(embedding2[1]));\n        expect(embedding1[2], isNot(embedding2[2]));\n      },\n    );\n\n    test('If deterministic is false, embeddings are different', () async {\n      final embeddings = FakeEmbeddings(size: 3, deterministic: false);\n      const document1 = Document(pageContent: 'This is a document.');\n      const document2 = Document(pageContent: 'This is a document.');\n\n      final embedding1 = (await embeddings.embedDocuments([document1])).first;\n      final embedding2 = (await embeddings.embedDocuments([document2])).first;\n\n      expect(embedding1[0], isNot(embedding2[0]));\n      expect(embedding1[1], isNot(embedding2[1]));\n      expect(embedding1[2], isNot(embedding2[2]));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/output_parsers/json_test.dart",
    "content": "import 'package:langchain_core/output_parsers.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('JsonOutputParser tests', () {\n    test('Valid JSON input should be parsed correctly', () async {\n      final parser = JsonOutputParser<String>();\n      final result = await parser.invoke('{\"name\": \"John\", \"age\": 30}');\n      expect(result, equals({'name': 'John', 'age': 30}));\n    });\n\n    test('Whitespace in JSON input should be handled correctly', () async {\n      final parser = JsonOutputParser<String>();\n      final result = await parser.invoke('  {\"name\": \"John Doe\"}  ');\n      expect(result, equals({'name': 'John Doe'}));\n    });\n\n    test('Clearing the parser should reset the last result', () async {\n      final parser = JsonOutputParser<String>();\n      final result1 = await parser.invoke('{\"name\": \"John\", \"age\": 30}');\n      expect(result1, equals({'name': 'John', 'age': 30}));\n      final result2 = await parser.invoke('{\"name\": \"Ana\", \"age\": 40}');\n      expect(result2, equals({'name': 'Ana', 'age': 40}));\n    });\n\n    test('Valid JSON stream should be parsed correctly', () async {\n      final parser = JsonOutputParser<String>();\n      final inputStream = Stream.fromIterable([\n        '{\"name\": \"John\"',\n        ', \"age\": 30}',\n      ]);\n      final result = await parser.streamFromInputStream(inputStream).toList();\n      expect(\n        result,\n        equals([\n          {'name': 'John'},\n          {'name': 'John', 'age': 30},\n        ]),\n      );\n    });\n\n    test('Calling stream twice should parse the latest input', () async {\n      final parser = JsonOutputParser<String>();\n      final inputStream1 = Stream.fromIterable(['{\"name\": \"John\", \"age\": 30}']);\n      final result1 = await parser.streamFromInputStream(inputStream1).toList();\n      expect(\n        result1,\n        equals([\n          {'name': 'John', 'age': 30},\n        ]),\n      );\n      
final inputStream2 = Stream.fromIterable(['{\"name\": \"Ana\", \"age\": 40}']);\n      final result2 = await parser.streamFromInputStream(inputStream2).toList();\n      expect(\n        result2,\n        equals([\n          {'name': 'Ana', 'age': 40},\n        ]),\n      );\n    });\n\n    test('Test reduceOutputStream', () async {\n      final parser = JsonOutputParser<String>(reduceOutputStream: true);\n      final inputStream = Stream.fromIterable([\n        '{\"name\": \"John\"',\n        ', \"age\": 30}',\n      ]);\n      final result = await parser.streamFromInputStream(inputStream).toList();\n      expect(\n        result,\n        equals([\n          {'name': 'John', 'age': 30},\n        ]),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/output_parsers/string_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('StringOutputParser tests', () {\n    test('StringOutputParser from LLMResult', () async {\n      const result = LLMResult(\n        id: 'id',\n        output: 'Hello world!',\n        finishReason: FinishReason.stop,\n        metadata: {},\n        usage: LanguageModelUsage(),\n      );\n      final res = await const StringOutputParser().invoke(result);\n      expect(res, 'Hello world!');\n    });\n\n    test('StringOutputParser from ChatResult', () async {\n      const result = ChatResult(\n        id: 'id',\n        output: AIChatMessage(content: 'Hello world!'),\n        finishReason: FinishReason.stop,\n        metadata: {},\n        usage: LanguageModelUsage(),\n      );\n      final res = await const StringOutputParser().invoke(result);\n      expect(res, 'Hello world!');\n    });\n\n    test('Test reduceOutputStream', () async {\n      final chat = FakeChatModel(responses: ['ABC']);\n\n      final chain1 = chat.pipe(\n        const StringOutputParser(reduceOutputStream: false),\n      );\n      final chain2 = chat.pipe(\n        const StringOutputParser(reduceOutputStream: true),\n      );\n\n      final res1 = await chain1.stream(PromptValue.string('test')).toList();\n      final res2 = await chain2.stream(PromptValue.string('test')).toList();\n\n      expect(res1, ['A', 'B', 'C']);\n      expect(res2, ['ABC']);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/output_parsers/tools_test.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  const result = ChatResult(\n    id: 'id',\n    output: AIChatMessage(\n      content: '',\n      toolCalls: [\n        AIChatMessageToolCall(\n          id: 'id',\n          name: 'test',\n          argumentsRaw: '{\"foo\":\"bar\",\"bar\":\"foo\"}',\n          arguments: {'foo': 'bar', 'bar': 'foo'},\n        ),\n      ],\n    ),\n    finishReason: FinishReason.stop,\n    metadata: {},\n    usage: LanguageModelUsage(),\n  );\n\n  const streamingResult = [\n    ChatResult(\n      id: 'id',\n      output: AIChatMessage(\n        content: '',\n        toolCalls: [\n          AIChatMessageToolCall(\n            id: 'id',\n            name: 'test',\n            argumentsRaw: '{\"foo\":\"bar\"',\n            arguments: {},\n          ),\n        ],\n      ),\n      finishReason: FinishReason.stop,\n      metadata: {},\n      usage: LanguageModelUsage(),\n      streaming: true,\n    ),\n    ChatResult(\n      id: 'id',\n      output: AIChatMessage(\n        content: '',\n        toolCalls: [\n          AIChatMessageToolCall(\n            id: '',\n            name: '',\n            argumentsRaw: ', ',\n            arguments: {},\n          ),\n        ],\n      ),\n      finishReason: FinishReason.stop,\n      metadata: {},\n      usage: LanguageModelUsage(),\n      streaming: true,\n    ),\n    ChatResult(\n      id: 'id',\n      output: AIChatMessage(\n        content: '',\n        toolCalls: [\n          AIChatMessageToolCall(\n            id: '',\n            name: '',\n            argumentsRaw: '\"bar\":\"foo\"}',\n            arguments: {},\n          ),\n        ],\n      ),\n      finishReason: FinishReason.stop,\n      metadata: {},\n      usage: LanguageModelUsage(),\n      streaming: true,\n    ),\n  ];\n\n  group('ToolsOutputParser 
tests', () {\n    test('ToolsOutputParser invoke', () async {\n      final res = await ToolsOutputParser().invoke(result);\n      expect(res, const [\n        ParsedToolCall(\n          id: 'id',\n          name: 'test',\n          arguments: {'foo': 'bar', 'bar': 'foo'},\n        ),\n      ]);\n    });\n\n    test('ToolsOutputParser stream', () async {\n      final res = await ToolsOutputParser()\n          .streamFromInputStream(Stream.fromIterable(streamingResult))\n          .toList();\n      expect(res, const [\n        [\n          ParsedToolCall(id: 'id', name: 'test', arguments: {'foo': 'bar'}),\n        ],\n        [\n          ParsedToolCall(\n            id: 'id',\n            name: 'test',\n            arguments: {'foo': 'bar', 'bar': 'foo'},\n          ),\n        ],\n      ]);\n    });\n\n    test('ToolsOutputParser reduce input stream', () async {\n      final res = await ToolsOutputParser(\n        reduceOutputStream: true,\n      ).streamFromInputStream(Stream.fromIterable(streamingResult)).toList();\n      expect(res, const [\n        [\n          ParsedToolCall(\n            id: 'id',\n            name: 'test',\n            arguments: {'foo': 'bar', 'bar': 'foo'},\n          ),\n        ],\n      ]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/output_parsers/utils_test.dart",
    "content": "import 'package:langchain_core/src/output_parsers/utils.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('parsePartialJson tests', () {\n    test('Valid JSON input should be parsed correctly', () {\n      const jsonString = '{\"name\": \"John\", \"age\": 30}';\n      final result = parsePartialJson(jsonString) as Map<String, dynamic>;\n      expect(result['name'], equals('John'));\n      expect(result['age'], equals(30));\n    });\n\n    test('Missing closing brace should be parsed correctly', () {\n      const jsonString = '{\"name\": \"John\", \"age\": 30';\n      final result = parsePartialJson(jsonString) as Map<String, dynamic>;\n      expect(result, equals({'name': 'John', 'age': 30}));\n    });\n\n    test('Missing value should return null', () {\n      const jsonString = '{\"name\": \"John\", \"age\":';\n      final result = parsePartialJson(jsonString);\n      expect(result, isNull);\n    });\n\n    test('Invalid JSON should return null', () {\n      const jsonString = '{\"name\": \"John, \"age\": 30}';\n      final result = parsePartialJson(jsonString);\n      expect(result, isNull);\n    });\n\n    test('should handle strings with whitespace', () {\n      const jsonString = '  {\"name\": \"John Doe\"}  ';\n      final result = parsePartialJson(jsonString) as Map<String, dynamic>;\n      expect(result['name'], equals('John Doe'));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/prompts/assets/prompt_file.txt",
    "content": "Question: {question}\nAnswer:"
  },
  {
    "path": "packages/langchain_core/test/prompts/chat_prompt_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatPromptTemplate tests', () {\n    test('Create a chat prompt template', () {\n      final prompt = ChatPromptTemplate.fromTemplate('hi {foo} {bar}');\n      expect(prompt.promptMessages, [\n        HumanChatMessagePromptTemplate.fromTemplate('hi {foo} {bar}'),\n      ]);\n    });\n\n    test('Test format', () {\n      final chatPrompt = _createChatPromptTemplate();\n      final messages = chatPrompt.formatPrompt({\n        'context': 'This is a context',\n        'foo': 'Foo',\n        'bar': 'Bar',\n      });\n      expect(messages.toChatMessages(), [\n        ChatMessage.system(\"Here's some context: This is a context\"),\n        ChatMessage.humanText(\n          \"Hello Foo, I'm Bar. Thanks for the This is a context\",\n        ),\n        ChatMessage.ai(\"I'm an AI. I'm Foo. I'm Bar.\"),\n        ChatMessage.custom(\n          \"I'm a generic message. I'm Foo. I'm Bar.\",\n          role: 'test',\n        ),\n      ]);\n    });\n\n    test('Test ChatPromptTemplate.fromTemplates', () {\n      final chatPrompt = ChatPromptTemplate.fromTemplates(const [\n        (ChatMessageType.system, \"Here's some context: {context}\"),\n        (\n          ChatMessageType.human,\n          \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n        ),\n        (ChatMessageType.ai, \"I'm an AI. I'm {foo}. I'm {bar}.\"),\n        (\n          ChatMessageType.custom,\n          \"I'm a generic message. I'm {foo}. 
I'm {bar}.\",\n        ),\n      ], customRole: 'test');\n      final messages = chatPrompt.formatPrompt({\n        'context': 'This is a context',\n        'foo': 'Foo',\n        'bar': 'Bar',\n      });\n      expect(messages.toChatMessages(), [\n        ChatMessage.system(\"Here's some context: This is a context\"),\n        ChatMessage.humanText(\n          \"Hello Foo, I'm Bar. Thanks for the This is a context\",\n        ),\n        ChatMessage.ai(\"I'm an AI. I'm Foo. I'm Bar.\"),\n        ChatMessage.custom(\n          \"I'm a generic message. I'm Foo. I'm Bar.\",\n          role: 'test',\n        ),\n      ]);\n    });\n\n    test(\n      'Test ChatPromptTemplate.fromPromptMessages with factory constructors',\n      () {\n        final chatPrompt = ChatPromptTemplate.fromPromptMessages([\n          ChatMessagePromptTemplate.system(\"Here's some context: {context}\"),\n          ChatMessagePromptTemplate.human(\n            \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n          ),\n          ChatMessagePromptTemplate.ai(\"I'm an AI. I'm {foo}. I'm {bar}.\"),\n          ChatMessagePromptTemplate.custom(\n            \"I'm a generic message. I'm {foo}. I'm {bar}.\",\n            role: 'test',\n          ),\n        ]);\n        final messages = chatPrompt.formatPrompt({\n          'context': 'This is a context',\n          'foo': 'Foo',\n          'bar': 'Bar',\n        });\n        expect(messages.toChatMessages(), [\n          ChatMessage.system(\"Here's some context: This is a context\"),\n          ChatMessage.humanText(\n            \"Hello Foo, I'm Bar. Thanks for the This is a context\",\n          ),\n          ChatMessage.ai(\"I'm an AI. I'm Foo. I'm Bar.\"),\n          ChatMessage.custom(\n            \"I'm a generic message. I'm Foo. 
I'm Bar.\",\n            role: 'test',\n          ),\n        ]);\n      },\n    );\n\n    test('Test format with invalid input variables', () {\n      const systemPrompt = PromptTemplate(\n        template: \"Here's some context: {context}\",\n        inputVariables: {'context'},\n      );\n      const userPrompt = PromptTemplate(\n        template: \"Hello {foo}, I'm {bar}\",\n        inputVariables: {'foo', 'bar'},\n      );\n      expect(\n        () => const ChatPromptTemplate(\n          promptMessages: [\n            SystemChatMessagePromptTemplate(prompt: systemPrompt),\n            HumanChatMessagePromptTemplate(prompt: userPrompt),\n          ],\n          inputVariables: {'context', 'foo', 'bar', 'baz'},\n        ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n      expect(\n        () => const ChatPromptTemplate(\n          promptMessages: [\n            SystemChatMessagePromptTemplate(prompt: systemPrompt),\n            HumanChatMessagePromptTemplate(prompt: userPrompt),\n          ],\n          inputVariables: {'context', 'foo'},\n        ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n    });\n\n    test('Create a chat prompt template with partials', () {\n      final prompt = ChatPromptTemplate.fromTemplate(\n        'hi {foo} {bar}',\n        partialVariables: const {'foo': 'jim'},\n      );\n      const expectedPrompt = PromptTemplate(\n        template: 'hi {foo} {bar}',\n        inputVariables: {'bar'},\n        partialVariables: {'foo': 'jim'},\n      );\n      expect(prompt.promptMessages.length, 1);\n      final outputPrompt = prompt.promptMessages[0];\n      expect(outputPrompt is HumanChatMessagePromptTemplate, true);\n      expect(\n        (outputPrompt as HumanChatMessagePromptTemplate).prompt,\n        expectedPrompt,\n      );\n    });\n\n    test('Test chat prompt can be partial', () {\n      const template = 'hi {foo} {bar}';\n      final prompt = 
ChatPromptTemplate.fromTemplate(template);\n      final expectedPrompt = ChatPromptTemplate(\n        inputVariables: const {'foo', 'bar'},\n        promptMessages: [HumanChatMessagePromptTemplate.fromTemplate(template)],\n      );\n      expect(prompt, expectedPrompt);\n\n      final partialPrompt1 = prompt.partial(const {'foo': 'jim'});\n      const expectedPrompt1 = ChatPromptTemplate(\n        inputVariables: {'bar'},\n        partialVariables: {'foo': 'jim'},\n        promptMessages: [\n          HumanChatMessagePromptTemplate(\n            prompt: PromptTemplate(\n              inputVariables: {'bar'},\n              partialVariables: {'foo': 'jim'},\n              template: template,\n            ),\n          ),\n        ],\n      );\n      expect(partialPrompt1, expectedPrompt1);\n\n      final partialPrompt2 = partialPrompt1.partial(const {'bar': 'morrison'});\n      const expectedPrompt2 = ChatPromptTemplate(\n        inputVariables: {},\n        partialVariables: {'foo': 'jim', 'bar': 'morrison'},\n        promptMessages: [\n          HumanChatMessagePromptTemplate(\n            prompt: PromptTemplate(\n              inputVariables: {},\n              partialVariables: {'foo': 'jim', 'bar': 'morrison'},\n              template: template,\n            ),\n          ),\n        ],\n      );\n      expect(partialPrompt2, expectedPrompt2);\n    });\n\n    test('Test using partial', () {\n      const userPrompt = PromptTemplate(\n        template: '{foo}{bar}',\n        inputVariables: {'foo', 'bar'},\n      );\n\n      const prompt = ChatPromptTemplate(\n        promptMessages: [HumanChatMessagePromptTemplate(prompt: userPrompt)],\n        inputVariables: {'foo', 'bar'},\n      );\n\n      final partialPrompt = prompt.partial({'foo': 'foo'});\n\n      expect(prompt.inputVariables, ['foo', 'bar']);\n      expect(partialPrompt.inputVariables, ['bar']);\n\n      expect(partialPrompt.format({'bar': 'baz'}), 'Human: foobaz');\n    });\n\n    test('Load chat 
prompt template from file', () async {\n      const templateFile = './test/prompts/assets/prompt_file.txt';\n      final expected = CustomChatMessagePromptTemplate.fromTemplate(\n        'Question: {question}\\nAnswer:',\n        role: 'human',\n      );\n      final actual = await CustomChatMessagePromptTemplate.fromTemplateFile(\n        templateFile,\n        role: 'human',\n      );\n      expect(expected, actual);\n    });\n\n    test('Test chat prompt template', () {\n      final promptTemplate = _createChatPromptTemplate();\n      final prompt = promptTemplate.formatPrompt({\n        'foo': 'foo',\n        'bar': 'bar',\n        'context': 'context',\n      });\n      expect(prompt is ChatPromptValue, true);\n      final messages = prompt.toChatMessages();\n      expect(messages.length, 4);\n      expect(messages[0].contentAsString, \"Here's some context: context\");\n      expect(\n        messages[1].contentAsString,\n        \"Hello foo, I'm bar. Thanks for the context\",\n      );\n      expect(messages[2].contentAsString, \"I'm an AI. I'm foo. I'm bar.\");\n      expect(\n        messages[3].contentAsString,\n        \"I'm a generic message. I'm foo. I'm bar.\",\n      );\n\n      final string = prompt.toString();\n      const expected =\n          \"System: Here's some context: context\\nHuman: Hello foo, \"\n          \"I'm bar. Thanks for the context\\nAI: I'm an AI. I'm foo. I'm bar.\\n\"\n          \"test: I'm a generic message. I'm foo. 
I'm bar.\";\n      expect(string, expected);\n\n      final string2 = promptTemplate.format({\n        'foo': 'foo',\n        'bar': 'bar',\n        'context': 'context',\n      });\n      expect(string2, expected);\n    });\n\n    test('Test creating a chat prompt template from messages', () {\n      final chatPromptTemplate = ChatPromptTemplate.fromPromptMessages(\n        _createMessages(),\n      );\n      expect(chatPromptTemplate.inputVariables, {'context', 'foo', 'bar'});\n      expect(chatPromptTemplate.promptMessages.length, 4);\n    });\n\n    test('Test fromPromptMessages', () {\n      const systemPrompt = PromptTemplate(\n        template: \"Here's some context: {context}\",\n        inputVariables: {'context'},\n      );\n      const userPrompt = PromptTemplate(\n        template: \"Hello {foo}, I'm {bar}\",\n        inputVariables: {'foo', 'bar'},\n      );\n      final chatPrompt = ChatPromptTemplate.fromPromptMessages(const [\n        SystemChatMessagePromptTemplate(prompt: systemPrompt),\n        HumanChatMessagePromptTemplate(prompt: userPrompt),\n      ]);\n      expect(chatPrompt.inputVariables, ['context', 'foo', 'bar']);\n      final messages = chatPrompt.formatPrompt({\n        'context': 'This is a context',\n        'foo': 'Foo',\n        'bar': 'Bar',\n      });\n      expect(messages.toChatMessages(), [\n        ChatMessage.system(\"Here's some context: This is a context\"),\n        ChatMessage.humanText(\"Hello Foo, I'm Bar\"),\n      ]);\n    });\n\n    test('Test SimpleMessagePromptTemplate', () {\n      const prompt = MessagesPlaceholder(variableName: 'foo');\n      final values = {\n        'foo': [ChatMessage.humanText(\"Hello Foo, I'm Bar\")],\n      };\n      final messages = prompt.formatMessages(values);\n      expect(messages, [ChatMessage.humanText(\"Hello Foo, I'm Bar\")]);\n    });\n\n    test('Test MessagesPlaceholder', () {\n      final chatPromptTemplate = ChatPromptTemplate.fromPromptMessages([\n        const 
MessagesPlaceholder(variableName: 'conversation'),\n        HumanChatMessagePromptTemplate.fromTemplate(\n          'Summarize our conversation so far in {word_count} words.',\n        ),\n      ]);\n\n      expect(chatPromptTemplate.inputVariables, {'conversation', 'word_count'});\n\n      final humanMessage = ChatMessage.humanText(\n        'What is the best way to learn programming?',\n      );\n      final aiMessage = ChatMessage.ai('''\n1. Choose a programming language: Decide on a programming language that you want to learn. \n\n2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.\n\n3. Practice, practice, practice: The best way to learn programming is through hands-on experience\n        ''');\n\n      final promptValue = chatPromptTemplate.formatPrompt({\n        'conversation': [humanMessage, aiMessage],\n        'word_count': 10,\n      });\n\n      expect(promptValue is ChatPromptValue, true);\n      final chatPromptValue = promptValue as ChatPromptValue;\n\n      expect(chatPromptValue.messages.length, 3);\n      expect(chatPromptValue.messages[0], isA<HumanChatMessage>());\n      expect(\n        chatPromptValue.messages[0].contentAsString,\n        startsWith('What is the best way to learn programming?'),\n      );\n      expect(chatPromptValue.messages[1], isA<AIChatMessage>());\n      expect(\n        chatPromptValue.messages[1].contentAsString,\n        startsWith('1. 
Choose a programming language:'),\n      );\n      expect(chatPromptValue.messages[2], isA<HumanChatMessage>());\n      expect(\n        chatPromptValue.messages[2].contentAsString,\n        startsWith('Summarize our conversation so far in 10 words.'),\n      );\n    });\n  });\n}\n\nList<ChatMessagePromptTemplate> _createMessages() {\n  return [\n    SystemChatMessagePromptTemplate.fromTemplate(\n      \"Here's some context: {context}\",\n    ),\n    HumanChatMessagePromptTemplate.fromTemplate(\n      \"Hello {foo}, I'm {bar}. Thanks for the {context}\",\n    ),\n    AIChatMessagePromptTemplate.fromTemplate(\n      \"I'm an AI. I'm {foo}. I'm {bar}.\",\n    ),\n    CustomChatMessagePromptTemplate.fromTemplate(\n      \"I'm a generic message. I'm {foo}. I'm {bar}.\",\n      role: 'test',\n    ),\n  ];\n}\n\nChatPromptTemplate _createChatPromptTemplate() {\n  return ChatPromptTemplate.fromPromptMessages(_createMessages());\n}\n"
  },
  {
    "path": "packages/langchain_core/test/prompts/pipeline_test.dart",
    "content": "import 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('PipelinePromptTemplate tests', () {\n    test('Test prompts can be constructed', () {\n      final promptA = PromptTemplate.fromTemplate('{foo}');\n      final promptB = PromptTemplate.fromTemplate('{bar}');\n      final pipelinePrompt = PipelinePromptTemplate(\n        finalPrompt: promptB,\n        pipelinePrompts: [('bar', promptA)],\n      );\n      expect(pipelinePrompt.inputVariables, ['foo']);\n    });\n\n    test('Simple pipeline', () {\n      final promptA = PromptTemplate.fromTemplate('{foo}');\n      final promptB = PromptTemplate.fromTemplate('{bar}');\n      final pipelinePrompt = PipelinePromptTemplate(\n        finalPrompt: promptB,\n        pipelinePrompts: [('bar', promptA)],\n      );\n      final output = pipelinePrompt.format({'foo': 'jim'});\n      expect(output, 'jim');\n    });\n\n    test('Multi variable pipeline', () {\n      final promptA = PromptTemplate.fromTemplate('{foo}');\n      final promptB = PromptTemplate.fromTemplate('okay {bar} {baz}');\n      final pipelinePrompt = PipelinePromptTemplate(\n        finalPrompt: promptB,\n        pipelinePrompts: [('bar', promptA)],\n      );\n      final output = pipelinePrompt.format({'foo': 'jim', 'baz': 'deep'});\n      expect(output, 'okay jim deep');\n    });\n\n    test('Partial with chat prompts', () {\n      const promptA = ChatPromptTemplate(\n        inputVariables: {'foo'},\n        promptMessages: [MessagesPlaceholder(variableName: 'foo')],\n      );\n      final promptB = ChatPromptTemplate.fromTemplate('jim {bar}');\n      final pipelinePrompt = PipelinePromptTemplate(\n        finalPrompt: promptA,\n        pipelinePrompts: [('foo', promptB)],\n      );\n      expect(pipelinePrompt.inputVariables, ['bar']);\n      final output = pipelinePrompt.formatPrompt({'bar': 'okay'});\n      expect(output.toChatMessages()[0].contentAsString, 'jim okay');\n    });\n  
});\n}\n"
  },
  {
    "path": "packages/langchain_core/test/prompts/prompt_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('PromptTemplate tests', () {\n    test('Test prompts can be constructed', () {\n      const template = 'This is a {foo} test.';\n      final inputVariables = {'foo'};\n      final prompt = PromptTemplate(\n        inputVariables: inputVariables,\n        template: template,\n      );\n      expect(prompt.template, template);\n      expect(prompt.inputVariables, inputVariables);\n    });\n\n    test(\n      'Test error is raised when input variables are using reserved names',\n      () {\n        const template = 'This is a {stop} test.';\n        const inputVariables = {'stop', ''};\n        expect(\n          () => const PromptTemplate(\n            inputVariables: inputVariables,\n            template: template,\n          ).validateTemplate(),\n          throwsA(isA<TemplateValidationException>()),\n        );\n      },\n    );\n\n    test('Test prompts can be constructed from a template', () {\n      // Single input variable.\n      const template1 = 'This is a {foo} test.';\n      final prompt1 = PromptTemplate.fromTemplate(template1);\n      const expectedPrompt1 = PromptTemplate(\n        template: template1,\n        inputVariables: {'foo'},\n      );\n      expect(prompt1, expectedPrompt1);\n\n      // Multiple input variables\n      const template2 = 'This {bar} is a {foo} test.';\n      final prompt2 = PromptTemplate.fromTemplate(template2);\n      const expectedPrompt2 = PromptTemplate(\n        template: template2,\n        inputVariables: {'bar', 'foo'},\n      );\n      expect(prompt2, expectedPrompt2);\n\n      // Multiple input variables with repeats\n      const template3 = 'This {bar} is a {foo} test {foo}.';\n      final prompt3 = PromptTemplate.fromTemplate(template3);\n      const expectedPrompt3 = PromptTemplate(\n        template: template3,\n        inputVariables: 
{'bar', 'foo'},\n      );\n      expect(prompt3, expectedPrompt3);\n    });\n\n    test('Create a prompt template with partials', () {\n      const template = 'This is a {foo} {bar} test.';\n      final prompt = PromptTemplate.fromTemplate(\n        template,\n        partialVariables: const {'foo': 'jim'},\n      );\n      const expectedPrompt = PromptTemplate(\n        template: template,\n        inputVariables: {'bar'},\n        partialVariables: {'foo': 'jim'},\n      );\n      expect(prompt, expectedPrompt);\n    });\n\n    test('Test error is raised when partial variables overlap', () {\n      expect(\n        () => const PromptTemplate(\n          inputVariables: {'foo', 'bar'},\n          partialVariables: {'foo': 'jim'},\n          template: 'This is a {foo} {bar} test.',\n        ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n    });\n\n    test('Test error is raised when input variables are not provided', () {\n      const template = 'This is a {foo} test.';\n      const inputVariables = <String>{};\n      expect(\n        () => const PromptTemplate(\n          inputVariables: inputVariables,\n          template: template,\n        ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n    });\n\n    test('Test error is raised when there are too many input variables', () {\n      const template = 'This is a {foo} test.';\n      const inputVariables = {'foo', 'bar'};\n      expect(\n        () => const PromptTemplate(\n          inputVariables: inputVariables,\n          template: template,\n        ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n    });\n\n    test('Test error is raised when name of input variable is wrong', () {\n      const template = 'This is a {foo} test.';\n      const inputVariables = {'bar'};\n      expect(\n        () => const PromptTemplate(\n          inputVariables: inputVariables,\n          template: template,\n    
    ).validateTemplate(),\n        throwsA(isA<TemplateValidationException>()),\n      );\n    });\n\n    test('Test prompt can be successfully constructed from examples', () {\n      const template = '''\nTest Prompt:\n\nQuestion: who are you?\nAnswer: foo\n\nQuestion: what are you?\nAnswer: bar\n\nQuestion: {question}\nAnswer:''';\n      const inputVariables = {'question'};\n      const exampleSeparator = '\\n\\n';\n      const prefix = 'Test Prompt:';\n      const suffix = 'Question: {question}\\nAnswer:';\n      const examples = [\n        'Question: who are you?\\nAnswer: foo',\n        'Question: what are you?\\nAnswer: bar',\n      ];\n\n      final promptFromExamples = PromptTemplate.fromExamples(\n        examples: examples,\n        suffix: suffix,\n        inputVariables: inputVariables,\n        exampleSeparator: exampleSeparator,\n        prefix: prefix,\n      );\n      const promptFromTemplate = PromptTemplate(\n        template: template,\n        inputVariables: inputVariables,\n      );\n      expect(promptFromExamples, promptFromTemplate);\n    });\n\n    test('Test prompt can be successfully constructed from a file', () async {\n      const templateFile = './test/prompts/assets/prompt_file.txt';\n      const inputVariables = ['question'];\n      final prompt = await PromptTemplate.fromFile(templateFile);\n      expect(prompt.template, 'Question: {question}\\nAnswer:');\n      expect(prompt.inputVariables, inputVariables);\n    });\n\n    test('Test prompt can be initialized with partial variables', () {\n      const template = 'This is a {foo} test.';\n      const prompt = PromptTemplate(\n        inputVariables: {},\n        template: template,\n        partialVariables: {'foo': 1},\n      );\n      expect(prompt.template, template);\n      expect(prompt.inputVariables, <String>[]);\n      final result = prompt.format();\n      expect(result, 'This is a 1 test.');\n    });\n\n    test('Test prompt can be initialized with partial variables', () 
{\n      const template = 'This is a {foo} test.';\n      const prompt = PromptTemplate(\n        inputVariables: {},\n        template: template,\n        partialVariables: {'foo': 2},\n      );\n      expect(prompt.template, template);\n      expect(prompt.inputVariables, <String>[]);\n      final result = prompt.format();\n      expect(result, 'This is a 2 test.');\n    });\n\n    test('Test prompt can be partial', () {\n      const template = 'This is a {foo} test.';\n      const prompt = PromptTemplate(\n        inputVariables: {'foo'},\n        template: template,\n      );\n      expect(prompt.template, template);\n      expect(prompt.inputVariables, ['foo']);\n\n      final newPrompt = prompt.partial({'foo': '3'});\n      final newResult = newPrompt.format();\n      expect(newResult, 'This is a 3 test.');\n\n      final result = prompt.format({'foo': 'foo'});\n      expect(result, 'This is a foo test.');\n    });\n\n    test('Test another partial prompt', () {\n      const prompt = PromptTemplate(\n        template: '{foo}{bar}',\n        inputVariables: {'foo'},\n        partialVariables: {'bar': 'baz'},\n      );\n      expect(prompt.format({'foo': 'foo'}), 'foobaz');\n    });\n\n    test('Test using full partial', () {\n      const prompt = PromptTemplate(\n        template: '{foo}{bar}',\n        inputVariables: {},\n        partialVariables: {'bar': 'baz', 'foo': 'boo'},\n      );\n      expect(prompt.format({}), 'boobaz');\n    });\n\n    test('Test partial', () {\n      const prompt = PromptTemplate(\n        template: '{foo}{bar}',\n        inputVariables: {'foo', 'bar'},\n      );\n      expect(prompt.inputVariables, ['foo', 'bar']);\n      final partialPrompt = prompt.partial({'foo': 'foo'});\n      // original prompt is not modified\n      expect(prompt.inputVariables, ['foo', 'bar']);\n      // partial prompt has only remaining variables\n      expect(partialPrompt.inputVariables, ['bar']);\n      expect(partialPrompt.format({'bar': 'baz'}), 
'foobaz');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/prompts/template_test.dart",
    "content": "import 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/src/prompts/template.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Template tests', () {\n    test('Render renderFStringTemplate', () {\n      final scenarios = <(String, InputValues, String)>[\n        ('{foo}', {'foo': 'bar'}, 'bar'),\n        ('pre{foo}post', {'foo': 'bar'}, 'prebarpost'),\n        ('{{pre{foo}post}}', {'foo': 'bar'}, '{prebarpost}'),\n        ('text', {}, 'text'),\n        ('}}{{', {}, '}{'),\n        ('{first}_{second}', {'first': 'foo', 'second': 'bar'}, 'foo_bar'),\n      ];\n\n      for (final scenario in scenarios) {\n        final (template, inputValues, expected) = scenario;\n        final actual = renderFStringTemplate(template, inputValues);\n        expect(\n          actual,\n          expected,\n          reason: 'Template: $template | Input: $inputValues',\n        );\n      }\n    });\n\n    test('Invalid f-strings', () {\n      final scenarios = <(String, InputValues)>[\n        ('{', {}),\n        ('}', {}),\n        ('{foo', {}),\n        ('foo}', {}),\n      ];\n\n      for (final scenario in scenarios) {\n        final (template, inputValues) = scenario;\n        // Expect exception to be thrown\n        expect(\n          () => renderFStringTemplate(template, inputValues),\n          throwsA(isA<TemplateValidationException>()),\n          reason: 'Template: $template',\n        );\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/batch_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/retrievers.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Runnable batch tests', () {\n    test('PromptTemplate batch', () async {\n      final run = PromptTemplate.fromTemplate('This is a {input}');\n      final res = await run.batch([\n        {'input': 'test1'},\n        {'input': 'test2'},\n        {'input': 'test3'},\n      ]);\n      expect(\n        res.map((final e) => e.toString()).toList(),\n        equals(['This is a test1', 'This is a test2', 'This is a test3']),\n      );\n    });\n\n    test('ChatPromptTemplate batch', () async {\n      final run = ChatPromptTemplate.fromPromptMessages([\n        SystemChatMessagePromptTemplate.fromTemplate(\n          'You are a helpful chatbot',\n        ),\n        HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n      ]);\n      final res = await run.batch([\n        {'input': 'test1'},\n        {'input': 'test2'},\n        {'input': 'test3'},\n      ]);\n      expect(\n        res.map((final e) => e.toChatMessages()).toList(),\n        equals([\n          [\n            ChatMessage.system('You are a helpful chatbot'),\n            ChatMessage.humanText('test1'),\n          ],\n          [\n            ChatMessage.system('You are a helpful chatbot'),\n            ChatMessage.humanText('test2'),\n          ],\n          [\n            ChatMessage.system('You are a helpful chatbot'),\n            ChatMessage.humanText('test3'),\n          ],\n        ]),\n      );\n    });\n\n    test('Retriever batch', () async {\n      const doc = Document(id: '1', pageContent: 
'This is a test');\n      const run = FakeRetriever([doc]);\n      final res = await run.batch(['test1', 'test2', 'test3']);\n      expect(\n        res.map((final e) => e).toList(),\n        equals([\n          [doc],\n          [doc],\n          [doc],\n        ]),\n      );\n    });\n\n    test('LLM batch', () async {\n      final run = FakeLLM(responses: ['test1', 'test2', 'test3']);\n      final res = await run.batch([\n        PromptValue.string('test1'),\n        PromptValue.string('test2'),\n        PromptValue.string('test3'),\n      ]);\n      expect(\n        res.map((final e) => e.output).toList(),\n        equals(['test1', 'test2', 'test3']),\n      );\n    });\n\n    test('ChatModel batch', () async {\n      final run = FakeChatModel(responses: ['test1', 'test2', 'test3']);\n      final res = await run.batch([\n        PromptValue.string('test1'),\n        PromptValue.string('test2'),\n        PromptValue.string('test3'),\n      ]);\n      expect(\n        res.map((final e) => e.output.content).toList(),\n        equals(['test1', 'test2', 'test3']),\n      );\n    });\n\n    test('OutputParser batch', () async {\n      final results = List.generate(\n        3,\n        (final i) => LLMResult(\n          id: 'id$i',\n          output: 'Hello world! $i',\n          finishReason: FinishReason.stop,\n          metadata: const {},\n          usage: const LanguageModelUsage(),\n        ),\n      );\n      const run = StringOutputParser();\n      final res = await run.batch(results);\n      expect(\n        res,\n        equals(['Hello world! 0', 'Hello world! 1', 'Hello world! 2']),\n      );\n    });\n\n    test('Tool batch', () async {\n      final run = FakeTool();\n      final res = await run.batch(['hello1', 'hello2', 'hello3']);\n      expect(\n        res.map((final e) => e).toList(),\n        equals(['hello1', 'hello2', 'hello3']),\n      );\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/binding_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableBinding tests', () {\n    test('RunnableBinding from Runnable.bind', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}');\n      const model = _FakeOptionsChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain =\n          prompt |\n          model.bind(const _FakeOptionsChatModelOptions('world')) |\n          outputParser;\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, 'Hello ');\n    });\n\n    test('Chaining bind calls', () async {\n      final model = FakeChatModel(\n        responses: ['a', 'b'],\n        defaultOptions: const FakeChatModelOptions(\n          model: 'modelA',\n          metadata: {'foo': 'bar'},\n        ),\n      );\n\n      final res1 = await model.invoke(PromptValue.string('1'));\n      expect(res1.metadata['model'], 'modelA');\n      expect(res1.metadata['foo'], 'bar');\n\n      final chain2 = model.bind(const FakeChatModelOptions(model: 'modelB'));\n      final res2 = await chain2.invoke(PromptValue.string('2'));\n      expect(res2.metadata['model'], 'modelB');\n      expect(res2.metadata['foo'], 'bar');\n\n      final chain3 = chain2.bind(\n        const FakeChatModelOptions(metadata: {'foo': 'baz'}),\n      );\n      final res3 = await chain3.invoke(PromptValue.string('3'));\n      expect(res3.metadata['model'], 'modelB');\n      expect(res3.metadata['foo'], 'baz');\n    });\n\n    test('Streaming RunnableBinding', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}');\n      const model = 
_FakeOptionsChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n\n      final chain = prompt\n          .pipe(model.bind(const _FakeOptionsChatModelOptions('world')))\n          .pipe(outputParser);\n      final stream = chain.stream({'input': 'world'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 6);\n      expect(streamList, isA<List<String>>());\n\n      final output = streamList.join();\n      expect(output, 'Hello ');\n    });\n  });\n}\n\nclass _FakeOptionsChatModel\n    extends SimpleChatModel<_FakeOptionsChatModelOptions> {\n  const _FakeOptionsChatModel()\n    : super(defaultOptions: const _FakeOptionsChatModelOptions(''));\n\n  @override\n  String get modelType => 'fake-options-chat-model';\n\n  @override\n  Future<String> callInternal(\n    final List<ChatMessage> messages, {\n    final _FakeOptionsChatModelOptions? options,\n  }) {\n    return Future.value(\n      messages.first.contentAsString.replaceAll(options?.stop ?? '', ''),\n    );\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final _FakeOptionsChatModelOptions? options,\n  }) {\n    final prompt = input\n        .toChatMessages()\n        .first\n        .contentAsString\n        .replaceAll(options?.stop ?? '', '')\n        .split('');\n    return Stream.fromIterable(prompt).map(\n      (final char) => ChatResult(\n        id: 'fake-options-chat-model',\n        output: AIChatMessage(content: char),\n        finishReason: FinishReason.stop,\n        metadata: const {},\n        usage: const LanguageModelUsage(),\n      ),\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final _FakeOptionsChatModelOptions? 
options,\n  }) async {\n    return promptValue\n        .toString()\n        .split(' ')\n        .map((final word) => word.hashCode)\n        .toList(growable: false);\n  }\n}\n\nclass _FakeOptionsChatModelOptions extends ChatModelOptions {\n  const _FakeOptionsChatModelOptions(this.stop);\n\n  final String stop;\n\n  @override\n  ChatModelOptions copyWith({\n    final String? model,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return _FakeOptionsChatModelOptions(stop);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/fallbacks_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableFallback tests', () {\n    late FakeEchoChatModel model;\n    late FakeChatModel fallbackModel;\n    final promptTemplate = ChatPromptTemplate.fromTemplate(\n      'tell me a joke about {topic}',\n    );\n    final input = PromptValue.string('why is the sky blue');\n\n    setUp(() {\n      model = const FakeEchoChatModel();\n      fallbackModel = FakeChatModel(responses: ['fallback response']);\n    });\n\n    test('RunnableFallback should return main runnable output', () async {\n      final modelWithFallback = model.withFallbacks([fallbackModel]);\n      final res = await modelWithFallback.invoke(input);\n      expect(res.output.content, 'why is the sky blue');\n    });\n\n    test('Should call fallback runnable if main runnable fails', () async {\n      final brokenModel = model.bind(\n        const FakeEchoChatModelOptions(throwRandomError: true),\n      );\n      final modelWithFallback = brokenModel.withFallbacks([fallbackModel]);\n      final res = await modelWithFallback.invoke(input);\n      expect(res.output.content, 'fallback response');\n    });\n\n    test('Test batch response of main runnable in RunnableFallback', () async {\n      const model = FakeEchoChatModel();\n      const fallbackModel = FakeEchoChatModel();\n      final fallbackChain = promptTemplate.pipe(fallbackModel);\n      final chainWithFallbacks = promptTemplate.pipe(model).withFallbacks([\n        fallbackChain,\n      ]);\n      final res = await chainWithFallbacks.batch([\n        {'topic': 'bears'},\n        {'topic': 'cats'},\n      ]);\n      expect(res[0].output.content, 'tell me a joke about bears');\n      expect(res[1].output.content, 'tell me a joke about cats');\n    });\n\n    test('Test fallbacks 
response in batch', () async {\n      final brokenModel = model.bind(\n        const FakeEchoChatModelOptions(throwRandomError: true),\n      );\n      final fallbackChain = promptTemplate.pipe(fallbackModel);\n      final chainWithFallbacks = promptTemplate.pipe(brokenModel).withFallbacks(\n        [fallbackChain],\n      );\n      final res = await chainWithFallbacks.batch([\n        {'topic': 'bears'},\n      ]);\n      expect(res.first.output.content, 'fallback response');\n    });\n\n    test('Should throw error if none of runnable returned output', () {\n      final brokenModel1 = model.bind(\n        const FakeEchoChatModelOptions(throwRandomError: true),\n      );\n      final brokenModel2 = model.bind(\n        const FakeEchoChatModelOptions(throwRandomError: true),\n      );\n      final fallbackChain = promptTemplate.pipe(brokenModel2);\n      final chainWithFallbacks = promptTemplate\n          .pipe(brokenModel1)\n          .withFallbacks([fallbackChain]);\n      expect(\n        () async => chainWithFallbacks.batch([\n          {'topic': 'bears'},\n        ]),\n        throwsException,\n      );\n    });\n\n    test('Test stream response of main runnable in RunnableFallback', () async {\n      final modelWithFallback = model.withFallbacks([fallbackModel]);\n      final chain = modelWithFallback.pipe(const StringOutputParser());\n      final res = await chain.stream(input).toList();\n      expect(res.join('|'), 'w|h|y| |i|s| |t|h|e| |s|k|y| |b|l|u|e');\n    });\n\n    test('Test fallbacks response in stream', () async {\n      final brokenModel = model.bind(\n        const FakeEchoChatModelOptions(throwRandomError: true),\n      );\n      final modelWithFallback = brokenModel.withFallbacks([fallbackModel]);\n      final chain = modelWithFallback.pipe(const StringOutputParser());\n      final res = await chain.stream(input).toList();\n      expect(res.join('|'), endsWith('f|a|l|l|b|a|c|k| |r|e|s|p|o|n|s|e'));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/function_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableFunction tests', () {\n    test('Invoke RunnableFunction', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain =\n          prompt |\n          model |\n          outputParser |\n          Runnable.fromFunction<String, int>(\n            invoke: (final input, final options) => input.length,\n          );\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, 12);\n    });\n\n    test('Streaming RunnableFunction', () async {\n      final function = Runnable.fromFunction<String, int>(\n        invoke: (final input, final options) => input.length,\n      );\n      final stream = function.stream('world');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<int>());\n\n      final item = streamList.first;\n      expect(item, 5);\n    });\n\n    test('Streaming input RunnableFunction', () async {\n      final function = Runnable.fromFunction<String, int>(\n        invoke: (final input, final options) => input.length,\n      );\n      final stream = function.streamFromInputStream(\n        Stream.fromIterable(['w', 'o', 'r', 'l', 'd']),\n      );\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 5);\n      expect(streamList, [1, 1, 1, 1, 1]);\n    });\n\n    test('Separate logic for invoke and stream', () async {\n      final function = Runnable.fromFunction<String, int>(\n        invoke: (final input, final options) => input.length,\n        stream: (final inputStream, final 
options) async* {\n          final input = (await inputStream.toList()).reduce((a, b) => a + b);\n          yield input.length;\n        },\n      );\n\n      final invokeRes = await function.invoke('world');\n      expect(invokeRes, 5);\n      final streamRes = await function\n          .streamFromInputStream(Stream.fromIterable(['w', 'o', 'r', 'l', 'd']))\n          .toList();\n      expect(streamRes, [5]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/input_getter_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableItemFromMap tests', () {\n    test('RunnableItemFromMap from Runnable.getItemFromMap', () async {\n      final chain = Runnable.getItemFromMap('foo');\n\n      final res = await chain.invoke({'foo': 'foo1', 'bar': 'bar1'});\n      expect(res, 'foo1');\n    });\n\n    test('RunnableMapFromInput from Runnable.getMapFromInput', () async {\n      final chain = Runnable.getMapFromInput('foo');\n\n      final res = await chain.invoke('foo1');\n      expect(res, {'foo': 'foo1'});\n    });\n\n    test('Streaming RunnableItemFromMap', () async {\n      final chain = Runnable.getItemFromMap('foo');\n      final stream = chain.stream({'foo': 'foo1', 'bar': 'bar1'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<String>());\n\n      final item = streamList.first;\n      expect(item, 'foo1');\n    });\n\n    test('Streaming RunnableMapFromInput', () async {\n      final chain = Runnable.getMapFromInput('foo');\n      final stream = chain.stream('foo1');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<Map<String, dynamic>>());\n\n      final item = streamList.first;\n      expect(item, {'foo': 'foo1'});\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/input_map_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableMapInput tests', () {\n    test('Invoke RunnableMapInput', () async {\n      final chain =\n          Runnable.mapInput<Map<String, dynamic>, Map<String, dynamic>>(\n            (final input) => {'input': '${input['foo']}${input['bar']}'},\n          );\n\n      final res = await chain.invoke({'foo': 'foo1', 'bar': 'bar1'});\n      expect(res, {'input': 'foo1bar1'});\n    });\n\n    test('Invoke async RunnableMapInput', () async {\n      Future<int> asyncFunc(final Map<String, dynamic> input) async {\n        await Future<void>.delayed(const Duration(milliseconds: 100));\n        return input.length;\n      }\n\n      final chain = Runnable.mapInput<Map<String, dynamic>, int>(\n        (final input) async => asyncFunc(input),\n      );\n\n      final res = await chain.invoke({'foo': 'foo1', 'bar': 'bar1'});\n      expect(res, 2);\n    });\n\n    test('Streaming RunnableMapInput', () async {\n      final chain =\n          Runnable.mapInput<Map<String, dynamic>, Map<String, dynamic>>(\n            (final input) => {'input': '${input['foo']}${input['bar']}'},\n          );\n      final stream = chain.stream({'foo': 'foo1', 'bar': 'bar1'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n\n      final item = streamList.first;\n      expect(item, {'input': 'foo1bar1'});\n    });\n\n    test('Streaming input RunnableMapInput', () async {\n      final chain = Runnable.mapInput<String, int>(\n        (final input) => input.length,\n      );\n      final stream = chain.streamFromInputStream(\n        Stream.fromIterable(['w', 'o', 'r', 'l', 'd']),\n      );\n\n      final streamList = await stream.toList();\n      expect(streamList, [1, 1, 1, 1, 1]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/input_stream_map_test.dart",
    "content": "import 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableMapInputStream tests', () {\n    test('Invoking RunnableMapInputStream', () async {\n      final chain = Runnable.mapInputStream<String, int>((final inputStream) {\n        return inputStream.map((final input) => input.length);\n      });\n\n      final res = await chain.invoke('world');\n\n      expect(res, 5);\n    });\n\n    test('Streaming RunnableMapInputStream', () async {\n      final chain = Runnable.mapInputStream<String, int>((final inputStream) {\n        return inputStream.map((final input) => input.length);\n      });\n      final stream = chain.stream('world');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<int>());\n\n      final item = streamList.first;\n      expect(item, 5);\n    });\n\n    test('Streaming input RunnableMapInputStream', () async {\n      final chain = Runnable.mapInputStream<String, int>((final inputStream) {\n        return inputStream.map((final input) => input.length);\n      });\n      final stream = chain.streamFromInputStream(\n        Stream.fromIterable(['w', 'or', 'l', 'd']),\n      );\n\n      final streamList = await stream.toList();\n      expect(streamList, [1, 2, 1, 1]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/invoke_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/retrievers.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Runnable invoke tests', () {\n    test('PromptTemplate as Runnable', () async {\n      final run = PromptTemplate.fromTemplate('This is a {input}');\n      final res = await run.invoke({'input': 'test'});\n      expect(res.toString(), equals('This is a test'));\n    });\n\n    test('ChatPromptTemplate as Runnable', () async {\n      final run = ChatPromptTemplate.fromPromptMessages([\n        SystemChatMessagePromptTemplate.fromTemplate(\n          'You are a helpful chatbot',\n        ),\n        HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n      ]);\n      final res = await run.invoke({'input': 'test'});\n      expect(\n        res.toChatMessages(),\n        equals([\n          ChatMessage.system('You are a helpful chatbot'),\n          ChatMessage.humanText('test'),\n        ]),\n      );\n    });\n\n    test('Retriever as Runnable', () async {\n      const doc = Document(id: '1', pageContent: 'This is a test');\n      const run = FakeRetriever([doc]);\n      final res = await run.invoke('test');\n      expect(res, equals([doc]));\n    });\n\n    test('LLM as Runnable', () async {\n      const run = FakeEchoLLM();\n      final res = await run.invoke(PromptValue.string('Hello world!'));\n      expect(res.output, 'Hello world!');\n    });\n\n    test('ChatModel as Runnable', () async {\n      const run = FakeEchoChatModel();\n      final res = await run.invoke(PromptValue.string('Hello world!'));\n      expect(res.output.content, 'Hello world!');\n    
});\n\n    test('OutputParser as Runnable', () async {\n      const run = StringOutputParser();\n      final res = await run.invoke(\n        const LLMResult(\n          id: 'id',\n          output: 'Hello world!',\n          finishReason: FinishReason.stop,\n          metadata: {},\n          usage: LanguageModelUsage(),\n        ),\n      );\n      expect(res, 'Hello world!');\n    });\n\n    test('Tool as Runnable', () async {\n      final run = FakeTool();\n      final res = await run.invoke('hello');\n      expect(res, 'hello');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/map_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableMap tests', () {\n    test('RunnableMap with multiple branches', () async {\n      final prompt1 = PromptTemplate.fromTemplate('Hello {input}!');\n      final prompt2 = PromptTemplate.fromTemplate('Bye {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain = Runnable.fromMap({\n        'left': prompt1 | model | outputParser,\n        'right': prompt2 | model | outputParser,\n      });\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, {'left': 'Hello world!', 'right': 'Bye world!'});\n    });\n\n    test('RunnableMap runs tasks in parallel', () async {\n      final longTask = Runnable.fromFunction(\n        invoke: (_, _) async {\n          await Future<void>.delayed(const Duration(seconds: 2));\n          return 'long';\n        },\n      );\n      final shortTask = Runnable.fromFunction(\n        invoke: (_, _) async {\n          await Future<void>.delayed(const Duration(seconds: 1));\n          return 'short';\n        },\n      );\n\n      final chain = Runnable.fromMap({'long': longTask, 'short': shortTask});\n\n      final stopwatch = Stopwatch()..start();\n      final result = await chain.invoke({});\n      stopwatch.stop();\n\n      expect(stopwatch.elapsed, lessThan(const Duration(seconds: 3)));\n      expect(result['long'], 'long');\n      expect(result['short'], 'short');\n    });\n\n    test('Streaming RunnableMap', () async {\n      final prompt1 = PromptTemplate.fromTemplate('Hello {input}!');\n      final prompt2 = PromptTemplate.fromTemplate('Bye {input}!');\n      const model = 
FakeEchoLLM();\n      const outputParser = StringOutputParser<LLMResult>();\n      final chain = Runnable.fromMap({\n        'left': prompt1 | model | outputParser,\n        'right': prompt2 | model | outputParser,\n      });\n      final stream = chain.stream({'input': 'world'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 22);\n      expect(streamList, isA<List<Map<String, dynamic>>>());\n\n      final left = streamList\n          .map((final it) => it['left']) //\n          .nonNulls\n          .join();\n      final right = streamList\n          .map((final it) => it['right']) //\n          .nonNulls\n          .join();\n\n      expect(left, 'Hello world!');\n      expect(right, 'Bye world!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/passthrough_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnablePassthrough tests', () {\n    test('RunnablePassthrough from Runnable.passthrough', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain = Runnable.fromMap({\n        'in': Runnable.passthrough(),\n        'out': Runnable.getMapFromInput() | prompt | model | outputParser,\n      });\n\n      final res = await chain.invoke('world');\n      expect(res, {'in': 'world', 'out': 'Hello world!'});\n    });\n\n    test('Streaming RunnablePassthrough', () async {\n      final passthrough = Runnable.passthrough();\n      final stream = passthrough.stream('world');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<String>());\n\n      final item = streamList.first;\n      expect(item, 'world');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/retry_test.dart",
    "content": "// ignore_for_file: unnecessary_async\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Runnable Retry Test', () {\n    late FakeEchoChatModel model;\n    final input = PromptValue.string('why is the sky blue');\n    final promptTemplate = ChatPromptTemplate.fromTemplate(\n      'tell me a joke about {topic}',\n    );\n\n    setUp(() {\n      model = const FakeEchoChatModel();\n    });\n\n    test('Runnable retry should return output for invoke', () async {\n      final modelWithRetry = model.withRetry(maxRetries: 2);\n      final res = await modelWithRetry.invoke(input);\n      expect(res.output.content, 'why is the sky blue');\n    });\n\n    test('Runnable retry should return output for batch', () async {\n      final chain = promptTemplate.pipe(model);\n      final chainWithRetry = chain.withRetry();\n      final res = await chainWithRetry.batch([\n        {'topic': 'bears'},\n        {'topic': 'cats'},\n      ]);\n      expect(res[0].output.content, 'tell me a joke about bears');\n      expect(res[1].output.content, 'tell me a joke about cats');\n    });\n\n    test('Should retry based RetryOptions, maxRetries = 2', () {\n      final modelWithRetry = model.withRetry(maxRetries: 2);\n      expect(\n        () async => modelWithRetry.invoke(\n          input,\n          options: const FakeEchoChatModelOptions(throwRandomError: true),\n        ),\n        throwsException,\n      );\n    });\n\n    test('Should return the output after successful retry', () async {\n      var count = 0;\n      final modelWithRetry = model\n          .pipe(\n            Runnable.fromFunction(\n              invoke: (input, opt) {\n                if (count++ < 1) {\n                  throw Exception('Random error');\n                }\n                return input;\n              },\n            ),\n          
)\n          .withRetry(maxRetries: 2);\n      final res = await modelWithRetry.invoke(input);\n      expect(res.outputAsString, input.toString());\n      expect(count, 2);\n    });\n\n    test('Should not retry if retryIf returned false', () async {\n      late String error;\n      final modelWithRetry = model.withRetry(\n        maxRetries: 3,\n        retryIf: (e) {\n          if (e.toString() == 'Exception: Random error') {\n            return false;\n          } else {\n            return true;\n          }\n        },\n      );\n      try {\n        await modelWithRetry.invoke(\n          input,\n          options: const FakeEchoChatModelOptions(throwRandomError: true),\n        );\n      } catch (e) {\n        error = e.toString();\n      }\n      expect(error, 'Exception: Random error');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/router_test.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:langchain_core/src/output_parsers/output_parsers.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableRouter tests', () {\n    test('RunnableRouter invoke', () async {\n      final add = Runnable.mapInput((int x) => x + 1);\n      final subtract = Runnable.mapInput((int x) => x - 1);\n\n      final router = Runnable.fromRouter(\n        (int x, options) => switch (x) {\n          > 0 => add,\n          _ => subtract,\n        },\n      );\n\n      final result = await router.invoke(1);\n      expect(result, equals(2));\n      final result2 = await router.invoke(-1);\n      expect(result2, equals(-2));\n    });\n\n    test('RunnableRouter batch', () async {\n      final add = Runnable.mapInput((int x) => x + 1);\n      final multiply = Runnable.mapInput((int x) => x * 10);\n      final subtract = Runnable.mapInput((int x) => x - 1);\n\n      final router = Runnable.fromRouter(\n        (int x, options) => switch (x) {\n          > 0 && < 5 => add,\n          > 5 => multiply,\n          _ => subtract,\n        },\n      );\n\n      final batchResult = await router.batch([1, 10, 0]);\n      expect(batchResult, equals([2, 100, -1]));\n    });\n\n    test('RunnableRouter stream', () async {\n      final promptTemplate = ChatPromptTemplate.fromTemplate('{question}');\n      const model = FakeEchoChatModel();\n\n      final classificationChain = promptTemplate\n          .pipe(model)\n          .pipe(const StringOutputParser());\n      final generalChain = ChatPromptTemplate.fromTemplate(\n        'GENERAL CHAIN',\n      ).pipe(model).pipe(const StringOutputParser());\n      final langChainChain = ChatPromptTemplate.fromTemplate(\n        'LANGCHAIN CHAIN',\n      ).pipe(model).pipe(const StringOutputParser());\n\n      final router = Runnable.fromRouter((Map<String, 
dynamic> input, _) {\n        final topic = (input['topic'] as String).toLowerCase();\n        return switch (topic.contains('langchain')) {\n          true => langChainChain,\n          false => generalChain,\n        };\n      });\n\n      final fullChain = Runnable.fromMap({\n        'topic': classificationChain,\n        'question': Runnable.getItemFromMap('question'),\n      }).pipe(router).pipe(const StringOutputParser());\n\n      final stream1 = fullChain.stream({\n        'question': 'How do I use langchain? Explain in one sentence',\n      });\n      var output1 = '';\n      await for (final chunk in stream1) {\n        output1 += chunk;\n      }\n      expect(output1.length, greaterThan(1));\n      expect(output1, contains('LANGCHAIN'));\n\n      final stream2 = fullChain.stream({\n        'question': 'What is up? Explain in one sentence',\n      });\n      var output2 = '';\n      await for (final chunk in stream2) {\n        output2 += chunk;\n      }\n      expect(output2.length, greaterThan(1));\n      expect(output2, contains('GENERAL CHAIN'));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/sequence_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('RunnableSequence tests', () {\n    test('RunnableSequence from Runnable.pipe', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain = prompt.pipe(model).pipe(outputParser);\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, 'Hello world!');\n    });\n\n    test('RunnableSequence from | operator', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain = prompt | model | outputParser;\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, 'Hello world!');\n    });\n\n    test('RunnableSequence from Runnable.fromList', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoChatModel();\n      const outputParser = StringOutputParser<ChatResult>();\n      final chain = Runnable.fromList([prompt, model, outputParser]);\n\n      final res = await chain.invoke({'input': 'world'});\n      expect(res, 'Hello world!');\n    });\n\n    test('Streaming RunnableSequence', () async {\n      final prompt = PromptTemplate.fromTemplate('Hello {input}!');\n      const model = FakeEchoLLM();\n      const outputParser = StringOutputParser<LLMResult>();\n      final chain = prompt.pipe(model).pipe(outputParser);\n      final stream = chain.stream({'input': 'world'});\n\n      final streamList = await stream.toList();\n      
expect(streamList.length, 12);\n      expect(streamList, isA<List<String>>());\n\n      final res = streamList.join();\n      expect(res, 'Hello world!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/runnables/stream_test.dart",
    "content": "// ignore_for_file: unused_element\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/retrievers.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Runnable stream tests', () {\n    test('Test streaming PromptTemplate', () async {\n      final run = PromptTemplate.fromTemplate('This is a {input}');\n      final stream = run.stream({'input': 'test'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<StringPromptValue>());\n\n      final item = streamList.first;\n      expect(item.toString(), 'This is a test');\n    });\n\n    test('Test streaming ChatPromptTemplate', () async {\n      final run = ChatPromptTemplate.fromPromptMessages([\n        SystemChatMessagePromptTemplate.fromTemplate(\n          'You are a helpful chatbot',\n        ),\n        HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n      ]);\n      final stream = run.stream({'input': 'test'});\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<ChatPromptValue>());\n\n      final item = streamList.first;\n      expect(\n        item.toChatMessages(),\n        equals([\n          ChatMessage.system('You are a helpful chatbot'),\n          ChatMessage.humanText('test'),\n        ]),\n      );\n    });\n\n    test('Test streaming', () async {\n      const doc = Document(id: '1', pageContent: 'This is a test');\n      const run = FakeRetriever([doc]);\n      final stream = run.stream('test');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      
expect(streamList.first, isA<List<Document>>());\n\n      final item = streamList.first;\n      expect(item, [doc]);\n    });\n\n    test('Streaming LLM', () async {\n      const run = FakeEchoLLM();\n      final stream = run.stream(PromptValue.string('Hello world!'));\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 12);\n      expect(streamList, isA<List<LLMResult>>());\n\n      final res = streamList.map((final i) => i.output).join();\n\n      expect(res, 'Hello world!');\n    });\n\n    test('Streaming ChatModel', () async {\n      const run = FakeEchoChatModel();\n      final stream = run.stream(PromptValue.string('Hello world!'));\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 12);\n      expect(streamList, isA<List<ChatResult>>());\n\n      final res = streamList.map((final i) => i.output.content).join();\n      expect(res, 'Hello world!');\n    });\n\n    test('Streaming OutputParser', () async {\n      const run = StringOutputParser();\n      final stream = run.stream(\n        const LLMResult(\n          id: 'id',\n          output: 'Hello world!',\n          finishReason: FinishReason.stop,\n          metadata: {},\n          usage: LanguageModelUsage(),\n        ),\n      );\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<String>());\n\n      final res = streamList.first;\n      expect(res, 'Hello world!');\n    });\n\n    test('Streaming Tool', () async {\n      final run = FakeTool();\n      final stream = run.stream('hello');\n\n      final streamList = await stream.toList();\n      expect(streamList.length, 1);\n      expect(streamList.first, isA<String>());\n\n      final res = streamList.first;\n      expect(res, 'hello');\n    });\n  });\n\n  test('Test call to PromptTemplate from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      {'input': 'H'},\n      {'input': 
'e'},\n      {'input': 'l'},\n      {'input': 'l'},\n      {'input': 'o'},\n      {'input': ' '},\n      {'input': 'W'},\n      {'input': 'o'},\n      {'input': 'r'},\n      {'input': 'l'},\n      {'input': 'd'},\n    ]);\n\n    final promptTemplate = PromptTemplate.fromTemplate(\n      'Spell the following text {input}',\n    );\n\n    final stream = promptTemplate.streamFromInputStream(inputStream);\n    var count = 0;\n    var output = PromptValue.string('');\n    await stream.forEach((final i) {\n      count++;\n      output = output.concat(i);\n    });\n    expect(count, 1);\n    expect(output, PromptValue.string('Spell the following text Hello World'));\n  });\n\n  test('Test call to ChatPromptTemplate from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      {'input': 'H'},\n      {'input': 'e'},\n      {'input': 'l'},\n      {'input': 'l'},\n      {'input': 'o'},\n      {'input': ' '},\n      {'input': 'W'},\n      {'input': 'o'},\n      {'input': 'r'},\n      {'input': 'l'},\n      {'input': 'd'},\n    ]);\n\n    final promptTemplate = ChatPromptTemplate.fromTemplate(\n      'Spell the following text {input}',\n    );\n\n    final stream = promptTemplate.streamFromInputStream(inputStream);\n    var count = 0;\n    var output = PromptValue.chat([ChatMessage.humanText('')]);\n    await stream.forEach((final i) {\n      count++;\n      output = output.concat(i);\n    });\n    expect(count, 1);\n    expect(\n      output,\n      PromptValue.chat([\n        ChatMessage.humanText('Spell the following text Hello World'),\n      ]),\n    );\n  });\n\n  test('Test call to LLM from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      PromptValue.string('H'),\n      PromptValue.string('e'),\n      PromptValue.string('l'),\n      PromptValue.string('l'),\n      PromptValue.string('o'),\n      PromptValue.string(' '),\n      PromptValue.string('W'),\n      PromptValue.string('o'),\n      
PromptValue.string('r'),\n      PromptValue.string('l'),\n      PromptValue.string('d'),\n    ]);\n\n    const llm = FakeEchoLLM();\n    final stream = llm.streamFromInputStream(inputStream);\n    var count = 0;\n    LLMResult? output;\n    await stream.forEach((final LLMResult i) {\n      count++;\n      output = output?.concat(i) ?? i;\n    });\n    expect(count, 11);\n    expect(output?.output, 'Hello World');\n  });\n\n  test('Test call to ChatModel from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      PromptValue.chat([ChatMessage.humanText('H')]),\n      PromptValue.chat([ChatMessage.humanText('e')]),\n      PromptValue.chat([ChatMessage.humanText('l')]),\n      PromptValue.chat([ChatMessage.humanText('l')]),\n      PromptValue.chat([ChatMessage.humanText('o')]),\n      PromptValue.chat([ChatMessage.humanText(' ')]),\n      PromptValue.chat([ChatMessage.humanText('W')]),\n      PromptValue.chat([ChatMessage.humanText('o')]),\n      PromptValue.chat([ChatMessage.humanText('r')]),\n      PromptValue.chat([ChatMessage.humanText('l')]),\n      PromptValue.chat([ChatMessage.humanText('d')]),\n    ]);\n\n    const chatModel = FakeEchoChatModel();\n    final stream = chatModel.streamFromInputStream(inputStream);\n    var count = 0;\n    ChatResult? output;\n    await stream.forEach((final ChatResult i) {\n      count++;\n      output = output?.concat(i) ?? i;\n    });\n    expect(count, 11);\n    expect(output?.output.content, 'Hello World');\n  });\n\n  test('Test call to Tool from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      'H',\n      'e',\n      'l',\n      'l',\n      'o',\n      ' ',\n      'W',\n      'o',\n      'r',\n      'l',\n      'd',\n    ]);\n\n    final tool = FakeTool();\n    final stream = tool.streamFromInputStream(inputStream);\n    var count = 0;\n    String? 
output;\n    await stream.forEach((final String i) {\n      count++;\n      output = i;\n    });\n    expect(count, 1);\n    expect(output, 'Hello World');\n  });\n\n  test('Test call to Retriever from streaming input', () async {\n    final inputStream = Stream.fromIterable([\n      'H',\n      'e',\n      'l',\n      'l',\n      'o',\n      ' ',\n      'W',\n      'o',\n      'r',\n      'l',\n      'd',\n    ]);\n\n    const doc = Document(id: '1', pageContent: 'Hello World');\n    const retriever = FakeRetriever([doc]);\n    final stream = retriever.streamFromInputStream(inputStream);\n    var count = 0;\n    List<Document>? output;\n    await stream.forEach((final List<Document> i) {\n      count++;\n      output = i;\n    });\n    expect(count, 1);\n    expect(output, [doc]);\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/tools/base_test.dart",
    "content": "import 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Tool tests', () {\n    test('Tool.fromFunction', () async {\n      final echoTool = Tool.fromFunction<int, String>(\n        name: 'echo-int',\n        description: 'echo-int',\n        func: (final int toolInput) => toolInput.toString(),\n        inputJsonSchema: const {\n          'type': 'object',\n          'properties': {\n            'input': {\n              'type': 'integer',\n              'description': 'The input to the tool',\n            },\n          },\n          'required': ['input'],\n        },\n      );\n\n      expect(echoTool.name, 'echo-int');\n      expect(echoTool.description, 'echo-int');\n      expect(await echoTool.invoke(1), '1');\n      expect(echoTool.getInputFromJson({'input': 1}), 1);\n    });\n\n    test('Tool.fromFunction with custom deserialization', () async {\n      final tool = Tool.fromFunction<_SearchInput, String>(\n        name: 'search',\n        description: 'Tool for searching the web.',\n        inputJsonSchema: const {\n          'type': 'object',\n          'properties': {\n            'query': {\n              'type': 'string',\n              'description': 'The query to search for',\n            },\n            'n': {\n              'type': 'number',\n              'description': 'The number of results to return',\n            },\n          },\n          'required': ['query'],\n        },\n        func: (final _SearchInput toolInput) {\n          final n = toolInput.n;\n          final res = List<String>.generate(n, (final i) => 'Result ${i + 1}');\n          return 'Results:\\n${res.join('\\n')}';\n        },\n        getInputFromJson: _SearchInput.fromJson,\n      );\n\n      expect(tool.name, 'search');\n      expect(tool.description, 'Tool for searching the web.');\n      expect(\n        await tool.invoke(const _SearchInput(query: 'cats', n: 3)),\n        
'Results:\\nResult 1\\nResult 2\\nResult 3',\n      );\n      expect(\n        tool.getInputFromJson({'query': 'cats', 'n': 3}),\n        const _SearchInput(query: 'cats', n: 3),\n      );\n    });\n\n    group('Tool.fromFunction multi-LLM compatibility', () {\n      test('OpenAI format compatibility - nested input format', () async {\n        final weatherTool = Tool.fromFunction<Map<String, dynamic>, String>(\n          name: 'get_current_weather',\n          description: 'Get the current weather in a given location',\n          inputJsonSchema: const {\n            'type': 'object',\n            'properties': {\n              'location': {\n                'type': 'string',\n                'description': 'The city and state, e.g. San Francisco, CA',\n              },\n            },\n            'required': ['location'],\n          },\n          func: (final Map<String, dynamic> toolInput) {\n            final location = toolInput['location'] as String;\n            return 'Weather in $location: 20°C, sunny';\n          },\n        );\n\n        // OpenAI format: {input: {location: \"...\"}}\n        final openAiFormat = {\n          'input': {'location': 'San Francisco, CA'},\n        };\n\n        final parsedInput = weatherTool.getInputFromJson(openAiFormat);\n        expect(parsedInput, {'location': 'San Francisco, CA'});\n\n        final result = await weatherTool.invoke(parsedInput);\n        expect(result, 'Weather in San Francisco, CA: 20°C, sunny');\n      });\n\n      test(\n        'Google AI format compatibility - direct arguments format',\n        () async {\n          final weatherTool = Tool.fromFunction<Map<String, dynamic>, String>(\n            name: 'get_current_weather',\n            description: 'Get the current weather in a given location',\n            inputJsonSchema: const {\n              'type': 'object',\n              'properties': {\n                'location': {\n                  'type': 'string',\n                  
'description': 'The city and state, e.g. San Francisco, CA',\n                },\n              },\n              'required': ['location'],\n            },\n            func: (final Map<String, dynamic> toolInput) {\n              final location = toolInput['location'] as String;\n              return 'Weather in $location: 15°C, cloudy';\n            },\n          );\n\n          // Google AI format: {location: \"...\"} (direct arguments)\n          final googleAiFormat = {'location': 'Boston, MA'};\n\n          final parsedInput = weatherTool.getInputFromJson(googleAiFormat);\n          expect(parsedInput, {'location': 'Boston, MA'});\n\n          final result = await weatherTool.invoke(parsedInput);\n          expect(result, 'Weather in Boston, MA: 15°C, cloudy');\n        },\n      );\n    });\n  });\n}\n\n@immutable\nclass _SearchInput {\n  const _SearchInput({required this.query, required this.n});\n\n  final String query;\n  final int n;\n\n  _SearchInput.fromJson(final Map<String, dynamic> json)\n    : this(query: json['query'] as String, n: json['n'] as int);\n\n  @override\n  bool operator ==(covariant _SearchInput other) =>\n      identical(this, other) || query == other.query && n == other.n;\n\n  @override\n  int get hashCode => query.hashCode ^ n.hashCode;\n}\n"
  },
  {
    "path": "packages/langchain_core/test/tools/string_test.dart",
    "content": "import 'package:langchain_core/tools.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('StringTool tests', () {\n    test('StringTool.fromFunction', () async {\n      final echoTool = StringTool.fromFunction(\n        name: 'echo',\n        description: 'echo',\n        func: (String input) => input,\n      );\n\n      expect(echoTool.name, 'echo');\n      expect(echoTool.description, 'echo');\n      expect(await echoTool.invoke('Hello world!'), 'Hello world!');\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/utils/chunk_test.dart",
    "content": "import 'package:langchain_core/utils.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Chunk tests', () {\n    test('Test with empty list', () {\n      expect(chunkList(<int>[], chunkSize: 3), <int>[]);\n    });\n\n    test('Test with list of integers and chunk size 2', () {\n      final arr = [1, 2, 3, 4, 5, 6, 7];\n      expect(chunkList(arr, chunkSize: 2), [\n        [1, 2],\n        [3, 4],\n        [5, 6],\n        [7],\n      ]);\n    });\n\n    test('Test with list of strings and chunk size 3', () {\n      final arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g'];\n      expect(chunkList(arr, chunkSize: 3), [\n        ['a', 'b', 'c'],\n        ['d', 'e', 'f'],\n        ['g'],\n      ]);\n    });\n\n    test('Test with chunk size larger than list size', () {\n      final arr = [1, 2, 3];\n      expect(chunkList(arr, chunkSize: 4), [\n        [1, 2, 3],\n      ]);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/utils/reduce_test.dart",
    "content": "import 'dart:collection';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/utils.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Reduce tests', () {\n    test('throws an exception when the input is empty', () {\n      expect(() => reduce([]), throwsException);\n    });\n\n    test('returns the first element when the input has only one element', () {\n      expect(reduce([1]), equals(1));\n    });\n\n    test('joins strings when the input is a list of strings', () {\n      expect(reduce(['Hello', 'World']), equals('HelloWorld'));\n    });\n\n    test(\n      'concatenates chat messages when the input is a list of chat messages',\n      () {\n        final messages = [\n          ChatMessage.humanText('Hello'),\n          ChatMessage.humanText('World'),\n        ];\n        expect(reduce(messages), equals(ChatMessage.humanText('HelloWorld')));\n      },\n    );\n\n    test(\n      'concatenates language model results when the input is a list of language model results',\n      () {\n        const results = [\n          LLMResult(\n            id: 'id',\n            output: 'Hello',\n            finishReason: FinishReason.stop,\n            metadata: {},\n            usage: LanguageModelUsage(),\n          ),\n          LLMResult(\n            id: 'id',\n            output: 'World',\n            finishReason: FinishReason.stop,\n            metadata: {},\n            usage: LanguageModelUsage(),\n          ),\n        ];\n        expect(\n          reduce(results),\n          equals(\n            const LLMResult(\n              id: 'id',\n              output: 'HelloWorld',\n              finishReason: FinishReason.stop,\n              metadata: {},\n              usage: LanguageModelUsage(),\n            ),\n          ),\n        );\n      },\n    
);\n\n    test('concatenates documents when the input is a list of documents', () {\n      const documents = [\n        Document(pageContent: 'Hello'),\n        Document(pageContent: 'World'),\n      ];\n      expect(\n        reduce(documents),\n        equals(const Document(pageContent: 'HelloWorld')),\n      );\n    });\n\n    test('flattens the list when the input is a list of lists', () {\n      expect(\n        reduce([\n          ['a', 'b'],\n          ['c', 'd'],\n        ]),\n        equals(['abcd']),\n      );\n    });\n\n    test('merges maps when the input is a list of maps', () {\n      final maps = [\n        {'a': 'Hello', 'b': 'foo'},\n        {'a': 'World', 'c': 'bar'},\n      ];\n      expect(reduce(maps), equals({'a': 'HelloWorld', 'b': 'foo', 'c': 'bar'}));\n    });\n\n    test(\n      'returns the last element when the input is a list of unknown types',\n      () {\n        expect(reduce([Stopwatch(), Queue<int>()]), equals(Queue<int>()));\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_core/test/utils/similarity_test.dart",
    "content": "import 'package:langchain_core/utils.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('cosineSimilarity tests', () {\n    const precision = 0.0001;\n\n    test('Test cosine similarity function', () {\n      expect(\n        cosineSimilarity([1, 2, 3], [4, 5, 6]),\n        closeTo(0.9746, precision),\n      );\n      expect(cosineSimilarity([1, 0], [0, 1]), closeTo(0.0, precision));\n      expect(cosineSimilarity([1, 2], [2, 4]), closeTo(1.0, precision));\n      expect(cosineSimilarity([1], [2]), closeTo(1.0, precision));\n      expect(cosineSimilarity([1], [1]), closeTo(1.0, precision));\n      expect(cosineSimilarity([], []), isNaN);\n    });\n  });\n\n  group('calculateSimilarity tests', () {\n    const precision = 0.0001;\n\n    test('Calculates similarity for non-empty vectors', () {\n      expect(\n        calculateSimilarity(\n          [1, 2, 3],\n          [\n            [4, 5, 6],\n            [7, 8, 9],\n          ],\n        ),\n        equals([0.9746, 0.9594].map((v) => closeTo(v, precision)).toList()),\n      );\n    });\n\n    test('Calculates similarity for orthogonal vectors', () {\n      expect(\n        calculateSimilarity(\n          [1, 0],\n          [\n            [0, 1],\n            [0, -1],\n          ],\n        ),\n        equals([0.0, 0.0].map((v) => closeTo(v, precision)).toList()),\n      );\n    });\n\n    test('Calculates similarity for identical vectors', () {\n      expect(\n        calculateSimilarity(\n          [1, 2],\n          [\n            [1, 2],\n            [2, 4],\n          ],\n        ),\n        equals([1.0, 1.0].map((v) => closeTo(v, precision)).toList()),\n      );\n    });\n\n    test('Calculates similarity for empty vectors', () {\n      expect(calculateSimilarity([], [[]]).first, isNaN);\n    });\n\n    test('Calculates similarity with custom similarity function', () {\n      expect(\n        calculateSimilarity(\n          [1, 2, 3],\n          [\n            [4, 5, 6],\n       
     [7, 8, 9],\n          ],\n          similarityFunction: (a, b) => a[0] * b[0],\n        ),\n        equals([4.0, 7.0]),\n      );\n    });\n  });\n\n  group('getIndexesMostSimilarEmbeddings tests', () {\n    test(\n      'Returns sorted indexes of most similar vectors for non-empty vectors',\n      () {\n        expect(\n          getIndexesMostSimilarEmbeddings(\n            [1, 2, 3],\n            [\n              [4, 5, 6],\n              [7, 8, 9],\n            ],\n          ),\n          equals([0, 1]),\n        );\n      },\n    );\n\n    test(\n      'Returns sorted indexes of most similar vectors for orthogonal vectors',\n      () {\n        expect(\n          getIndexesMostSimilarEmbeddings(\n            [1, 0],\n            [\n              [0, 1],\n              [0, -1],\n            ],\n          ),\n          equals([0, 1]),\n        );\n      },\n    );\n\n    test(\n      'Returns sorted indexes of most similar vectors for identical vectors',\n      () {\n        expect(\n          getIndexesMostSimilarEmbeddings(\n            [1, 2],\n            [\n              [1, 2],\n              [2, 4],\n            ],\n          ),\n          equals([0, 1]),\n        );\n      },\n    );\n\n    test(\n      'Returns sorted indexes of most similar vectors with custom similarity function',\n      () {\n        expect(\n          getIndexesMostSimilarEmbeddings(\n            [1, 2, 3],\n            [\n              [4, 5, 6],\n              [7, 8, 9],\n            ],\n            similarityFunction: (a, b) => a[0] * b[0],\n          ),\n          equals([1, 0]),\n        );\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_firebase/.gitattributes",
    "content": "example/*/.metadata linguist-generated=true\nexample/**/Flutter/GeneratedPluginRegistrant.swift linguist-generated=true\nexample/**/Runner.xcodeproj/ linguist-generated=true\nexample/**/Runner.xcworkspace/ linguist-generated=true\nexample/**/flutter/CMakeLists.txt linguist-generated=true\nexample/**/flutter/generated_* linguist-generated=true\n"
  },
  {
    "path": "packages/langchain_firebase/.gitignore",
    "content": "# Miscellaneous\n*.class\n*.log\n*.pyc\n*.swp\n.DS_Store\n.atom/\n.buildlog/\n.history\n.svn/\nmigrate_working_dir/\n\n# IntelliJ related\n*.iml\n*.ipr\n*.iws\n.idea/\n\n# The .vscode folder contains launch configuration and tasks you configure in\n# VS Code which you may wish to be included in version control, so this line\n# is commented out by default.\n#.vscode/\n\n# Flutter/Dart/Pub related\n**/doc/api/\n**/ios/Flutter/.last_build_id\n.dart_tool/\n.flutter-plugins\n.flutter-plugins-dependencies\n.packages\n.pub-cache/\n.pub/\n/build/\n/example/build/\n\n# Symbolication related\napp.*.symbols\n\n# Obfuscation related\napp.*.map.json\n\n# Android Studio will place build artifacts here\n/android/app/debug\n/android/app/profile\n/android/app/release\n"
  },
  {
    "path": "packages/langchain_firebase/CHANGELOG.md",
    "content": "## 0.3.2\n\n - **FEAT**: Fix formatting issues ([#922](https://github.com/davidmigloz/langchain_dart/issues/922)). ([62bca9da](https://github.com/davidmigloz/langchain_dart/commit/62bca9da1abc4a64267c2d3085ad969cad33f4d6))\n\n## 0.3.1+1\n\n - **FIX**(langchain_firebase): Remove invalid FinishReason.malformedFunctionCall case ([#911](https://github.com/davidmigloz/langchain_dart/issues/911)). ([569e9cc5](https://github.com/davidmigloz/langchain_dart/commit/569e9cc53f3cf884f4a5c2bd5d56f081a9c39ad0))\n\n## 0.3.1\n\n - **FIX**(langchain_firebase): Handle malformedFunctionCall finish reason ([#842](https://github.com/davidmigloz/langchain_dart/issues/842)). ([d6eef0dd](https://github.com/davidmigloz/langchain_dart/commit/d6eef0ddb0c8a5436d830e0487218373ff6dbbce))\n - **FEAT**(langchain_firebase): Migrate to firebase_ai and add Google AI backend support ([#909](https://github.com/davidmigloz/langchain_dart/issues/909)). ([3be47d26](https://github.com/davidmigloz/langchain_dart/commit/3be47d261d1fd5c45c58bf84420d6fa37dc0c9c7))\n\n## 0.3.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.3.0\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.2.2+4\n\n - **FIX**: depend_on_referenced_packages error ([#772](https://github.com/davidmigloz/langchain_dart/issues/772)). ([ef57d530](https://github.com/davidmigloz/langchain_dart/commit/ef57d5303331c7cb85fdb077a50e040a819ec94e))\n\n## 0.2.2+3\n\n - **FIX**: Breaking change in firebase_vertexai package ([#770](https://github.com/davidmigloz/langchain_dart/issues/770)). 
([6a21546e](https://github.com/davidmigloz/langchain_dart/commit/6a21546e889956cc8e0f9282073757e8aa2abeb3))\n\n## 0.2.2+2\n\n - **FIX**: Batch sequential tool responses in GoogleAI & Firebase VertexAI ([#757](https://github.com/davidmigloz/langchain_dart/issues/757)). ([8ff44486](https://github.com/davidmigloz/langchain_dart/commit/8ff4448665d26b49c1e1077d0822703e7d853d39))\n\n## 0.2.2+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.2.2\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Migrate firebase_vertexai dep to 1.4.0 ([#663](https://github.com/davidmigloz/langchain_dart/issues/663)). ([4fca38c5](https://github.com/davidmigloz/langchain_dart/commit/4fca38c5599c4c5a058ece1a7d9c4e276b716432))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.2.1+4\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.2.1+3\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). 
([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.2.1+2\n\n - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). ([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d))\n\n## 0.2.1+1\n\n - Update a dependency to the latest release.\n\n## 0.2.1\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.2.0\n\n> Note: `ChatFirebaseVertexAI` now uses `gemini-1.5-flash` model by default. \n\n - **BREAKING** **FEAT**: Update ChatFirebaseVertexAI default model to gemini-1.5-flash ([#458](https://github.com/davidmigloz/langchain_dart/issues/458)). ([d3c96c52](https://github.com/davidmigloz/langchain_dart/commit/d3c96c52e95e889ba6955e3de80a83978b27618b))\n - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n - **FEAT**: Support response MIME type in ChatFirebaseVertexAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)) ([#463](https://github.com/davidmigloz/langchain_dart/issues/463)). 
([c3452721](https://github.com/davidmigloz/langchain_dart/commit/c3452721c78ba3071ed2510a243f9c824a291c34))\n - **FEAT**: Add support for Firebase Auth in ChatFirebaseVertexAI ([#460](https://github.com/davidmigloz/langchain_dart/issues/460)). ([6d137290](https://github.com/davidmigloz/langchain_dart/commit/6d137290ca0f56c9fcc725e6211e838a3e3c6d16))\n - **FEAT**: Add support for usage metadata in ChatFirebaseVertexAI ([#457](https://github.com/davidmigloz/langchain_dart/issues/457)). ([2587f9e2](https://github.com/davidmigloz/langchain_dart/commit/2587f9e2bcbcc2bf5e2295dce409e92a89bf3c44))\n - **REFACTOR**: Simplify how tools are passed to the internal Firebase client ([#459](https://github.com/davidmigloz/langchain_dart/issues/459)). ([7f772396](https://github.com/davidmigloz/langchain_dart/commit/7f77239601fb216a01ec9d25680ec4d3dc4b97c7))\n\n## 0.1.0+3\n\n - Update a dependency to the latest release.\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - **DOCS**: Fix lint issues in langchain_firebase example. ([f85a6ad7](https://github.com/davidmigloz/langchain_dart/commit/f85a6ad755e00c513bd4349663e33d40be8a696c))\n\n## 0.1.0\n\n - **FEAT**: Add support for ChatFirebaseVertexAI ([#422](https://github.com/davidmigloz/langchain_dart/issues/422)). ([8d0786bc](https://github.com/davidmigloz/langchain_dart/commit/8d0786bc6228ce86de962d30e9c2cc9728a08f3f))\n - **DOCS**: Add Gemini 1.5 Flash to models list ([#423](https://github.com/davidmigloz/langchain_dart/issues/423)). ([40f4c9de](https://github.com/davidmigloz/langchain_dart/commit/40f4c9de9c25804e298fd481c80f8c52d53302fb))\n\n## 0.0.1-dev.1\n\n- Bootstrap `langchain_firebase` package.\n"
  },
  {
    "path": "packages/langchain_firebase/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_firebase/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Firebase\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_firebase](https://img.shields.io/pub/v/langchain_firebase.svg)](https://pub.dev/packages/langchain_firebase)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nFirebase module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Chat models:\n  * `ChatFirebaseVertexAI`: wrapper around [Vertex AI for Firebase](https://firebase.google.com/docs/vertex-ai) API (Gemini).\n\n> Note: GCP VertexAI (`ChatVertexAI`) and Google AI for Developers (`ChatGoogleGenerativeAI`) are available in the [`langchain_google`](https://pub.dev/packages/langchain_google) package.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_firebase/example/README.md",
    "content": "# firebase_ai_example\n\nExample project to show how to use the Firebase integration module for LangChain.dart.\n\nThis example project demonstrates how to use Firebase AI (Vertex AI / Google AI backends) with LangChain.dart.\n\n## Getting Started\n\nThis project is a starting point for a Flutter application.\n\nA few resources to get you started if this is your first Flutter project:\n\n- [Lab: Write your first Flutter app](https://docs.flutter.dev/get-started/codelab)\n- [Cookbook: Useful Flutter samples](https://docs.flutter.dev/cookbook)\n\nFor help getting started with Flutter development, view the\n[online documentation](https://docs.flutter.dev/), which offers tutorials,\nsamples, guidance on mobile development, and a full API reference.\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/.gitignore",
    "content": "gradle-wrapper.jar\n/.gradle\n/captures/\n/gradlew\n/gradlew.bat\n/local.properties\nGeneratedPluginRegistrant.java\n\n# Remember to never publicly share your keystore.\n# See https://flutter.dev/docs/deployment/android#reference-the-keystore-from-the-app\nkey.properties\n**/*.keystore\n**/*.jks\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/build.gradle",
    "content": "def localProperties = new Properties()\ndef localPropertiesFile = rootProject.file('local.properties')\nif (localPropertiesFile.exists()) {\n    localPropertiesFile.withReader('UTF-8') { reader ->\n        localProperties.load(reader)\n    }\n}\n\ndef flutterRoot = localProperties.getProperty('flutter.sdk')\nif (flutterRoot == null) {\n    throw new GradleException(\"Flutter SDK not found. Define location with flutter.sdk in the local.properties file.\")\n}\n\ndef flutterVersionCode = localProperties.getProperty('flutter.versionCode')\nif (flutterVersionCode == null) {\n    flutterVersionCode = '1'\n}\n\ndef flutterVersionName = localProperties.getProperty('flutter.versionName')\nif (flutterVersionName == null) {\n    flutterVersionName = '1.0'\n}\n\napply plugin: 'com.android.application'\n// START: FlutterFire Configuration\napply plugin: 'com.google.gms.google-services'\n// END: FlutterFire Configuration\napply from: \"$flutterRoot/packages/flutter_tools/gradle/flutter.gradle\"\n\nandroid {\n    namespace \"com.example.example\"\n\n    compileSdk 33\n\n    defaultConfig {\n        applicationId \"com.example.example\"\n        minSdk 21\n        targetSdk 33\n        versionCode flutterVersionCode.toInteger()\n        versionName flutterVersionName\n    }\n\n    buildFeatures {\n        buildConfig true\n    }\n\n    buildTypes {\n        release {\n            // TODO: Add your own signing config for the release build.\n            // Signing with the debug keys for now, so `flutter run --release` works.\n            signingConfig signingConfigs.debug\n        }\n    }\n}\n\nflutter {\n    source '../..'\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/debug/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <!-- The INTERNET permission is required for development. Specifically,\n         the Flutter tool needs it to communicate with the running application\n         to allow setting breakpoints, to provide hot reload, etc.\n    -->\n    <uses-permission android:name=\"android.permission.INTERNET\"/>\n</manifest>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <application\n        android:label=\"example\"\n        android:name=\"${applicationName}\"\n        android:icon=\"@mipmap/ic_launcher\">\n        <activity\n            android:name=\".MainActivity\"\n            android:exported=\"true\"\n            android:launchMode=\"singleTop\"\n            android:theme=\"@style/LaunchTheme\"\n            android:configChanges=\"orientation|keyboardHidden|keyboard|screenSize|smallestScreenSize|locale|layoutDirection|fontScale|screenLayout|density|uiMode\"\n            android:hardwareAccelerated=\"true\"\n            android:windowSoftInputMode=\"adjustResize\">\n            <!-- Specifies an Android theme to apply to this Activity as soon as\n                 the Android process has started. This theme is visible to the user\n                 while the Flutter UI initializes. After that, this theme continues\n                 to determine the Window background behind the Flutter UI. 
-->\n            <meta-data\n              android:name=\"io.flutter.embedding.android.NormalTheme\"\n              android:resource=\"@style/NormalTheme\"\n              />\n            <intent-filter>\n                <action android:name=\"android.intent.action.MAIN\"/>\n                <category android:name=\"android.intent.category.LAUNCHER\"/>\n            </intent-filter>\n        </activity>\n        <!-- Don't delete the meta-data below.\n             This is used by the Flutter tool to generate GeneratedPluginRegistrant.java -->\n        <meta-data\n            android:name=\"flutterEmbedding\"\n            android:value=\"2\" />\n    </application>\n    <!-- Required to query activities that can process text, see:\n         https://developer.android.com/training/package-visibility?hl=en and\n         https://developer.android.com/reference/android/content/Intent#ACTION_PROCESS_TEXT.\n\n         In particular, this is used by the Flutter engine in io.flutter.plugin.text.ProcessTextPlugin. -->\n    <queries>\n        <intent>\n            <action android:name=\"android.intent.action.PROCESS_TEXT\"/>\n            <data android:mimeType=\"text/plain\"/>\n        </intent>\n    </queries>\n</manifest>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/kotlin/com/example/example/MainActivity.kt",
    "content": "package com.example.example\n\nimport io.flutter.embedding.android.FlutterActivity\n\nclass MainActivity: FlutterActivity()\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/res/drawable/launch_background.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Modify this file to customize your launch splash screen -->\n<layer-list xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <item android:drawable=\"@android:color/white\" />\n\n    <!-- You can insert your own image assets here -->\n    <!-- <item>\n        <bitmap\n            android:gravity=\"center\"\n            android:src=\"@mipmap/launch_image\" />\n    </item> -->\n</layer-list>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/res/drawable-v21/launch_background.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Modify this file to customize your launch splash screen -->\n<layer-list xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <item android:drawable=\"?android:colorBackground\" />\n\n    <!-- You can insert your own image assets here -->\n    <!-- <item>\n        <bitmap\n            android:gravity=\"center\"\n            android:src=\"@mipmap/launch_image\" />\n    </item> -->\n</layer-list>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/res/values/styles.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <!-- Theme applied to the Android Window while the process is starting when the OS's Dark Mode setting is off -->\n    <style name=\"LaunchTheme\" parent=\"@android:style/Theme.Light.NoTitleBar\">\n        <!-- Show a splash screen on the activity. Automatically removed when\n             the Flutter engine draws its first frame -->\n        <item name=\"android:windowBackground\">@drawable/launch_background</item>\n    </style>\n    <!-- Theme applied to the Android Window as soon as the process has started.\n         This theme determines the color of the Android Window while your\n         Flutter UI initializes, as well as behind your Flutter UI while its\n         running.\n\n         This Theme is only used starting with V2 of Flutter's Android embedding. -->\n    <style name=\"NormalTheme\" parent=\"@android:style/Theme.Light.NoTitleBar\">\n        <item name=\"android:windowBackground\">?android:colorBackground</item>\n    </style>\n</resources>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/main/res/values-night/styles.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <!-- Theme applied to the Android Window while the process is starting when the OS's Dark Mode setting is on -->\n    <style name=\"LaunchTheme\" parent=\"@android:style/Theme.Black.NoTitleBar\">\n        <!-- Show a splash screen on the activity. Automatically removed when\n             the Flutter engine draws its first frame -->\n        <item name=\"android:windowBackground\">@drawable/launch_background</item>\n    </style>\n    <!-- Theme applied to the Android Window as soon as the process has started.\n         This theme determines the color of the Android Window while your\n         Flutter UI initializes, as well as behind your Flutter UI while its\n         running.\n\n         This Theme is only used starting with V2 of Flutter's Android embedding. -->\n    <style name=\"NormalTheme\" parent=\"@android:style/Theme.Black.NoTitleBar\">\n        <item name=\"android:windowBackground\">?android:colorBackground</item>\n    </style>\n</resources>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/app/src/profile/AndroidManifest.xml",
    "content": "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <!-- The INTERNET permission is required for development. Specifically,\n         the Flutter tool needs it to communicate with the running application\n         to allow setting breakpoints, to provide hot reload, etc.\n    -->\n    <uses-permission android:name=\"android.permission.INTERNET\"/>\n</manifest>\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/build.gradle",
    "content": "buildscript {\n    repositories {\n        google()\n        mavenCentral()\n    }\n\n    dependencies {\n        classpath 'com.android.tools.build:gradle:8.1.2'\n        // START: FlutterFire Configuration\n        classpath 'com.google.gms:google-services:4.4.0'\n        // END: FlutterFire Configuration\n    }\n}\n\nallprojects {\n    repositories {\n        google()\n        mavenCentral()\n    }\n}\n\nrootProject.buildDir = '../build'\nsubprojects {\n    project.buildDir = \"${rootProject.buildDir}/${project.name}\"\n}\nsubprojects {\n    project.evaluationDependsOn(':app')\n}\n\ntasks.register(\"clean\", Delete) {\n    delete rootProject.buildDir\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/gradle/wrapper/gradle-wrapper.properties",
    "content": "distributionBase=GRADLE_USER_HOME\ndistributionPath=wrapper/dists\nzipStoreBase=GRADLE_USER_HOME\nzipStorePath=wrapper/dists\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-7.6.3-all.zip\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/gradle.properties",
    "content": "org.gradle.jvmargs=-Xmx4G\nandroid.useAndroidX=true\nandroid.enableJetifier=true\n"
  },
  {
    "path": "packages/langchain_firebase/example/android/settings.gradle",
    "content": "pluginManagement {\n    def flutterSdkPath = {\n        def properties = new Properties()\n        file(\"local.properties\").withInputStream { properties.load(it) }\n        def flutterSdkPath = properties.getProperty(\"flutter.sdk\")\n        assert flutterSdkPath != null, \"flutter.sdk not set in local.properties\"\n        return flutterSdkPath\n    }\n    settings.ext.flutterSdkPath = flutterSdkPath()\n\n    includeBuild(\"${settings.ext.flutterSdkPath}/packages/flutter_tools/gradle\")\n\n    repositories {\n        google()\n        mavenCentral()\n        gradlePluginPortal()\n    }\n}\n\nplugins {\n    id \"dev.flutter.flutter-plugin-loader\" version \"1.0.0\"\n    id \"com.android.application\" version \"7.3.0\" apply false\n    id \"org.jetbrains.kotlin.android\" version \"1.7.10\" apply false\n}\n\ninclude \":app\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/.gitignore",
    "content": "**/dgph\n*.mode1v3\n*.mode2v3\n*.moved-aside\n*.pbxuser\n*.perspectivev3\n**/*sync/\n.sconsign.dblite\n.tags*\n**/.vagrant/\n**/DerivedData/\nIcon?\n**/Pods/\n**/.symlinks/\nprofile\nxcuserdata\n**/.generated/\nFlutter/App.framework\nFlutter/Flutter.framework\nFlutter/Flutter.podspec\nFlutter/Generated.xcconfig\nFlutter/ephemeral/\nFlutter/app.flx\nFlutter/app.zip\nFlutter/flutter_assets/\nFlutter/flutter_export_environment.sh\nServiceDefinitions.json\nRunner/GeneratedPluginRegistrant.*\n\n# Exceptions to above rules.\n!default.mode1v3\n!default.mode2v3\n!default.pbxuser\n!default.perspectivev3\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Flutter/AppFrameworkInfo.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n  <key>CFBundleDevelopmentRegion</key>\n  <string>en</string>\n  <key>CFBundleExecutable</key>\n  <string>App</string>\n  <key>CFBundleIdentifier</key>\n  <string>io.flutter.flutter.app</string>\n  <key>CFBundleInfoDictionaryVersion</key>\n  <string>6.0</string>\n  <key>CFBundleName</key>\n  <string>App</string>\n  <key>CFBundlePackageType</key>\n  <string>FMWK</string>\n  <key>CFBundleShortVersionString</key>\n  <string>1.0</string>\n  <key>CFBundleSignature</key>\n  <string>????</string>\n  <key>CFBundleVersion</key>\n  <string>1.0</string>\n  <key>MinimumOSVersion</key>\n  <string>12.0</string>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Flutter/Debug.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"\n#include \"Generated.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Flutter/Release.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"\n#include \"Generated.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Podfile",
    "content": "# Uncomment this line to define a global platform for your project\n# platform :ios, '12.0'\n\n# CocoaPods analytics sends network stats synchronously affecting flutter build latency.\nENV['COCOAPODS_DISABLE_STATS'] = 'true'\n\nproject 'Runner', {\n  'Debug' => :debug,\n  'Profile' => :release,\n  'Release' => :release,\n}\n\ndef flutter_root\n  generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'Generated.xcconfig'), __FILE__)\n  unless File.exist?(generated_xcode_build_settings_path)\n    raise \"#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure flutter pub get is executed first\"\n  end\n\n  File.foreach(generated_xcode_build_settings_path) do |line|\n    matches = line.match(/FLUTTER_ROOT\\=(.*)/)\n    return matches[1].strip if matches\n  end\n  raise \"FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Generated.xcconfig, then run flutter pub get\"\nend\n\nrequire File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)\n\nflutter_ios_podfile_setup\n\ntarget 'Runner' do\n  use_frameworks!\n  use_modular_headers!\n\n  flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__))\n  target 'RunnerTests' do\n    inherit! :search_paths\n  end\nend\n\npost_install do |installer|\n  installer.pods_project.targets.each do |target|\n    flutter_additional_ios_build_settings(target)\n  end\nend\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/AppDelegate.swift",
    "content": "import UIKit\nimport Flutter\n\n@UIApplicationMain\n@objc class AppDelegate: FlutterAppDelegate {\n  override func application(\n    _ application: UIApplication,\n    didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?\n  ) -> Bool {\n    GeneratedPluginRegistrant.register(with: self)\n    return super.application(application, didFinishLaunchingWithOptions: launchOptions)\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-20x20@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-20x20@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-29x29@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-40x40@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-40x40@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"60x60\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-60x60@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"60x60\",\n      \"idiom\" : \"iphone\",\n      \"filename\" : \"Icon-App-60x60@3x.png\",\n      \"scale\" : \"3x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-20x20@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"20x20\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-20x20@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-29x29@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"29x29\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-29x29@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      
\"size\" : \"40x40\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-40x40@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"40x40\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-40x40@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"76x76\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-76x76@1x.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"76x76\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-76x76@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"83.5x83.5\",\n      \"idiom\" : \"ipad\",\n      \"filename\" : \"Icon-App-83.5x83.5@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"1024x1024\",\n      \"idiom\" : \"ios-marketing\",\n      \"filename\" : \"Icon-App-1024x1024@1x.png\",\n      \"scale\" : \"1x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Assets.xcassets/LaunchImage.imageset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage@2x.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"idiom\" : \"universal\",\n      \"filename\" : \"LaunchImage@3x.png\",\n      \"scale\" : \"3x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Assets.xcassets/LaunchImage.imageset/README.md",
    "content": "# Launch Screen Assets\n\nYou can customize the launch screen with your own desired assets by replacing the image files in this directory.\n\nYou can also do it by opening your Flutter project's Xcode project with `open ios/Runner.xcworkspace`, selecting `Runner/Assets.xcassets` in the Project Navigator and dropping in the desired images."
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Base.lproj/LaunchScreen.storyboard",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<document type=\"com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB\" version=\"3.0\" toolsVersion=\"12121\" systemVersion=\"16G29\" targetRuntime=\"iOS.CocoaTouch\" propertyAccessControl=\"none\" useAutolayout=\"YES\" launchScreen=\"YES\" colorMatched=\"YES\" initialViewController=\"01J-lp-oVM\">\n    <dependencies>\n        <deployment identifier=\"iOS\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.IBCocoaTouchPlugin\" version=\"12089\"/>\n    </dependencies>\n    <scenes>\n        <!--View Controller-->\n        <scene sceneID=\"EHf-IW-A2E\">\n            <objects>\n                <viewController id=\"01J-lp-oVM\" sceneMemberID=\"viewController\">\n                    <layoutGuides>\n                        <viewControllerLayoutGuide type=\"top\" id=\"Ydg-fD-yQy\"/>\n                        <viewControllerLayoutGuide type=\"bottom\" id=\"xbc-2k-c8Z\"/>\n                    </layoutGuides>\n                    <view key=\"view\" contentMode=\"scaleToFill\" id=\"Ze5-6b-2t3\">\n                        <autoresizingMask key=\"autoresizingMask\" widthSizable=\"YES\" heightSizable=\"YES\"/>\n                        <subviews>\n                            <imageView opaque=\"NO\" clipsSubviews=\"YES\" multipleTouchEnabled=\"YES\" contentMode=\"center\" image=\"LaunchImage\" translatesAutoresizingMaskIntoConstraints=\"NO\" id=\"YRO-k0-Ey4\">\n                            </imageView>\n                        </subviews>\n                        <color key=\"backgroundColor\" red=\"1\" green=\"1\" blue=\"1\" alpha=\"1\" colorSpace=\"custom\" customColorSpace=\"sRGB\"/>\n                        <constraints>\n                            <constraint firstItem=\"YRO-k0-Ey4\" firstAttribute=\"centerX\" secondItem=\"Ze5-6b-2t3\" secondAttribute=\"centerX\" id=\"1a2-6s-vTC\"/>\n                            <constraint firstItem=\"YRO-k0-Ey4\" firstAttribute=\"centerY\" 
secondItem=\"Ze5-6b-2t3\" secondAttribute=\"centerY\" id=\"4X2-HB-R7a\"/>\n                        </constraints>\n                    </view>\n                </viewController>\n                <placeholder placeholderIdentifier=\"IBFirstResponder\" id=\"iYj-Kq-Ea1\" userLabel=\"First Responder\" sceneMemberID=\"firstResponder\"/>\n            </objects>\n            <point key=\"canvasLocation\" x=\"53\" y=\"375\"/>\n        </scene>\n    </scenes>\n    <resources>\n        <image name=\"LaunchImage\" width=\"168\" height=\"185\"/>\n    </resources>\n</document>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Base.lproj/Main.storyboard",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<document type=\"com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB\" version=\"3.0\" toolsVersion=\"10117\" systemVersion=\"15F34\" targetRuntime=\"iOS.CocoaTouch\" propertyAccessControl=\"none\" useAutolayout=\"YES\" useTraitCollections=\"YES\" initialViewController=\"BYZ-38-t0r\">\n    <dependencies>\n        <deployment identifier=\"iOS\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.IBCocoaTouchPlugin\" version=\"10085\"/>\n    </dependencies>\n    <scenes>\n        <!--Flutter View Controller-->\n        <scene sceneID=\"tne-QT-ifu\">\n            <objects>\n                <viewController id=\"BYZ-38-t0r\" customClass=\"FlutterViewController\" sceneMemberID=\"viewController\">\n                    <layoutGuides>\n                        <viewControllerLayoutGuide type=\"top\" id=\"y3c-jy-aDJ\"/>\n                        <viewControllerLayoutGuide type=\"bottom\" id=\"wfy-db-euE\"/>\n                    </layoutGuides>\n                    <view key=\"view\" contentMode=\"scaleToFill\" id=\"8bC-Xf-vdC\">\n                        <rect key=\"frame\" x=\"0.0\" y=\"0.0\" width=\"600\" height=\"600\"/>\n                        <autoresizingMask key=\"autoresizingMask\" widthSizable=\"YES\" heightSizable=\"YES\"/>\n                        <color key=\"backgroundColor\" white=\"1\" alpha=\"1\" colorSpace=\"custom\" customColorSpace=\"calibratedWhite\"/>\n                    </view>\n                </viewController>\n                <placeholder placeholderIdentifier=\"IBFirstResponder\" id=\"dkx-z0-nzr\" sceneMemberID=\"firstResponder\"/>\n            </objects>\n        </scene>\n    </scenes>\n</document>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>$(DEVELOPMENT_LANGUAGE)</string>\n\t<key>CFBundleDisplayName</key>\n\t<string>Example</string>\n\t<key>CFBundleExecutable</key>\n\t<string>$(EXECUTABLE_NAME)</string>\n\t<key>CFBundleIdentifier</key>\n\t<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundleName</key>\n\t<string>example</string>\n\t<key>CFBundlePackageType</key>\n\t<string>APPL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>$(FLUTTER_BUILD_NAME)</string>\n\t<key>CFBundleSignature</key>\n\t<string>????</string>\n\t<key>CFBundleVersion</key>\n\t<string>$(FLUTTER_BUILD_NUMBER)</string>\n\t<key>LSRequiresIPhoneOS</key>\n\t<true/>\n\t<key>UILaunchStoryboardName</key>\n\t<string>LaunchScreen</string>\n\t<key>UIMainStoryboardFile</key>\n\t<string>Main</string>\n\t<key>UISupportedInterfaceOrientations</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t</array>\n\t<key>UISupportedInterfaceOrientations~ipad</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationPortraitUpsideDown</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t</array>\n\t<key>CADisableMinimumFrameDurationOnPhone</key>\n\t<true/>\n\t<key>UIApplicationSupportsIndirectInputEvents</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner/Runner-Bridging-Header.h",
    "content": "#import \"GeneratedPluginRegistrant.h\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 54;\n\tobjects = {\n\n/* Begin PBXBuildFile section */\n\t\t1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */ = {isa = PBXBuildFile; fileRef = 1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */; };\n\t\t331C808B294A63AB00263BE5 /* RunnerTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 331C807B294A618700263BE5 /* RunnerTests.swift */; };\n\t\t3414F5B6C6F086F6373F1948 /* GoogleService-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 5F1FA05866A2D0FCA3287B20 /* GoogleService-Info.plist */; };\n\t\t3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */ = {isa = PBXBuildFile; fileRef = 3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */; };\n\t\t74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 74858FAE1ED2DC5600515810 /* AppDelegate.swift */; };\n\t\t901FEC83A38129064032C578 /* Pods_Runner.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 94CE5BFCDF90764354BB6740 /* Pods_Runner.framework */; };\n\t\t97C146FC1CF9000F007C117D /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FA1CF9000F007C117D /* Main.storyboard */; };\n\t\t97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FD1CF9000F007C117D /* Assets.xcassets */; };\n\t\t97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */; };\n\t\tB7B3CA2D70F15615E1B8E5D8 /* Pods_RunnerTests.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 154D9627A1C14A5ACE0B7B0D /* Pods_RunnerTests.framework */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t331C8085294A63A400263BE5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 97C146E61CF9000F007C117D /* Project object 
*/;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 97C146ED1CF9000F007C117D;\n\t\t\tremoteInfo = Runner;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXCopyFilesBuildPhase section */\n\t\t9705A1C41CF9048500538489 /* Embed Frameworks */ = {\n\t\t\tisa = PBXCopyFilesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tdstPath = \"\";\n\t\t\tdstSubfolderSpec = 10;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tname = \"Embed Frameworks\";\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXCopyFilesBuildPhase section */\n\n/* Begin PBXFileReference section */\n\t\t1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GeneratedPluginRegistrant.h; sourceTree = \"<group>\"; };\n\t\t1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GeneratedPluginRegistrant.m; sourceTree = \"<group>\"; };\n\t\t154D9627A1C14A5ACE0B7B0D /* Pods_RunnerTests.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_RunnerTests.framework; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t232D95ECCEC6F04B9CEC8925 /* Pods-Runner.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.release.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t331C807B294A618700263BE5 /* RunnerTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RunnerTests.swift; sourceTree = \"<group>\"; };\n\t\t331C8081294A63A400263BE5 /* RunnerTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = RunnerTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = text.plist.xml; name = AppFrameworkInfo.plist; path = Flutter/AppFrameworkInfo.plist; sourceTree = \"<group>\"; };\n\t\t560CA017EC76D8AAE2E21549 /* Pods-Runner.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.profile.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.profile.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t5F1FA05866A2D0FCA3287B20 /* GoogleService-Info.plist */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.plist.xml; name = \"GoogleService-Info.plist\"; path = \"Runner/GoogleService-Info.plist\"; sourceTree = \"<group>\"; };\n\t\t74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = \"Runner-Bridging-Header.h\"; sourceTree = \"<group>\"; };\n\t\t74858FAE1ED2DC5600515810 /* AppDelegate.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = \"<group>\"; };\n\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; name = Release.xcconfig; path = Flutter/Release.xcconfig; sourceTree = \"<group>\"; };\n\t\t8ACDC47C7E9AF1A1B9595598 /* Pods-RunnerTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.release.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t94CE5BFCDF90764354BB6740 /* Pods_Runner.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_Runner.framework; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Debug.xcconfig; path = Flutter/Debug.xcconfig; sourceTree = \"<group>\"; 
};\n\t\t9740EEB31CF90195004384FC /* Generated.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = Generated.xcconfig; path = Flutter/Generated.xcconfig; sourceTree = \"<group>\"; };\n\t\t97C146EE1CF9000F007C117D /* Runner.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Runner.app; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t97C146FB1CF9000F007C117D /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = \"<group>\"; };\n\t\t97C146FD1CF9000F007C117D /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = \"<group>\"; };\n\t\t97C147001CF9000F007C117D /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = \"<group>\"; };\n\t\t97C147021CF9000F007C117D /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = \"<group>\"; };\n\t\tA85D07EF8959748E1D3E564B /* Pods-RunnerTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.debug.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.debug.xcconfig\"; sourceTree = \"<group>\"; };\n\t\tB0B22A9E291076BD22BA9F10 /* Pods-RunnerTests.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.profile.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.profile.xcconfig\"; sourceTree = \"<group>\"; };\n\t\tE1D0571EA0792087F8F27457 /* Pods-Runner.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.debug.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"; 
sourceTree = \"<group>\"; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t0F5F3CD1ED7DB09B81C92173 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\tB7B3CA2D70F15615E1B8E5D8 /* Pods_RunnerTests.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t97C146EB1CF9000F007C117D /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t901FEC83A38129064032C578 /* Pods_Runner.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t331C8082294A63A400263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t331C807B294A618700263BE5 /* RunnerTests.swift */,\n\t\t\t);\n\t\t\tpath = RunnerTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t3C3B3E8596675CC144D1BD5B /* Pods */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\tE1D0571EA0792087F8F27457 /* Pods-Runner.debug.xcconfig */,\n\t\t\t\t232D95ECCEC6F04B9CEC8925 /* Pods-Runner.release.xcconfig */,\n\t\t\t\t560CA017EC76D8AAE2E21549 /* Pods-Runner.profile.xcconfig */,\n\t\t\t\tA85D07EF8959748E1D3E564B /* Pods-RunnerTests.debug.xcconfig */,\n\t\t\t\t8ACDC47C7E9AF1A1B9595598 /* Pods-RunnerTests.release.xcconfig */,\n\t\t\t\tB0B22A9E291076BD22BA9F10 /* Pods-RunnerTests.profile.xcconfig */,\n\t\t\t);\n\t\t\tname = Pods;\n\t\t\tpath = Pods;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t9740EEB11CF90186004384FC /* Flutter */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t3B3967151E833CAA004F5970 /* AppFrameworkInfo.plist */,\n\t\t\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */,\n\t\t\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */,\n\t\t\t\t9740EEB31CF90195004384FC /* Generated.xcconfig */,\n\t\t\t);\n\t\t\tname = Flutter;\n\t\t\tsourceTree = 
\"<group>\";\n\t\t};\n\t\t97C146E51CF9000F007C117D = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t9740EEB11CF90186004384FC /* Flutter */,\n\t\t\t\t97C146F01CF9000F007C117D /* Runner */,\n\t\t\t\t97C146EF1CF9000F007C117D /* Products */,\n\t\t\t\t331C8082294A63A400263BE5 /* RunnerTests */,\n\t\t\t\t5F1FA05866A2D0FCA3287B20 /* GoogleService-Info.plist */,\n\t\t\t\t3C3B3E8596675CC144D1BD5B /* Pods */,\n\t\t\t\tA50BECFB61A452F592070BAA /* Frameworks */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146EF1CF9000F007C117D /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146EE1CF9000F007C117D /* Runner.app */,\n\t\t\t\t331C8081294A63A400263BE5 /* RunnerTests.xctest */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146F01CF9000F007C117D /* Runner */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146FA1CF9000F007C117D /* Main.storyboard */,\n\t\t\t\t97C146FD1CF9000F007C117D /* Assets.xcassets */,\n\t\t\t\t97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */,\n\t\t\t\t97C147021CF9000F007C117D /* Info.plist */,\n\t\t\t\t1498D2321E8E86230040F4C2 /* GeneratedPluginRegistrant.h */,\n\t\t\t\t1498D2331E8E89220040F4C2 /* GeneratedPluginRegistrant.m */,\n\t\t\t\t74858FAE1ED2DC5600515810 /* AppDelegate.swift */,\n\t\t\t\t74858FAD1ED2DC5600515810 /* Runner-Bridging-Header.h */,\n\t\t\t);\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\tA50BECFB61A452F592070BAA /* Frameworks */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t94CE5BFCDF90764354BB6740 /* Pods_Runner.framework */,\n\t\t\t\t154D9627A1C14A5ACE0B7B0D /* Pods_RunnerTests.framework */,\n\t\t\t);\n\t\t\tname = Frameworks;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t331C8080294A63A400263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 331C8087294A63A400263BE5 /* Build configuration list for 
PBXNativeTarget \"RunnerTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\tF5C7CFE0E232B64D613F0623 /* [CP] Check Pods Manifest.lock */,\n\t\t\t\t331C807D294A63A400263BE5 /* Sources */,\n\t\t\t\t331C807F294A63A400263BE5 /* Resources */,\n\t\t\t\t0F5F3CD1ED7DB09B81C92173 /* Frameworks */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t331C8086294A63A400263BE5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = RunnerTests;\n\t\t\tproductName = RunnerTests;\n\t\t\tproductReference = 331C8081294A63A400263BE5 /* RunnerTests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.unit-test\";\n\t\t};\n\t\t97C146ED1CF9000F007C117D /* Runner */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget \"Runner\" */;\n\t\t\tbuildPhases = (\n\t\t\t\tF51794D56D63ACA383D5C2E4 /* [CP] Check Pods Manifest.lock */,\n\t\t\t\t9740EEB61CF901F6004384FC /* Run Script */,\n\t\t\t\t97C146EA1CF9000F007C117D /* Sources */,\n\t\t\t\t97C146EB1CF9000F007C117D /* Frameworks */,\n\t\t\t\t97C146EC1CF9000F007C117D /* Resources */,\n\t\t\t\t9705A1C41CF9048500538489 /* Embed Frameworks */,\n\t\t\t\t3B06AD1E1E4923F5004D2608 /* Thin Binary */,\n\t\t\t\t123ADD1BD119276C98000FAF /* [CP] Embed Pods Frameworks */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = Runner;\n\t\t\tproductName = Runner;\n\t\t\tproductReference = 97C146EE1CF9000F007C117D /* Runner.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t97C146E61CF9000F007C117D /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tBuildIndependentTargetsInParallel = YES;\n\t\t\t\tLastUpgradeCheck = 1510;\n\t\t\t\tORGANIZATIONNAME = \"\";\n\t\t\t\tTargetAttributes = {\n\t\t\t\t\t331C8080294A63A400263BE5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 14.0;\n\t\t\t\t\t\tTestTargetID = 
97C146ED1CF9000F007C117D;\n\t\t\t\t\t};\n\t\t\t\t\t97C146ED1CF9000F007C117D = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 7.3.1;\n\t\t\t\t\t\tLastSwiftMigration = 1100;\n\t\t\t\t\t};\n\t\t\t\t};\n\t\t\t};\n\t\t\tbuildConfigurationList = 97C146E91CF9000F007C117D /* Build configuration list for PBXProject \"Runner\" */;\n\t\t\tcompatibilityVersion = \"Xcode 9.3\";\n\t\t\tdevelopmentRegion = en;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t\tBase,\n\t\t\t);\n\t\t\tmainGroup = 97C146E51CF9000F007C117D;\n\t\t\tproductRefGroup = 97C146EF1CF9000F007C117D /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = (\n\t\t\t\t97C146ED1CF9000F007C117D /* Runner */,\n\t\t\t\t331C8080294A63A400263BE5 /* RunnerTests */,\n\t\t\t);\n\t\t};\n/* End PBXProject section */\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t331C807F294A63A400263BE5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t97C146EC1CF9000F007C117D /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t97C147011CF9000F007C117D /* LaunchScreen.storyboard in Resources */,\n\t\t\t\t3B3967161E833CAA004F5970 /* AppFrameworkInfo.plist in Resources */,\n\t\t\t\t97C146FE1CF9000F007C117D /* Assets.xcassets in Resources */,\n\t\t\t\t97C146FC1CF9000F007C117D /* Main.storyboard in Resources */,\n\t\t\t\t3414F5B6C6F086F6373F1948 /* GoogleService-Info.plist in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXShellScriptBuildPhase section */\n\t\t123ADD1BD119276C98000FAF /* [CP] Embed Pods Frameworks */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t\t\"${PODS_ROOT}/Target Support 
Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-input-files.xcfilelist\",\n\t\t\t);\n\t\t\tname = \"[CP] Embed Pods Frameworks\";\n\t\t\toutputFileListPaths = (\n\t\t\t\t\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-output-files.xcfilelist\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"\\\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n\t\t3B06AD1E1E4923F5004D2608 /* Thin Binary */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${TARGET_BUILD_DIR}/${INFOPLIST_PATH}\",\n\t\t\t);\n\t\t\tname = \"Thin Binary\";\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"/bin/sh \\\"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\\\" embed_and_thin\";\n\t\t};\n\t\t9740EEB61CF901F6004384FC /* Run Script */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t);\n\t\t\tname = \"Run Script\";\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"/bin/sh \\\"$FLUTTER_ROOT/packages/flutter_tools/bin/xcode_backend.sh\\\" build\";\n\t\t};\n\t\tF51794D56D63ACA383D5C2E4 /* [CP] Check Pods Manifest.lock */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\",\n\t\t\t\t\"${PODS_ROOT}/Manifest.lock\",\n\t\t\t);\n\t\t\tname = \"[CP] Check Pods Manifest.lock\";\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = 
(\n\t\t\t\t\"$(DERIVED_FILE_DIR)/Pods-Runner-checkManifestLockResult.txt\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"diff \\\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\\\" \\\"${PODS_ROOT}/Manifest.lock\\\" > /dev/null\\nif [ $? != 0 ] ; then\\n    # print error to STDERR\\n    echo \\\"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\\\" >&2\\n    exit 1\\nfi\\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\\necho \\\"SUCCESS\\\" > \\\"${SCRIPT_OUTPUT_FILE_0}\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n\t\tF5C7CFE0E232B64D613F0623 /* [CP] Check Pods Manifest.lock */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\",\n\t\t\t\t\"${PODS_ROOT}/Manifest.lock\",\n\t\t\t);\n\t\t\tname = \"[CP] Check Pods Manifest.lock\";\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t\t\"$(DERIVED_FILE_DIR)/Pods-RunnerTests-checkManifestLockResult.txt\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"diff \\\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\\\" \\\"${PODS_ROOT}/Manifest.lock\\\" > /dev/null\\nif [ $? != 0 ] ; then\\n    # print error to STDERR\\n    echo \\\"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\\\" >&2\\n    exit 1\\nfi\\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\\necho \\\"SUCCESS\\\" > \\\"${SCRIPT_OUTPUT_FILE_0}\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n/* End PBXShellScriptBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t331C807D294A63A400263BE5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t331C808B294A63AB00263BE5 /* RunnerTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t97C146EA1CF9000F007C117D /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t74858FAF1ED2DC5600515810 /* AppDelegate.swift in Sources */,\n\t\t\t\t1498D2341E8E89220040F4C2 /* GeneratedPluginRegistrant.m in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t331C8086294A63A400263BE5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 97C146ED1CF9000F007C117D /* Runner */;\n\t\t\ttargetProxy = 331C8085294A63A400263BE5 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin PBXVariantGroup section */\n\t\t97C146FA1CF9000F007C117D /* Main.storyboard */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t97C146FB1CF9000F007C117D /* Base */,\n\t\t\t);\n\t\t\tname = Main.storyboard;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t97C146FF1CF9000F007C117D /* LaunchScreen.storyboard */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t97C147001CF9000F007C117D /* Base */,\n\t\t\t);\n\t\t\tname = LaunchScreen.storyboard;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXVariantGroup section */\n\n/* Begin XCBuildConfiguration section */\n\t\t249021D3217E4FDB00AE95B9 /* Profile */ = {\n\t\t\tisa = 
XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = 
NO;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tSUPPORTED_PLATFORMS = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tVALIDATE_PRODUCT = YES;\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t249021D4217E4FDB00AE95B9 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t331C8088294A63A400263BE5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = A85D07EF8959748E1D3E564B /* Pods-RunnerTests.debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t331C8089294A63A400263BE5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 8ACDC47C7E9AF1A1B9595598 /* Pods-RunnerTests.release.xcconfig 
*/;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t331C808A294A63A400263BE5 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = B0B22A9E291076BD22BA9F10 /* Pods-RunnerTests.profile.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/Runner.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Runner\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t97C147031CF9000F007C117D /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = 
YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = dwarf;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_TESTABILITY = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = YES;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t97C147041CF9000F007C117D /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = 
YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_COMMA = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_STRICT_PROTOTYPES = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCLANG_WARN_UNREACHABLE_CODE = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNDECLARED_SELECTOR = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 12.0;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tSUPPORTED_PLATFORMS = iphoneos;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tVALIDATE_PRODUCT = YES;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t97C147061CF9000F007C117D /* Debug */ = {\n\t\t\tisa = 
XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t97C147071CF9000F007C117D /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCURRENT_PROJECT_VERSION = \"$(FLUTTER_BUILD_NUMBER)\";\n\t\t\t\tENABLE_BITCODE = NO;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/Frameworks\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_OBJC_BRIDGING_HEADER = \"Runner/Runner-Bridging-Header.h\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tVERSIONING_SYSTEM = \"apple-generic\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t331C8087294A63A400263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t331C8088294A63A400263BE5 /* Debug */,\n\t\t\t\t331C8089294A63A400263BE5 /* 
Release */,\n\t\t\t\t331C808A294A63A400263BE5 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t97C146E91CF9000F007C117D /* Build configuration list for PBXProject \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t97C147031CF9000F007C117D /* Debug */,\n\t\t\t\t97C147041CF9000F007C117D /* Release */,\n\t\t\t\t249021D3217E4FDB00AE95B9 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t97C147051CF9000F007C117D /* Build configuration list for PBXNativeTarget \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t97C147061CF9000F007C117D /* Debug */,\n\t\t\t\t97C147071CF9000F007C117D /* Release */,\n\t\t\t\t249021D4217E4FDB00AE95B9 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\t};\n\trootObject = 97C146E61CF9000F007C117D /* Project object */;\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcodeproj/project.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"self:\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>PreviewsEnabled</key>\n\t<false/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Scheme\n   LastUpgradeVersion = \"1510\"\n   version = \"1.3\">\n   <BuildAction\n      parallelizeBuildables = \"YES\"\n      buildImplicitDependencies = \"YES\">\n      <BuildActionEntries>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n               BuildableName = \"Runner.app\"\n               BlueprintName = \"Runner\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            </BuildableReference>\n         </BuildActionEntry>\n      </BuildActionEntries>\n   </BuildAction>\n   <TestAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\">\n      <MacroExpansion>\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </MacroExpansion>\n      <Testables>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"331C8080294A63A400263BE5\"\n               BuildableName = \"RunnerTests.xctest\"\n               BlueprintName = \"RunnerTests\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            
</BuildableReference>\n         </TestableReference>\n      </Testables>\n   </TestAction>\n   <LaunchAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      launchStyle = \"0\"\n      useCustomWorkingDirectory = \"NO\"\n      ignoresPersistentStateOnLaunch = \"NO\"\n      debugDocumentVersioning = \"YES\"\n      debugServiceExtension = \"internal\"\n      allowLocationSimulation = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n      <CommandLineArguments>\n         <CommandLineArgument\n            argument = \"-FIRDebugEnabled\"\n            isEnabled = \"YES\">\n         </CommandLineArgument>\n      </CommandLineArguments>\n   </LaunchAction>\n   <ProfileAction\n      buildConfiguration = \"Profile\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      savedToolIdentifier = \"\"\n      useCustomWorkingDirectory = \"NO\"\n      debugDocumentVersioning = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"97C146ED1CF9000F007C117D\"\n            BuildableName = \"Runner.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </ProfileAction>\n   <AnalyzeAction\n      buildConfiguration = \"Debug\">\n   </AnalyzeAction>\n   <ArchiveAction\n      
buildConfiguration = \"Release\"\n      revealArchiveInOrganizer = \"YES\">\n   </ArchiveAction>\n</Scheme>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"group:Runner.xcodeproj\">\n   </FileRef>\n   <FileRef\n      location = \"group:Pods/Pods.xcodeproj\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/Runner.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>PreviewsEnabled</key>\n\t<false/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/ios/firebase_app_id_file.json",
    "content": "{\n  \"file_generated_by\": \"FlutterFire CLI\",\n  \"purpose\": \"FirebaseAppID & ProjectID for this Firebase app in this directory\",\n  \"GOOGLE_APP_ID\": \"1:651313571784:ios:2f1472905da3e8e9b1c2fd\",\n  \"FIREBASE_PROJECT_ID\": \"vertex-ai-example-ef5a2\",\n  \"GCM_SENDER_ID\": \"651313571784\"\n}"
  },
  {
    "path": "packages/langchain_firebase/example/lib/main.dart",
    "content": "// ignore_for_file: public_member_api_docs, avoid_print, unnecessary_async\n// Copyright 2024 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nimport 'dart:convert';\n\nimport 'package:firebase_core/firebase_core.dart';\nimport 'package:flutter/material.dart';\nimport 'package:flutter/services.dart';\nimport 'package:flutter_markdown/flutter_markdown.dart';\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_firebase/langchain_firebase.dart';\n\nvoid main() async {\n  await initFirebase();\n  runApp(const GenerativeAISample());\n}\n\nFuture<void> initFirebase() async {\n  await Firebase.initializeApp(\n    // Replace these values with your own Firebase project configuration\n    options: const FirebaseOptions(\n      apiKey: 'apiKey',\n      appId: 'appId',\n      projectId: 'projectId',\n      storageBucket: 'storageBucket',\n      messagingSenderId: 'messagingSenderId',\n    ),\n  );\n}\n\nclass GenerativeAISample extends StatelessWidget {\n  const GenerativeAISample({super.key});\n\n  @override\n  Widget build(BuildContext context) {\n    return MaterialApp(\n      title: 'Flutter + Firebase Vertex AI + LangChain.dart',\n      theme: ThemeData(\n        colorScheme: ColorScheme.fromSeed(\n          brightness: Brightness.dark,\n          seedColor: const Color.fromARGB(255, 171, 222, 244),\n        ),\n        useMaterial3: true,\n      ),\n      home: const ChatScreen(\n        title: 'Flutter + 
Firebase Vertex AI + LangChain.dart',\n      ),\n    );\n  }\n}\n\nclass ChatScreen extends StatefulWidget {\n  const ChatScreen({super.key, required this.title});\n\n  final String title;\n\n  @override\n  State<ChatScreen> createState() => _ChatScreenState();\n}\n\nclass _ChatScreenState extends State<ChatScreen> {\n  @override\n  Widget build(BuildContext context) {\n    return Scaffold(\n      appBar: AppBar(title: Text(widget.title)),\n      body: const ChatWidget(),\n    );\n  }\n}\n\nclass ChatWidget extends StatefulWidget {\n  const ChatWidget({super.key});\n\n  @override\n  State<ChatWidget> createState() => _ChatWidgetState();\n}\n\nclass _ChatWidgetState extends State<ChatWidget> {\n  late final ChatFirebaseVertexAI _model;\n  late final RunnableSequence<ChatMessage, ChatResult> _chain;\n  late final ConversationBufferMemory _memory;\n  late final Tool exchangeRateTool;\n\n  final _scrollController = ScrollController();\n  final _textController = TextEditingController();\n  final _textFieldFocus = FocusNode();\n  final _generatedContent = <({Image? image, String? 
text, bool fromUser})>[];\n  var _loading = false;\n\n  @override\n  void initState() {\n    super.initState();\n    _memory = ConversationBufferMemory(returnMessages: true);\n    exchangeRateTool = Tool.fromFunction(\n      name: 'findExchangeRate',\n      description:\n          'Returns the exchange rate between currencies on given date.',\n      inputJsonSchema: {\n        'type': 'object',\n        'properties': {\n          'currencyDate': {\n            'type': 'string',\n            'description':\n                'A date in YYYY-MM-DD format or '\n                'the exact value \"latest\" if a time period is not specified.',\n          },\n          'currencyFrom': {\n            'type': 'string',\n            'description':\n                'The currency code of the currency to convert from, '\n                'such as \"USD\".',\n          },\n          'currencyTo': {\n            'type': 'string',\n            'description':\n                'The currency code of the currency to convert to, '\n                'such as \"USD\".',\n          },\n        },\n        'required': ['currencyDate', 'currencyFrom', 'currencyTo'],\n      },\n      func: (Map<String, Object?> input) async => {\n        // This hypothetical API returns a JSON such as:\n        // {\"base\":\"USD\",\"date\":\"2024-04-17\",\"rates\":{\"SEK\": 0.091}}\n        'date': input['currencyDate'],\n        'base': input['currencyFrom'],\n        'rates': {input['currencyTo']! 
as String: 0.091},\n      },\n    );\n    final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n      (ChatMessageType.system, 'You are a helpful assistant.'),\n      (ChatMessageType.messagesPlaceholder, 'history'),\n      (ChatMessageType.messagePlaceholder, 'input'),\n    ]);\n    final baseChain = Runnable.mapInput(\n      (ChatMessage input) async => {\n        'input': input,\n        ...await _memory.loadMemoryVariables(),\n      },\n    ).pipe(promptTemplate);\n\n    _model = ChatFirebaseVertexAI(\n      defaultOptions: ChatFirebaseVertexAIOptions(\n        model: 'gemini-1.5-pro',\n        tools: [exchangeRateTool],\n      ),\n      // location: 'us-central1',\n    );\n    _chain = baseChain.pipe(_model);\n  }\n\n  void _scrollDown() {\n    WidgetsBinding.instance.addPostFrameCallback(\n      (_) async => _scrollController.animateTo(\n        _scrollController.position.maxScrollExtent,\n        duration: const Duration(milliseconds: 750),\n        curve: Curves.easeOutCirc,\n      ),\n    );\n  }\n\n  @override\n  Widget build(BuildContext context) {\n    final textFieldDecoration = InputDecoration(\n      contentPadding: const EdgeInsets.all(15),\n      hintText: 'Enter a prompt...',\n      border: OutlineInputBorder(\n        borderRadius: const BorderRadius.all(Radius.circular(14)),\n        borderSide: BorderSide(color: Theme.of(context).colorScheme.secondary),\n      ),\n      focusedBorder: OutlineInputBorder(\n        borderRadius: const BorderRadius.all(Radius.circular(14)),\n        borderSide: BorderSide(color: Theme.of(context).colorScheme.secondary),\n      ),\n    );\n\n    return Padding(\n      padding: const EdgeInsets.all(8),\n      child: Column(\n        mainAxisAlignment: MainAxisAlignment.center,\n        crossAxisAlignment: CrossAxisAlignment.start,\n        children: [\n          Expanded(\n            child: ListView.builder(\n              controller: _scrollController,\n              itemBuilder: (context, idx) {\n      
          final content = _generatedContent[idx];\n                return MessageWidget(\n                  text: content.text,\n                  image: content.image,\n                  isFromUser: content.fromUser,\n                );\n              },\n              itemCount: _generatedContent.length,\n            ),\n          ),\n          Padding(\n            padding: const EdgeInsets.symmetric(vertical: 25, horizontal: 15),\n            child: Row(\n              children: [\n                Expanded(\n                  child: TextField(\n                    autofocus: true,\n                    focusNode: _textFieldFocus,\n                    decoration: textFieldDecoration,\n                    controller: _textController,\n                    onSubmitted: _sendChatMessage,\n                  ),\n                ),\n                const SizedBox.square(dimension: 15),\n                IconButton(\n                  tooltip: 'tokenCount Test',\n                  onPressed: !_loading\n                      ? () async {\n                          await _testCountToken();\n                        }\n                      : null,\n                  icon: Icon(\n                    Icons.numbers,\n                    color: _loading\n                        ? Theme.of(context).colorScheme.secondary\n                        : Theme.of(context).colorScheme.primary,\n                  ),\n                ),\n                IconButton(\n                  tooltip: 'function calling Test',\n                  onPressed: !_loading\n                      ? () async {\n                          await _testFunctionCalling();\n                        }\n                      : null,\n                  icon: Icon(\n                    Icons.functions,\n                    color: _loading\n                        ? 
Theme.of(context).colorScheme.secondary\n                        : Theme.of(context).colorScheme.primary,\n                  ),\n                ),\n                IconButton(\n                  tooltip: 'image prompt',\n                  onPressed: !_loading\n                      ? () async {\n                          await _sendImagePrompt(_textController.text);\n                        }\n                      : null,\n                  icon: Icon(\n                    Icons.image,\n                    color: _loading\n                        ? Theme.of(context).colorScheme.secondary\n                        : Theme.of(context).colorScheme.primary,\n                  ),\n                ),\n                IconButton(\n                  tooltip: 'storage prompt',\n                  onPressed: !_loading\n                      ? () async {\n                          await _sendStorageUriPrompt(_textController.text);\n                        }\n                      : null,\n                  icon: Icon(\n                    Icons.folder,\n                    color: _loading\n                        ? 
Theme.of(context).colorScheme.secondary\n                        : Theme.of(context).colorScheme.primary,\n                  ),\n                ),\n                if (!_loading)\n                  IconButton(\n                    onPressed: () async {\n                      await _sendChatMessage(_textController.text);\n                    },\n                    icon: Icon(\n                      Icons.send,\n                      color: Theme.of(context).colorScheme.primary,\n                    ),\n                  )\n                else\n                  const CircularProgressIndicator(),\n              ],\n            ),\n          ),\n        ],\n      ),\n    );\n  }\n\n  Future<void> _sendStorageUriPrompt(String message) async {\n    setState(() {\n      _loading = true;\n    });\n    try {\n      final chatMessage = ChatMessage.human(\n        ChatMessageContent.multiModal([\n          ChatMessageContent.text(message),\n          ChatMessageContent.image(\n            mimeType: 'image/jpeg',\n            data: 'gs://vertex-ai-example-ef5a2.appspot.com/foodpic.jpg',\n          ),\n        ]),\n      );\n\n      _generatedContent.add((image: null, text: message, fromUser: true));\n\n      final response = await _chain.invoke(chatMessage);\n      final text = response.output.content;\n      _generatedContent.add((image: null, text: text, fromUser: false));\n\n      if (text.isEmpty) {\n        await _showError('No response from API.');\n        return;\n      } else {\n        setState(() {\n          _loading = false;\n          _scrollDown();\n        });\n      }\n    } catch (e) {\n      await _showError(e.toString());\n      setState(() {\n        _loading = false;\n      });\n    } finally {\n      _textController.clear();\n      setState(() {\n        _loading = false;\n      });\n      _textFieldFocus.requestFocus();\n    }\n  }\n\n  Future<void> _sendImagePrompt(String message) async {\n    setState(() {\n      _loading = true;\n    });\n    try 
{\n      final ByteData catBytes = await rootBundle.load('assets/images/cat.jpg');\n      final ByteData sconeBytes = await rootBundle.load(\n        'assets/images/scones.jpg',\n      );\n      final chatMessage = ChatMessage.human(\n        ChatMessageContent.multiModal([\n          ChatMessageContent.text(message),\n          ChatMessageContent.image(\n            mimeType: 'image/jpeg',\n            data: base64Encode(catBytes.buffer.asUint8List()),\n          ),\n          ChatMessageContent.image(\n            mimeType: 'image/jpeg',\n            data: base64Encode(sconeBytes.buffer.asUint8List()),\n          ),\n        ]),\n      );\n\n      _generatedContent\n        ..add((\n          image: Image.asset('assets/images/cat.jpg'),\n          text: message,\n          fromUser: true,\n        ))\n        ..add((\n          image: Image.asset('assets/images/scones.jpg'),\n          text: null,\n          fromUser: true,\n        ));\n\n      final response = await _chain.invoke(chatMessage);\n\n      final text = response.output.content;\n      _generatedContent.add((image: null, text: text, fromUser: false));\n\n      if (text.isEmpty) {\n        await _showError('No response from API.');\n        return;\n      } else {\n        await _memory.saveContext(\n          inputValues: {'input': chatMessage},\n          outputValues: {'output': response.output},\n        );\n        setState(() {\n          _loading = false;\n          _scrollDown();\n        });\n      }\n    } catch (e) {\n      await _showError(e.toString());\n      setState(() {\n        _loading = false;\n      });\n    } finally {\n      _textController.clear();\n      setState(() {\n        _loading = false;\n      });\n      _textFieldFocus.requestFocus();\n    }\n  }\n\n  Future<void> _sendChatMessage(String message) async {\n    setState(() {\n      _textController.clear();\n      _loading = true;\n    });\n\n    try {\n      final chatMessage = ChatMessage.humanText(message);\n\n      
_generatedContent.add((image: null, text: message, fromUser: true));\n      final response = await _chain.invoke(chatMessage);\n      final text = response.output.content;\n      _generatedContent.add((image: null, text: text, fromUser: false));\n\n      if (text.isEmpty) {\n        await _showError('No response from API.');\n        return;\n      } else {\n        await _memory.saveContext(\n          inputValues: {'input': chatMessage},\n          outputValues: {'output': response.output},\n        );\n        setState(() {\n          _loading = false;\n          _scrollDown();\n        });\n      }\n    } catch (e) {\n      await _showError(e.toString());\n      setState(() {\n        _loading = false;\n      });\n    } finally {\n      _textController.clear();\n      setState(() {\n        _loading = false;\n      });\n      _textFieldFocus.requestFocus();\n    }\n  }\n\n  Future<void> _testFunctionCalling() async {\n    setState(() {\n      _loading = true;\n    });\n    final chatMessage = ChatMessage.humanText(\n      'How much is 50 US dollars worth in Swedish krona?',\n    );\n\n    // Send the message to the generative model.\n    var response = await _chain.invoke(chatMessage);\n    await _memory.saveContext(\n      inputValues: {'input': chatMessage},\n      outputValues: {'output': response.output},\n    );\n\n    final toolCalls = response.output.toolCalls;\n    // When the model response with a function call, invoke the function.\n    if (toolCalls.isNotEmpty) {\n      final toolCall = toolCalls.first;\n      final result = switch (toolCall.name) {\n        // Forward arguments to the hypothetical API.\n        'findExchangeRate' => await exchangeRateTool.invoke(toolCall.arguments),\n        // Throw an exception if the model attempted to call a function that was\n        // not declared.\n        _ => throw UnimplementedError(\n          'Function not implemented: ${toolCall.name}',\n        ),\n      };\n      // Send the response to the model so 
that it can use the result to generate\n      // text for the user.\n      final toolMessage = ChatMessage.tool(\n        toolCallId: toolCall.id,\n        content: jsonEncode(result),\n      );\n\n      response = await _chain.invoke(toolMessage);\n      await _memory.saveContext(\n        inputValues: {'input': chatMessage},\n        outputValues: {'output': response.output},\n      );\n    }\n    // When the model responds with non-null text content, print it.\n    if (response.output.content.isNotEmpty) {\n      _generatedContent.add((\n        image: null,\n        text: response.output.content,\n        fromUser: false,\n      ));\n      setState(() {\n        _loading = false;\n      });\n    }\n  }\n\n  Future<void> _testCountToken() async {\n    setState(() {\n      _loading = true;\n    });\n\n    const prompt = 'tell a short story';\n    final response = await _model.countTokens(PromptValue.string(prompt));\n    print('token: $response');\n\n    setState(() {\n      _loading = false;\n    });\n  }\n\n  Future<void> _showError(String message) async {\n    await showDialog<void>(\n      context: context,\n      builder: (context) {\n        return AlertDialog(\n          title: const Text('Something went wrong'),\n          content: SingleChildScrollView(child: SelectableText(message)),\n          actions: [\n            TextButton(\n              onPressed: () {\n                Navigator.of(context).pop();\n              },\n              child: const Text('OK'),\n            ),\n          ],\n        );\n      },\n    );\n  }\n}\n\nclass MessageWidget extends StatelessWidget {\n  final Image? image;\n  final String? text;\n  final bool isFromUser;\n\n  const MessageWidget({\n    super.key,\n    this.image,\n    this.text,\n    required this.isFromUser,\n  });\n\n  @override\n  Widget build(BuildContext context) {\n    return Row(\n      mainAxisAlignment: isFromUser\n          ? 
MainAxisAlignment.end\n          : MainAxisAlignment.start,\n      children: [\n        Flexible(\n          child: Container(\n            constraints: const BoxConstraints(maxWidth: 600),\n            decoration: BoxDecoration(\n              color: isFromUser\n                  ? Theme.of(context).colorScheme.primaryContainer\n                  : Theme.of(context).colorScheme.surfaceContainerHighest,\n              borderRadius: BorderRadius.circular(18),\n            ),\n            padding: const EdgeInsets.symmetric(vertical: 15, horizontal: 20),\n            margin: const EdgeInsets.only(bottom: 8),\n            child: Column(\n              children: [\n                if (text case final text?) MarkdownBody(data: text),\n                if (image case final image?) image,\n              ],\n            ),\n          ),\n        ),\n      ],\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/.gitignore",
    "content": "# Flutter-related\n**/Flutter/ephemeral/\n**/Pods/\n\n# Xcode-related\n**/dgph\n**/xcuserdata/\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Flutter/Flutter-Debug.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"\n#include \"ephemeral/Flutter-Generated.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Flutter/Flutter-Release.xcconfig",
    "content": "#include? \"Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"\n#include \"ephemeral/Flutter-Generated.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Flutter/GeneratedPluginRegistrant.swift",
    "content": "//\n//  Generated file. Do not edit.\n//\n\nimport FlutterMacOS\nimport Foundation\n\nimport firebase_app_check\nimport firebase_auth\nimport firebase_core\n\nfunc RegisterGeneratedPlugins(registry: FlutterPluginRegistry) {\n  FLTFirebaseAppCheckPlugin.register(with: registry.registrar(forPlugin: \"FLTFirebaseAppCheckPlugin\"))\n  FLTFirebaseAuthPlugin.register(with: registry.registrar(forPlugin: \"FLTFirebaseAuthPlugin\"))\n  FLTFirebaseCorePlugin.register(with: registry.registrar(forPlugin: \"FLTFirebaseCorePlugin\"))\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Podfile",
    "content": "platform :osx, '10.15'\n\n# CocoaPods analytics sends network stats synchronously affecting flutter build latency.\nENV['COCOAPODS_DISABLE_STATS'] = 'true'\n\nproject 'Runner', {\n  'Debug' => :debug,\n  'Profile' => :release,\n  'Release' => :release,\n}\n\ndef flutter_root\n  generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'ephemeral', 'Flutter-Generated.xcconfig'), __FILE__)\n  unless File.exist?(generated_xcode_build_settings_path)\n    raise \"#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure \\\"flutter pub get\\\" is executed first\"\n  end\n\n  File.foreach(generated_xcode_build_settings_path) do |line|\n    matches = line.match(/FLUTTER_ROOT\\=(.*)/)\n    return matches[1].strip if matches\n  end\n  raise \"FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Flutter-Generated.xcconfig, then run \\\"flutter pub get\\\"\"\nend\n\nrequire File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)\n\nflutter_macos_podfile_setup\n\ntarget 'Runner' do\n  use_frameworks!\n  use_modular_headers!\n\n  flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__))\n  target 'RunnerTests' do\n    inherit! :search_paths\n  end\nend\n\npost_install do |installer|\n  installer.pods_project.targets.each do |target|\n    flutter_additional_macos_build_settings(target)\n  end\nend\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/AppDelegate.swift",
    "content": "import Cocoa\nimport FlutterMacOS\n\n@NSApplicationMain\nclass AppDelegate: FlutterAppDelegate {\n  override func applicationShouldTerminateAfterLastWindowClosed(_ sender: NSApplication) -> Bool {\n    return true\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json",
    "content": "{\n  \"images\" : [\n    {\n      \"size\" : \"16x16\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_16.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"16x16\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_32.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"32x32\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_32.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"32x32\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_64.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"128x128\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_128.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"128x128\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_256.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"256x256\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_256.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"256x256\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_512.png\",\n      \"scale\" : \"2x\"\n    },\n    {\n      \"size\" : \"512x512\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_512.png\",\n      \"scale\" : \"1x\"\n    },\n    {\n      \"size\" : \"512x512\",\n      \"idiom\" : \"mac\",\n      \"filename\" : \"app_icon_1024.png\",\n      \"scale\" : \"2x\"\n    }\n  ],\n  \"info\" : {\n    \"version\" : 1,\n    \"author\" : \"xcode\"\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Base.lproj/MainMenu.xib",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<document type=\"com.apple.InterfaceBuilder3.Cocoa.XIB\" version=\"3.0\" toolsVersion=\"14490.70\" targetRuntime=\"MacOSX.Cocoa\" propertyAccessControl=\"none\" useAutolayout=\"YES\" customObjectInstantitationMethod=\"direct\">\n    <dependencies>\n        <deployment identifier=\"macosx\"/>\n        <plugIn identifier=\"com.apple.InterfaceBuilder.CocoaPlugin\" version=\"14490.70\"/>\n        <capability name=\"documents saved in the Xcode 8 format\" minToolsVersion=\"8.0\"/>\n    </dependencies>\n    <objects>\n        <customObject id=\"-2\" userLabel=\"File's Owner\" customClass=\"NSApplication\">\n            <connections>\n                <outlet property=\"delegate\" destination=\"Voe-Tx-rLC\" id=\"GzC-gU-4Uq\"/>\n            </connections>\n        </customObject>\n        <customObject id=\"-1\" userLabel=\"First Responder\" customClass=\"FirstResponder\"/>\n        <customObject id=\"-3\" userLabel=\"Application\" customClass=\"NSObject\"/>\n        <customObject id=\"Voe-Tx-rLC\" customClass=\"AppDelegate\" customModule=\"Runner\" customModuleProvider=\"target\">\n            <connections>\n                <outlet property=\"applicationMenu\" destination=\"uQy-DD-JDr\" id=\"XBo-yE-nKs\"/>\n                <outlet property=\"mainFlutterWindow\" destination=\"QvC-M9-y7g\" id=\"gIp-Ho-8D9\"/>\n            </connections>\n        </customObject>\n        <customObject id=\"YLy-65-1bz\" customClass=\"NSFontManager\"/>\n        <menu title=\"Main Menu\" systemMenu=\"main\" id=\"AYu-sK-qS6\">\n            <items>\n                <menuItem title=\"APP_NAME\" id=\"1Xt-HY-uBw\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"APP_NAME\" systemMenu=\"apple\" id=\"uQy-DD-JDr\">\n                        <items>\n                            <menuItem title=\"About APP_NAME\" id=\"5kV-Vb-QxS\">\n                                
<modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"orderFrontStandardAboutPanel:\" target=\"-1\" id=\"Exp-CZ-Vem\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"VOq-y0-SEH\"/>\n                            <menuItem title=\"Preferences…\" keyEquivalent=\",\" id=\"BOF-NM-1cW\"/>\n                            <menuItem isSeparatorItem=\"YES\" id=\"wFC-TO-SCJ\"/>\n                            <menuItem title=\"Services\" id=\"NMo-om-nkz\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Services\" systemMenu=\"services\" id=\"hz9-B4-Xy5\"/>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"4je-JR-u6R\"/>\n                            <menuItem title=\"Hide APP_NAME\" keyEquivalent=\"h\" id=\"Olw-nP-bQN\">\n                                <connections>\n                                    <action selector=\"hide:\" target=\"-1\" id=\"PnN-Uc-m68\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Hide Others\" keyEquivalent=\"h\" id=\"Vdr-fp-XzO\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"hideOtherApplications:\" target=\"-1\" id=\"VT4-aY-XCT\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Show All\" id=\"Kd2-mp-pUS\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n             
                       <action selector=\"unhideAllApplications:\" target=\"-1\" id=\"Dhg-Le-xox\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"kCx-OE-vgT\"/>\n                            <menuItem title=\"Quit APP_NAME\" keyEquivalent=\"q\" id=\"4sb-4s-VLi\">\n                                <connections>\n                                    <action selector=\"terminate:\" target=\"-1\" id=\"Te7-pn-YzF\"/>\n                                </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Edit\" id=\"5QF-Oa-p0T\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Edit\" id=\"W48-6f-4Dl\">\n                        <items>\n                            <menuItem title=\"Undo\" keyEquivalent=\"z\" id=\"dRJ-4n-Yzg\">\n                                <connections>\n                                    <action selector=\"undo:\" target=\"-1\" id=\"M6e-cu-g7V\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Redo\" keyEquivalent=\"Z\" id=\"6dh-zS-Vam\">\n                                <connections>\n                                    <action selector=\"redo:\" target=\"-1\" id=\"oIA-Rs-6OD\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"WRV-NI-Exz\"/>\n                            <menuItem title=\"Cut\" keyEquivalent=\"x\" id=\"uRl-iY-unG\">\n                                <connections>\n                                    <action selector=\"cut:\" target=\"-1\" id=\"YJe-68-I9s\"/>\n                                </connections>\n                            
</menuItem>\n                            <menuItem title=\"Copy\" keyEquivalent=\"c\" id=\"x3v-GG-iWU\">\n                                <connections>\n                                    <action selector=\"copy:\" target=\"-1\" id=\"G1f-GL-Joy\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Paste\" keyEquivalent=\"v\" id=\"gVA-U4-sdL\">\n                                <connections>\n                                    <action selector=\"paste:\" target=\"-1\" id=\"UvS-8e-Qdg\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Paste and Match Style\" keyEquivalent=\"V\" id=\"WeT-3V-zwk\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"pasteAsPlainText:\" target=\"-1\" id=\"cEh-KX-wJQ\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Delete\" id=\"pa3-QI-u2k\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"delete:\" target=\"-1\" id=\"0Mk-Ml-PaM\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Select All\" keyEquivalent=\"a\" id=\"Ruw-6m-B2m\">\n                                <connections>\n                                    <action selector=\"selectAll:\" target=\"-1\" id=\"VNm-Mi-diN\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"uyl-h8-XO2\"/>\n                            <menuItem 
title=\"Find\" id=\"4EN-yA-p0u\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Find\" id=\"1b7-l0-nxx\">\n                                    <items>\n                                        <menuItem title=\"Find…\" tag=\"1\" keyEquivalent=\"f\" id=\"Xz5-n4-O0W\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"cD7-Qs-BN4\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find and Replace…\" tag=\"12\" keyEquivalent=\"f\" id=\"YEy-JH-Tfz\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\" option=\"YES\" command=\"YES\"/>\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"WD3-Gg-5AJ\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find Next\" tag=\"2\" keyEquivalent=\"g\" id=\"q09-fT-Sye\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"NDo-RZ-v9R\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Find Previous\" tag=\"3\" keyEquivalent=\"G\" id=\"OwM-mh-QMV\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"HOh-sY-3ay\"/>\n                                            
</connections>\n                                        </menuItem>\n                                        <menuItem title=\"Use Selection for Find\" tag=\"7\" keyEquivalent=\"e\" id=\"buJ-ug-pKt\">\n                                            <connections>\n                                                <action selector=\"performFindPanelAction:\" target=\"-1\" id=\"U76-nv-p5D\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Jump to Selection\" keyEquivalent=\"j\" id=\"S0p-oC-mLd\">\n                                            <connections>\n                                                <action selector=\"centerSelectionInVisibleArea:\" target=\"-1\" id=\"IOG-6D-g5B\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem title=\"Spelling and Grammar\" id=\"Dv1-io-Yv7\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Spelling\" id=\"3IN-sU-3Bg\">\n                                    <items>\n                                        <menuItem title=\"Show Spelling and Grammar\" keyEquivalent=\":\" id=\"HFo-cy-zxI\">\n                                            <connections>\n                                                <action selector=\"showGuessPanel:\" target=\"-1\" id=\"vFj-Ks-hy3\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Check Document Now\" keyEquivalent=\";\" id=\"hz2-CU-CR7\">\n                                            <connections>\n                                        
        <action selector=\"checkSpelling:\" target=\"-1\" id=\"fz7-VC-reM\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem isSeparatorItem=\"YES\" id=\"bNw-od-mp5\"/>\n                                        <menuItem title=\"Check Spelling While Typing\" id=\"rbD-Rh-wIN\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleContinuousSpellChecking:\" target=\"-1\" id=\"7w6-Qz-0kB\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Check Grammar With Spelling\" id=\"mK6-2p-4JG\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleGrammarChecking:\" target=\"-1\" id=\"muD-Qn-j4w\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Correct Spelling Automatically\" id=\"78Y-hA-62v\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticSpellingCorrection:\" target=\"-1\" id=\"2lM-Qi-WAP\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem 
title=\"Substitutions\" id=\"9ic-FL-obx\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Substitutions\" id=\"FeM-D8-WVr\">\n                                    <items>\n                                        <menuItem title=\"Show Substitutions\" id=\"z6F-FW-3nz\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"orderFrontSubstitutionsPanel:\" target=\"-1\" id=\"oku-mr-iSq\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem isSeparatorItem=\"YES\" id=\"gPx-C9-uUO\"/>\n                                        <menuItem title=\"Smart Copy/Paste\" id=\"9yt-4B-nSM\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleSmartInsertDelete:\" target=\"-1\" id=\"3IJ-Se-DZD\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Quotes\" id=\"hQb-2v-fYv\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticQuoteSubstitution:\" target=\"-1\" id=\"ptq-xd-QOA\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Dashes\" id=\"rgM-f4-ycn\">\n                                            
<modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticDashSubstitution:\" target=\"-1\" id=\"oCt-pO-9gS\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Smart Links\" id=\"cwL-P1-jid\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticLinkDetection:\" target=\"-1\" id=\"Gip-E3-Fov\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Data Detectors\" id=\"tRr-pd-1PS\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticDataDetection:\" target=\"-1\" id=\"R1I-Nq-Kbl\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Text Replacement\" id=\"HFQ-gK-NFA\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"toggleAutomaticTextReplacement:\" target=\"-1\" id=\"DvP-Fe-Py6\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                       
     <menuItem title=\"Transformations\" id=\"2oI-Rn-ZJC\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Transformations\" id=\"c8a-y6-VQd\">\n                                    <items>\n                                        <menuItem title=\"Make Upper Case\" id=\"vmV-6d-7jI\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"uppercaseWord:\" target=\"-1\" id=\"sPh-Tk-edu\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Make Lower Case\" id=\"d9M-CD-aMd\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"lowercaseWord:\" target=\"-1\" id=\"iUZ-b5-hil\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Capitalize\" id=\"UEZ-Bs-lqG\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"capitalizeWord:\" target=\"-1\" id=\"26H-TL-nsh\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                            <menuItem title=\"Speech\" id=\"xrE-MZ-jX0\">\n                                <modifierMask 
key=\"keyEquivalentModifierMask\"/>\n                                <menu key=\"submenu\" title=\"Speech\" id=\"3rS-ZA-NoH\">\n                                    <items>\n                                        <menuItem title=\"Start Speaking\" id=\"Ynk-f8-cLZ\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"startSpeaking:\" target=\"-1\" id=\"654-Ng-kyl\"/>\n                                            </connections>\n                                        </menuItem>\n                                        <menuItem title=\"Stop Speaking\" id=\"Oyz-dy-DGm\">\n                                            <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                            <connections>\n                                                <action selector=\"stopSpeaking:\" target=\"-1\" id=\"dX8-6p-jy9\"/>\n                                            </connections>\n                                        </menuItem>\n                                    </items>\n                                </menu>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"View\" id=\"H8h-7b-M4v\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"View\" id=\"HyV-fh-RgO\">\n                        <items>\n                            <menuItem title=\"Enter Full Screen\" keyEquivalent=\"f\" id=\"4J7-dP-txa\">\n                                <modifierMask key=\"keyEquivalentModifierMask\" control=\"YES\" command=\"YES\"/>\n                                <connections>\n                                    <action selector=\"toggleFullScreen:\" target=\"-1\" id=\"dU3-MA-1Rq\"/>\n                           
     </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Window\" id=\"aUF-d1-5bR\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Window\" systemMenu=\"window\" id=\"Td7-aD-5lo\">\n                        <items>\n                            <menuItem title=\"Minimize\" keyEquivalent=\"m\" id=\"OY7-WF-poV\">\n                                <connections>\n                                    <action selector=\"performMiniaturize:\" target=\"-1\" id=\"VwT-WD-YPe\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem title=\"Zoom\" id=\"R4o-n2-Eq4\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"performZoom:\" target=\"-1\" id=\"DIl-cC-cCs\"/>\n                                </connections>\n                            </menuItem>\n                            <menuItem isSeparatorItem=\"YES\" id=\"eu3-7i-yIM\"/>\n                            <menuItem title=\"Bring All to Front\" id=\"LE2-aR-0XJ\">\n                                <modifierMask key=\"keyEquivalentModifierMask\"/>\n                                <connections>\n                                    <action selector=\"arrangeInFront:\" target=\"-1\" id=\"DRN-fu-gQh\"/>\n                                </connections>\n                            </menuItem>\n                        </items>\n                    </menu>\n                </menuItem>\n                <menuItem title=\"Help\" id=\"EPT-qC-fAb\">\n                    <modifierMask key=\"keyEquivalentModifierMask\"/>\n                    <menu key=\"submenu\" title=\"Help\" systemMenu=\"help\" id=\"rJ0-wn-3NY\"/>\n                
</menuItem>\n            </items>\n            <point key=\"canvasLocation\" x=\"142\" y=\"-258\"/>\n        </menu>\n        <window title=\"APP_NAME\" allowsToolTipsWhenApplicationIsInactive=\"NO\" autorecalculatesKeyViewLoop=\"NO\" releasedWhenClosed=\"NO\" animationBehavior=\"default\" id=\"QvC-M9-y7g\" customClass=\"MainFlutterWindow\" customModule=\"Runner\" customModuleProvider=\"target\">\n            <windowStyleMask key=\"styleMask\" titled=\"YES\" closable=\"YES\" miniaturizable=\"YES\" resizable=\"YES\"/>\n            <rect key=\"contentRect\" x=\"335\" y=\"390\" width=\"800\" height=\"600\"/>\n            <rect key=\"screenRect\" x=\"0.0\" y=\"0.0\" width=\"2560\" height=\"1577\"/>\n            <view key=\"contentView\" wantsLayer=\"YES\" id=\"EiT-Mj-1SZ\">\n                <rect key=\"frame\" x=\"0.0\" y=\"0.0\" width=\"800\" height=\"600\"/>\n                <autoresizingMask key=\"autoresizingMask\"/>\n            </view>\n        </window>\n    </objects>\n</document>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Configs/AppInfo.xcconfig",
    "content": "// Application-level settings for the Runner target.\n//\n// This may be replaced with something auto-generated from metadata (e.g., pubspec.yaml) in the\n// future. If not, the values below would default to using the project name when this becomes a\n// 'flutter create' template.\n\n// The application's name. By default this is also the title of the Flutter window.\nPRODUCT_NAME = example\n\n// The application's bundle identifier\nPRODUCT_BUNDLE_IDENTIFIER = com.example.example\n\n// The copyright displayed in application information\nPRODUCT_COPYRIGHT = Copyright © 2024 com.example. All rights reserved.\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Configs/Debug.xcconfig",
    "content": "#include \"../../Flutter/Flutter-Debug.xcconfig\"\n#include \"Warnings.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Configs/Release.xcconfig",
    "content": "#include \"../../Flutter/Flutter-Release.xcconfig\"\n#include \"Warnings.xcconfig\"\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Configs/Warnings.xcconfig",
    "content": "WARNING_CFLAGS = -Wall -Wconditional-uninitialized -Wnullable-to-nonnull-conversion -Wmissing-method-return-type -Woverlength-strings\nGCC_WARN_UNDECLARED_SELECTOR = YES\nCLANG_UNDEFINED_BEHAVIOR_SANITIZER_NULLABILITY = YES\nCLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE\nCLANG_WARN__DUPLICATE_METHOD_MATCH = YES\nCLANG_WARN_PRAGMA_PACK = YES\nCLANG_WARN_STRICT_PROTOTYPES = YES\nCLANG_WARN_COMMA = YES\nGCC_WARN_STRICT_SELECTOR_MATCH = YES\nCLANG_WARN_OBJC_REPEATED_USE_OF_WEAK = YES\nCLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES\nGCC_WARN_SHADOW = YES\nCLANG_WARN_UNREACHABLE_CODE = YES\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/DebugProfile.entitlements",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>com.apple.security.app-sandbox</key>\n\t<true/>\n\t<key>com.apple.security.cs.allow-jit</key>\n\t<true/>\n\t<key>com.apple.security.network.server</key>\n\t<true/>\n  <key>com.apple.security.network.client</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>$(DEVELOPMENT_LANGUAGE)</string>\n\t<key>CFBundleExecutable</key>\n\t<string>$(EXECUTABLE_NAME)</string>\n\t<key>CFBundleIconFile</key>\n\t<string></string>\n\t<key>CFBundleIdentifier</key>\n\t<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundleName</key>\n\t<string>$(PRODUCT_NAME)</string>\n\t<key>CFBundlePackageType</key>\n\t<string>APPL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>$(FLUTTER_BUILD_NAME)</string>\n\t<key>CFBundleVersion</key>\n\t<string>$(FLUTTER_BUILD_NUMBER)</string>\n\t<key>LSMinimumSystemVersion</key>\n\t<string>$(MACOSX_DEPLOYMENT_TARGET)</string>\n\t<key>NSHumanReadableCopyright</key>\n\t<string>$(PRODUCT_COPYRIGHT)</string>\n\t<key>NSMainNibFile</key>\n\t<string>MainMenu</string>\n\t<key>NSPrincipalClass</key>\n\t<string>NSApplication</string>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/MainFlutterWindow.swift",
    "content": "import Cocoa\nimport FlutterMacOS\n\nclass MainFlutterWindow: NSWindow {\n  override func awakeFromNib() {\n    let flutterViewController = FlutterViewController()\n    let windowFrame = self.frame\n    self.contentViewController = flutterViewController\n    self.setFrame(windowFrame, display: true)\n\n    RegisterGeneratedPlugins(registry: flutterViewController)\n\n    super.awakeFromNib()\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner/Release.entitlements",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>com.apple.security.app-sandbox</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 54;\n\tobjects = {\n\n/* Begin PBXAggregateTarget section */\n\t\t33CC111A2044C6BA0003C045 /* Flutter Assemble */ = {\n\t\t\tisa = PBXAggregateTarget;\n\t\t\tbuildConfigurationList = 33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget \"Flutter Assemble\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t33CC111E2044C6BF0003C045 /* ShellScript */,\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = \"Flutter Assemble\";\n\t\t\tproductName = FLX;\n\t\t};\n/* End PBXAggregateTarget section */\n\n/* Begin PBXBuildFile section */\n\t\t1E1464098F5197FB1E35FDA1 /* Pods_RunnerTests.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0E05DB31CC6D204C7C78D127 /* Pods_RunnerTests.framework */; };\n\t\t20C13FC2C906153EF4A40292 /* GoogleService-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 08B0491E23641E5BA5DD096C /* GoogleService-Info.plist */; };\n\t\t331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 331C80D7294CF71000263BE5 /* RunnerTests.swift */; };\n\t\t335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */ = {isa = PBXBuildFile; fileRef = 335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */; };\n\t\t33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC10F02044A3C60003C045 /* AppDelegate.swift */; };\n\t\t33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F22044A3C60003C045 /* Assets.xcassets */; };\n\t\t33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F42044A3C60003C045 /* MainMenu.xib */; };\n\t\t33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC11122044BFA00003C045 /* MainFlutterWindow.swift */; };\n\t\t3D1CF19370CB8E26E5C667A5 /* Pods_Runner.framework in Frameworks */ = {isa = 
PBXBuildFile; fileRef = C4DAA18FE8B79A454BF3F8CB /* Pods_Runner.framework */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t331C80D9294CF71000263BE5 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 33CC10E52044A3C60003C045 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 33CC10EC2044A3C60003C045;\n\t\t\tremoteInfo = Runner;\n\t\t};\n\t\t33CC111F2044C79F0003C045 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 33CC10E52044A3C60003C045 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 33CC111A2044C6BA0003C045;\n\t\t\tremoteInfo = FLX;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXCopyFilesBuildPhase section */\n\t\t33CC110E2044A8840003C045 /* Bundle Framework */ = {\n\t\t\tisa = PBXCopyFilesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tdstPath = \"\";\n\t\t\tdstSubfolderSpec = 10;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tname = \"Bundle Framework\";\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXCopyFilesBuildPhase section */\n\n/* Begin PBXFileReference section */\n\t\t08B0491E23641E5BA5DD096C /* GoogleService-Info.plist */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.plist.xml; name = \"GoogleService-Info.plist\"; path = \"Runner/GoogleService-Info.plist\"; sourceTree = \"<group>\"; };\n\t\t0E05DB31CC6D204C7C78D127 /* Pods_RunnerTests.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_RunnerTests.framework; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t331C80D5294CF71000263BE5 /* RunnerTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = RunnerTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t331C80D7294CF71000263BE5 /* RunnerTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
RunnerTests.swift; sourceTree = \"<group>\"; };\n\t\t333000ED22D3DE5D00554162 /* Warnings.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Warnings.xcconfig; sourceTree = \"<group>\"; };\n\t\t335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GeneratedPluginRegistrant.swift; sourceTree = \"<group>\"; };\n\t\t33CC10ED2044A3C60003C045 /* example.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = example.app; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t33CC10F02044A3C60003C045 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = \"<group>\"; };\n\t\t33CC10F22044A3C60003C045 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; name = Assets.xcassets; path = Runner/Assets.xcassets; sourceTree = \"<group>\"; };\n\t\t33CC10F52044A3C60003C045 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/MainMenu.xib; sourceTree = \"<group>\"; };\n\t\t33CC10F72044A3C60003C045 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; name = Info.plist; path = Runner/Info.plist; sourceTree = \"<group>\"; };\n\t\t33CC11122044BFA00003C045 /* MainFlutterWindow.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MainFlutterWindow.swift; sourceTree = \"<group>\"; };\n\t\t33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = \"Flutter-Debug.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = \"Flutter-Release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */ = {isa = 
PBXFileReference; lastKnownFileType = text.xcconfig; name = \"Flutter-Generated.xcconfig\"; path = \"ephemeral/Flutter-Generated.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t33E51913231747F40026EE4D /* DebugProfile.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = DebugProfile.entitlements; sourceTree = \"<group>\"; };\n\t\t33E51914231749380026EE4D /* Release.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = Release.entitlements; sourceTree = \"<group>\"; };\n\t\t33E5194F232828860026EE4D /* AppInfo.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = AppInfo.xcconfig; sourceTree = \"<group>\"; };\n\t\t3A40C9AE19ACEC6C433878E9 /* Pods-RunnerTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.debug.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.debug.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t587C61AFC0E2B0BF5340F8E8 /* Pods-RunnerTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.release.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t5C2B5E4F1CE100E1FA5D9DC5 /* Pods-RunnerTests.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-RunnerTests.profile.xcconfig\"; path = \"Target Support Files/Pods-RunnerTests/Pods-RunnerTests.profile.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t766A2E414AFDFA56243527A6 /* Pods-Runner.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.profile.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.profile.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t7AFA3C8E1D35360C0083082E /* 
Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Release.xcconfig; sourceTree = \"<group>\"; };\n\t\t816B0EE72BF94FC5261D04E6 /* Pods-Runner.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.release.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig\"; sourceTree = \"<group>\"; };\n\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Debug.xcconfig; sourceTree = \"<group>\"; };\n\t\tA2911B8EF91B3925874FDE6A /* Pods-Runner.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = \"Pods-Runner.debug.xcconfig\"; path = \"Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig\"; sourceTree = \"<group>\"; };\n\t\tC4DAA18FE8B79A454BF3F8CB /* Pods_Runner.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_Runner.framework; sourceTree = BUILT_PRODUCTS_DIR; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t331C80D2294CF70F00263BE5 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t1E1464098F5197FB1E35FDA1 /* Pods_RunnerTests.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10EA2044A3C60003C045 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t3D1CF19370CB8E26E5C667A5 /* Pods_Runner.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t331C80D6294CF71000263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t331C80D7294CF71000263BE5 /* RunnerTests.swift 
*/,\n\t\t\t);\n\t\t\tpath = RunnerTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33BA886A226E78AF003329D5 /* Configs */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33E5194F232828860026EE4D /* AppInfo.xcconfig */,\n\t\t\t\t9740EEB21CF90195004384FC /* Debug.xcconfig */,\n\t\t\t\t7AFA3C8E1D35360C0083082E /* Release.xcconfig */,\n\t\t\t\t333000ED22D3DE5D00554162 /* Warnings.xcconfig */,\n\t\t\t);\n\t\t\tpath = Configs;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC10E42044A3C60003C045 = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33FAB671232836740065AC1E /* Runner */,\n\t\t\t\t33CEB47122A05771004F2AC0 /* Flutter */,\n\t\t\t\t331C80D6294CF71000263BE5 /* RunnerTests */,\n\t\t\t\t33CC10EE2044A3C60003C045 /* Products */,\n\t\t\t\tD73912EC22F37F3D000D13A0 /* Frameworks */,\n\t\t\t\t08B0491E23641E5BA5DD096C /* GoogleService-Info.plist */,\n\t\t\t\tBE277C424FC00920BE07E371 /* Pods */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC10EE2044A3C60003C045 /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10ED2044A3C60003C045 /* example.app */,\n\t\t\t\t331C80D5294CF71000263BE5 /* RunnerTests.xctest */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CC11242044D66E0003C045 /* Resources */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F22044A3C60003C045 /* Assets.xcassets */,\n\t\t\t\t33CC10F42044A3C60003C045 /* MainMenu.xib */,\n\t\t\t\t33CC10F72044A3C60003C045 /* Info.plist */,\n\t\t\t);\n\t\t\tname = Resources;\n\t\t\tpath = ..;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33CEB47122A05771004F2AC0 /* Flutter */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */,\n\t\t\t\t33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */,\n\t\t\t\t33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */,\n\t\t\t\t33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */,\n\t\t\t);\n\t\t\tpath = 
Flutter;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t33FAB671232836740065AC1E /* Runner */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F02044A3C60003C045 /* AppDelegate.swift */,\n\t\t\t\t33CC11122044BFA00003C045 /* MainFlutterWindow.swift */,\n\t\t\t\t33E51913231747F40026EE4D /* DebugProfile.entitlements */,\n\t\t\t\t33E51914231749380026EE4D /* Release.entitlements */,\n\t\t\t\t33CC11242044D66E0003C045 /* Resources */,\n\t\t\t\t33BA886A226E78AF003329D5 /* Configs */,\n\t\t\t);\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\tBE277C424FC00920BE07E371 /* Pods */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\tA2911B8EF91B3925874FDE6A /* Pods-Runner.debug.xcconfig */,\n\t\t\t\t816B0EE72BF94FC5261D04E6 /* Pods-Runner.release.xcconfig */,\n\t\t\t\t766A2E414AFDFA56243527A6 /* Pods-Runner.profile.xcconfig */,\n\t\t\t\t3A40C9AE19ACEC6C433878E9 /* Pods-RunnerTests.debug.xcconfig */,\n\t\t\t\t587C61AFC0E2B0BF5340F8E8 /* Pods-RunnerTests.release.xcconfig */,\n\t\t\t\t5C2B5E4F1CE100E1FA5D9DC5 /* Pods-RunnerTests.profile.xcconfig */,\n\t\t\t);\n\t\t\tname = Pods;\n\t\t\tpath = Pods;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\tD73912EC22F37F3D000D13A0 /* Frameworks */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\tC4DAA18FE8B79A454BF3F8CB /* Pods_Runner.framework */,\n\t\t\t\t0E05DB31CC6D204C7C78D127 /* Pods_RunnerTests.framework */,\n\t\t\t);\n\t\t\tname = Frameworks;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t331C80D4294CF70F00263BE5 /* RunnerTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t33B83C0D35C3606AED8215FE /* [CP] Check Pods Manifest.lock */,\n\t\t\t\t331C80D1294CF70F00263BE5 /* Sources */,\n\t\t\t\t331C80D2294CF70F00263BE5 /* Frameworks */,\n\t\t\t\t331C80D3294CF70F00263BE5 /* Resources 
*/,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t331C80DA294CF71000263BE5 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = RunnerTests;\n\t\t\tproductName = RunnerTests;\n\t\t\tproductReference = 331C80D5294CF71000263BE5 /* RunnerTests.xctest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.unit-test\";\n\t\t};\n\t\t33CC10EC2044A3C60003C045 /* Runner */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 33CC10FB2044A3C60003C045 /* Build configuration list for PBXNativeTarget \"Runner\" */;\n\t\t\tbuildPhases = (\n\t\t\t\tE10F886575A4AF9F1D3D5C5B /* [CP] Check Pods Manifest.lock */,\n\t\t\t\t33CC10E92044A3C60003C045 /* Sources */,\n\t\t\t\t33CC10EA2044A3C60003C045 /* Frameworks */,\n\t\t\t\t33CC10EB2044A3C60003C045 /* Resources */,\n\t\t\t\t33CC110E2044A8840003C045 /* Bundle Framework */,\n\t\t\t\t3399D490228B24CF009A79C7 /* ShellScript */,\n\t\t\t\t1D3525FBE401B81EB0265948 /* [CP] Embed Pods Frameworks */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t33CC11202044C79F0003C045 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = Runner;\n\t\t\tproductName = Runner;\n\t\t\tproductReference = 33CC10ED2044A3C60003C045 /* example.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t33CC10E52044A3C60003C045 /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tBuildIndependentTargetsInParallel = YES;\n\t\t\t\tLastSwiftUpdateCheck = 0920;\n\t\t\t\tLastUpgradeCheck = 1510;\n\t\t\t\tORGANIZATIONNAME = \"\";\n\t\t\t\tTargetAttributes = {\n\t\t\t\t\t331C80D4294CF70F00263BE5 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 14.0;\n\t\t\t\t\t\tTestTargetID = 33CC10EC2044A3C60003C045;\n\t\t\t\t\t};\n\t\t\t\t\t33CC10EC2044A3C60003C045 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 9.2;\n\t\t\t\t\t\tLastSwiftMigration = 1100;\n\t\t\t\t\t\tProvisioningStyle = 
Automatic;\n\t\t\t\t\t\tSystemCapabilities = {\n\t\t\t\t\t\t\tcom.apple.Sandbox = {\n\t\t\t\t\t\t\t\tenabled = 1;\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t};\n\t\t\t\t\t};\n\t\t\t\t\t33CC111A2044C6BA0003C045 = {\n\t\t\t\t\t\tCreatedOnToolsVersion = 9.2;\n\t\t\t\t\t\tProvisioningStyle = Manual;\n\t\t\t\t\t};\n\t\t\t\t};\n\t\t\t};\n\t\t\tbuildConfigurationList = 33CC10E82044A3C60003C045 /* Build configuration list for PBXProject \"Runner\" */;\n\t\t\tcompatibilityVersion = \"Xcode 9.3\";\n\t\t\tdevelopmentRegion = en;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t\tBase,\n\t\t\t);\n\t\t\tmainGroup = 33CC10E42044A3C60003C045;\n\t\t\tproductRefGroup = 33CC10EE2044A3C60003C045 /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = (\n\t\t\t\t33CC10EC2044A3C60003C045 /* Runner */,\n\t\t\t\t331C80D4294CF70F00263BE5 /* RunnerTests */,\n\t\t\t\t33CC111A2044C6BA0003C045 /* Flutter Assemble */,\n\t\t\t);\n\t\t};\n/* End PBXProject section */\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t331C80D3294CF70F00263BE5 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10EB2044A3C60003C045 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */,\n\t\t\t\t33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */,\n\t\t\t\t20C13FC2C906153EF4A40292 /* GoogleService-Info.plist in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXShellScriptBuildPhase section */\n\t\t1D3525FBE401B81EB0265948 /* [CP] Embed Pods Frameworks */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t\t\"${PODS_ROOT}/Target 
Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-input-files.xcfilelist\",\n\t\t\t);\n\t\t\tname = \"[CP] Embed Pods Frameworks\";\n\t\t\toutputFileListPaths = (\n\t\t\t\t\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-output-files.xcfilelist\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"\\\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n\t\t3399D490228B24CF009A79C7 /* ShellScript */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\talwaysOutOfDate = 1;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t);\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"echo \\\"$PRODUCT_NAME.app\\\" > \\\"$PROJECT_DIR\\\"/Flutter/ephemeral/.app_filename && \\\"$FLUTTER_ROOT\\\"/packages/flutter_tools/bin/macos_assemble.sh embed\\n\";\n\t\t};\n\t\t33B83C0D35C3606AED8215FE /* [CP] Check Pods Manifest.lock */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\",\n\t\t\t\t\"${PODS_ROOT}/Manifest.lock\",\n\t\t\t);\n\t\t\tname = \"[CP] Check Pods Manifest.lock\";\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t\t\"$(DERIVED_FILE_DIR)/Pods-RunnerTests-checkManifestLockResult.txt\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"diff \\\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\\\" \\\"${PODS_ROOT}/Manifest.lock\\\" > /dev/null\\nif [ $? != 0 ] ; then\\n    # print error to STDERR\\n    echo \\\"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\\\" >&2\\n    exit 1\\nfi\\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\\necho \\\"SUCCESS\\\" > \\\"${SCRIPT_OUTPUT_FILE_0}\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n\t\t33CC111E2044C6BF0003C045 /* ShellScript */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t\tFlutter/ephemeral/FlutterInputs.xcfilelist,\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\tFlutter/ephemeral/tripwire,\n\t\t\t);\n\t\t\toutputFileListPaths = (\n\t\t\t\tFlutter/ephemeral/FlutterOutputs.xcfilelist,\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"\\\"$FLUTTER_ROOT\\\"/packages/flutter_tools/bin/macos_assemble.sh && touch Flutter/ephemeral/tripwire\";\n\t\t};\n\t\tE10F886575A4AF9F1D3D5C5B /* [CP] Check Pods Manifest.lock */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputFileListPaths = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t\t\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\",\n\t\t\t\t\"${PODS_ROOT}/Manifest.lock\",\n\t\t\t);\n\t\t\tname = \"[CP] Check Pods Manifest.lock\";\n\t\t\toutputFileListPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t\t\"$(DERIVED_FILE_DIR)/Pods-Runner-checkManifestLockResult.txt\",\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"diff \\\"${PODS_PODFILE_DIR_PATH}/Podfile.lock\\\" \\\"${PODS_ROOT}/Manifest.lock\\\" > /dev/null\\nif [ $? != 0 ] ; then\\n    # print error to STDERR\\n    echo \\\"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\\\" >&2\\n    exit 1\\nfi\\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\\necho \\\"SUCCESS\\\" > \\\"${SCRIPT_OUTPUT_FILE_0}\\\"\\n\";\n\t\t\tshowEnvVarsInLog = 0;\n\t\t};\n/* End PBXShellScriptBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t331C80D1294CF70F00263BE5 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t33CC10E92044A3C60003C045 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */,\n\t\t\t\t33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */,\n\t\t\t\t335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t331C80DA294CF71000263BE5 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 33CC10EC2044A3C60003C045 /* Runner */;\n\t\t\ttargetProxy = 331C80D9294CF71000263BE5 /* PBXContainerItemProxy */;\n\t\t};\n\t\t33CC11202044C79F0003C045 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 33CC111A2044C6BA0003C045 /* Flutter Assemble */;\n\t\t\ttargetProxy = 33CC111F2044C79F0003C045 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin PBXVariantGroup section */\n\t\t33CC10F42044A3C60003C045 /* MainMenu.xib */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t33CC10F52044A3C60003C045 /* Base */,\n\t\t\t);\n\t\t\tname = MainMenu.xib;\n\t\t\tpath = Runner;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXVariantGroup section */\n\n/* Begin 
XCBuildConfiguration section */\n\t\t331C80DB294CF71000263BE5 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 3A40C9AE19ACEC6C433878E9 /* Pods-RunnerTests.debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/example.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/example\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t331C80DC294CF71000263BE5 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 587C61AFC0E2B0BF5340F8E8 /* Pods-RunnerTests.release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/example.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/example\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t331C80DD294CF71000263BE5 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 5C2B5E4F1CE100E1FA5D9DC5 /* Pods-RunnerTests.profile.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(TEST_HOST)\";\n\t\t\t\tCURRENT_PROJECT_VERSION = 1;\n\t\t\t\tGENERATE_INFOPLIST_FILE = YES;\n\t\t\t\tMARKETING_VERSION = 1.0;\n\t\t\t\tPRODUCT_BUNDLE_IDENTIFIER = com.example.example.RunnerTests;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t\tTEST_HOST = \"$(BUILT_PRODUCTS_DIR)/example.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/example\";\n\t\t\t};\n\t\t\tname = 
Profile;\n\t\t};\n\t\t338D0CE9231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEAD_CODE_STRIPPING = YES;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.14;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO 
= NO;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t338D0CEA231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = Runner/DebugProfile.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t338D0CEB231458BD00FA5F75 /* Profile */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Manual;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Profile;\n\t\t};\n\t\t33CC10F92044A3C60003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = 
YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEAD_CODE_STRIPPING = YES;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = dwarf;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_TESTABILITY = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.14;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = YES;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC10FA2044A3C60003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES;\n\t\t\t\tCLANG_ANALYZER_NONNULL = YES;\n\t\t\t\tCLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++14\";\n\t\t\t\tCLANG_CXX_LIBRARY = 
\"libc++\";\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCLANG_ENABLE_OBJC_ARC = YES;\n\t\t\t\tCLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;\n\t\t\t\tCLANG_WARN_BOOL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;\n\t\t\t\tCLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;\n\t\t\t\tCLANG_WARN_DOCUMENTATION_COMMENTS = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INFINITE_RECURSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_LITERAL_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;\n\t\t\t\tCLANG_WARN_RANGE_LOOP_ANALYSIS = YES;\n\t\t\t\tCLANG_WARN_SUSPICIOUS_MOVE = YES;\n\t\t\t\tCODE_SIGN_IDENTITY = \"-\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tDEAD_CODE_STRIPPING = YES;\n\t\t\t\tDEBUG_INFORMATION_FORMAT = \"dwarf-with-dsym\";\n\t\t\t\tENABLE_NS_ASSERTIONS = NO;\n\t\t\t\tENABLE_STRICT_OBJC_MSGSEND = YES;\n\t\t\t\tENABLE_USER_SCRIPT_SANDBOXING = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu11;\n\t\t\t\tGCC_NO_COMMON_BLOCKS = YES;\n\t\t\t\tGCC_WARN_64_TO_32_BIT_CONVERSION = YES;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;\n\t\t\t\tGCC_WARN_UNUSED_FUNCTION = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tMACOSX_DEPLOYMENT_TARGET = 10.14;\n\t\t\t\tMTL_ENABLE_DEBUG_INFO = NO;\n\t\t\t\tSDKROOT = macosx;\n\t\t\t\tSWIFT_COMPILATION_MODE = wholemodule;\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-O\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t33CC10FC2044A3C60003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = 
Runner/DebugProfile.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_OPTIMIZATION_LEVEL = \"-Onone\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC10FD2044A3C60003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbaseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */;\n\t\t\tbuildSettings = {\n\t\t\t\tASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;\n\t\t\t\tCLANG_ENABLE_MODULES = YES;\n\t\t\t\tCODE_SIGN_ENTITLEMENTS = Runner/Release.entitlements;\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tCOMBINE_HIDPI_IMAGES = YES;\n\t\t\t\tINFOPLIST_FILE = Runner/Info.plist;\n\t\t\t\tLD_RUNPATH_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"@executable_path/../Frameworks\",\n\t\t\t\t);\n\t\t\t\tPROVISIONING_PROFILE_SPECIFIER = \"\";\n\t\t\t\tSWIFT_VERSION = 5.0;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t33CC111C2044C6BA0003C045 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Manual;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t33CC111D2044C6BA0003C045 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tCODE_SIGN_STYLE = Automatic;\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget \"RunnerTests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t331C80DB294CF71000263BE5 /* Debug */,\n\t\t\t\t331C80DC294CF71000263BE5 /* Release */,\n\t\t\t\t331C80DD294CF71000263BE5 /* 
Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC10E82044A3C60003C045 /* Build configuration list for PBXProject \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC10F92044A3C60003C045 /* Debug */,\n\t\t\t\t33CC10FA2044A3C60003C045 /* Release */,\n\t\t\t\t338D0CE9231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC10FB2044A3C60003C045 /* Build configuration list for PBXNativeTarget \"Runner\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC10FC2044A3C60003C045 /* Debug */,\n\t\t\t\t33CC10FD2044A3C60003C045 /* Release */,\n\t\t\t\t338D0CEA231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget \"Flutter Assemble\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t33CC111C2044C6BA0003C045 /* Debug */,\n\t\t\t\t33CC111D2044C6BA0003C045 /* Release */,\n\t\t\t\t338D0CEB231458BD00FA5F75 /* Profile */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\t};\n\trootObject = 33CC10E52044A3C60003C045 /* Project object */;\n}\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Scheme\n   LastUpgradeVersion = \"1510\"\n   version = \"1.3\">\n   <BuildAction\n      parallelizeBuildables = \"YES\"\n      buildImplicitDependencies = \"YES\">\n      <BuildActionEntries>\n         <BuildActionEntry\n            buildForTesting = \"YES\"\n            buildForRunning = \"YES\"\n            buildForProfiling = \"YES\"\n            buildForArchiving = \"YES\"\n            buildForAnalyzing = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n               BuildableName = \"example.app\"\n               BlueprintName = \"Runner\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            </BuildableReference>\n         </BuildActionEntry>\n      </BuildActionEntries>\n   </BuildAction>\n   <TestAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\">\n      <MacroExpansion>\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"example.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </MacroExpansion>\n      <Testables>\n         <TestableReference\n            skipped = \"NO\"\n            parallelizable = \"YES\">\n            <BuildableReference\n               BuildableIdentifier = \"primary\"\n               BlueprintIdentifier = \"331C80D4294CF70F00263BE5\"\n               BuildableName = \"RunnerTests.xctest\"\n               BlueprintName = \"RunnerTests\"\n               ReferencedContainer = \"container:Runner.xcodeproj\">\n            
</BuildableReference>\n         </TestableReference>\n      </Testables>\n   </TestAction>\n   <LaunchAction\n      buildConfiguration = \"Debug\"\n      selectedDebuggerIdentifier = \"Xcode.DebuggerFoundation.Debugger.LLDB\"\n      selectedLauncherIdentifier = \"Xcode.DebuggerFoundation.Launcher.LLDB\"\n      launchStyle = \"0\"\n      useCustomWorkingDirectory = \"NO\"\n      ignoresPersistentStateOnLaunch = \"NO\"\n      debugDocumentVersioning = \"YES\"\n      debugServiceExtension = \"internal\"\n      allowLocationSimulation = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"example.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n      <CommandLineArguments>\n         <CommandLineArgument\n            argument = \"-FIRDebugEnabled\"\n            isEnabled = \"YES\">\n         </CommandLineArgument>\n      </CommandLineArguments>\n   </LaunchAction>\n   <ProfileAction\n      buildConfiguration = \"Profile\"\n      shouldUseLaunchSchemeArgsEnv = \"YES\"\n      savedToolIdentifier = \"\"\n      useCustomWorkingDirectory = \"NO\"\n      debugDocumentVersioning = \"YES\">\n      <BuildableProductRunnable\n         runnableDebuggingMode = \"0\">\n         <BuildableReference\n            BuildableIdentifier = \"primary\"\n            BlueprintIdentifier = \"33CC10EC2044A3C60003C045\"\n            BuildableName = \"example.app\"\n            BlueprintName = \"Runner\"\n            ReferencedContainer = \"container:Runner.xcodeproj\">\n         </BuildableReference>\n      </BuildableProductRunnable>\n   </ProfileAction>\n   <AnalyzeAction\n      buildConfiguration = \"Debug\">\n   </AnalyzeAction>\n   <ArchiveAction\n      
buildConfiguration = \"Release\"\n      revealArchiveInOrganizer = \"YES\">\n   </ArchiveAction>\n</Scheme>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner.xcworkspace/contents.xcworkspacedata",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Workspace\n   version = \"1.0\">\n   <FileRef\n      location = \"group:Runner.xcodeproj\">\n   </FileRef>\n   <FileRef\n      location = \"group:Pods/Pods.xcodeproj\">\n   </FileRef>\n</Workspace>\n"
  },
  {
    "path": "packages/langchain_firebase/example/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>IDEDidComputeMac32BitWarning</key>\n\t<true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "packages/langchain_firebase/example/pubspec.yaml",
    "content": "name: langchain_firebase_example\ndescription: Example project to show how to use the Firebase integration module for LangChain.dart\nversion: 1.0.0+1\npublish_to: 'none'\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\n  flutter: \">=3.27.0\"\nresolution: workspace\n\ndependencies:\n  cupertino_icons: ^1.0.6\n  firebase_core: ^4.2.0\n  flutter:\n    sdk: flutter\n  flutter_markdown: ^0.7.7\n  langchain: 0.8.1\n  langchain_firebase: 0.3.2\n\ndev_dependencies:\n  flutter_test:\n    sdk: flutter\n\nflutter:\n  uses-material-design: true\n  assets:\n    - assets/images/\n"
  },
  {
    "path": "packages/langchain_firebase/example/web/flutter_bootstrap.js",
    "content": "{{flutter_js}}\n{{flutter_build_config}}\n\n_flutter.loader.load({\n  serviceWorkerSettings: {\n    serviceWorkerVersion: {{flutter_service_worker_version}},\n  },\n  onEntrypointLoaded: async function(engineInitializer) {\n    const appRunner = await engineInitializer.initializeEngine({useColorEmoji: true});\n    await appRunner.runApp();\n  },\n});\n"
  },
  {
    "path": "packages/langchain_firebase/example/web/index.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n  <base href=\"$FLUTTER_BASE_HREF\">\n\n  <meta charset=\"UTF-8\">\n  <meta content=\"IE=Edge\" http-equiv=\"X-UA-Compatible\">\n  <meta content=\"A sample Flutter app integrating VertexAI for Firebase in LangChain.dart.\" name=\"description\">\n\n  <!-- iOS meta tags & icons -->\n  <meta content=\"yes\" name=\"apple-mobile-web-app-capable\">\n  <meta content=\"black\" name=\"apple-mobile-web-app-status-bar-style\">\n  <meta content=\"hello_world_flutter\" name=\"apple-mobile-web-app-title\">\n  <link href=\"icons/Icon-192.png\" rel=\"apple-touch-icon\">\n\n  <!-- Favicon -->\n  <link href=\"favicon.png\" rel=\"icon\" type=\"image/png\"/>\n\n  <title>VertexAI for Firebase in LangChain.dart</title>\n  <link href=\"manifest.json\" rel=\"manifest\">\n</head>\n<body>\n<script src=\"flutter_bootstrap.js\" async></script>\n</body>\n</html>\n"
  },
  {
    "path": "packages/langchain_firebase/example/web/manifest.json",
    "content": "{\n    \"name\": \"example\",\n    \"short_name\": \"example\",\n    \"start_url\": \".\",\n    \"display\": \"standalone\",\n    \"background_color\": \"#0175C2\",\n    \"theme_color\": \"#0175C2\",\n    \"description\": \"A new Flutter project.\",\n    \"orientation\": \"portrait-primary\",\n    \"prefer_related_applications\": false,\n    \"icons\": [\n        {\n            \"src\": \"icons/Icon-192.png\",\n            \"sizes\": \"192x192\",\n            \"type\": \"image/png\"\n        },\n        {\n            \"src\": \"icons/Icon-512.png\",\n            \"sizes\": \"512x512\",\n            \"type\": \"image/png\"\n        },\n        {\n            \"src\": \"icons/Icon-maskable-192.png\",\n            \"sizes\": \"192x192\",\n            \"type\": \"image/png\",\n            \"purpose\": \"maskable\"\n        },\n        {\n            \"src\": \"icons/Icon-maskable-512.png\",\n            \"sizes\": \"512x512\",\n            \"type\": \"image/png\",\n            \"purpose\": \"maskable\"\n        }\n    ]\n}\n"
  },
  {
    "path": "packages/langchain_firebase/lib/langchain_firebase.dart",
    "content": "/// LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.).\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\n"
  },
  {
    "path": "packages/langchain_firebase/lib/src/chat_models/chat_models.dart",
    "content": "export 'vertex_ai/chat_firebase_vertex_ai.dart';\nexport 'vertex_ai/types.dart';\n"
  },
  {
    "path": "packages/langchain_firebase/lib/src/chat_models/vertex_ai/chat_firebase_vertex_ai.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:firebase_ai/firebase_ai.dart';\nimport 'package:firebase_app_check/firebase_app_check.dart';\nimport 'package:firebase_auth/firebase_auth.dart';\nimport 'package:firebase_core/firebase_core.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Firebase AI Logic](https://firebase.google.com/docs/ai-logic)\n/// (Gemini API via Firebase).\n///\n/// Supports two backends:\n/// - **Vertex AI** (default): Requires Blaze plan. Best for production.\n/// - **Google AI**: Available on free Spark plan. Good for development/testing.\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatFirebaseVertexAI();\n/// final messages = [\n///   ChatMessage.humanText('Tell me a joke.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await chatModel.invoke(prompt);\n/// ```\n///\n/// Example with Google AI backend:\n/// ```dart\n/// final chatModel = ChatFirebaseVertexAI(\n///   defaultBackend: FirebaseAIBackend.googleAI,\n/// );\n/// ```\n///\n/// - [Firebase AI Logic](https://firebase.google.com/docs/ai-logic)\n///\n/// ### Setup\n///\n/// To use `ChatFirebaseVertexAI` you need to have:\n/// - A Firebase project (Blaze plan for Vertex AI, Spark plan for Google AI)\n/// - Firebase SDK initialized in your app\n/// - For Vertex AI: `aiplatform.googleapis.com` and `firebaseml.googleapis.com` APIs enabled\n/// - For Google AI: Gemini API key configured in Firebase console\n/// - Recommended: Firebase App Check enabled (Vertex AI only)\n///\n/// ### Available models\n///\n/// The following models are available:\n/// - `gemini-2.5-pro`\n/// - `gemini-2.5-flash`\n/// - `gemini-2.0-flash`\n/// - `gemini-2.0-flash-lite`\n/// - `gemini-1.5-flash`\n/// - `gemini-1.5-pro`\n///\n/// Mind that this list may not be up-to-date.\n/// Refer to 
the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models)\n/// for the updated list.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatFirebaseVertexAI(\n///   defaultOptions: ChatFirebaseVertexAIOptions(\n///     model: 'gemini-1.5-pro-preview',\n///     temperature: 0,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatFirebaseVertexAIOptions(temperature: 1),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatFirebaseVertexAI();\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatFirebaseVertexAIOptions(model: 'gemini-1.0-pro')) | outputParser,\n///   'q2': prompt2 | chatModel.bind(const ChatFirebaseVertexAIOptions(model: 'gemini-1.0-pro-vision')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Tool calling\n///\n/// [ChatFirebaseVertexAI] supports tool calling.\n///\n/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools)\n/// for more information on how to use tools.\n///\n/// Example:\n/// 
```dart\n/// const tool = ToolSpec(\n///   name: 'get_current_weather',\n///   description: 'Get the current weather in a given location',\n///   inputJsonSchema: {\n///     'type': 'object',\n///     'properties': {\n///       'location': {\n///         'type': 'string',\n///         'description': 'The city and state, e.g. San Francisco, CA',\n///       },\n///     },\n///     'required': ['location'],\n///   },\n/// );\n/// final chatModel = ChatFirebaseVertexAI(\n///   defaultOptions: ChatFirebaseVertexAIOptions(\n///     model: 'gemini-1.5-pro',\n///     temperature: 0,\n///     tools: [tool],\n///   ),\n/// );\n/// final res = await chatModel.invoke(\n///   PromptValue.string('What’s the weather like in Boston and Madrid right now in celsius?'),\n/// );\n/// ```\nclass ChatFirebaseVertexAI extends BaseChatModel<ChatFirebaseVertexAIOptions> {\n  /// Create a new [ChatFirebaseVertexAI] instance.\n  ///\n  /// Main configuration options:\n  /// - [ChatFirebaseVertexAI.defaultOptions]\n  /// - [ChatFirebaseVertexAI.defaultBackend]\n  ///\n  /// Firebase configuration options:\n  /// - [ChatFirebaseVertexAI.app]\n  /// - [ChatFirebaseVertexAI.appCheck] (Vertex AI only)\n  /// - [ChatFirebaseVertexAI.auth] (Vertex AI only)\n  /// - [ChatFirebaseVertexAI.location] (Vertex AI only)\n  ChatFirebaseVertexAI({\n    super.defaultOptions = const ChatFirebaseVertexAIOptions(\n      model: defaultModel,\n    ),\n    this.defaultBackend = FirebaseAIBackend.vertexAI,\n    this.app,\n    this.appCheck,\n    this.auth,\n    this.location,\n  }) : _currentModel = defaultOptions.model ?? '',\n       _currentBackend = defaultBackend {\n    _firebaseClient = _createFirebaseClient(_currentModel);\n  }\n\n  /// The default Firebase AI backend to use.\n  ///\n  /// Defaults to [FirebaseAIBackend.vertexAI] for backward compatibility.\n  final FirebaseAIBackend defaultBackend;\n\n  /// The [FirebaseApp] to use. If not provided, the default app will be used.\n  final FirebaseApp? 
app;\n\n  /// The optional [FirebaseAppCheck] to use to protect the project from abuse.\n  /// Only applies to Vertex AI backend.\n  final FirebaseAppCheck? appCheck;\n\n  /// The optional [FirebaseAuth] to use for authentication.\n  /// Only applies to Vertex AI backend.\n  final FirebaseAuth? auth;\n\n  /// The service location for the Vertex AI instance.\n  /// Only applies to Vertex AI backend.\n  final String? location;\n\n  /// A client for interacting with Firebase AI API.\n  late GenerativeModel _firebaseClient;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  /// The current model set in [_firebaseClient];\n  String _currentModel;\n\n  /// The current backend set in [_firebaseClient];\n  FirebaseAIBackend _currentBackend;\n\n  /// The current system instruction set in [_firebaseClient];\n  String? _currentSystemInstruction;\n\n  @override\n  String get modelType => 'chat-firebase-vertex-ai';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gemini-1.5-flash';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatFirebaseVertexAIOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    final (model, prompt, safetySettings, generationConfig, tools, toolConfig) =\n        _generateCompletionRequest(input.toChatMessages(), options: options);\n    final completion = await _firebaseClient.generateContent(\n      prompt,\n      safetySettings: safetySettings,\n      generationConfig: generationConfig,\n      tools: tools,\n      toolConfig: toolConfig,\n    );\n    return completion.toChatResult(id, model);\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatFirebaseVertexAIOptions? 
options,\n  }) {\n    final id = _uuid.v4();\n    final (model, prompt, safetySettings, generationConfig, tools, toolConfig) =\n        _generateCompletionRequest(input.toChatMessages(), options: options);\n    return _firebaseClient\n        .generateContentStream(\n          prompt,\n          safetySettings: safetySettings,\n          generationConfig: generationConfig,\n          tools: tools,\n          toolConfig: toolConfig,\n        )\n        .map((final completion) => completion.toChatResult(id, model));\n  }\n\n  /// Creates a [GenerateContentRequest] from the given input.\n  (\n    String model,\n    Iterable<Content> prompt,\n    List<SafetySetting>? safetySettings,\n    GenerationConfig? generationConfig,\n    List<Tool>? tools,\n    ToolConfig? toolConfig,\n  )\n  _generateCompletionRequest(\n    final List<ChatMessage> messages, {\n    final ChatFirebaseVertexAIOptions? options,\n  }) {\n    _updateClientIfNeeded(messages, options);\n\n    return (\n      _currentModel,\n      messages.toContentList(),\n      (options?.safetySettings ?? defaultOptions.safetySettings)\n          ?.toSafetySettings(),\n      GenerationConfig(\n        candidateCount:\n            options?.candidateCount ?? defaultOptions.candidateCount,\n        stopSequences:\n            options?.stopSequences ?? defaultOptions.stopSequences ?? const [],\n        maxOutputTokens:\n            options?.maxOutputTokens ?? defaultOptions.maxOutputTokens,\n        temperature: options?.temperature ?? defaultOptions.temperature,\n        topP: options?.topP ?? defaultOptions.topP,\n        topK: options?.topK ?? defaultOptions.topK,\n        responseMimeType:\n            options?.responseMimeType ?? defaultOptions.responseMimeType,\n        responseSchema:\n            (options?.responseSchema ?? defaultOptions.responseSchema)\n                ?.toSchema(),\n      ),\n      (options?.tools ?? defaultOptions.tools)?.toToolList(),\n      (options?.toolChoice ?? 
defaultOptions.toolChoice)?.toToolConfig(),\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatFirebaseVertexAIOptions? options,\n  }) {\n    throw UnsupportedError(\n      'Google AI does not expose a tokenizer, only counting tokens is supported.',\n    );\n  }\n\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final ChatFirebaseVertexAIOptions? options,\n  }) async {\n    final messages = promptValue.toChatMessages();\n    _updateClientIfNeeded(messages, options);\n    final tokens = await _firebaseClient.countTokens(messages.toContentList());\n    return tokens.totalTokens;\n  }\n\n  /// Create a new [GenerativeModel] instance.\n  GenerativeModel _createFirebaseClient(\n    final String model, {\n    final String? systemInstruction,\n    final FirebaseAIBackend? backend,\n  }) {\n    final effectiveBackend = backend ?? defaultBackend;\n\n    final firebaseAI = switch (effectiveBackend) {\n      FirebaseAIBackend.vertexAI => FirebaseAI.vertexAI(\n        app: app,\n        appCheck: appCheck,\n        auth: auth,\n        location: location,\n      ),\n      FirebaseAIBackend.googleAI => FirebaseAI.googleAI(app: app),\n    };\n\n    return firebaseAI.generativeModel(\n      model: model,\n      systemInstruction: systemInstruction != null\n          ? Content.system(systemInstruction)\n          : null,\n    );\n  }\n\n  /// Recreate the [GenerativeModel] instance.\n  void _recreateFirebaseClient(\n    final String model,\n    final String? systemInstruction,\n    final FirebaseAIBackend backend,\n  ) {\n    _firebaseClient = _createFirebaseClient(\n      model,\n      systemInstruction: systemInstruction,\n      backend: backend,\n    );\n  }\n\n  /// Updates the model in [_firebaseClient] if needed.\n  void _updateClientIfNeeded(\n    final List<ChatMessage> messages,\n    final ChatFirebaseVertexAIOptions? options,\n  ) {\n    final model = options?.model ?? 
defaultOptions.model ?? defaultModel;\n    final backend = options?.backend ?? defaultBackend;\n\n    final systemInstruction = messages.firstOrNull is SystemChatMessage\n        ? messages.firstOrNull?.contentAsString\n        : null;\n\n    var recreate = false;\n    if (model != _currentModel) {\n      _currentModel = model;\n      recreate = true;\n    }\n    if (backend != _currentBackend) {\n      _currentBackend = backend;\n      recreate = true;\n    }\n    if (systemInstruction != _currentSystemInstruction) {\n      _currentSystemInstruction = systemInstruction;\n      recreate = true;\n    }\n\n    if (recreate) {\n      _recreateFirebaseClient(model, systemInstruction, backend);\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/lib/src/chat_models/vertex_ai/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:firebase_ai/firebase_ai.dart' as f;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport 'types.dart';\n\nextension ChatMessagesMapper on List<ChatMessage> {\n  List<f.Content> toContentList() {\n    final result = <f.Content>[];\n\n    // NOTE: Gemini/Vertex can return multiple FunctionCall parts in a single model turn.\n    // The API requires the *next* turn to be ONE Content.functionResponses that includes\n    // the SAME number of FunctionResponse parts, in the SAME order. If we send each\n    // ToolChatMessage as its own content turn, counts won’t match and the SDK will throw.\n    // Therefore, we batch consecutive ToolChatMessage instances into a single\n    // f.Content.functionResponses([...]).\n    List<f.FunctionResponse>? pendingToolResponses;\n\n    void flushToolResponses() {\n      if (pendingToolResponses != null && pendingToolResponses!.isNotEmpty) {\n        result.add(f.Content.functionResponses(pendingToolResponses!));\n        pendingToolResponses = null;\n      }\n    }\n\n    for (final message in this) {\n      if (message is SystemChatMessage) {\n        continue; // System messages are ignored\n      }\n\n      if (message is ToolChatMessage) {\n        // Start (or continue) a batch of tool responses\n        pendingToolResponses ??= <f.FunctionResponse>[];\n        pendingToolResponses!.add(_toolMsgToFunctionResponse(message));\n        continue;\n      }\n\n      // Any non-tool message breaks the batch: flush before adding it\n      flushToolResponses();\n\n      switch (message) {\n        case final HumanChatMessage msg:\n          result.add(_mapHumanChatMessage(msg));\n        case final AIChatMessage msg:\n          result.add(_mapAIChatMessage(msg));\n        case final CustomChatMessage msg:\n          
result.add(_mapCustomChatMessage(msg));\n        default:\n          throw UnsupportedError('Unknown message type: $message');\n      }\n    }\n\n    // Flush remaining batched tool responses at the end\n    flushToolResponses();\n\n    return result;\n  }\n\n  f.Content _mapHumanChatMessage(final HumanChatMessage msg) {\n    final contentParts = switch (msg.content) {\n      final ChatMessageContentText c => [f.TextPart(c.text)],\n      final ChatMessageContentImage c => [\n        if (c.data.startsWith('gs:') || c.data.startsWith('http'))\n          f.FileData(c.mimeType ?? '', c.data)\n        else\n          f.InlineDataPart(c.mimeType ?? '', base64Decode(c.data)),\n      ],\n      final ChatMessageContentMultiModal c =>\n        c.parts\n            .map(\n              (final p) => switch (p) {\n                final ChatMessageContentText c => f.TextPart(c.text),\n                final ChatMessageContentImage c =>\n                  c.data.startsWith('gs:') || c.data.startsWith('http')\n                      ? f.FileData(c.mimeType ?? '', c.data)\n                      : f.InlineDataPart(\n                          c.mimeType ?? 
'',\n                          base64Decode(c.data),\n                        ),\n                ChatMessageContentMultiModal() => throw UnsupportedError(\n                  'Cannot have multimodal content in multimodal content',\n                ),\n              },\n            )\n            .toList(growable: false),\n    };\n    return f.Content.multi(contentParts);\n  }\n\n  f.Content _mapAIChatMessage(final AIChatMessage msg) {\n    final contentParts = [\n      if (msg.content.isNotEmpty) f.TextPart(msg.content),\n      if (msg.toolCalls.isNotEmpty)\n        ...msg.toolCalls.map(\n          (final call) => f.FunctionCall(call.name, call.arguments),\n        ),\n    ];\n    return f.Content.model(contentParts);\n  }\n\n  f.FunctionResponse _toolMsgToFunctionResponse(final ToolChatMessage msg) {\n    Map<String, Object?> response;\n    try {\n      response = jsonDecode(msg.content) as Map<String, Object?>;\n    } catch (_) {\n      response = {'result': msg.content};\n    }\n    return f.FunctionResponse(msg.toolCallId, response);\n  }\n\n  f.Content _mapCustomChatMessage(final CustomChatMessage msg) {\n    return f.Content(msg.role, [f.TextPart(msg.content)]);\n  }\n}\n\nextension GenerateContentResponseMapper on f.GenerateContentResponse {\n  ChatResult toChatResult(final String id, final String model) {\n    final candidate = candidates.first;\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: candidate.content.parts\n            .map(\n              (p) => switch (p) {\n                final f.TextPart p => p.text,\n                final f.InlineDataPart p => base64Encode(p.bytes),\n                final f.FileData p => p.fileUri,\n                f.FunctionResponse() || f.FunctionCall() => '',\n                f.ExecutableCodePart() => '',\n                f.CodeExecutionResultPart() => '',\n                f.UnknownPart() => '',\n              },\n            )\n            .nonNulls\n            .join('\\n'),\n      
  toolCalls: candidate.content.parts\n            .whereType<f.FunctionCall>()\n            .map(\n              (final call) => AIChatMessageToolCall(\n                id: call.name,\n                name: call.name,\n                argumentsRaw: jsonEncode(call.args),\n                arguments: call.args,\n              ),\n            )\n            .toList(growable: false),\n      ),\n      finishReason: _mapFinishReason(candidate.finishReason),\n      metadata: {\n        'model': model,\n        'block_reason': promptFeedback?.blockReason?.name,\n        'block_reason_message': promptFeedback?.blockReasonMessage,\n        'safety_ratings': candidate.safetyRatings\n            ?.map(\n              (r) => {\n                'category': r.category.name,\n                'probability': r.probability.name,\n              },\n            )\n            .toList(growable: false),\n        'citation_metadata': candidate.citationMetadata?.citations\n            .map(\n              (s) => {\n                'start_index': s.startIndex,\n                'end_index': s.endIndex,\n                'uri': s.uri.toString(),\n                'license': s.license,\n              },\n            )\n            .toList(growable: false),\n        'finish_message': candidate.finishMessage,\n      },\n      usage: LanguageModelUsage(\n        promptTokens: usageMetadata?.promptTokenCount,\n        responseTokens: usageMetadata?.candidatesTokenCount,\n        totalTokens: usageMetadata?.totalTokenCount,\n      ),\n    );\n  }\n\n  FinishReason _mapFinishReason(final f.FinishReason? 
reason) =>\n      switch (reason) {\n        f.FinishReason.unknown => FinishReason.unspecified,\n        f.FinishReason.stop => FinishReason.stop,\n        f.FinishReason.maxTokens => FinishReason.length,\n        f.FinishReason.safety => FinishReason.contentFilter,\n        f.FinishReason.recitation => FinishReason.recitation,\n        f.FinishReason.other => FinishReason.unspecified,\n        f.FinishReason.malformedFunctionCall ||\n        null => FinishReason.unspecified,\n      };\n}\n\nextension SafetySettingsMapper on List<ChatFirebaseVertexAISafetySetting> {\n  List<f.SafetySetting> toSafetySettings() {\n    return map(\n      (final setting) => f.SafetySetting(\n        switch (setting.category) {\n          ChatFirebaseVertexAISafetySettingCategory.unspecified =>\n            f.HarmCategory.unknown,\n          ChatFirebaseVertexAISafetySettingCategory.harassment =>\n            f.HarmCategory.harassment,\n          ChatFirebaseVertexAISafetySettingCategory.hateSpeech =>\n            f.HarmCategory.hateSpeech,\n          ChatFirebaseVertexAISafetySettingCategory.sexuallyExplicit =>\n            f.HarmCategory.sexuallyExplicit,\n          ChatFirebaseVertexAISafetySettingCategory.dangerousContent =>\n            f.HarmCategory.dangerousContent,\n        },\n        switch (setting.threshold) {\n          ChatFirebaseVertexAISafetySettingThreshold.unspecified =>\n            f.HarmBlockThreshold.none,\n          ChatFirebaseVertexAISafetySettingThreshold.blockLowAndAbove =>\n            f.HarmBlockThreshold.low,\n          ChatFirebaseVertexAISafetySettingThreshold.blockMediumAndAbove =>\n            f.HarmBlockThreshold.medium,\n          ChatFirebaseVertexAISafetySettingThreshold.blockOnlyHigh =>\n            f.HarmBlockThreshold.high,\n          ChatFirebaseVertexAISafetySettingThreshold.blockNone =>\n            f.HarmBlockThreshold.none,\n        },\n        f.HarmBlockMethod.unspecified,\n      ),\n    ).toList(growable: false);\n  }\n}\n\nextension 
ChatToolListMapper on List<ToolSpec> {\n  List<f.Tool> toToolList() {\n    return [\n      f.Tool.functionDeclarations(\n        map((tool) {\n          final schema = tool.inputJsonSchema.toSchema();\n          return f.FunctionDeclaration(\n            tool.name,\n            tool.description,\n            parameters: schema.properties ?? const {},\n            optionalParameters: schema.optionalProperties ?? const [],\n          );\n        }).toList(growable: false),\n      ),\n    ];\n  }\n}\n\nextension SchemaMapper on Map<String, dynamic> {\n  f.Schema toSchema() {\n    final jsonSchema = this;\n    final type = jsonSchema['type'] as String;\n    final description = jsonSchema['description'] as String?;\n    final nullable = jsonSchema['nullable'] as bool?;\n    final enumValues = jsonSchema['enum'] as List<String>?;\n    final format = jsonSchema['format'] as String?;\n    final items = jsonSchema['items'] as Map<String, dynamic>?;\n    final properties = jsonSchema['properties'] as Map<String, dynamic>?;\n    final requiredProperties = jsonSchema['required'] as List<String>?;\n\n    switch (type) {\n      case 'string':\n        if (enumValues != null) {\n          return f.Schema.enumString(\n            enumValues: enumValues,\n            description: description,\n            nullable: nullable,\n          );\n        } else {\n          return f.Schema.string(description: description, nullable: nullable);\n        }\n      case 'number':\n        return f.Schema.number(\n          description: description,\n          nullable: nullable,\n          format: format,\n        );\n      case 'integer':\n        return f.Schema.integer(\n          description: description,\n          nullable: nullable,\n          format: format,\n        );\n      case 'boolean':\n        return f.Schema.boolean(description: description, nullable: nullable);\n      case 'array':\n        if (items != null) {\n          final itemsSchema = items.toSchema();\n          
return f.Schema.array(\n            description: description,\n            nullable: nullable,\n            items: itemsSchema,\n          );\n        }\n        throw ArgumentError('Array schema must have \"items\" property');\n      case 'object':\n        if (properties != null) {\n          final propertiesSchema = properties.map(\n            (key, value) =>\n                MapEntry(key, (value as Map<String, dynamic>).toSchema()),\n          );\n          return f.Schema.object(\n            properties: propertiesSchema,\n            optionalProperties: requiredProperties != null\n                ? propertiesSchema.keys\n                      .where((key) => !requiredProperties.contains(key))\n                      .toList(growable: false)\n                : propertiesSchema.keys.toList(growable: false),\n            description: description,\n            nullable: nullable,\n          );\n        }\n        throw ArgumentError('Object schema must have \"properties\" property');\n      default:\n        throw ArgumentError('Invalid schema type: $type');\n    }\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  f.ToolConfig toToolConfig() {\n    return switch (this) {\n      ChatToolChoiceNone _ => f.ToolConfig(\n        functionCallingConfig: f.FunctionCallingConfig.none(),\n      ),\n      ChatToolChoiceAuto _ => f.ToolConfig(\n        functionCallingConfig: f.FunctionCallingConfig.auto(),\n      ),\n      ChatToolChoiceRequired() => f.ToolConfig(\n        functionCallingConfig: f.FunctionCallingConfig.auto(),\n      ),\n      final ChatToolChoiceForced t => f.ToolConfig(\n        functionCallingConfig: f.FunctionCallingConfig.any({t.name}),\n      ),\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_firebase/lib/src/chat_models/vertex_ai/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_firebase_vertex_ai_options}\n/// Options to pass into the Vertex AI for Firebase model.\n///\n/// You can find a list of available models here:\n/// https://firebase.google.com/docs/vertex-ai/gemini-models\n/// {@endtemplate}\n@immutable\nclass ChatFirebaseVertexAIOptions extends ChatModelOptions {\n  /// {@macro chat_firebase_vertex_ai_options}\n  const ChatFirebaseVertexAIOptions({\n    super.model,\n    this.topP,\n    this.topK,\n    this.candidateCount,\n    this.maxOutputTokens,\n    this.temperature,\n    this.stopSequences,\n    this.responseMimeType,\n    this.responseSchema,\n    this.safetySettings,\n    this.backend,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// The maximum cumulative probability of tokens to consider when sampling.\n  /// The model uses combined Top-k and nucleus sampling. Tokens are sorted\n  /// based on their assigned probabilities so that only the most likely\n  /// tokens are considered. Top-k sampling directly limits the maximum\n  /// number of tokens to consider, while Nucleus sampling limits number of\n  /// tokens based on the cumulative probability.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_p`\n  /// attribute of the `Model` returned the `getModel` function.\n  final double? topP;\n\n  /// The maximum number of tokens to consider when sampling. The model\n  /// uses combined Top-k and nucleus sampling. Top-k sampling considers\n  /// the set of `top_k` most probable tokens. Defaults to 40. Note:\n  ///\n  /// The default value varies by model, see the `Model.top_k` attribute\n  /// of the `Model` returned the `getModel` function.\n  final int? topK;\n\n  /// Number of generated responses to return. 
This value must be between\n  /// [1, 8], inclusive. If unset, this will default to 1.\n  final int? candidateCount;\n\n  /// The maximum number of tokens to include in a candidate. If unset,\n  /// this will default to `output_token_limit` specified in the `Model`\n  /// specification.\n  final int? maxOutputTokens;\n\n  /// Controls the randomness of the output.\n  ///\n  /// Note: The default value varies by model, see the `Model.temperature`\n  /// attribute of the `Model` returned the `getModel` function.\n  ///\n  /// Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will\n  /// produce responses that are more varied and creative, while a value\n  /// closer to 0.0 will typically result in more straightforward responses\n  /// from the model.\n  final double? temperature;\n\n  /// The set of character sequences (up to 5) that will stop output generation.\n  /// If specified, the API will stop at the first appearance of a stop sequence.\n  /// The stop sequence will not be included as part of the response.\n  final List<String>? stopSequences;\n\n  /// Output response mimetype of the generated candidate text.\n  ///\n  /// Supported mimetype:\n  /// - `text/plain`: (default) Text output.\n  /// - `application/json`: JSON response in the candidates.\n  final String? 
responseMimeType;\n\n  /// Output response schema of the generated candidate text.\n  /// Following the [JSON Schema specification](https://json-schema.org).\n  ///\n  /// Note: This only applies when the [responseMimeType] supports\n  /// a schema; currently this is limited to `application/json`.\n  ///\n  /// Example:\n  /// ```json\n  /// {\n  ///   'type': 'object',\n  ///   'properties': {\n  ///     'answer': {\n  ///       'type': 'string',\n  ///       'description': 'The answer to the question being asked',\n  ///     },\n  ///     'sources': {\n  ///       'type': 'array',\n  ///       'items': {'type': 'string'},\n  ///       'description': 'The sources used to answer the question',\n  ///     },\n  ///   },\n  ///   'required': ['answer', 'sources'],\n  /// }\n  /// ```\n  final Map<String, dynamic>? responseSchema;\n\n  /// A list of unique [ChatFirebaseVertexAISafetySetting] instances for blocking\n  /// unsafe content.\n  ///\n  /// This will be enforced on the generated output. There should not be more than\n  /// one setting for each type. The API will block any contents and responses that\n  /// fail to meet the thresholds set by these settings.\n  ///\n  /// This list overrides the default settings for each category specified. If there\n  /// is no safety setting for a given category provided in the list, the API will use\n  /// the default safety setting for that category.\n  final List<ChatFirebaseVertexAISafetySetting>? safetySettings;\n\n  /// The Firebase AI backend to use.\n  ///\n  /// - [FirebaseAIBackend.vertexAI] (default): Vertex AI Gemini API.\n  ///   Requires Blaze plan (pay-as-you-go). Best for production.\n  /// - [FirebaseAIBackend.googleAI]: Gemini Developer API.\n  ///   Available on Spark plan (free tier). Good for development/testing.\n  final FirebaseAIBackend? backend;\n\n  @override\n  ChatFirebaseVertexAIOptions copyWith({\n    final String? model,\n    final double? topP,\n    final int? topK,\n    final int? 
candidateCount,\n    final int? maxOutputTokens,\n    final double? temperature,\n    final List<String>? stopSequences,\n    final String? responseMimeType,\n    final List<ChatFirebaseVertexAISafetySetting>? safetySettings,\n    final FirebaseAIBackend? backend,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatFirebaseVertexAIOptions(\n      model: model ?? this.model,\n      topP: topP ?? this.topP,\n      topK: topK ?? this.topK,\n      candidateCount: candidateCount ?? this.candidateCount,\n      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,\n      temperature: temperature ?? this.temperature,\n      stopSequences: stopSequences ?? this.stopSequences,\n      responseMimeType: responseMimeType ?? this.responseMimeType,\n      safetySettings: safetySettings ?? this.safetySettings,\n      backend: backend ?? this.backend,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatFirebaseVertexAIOptions merge(\n    covariant final ChatFirebaseVertexAIOptions? 
other,\n  ) {\n    return copyWith(\n      model: other?.model,\n      topP: other?.topP,\n      topK: other?.topK,\n      candidateCount: other?.candidateCount,\n      maxOutputTokens: other?.maxOutputTokens,\n      temperature: other?.temperature,\n      stopSequences: other?.stopSequences,\n      responseMimeType: other?.responseMimeType,\n      safetySettings: other?.safetySettings,\n      backend: other?.backend,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatFirebaseVertexAIOptions other) {\n    return model == other.model &&\n        topP == other.topP &&\n        topK == other.topK &&\n        candidateCount == other.candidateCount &&\n        maxOutputTokens == other.maxOutputTokens &&\n        temperature == other.temperature &&\n        const ListEquality<String>().equals(\n          stopSequences,\n          other.stopSequences,\n        ) &&\n        responseMimeType == other.responseMimeType &&\n        const ListEquality<ChatFirebaseVertexAISafetySetting>().equals(\n          safetySettings,\n          other.safetySettings,\n        ) &&\n        backend == other.backend &&\n        const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n        toolChoice == other.toolChoice &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        topP.hashCode ^\n        topK.hashCode ^\n        candidateCount.hashCode ^\n        maxOutputTokens.hashCode ^\n        temperature.hashCode ^\n        const ListEquality<String>().hash(stopSequences) ^\n        responseMimeType.hashCode ^\n        const ListEquality<ChatFirebaseVertexAISafetySetting>().hash(\n          safetySettings,\n        ) ^\n        backend.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        concurrencyLimit.hashCode;\n  
}\n}\n\n/// {@template chat_google_generative_ai_safety_setting}\n/// Safety setting, affecting the safety-blocking behavior.\n/// Passing a safety setting for a category changes the allowed probability that\n/// content is blocked.\n/// {@endtemplate}\n@immutable\nclass ChatFirebaseVertexAISafetySetting {\n  /// {@macro chat_google_generative_ai_safety_setting}\n  const ChatFirebaseVertexAISafetySetting({\n    required this.category,\n    required this.threshold,\n  });\n\n  /// The category for this setting.\n  final ChatFirebaseVertexAISafetySettingCategory category;\n\n  /// Controls the probability threshold at which harm is blocked.\n  final ChatFirebaseVertexAISafetySettingThreshold threshold;\n\n  /// Creates a copy of this [ChatFirebaseVertexAISafetySetting] object with\n  /// the given fields replaced with the new values.\n  ChatFirebaseVertexAISafetySetting copyWith({\n    final ChatFirebaseVertexAISafetySettingCategory? category,\n    final ChatFirebaseVertexAISafetySettingThreshold? threshold,\n  }) {\n    return ChatFirebaseVertexAISafetySetting(\n      category: category ?? this.category,\n      threshold: threshold ?? 
this.threshold,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatFirebaseVertexAISafetySetting other) {\n    return category == other.category && threshold == other.threshold;\n  }\n\n  @override\n  int get hashCode {\n    return category.hashCode ^ threshold.hashCode;\n  }\n}\n\n/// Safety settings categorizes.\n///\n/// Docs: https://ai.google.dev/docs/safety_setting_gemini\nenum ChatFirebaseVertexAISafetySettingCategory {\n  /// The harm category is unspecified.\n  unspecified,\n\n  /// The harm category is harassment.\n  harassment,\n\n  /// The harm category is hate speech.\n  hateSpeech,\n\n  /// The harm category is sexually explicit content.\n  sexuallyExplicit,\n\n  /// The harm category is dangerous content.\n  dangerousContent,\n}\n\n/// Controls the probability threshold at which harm is blocked.\n///\n/// Docs: https://ai.google.dev/docs/safety_setting_gemini\nenum ChatFirebaseVertexAISafetySettingThreshold {\n  /// Threshold is unspecified, block using default threshold.\n  unspecified,\n\n  /// \tBlock when low, medium or high probability of unsafe content.\n  blockLowAndAbove,\n\n  /// Block when medium or high probability of unsafe content.\n  blockMediumAndAbove,\n\n  /// Block when high probability of unsafe content.\n  blockOnlyHigh,\n\n  /// Always show regardless of probability of unsafe content.\n  blockNone,\n}\n\n/// The Firebase AI backend to use.\nenum FirebaseAIBackend {\n  /// Vertex AI Gemini API - requires Blaze plan (pay-as-you-go).\n  /// Recommended for production use.\n  vertexAI,\n\n  /// Gemini Developer API - available on Spark plan (free tier).\n  /// Has rate limits but no billing required.\n  googleAI,\n}\n"
  },
  {
    "path": "packages/langchain_firebase/pubspec.yaml",
    "content": "name: langchain_firebase\ndescription: LangChain.dart integration module for Firebase (Gemini, VertexAI for Firebase, Firestore, etc.).\nversion: 0.3.2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_firebase\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_firebase\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - firebase\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\n  flutter: \">=3.27.0\"\nresolution: workspace\n\ndependencies:\n  collection: ^1.19.1\n  firebase_ai: ^3.4.0\n  firebase_app_check: ^0.4.1+1\n  firebase_auth: ^6.1.1\n  firebase_core: ^4.2.0\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  uuid: ^4.5.1\n\ndev_dependencies:\n  flutter_test:\n    sdk: flutter\n"
  },
  {
    "path": "packages/langchain_google/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_google/CHANGELOG.md",
    "content": "## 0.7.1+2\n\n - Update a dependency to the latest release.\n\n## 0.7.1+1\n\n - Update a dependency to the latest release.\n\n## 0.7.1\n\n - **FIX**(langchain_google): Remove ServiceAccountCredentials stub export ([#838](https://github.com/davidmigloz/langchain_dart/issues/838)). ([d0a058b3](https://github.com/davidmigloz/langchain_dart/commit/d0a058b3f5488470362564fa84c350bdb7b41b14))\n - **FIX**(langchain_google): Add web platform compatibility for HttpClientAuthProvider ([#832](https://github.com/davidmigloz/langchain_dart/issues/832)). ([3a9e995b](https://github.com/davidmigloz/langchain_dart/commit/3a9e995b6dc75fe403175f6183c04387b6aa4e03))\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n## 0.7.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.7.0\n\n> Note: This release has breaking changes.\n\n - **REFACTOR**: Migrate langchain_google to the new googleai_dart client ([#788](https://github.com/davidmigloz/langchain_dart/issues/788)). ([f28edec9](https://github.com/davidmigloz/langchain_dart/commit/f28edec9206450d753db181f8af254df339d8290))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). 
([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## NEXT\n\n - **BREAKING**: Migrated from deprecated `google_generative_ai` to `googleai_dart` package\n   - Updated ChatGoogleGenerativeAI to use the new googleai_dart client with resource-based API structure\n   - Updated GoogleGenerativeAIEmbeddings to use the new googleai_dart client with synchronous batch embeddings API and resource-based structure\n   - API calls now use resource organization (e.g., `client.models.generateContent()` instead of `client.generateContent()`)\n   - Changed default embeddings model to `gemini-embedding-001` (recommended stable model)\n   - Removed CustomHttpClient utility (replaced by GoogleAIConfig)\n   - Restored support for reduced dimensionality via `dimensions` parameter\n - **FEAT**: Added support for `presencePenalty`, `frequencyPenalty`, and `cachedContent` parameters in ChatGoogleGenerativeAIOptions\n - **DOCS**: Updated model documentation to include Gemini 2.5 series models (gemini-2.5-pro, gemini-2.5-flash, gemini-2.5-flash-lite)\n - **DOCS**: Added documentation for embeddings models including gemini-embedding-001 and flexible dimensions support\n - **FIX**: Improved error handling in embeddings batch API with specific fallback for model field validation errors\n\n## 0.6.5+2\n\n - **FIX**: Batch sequential tool responses in GoogleAI & Firebase VertexAI ([#757](https://github.com/davidmigloz/langchain_dart/issues/757)). ([8ff44486](https://github.com/davidmigloz/langchain_dart/commit/8ff4448665d26b49c1e1077d0822703e7d853d39))\n\n## 0.6.5+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.6.5\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). 
([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.6.4+2\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.6.4+1\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.6.4\n\n - **FEAT**: Add support for code execution in ChatGoogleGenerativeAI ([#564](https://github.com/davidmigloz/langchain_dart/issues/564)). ([020bc096](https://github.com/davidmigloz/langchain_dart/commit/020bc096e2bb83bd372d0568a111481df188a7f2))\n\n## 0.6.3+1\n\n - **FEAT**: Add support for reduced output dimensionality in GoogleGenerativeAIEmbeddings ([#544](https://github.com/davidmigloz/langchain_dart/issues/544)). ([d5880704](https://github.com/davidmigloz/langchain_dart/commit/d5880704c492889144738acffd49674b91e63981))\n - **DOCS**: Update Google's models in documentation ([#551](https://github.com/davidmigloz/langchain_dart/issues/551)). 
([1da543f7](https://github.com/davidmigloz/langchain_dart/commit/1da543f7ab90eb39b599a6fdd0cc52e2cbc1460d))\n\n## 0.6.2\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n## 0.6.1\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.6.0\n\n> Note: `ChatGoogleGenerativeAI` now uses `gemini-1.5-flash` model by default.\n\n - **BREAKING** **FEAT**: Update ChatGoogleGenerativeAI default model to  gemini-1.5-flash ([#462](https://github.com/davidmigloz/langchain_dart/issues/462)). ([c8b30c90](https://github.com/davidmigloz/langchain_dart/commit/c8b30c906a17751547cc340f987b6670fbd67e69))\n - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n - **FEAT**: Support response MIME type and schema in ChatGoogleGenerativeAI ([#461](https://github.com/davidmigloz/langchain_dart/issues/461)). 
([e258399e](https://github.com/davidmigloz/langchain_dart/commit/e258399e03437e8abe25417a14671dfb719cb273))\n - **REFACTOR**: Migrate conditional imports to js_interop ([#453](https://github.com/davidmigloz/langchain_dart/issues/453)). ([a6a78cfe](https://github.com/davidmigloz/langchain_dart/commit/a6a78cfe05fb8ce68e683e1ad4395ca86197a6c5))\n\n## 0.5.1\n\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n\n## 0.5.0\n\n> Note: `ChatGoogleGenerativeAI` and `GoogleGenerativeAIEmbeddings` now use the version `v1beta` of the Gemini API (instead of `v1`) which support the latest models (`gemini-1.5-pro-latest` and `gemini-1.5-flash-latest`).\n> \n> VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the new [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package.\n\n - **FEAT**: Add support for tool calling in ChatGoogleGenerativeAI ([#419](https://github.com/davidmigloz/langchain_dart/issues/419)). ([df41f38a](https://github.com/davidmigloz/langchain_dart/commit/df41f38aab64651a06a42fc41d9c35f33250a3e9))\n - **DOCS**: Add Gemini 1.5 Flash to models list ([#423](https://github.com/davidmigloz/langchain_dart/issues/423)). ([40f4c9de](https://github.com/davidmigloz/langchain_dart/commit/40f4c9de9c25804e298fd481c80f8c52d53302fb))\n - **BREAKING** **FEAT**: Migrate internal client from googleai_dart to google_generative_ai ([#407](https://github.com/davidmigloz/langchain_dart/issues/407)). ([fa4b5c37](https://github.com/davidmigloz/langchain_dart/commit/fa4b5c376a191fea50c3f8b1d6b07cef0480a74e))\n\n## 0.4.0\n\n> Note: This release has breaking changes.  
\n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n## 0.3.0+2\n\n - Update a dependency to the latest release.\n\n## 0.3.0+1\n\n - Update a dependency to the latest release.\n\n## 0.3.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). 
([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n - **FEAT**: Add streaming support in ChatGoogleGenerativeAI ([#360](https://github.com/davidmigloz/langchain_dart/issues/360)). ([68bfdb04](https://github.com/davidmigloz/langchain_dart/commit/68bfdb04e417a7023b8872cbe0798243503fbf3d))\n - **FEAT**: Support tuned models in ChatGoogleGenerativeAI ([#359](https://github.com/davidmigloz/langchain_dart/issues/359)). ([764b633d](https://github.com/davidmigloz/langchain_dart/commit/764b633df1412f53fc238afe1e97d1e1ac22f206))\n - **FEAT**: Add support for GoogleGenerativeAIEmbeddings ([#362](https://github.com/davidmigloz/langchain_dart/issues/362)). ([d4f888a0](https://github.com/davidmigloz/langchain_dart/commit/d4f888a0e347608f0538d656d0c5507b61e5ee7e))\n - **FEAT**: Support output dimensionality in GoogleGenerativeAIEmbeddings ([#373](https://github.com/davidmigloz/langchain_dart/issues/373)). ([6dcb27d8](https://github.com/davidmigloz/langchain_dart/commit/6dcb27d861fa65d2c882e31ce28e8c0a92b65cc1))\n - **FEAT**: Support updating API key in Google AI client ([#357](https://github.com/davidmigloz/langchain_dart/issues/357)). ([b9b808e7](https://github.com/davidmigloz/langchain_dart/commit/b9b808e72f02b9f38ab355d581284a0d848d4bd1))\n\n## 0.2.4\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.2.3+3\n\n - **DOCS**: Update CHANGELOG.md. 
([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.2.3+2\n\n - Update a dependency to the latest release.\n\n## 0.2.3+1\n\n - **REFACTOR**: Remove tiktoken in favour of countTokens API on VertexAI ([#307](https://github.com/davidmigloz/langchain_dart/issues/307)). ([8158572b](https://github.com/davidmigloz/langchain_dart/commit/8158572b15c0525b9caa9bc71fbbbee6ab4458fe))\n\n## 0.2.3\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). ([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 0.2.2+1\n\n - Update a dependency to the latest release.\n\n## 0.2.2\n\n - Update a dependency to the latest release.\n\n## 0.2.1+2\n\n - Update a dependency to the latest release.\n\n## 0.2.1+1\n\n - **DOCS**: Update langchain_google README. ([5b2acfa1](https://github.com/davidmigloz/langchain_dart/commit/5b2acfa1667e63774526cb10e9adf53ff8c79530))\n\n## 0.2.1\n\n - **FEAT**: Add support for ChatGoogleGenerativeAI wrapper (Gemini API) ([#270](https://github.com/davidmigloz/langchain_dart/issues/270)). 
([5d006c12](https://github.com/davidmigloz/langchain_dart/commit/5d006c121172192765b1a76582588c05b779e9c0))\n\n## 0.2.0+1\n\n - Update a dependency to the latest release.\n\n## 0.2.0\n\n> Note: This release has breaking changes.\n> \n> Migration guides:\n> - [`VertexAI`](https://github.com/davidmigloz/langchain_dart/issues/241)\n> - [`ChatVertexAI`](https://github.com/davidmigloz/langchain_dart/issues/242)\n\n - **BREAKING** **FEAT**: Move all model config options to VertexAIOptions ([#241](https://github.com/davidmigloz/langchain_dart/issues/241)). ([a714882a](https://github.com/davidmigloz/langchain_dart/commit/a714882a3026c7f381b6853d6b61506060b0775e))\n - **BREAKING** **FEAT**: Move all model config options to ChatVertexAIOptions ([#242](https://github.com/davidmigloz/langchain_dart/issues/242)). ([89bef8a2](https://github.com/davidmigloz/langchain_dart/commit/89bef8a22fb0b74ffd9d7a4028c64b2d94d38578))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). ([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n\n## 0.1.0+4\n\n - Update a dependency to the latest release.\n\n## 0.1.0+3\n\n - Update a dependency to the latest release.\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). 
([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n## 0.0.10+1\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n## 0.0.10\n\n - **FEAT**: Add result id in ChatVertexAI generations ([#195](https://github.com/davidmigloz/langchain_dart/issues/195)). ([a5bea6d3](https://github.com/davidmigloz/langchain_dart/commit/a5bea6d3aefbb53ed55d3abda0f51f5878445b72))\n\n## 0.0.9\n\n> Note: This release has breaking changes.\n\n - **DOCS**: Update changelog. ([d45d624a](https://github.com/davidmigloz/langchain_dart/commit/d45d624a0ba12e53c4e78a29750cad30d66c61c5))\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n\n## 0.0.8\n\n - Updated `langchain` dependency\n\n## 0.0.7+1\n\n - **REFACTOR**: Require `http.Client` instead of `AuthClient` ([#156](https://github.com/davidmigloz/langchain_dart/issues/156)). ([0f7fee7f](https://github.com/davidmigloz/langchain_dart/commit/0f7fee7f0780e5b650ec50307a7fda65e242e822))\n\n## 0.0.7\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Support document title in VertexAIEmbeddings ([#154](https://github.com/davidmigloz/langchain_dart/issues/154)). ([6b763731](https://github.com/davidmigloz/langchain_dart/commit/6b76373139bb50e8d0e59b3f63b54f6adae3d498))\n - **FEAT**: Support task type in VertexAIEmbeddings ([#151](https://github.com/davidmigloz/langchain_dart/issues/151)). ([8a2199e2](https://github.com/davidmigloz/langchain_dart/commit/8a2199e26a945f7d2ad8d3da3ca14e083172f6f1))\n - **DOCS**: Fix invalid package topics. ([f81b833a](https://github.com/davidmigloz/langchain_dart/commit/f81b833aae33e0a945ef4450da12344886224bae))\n - **DOCS**: Add topics to pubspecs. 
([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n - **BREAKING** **FEAT**: Add default and call options in VertexAI and ChatVertexAI ([#155](https://github.com/davidmigloz/langchain_dart/issues/155)). ([fe1b12ea](https://github.com/davidmigloz/langchain_dart/commit/fe1b12ea282cd587f9dc78bd959741781ebb6d35))\n\n## 0.0.6\n\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n## 0.0.5\n\n - **FEAT**: Add support for Chroma VectorStore ([#139](https://github.com/davidmigloz/langchain_dart/issues/139)). ([098783b4](https://github.com/davidmigloz/langchain_dart/commit/098783b4895ab30bb61d07355a0b587ff76b9175))\n - **DOCS**: Fix typos. ([282cfa24](https://github.com/davidmigloz/langchain_dart/commit/282cfa24caa7b91ce28db6b1997af4c2c3ecf3e4))\n - **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n## 0.0.4\n\n - **FEAT**: Support filtering in VertexAI Matching Engine ([#136](https://github.com/davidmigloz/langchain_dart/issues/136)). ([768c6987](https://github.com/davidmigloz/langchain_dart/commit/768c6987de5b36b60090a1fe94f49483da11b885))\n - **FEAT**: Allow to pass vector search config ([#135](https://github.com/davidmigloz/langchain_dart/issues/135)). ([5b8fa5a3](https://github.com/davidmigloz/langchain_dart/commit/5b8fa5a3fcaf785615016be1d5da0a003178cfa9))\n - **DOCS**: Fix API documentation errors ([#138](https://github.com/davidmigloz/langchain_dart/issues/138)). 
([1aa38fce](https://github.com/davidmigloz/langchain_dart/commit/1aa38fce17eed7f325e7872d03096740256d57be))\n\n## 0.0.3\n\n - **FEAT**: Infer queryRootUrl in VertexAIMatchingEngine ([#133](https://github.com/davidmigloz/langchain_dart/issues/133)). ([c5353368](https://github.com/davidmigloz/langchain_dart/commit/c5353368d1455756554f6640d33d0b3752476eb9))\n\n## 0.0.2+2\n\n - Update a dependency to the latest release.\n\n## 0.0.2+1\n\n - **DOCS**: Add VertexAI Matching Engine sample setup script ([#121](https://github.com/davidmigloz/langchain_dart/issues/121)). ([ed2e1549](https://github.com/davidmigloz/langchain_dart/commit/ed2e1549ca1d6bb0223231bcbe0c1c4a6a198402))\n\n## 0.0.2\n\n - **FEAT**: Integrate Vertex AI Matching Engine vector store ([#103](https://github.com/davidmigloz/langchain_dart/issues/103)). ([289c3eef](https://github.com/davidmigloz/langchain_dart/commit/289c3eef722206ac9dea0c968c036ad3289d10be))\n - **DOCS**: Update readme. ([a64860ce](https://github.com/davidmigloz/langchain_dart/commit/a64860ceda8fe926b720086cf7c86df2b02abf35))\n\n## 0.0.1\n\n - **REFACTOR**: Move Vertex AI client to its own package ([#111](https://github.com/davidmigloz/langchain_dart/issues/111)). ([d8aea156](https://github.com/davidmigloz/langchain_dart/commit/d8aea15633f1a9fb0df35cf9cc44bbc93ad46cd8))\n - **FEAT**: Integrate Google Vertex AI PaLM Embeddings ([#100](https://github.com/davidmigloz/langchain_dart/issues/100)). ([d777eccc](https://github.com/davidmigloz/langchain_dart/commit/d777eccc0c81c58b322f28e6e3c4a8763f3f84b7))\n - **FEAT**: Integrate Google Vertex AI PaLM Chat Model ([#99](https://github.com/davidmigloz/langchain_dart/issues/99)). ([3897595d](https://github.com/davidmigloz/langchain_dart/commit/3897595db597d5957ef80ae7a1de35c5f41265b8))\n - **FEAT**: Integrate Google Vertex AI PaLM Text model ([#98](https://github.com/davidmigloz/langchain_dart/issues/98)). 
([b2746c23](https://github.com/davidmigloz/langchain_dart/commit/b2746c235d68045ba20afd1f2be7c24dcccb5f24))\n - **FEAT**: Add GCP Vertex AI Model Garden API client ([#109](https://github.com/davidmigloz/langchain_dart/issues/109)). ([5b9bb063](https://github.com/davidmigloz/langchain_dart/commit/5b9bb063a03fb290305fbc0bec502a3c93077583))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_google/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_google/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Google\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_google](https://img.shields.io/pub/v/langchain_google.svg)](https://pub.dev/packages/langchain_google)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nGoogle module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- LLMs:\n  * `VertexAI`: wrapper around GCP Vertex AI text models API (aka PaLM API for text).\n- Chat models:\n  * `ChatVertexAI`: wrapper around GCP Vertex AI text chat models API (aka PaLM API for chat).\n  * `ChatGoogleGenerativeAI`: wrapper around [Google AI for Developers](https://ai.google.dev) API (Gemini).\n- Embeddings:\n  * `VertexAIEmbeddings`: wrapper around GCP Vertex AI text embedding models API.\n  * `GoogleGenerativeAIEmbeddings`: wrapper around [Google AI for Developers](https://ai.google.dev) API (Gemini).\n- Vector stores:\n  * `VertexAIMatchingEngine`: vector store that uses GCP Vertex AI Matching \n    Engine and Cloud Storage.\n\n> Note: VertexAI for Firebase (`ChatFirebaseVertexAI`) is available in the [`langchain_firebase`](https://pub.dev/packages/langchain_firebase) package. \n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_google/example/langchain_google_example.dart",
    "content": "// ignore_for_file: avoid_print, unused_element\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:googleapis_auth/auth_io.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nvoid main() async {\n  // Uncomment the example you want to run:\n  await _example1();\n  // await _example2();\n}\n\n/// The most basic building block of LangChain is calling an LLM on some input.\nFuture<void> _example1() async {\n  final llm = VertexAI(\n    authProvider: _getAuthProvider(),\n    project: _getProjectId(),\n    defaultOptions: const VertexAIOptions(temperature: 0.9),\n  );\n  final result = await llm('Tell me a joke');\n  print(result);\n}\n\n/// The most frequent use case is to create a chat-bot.\n/// This is the most basic one.\nFuture<void> _example2() async {\n  final chat = ChatVertexAI(\n    authProvider: _getAuthProvider(),\n    project: _getProjectId(),\n    defaultOptions: const ChatVertexAIOptions(temperature: 0),\n  );\n\n  while (true) {\n    stdout.write('> ');\n    final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? '');\n    final aiMsg = await chat([usrMsg]);\n    print(aiMsg.content);\n  }\n}\n\nHttpClientAuthProvider _getAuthProvider() {\n  final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n    json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n  );\n  return HttpClientAuthProvider(\n    credentials: serviceAccountCredentials,\n    scopes: [VertexAI.cloudPlatformScope],\n  );\n}\n\nString _getProjectId() {\n  return Platform.environment['VERTEX_AI_PROJECT_ID']!;\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/langchain_google.dart",
    "content": "/// LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.).\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\nexport 'src/embeddings/embeddings.dart';\nexport 'src/llms/llms.dart';\nexport 'src/utils/auth/http_client_auth_provider.dart';\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/chat_models.dart",
    "content": "export 'google_ai/chat_google_generative_ai.dart';\nexport 'google_ai/types.dart';\nexport 'vertex_ai/chat_vertex_ai.dart';\nexport 'vertex_ai/types.dart';\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/google_ai/chat_google_generative_ai.dart",
    "content": "import 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Google AI for Developers](https://ai.google.dev/) API\n/// (aka Gemini API).\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatGoogleGenerativeAI(apiKey: '...');\n/// final messages = [\n///   ChatMessage.humanText('Tell me a joke.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await chatModel.invoke(prompt);\n/// ```\n///\n/// - [Google AI API docs](https://ai.google.dev/docs)\n///\n/// ### Setup\n///\n/// To use `ChatGoogleGenerativeAI` you need to have an API key.\n/// You can get one [here](https://aistudio.google.com/app/apikey).\n///\n/// ### Available models\n///\n/// **Latest models (Gemini 2.5 series):**\n///\n/// - `gemini-2.5-flash`:\n///   * Best price-performance model\n///   * text / image / video / audio -> text\n///   * Max input tokens: 1,048,576 (1M)\n///   * Max output tokens: 8,192\n///   * Supports: code execution, function calling, search grounding\n///\n/// - `gemini-2.5-pro`:\n///   * State-of-the-art thinking model for complex reasoning\n///   * text / image / video / audio / PDF -> text\n///   * Max input tokens: 1,048,576 (1M)\n///   * Max output tokens: 8,192\n///   * Supports: code execution, function calling, search grounding\n///\n/// - `gemini-2.5-flash-lite`:\n///   * Fastest and most cost-efficient\n///   * text / image / video / audio / PDF -> text\n///   * Max input tokens: 1,048,576 (1M)\n///   * Max output tokens: 8,192\n///   * Supports: function calling, structured outputs\n///\n/// **Previous generation models:**\n///\n/// - `gemini-1.5-flash` (still supported)\n/// - `gemini-1.5-pro` (still supported)\n/// - `gemini-1.0-pro` 
(legacy)\n///\n/// Mind that this list may not be up-to-date.\n/// Refer to the [documentation](https://ai.google.dev/gemini-api/docs/models/gemini)\n/// for the updated list.\n///\n/// #### Tuned models\n///\n/// You can specify a tuned model by setting the `model` parameter to\n/// `tunedModels/{your-model-name}`. For example:\n///\n/// ```dart\n/// final chatModel = ChatGoogleGenerativeAI(\n///   defaultOptions: ChatGoogleGenerativeAIOptions(\n///     model: 'tunedModels/my-tuned-model',\n///   ),\n/// );\n/// ```\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatGoogleGenerativeAI(\n///   defaultOptions: ChatGoogleGenerativeAIOptions(\n///     model: 'gemini-2.5-flash',\n///     temperature: 0,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatGoogleGenerativeAIOptions(temperature: 1),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatGoogleGenerativeAI(apiKey: '...');\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatGoogleGenerativeAIOptions(model: 'gemini-2.5-flash')) | outputParser,\n///   'q2': prompt2 | 
chatModel.bind(const ChatGoogleGenerativeAIOptions(model: 'gemini-2.5-pro')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Advanced parameters\n///\n/// **Presence and Frequency Penalties:**\n///\n/// Control repetition in generated text:\n///\n/// ```dart\n/// final chatModel = ChatGoogleGenerativeAI(\n///   defaultOptions: ChatGoogleGenerativeAIOptions(\n///     presencePenalty: 0.5,  // Discourage repeating topics\n///     frequencyPenalty: 0.8,  // Discourage verbatim repetition\n///   ),\n/// );\n/// ```\n///\n/// **Content Caching:**\n///\n/// Reduce costs and latency for long contexts:\n///\n/// ```dart\n/// // First, create cached content via the Google AI API\n/// // Then reference it by name:\n/// final chatModel = ChatGoogleGenerativeAI(\n///   defaultOptions: ChatGoogleGenerativeAIOptions(\n///     cachedContent: 'cachedContents/abc123',\n///   ),\n/// );\n/// ```\n///\n/// See: https://ai.google.dev/gemini-api/docs/caching\n///\n/// ### Tool calling\n///\n/// [ChatGoogleGenerativeAI] supports tool calling.\n///\n/// Check the [docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools)\n/// for more information on how to use tools.\n///\n/// Example:\n/// ```dart\n/// const tool = ToolSpec(\n///   name: 'get_current_weather',\n///   description: 'Get the current weather in a given location',\n///   inputJsonSchema: {\n///     'type': 'object',\n///     'properties': {\n///       'location': {\n///         'type': 'string',\n///         'description': 'The city and state, e.g. 
San Francisco, CA',\n///       },\n///     },\n///     'required': ['location'],\n///   },\n/// );\n/// final chatModel = ChatGoogleGenerativeAI(\n///   defaultOptions: ChatGoogleGenerativeAIOptions(\n///     model: 'gemini-2.5-flash',\n///     temperature: 0,\n///     tools: [tool],\n///   ),\n/// );\n/// final res = await chatModel.invoke(\n///   PromptValue.string('What\\'s the weather like in Boston and Madrid right now in celsius?'),\n/// );\n/// ```\nclass ChatGoogleGenerativeAI\n    extends BaseChatModel<ChatGoogleGenerativeAIOptions> {\n  /// Create a new [ChatGoogleGenerativeAI] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Google AI API key. You can find your API key in the\n  ///   [Google AI Studio dashboard](https://aistudio.google.com/app/apikey).\n  /// - [ChatGoogleGenerativeAI.defaultOptions]\n  ///\n  /// Advanced configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Google AI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `retries`: the number of retries to attempt if a request fails.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  ChatGoogleGenerativeAI({\n    final String? apiKey,\n    final String? baseUrl,\n    final Map<String, String>? headers,\n    final Map<String, String>? queryParams,\n    final int retries = 3,\n    final http.Client? client,\n    super.defaultOptions = const ChatGoogleGenerativeAIOptions(\n      model: defaultModel,\n    ),\n  }) {\n    _googleAiClient = g.GoogleAIClient(\n      config: g.GoogleAIConfig(\n        authProvider: apiKey != null ? 
g.ApiKeyProvider(apiKey) : null,\n        baseUrl: baseUrl ?? 'https://generativelanguage.googleapis.com',\n        defaultHeaders: headers ?? const {},\n        defaultQueryParams: queryParams ?? const {},\n        retryPolicy: g.RetryPolicy(maxRetries: retries),\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// A client for interacting with Google AI API.\n  late g.GoogleAIClient _googleAiClient;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  @override\n  String get modelType => 'chat-google-generative-ai';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gemini-1.5-flash';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatGoogleGenerativeAIOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    final messages = input.toChatMessages();\n    final model = _getModel(options);\n\n    final request = _generateCompletionRequest(messages, options: options);\n    final response = await _googleAiClient.models.generateContent(\n      model: model,\n      request: request,\n    );\n\n    return response.toChatResult(id, model);\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatGoogleGenerativeAIOptions? options,\n  }) {\n    final id = _uuid.v4();\n    final messages = input.toChatMessages();\n    final model = _getModel(options);\n\n    final request = _generateCompletionRequest(messages, options: options);\n    return _googleAiClient.models\n        .streamGenerateContent(model: model, request: request)\n        .map((final response) => response.toChatResult(id, model));\n  }\n\n  /// Creates a [g.GenerateContentRequest] from the given input.\n  g.GenerateContentRequest _generateCompletionRequest(\n    final List<ChatMessage> messages, {\n    final ChatGoogleGenerativeAIOptions? 
options,\n  }) {\n    // Extract system instruction if present\n    final systemInstruction = messages.firstOrNull is SystemChatMessage\n        ? g.Content(parts: [g.TextPart(messages.firstOrNull!.contentAsString)])\n        : null;\n\n    return g.GenerateContentRequest(\n      contents: messages.toContentList(),\n      systemInstruction: systemInstruction,\n      safetySettings: (options?.safetySettings ?? defaultOptions.safetySettings)\n          ?.toSafetySettings(),\n      generationConfig: g.GenerationConfig(\n        candidateCount:\n            options?.candidateCount ?? defaultOptions.candidateCount,\n        stopSequences: options?.stopSequences ?? defaultOptions.stopSequences,\n        maxOutputTokens:\n            options?.maxOutputTokens ?? defaultOptions.maxOutputTokens,\n        temperature: options?.temperature ?? defaultOptions.temperature,\n        topP: options?.topP ?? defaultOptions.topP,\n        topK: options?.topK ?? defaultOptions.topK,\n        presencePenalty:\n            options?.presencePenalty ?? defaultOptions.presencePenalty,\n        frequencyPenalty:\n            options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,\n        responseMimeType:\n            options?.responseMimeType ?? defaultOptions.responseMimeType,\n        responseSchema:\n            options?.responseSchema ?? defaultOptions.responseSchema,\n      ),\n      tools: (options?.tools ?? defaultOptions.tools).toToolList(\n        enableCodeExecution:\n            options?.enableCodeExecution ??\n            defaultOptions.enableCodeExecution ??\n            false,\n      ),\n      toolConfig: (options?.toolChoice ?? defaultOptions.toolChoice)\n          ?.toToolConfig(),\n      cachedContent: options?.cachedContent ?? defaultOptions.cachedContent,\n    );\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatGoogleGenerativeAIOptions? 
options,\n  }) {\n    throw UnsupportedError(\n      'Google AI does not expose a tokenizer, only counting tokens is supported.',\n    );\n  }\n\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final ChatGoogleGenerativeAIOptions? options,\n  }) async {\n    final messages = promptValue.toChatMessages();\n    final model = _getModel(options);\n\n    final result = await _googleAiClient.models.countTokens(\n      model: model,\n      request: g.CountTokensRequest(contents: messages.toContentList()),\n    );\n\n    return result.totalTokens;\n  }\n\n  @override\n  void close() {\n    _googleAiClient.close();\n  }\n\n  /// Gets the model to use for the request.\n  String _getModel(final ChatGoogleGenerativeAIOptions? options) {\n    return options?.model ?? defaultOptions.model ?? defaultModel;\n  }\n\n  /// {@template chat_google_generative_ai_list_models}\n  /// Returns a list of available chat models from Google AI.\n  ///\n  /// This method fetches all models from the Google AI API and filters them\n  /// to only return models that support content generation (chat-capable models).\n  ///\n  /// The returned [ModelInfo] includes rich metadata such as:\n  /// - Token limits (input and output)\n  /// - Description\n  /// - Display name\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatGoogleGenerativeAI(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName}');\n  ///   print('  Input limit: ${model.inputTokenLimit}');\n  ///   print('  Output limit: ${model.outputTokenLimit}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final models = <g.Model>[];\n    String? 
pageToken;\n\n    // Paginate through all models\n    do {\n      final response = await _googleAiClient.models.list(pageToken: pageToken);\n      models.addAll(response.models);\n      pageToken = response.nextPageToken;\n    } while (pageToken != null);\n\n    // Filter to only chat-capable models (those supporting generateContent)\n    return models\n        .where(_isChatModel)\n        .map(\n          (final m) => ModelInfo(\n            id: _extractModelId(m.name),\n            displayName: m.displayName,\n            description: m.description,\n            inputTokenLimit: m.inputTokenLimit,\n            outputTokenLimit: m.outputTokenLimit,\n          ),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model supports chat (generateContent).\n  static bool _isChatModel(final g.Model model) {\n    return model.supportedGenerationMethods?.contains('generateContent') ??\n        false;\n  }\n\n  /// Extracts the model ID from the full resource name.\n  /// e.g., \"models/gemini-1.5-flash\" -> \"gemini-1.5-flash\"\n  static String _extractModelId(final String name) {\n    const prefix = 'models/';\n    if (name.startsWith(prefix)) {\n      return name.substring(prefix.length);\n    }\n    return name;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/google_ai/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport 'types.dart';\n\nextension ChatMessagesMapper on List<ChatMessage> {\n  List<g.Content> toContentList() {\n    final result = <g.Content>[];\n\n    // NOTE: Gemini can return multiple FunctionCall parts in a single model turn.\n    // The API requires the next turn to be ONE Content.functionResponses that\n    // includes the SAME number of FunctionResponse parts, in the SAME order.\n    // If we send each ToolChatMessage separately, the counts won't match and\n    // the API throws an error.\n    // Therefore we batch consecutive ToolChatMessage instances into a single\n    // Content with multiple FunctionResponseParts.\n    List<g.FunctionResponsePart>? pendingToolResponses;\n\n    void flushToolResponses() {\n      if (pendingToolResponses != null && pendingToolResponses!.isNotEmpty) {\n        result.add(g.Content(role: 'user', parts: pendingToolResponses!));\n        pendingToolResponses = null;\n      }\n    }\n\n    for (final message in this) {\n      if (message is SystemChatMessage) {\n        continue; // System messages are handled separately\n      }\n\n      if (message is ToolChatMessage) {\n        // Start (or continue) a batch of tool responses\n        pendingToolResponses ??= <g.FunctionResponsePart>[];\n        pendingToolResponses!.add(_toolMsgToFunctionResponsePart(message));\n        continue;\n      }\n\n      // Any non-tool message breaks the batch: flush before adding it\n      flushToolResponses();\n\n      switch (message) {\n        case final HumanChatMessage msg:\n          result.add(_mapHumanChatMessage(msg));\n        case final AIChatMessage msg:\n          result.add(_mapAIChatMessage(msg));\n        case final CustomChatMessage 
msg:\n          result.add(_mapCustomChatMessage(msg));\n        default:\n          throw UnsupportedError('Unknown message type: $message');\n      }\n    }\n\n    // Flush remaining batched tool responses at the end\n    flushToolResponses();\n\n    return result;\n  }\n\n  g.Content _mapHumanChatMessage(final HumanChatMessage msg) {\n    final contentParts = switch (msg.content) {\n      final ChatMessageContentText c => [g.TextPart(c.text)],\n      final ChatMessageContentImage c => [\n        if (c.data.startsWith('http'))\n          g.FileDataPart(g.FileData(fileUri: c.data))\n        else\n          g.InlineDataPart(\n            g.Blob.fromBytes(c.mimeType ?? 'image/jpeg', base64Decode(c.data)),\n          ),\n      ],\n      final ChatMessageContentMultiModal c =>\n        c.parts\n            .map(\n              (final p) => switch (p) {\n                final ChatMessageContentText c => g.TextPart(c.text),\n                final ChatMessageContentImage c =>\n                  c.data.startsWith('http')\n                      ? g.FileDataPart(g.FileData(fileUri: c.data))\n                      : g.InlineDataPart(\n                          g.Blob.fromBytes(\n                            c.mimeType ?? 
'image/jpeg',\n                            base64Decode(c.data),\n                          ),\n                        ),\n                ChatMessageContentMultiModal() => throw UnsupportedError(\n                  'Cannot have multimodal content in multimodal content',\n                ),\n              },\n            )\n            .toList(growable: false),\n    };\n    return g.Content(role: 'user', parts: contentParts);\n  }\n\n  g.Content _mapAIChatMessage(final AIChatMessage msg) {\n    final contentParts = [\n      if (msg.content.isNotEmpty) g.TextPart(msg.content),\n      if (msg.toolCalls.isNotEmpty)\n        ...msg.toolCalls.map(\n          (final call) => g.FunctionCallPart(\n            g.FunctionCall(name: call.name, args: call.arguments),\n          ),\n        ),\n    ];\n    return g.Content(role: 'model', parts: contentParts);\n  }\n\n  g.FunctionResponsePart _toolMsgToFunctionResponsePart(\n    final ToolChatMessage msg,\n  ) {\n    Map<String, Object?> response;\n    try {\n      response = jsonDecode(msg.content) as Map<String, Object?>;\n    } catch (_) {\n      response = {'result': msg.content};\n    }\n    return g.FunctionResponsePart(\n      g.FunctionResponse(name: msg.toolCallId, response: response),\n    );\n  }\n\n  g.Content _mapCustomChatMessage(final CustomChatMessage msg) {\n    return g.Content(role: msg.role, parts: [g.TextPart(msg.content)]);\n  }\n}\n\nextension GenerateContentResponseMapper on g.GenerateContentResponse {\n  ChatResult toChatResult(final String id, final String model) {\n    final candidate = candidates?.first;\n    if (candidate == null) {\n      throw StateError('No candidates in response');\n    }\n\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content:\n            candidate.content?.parts\n                .map(\n                  (p) => switch (p) {\n                    final g.TextPart p => p.text,\n                    final g.InlineDataPart p => p.inlineData.data,\n    
                final g.FileDataPart p => p.fileData.fileUri,\n                    g.FunctionResponsePart() => '',\n                    g.FunctionCallPart() => '',\n                    g.ExecutableCodePart() => '',\n                    g.CodeExecutionResultPart() => '',\n                    g.VideoMetadataPart() => '',\n                    g.ThoughtPart() => '',\n                    g.ThoughtSignaturePart() => '',\n                    g.PartMetadataPart() => '',\n                  },\n                )\n                .nonNulls\n                .join('\\n') ??\n            '',\n        toolCalls:\n            candidate.content?.parts\n                .whereType<g.FunctionCallPart>()\n                .map(\n                  (final part) => AIChatMessageToolCall(\n                    id: part.functionCall.name,\n                    name: part.functionCall.name,\n                    argumentsRaw: jsonEncode(part.functionCall.args ?? {}),\n                    arguments: part.functionCall.args ?? 
{},\n                  ),\n                )\n                .toList(growable: false) ??\n            [],\n      ),\n      finishReason: _mapFinishReason(candidate.finishReason),\n      metadata: {\n        'model': model,\n        'block_reason': promptFeedback?.blockReason?.name,\n        'safety_ratings': candidate.safetyRatings\n            ?.map(\n              (r) => {\n                'category': r.category.name,\n                'probability': r.probability.name,\n              },\n            )\n            .toList(growable: false),\n        'citation_metadata': candidate.citationMetadata?.citationSources\n            ?.map(\n              (final g.CitationSource s) => {\n                'start_index': s.startIndex,\n                'end_index': s.endIndex,\n                'uri': s.uri,\n                'title': s.title,\n                'license': s.license,\n                'publication_date': s.publicationDate?.toIso8601String(),\n              },\n            )\n            .toList(growable: false),\n        'executable_code': candidate.content?.parts\n            .whereType<g.ExecutableCodePart>()\n            .map(\n              (code) => {\n                'language': code.executableCode.language.name,\n                'code': code.executableCode.code,\n              },\n            )\n            .toList(growable: false),\n        'code_execution_result': candidate.content?.parts\n            .whereType<g.CodeExecutionResultPart>()\n            .map(\n              (result) => {\n                'outcome': result.codeExecutionResult.outcome.name,\n                'output': result.codeExecutionResult.output,\n              },\n            )\n            .toList(growable: false),\n      },\n      usage: LanguageModelUsage(\n        promptTokens: usageMetadata?.promptTokenCount,\n        responseTokens: usageMetadata?.candidatesTokenCount,\n        totalTokens: usageMetadata?.totalTokenCount,\n      ),\n    );\n  }\n\n  FinishReason 
_mapFinishReason(final g.FinishReason? reason) =>\n      switch (reason) {\n        g.FinishReason.unspecified => FinishReason.unspecified,\n        g.FinishReason.stop => FinishReason.stop,\n        g.FinishReason.maxTokens => FinishReason.length,\n        g.FinishReason.safety => FinishReason.contentFilter,\n        g.FinishReason.recitation => FinishReason.recitation,\n        g.FinishReason.other => FinishReason.unspecified,\n        g.FinishReason.blocklist => FinishReason.contentFilter,\n        g.FinishReason.prohibitedContent => FinishReason.contentFilter,\n        g.FinishReason.spii => FinishReason.contentFilter,\n        g.FinishReason.malformedFunctionCall => FinishReason.unspecified,\n        null => FinishReason.unspecified,\n      };\n}\n\nextension SafetySettingsMapper on List<ChatGoogleGenerativeAISafetySetting> {\n  List<g.SafetySetting> toSafetySettings() {\n    return map(\n      (final setting) => g.SafetySetting(\n        category: switch (setting.category) {\n          ChatGoogleGenerativeAISafetySettingCategory.unspecified =>\n            g.HarmCategory.unspecified,\n          ChatGoogleGenerativeAISafetySettingCategory.harassment =>\n            g.HarmCategory.harassment,\n          ChatGoogleGenerativeAISafetySettingCategory.hateSpeech =>\n            g.HarmCategory.hateSpeech,\n          ChatGoogleGenerativeAISafetySettingCategory.sexuallyExplicit =>\n            g.HarmCategory.sexuallyExplicit,\n          ChatGoogleGenerativeAISafetySettingCategory.dangerousContent =>\n            g.HarmCategory.dangerousContent,\n        },\n        threshold: switch (setting.threshold) {\n          ChatGoogleGenerativeAISafetySettingThreshold.unspecified =>\n            g.HarmBlockThreshold.unspecified,\n          ChatGoogleGenerativeAISafetySettingThreshold.blockLowAndAbove =>\n            g.HarmBlockThreshold.blockLowAndAbove,\n          ChatGoogleGenerativeAISafetySettingThreshold.blockMediumAndAbove =>\n            
g.HarmBlockThreshold.blockMediumAndAbove,\n          ChatGoogleGenerativeAISafetySettingThreshold.blockOnlyHigh =>\n            g.HarmBlockThreshold.blockOnlyHigh,\n          ChatGoogleGenerativeAISafetySettingThreshold.blockNone =>\n            g.HarmBlockThreshold.blockNone,\n        },\n      ),\n    ).toList(growable: false);\n  }\n}\n\nextension ChatToolListMapper on List<ToolSpec>? {\n  List<g.Tool>? toToolList({required final bool enableCodeExecution}) {\n    if (this == null && !enableCodeExecution) {\n      return null;\n    }\n\n    return [\n      g.Tool(\n        functionDeclarations: this\n            ?.map(\n              (tool) => g.FunctionDeclaration(\n                name: tool.name,\n                description: tool.description,\n                parameters: tool.inputJsonSchema.toSchema(),\n              ),\n            )\n            .toList(growable: false),\n        codeExecution: enableCodeExecution ? <String, dynamic>{} : null,\n      ),\n    ];\n  }\n}\n\nextension SchemaMapper on Map<String, dynamic> {\n  g.Schema toSchema() {\n    final jsonSchema = this;\n    final type = jsonSchema['type'] as String;\n    final description = jsonSchema['description'] as String?;\n    final nullable = jsonSchema['nullable'] as bool?;\n    final enumValues = (jsonSchema['enum'] as List?)?.cast<String>();\n    final format = jsonSchema['format'] as String?;\n    final items = jsonSchema['items'] as Map<String, dynamic>?;\n    final properties = jsonSchema['properties'] as Map<String, dynamic>?;\n    final requiredProperties = (jsonSchema['required'] as List?)\n        ?.cast<String>();\n\n    switch (type) {\n      case 'string':\n        return g.Schema(\n          type: g.SchemaType.string,\n          description: description,\n          nullable: nullable,\n          enumValues: enumValues,\n        );\n      case 'number':\n        return g.Schema(\n          type: g.SchemaType.number,\n          description: description,\n          nullable: 
nullable,\n          format: format,\n        );\n      case 'integer':\n        return g.Schema(\n          type: g.SchemaType.integer,\n          description: description,\n          nullable: nullable,\n          format: format,\n        );\n      case 'boolean':\n        return g.Schema(\n          type: g.SchemaType.boolean,\n          description: description,\n          nullable: nullable,\n        );\n      case 'array':\n        if (items != null) {\n          final itemsSchema = items.toSchema();\n          return g.Schema(\n            type: g.SchemaType.array,\n            items: itemsSchema,\n            description: description,\n            nullable: nullable,\n          );\n        }\n        throw ArgumentError('Array schema must have \"items\" property');\n      case 'object':\n        if (properties != null) {\n          final propertiesSchema = properties.map(\n            (key, value) =>\n                MapEntry(key, (value as Map<String, dynamic>).toSchema()),\n          );\n          return g.Schema(\n            type: g.SchemaType.object,\n            properties: propertiesSchema,\n            required: requiredProperties,\n            description: description,\n            nullable: nullable,\n          );\n        }\n        throw ArgumentError('Object schema must have \"properties\" property');\n      default:\n        throw ArgumentError('Invalid schema type: $type');\n    }\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  Map<String, dynamic> toToolConfig() {\n    return switch (this) {\n      ChatToolChoiceNone _ => {\n        'functionCallingConfig': {'mode': 'NONE'},\n      },\n      ChatToolChoiceAuto _ => {\n        'functionCallingConfig': {'mode': 'AUTO'},\n      },\n      ChatToolChoiceRequired() => {\n        'functionCallingConfig': {'mode': 'ANY'},\n      },\n      final ChatToolChoiceForced t => {\n        'functionCallingConfig': {\n          'mode': 'ANY',\n          'allowedFunctionNames': [t.name],\n     
   },\n      },\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/google_ai/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_google_generative_ai_options}\n/// Options to pass into the Google Generative AI Chat Model.\n///\n/// You can find a list of available models [here](https://ai.google.dev/models).\n/// {@endtemplate}\n@immutable\nclass ChatGoogleGenerativeAIOptions extends ChatModelOptions {\n  /// {@macro chat_google_generative_ai_options}\n  const ChatGoogleGenerativeAIOptions({\n    super.model,\n    this.topP,\n    this.topK,\n    this.candidateCount,\n    this.maxOutputTokens,\n    this.temperature,\n    this.stopSequences,\n    this.responseMimeType,\n    this.responseSchema,\n    this.safetySettings,\n    this.enableCodeExecution,\n    this.presencePenalty,\n    this.frequencyPenalty,\n    this.cachedContent,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// The maximum cumulative probability of tokens to consider when sampling.\n  /// The model uses combined Top-k and nucleus sampling. Tokens are sorted\n  /// based on their assigned probabilities so that only the most likely\n  /// tokens are considered. Top-k sampling directly limits the maximum\n  /// number of tokens to consider, while Nucleus sampling limits number of\n  /// tokens based on the cumulative probability.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_p`\n  /// attribute of the `Model` returned the `getModel` function.\n  final double? topP;\n\n  /// The maximum number of tokens to consider when sampling. The model\n  /// uses combined Top-k and nucleus sampling. Top-k sampling considers\n  /// the set of `top_k` most probable tokens. Defaults to 40. Note:\n  ///\n  /// The default value varies by model, see the `Model.top_k` attribute\n  /// of the `Model` returned the `getModel` function.\n  final int? 
topK;\n\n  /// Number of generated responses to return. This value must be between\n  /// [1, 8], inclusive. If unset, this will default to 1.\n  final int? candidateCount;\n\n  /// The maximum number of tokens to include in a candidate. If unset,\n  /// this will default to `output_token_limit` specified in the `Model`\n  /// specification.\n  final int? maxOutputTokens;\n\n  /// Controls the randomness of the output.\n  ///\n  /// Note: The default value varies by model, see the `Model.temperature`\n  /// attribute of the `Model` returned the `getModel` function.\n  ///\n  /// Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will\n  /// produce responses that are more varied and creative, while a value\n  /// closer to 0.0 will typically result in more straightforward responses\n  /// from the model.\n  final double? temperature;\n\n  /// The set of character sequences (up to 5) that will stop output generation.\n  /// If specified, the API will stop at the first appearance of a stop sequence.\n  /// The stop sequence will not be included as part of the response.\n  final List<String>? stopSequences;\n\n  /// Output response mimetype of the generated candidate text.\n  ///\n  /// Supported mimetype:\n  /// - `text/plain`: (default) Text output.\n  /// - `application/json`: JSON response in the candidates.\n  final String? 
responseMimeType;\n\n  /// Output response schema of the generated candidate text.\n  /// Following the [JSON Schema specification](https://json-schema.org).\n  ///\n  /// - Note: This only applies when the specified ``responseMIMEType`` supports\n  ///   a schema; currently this is limited to `application/json`.\n  ///\n  /// Example:\n  /// ```json\n  /// {\n  ///   'type': 'object',\n  ///   'properties': {\n  ///     'answer': {\n  ///       'type': 'string',\n  ///       'description': 'The answer to the question being asked',\n  ///     },\n  ///     'sources': {\n  ///       'type': 'array',\n  ///       'items': {'type': 'string'},\n  ///       'description': 'The sources used to answer the question',\n  ///     },\n  ///   },\n  ///   'required': ['answer', 'sources'],\n  /// },\n  /// ```\n  final Map<String, dynamic>? responseSchema;\n\n  /// A list of unique [ChatGoogleGenerativeAISafetySetting] instances for blocking\n  /// unsafe content.\n  ///\n  /// This will be enforced on the generated output. There should not be more than\n  /// one setting for each type. The API will block any contents and responses that\n  /// fail to meet the thresholds set by these settings.\n  ///\n  /// This list overrides the default settings for each category specified. If there\n  /// is no safety setting for a given category provided in the list, the API will use\n  /// the default safety setting for that category.\n  final List<ChatGoogleGenerativeAISafetySetting>? safetySettings;\n\n  /// When code execution is enabled the model may generate code and run it in the\n  /// process of generating a response to the prompt. When this happens the code\n  /// that was executed and it's output will be included in the response metadata\n  /// as `metadata['executable_code']` and `metadata['code_execution_result']`.\n  final bool? 
enableCodeExecution;\n\n  /// Presence penalty applied to the next token's logprobs if the token has\n  /// already been seen in the generated text.\n  ///\n  /// Positive values discourage tokens that have already appeared, making the\n  /// model more likely to introduce new topics.\n  ///\n  /// Values typically range from -1.0 to 1.0.\n  final double? presencePenalty;\n\n  /// Frequency penalty applied to the next token's logprobs, multiplied by the\n  /// number of times the token has been seen in the generated text.\n  ///\n  /// Positive values discourage tokens that have appeared frequently, making the\n  /// model less likely to repeat the same content verbatim.\n  ///\n  /// Values typically range from -1.0 to 1.0.\n  final double? frequencyPenalty;\n\n  /// The name of the cached content to use as context for prediction.\n  ///\n  /// Caching can significantly reduce costs and latency for requests that reuse\n  /// the same long context (like system instructions or large documents).\n  ///\n  /// Format: `cachedContents/{id}`\n  ///\n  /// To create cached content, use the Google AI API's caching endpoints.\n  /// See: https://ai.google.dev/gemini-api/docs/caching\n  final String? cachedContent;\n\n  @override\n  ChatGoogleGenerativeAIOptions copyWith({\n    final String? model,\n    final double? topP,\n    final int? topK,\n    final int? candidateCount,\n    final int? maxOutputTokens,\n    final double? temperature,\n    final List<String>? stopSequences,\n    final String? responseMimeType,\n    final Map<String, dynamic>? responseSchema,\n    final List<ChatGoogleGenerativeAISafetySetting>? safetySettings,\n    final bool? enableCodeExecution,\n    final double? presencePenalty,\n    final double? frequencyPenalty,\n    final String? cachedContent,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatGoogleGenerativeAIOptions(\n      model: model ?? 
this.model,\n      topP: topP ?? this.topP,\n      topK: topK ?? this.topK,\n      candidateCount: candidateCount ?? this.candidateCount,\n      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,\n      temperature: temperature ?? this.temperature,\n      stopSequences: stopSequences ?? this.stopSequences,\n      responseMimeType: responseMimeType ?? this.responseMimeType,\n      responseSchema: responseSchema ?? this.responseSchema,\n      safetySettings: safetySettings ?? this.safetySettings,\n      enableCodeExecution: enableCodeExecution ?? this.enableCodeExecution,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      cachedContent: cachedContent ?? this.cachedContent,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatGoogleGenerativeAIOptions merge(\n    covariant final ChatGoogleGenerativeAIOptions? 
other,\n  ) {\n    return copyWith(\n      model: other?.model,\n      topP: other?.topP,\n      topK: other?.topK,\n      candidateCount: other?.candidateCount,\n      maxOutputTokens: other?.maxOutputTokens,\n      temperature: other?.temperature,\n      stopSequences: other?.stopSequences,\n      responseMimeType: other?.responseMimeType,\n      responseSchema: other?.responseSchema,\n      safetySettings: other?.safetySettings,\n      enableCodeExecution: other?.enableCodeExecution,\n      presencePenalty: other?.presencePenalty,\n      frequencyPenalty: other?.frequencyPenalty,\n      cachedContent: other?.cachedContent,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatGoogleGenerativeAIOptions other) {\n    return model == other.model &&\n        topP == other.topP &&\n        topK == other.topK &&\n        candidateCount == other.candidateCount &&\n        maxOutputTokens == other.maxOutputTokens &&\n        temperature == other.temperature &&\n        const ListEquality<String>().equals(\n          stopSequences,\n          other.stopSequences,\n        ) &&\n        responseMimeType == other.responseMimeType &&\n        responseSchema == other.responseSchema &&\n        const ListEquality<ChatGoogleGenerativeAISafetySetting>().equals(\n          safetySettings,\n          other.safetySettings,\n        ) &&\n        enableCodeExecution == other.enableCodeExecution &&\n        presencePenalty == other.presencePenalty &&\n        frequencyPenalty == other.frequencyPenalty &&\n        cachedContent == other.cachedContent &&\n        const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n        toolChoice == other.toolChoice &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        topP.hashCode ^\n        topK.hashCode ^\n        
candidateCount.hashCode ^\n        maxOutputTokens.hashCode ^\n        temperature.hashCode ^\n        const ListEquality<String>().hash(stopSequences) ^\n        responseMimeType.hashCode ^\n        responseSchema.hashCode ^\n        const ListEquality<ChatGoogleGenerativeAISafetySetting>().hash(\n          safetySettings,\n        ) ^\n        enableCodeExecution.hashCode ^\n        presencePenalty.hashCode ^\n        frequencyPenalty.hashCode ^\n        cachedContent.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// {@template chat_google_generative_ai_safety_setting}\n/// Safety setting, affecting the safety-blocking behavior.\n/// Passing a safety setting for a category changes the allowed probability that\n/// content is blocked.\n/// {@endtemplate}\nclass ChatGoogleGenerativeAISafetySetting {\n  /// {@macro chat_google_generative_ai_safety_setting}\n  const ChatGoogleGenerativeAISafetySetting({\n    required this.category,\n    required this.threshold,\n  });\n\n  /// The category for this setting.\n  final ChatGoogleGenerativeAISafetySettingCategory category;\n\n  /// Controls the probability threshold at which harm is blocked.\n  final ChatGoogleGenerativeAISafetySettingThreshold threshold;\n}\n\n/// Safety settings categorizes.\n///\n/// Docs: https://ai.google.dev/docs/safety_setting_gemini\nenum ChatGoogleGenerativeAISafetySettingCategory {\n  /// The harm category is unspecified.\n  unspecified,\n\n  /// The harm category is harassment.\n  harassment,\n\n  /// The harm category is hate speech.\n  hateSpeech,\n\n  /// The harm category is sexually explicit content.\n  sexuallyExplicit,\n\n  /// The harm category is dangerous content.\n  dangerousContent,\n}\n\n/// Controls the probability threshold at which harm is blocked.\n///\n/// Docs: https://ai.google.dev/docs/safety_setting_gemini\nenum ChatGoogleGenerativeAISafetySettingThreshold {\n  /// Threshold 
is unspecified, block using default threshold.\n  unspecified,\n\n  /// \tBlock when low, medium or high probability of unsafe content.\n  blockLowAndAbove,\n\n  /// Block when medium or high probability of unsafe content.\n  blockMediumAndAbove,\n\n  /// Block when high probability of unsafe content.\n  blockOnlyHigh,\n\n  /// Always show regardless of probability of unsafe content.\n  blockNone,\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/vertex_ai/chat_vertex_ai.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:uuid/uuid.dart';\n\nimport '../../utils/auth/http_client_auth_provider.dart';\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// {@template chat_vertex_ai}\n/// Wrapper around GCP Vertex AI chat models API (Gemini API).\n///\n/// Example:\n/// ```dart\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: ServiceAccountCredentials.fromJson({...}),\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final chatModel = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// final result = await chatModel([ChatMessage.humanText('Hello')]);\n/// ```\n///\n/// Vertex AI documentation:\n/// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/overview\n///\n/// ### Set up your Google Cloud Platform project\n///\n/// 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n/// 2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n/// 3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n/// 4. 
[Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n///\n/// ### Authentication\n///\n/// To create an instance of `ChatVertexAI` you need to provide an\n/// [HttpClientAuthProvider] that wraps your service account credentials.\n///\n/// Example using a service account JSON:\n///\n/// ```dart\n/// final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n///   json.decode(serviceAccountJson),\n/// );\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: serviceAccountCredentials,\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final chatModel = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// ```\n///\n/// The service account should have the following\n/// [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions):\n/// - `aiplatform.endpoints.predict`\n///\n/// The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes)\n/// is:\n/// - `https://www.googleapis.com/auth/cloud-platform` (you can use the\n///   constant [ChatVertexAI.cloudPlatformScope])\n///\n/// See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control\n///\n/// ### Available models\n///\n/// **Latest stable models:**\n///\n/// - `gemini-2.5-flash` (recommended):\n///   * Multimodal input and text output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-2.0-flash-exp`:\n///   * Multimodal input and output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-1.5-pro`:\n///   * Multimodal input and text output\n///   * Context window: 2M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-1.5-flash`:\n///   * Multimodal input and text output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// The previous list of models may not be exhaustive or up-to-date. 
Check out\n/// the [Vertex AI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#latest-stable)\n/// for the latest stable models.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n///   defaultOptions: ChatVertexAIOptions(\n///     temperature: 0,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   PromptValue.string('Tell me a joke'),\n///   options: const ChatVertexAIOptions(temperature: 1),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatVertexAIOptions(model: 'gemini-2.5-flash')) | outputParser,\n///   'q2': prompt2 | chatModel.bind(const ChatVertexAIOptions(model: 'gemini-1.5-pro')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Tool calling\n///\n/// [ChatVertexAI] supports tool calling (aka function calling).\n///\n/// Check the 
[docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/how_to/tools)\n/// for more information on how to use tools.\n///\n/// Example:\n/// ```dart\n/// const tool = ToolSpec(\n///   name: 'get_current_weather',\n///   description: 'Get the current weather in a given location',\n///   inputJsonSchema: {\n///     'type': 'object',\n///     'properties': {\n///       'location': {\n///         'type': 'string',\n///         'description': 'The city and state, e.g. San Francisco, CA',\n///       },\n///     },\n///     'required': ['location'],\n///   },\n/// );\n/// final chatModel = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n///   defaultOptions: ChatVertexAIOptions(\n///     model: 'gemini-2.5-flash',\n///     temperature: 0,\n///     tools: [tool],\n///   ),\n/// );\n/// final res = await model.invoke(\n///   PromptValue.string('What\\'s the weather like in Boston and Madrid right now in celsius?'),\n/// );\n/// ```\n/// {@endtemplate}\nclass ChatVertexAI extends BaseChatModel<ChatVertexAIOptions> {\n  /// {@macro chat_vertex_ai}\n  ChatVertexAI({\n    required final HttpClientAuthProvider authProvider,\n    required final String project,\n    final String location = 'us-central1',\n    super.defaultOptions = const ChatVertexAIOptions(model: defaultModel),\n  }) {\n    _googleAiClient = g.GoogleAIClient(\n      config: g.GoogleAIConfig.vertexAI(\n        projectId: project,\n        location: location,\n        authProvider: authProvider,\n      ),\n    );\n  }\n\n  /// A client for interacting with Vertex AI API.\n  late g.GoogleAIClient _googleAiClient;\n\n  /// Scope required for Vertex AI API calls.\n  static const String cloudPlatformScope =\n      'https://www.googleapis.com/auth/cloud-platform';\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  @override\n  String get modelType => 'vertex-ai-chat';\n\n  /// The default model to use unless another is specified.\n  static const 
defaultModel = 'gemini-2.5-flash';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatVertexAIOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    final messages = input.toChatMessages();\n    final model = _getModel(options);\n\n    final request = _generateCompletionRequest(messages, options: options);\n    final response = await _googleAiClient.models.generateContent(\n      model: model,\n      request: request,\n    );\n\n    return response.toChatResult(id, model);\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatVertexAIOptions? options,\n  }) {\n    final id = _uuid.v4();\n    final messages = input.toChatMessages();\n    final model = _getModel(options);\n\n    final request = _generateCompletionRequest(messages, options: options);\n    return _googleAiClient.models\n        .streamGenerateContent(model: model, request: request)\n        .map((final response) => response.toChatResult(id, model));\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatVertexAIOptions? options,\n  }) {\n    throw UnsupportedError(\n      'Vertex AI does not expose a tokenizer, only counting tokens is supported.',\n    );\n  }\n\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final ChatVertexAIOptions? 
options,\n  }) async {\n    final messages = promptValue.toChatMessages();\n    final model = _getModel(options);\n\n    final request = _generateCompletionRequest(messages, options: options);\n    final response = await _googleAiClient.models.countTokens(\n      model: model,\n      request: g.CountTokensRequest(\n        contents: request.contents,\n        systemInstruction: request.systemInstruction,\n      ),\n    );\n    return response.totalTokens;\n  }\n\n  /// Creates a [g.GenerateContentRequest] from the given input.\n  g.GenerateContentRequest _generateCompletionRequest(\n    final List<ChatMessage> messages, {\n    final ChatVertexAIOptions? options,\n  }) {\n    final mergedOptions = options != null\n        ? defaultOptions.merge(options)\n        : defaultOptions;\n\n    final firstMessage = messages.firstOrNull;\n    final systemInstruction = firstMessage is SystemChatMessage\n        ? firstMessage.contentAsString\n        : null;\n\n    return g.GenerateContentRequest(\n      contents: messages.toContentList(),\n      systemInstruction: systemInstruction != null\n          ? g.Content(parts: [g.TextPart(systemInstruction)])\n          : null,\n      generationConfig: g.GenerationConfig(\n        temperature: mergedOptions.temperature,\n        topP: mergedOptions.topP,\n        topK: mergedOptions.topK,\n        candidateCount: mergedOptions.candidateCount,\n        maxOutputTokens: mergedOptions.maxOutputTokens,\n        stopSequences: mergedOptions.stopSequences ?? const [],\n        responseMimeType: mergedOptions.responseMimeType,\n        responseSchema: mergedOptions.responseSchema?.toSchema().toJson(),\n        presencePenalty: mergedOptions.presencePenalty,\n        frequencyPenalty: mergedOptions.frequencyPenalty,\n      ),\n      safetySettings: mergedOptions.safetySettings?.toSafetySettings(),\n      tools: mergedOptions.tools.toToolList(\n        enableCodeExecution: mergedOptions.enableCodeExecution ?? 
false,\n      ),\n      toolConfig: mergedOptions.toolChoice?.toToolConfig(),\n      cachedContent: mergedOptions.cachedContent,\n    );\n  }\n\n  /// Gets the model to use for the request.\n  String _getModel(final ChatVertexAIOptions? options) {\n    return options?.model ?? defaultOptions.model ?? defaultModel;\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  @override\n  void close() {\n    _googleAiClient.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/vertex_ai/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport 'types.dart';\n\nextension ChatMessagesMapper on List<ChatMessage> {\n  List<g.Content> toContentList() {\n    final result = <g.Content>[];\n\n    // NOTE: Gemini can return multiple FunctionCall parts in a single model turn.\n    // The API requires the next turn to be ONE Content.functionResponses that\n    // includes the SAME number of FunctionResponse parts, in the SAME order.\n    // If we send each ToolChatMessage separately, the counts won't match and\n    // the API throws an error.\n    // Therefore we batch consecutive ToolChatMessage instances into a single\n    // Content with multiple FunctionResponseParts.\n    List<g.FunctionResponsePart>? pendingToolResponses;\n\n    void flushToolResponses() {\n      if (pendingToolResponses != null && pendingToolResponses!.isNotEmpty) {\n        result.add(g.Content(role: 'user', parts: pendingToolResponses!));\n        pendingToolResponses = null;\n      }\n    }\n\n    for (final message in this) {\n      if (message is SystemChatMessage) {\n        continue; // System messages are handled separately\n      }\n\n      if (message is ToolChatMessage) {\n        // Start (or continue) a batch of tool responses\n        pendingToolResponses ??= <g.FunctionResponsePart>[];\n        pendingToolResponses!.add(_toolMsgToFunctionResponsePart(message));\n        continue;\n      }\n\n      // Any non-tool message breaks the batch: flush before adding it\n      flushToolResponses();\n\n      switch (message) {\n        case final HumanChatMessage msg:\n          result.add(_mapHumanChatMessage(msg));\n        case final AIChatMessage msg:\n          result.add(_mapAIChatMessage(msg));\n        case final CustomChatMessage 
msg:\n          result.add(_mapCustomChatMessage(msg));\n        default:\n          throw UnsupportedError('Unknown message type: $message');\n      }\n    }\n\n    // Flush remaining batched tool responses at the end\n    flushToolResponses();\n\n    return result;\n  }\n\n  g.Content _mapHumanChatMessage(final HumanChatMessage msg) {\n    final contentParts = switch (msg.content) {\n      final ChatMessageContentText c => [g.TextPart(c.text)],\n      final ChatMessageContentImage c => [\n        if (c.data.startsWith('http'))\n          g.FileDataPart(g.FileData(fileUri: c.data))\n        else\n          g.InlineDataPart(\n            g.Blob.fromBytes(c.mimeType ?? 'image/jpeg', base64Decode(c.data)),\n          ),\n      ],\n      final ChatMessageContentMultiModal c =>\n        c.parts\n            .map(\n              (final p) => switch (p) {\n                final ChatMessageContentText c => g.TextPart(c.text),\n                final ChatMessageContentImage c =>\n                  c.data.startsWith('http')\n                      ? g.FileDataPart(g.FileData(fileUri: c.data))\n                      : g.InlineDataPart(\n                          g.Blob.fromBytes(\n                            c.mimeType ?? 
'image/jpeg',\n                            base64Decode(c.data),\n                          ),\n                        ),\n                ChatMessageContentMultiModal() => throw UnsupportedError(\n                  'Cannot have multimodal content in multimodal content',\n                ),\n              },\n            )\n            .toList(growable: false),\n    };\n    return g.Content(role: 'user', parts: contentParts);\n  }\n\n  g.Content _mapAIChatMessage(final AIChatMessage msg) {\n    final contentParts = [\n      if (msg.content.isNotEmpty) g.TextPart(msg.content),\n      if (msg.toolCalls.isNotEmpty)\n        ...msg.toolCalls.map(\n          (final call) => g.FunctionCallPart(\n            g.FunctionCall(name: call.name, args: call.arguments),\n          ),\n        ),\n    ];\n    return g.Content(role: 'model', parts: contentParts);\n  }\n\n  g.FunctionResponsePart _toolMsgToFunctionResponsePart(\n    final ToolChatMessage msg,\n  ) {\n    Map<String, Object?> response;\n    try {\n      response = jsonDecode(msg.content) as Map<String, Object?>;\n    } catch (_) {\n      response = {'result': msg.content};\n    }\n    return g.FunctionResponsePart(\n      g.FunctionResponse(name: msg.toolCallId, response: response),\n    );\n  }\n\n  g.Content _mapCustomChatMessage(final CustomChatMessage msg) {\n    return g.Content(role: msg.role, parts: [g.TextPart(msg.content)]);\n  }\n}\n\nextension GenerateContentResponseMapper on g.GenerateContentResponse {\n  ChatResult toChatResult(final String id, final String model) {\n    final candidate = candidates?.first;\n    if (candidate == null) {\n      throw StateError('No candidates in response');\n    }\n\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content:\n            candidate.content?.parts\n                .map(\n                  (p) => switch (p) {\n                    final g.TextPart p => p.text,\n                    final g.InlineDataPart p => p.inlineData.data,\n    
                final g.FileDataPart p => p.fileData.fileUri,\n                    g.FunctionResponsePart() => '',\n                    g.FunctionCallPart() => '',\n                    g.ExecutableCodePart() => '',\n                    g.CodeExecutionResultPart() => '',\n                    g.VideoMetadataPart() => '',\n                    g.ThoughtPart() => '',\n                    g.ThoughtSignaturePart() => '',\n                    g.PartMetadataPart() => '',\n                  },\n                )\n                .nonNulls\n                .join('\\n') ??\n            '',\n        toolCalls:\n            candidate.content?.parts\n                .whereType<g.FunctionCallPart>()\n                .map(\n                  (final part) => AIChatMessageToolCall(\n                    id: part.functionCall.name,\n                    name: part.functionCall.name,\n                    argumentsRaw: jsonEncode(part.functionCall.args ?? {}),\n                    arguments: part.functionCall.args ?? 
{},\n                  ),\n                )\n                .toList(growable: false) ??\n            [],\n      ),\n      finishReason: _mapFinishReason(candidate.finishReason),\n      metadata: {\n        'model': model,\n        'block_reason': promptFeedback?.blockReason?.name,\n        'safety_ratings': candidate.safetyRatings\n            ?.map(\n              (r) => {\n                'category': r.category.name,\n                'probability': r.probability.name,\n              },\n            )\n            .toList(growable: false),\n        'citation_metadata': candidate.citationMetadata?.citationSources\n            ?.map(\n              (final g.CitationSource s) => {\n                'start_index': s.startIndex,\n                'end_index': s.endIndex,\n                'uri': s.uri,\n                'title': s.title,\n                'license': s.license,\n                'publication_date': s.publicationDate?.toIso8601String(),\n              },\n            )\n            .toList(growable: false),\n        'executable_code': candidate.content?.parts\n            .whereType<g.ExecutableCodePart>()\n            .map(\n              (code) => {\n                'language': code.executableCode.language.name,\n                'code': code.executableCode.code,\n              },\n            )\n            .toList(growable: false),\n        'code_execution_result': candidate.content?.parts\n            .whereType<g.CodeExecutionResultPart>()\n            .map(\n              (result) => {\n                'outcome': result.codeExecutionResult.outcome.name,\n                'output': result.codeExecutionResult.output,\n              },\n            )\n            .toList(growable: false),\n      },\n      usage: LanguageModelUsage(\n        promptTokens: usageMetadata?.promptTokenCount,\n        responseTokens: usageMetadata?.candidatesTokenCount,\n        totalTokens: usageMetadata?.totalTokenCount,\n      ),\n    );\n  }\n\n  FinishReason 
_mapFinishReason(final g.FinishReason? reason) =>\n      switch (reason) {\n        g.FinishReason.unspecified => FinishReason.unspecified,\n        g.FinishReason.stop => FinishReason.stop,\n        g.FinishReason.maxTokens => FinishReason.length,\n        g.FinishReason.safety => FinishReason.contentFilter,\n        g.FinishReason.recitation => FinishReason.recitation,\n        g.FinishReason.other => FinishReason.unspecified,\n        g.FinishReason.blocklist => FinishReason.contentFilter,\n        g.FinishReason.prohibitedContent => FinishReason.contentFilter,\n        g.FinishReason.spii => FinishReason.contentFilter,\n        g.FinishReason.malformedFunctionCall => FinishReason.unspecified,\n        null => FinishReason.unspecified,\n      };\n}\n\nextension SafetySettingsMapper on List<ChatVertexAISafetySetting> {\n  List<g.SafetySetting> toSafetySettings() {\n    return map(\n      (final setting) => g.SafetySetting(\n        category: switch (setting.category) {\n          ChatVertexAISafetySettingCategory.unspecified =>\n            g.HarmCategory.unspecified,\n          ChatVertexAISafetySettingCategory.harassment =>\n            g.HarmCategory.harassment,\n          ChatVertexAISafetySettingCategory.hateSpeech =>\n            g.HarmCategory.hateSpeech,\n          ChatVertexAISafetySettingCategory.sexuallyExplicit =>\n            g.HarmCategory.sexuallyExplicit,\n          ChatVertexAISafetySettingCategory.dangerousContent =>\n            g.HarmCategory.dangerousContent,\n        },\n        threshold: switch (setting.threshold) {\n          ChatVertexAISafetySettingThreshold.unspecified =>\n            g.HarmBlockThreshold.unspecified,\n          ChatVertexAISafetySettingThreshold.blockLowAndAbove =>\n            g.HarmBlockThreshold.blockLowAndAbove,\n          ChatVertexAISafetySettingThreshold.blockMediumAndAbove =>\n            g.HarmBlockThreshold.blockMediumAndAbove,\n          ChatVertexAISafetySettingThreshold.blockOnlyHigh =>\n            
g.HarmBlockThreshold.blockOnlyHigh,\n          ChatVertexAISafetySettingThreshold.blockNone =>\n            g.HarmBlockThreshold.blockNone,\n        },\n      ),\n    ).toList(growable: false);\n  }\n}\n\nextension ChatToolListMapper on List<ToolSpec>? {\n  List<g.Tool>? toToolList({required final bool enableCodeExecution}) {\n    if (this == null && !enableCodeExecution) {\n      return null;\n    }\n\n    return [\n      g.Tool(\n        functionDeclarations: this\n            ?.map(\n              (tool) => g.FunctionDeclaration(\n                name: tool.name,\n                description: tool.description,\n                parameters: tool.inputJsonSchema.toSchema(),\n              ),\n            )\n            .toList(growable: false),\n        codeExecution: enableCodeExecution ? <String, dynamic>{} : null,\n      ),\n    ];\n  }\n}\n\nextension SchemaMapper on Map<String, dynamic> {\n  g.Schema toSchema() {\n    final jsonSchema = this;\n    final type = jsonSchema['type'] as String;\n    final description = jsonSchema['description'] as String?;\n    final nullable = jsonSchema['nullable'] as bool?;\n    final enumValues = (jsonSchema['enum'] as List?)?.cast<String>();\n    final format = jsonSchema['format'] as String?;\n    final items = jsonSchema['items'] as Map<String, dynamic>?;\n    final properties = jsonSchema['properties'] as Map<String, dynamic>?;\n    final requiredProperties = (jsonSchema['required'] as List?)\n        ?.cast<String>();\n\n    switch (type) {\n      case 'string':\n        return g.Schema(\n          type: g.SchemaType.string,\n          description: description,\n          nullable: nullable,\n          enumValues: enumValues,\n        );\n      case 'number':\n        return g.Schema(\n          type: g.SchemaType.number,\n          description: description,\n          nullable: nullable,\n          format: format,\n        );\n      case 'integer':\n        return g.Schema(\n          type: g.SchemaType.integer,\n         
 description: description,\n          nullable: nullable,\n          format: format,\n        );\n      case 'boolean':\n        return g.Schema(\n          type: g.SchemaType.boolean,\n          description: description,\n          nullable: nullable,\n        );\n      case 'array':\n        if (items != null) {\n          final itemsSchema = items.toSchema();\n          return g.Schema(\n            type: g.SchemaType.array,\n            items: itemsSchema,\n            description: description,\n            nullable: nullable,\n          );\n        }\n        throw ArgumentError('Array schema must have \"items\" property');\n      case 'object':\n        if (properties != null) {\n          final propertiesSchema = properties.map(\n            (key, value) =>\n                MapEntry(key, (value as Map<String, dynamic>).toSchema()),\n          );\n          return g.Schema(\n            type: g.SchemaType.object,\n            properties: propertiesSchema,\n            required: requiredProperties,\n            description: description,\n            nullable: nullable,\n          );\n        }\n        throw ArgumentError('Object schema must have \"properties\" property');\n      default:\n        throw ArgumentError('Invalid schema type: $type');\n    }\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  Map<String, dynamic> toToolConfig() {\n    return switch (this) {\n      ChatToolChoiceNone _ => {\n        'functionCallingConfig': {'mode': 'NONE'},\n      },\n      ChatToolChoiceAuto _ => {\n        'functionCallingConfig': {'mode': 'AUTO'},\n      },\n      ChatToolChoiceRequired() => {\n        'functionCallingConfig': {'mode': 'ANY'},\n      },\n      final ChatToolChoiceForced t => {\n        'functionCallingConfig': {\n          'mode': 'ANY',\n          'allowedFunctionNames': [t.name],\n        },\n      },\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/chat_models/vertex_ai/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_vertex_ai_options}\n/// Options to pass into the Vertex AI Chat Model (Gemini API).\n///\n/// You can find a list of available models here:\n/// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#latest-stable\n/// {@endtemplate}\n@immutable\nclass ChatVertexAIOptions extends ChatModelOptions {\n  /// {@macro chat_vertex_ai_options}\n  const ChatVertexAIOptions({\n    super.model,\n    this.topP,\n    this.topK,\n    this.candidateCount,\n    this.maxOutputTokens,\n    this.temperature,\n    this.stopSequences,\n    this.responseMimeType,\n    this.responseSchema,\n    this.safetySettings,\n    this.enableCodeExecution,\n    this.presencePenalty,\n    this.frequencyPenalty,\n    this.cachedContent,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// The maximum cumulative probability of tokens to consider when sampling.\n  ///\n  /// The model uses combined Top-k and nucleus sampling. Tokens are sorted\n  /// based on their assigned probabilities so that only the most likely\n  /// tokens are considered. Top-k sampling directly limits the maximum\n  /// number of tokens to consider, while Nucleus sampling limits number of\n  /// tokens based on the cumulative probability.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_p`\n  /// attribute of the `Model` returned by the `getModel` function.\n  final double? topP;\n\n  /// The maximum number of tokens to consider when sampling.\n  ///\n  /// The model uses combined Top-k and nucleus sampling. Top-k sampling considers\n  /// the set of `top_k` most probable tokens. 
Defaults to 40.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_k` attribute\n  /// of the `Model` returned by the `getModel` function.\n  final int? topK;\n\n  /// Number of generated responses to return.\n  ///\n  /// This value must be between [1, 8], inclusive. If unset, this will default to 1.\n  final int? candidateCount;\n\n  /// The maximum number of tokens to include in a candidate.\n  ///\n  /// If unset, this will default to `output_token_limit` specified in the `Model`\n  /// specification.\n  final int? maxOutputTokens;\n\n  /// Controls the randomness of the output.\n  ///\n  /// Note: The default value varies by model, see the `Model.temperature`\n  /// attribute of the `Model` returned by the `getModel` function.\n  ///\n  /// Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will\n  /// produce responses that are more varied and creative, while a value\n  /// closer to 0.0 will typically result in more straightforward responses\n  /// from the model.\n  final double? temperature;\n\n  /// The set of character sequences (up to 5) that will stop output generation.\n  ///\n  /// If specified, the API will stop at the first appearance of a stop sequence.\n  /// The stop sequence will not be included as part of the response.\n  final List<String>? stopSequences;\n\n  /// Output response mimetype of the generated candidate text.\n  ///\n  /// Supported mimetype:\n  /// - `text/plain`: (default) Text output.\n  /// - `application/json`: JSON response in the candidates.\n  final String? 
responseMimeType;\n\n  /// Output response schema of the generated candidate text.\n  ///\n  /// Following the [JSON Schema specification](https://json-schema.org).\n  ///\n  /// - Note: This only applies when the specified `responseMIMEType` supports\n  ///   a schema; currently this is limited to `application/json`.\n  ///\n  /// Example:\n  /// ```json\n  /// {\n  ///   'type': 'object',\n  ///   'properties': {\n  ///     'answer': {\n  ///       'type': 'string',\n  ///       'description': 'The answer to the question being asked',\n  ///     },\n  ///     'sources': {\n  ///       'type': 'array',\n  ///       'items': {'type': 'string'},\n  ///       'description': 'The sources used to answer the question',\n  ///     },\n  ///   },\n  ///   'required': ['answer', 'sources'],\n  /// },\n  /// ```\n  final Map<String, dynamic>? responseSchema;\n\n  /// A list of unique [ChatVertexAISafetySetting] instances for blocking\n  /// unsafe content.\n  ///\n  /// This will be enforced on the generated output. There should not be more than\n  /// one setting for each type. The API will block any contents and responses that\n  /// fail to meet the thresholds set by these settings.\n  ///\n  /// This list overrides the default settings for each category specified. If there\n  /// is no safety setting for a given category provided in the list, the API will use\n  /// the default safety setting for that category.\n  final List<ChatVertexAISafetySetting>? safetySettings;\n\n  /// When code execution is enabled the model may generate code and run it in the\n  /// process of generating a response to the prompt.\n  ///\n  /// When this happens the code that was executed and its output will be included\n  /// in the response metadata as `metadata['executable_code']` and\n  /// `metadata['code_execution_result']`.\n  final bool? 
enableCodeExecution;\n\n  /// Presence penalty applied to the next token's logprobs if the token has\n  /// already been seen in the generated text.\n  ///\n  /// Positive values discourage tokens that have already appeared, making the\n  /// model more likely to introduce new topics.\n  ///\n  /// Values typically range from -1.0 to 1.0.\n  final double? presencePenalty;\n\n  /// Frequency penalty applied to the next token's logprobs, multiplied by the\n  /// number of times the token has been seen in the generated text.\n  ///\n  /// Positive values discourage tokens that have appeared frequently, making the\n  /// model less likely to repeat the same content verbatim.\n  ///\n  /// Values typically range from -1.0 to 1.0.\n  final double? frequencyPenalty;\n\n  /// The name of the cached content to use as context for prediction.\n  ///\n  /// Caching can significantly reduce costs and latency for requests that reuse\n  /// the same long context (like system instructions or large documents).\n  ///\n  /// Format: `cachedContents/{id}`\n  ///\n  /// To create cached content, use the Vertex AI API's caching endpoints.\n  /// See: https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview\n  final String? cachedContent;\n\n  @override\n  ChatVertexAIOptions copyWith({\n    final String? model,\n    final double? topP,\n    final int? topK,\n    final int? candidateCount,\n    final int? maxOutputTokens,\n    final double? temperature,\n    final List<String>? stopSequences,\n    final String? responseMimeType,\n    final Map<String, dynamic>? responseSchema,\n    final List<ChatVertexAISafetySetting>? safetySettings,\n    final bool? enableCodeExecution,\n    final double? presencePenalty,\n    final double? frequencyPenalty,\n    final String? cachedContent,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatVertexAIOptions(\n      model: model ?? 
this.model,\n      topP: topP ?? this.topP,\n      topK: topK ?? this.topK,\n      candidateCount: candidateCount ?? this.candidateCount,\n      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,\n      temperature: temperature ?? this.temperature,\n      stopSequences: stopSequences ?? this.stopSequences,\n      responseMimeType: responseMimeType ?? this.responseMimeType,\n      responseSchema: responseSchema ?? this.responseSchema,\n      safetySettings: safetySettings ?? this.safetySettings,\n      enableCodeExecution: enableCodeExecution ?? this.enableCodeExecution,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      cachedContent: cachedContent ?? this.cachedContent,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatVertexAIOptions merge(covariant final ChatVertexAIOptions? 
other) {\n    return copyWith(\n      model: other?.model,\n      topP: other?.topP,\n      topK: other?.topK,\n      candidateCount: other?.candidateCount,\n      maxOutputTokens: other?.maxOutputTokens,\n      temperature: other?.temperature,\n      stopSequences: other?.stopSequences,\n      responseMimeType: other?.responseMimeType,\n      responseSchema: other?.responseSchema,\n      safetySettings: other?.safetySettings,\n      enableCodeExecution: other?.enableCodeExecution,\n      presencePenalty: other?.presencePenalty,\n      frequencyPenalty: other?.frequencyPenalty,\n      cachedContent: other?.cachedContent,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatVertexAIOptions other) {\n    return model == other.model &&\n        topP == other.topP &&\n        topK == other.topK &&\n        candidateCount == other.candidateCount &&\n        maxOutputTokens == other.maxOutputTokens &&\n        temperature == other.temperature &&\n        const ListEquality<String>().equals(\n          stopSequences,\n          other.stopSequences,\n        ) &&\n        responseMimeType == other.responseMimeType &&\n        responseSchema == other.responseSchema &&\n        const ListEquality<ChatVertexAISafetySetting>().equals(\n          safetySettings,\n          other.safetySettings,\n        ) &&\n        enableCodeExecution == other.enableCodeExecution &&\n        presencePenalty == other.presencePenalty &&\n        frequencyPenalty == other.frequencyPenalty &&\n        cachedContent == other.cachedContent &&\n        const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n        toolChoice == other.toolChoice &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        topP.hashCode ^\n        topK.hashCode ^\n        candidateCount.hashCode ^\n        
maxOutputTokens.hashCode ^\n        temperature.hashCode ^\n        const ListEquality<String>().hash(stopSequences) ^\n        responseMimeType.hashCode ^\n        responseSchema.hashCode ^\n        const ListEquality<ChatVertexAISafetySetting>().hash(safetySettings) ^\n        enableCodeExecution.hashCode ^\n        presencePenalty.hashCode ^\n        frequencyPenalty.hashCode ^\n        cachedContent.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// {@template chat_vertex_ai_safety_setting}\n/// Safety setting, affecting the safety-blocking behavior.\n///\n/// Passing a safety setting for a category changes the allowed probability that\n/// content is blocked.\n/// {@endtemplate}\n@immutable\nclass ChatVertexAISafetySetting {\n  /// {@macro chat_vertex_ai_safety_setting}\n  const ChatVertexAISafetySetting({\n    required this.category,\n    required this.threshold,\n  });\n\n  /// The category for this setting.\n  final ChatVertexAISafetySettingCategory category;\n\n  /// Controls the probability threshold at which harm is blocked.\n  final ChatVertexAISafetySettingThreshold threshold;\n}\n\n/// Safety settings categories.\n///\n/// Docs: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-attributes\nenum ChatVertexAISafetySettingCategory {\n  /// The harm category is unspecified.\n  unspecified,\n\n  /// The harm category is harassment.\n  harassment,\n\n  /// The harm category is hate speech.\n  hateSpeech,\n\n  /// The harm category is sexually explicit content.\n  sexuallyExplicit,\n\n  /// The harm category is dangerous content.\n  dangerousContent,\n}\n\n/// Controls the probability threshold at which harm is blocked.\n///\n/// Docs: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-attributes\nenum ChatVertexAISafetySettingThreshold {\n  /// Threshold is unspecified, block using default 
threshold.\n  unspecified,\n\n  /// Block when low, medium or high probability of unsafe content.\n  blockLowAndAbove,\n\n  /// Block when medium or high probability of unsafe content.\n  blockMediumAndAbove,\n\n  /// Block when high probability of unsafe content.\n  blockOnlyHigh,\n\n  /// Always show regardless of probability of unsafe content.\n  blockNone,\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/embeddings/embeddings.dart",
    "content": "export 'google_ai/google_ai_embeddings.dart';\nexport 'vertex_ai/vertex_ai_embeddings.dart';\n"
  },
  {
    "path": "packages/langchain_google/lib/src/embeddings/google_ai/google_ai_embeddings.dart",
    "content": "import 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/utils.dart';\n\n/// {@template google_generative_ai_embeddings}\n/// Wrapper around Google AI embedding models API\n///\n/// Example:\n/// ```dart\n/// final embeddings = GoogleGenerativeAIEmbeddings(\n///   apiKey: 'your-api-key',\n/// );\n/// final result = await embeddings.embedQuery('Hello world');\n/// ```\n///\n/// Google AI documentation: https://ai.google.dev/\n///\n/// ### Available models\n///\n/// - `gemini-embedding-001` (recommended, stable)\n///   * Default dimensions: 3072\n///   * Supports flexible dimensions: 128-3072 (recommended: 768, 1536, 3072)\n///   * Uses Matryoshka Representation Learning (MRL) technique\n///\n/// Legacy models:\n/// - `embedding-001`\n/// - `embedding-gecko-001`\n///\n/// The previous list of models may not be exhaustive or up-to-date. Check out\n/// the [Google AI embeddings documentation](https://ai.google.dev/gemini-api/docs/embeddings)\n/// for the latest list of available models.\n///\n/// ### Task type\n///\n/// Google AI supports specifying a 'task type' when embedding documents.\n/// The task type is then used by the model to improve the quality of the\n/// embeddings.\n///\n/// This class uses the following task types:\n/// - `retrievalDocument`: for [embedDocuments]\n/// - `retrievalQuery`: for [embedQuery]\n///\n/// ### Reduced dimensionality\n///\n/// Some embedding models support specifying a smaller number of dimensions\n/// for the resulting embeddings. This can be useful when you want to save\n/// computing and storage costs with minor performance loss. 
Use the\n/// [dimensions] parameter to specify the number of dimensions.\n///\n/// You can also use this feature to reduce the dimensions to 2D or 3D for\n/// visualization purposes.\n///\n/// ### Title\n///\n/// Google AI supports specifying a document title when embedding documents.\n/// The title is then used by the model to improve the quality of the\n/// embeddings.\n///\n/// To specify a document title, add the title to the document's metadata.\n/// Then, specify the metadata key in the [docTitleKey] parameter.\n///\n/// Example:\n/// ```dart\n/// final embeddings = GoogleGenerativeAIEmbeddings(\n///   apiKey: 'your-api-key',\n/// );\n/// final result = await embeddings.embedDocuments([\n///   Document(\n///     pageContent: 'Hello world',\n///     metadata: {'title': 'Hello!'},\n///   ),\n/// ]);\n/// ```\n/// {@endtemplate}\nclass GoogleGenerativeAIEmbeddings extends Embeddings {\n  /// Create a new [GoogleGenerativeAIEmbeddings] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Google AI API key. You can find your API key in the\n  ///   [Google AI Studio dashboard](https://aistudio.google.com/app/apikey).\n  /// - `model`: the embeddings model to use. You can find a list of available\n  ///   embedding models here: https://ai.google.dev/models/gemini\n  /// - [GoogleGenerativeAIEmbeddings.dimensions]\n  /// - [GoogleGenerativeAIEmbeddings.batchSize]\n  /// - [GoogleGenerativeAIEmbeddings.docTitleKey]\n  ///\n  /// Advanced configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Google AI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. 
You\n  ///   can use this to set custom query parameters.\n  /// - `retries`: the number of retries to attempt if a request fails.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  GoogleGenerativeAIEmbeddings({\n    final String? apiKey,\n    final String? baseUrl,\n    final Map<String, String>? headers,\n    final Map<String, String>? queryParams,\n    final int retries = 3,\n    final http.Client? client,\n    this.model = 'gemini-embedding-001',\n    this.dimensions,\n    this.batchSize = 100,\n    this.docTitleKey = 'title',\n  }) {\n    _googleAiClient = g.GoogleAIClient(\n      config: g.GoogleAIConfig(\n        authProvider: apiKey != null ? g.ApiKeyProvider(apiKey) : null,\n        baseUrl: baseUrl ?? 'https://generativelanguage.googleapis.com',\n        defaultHeaders: headers ?? const {},\n        defaultQueryParams: queryParams ?? const {},\n        retryPolicy: g.RetryPolicy(maxRetries: retries),\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// A client for interacting with Google AI API.\n  late g.GoogleAIClient _googleAiClient;\n\n  /// The embeddings model to use.\n  String model;\n\n  /// The number of dimensions the resulting output embeddings should have.\n  /// Supported in `gemini-embedding-001` and later models (not in legacy `embedding-001`).\n  int? 
dimensions;\n\n  /// The maximum number of documents to embed in a single batch request.\n  int batchSize;\n\n  /// The metadata key used to store the document's (optional) title.\n  String docTitleKey;\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    final batches = chunkList(documents, chunkSize: batchSize);\n\n    final List<List<List<double>>> embeddings = await Future.wait(\n      batches.map((final batch) async {\n        // Use batch API for better performance\n        try {\n          final response = await _googleAiClient.models.batchEmbedContents(\n            model: model,\n            request: g.BatchEmbedContentsRequest(\n              requests: batch\n                  .map(\n                    (final doc) => g.EmbedContentRequest(\n                      content: g.Content(parts: [g.TextPart(doc.pageContent)]),\n                      taskType: g.TaskType.retrievalDocument,\n                      title: doc.metadata[docTitleKey] as String?,\n                      outputDimensionality: dimensions,\n                    ),\n                  )\n                  .toList(),\n            ),\n          );\n          return response.embeddings.map((e) => e.values).toList();\n        } on g.ApiException catch (e) {\n          // Fallback to sequential requests if batch API fails with specific error\n          // This can happen if the API expects model field in each request,\n          // which is an API/client schema mismatch\n          if (e.statusCode == 400 &&\n              (e.message.contains('model is not specified') ||\n                  e.message.contains('model'))) {\n            // Use sequential requests as fallback\n            final results = await Future.wait(\n              batch.map((final doc) async {\n                final response = await _googleAiClient.models.embedContent(\n                  model: model,\n                  request: g.EmbedContentRequest(\n                
    content: g.Content(parts: [g.TextPart(doc.pageContent)]),\n                    taskType: g.TaskType.retrievalDocument,\n                    title: doc.metadata[docTitleKey] as String?,\n                    outputDimensionality: dimensions,\n                  ),\n                );\n                return response.embedding.values;\n              }),\n            );\n            return results;\n          } else {\n            // For other API errors, rethrow to let caller handle them\n            rethrow;\n          }\n        }\n      }),\n    );\n\n    return embeddings.expand((final e) => e).toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    final response = await _googleAiClient.models.embedContent(\n      model: model,\n      request: g.EmbedContentRequest(\n        content: g.Content(parts: [g.TextPart(query)]),\n        taskType: g.TaskType.retrievalQuery,\n        outputDimensionality: dimensions,\n      ),\n    );\n    return response.embedding.values;\n  }\n\n  /// {@template google_generative_ai_embeddings_list_models}\n  /// Returns a list of available embedding models from Google AI.\n  ///\n  /// This method filters models to return only those that support embeddings\n  /// (embedContent method).\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = GoogleGenerativeAIEmbeddings(apiKey: '...');\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName}');\n  ///   print('  Input limit: ${model.inputTokenLimit}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final models = <g.Model>[];\n    String? 
pageToken;\n\n    // Paginate through all models\n    do {\n      final response = await _googleAiClient.models.list(pageToken: pageToken);\n      models.addAll(response.models);\n      pageToken = response.nextPageToken;\n    } while (pageToken != null);\n\n    // Filter to only embedding-capable models (those supporting embedContent)\n    return models\n        .where(_isEmbeddingModel)\n        .map(\n          (final m) => ModelInfo(\n            id: _extractModelId(m.name),\n            displayName: m.displayName,\n            description: m.description,\n            inputTokenLimit: m.inputTokenLimit,\n            outputTokenLimit: m.outputTokenLimit,\n          ),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model supports embeddings (embedContent).\n  static bool _isEmbeddingModel(final g.Model model) {\n    return model.supportedGenerationMethods?.contains('embedContent') ?? false;\n  }\n\n  /// Extracts the model ID from the full model name.\n  static String _extractModelId(final String name) {\n    const prefix = 'models/';\n    if (name.startsWith(prefix)) {\n      return name.substring(prefix.length);\n    }\n    return name;\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _googleAiClient.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/embeddings/vertex_ai/vertex_ai_embeddings.dart",
    "content": "import 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/utils.dart';\n\nimport '../../utils/auth/http_client_auth_provider.dart';\n\n/// {@template vertex_ai_embeddings}\n/// Wrapper around GCP Vertex AI text embedding models API (Gemini embeddings).\n///\n/// Example:\n/// ```dart\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: ServiceAccountCredentials.fromJson({...}),\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final embeddings = VertexAIEmbeddings(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// final result = await embeddings.embedQuery('Hello world');\n/// ```\n///\n/// Vertex AI documentation:\n/// https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings\n///\n/// ### Set up your Google Cloud Platform project\n///\n/// 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n/// 2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n/// 3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n/// 4. 
[Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n///\n/// ### Authentication\n///\n/// To create an instance of `VertexAIEmbeddings` you need to provide an\n/// [HttpClientAuthProvider] that wraps your service account credentials.\n///\n/// Example using a service account JSON:\n///\n/// ```dart\n/// final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n///   json.decode(serviceAccountJson),\n/// );\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: serviceAccountCredentials,\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final embeddings = VertexAIEmbeddings(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// ```\n///\n/// The service account should have the following\n/// [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions):\n/// - `aiplatform.endpoints.predict`\n///\n/// The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes)\n/// is:\n/// - `https://www.googleapis.com/auth/cloud-platform` (you can use the\n///   constant [VertexAIEmbeddings.cloudPlatformScope])\n///\n/// See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control\n///\n/// ### Available models\n///\n/// **Latest stable models:**\n///\n/// - `text-embedding-005` (recommended):\n///   * Output dimensions: 768 (default)\n///   * Max input tokens: 3,072\n///   * Supports task types and custom output dimensions\n///\n/// - `text-multilingual-embedding-002`:\n///   * Supports 100+ languages\n///   * Output dimensions: 768\n///   * Max input tokens: 2,048\n///\n/// **Legacy models:**\n/// - `textembedding-gecko@003`\n/// - `textembedding-gecko@002`\n/// - `textembedding-gecko@001`\n///\n/// The previous list may not be exhaustive or up-to-date. 
Check out\n/// the [Vertex AI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions)\n/// for the latest stable models.\n///\n/// ### Task type\n///\n/// Embedding models support specifying a 'task type' when embedding documents.\n/// The task type is used by the model to improve the quality of the embeddings.\n///\n/// This class automatically uses:\n/// - `RETRIEVAL_DOCUMENT`: for [embedDocuments]\n/// - `RETRIEVAL_QUERY`: for [embedQuery]\n///\n/// ### Output dimensionality\n///\n/// Some models support specifying a smaller number of dimensions for the\n/// resulting embeddings. This can reduce storage costs with minimal\n/// performance loss. Use the [dimensions] parameter to specify custom\n/// dimensions.\n///\n/// ### Title\n///\n/// Embedding models support specifying a document title when embedding\n/// documents. The title is used by the model to improve embedding quality.\n///\n/// To specify a document title, add the title to the document's metadata.\n/// Then, specify the metadata key in the [docTitleKey] parameter.\n///\n/// Example:\n/// ```dart\n/// final embeddings = VertexAIEmbeddings(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n///   docTitleKey: 'title',\n/// );\n/// final result = await embeddings.embedDocuments([\n///   Document(\n///     pageContent: 'Hello world',\n///     metadata: {'title': 'Hello!'},\n///   ),\n/// ]);\n/// ```\n/// {@endtemplate}\nclass VertexAIEmbeddings extends Embeddings {\n  /// {@macro vertex_ai_embeddings}\n  VertexAIEmbeddings({\n    required final HttpClientAuthProvider authProvider,\n    required final String project,\n    final String location = 'us-central1',\n    this.model = 'text-embedding-005',\n    this.dimensions,\n    this.batchSize = 100,\n    this.docTitleKey = 'title',\n  }) : _client = g.GoogleAIClient(\n         config: g.GoogleAIConfig.vertexAI(\n           projectId: project,\n           location: location,\n           authProvider: 
authProvider,\n         ),\n       );\n\n  /// A client for interacting with Vertex AI API.\n  final g.GoogleAIClient _client;\n\n  /// The embeddings model to use.\n  ///\n  /// To use the latest stable model version, specify the model name without\n  /// a version number (e.g. `text-embedding-005`).\n  /// To use a specific model version, specify the model version number\n  /// (e.g. `text-embedding-004`).\n  ///\n  /// You can find a list of available models here:\n  /// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#latest-stable\n  final String model;\n\n  /// The number of dimensions the resulting output embeddings should have.\n  ///\n  /// Supported in newer models like `text-embedding-005`.\n  /// If not specified, the model's default dimensions will be used.\n  final int? dimensions;\n\n  /// The maximum number of documents to embed in a single batch request.\n  ///\n  /// Newer models support up to 100 or more texts per request.\n  final int batchSize;\n\n  /// The metadata key used to store the document's (optional) title.\n  final String docTitleKey;\n\n  /// Scope required for Vertex AI API calls.\n  static const String cloudPlatformScope =\n      'https://www.googleapis.com/auth/cloud-platform';\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    final batches = chunkList(documents, chunkSize: batchSize);\n\n    final List<List<List<double>>> embeddings = await Future.wait(\n      batches.map((final batch) async {\n        // Use batch API for better performance\n        try {\n          final response = await _client.models.batchEmbedContents(\n            model: model,\n            request: g.BatchEmbedContentsRequest(\n              requests: batch\n                  .map(\n                    (final doc) => g.EmbedContentRequest(\n                      content: g.Content(parts: [g.TextPart(doc.pageContent)]),\n                      taskType: 
g.TaskType.retrievalDocument,\n                      title: doc.metadata[docTitleKey] as String?,\n                      outputDimensionality: dimensions,\n                    ),\n                  )\n                  .toList(),\n            ),\n          );\n          return response.embeddings.map((e) => e.values).toList();\n        } on g.ApiException catch (e) {\n          // Fallback to sequential requests if batch API fails\n          if (e.statusCode == 400 &&\n              (e.message.contains('model is not specified') ||\n                  e.message.contains('model'))) {\n            final results = await Future.wait(\n              batch.map((final doc) async {\n                final response = await _client.models.embedContent(\n                  model: model,\n                  request: g.EmbedContentRequest(\n                    content: g.Content(parts: [g.TextPart(doc.pageContent)]),\n                    taskType: g.TaskType.retrievalDocument,\n                    title: doc.metadata[docTitleKey] as String?,\n                    outputDimensionality: dimensions,\n                  ),\n                );\n                return response.embedding.values;\n              }),\n            );\n            return results;\n          } else {\n            rethrow;\n          }\n        }\n      }),\n    );\n\n    return embeddings.expand((final e) => e).toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    final response = await _client.models.embedContent(\n      model: model,\n      request: g.EmbedContentRequest(\n        content: g.Content(parts: [g.TextPart(query)]),\n        taskType: g.TaskType.retrievalQuery,\n        outputDimensionality: dimensions,\n      ),\n    );\n    return response.embedding.values;\n  }\n\n  /// {@template vertex_ai_embeddings_list_models}\n  /// Returns a list of available embedding models from Vertex AI.\n  ///\n  /// This method filters models to return only 
those that support embeddings\n  /// (embedContent method).\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = VertexAIEmbeddings(\n  ///   authProvider: authProvider,\n  ///   project: 'your-project-id',\n  /// );\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName}');\n  ///   print('  Input limit: ${model.inputTokenLimit}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final models = <g.Model>[];\n    String? pageToken;\n\n    // Paginate through all models\n    do {\n      final response = await _client.models.list(pageToken: pageToken);\n      models.addAll(response.models);\n      pageToken = response.nextPageToken;\n    } while (pageToken != null);\n\n    // Filter to only embedding-capable models (those supporting embedContent)\n    return models\n        .where(_isEmbeddingModel)\n        .map(\n          (final m) => ModelInfo(\n            id: _extractModelId(m.name),\n            displayName: m.displayName,\n            description: m.description,\n            inputTokenLimit: m.inputTokenLimit,\n            outputTokenLimit: m.outputTokenLimit,\n          ),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model supports embeddings (embedContent).\n  static bool _isEmbeddingModel(final g.Model model) {\n    return model.supportedGenerationMethods?.contains('embedContent') ?? false;\n  }\n\n  /// Extracts the model ID from the full model name.\n  static String _extractModelId(final String name) {\n    const prefix = 'models/';\n    if (name.startsWith(prefix)) {\n      return name.substring(prefix.length);\n    }\n    return name;\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/llms/llms.dart",
    "content": "export 'vertex_ai/types.dart';\nexport 'vertex_ai/vertex_ai.dart';\n"
  },
  {
    "path": "packages/langchain_google/lib/src/llms/vertex_ai/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\n\nextension GenerateContentResponseMapper on g.GenerateContentResponse {\n  LLMResult toLLMResult(final String id, final String model) {\n    final candidate = candidates?.first;\n    if (candidate == null) {\n      throw StateError('No candidates in response');\n    }\n\n    // Extract text content from all text parts\n    final output =\n        candidate.content?.parts\n            .whereType<g.TextPart>()\n            .map((p) => p.text)\n            .join('\\n') ??\n        '';\n\n    return LLMResult(\n      id: id,\n      output: output,\n      finishReason: _mapFinishReason(candidate.finishReason),\n      metadata: {\n        'model': model,\n        'block_reason': promptFeedback?.blockReason?.name,\n        'safety_ratings': candidate.safetyRatings\n            ?.map(\n              (r) => {\n                'category': r.category.name,\n                'probability': r.probability.name,\n              },\n            )\n            .toList(growable: false),\n        'citation_metadata': candidate.citationMetadata?.citationSources\n            ?.map(\n              (final g.CitationSource s) => {\n                'start_index': s.startIndex,\n                'end_index': s.endIndex,\n                'uri': s.uri,\n                'title': s.title,\n                'license': s.license,\n                'publication_date': s.publicationDate?.toIso8601String(),\n              },\n            )\n            .toList(growable: false),\n      },\n      usage: LanguageModelUsage(\n        promptTokens: usageMetadata?.promptTokenCount,\n        responseTokens: usageMetadata?.candidatesTokenCount,\n        totalTokens: usageMetadata?.totalTokenCount,\n      ),\n    );\n  }\n\n  FinishReason _mapFinishReason(final g.FinishReason? 
reason) =>\n      switch (reason) {\n        g.FinishReason.unspecified => FinishReason.unspecified,\n        g.FinishReason.stop => FinishReason.stop,\n        g.FinishReason.maxTokens => FinishReason.length,\n        g.FinishReason.safety => FinishReason.contentFilter,\n        g.FinishReason.recitation => FinishReason.recitation,\n        g.FinishReason.other => FinishReason.unspecified,\n        g.FinishReason.blocklist => FinishReason.contentFilter,\n        g.FinishReason.prohibitedContent => FinishReason.contentFilter,\n        g.FinishReason.spii => FinishReason.contentFilter,\n        g.FinishReason.malformedFunctionCall => FinishReason.unspecified,\n        null => FinishReason.unspecified,\n      };\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/llms/vertex_ai/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template vertex_ai_options}\n/// Options to pass into the Vertex AI LLM (Gemini API).\n///\n/// You can find a list of available models here:\n/// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#latest-stable\n/// {@endtemplate}\n@immutable\nclass VertexAIOptions extends LLMOptions {\n  /// {@macro vertex_ai_options}\n  const VertexAIOptions({\n    super.model,\n    this.maxOutputTokens,\n    this.temperature,\n    this.topP,\n    this.topK,\n    this.stopSequences,\n    this.candidateCount,\n    super.concurrencyLimit,\n  });\n\n  /// Maximum number of tokens that can be generated in the response.\n  ///\n  /// If unset, this will default to `output_token_limit` specified in the `Model`\n  /// specification.\n  final int? maxOutputTokens;\n\n  /// Controls the randomness of the output.\n  ///\n  /// Note: The default value varies by model, see the `Model.temperature`\n  /// attribute of the `Model` returned the `getModel` function.\n  ///\n  /// Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will\n  /// produce responses that are more varied and creative, while a value\n  /// closer to 0.0 will typically result in more straightforward responses\n  /// from the model.\n  final double? temperature;\n\n  /// The maximum cumulative probability of tokens to consider when sampling.\n  ///\n  /// The model uses combined Top-k and nucleus sampling. Tokens are sorted\n  /// based on their assigned probabilities so that only the most likely\n  /// tokens are considered. 
Top-k sampling directly limits the maximum\n  /// number of tokens to consider, while Nucleus sampling limits number of\n  /// tokens based on the cumulative probability.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_p`\n  /// attribute of the `Model` returned the `getModel` function.\n  final double? topP;\n\n  /// The maximum number of tokens to consider when sampling.\n  ///\n  /// The model uses combined Top-k and nucleus sampling. Top-k sampling considers\n  /// the set of `top_k` most probable tokens. Defaults to 40.\n  ///\n  /// Note: The default value varies by model, see the `Model.top_k` attribute\n  /// of the `Model` returned the `getModel` function.\n  final int? topK;\n\n  /// The set of character sequences (up to 5) that will stop output generation.\n  ///\n  /// If specified, the API will stop at the first appearance of a stop sequence.\n  /// The stop sequence will not be included as part of the response.\n  final List<String>? stopSequences;\n\n  /// Number of generated responses to return.\n  ///\n  /// This value must be between [1, 8], inclusive. If unset, this will default to 1.\n  final int? candidateCount;\n\n  @override\n  VertexAIOptions copyWith({\n    final String? model,\n    final int? maxOutputTokens,\n    final double? temperature,\n    final double? topP,\n    final int? topK,\n    final List<String>? stopSequences,\n    final int? candidateCount,\n    final int? concurrencyLimit,\n  }) {\n    return VertexAIOptions(\n      model: model ?? this.model,\n      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,\n      temperature: temperature ?? this.temperature,\n      topP: topP ?? this.topP,\n      topK: topK ?? this.topK,\n      stopSequences: stopSequences ?? this.stopSequences,\n      candidateCount: candidateCount ?? this.candidateCount,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  VertexAIOptions merge(covariant final VertexAIOptions? 
other) {\n    return copyWith(\n      model: other?.model,\n      maxOutputTokens: other?.maxOutputTokens,\n      temperature: other?.temperature,\n      topP: other?.topP,\n      topK: other?.topK,\n      stopSequences: other?.stopSequences,\n      candidateCount: other?.candidateCount,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final VertexAIOptions other) {\n    return model == other.model &&\n        maxOutputTokens == other.maxOutputTokens &&\n        temperature == other.temperature &&\n        topP == other.topP &&\n        topK == other.topK &&\n        const ListEquality<String>().equals(\n          stopSequences,\n          other.stopSequences,\n        ) &&\n        candidateCount == other.candidateCount &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        maxOutputTokens.hashCode ^\n        temperature.hashCode ^\n        topP.hashCode ^\n        topK.hashCode ^\n        const ListEquality<String>().hash(stopSequences) ^\n        candidateCount.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/llms/vertex_ai/vertex_ai.dart",
    "content": "import 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:uuid/uuid.dart';\n\nimport '../../utils/auth/http_client_auth_provider.dart';\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// {@template vertex_ai}\n/// Wrapper around GCP Vertex AI text models API (Gemini API).\n///\n/// Example:\n/// ```dart\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: ServiceAccountCredentials.fromJson({...}),\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final llm = VertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// final result = await llm('Hello world!');\n/// ```\n///\n/// Vertex AI documentation:\n/// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/overview\n///\n/// ### Set up your Google Cloud Platform project\n///\n/// 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n/// 2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n/// 3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n/// 4. 
[Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n///\n/// ### Authentication\n///\n/// To create an instance of `VertexAI` you need to provide an\n/// [HttpClientAuthProvider] that wraps your service account credentials.\n///\n/// Example using a service account JSON:\n///\n/// ```dart\n/// final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n///   json.decode(serviceAccountJson),\n/// );\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: serviceAccountCredentials,\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n/// final llm = VertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n/// );\n/// ```\n///\n/// The service account should have the following\n/// [permission](https://cloud.google.com/vertex-ai/docs/general/iam-permissions):\n/// - `aiplatform.endpoints.predict`\n///\n/// The required [OAuth2 scope](https://developers.google.com/identity/protocols/oauth2/scopes)\n/// is:\n/// - `https://www.googleapis.com/auth/cloud-platform` (you can use the\n///   constant [VertexAI.cloudPlatformScope])\n///\n/// See: https://cloud.google.com/vertex-ai/docs/generative-ai/access-control\n///\n/// ### Available models\n///\n/// **Latest stable models:**\n///\n/// - `gemini-2.5-flash` (recommended):\n///   * Multimodal input and text output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-2.0-flash-exp`:\n///   * Multimodal input and output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-1.5-pro`:\n///   * Multimodal input and text output\n///   * Context window: 2M tokens\n///   * Max output: 8,192 tokens\n///\n/// - `gemini-1.5-flash`:\n///   * Multimodal input and text output\n///   * Context window: 1M tokens\n///   * Max output: 8,192 tokens\n///\n/// The previous list of models may not be exhaustive or up-to-date. 
Check out\n/// the [Vertex AI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions#latest-stable)\n/// for the latest stable models.\n///\n/// ### Model options\n///\n/// You can define default options to use when calling the model (e.g.\n/// temperature, stop sequences, etc.) using the [defaultOptions] parameter.\n///\n/// The default options can be overridden when calling the model using the\n/// `options` parameter.\n///\n/// Example:\n/// ```dart\n/// final llm = VertexAI(\n///   authProvider: authProvider,\n///   project: 'your-project-id',\n///   defaultOptions: VertexAIOptions(\n///     temperature: 0.9,\n///   ),\n/// );\n/// final result = await llm(\n///   'Hello world!',\n///   options: VertexAIOptions(\n///     temperature: 0.5,\n///   ),\n/// );\n/// ```\n/// {@endtemplate}\nclass VertexAI extends BaseLLM<VertexAIOptions> {\n  /// {@macro vertex_ai}\n  VertexAI({\n    required final HttpClientAuthProvider authProvider,\n    required final String project,\n    final String location = 'us-central1',\n    super.defaultOptions = const VertexAIOptions(model: defaultModel),\n  }) : _currentModel = defaultOptions.model ?? 
defaultModel {\n    _googleAiClient = g.GoogleAIClient(\n      config: g.GoogleAIConfig.vertexAI(\n        projectId: project,\n        location: location,\n        authProvider: authProvider,\n      ),\n    );\n  }\n\n  /// A client for interacting with Vertex AI API.\n  late g.GoogleAIClient _googleAiClient;\n\n  /// Scope required for Vertex AI API calls.\n  static const String cloudPlatformScope =\n      'https://www.googleapis.com/auth/cloud-platform';\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  /// The current model.\n  String _currentModel;\n\n  @override\n  String get modelType => 'vertex-ai';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gemini-2.5-flash';\n\n  @override\n  Future<LLMResult> invoke(\n    final PromptValue input, {\n    final VertexAIOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    _updateCurrentModel(options);\n\n    final request = _generateCompletionRequest(\n      input.toString(),\n      options: options,\n    );\n    final response = await _googleAiClient.models.generateContent(\n      model: _currentModel,\n      request: request,\n    );\n\n    return response.toLLMResult(id, _currentModel);\n  }\n\n  @override\n  Stream<LLMResult> stream(\n    final PromptValue input, {\n    final VertexAIOptions? options,\n  }) {\n    final id = _uuid.v4();\n    _updateCurrentModel(options);\n\n    final request = _generateCompletionRequest(\n      input.toString(),\n      options: options,\n    );\n    return _googleAiClient.models\n        .streamGenerateContent(model: _currentModel, request: request)\n        .map((final response) => response.toLLMResult(id, _currentModel));\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final VertexAIOptions? 
options,\n  }) {\n    throw UnsupportedError(\n      'Vertex AI does not expose a tokenizer, only counting tokens is supported.',\n    );\n  }\n\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final VertexAIOptions? options,\n  }) async {\n    _updateCurrentModel(options);\n\n    final request = _generateCompletionRequest(\n      promptValue.toString(),\n      options: options,\n    );\n    final response = await _googleAiClient.models.countTokens(\n      model: _currentModel,\n      request: g.CountTokensRequest(\n        contents: request.contents,\n        systemInstruction: request.systemInstruction,\n      ),\n    );\n    return response.totalTokens;\n  }\n\n  /// Creates a [g.GenerateContentRequest] from the given input.\n  g.GenerateContentRequest _generateCompletionRequest(\n    final String prompt, {\n    final VertexAIOptions? options,\n  }) {\n    final mergedOptions = options != null\n        ? defaultOptions.merge(options)\n        : defaultOptions;\n\n    return g.GenerateContentRequest(\n      contents: [\n        g.Content(parts: [g.TextPart(prompt)]),\n      ],\n      generationConfig: g.GenerationConfig(\n        temperature: mergedOptions.temperature,\n        topP: mergedOptions.topP,\n        topK: mergedOptions.topK,\n        candidateCount: mergedOptions.candidateCount,\n        maxOutputTokens: mergedOptions.maxOutputTokens,\n        stopSequences: mergedOptions.stopSequences ?? const [],\n      ),\n    );\n  }\n\n  /// Updates the current model if needed.\n  void _updateCurrentModel(final VertexAIOptions? options) {\n    final model = options?.model ?? defaultOptions.model ?? 
defaultModel;\n    if (model != _currentModel) {\n      _currentModel = model;\n    }\n  }\n\n  /// {@template vertex_ai_llm_list_models}\n  /// Returns a list of available models from Vertex AI.\n  ///\n  /// This method filters models to return only those that support text\n  /// generation (generateContent method).\n  ///\n  /// Example:\n  /// ```dart\n  /// final llm = VertexAI(\n  ///   authProvider: authProvider,\n  ///   project: 'your-project-id',\n  /// );\n  /// final models = await llm.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - ${model.displayName}');\n  ///   print('  Input limit: ${model.inputTokenLimit}');\n  ///   print('  Output limit: ${model.outputTokenLimit}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final models = <g.Model>[];\n    String? pageToken;\n\n    // Paginate through all models\n    do {\n      final response = await _googleAiClient.models.list(pageToken: pageToken);\n      models.addAll(response.models);\n      pageToken = response.nextPageToken;\n    } while (pageToken != null);\n\n    // Filter to only models supporting generateContent\n    return models\n        .where(_isGenerativeModel)\n        .map(\n          (final m) => ModelInfo(\n            id: _extractModelId(m.name),\n            displayName: m.displayName,\n            description: m.description,\n            inputTokenLimit: m.inputTokenLimit,\n            outputTokenLimit: m.outputTokenLimit,\n          ),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model supports text generation (generateContent).\n  static bool _isGenerativeModel(final g.Model model) {\n    return model.supportedGenerationMethods?.contains('generateContent') ??\n        false;\n  }\n\n  /// Extracts the model ID from the full model name.\n  static String _extractModelId(final String name) {\n    const prefix = 'models/';\n    if (name.startsWith(prefix)) {\n      return 
name.substring(prefix.length);\n    }\n    return name;\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  @override\n  void close() {\n    _googleAiClient.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/utils/auth/http_client_auth_provider.dart",
    "content": "export 'http_client_auth_provider_stub.dart'\n    if (dart.library.io) 'http_client_auth_provider_io.dart';\n"
  },
  {
    "path": "packages/langchain_google/lib/src/utils/auth/http_client_auth_provider_io.dart",
    "content": "import 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:googleapis_auth/auth_io.dart' as auth;\nimport 'package:http/http.dart' as http;\n\n/// {@template http_client_auth_provider}\n/// Authentication provider that bridges googleapis_auth with googleai_dart.\n///\n/// This provider wraps Google Cloud service account credentials and manages\n/// OAuth token refresh automatically, making it suitable for long-running\n/// Vertex AI applications.\n///\n/// Example:\n/// ```dart\n/// final authProvider = HttpClientAuthProvider(\n///   credentials: ServiceAccountCredentials.fromJson({...}),\n///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n/// );\n///\n/// final chat = ChatVertexAI(\n///   authProvider: authProvider,\n///   project: 'my-project',\n/// );\n/// ```\n///\n/// The provider automatically refreshes tokens when they expire.\n/// {@endtemplate}\nclass HttpClientAuthProvider implements g.AuthProvider {\n  /// {@macro http_client_auth_provider}\n  HttpClientAuthProvider({\n    required auth.ServiceAccountCredentials credentials,\n    required List<String> scopes,\n    http.Client? httpClient,\n  }) : _credentials = credentials,\n       _scopes = scopes,\n       _httpClient = httpClient ?? http.Client();\n\n  /// Creates an auth provider from a JSON service account key.\n  ///\n  /// Example:\n  /// ```dart\n  /// final authProvider = HttpClientAuthProvider.fromJson(\n  ///   serviceAccountJson: {\n  ///     \"type\": \"service_account\",\n  ///     \"project_id\": \"your-project\",\n  ///     // ... rest of service account JSON\n  ///   },\n  ///   scopes: ['https://www.googleapis.com/auth/cloud-platform'],\n  /// );\n  /// ```\n  factory HttpClientAuthProvider.fromJson({\n    required Map<String, dynamic> serviceAccountJson,\n    required List<String> scopes,\n    http.Client? 
httpClient,\n  }) {\n    return HttpClientAuthProvider(\n      credentials: auth.ServiceAccountCredentials.fromJson(serviceAccountJson),\n      scopes: scopes,\n      httpClient: httpClient,\n    );\n  }\n\n  final auth.ServiceAccountCredentials _credentials;\n  final List<String> _scopes;\n  final http.Client _httpClient;\n  auth.AccessCredentials? _cachedCredentials;\n\n  @override\n  Future<g.AuthCredentials> getCredentials() async {\n    // Refresh if expired or not yet obtained\n    if (_cachedCredentials == null ||\n        _cachedCredentials!.accessToken.expiry.isBefore(DateTime.now())) {\n      _cachedCredentials = await auth.obtainAccessCredentialsViaServiceAccount(\n        _credentials,\n        _scopes,\n        _httpClient,\n      );\n    }\n\n    return g.BearerTokenCredentials(_cachedCredentials!.accessToken.data);\n  }\n\n  /// Closes the HTTP client and cleans up resources.\n  ///\n  /// Call this method when you're done using the auth provider to prevent\n  /// resource leaks.\n  void close() {\n    _httpClient.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/utils/auth/http_client_auth_provider_stub.dart",
    "content": "// ignore_for_file: avoid_unused_constructor_parameters\n\nimport 'package:googleai_dart/googleai_dart.dart' as g;\nimport 'package:http/http.dart' as http;\n\n/// {@template http_client_auth_provider}\n/// Authentication provider that bridges googleapis_auth with googleai_dart.\n///\n/// **Note:** This class is not supported on web/WASM platforms because it\n/// requires dart:io for service account authentication. For web apps, use\n/// [ChatGoogleGenerativeAI] with an API key instead.\n/// {@endtemplate}\nclass HttpClientAuthProvider implements g.AuthProvider {\n  /// {@macro http_client_auth_provider}\n  ///\n  /// Throws [UnsupportedError] on web/WASM platforms.\n  HttpClientAuthProvider({\n    required Object credentials,\n    required List<String> scopes,\n    http.Client? httpClient,\n  }) {\n    throw UnsupportedError(\n      'HttpClientAuthProvider is not supported on this platform. '\n      'Service account authentication requires dart:io. '\n      'For web apps, use ChatGoogleGenerativeAI with an API key instead.',\n    );\n  }\n\n  /// Creates an auth provider from a JSON service account key.\n  ///\n  /// Throws [UnsupportedError] on web/WASM platforms.\n  factory HttpClientAuthProvider.fromJson({\n    required Map<String, dynamic> serviceAccountJson,\n    required List<String> scopes,\n    http.Client? httpClient,\n  }) {\n    throw UnsupportedError(\n      'HttpClientAuthProvider is not supported on this platform. '\n      'Service account authentication requires dart:io. 
'\n      'For web apps, use ChatGoogleGenerativeAI with an API key instead.',\n    );\n  }\n\n  @override\n  Future<g.AuthCredentials> getCredentials() {\n    throw UnsupportedError(\n      'HttpClientAuthProvider is not supported on this platform.',\n    );\n  }\n\n  /// Closes the HTTP client and cleans up resources.\n  ///\n  /// Throws [UnsupportedError] on web/WASM platforms.\n  void close() {\n    throw UnsupportedError(\n      'HttpClientAuthProvider is not supported on this platform.',\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/vector_stores/mappers.dart",
    "content": "import 'package:vertex_ai/vertex_ai.dart';\n\nimport 'types.dart';\n\n/// Mapper for [VertexAIIndexDatapointRestriction].\nabstract class VertexAIMatchingEngineFilterMapper {\n  /// Converts a [VertexAIMatchingEngineFilter] to a\n  /// [VertexAIIndexDatapointRestriction].\n  static VertexAIIndexDatapointRestriction toDto(\n    final VertexAIMatchingEngineFilter filter,\n  ) {\n    return VertexAIIndexDatapointRestriction(\n      namespace: filter.namespace,\n      allowList: filter.allowList,\n      denyList: filter.denyList,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/vector_stores/matching_engine.dart",
    "content": "import 'dart:convert';\n\nimport 'package:gcloud/storage.dart';\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/exceptions.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:uuid/uuid.dart';\nimport 'package:vertex_ai/vertex_ai.dart';\n\nimport 'types.dart';\n\n/// A vector store that uses Vertex AI Vector Search\n/// (former Vertex AI Matching Engine).\n///\n/// Vertex AI Vector Search provides a high-scale low latency vector database.\n///\n/// This vector stores relies on two GCP services:\n/// - Vertex AI Matching Engine: to store the vectors and perform similarity\n///   searches.\n/// - Google Cloud Storage: to store the documents and the vectors to add to\n///   the index.\n///\n/// Vertex AI Matching Engine documentation:\n/// https://cloud.google.com/vertex-ai/docs/matching-engine/overview\n///\n/// Currently it only supports Batch Updates, it doesn't support Streaming\n/// Updates. Batch Updates take around 1h to be applied to the index. See:\n/// https://cloud.google.com/vertex-ai/docs/matching-engine/update-rebuild-index#update_index_content_with_batch_updates\n///\n/// ### Set up your Google Cloud Platform project\n///\n/// 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).\n/// 2. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n/// 3. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n/// 4. 
[Configure the Vertex AI location](https://cloud.google.com/vertex-ai/docs/general/locations).\n///\n/// ### Create your Vertex AI Vector Search index\n///\n/// To use this vector store, first you need to create a Vertex AI Vector\n/// Search index and expose it in a Vertex AI index endpoint.\n///\n/// You can use the [vertex_ai](https://pub.dev/packages/vertex_ai) Dart package\n/// to do that.\n///\n/// Check out this sample script that creates the index and index endpoint\n/// ready to be used with LangChain.dart:\n/// https://github.com/davidmigloz/langchain_dart/tree/main/examples/vertex_ai_matching_engine_setup\n///\n/// ### Authentication\n///\n/// To create an instance of `VertexAIMatchingEngine` you need to provide an\n/// HTTP client that handles authentication. The easiest way to do this is to\n/// use [`AuthClient`](https://pub.dev/documentation/googleapis_auth/latest/googleapis_auth/AuthClient-class.html)\n/// from the [googleapis_auth](https://pub.dev/packages/googleapis_auth)\n/// package.\n///\n/// There are several ways to obtain an `AuthClient` depending on your use case.\n/// Check out the [googleapis_auth](https://pub.dev/packages/googleapis_auth)\n/// package documentation for more details.\n///\n/// Example using a service account JSON:\n///\n/// ```dart\n/// final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n///   json.decode(serviceAccountJson),\n/// );\n/// final authClient = await clientViaServiceAccount(\n///   serviceAccountCredentials,\n///   VertexAIMatchingEngine.cloudPlatformScopes,\n/// );\n/// final vectorStore = VertexAIMatchingEngine(\n///   httpClient: authClient,\n///   project: 'your-project-id',\n///   location: 'europe-west1',\n///   indexId: 'your-index-id',\n///   gcsBucketName: 'your-gcs-bucket-name',\n///   embeddings: embeddings,\n/// );\n/// ```\n///\n/// The minimum required permissions for the service account if you just need\n/// to query the index are:\n/// - `aiplatform.indexes.get`\n/// - 
`aiplatform.indexEndpoints.get`\n/// - `aiplatform.indexEndpoints.queryVectors`\n/// - `storage.objects.get`\n///\n/// If you also need to add new vectors to the index, the service account\n/// should have the following permissions as well:\n/// - `aiplatform.indexes.update`\n/// - `storage.objects.create`\n/// - `storage.objects.update`\n///\n/// The required [OAuth2 scopes](https://developers.google.com/identity/protocols/oauth2/scopes)\n/// are:\n/// - `https://www.googleapis.com/auth/cloud-platform`\n/// - `https://www.googleapis.com/auth/devstorage.full_control`\n///\n/// You can use the constant `VertexAIMatchingEngine.cloudPlatformScopes`.\n///\n/// ### Vector attributes filtering\n///\n/// Vertex AI Matching Engine allows you to add attributes to the vectors that\n/// you can later use to restrict vector matching searches to a subset of the\n/// index.\n///\n/// To add attributes to the vectors, add a `restricts` key to the document\n/// metadata with the attributes that you want to add. For example:\n/// ```dart\n/// final doc = Document(\n///  id: 'doc1',\n///  pageContent: 'The cat is a domestic species of small carnivorous mammal',\n///  metadata: {\n///    'restricts': [\n///      {\n///        'namespace': 'class',\n///        'allow': ['cat', 'pet']\n///      },\n///      {\n///        'namespace': 'category',\n///        'allow': ['feline']\n///      }\n///    ],\n///    'otherMetadata': '...',\n///  },\n/// );\n/// ```\n///\n/// Check out the documentation for more details:\n/// https://cloud.google.com/vertex-ai/docs/matching-engine/filtering\n///\n/// After adding the attributes to the documents, you can use them to\n/// restrict the similarity search results. 
Example:\n///\n/// ```dart\n/// final vectorStore = VertexAIMatchingEngine(...);\n/// final res = await vectorStore.similaritySearch(\n///   query: 'What should I feed my cat?',\n///   config: VertexAIMatchingEngineSimilaritySearch(\n///     k: 5,\n///     scoreThreshold: 0.8,\n///     filters: [\n///       const VertexAIMatchingEngineFilter(\n///         namespace: 'class',\n///         allowList: ['cat'],\n///       ),\n///     ],\n///   ),\n/// );\n/// ```\nclass VertexAIMatchingEngine extends VectorStore {\n  /// Creates a new Vertex AI Matching Engine vector store.\n  ///\n  /// - [httpClient] An authenticated HTTP client.\n  /// - [project] The ID of the Google Cloud project to use.\n  /// - [location] The Google Cloud location to use. Vertex AI and\n  ///   Cloud Storage should have the same location.\n  /// - [rootUrl] The root URL of the Vertex AI API. By default it uses\n  ///   `https://$location-aiplatform.googleapis.com/`.\n  /// - [queryRootUrl] The root URL of the Vertex AI Matching Engine index\n  ///   endpoint. For example: `https://your-query-root-url.vdb.vertexai.goog/`.\n  ///   Only needed if you have multiple endpoints for the same index.\n  /// - [indexId] The ID of the index to use. You can find this ID when you\n  ///   create the index or in the Vertex AI console.\n  /// - [indexEndpointId] The ID of the IndexEndpoint to use. Only needed if\n  ///   you have multiple endpoints for the same index.\n  /// - [deployedIndexId] The ID of the DeployedIndex to use. 
Only needed if\n  ///   you have multiple deployed indexes for the same index.\n  /// - [gcsBucketName] The name of the Google Cloud Storage bucket to use to\n  ///   store the documents and the vectors to add to the index.\n  /// - [gcsDocumentsFolder] The folder in the Google Cloud Storage bucket where\n  ///   the documents are stored.\n  /// - [gcsIndexesFolder] The folder in the Google Cloud Storage bucket where\n  ///   the vectors to add to the index are stored.\n  /// - [completeOverwriteWhenAdding] If true, when new vectors are added to\n  ///   the index, the previous ones are deleted. If false, the new vectors are\n  ///   added to the index without deleting the previous ones.\n  VertexAIMatchingEngine({\n    required final http.Client httpClient,\n    required this.project,\n    required this.location,\n    final String? rootUrl,\n    final String? queryRootUrl,\n    required this.indexId,\n    final String? indexEndpointId,\n    final String? deployedIndexId,\n    required this.gcsBucketName,\n    this.gcsDocumentsFolder = 'documents',\n    this.gcsIndexesFolder = 'indexes',\n    this.completeOverwriteWhenAdding = false,\n    required super.embeddings,\n  }) : _httpClient = httpClient,\n       _managementClient = VertexAIMatchingEngineClient(\n         httpClient: httpClient,\n         project: project,\n         location: location,\n         rootUrl: rootUrl ?? 'https://$location-aiplatform.googleapis.com/',\n       ),\n       _storageClient = Storage(httpClient, project),\n       _queryRootUrl = queryRootUrl,\n       _indexEndpointId = indexEndpointId,\n       _deployedIndexId = deployedIndexId;\n\n  /// An authenticated HTTP client.\n  final http.Client _httpClient;\n\n  /// A client for querying the Vertex AI Matching Engine index.\n  VertexAIMatchingEngineClient? 
_queryClient;\n\n  /// A client for managing the Vertex AI Matching Engine index.\n  final VertexAIMatchingEngineClient _managementClient;\n\n  /// A client for interacting with Google Cloud Storage.\n  final Storage _storageClient;\n\n  /// The ID of the Google Cloud project to use.\n  final String project;\n\n  /// The Google Cloud location to use. Vertex AI and Cloud Storage should have\n  /// the same location.\n  final String location;\n\n  /// The id of the index to use.\n  final String indexId;\n\n  /// The Google Cloud Storage bucket to use.\n  final String gcsBucketName;\n\n  /// The folder in the Google Cloud Storage bucket where the documents are\n  /// stored.\n  final String gcsDocumentsFolder;\n\n  /// The folder in the Google Cloud Storage bucket where the vectors to add\n  /// to the index are stored.\n  final String gcsIndexesFolder;\n\n  /// If true, when new vectors are added to the index, the previous ones are\n  /// deleted. If false, the new vectors are added to the index without\n  /// deleting the previous ones.\n  final bool completeOverwriteWhenAdding;\n\n  /// The root URL of the Vertex AI Matching Engine index endpoint.\n  String? _queryRootUrl;\n\n  /// The ID of the index endpoint to use.\n  String? _indexEndpointId;\n\n  /// The ID of the deployed index to use.\n  String? 
_deployedIndexId;\n\n  /// A UUID generator.\n  final _uuid = const Uuid();\n\n  /// Scopes required for Vertex AI and Cloud Storage API calls.\n  static const List<String> cloudPlatformScopes = [\n    VertexAIGenAIClient.cloudPlatformScope,\n    ...Storage.SCOPES,\n  ];\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    assert(vectors.length == documents.length);\n\n    final bucket = _storageClient.bucket(gcsBucketName);\n    final List<String> ids = [];\n    final List<String> vectorsJsons = [];\n\n    // Write each document to GCS (in gcsDocumentsFolder)\n    for (var i = 0; i < documents.length; i++) {\n      Document doc = documents[i];\n      if (doc.id == null) {\n        doc = doc.copyWith(id: _uuid.v4());\n      }\n      final id = doc.id!;\n      final vector = vectors[i];\n      final docPath = '$gcsDocumentsFolder/$id.json';\n      final docJson = json.encode(doc.toMap());\n      final vectorMap = {\n        'id': id,\n        'embedding': vector,\n        if (doc.metadata['restricts'] != null)\n          'restricts': doc.metadata['restricts'],\n      };\n      final vectorJson = json.encode(vectorMap);\n\n      ids.add(id);\n      vectorsJsons.add(vectorJson);\n      await bucket.writeBytes(\n        docPath,\n        utf8.encode(docJson),\n        contentType: 'application/json',\n      );\n    }\n\n    // Write JSON lines index file to GCS (in indexFolder)\n    final now = DateTime.now().toIso8601String();\n    final indexFolder = '$gcsIndexesFolder/$now';\n    final indexPath = '$indexFolder/${_uuid.v4()}.json';\n    await bucket.writeBytes(\n      indexPath,\n      utf8.encode(vectorsJsons.join('\\n')),\n      contentType: 'application/json',\n    );\n\n    // Trigger a batch update of the index\n    await _managementClient.indexes.update(\n      id: indexId,\n      metadata: VertexAIIndexRequestMetadata(\n        contentsDeltaUri: 
'gs://$gcsBucketName/$indexFolder/',\n        isCompleteOverwrite: completeOverwriteWhenAdding,\n      ),\n    );\n    return ids;\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) {\n    throw UnimplementedError(\n      'To delete vectors from Matching Engine you just need to set '\n      '`completeOverwriteWhenAdding` to true and add new ones. '\n      'Each new batch will replace the previous one.',\n    );\n  }\n\n  @override\n  Future<List<(Document, double scores)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    final (queryRootUrl, indexEndpointId, deployedIndexId) =\n        await _getIndexIds();\n    final queryClient = _getQueryClient(queryRootUrl);\n    final queryRes = await queryClient.indexEndpoints.findNeighbors(\n      indexEndpointId: indexEndpointId,\n      deployedIndexId: deployedIndexId,\n      queries: [\n        VertexAIFindNeighborsRequestQuery(\n          datapoint: VertexAIIndexDatapoint(\n            datapointId: _uuid.v4(),\n            featureVector: embedding,\n            restricts: config\n                .filter?[VertexAIMatchingEngineSimilaritySearch.filterKey],\n          ),\n          neighborCount: config.k,\n        ),\n      ],\n    );\n\n    Iterable<VertexAIFindNeighborsResponseNeighbor> neighbors =\n        queryRes.nearestNeighbors.firstOrNull?.neighbors ?? 
[];\n    if (neighbors.isEmpty) {\n      return const [];\n    }\n\n    if (config.scoreThreshold != null) {\n      final threshold = config.scoreThreshold!;\n      neighbors = neighbors.where(\n        (final neighbor) => neighbor.distance >= threshold,\n      );\n    }\n\n    final List<(Document, double)> results = [];\n    for (final neighbor in neighbors) {\n      final id = neighbor.datapoint.datapointId;\n      final document = await _getDocument(id);\n      final score = neighbor.distance;\n      results.add((document, score));\n    }\n\n    return results;\n  }\n\n  /// Returns the index endpoint and deployed index ids.\n  Future<(String queryRootUrl, String indexEndpointId, String deployedIndexId)>\n  _getIndexIds() async {\n    if (_queryRootUrl != null &&\n        _indexEndpointId != null &&\n        _deployedIndexId != null) {\n      return (_queryRootUrl!, _indexEndpointId!, _deployedIndexId!);\n    }\n\n    final index = await _managementClient.indexes.get(id: indexId);\n    if (index.deployedIndexes.isEmpty) {\n      throw LangChainException(\n        message: 'No deployed indexes found for index $indexId',\n      );\n    } else if (index.deployedIndexes.length > 1) {\n      throw LangChainException(\n        message:\n            'Multiple deployed indexes found for index $indexId. 
'\n            'Please specify the `indexEndpointId` and `deployedIndexId` '\n            'that you want to use.',\n      );\n    }\n    final deployedIndex = index.deployedIndexes.first;\n    final indexEndpoint = await _managementClient.indexEndpoints.get(\n      id: deployedIndex.indexEndpointId,\n    );\n\n    _queryRootUrl = 'http://${indexEndpoint.publicEndpointDomainName}/';\n    _indexEndpointId = deployedIndex.indexEndpointId;\n    _deployedIndexId = deployedIndex.deployedIndexId;\n    return (_queryRootUrl!, _indexEndpointId!, _deployedIndexId!);\n  }\n\n  /// Returns a client for querying the Vertex AI Matching Engine index.\n  VertexAIMatchingEngineClient _getQueryClient(final String queryRootUrl) {\n    return _queryClient ??= VertexAIMatchingEngineClient(\n      httpClient: _httpClient,\n      project: project,\n      location: location,\n      rootUrl: queryRootUrl,\n    );\n  }\n\n  /// Returns the document with the given id from the Storage bucket.\n  Future<Document> _getDocument(final String id) async {\n    final bucket = _storageClient.bucket(gcsBucketName);\n    final path = '$gcsDocumentsFolder/$id.json';\n    final jsonFile =\n        (await bucket\n                .read('documents/$id.json')\n                .transform(utf8.decoder)\n                .transform(json.decoder)\n                .toList())\n            .firstOrNull;\n    if (jsonFile == null) {\n      throw LangChainException(\n        message: 'No document found in gs://$gcsBucketName/$path',\n      );\n    }\n    return Document.fromMap(jsonFile as Map<String, dynamic>);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/vector_stores/types.dart",
    "content": "import 'package:langchain_core/vector_stores.dart';\nimport 'package:meta/meta.dart';\n\nimport 'mappers.dart';\n\n/// {@template vertex_ai_matching_engine_similarity_search}\n/// Vertex AI Matching Engine similarity search config.\n///\n/// Example:\n/// ```dart\n/// VertexAIMatchingEngineSimilaritySearch(\n///   k: 5,\n///   filters: [\n///     const VertexAIMatchingEngineFilter(\n///       namespace: 'class',\n///       allowList: ['pet'],\n///     ),\n///     const VertexAIMatchingEngineFilter(\n///       namespace: 'category',\n///       denyList: ['canine'],\n///     ),\n///   ]\n/// ),\n/// ```\n/// {@endtemplate}\nclass VertexAIMatchingEngineSimilaritySearch\n    extends VectorStoreSimilaritySearch {\n  /// {@macro vertex_ai_matching_engine_similarity_search}\n  VertexAIMatchingEngineSimilaritySearch({\n    super.k = 4,\n    final List<VertexAIMatchingEngineFilter>? filters,\n    super.scoreThreshold,\n  }) : super(\n         filter: filters != null\n             ? 
{\n                 filterKey: filters\n                     .map(VertexAIMatchingEngineFilterMapper.toDto)\n                     .toList(growable: false),\n               }\n             : null,\n       );\n\n  /// The key for the filter.\n  static const filterKey = 'restricts';\n}\n\n/// {@template vertex_ai_matching_engine_filter}\n/// Filter for the Vertex AI Matching Engine.\n/// See: https://cloud.google.com/vertex-ai/docs/matching-engine/filtering\n/// {@endtemplate}\n@immutable\nclass VertexAIMatchingEngineFilter {\n  /// {@macro vertex_ai_matching_engine_filter}\n  const VertexAIMatchingEngineFilter({\n    required this.namespace,\n    this.allowList = const [],\n    this.denyList = const [],\n  });\n\n  /// The namespace of this restriction.\n  ///\n  /// eg: color.\n  final String namespace;\n\n  /// The attributes to allow in this namespace.\n  ///\n  /// eg: 'red'\n  final List<String> allowList;\n\n  /// The attributes to deny in this namespace.\n  ///\n  /// eg: 'blue'\n  final List<String> denyList;\n}\n"
  },
  {
    "path": "packages/langchain_google/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'matching_engine.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_google/pubspec.yaml",
    "content": "name: langchain_google\ndescription: LangChain.dart integration module for Google (Gemini, Gemma, VertexAI, Vector Search, etc.).\nversion: 0.7.1+2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_google\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_google\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - vertex-ai\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  collection: ^1.19.1\n  gcloud: ^0.9.0\n  googleai_dart: ^3.5.0\n  googleapis: ^15.0.0\n  googleapis_auth: ^2.0.0\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  uuid: ^4.5.1\n  vertex_ai: ^0.2.1\n\ndev_dependencies:\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_google/test/chat_models/google_ai/chat_google_generative_ai_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatGoogleGenerativeAI tests', () {\n    const defaultModel = 'gemini-2.5-flash';\n\n    late ChatGoogleGenerativeAI chatModel;\n\n    setUp(() {\n      chatModel = ChatGoogleGenerativeAI(\n        apiKey: Platform.environment['GOOGLEAI_API_KEY'],\n        defaultOptions: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n        ),\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test Text-only input', () async {\n      const models = [\n        'gemini-2.5-pro',\n        'gemini-2.5-flash',\n        'gemini-2.5-flash-lite',\n      ];\n      for (final model in models) {\n        final res = await chatModel.invoke(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order '\n            'without any spaces, commas or additional explanations.',\n          ),\n          options: ChatGoogleGenerativeAIOptions(model: model, temperature: 0),\n        );\n        expect(res.id, isNotEmpty);\n        expect(res.finishReason, isNot(FinishReason.unspecified));\n        expect(res.metadata['model'], startsWith(model));\n        expect(res.metadata['block_reason'], isNull);\n        expect(\n          res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n          contains('123456789'),\n        );\n      }\n    });\n\n    test('Test models prefix', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order '\n          'without any spaces, commas or 
additional explanations.',\n        ),\n        options: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      expect(res.output.content, isNotEmpty);\n    });\n\n    test('Text-and-image input', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([\n          ChatMessage.human(\n            ChatMessageContent.multiModal([\n              ChatMessageContent.text('What fruit is this?'),\n              ChatMessageContent.image(\n                mimeType: 'image/jpeg',\n                data: base64.encode(\n                  await File(\n                    './test/chat_models/assets/apple.jpeg',\n                  ).readAsBytes(),\n                ),\n              ),\n            ]),\n          ),\n        ]),\n      );\n\n      expect(res.output.content.toLowerCase(), contains('apple'));\n    });\n\n    test('Test stop sequence', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n        options: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          stopSequences: ['4'],\n        ),\n      );\n      final text = res.output.content;\n      expect(text, contains('123'));\n      expect(text, isNot(contains('456789')));\n    });\n\n    test('Test max tokens', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string('Tell me a joke'),\n        options: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 2,\n        ),\n      );\n      expect(res.output.content.length, lessThan(20));\n      expect(res.finishReason, FinishReason.length);\n    });\n\n    test('Test Multi-turn conversations with gemini-pro', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.humanText(\n          'List the numbers 
from 1 to 9 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n        ChatMessage.ai('123456789'),\n        ChatMessage.humanText('Remove the number 4 from the list'),\n      ]);\n      final res = await chatModel.invoke(\n        prompt,\n        options: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      expect(res.output.content, contains('12356789'));\n    });\n\n    test('Test streaming', () async {\n      final stream = chatModel.stream(\n        PromptValue.string(\n          'List the numbers from 1 to 100 in order '\n          'without any spaces, commas or additional explanations.',\n        ),\n      );\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res.output.content;\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, contains('123456789'));\n    });\n\n    test(\n      'Test tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        const tool = ToolSpec(\n          name: 'get_current_weather',\n          description: 'Get the current weather in a given location',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {\n                'type': 'string',\n                'description': 'The city and country, e.g. 
San Francisco, US',\n              },\n              'unit': {\n                'type': 'string',\n                'description': 'The unit of temperature to return',\n                'enum': ['celsius', 'fahrenheit'],\n              },\n            },\n            'required': ['location'],\n          },\n        );\n        final model = chatModel.bind(\n          const ChatGoogleGenerativeAIOptions(\n            model: defaultModel,\n            tools: [tool],\n          ),\n        );\n\n        final humanMessage = ChatMessage.humanText(\n          'What’s the weather like in Boston, US and Madrid, Spain in Celsius?',\n        );\n        final res1 = await model.invoke(PromptValue.chat([humanMessage]));\n\n        final aiMessage1 = res1.output;\n        expect(aiMessage1.toolCalls, hasLength(2));\n\n        final toolCall1 = aiMessage1.toolCalls.first;\n        expect(toolCall1.name, tool.name);\n        expect(toolCall1.arguments.containsKey('location'), isTrue);\n        expect(toolCall1.arguments['location'], contains('Boston'));\n        expect(toolCall1.arguments['unit'], 'celsius');\n\n        final toolCall2 = aiMessage1.toolCalls.last;\n        expect(toolCall2.name, tool.name);\n        expect(toolCall2.arguments.containsKey('location'), isTrue);\n        expect(toolCall2.arguments['location'], contains('Madrid'));\n        expect(toolCall2.arguments['unit'], 'celsius');\n\n        final functionResult1 = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage1 = ChatMessage.tool(\n          toolCallId: toolCall1.id,\n          content: json.encode(functionResult1),\n        );\n\n        final functionResult2 = {\n          'temperature': '25',\n          'unit': 'celsius',\n          'description': 'Cloudy',\n        };\n        final functionMessage2 = ChatMessage.tool(\n          toolCallId: toolCall2.id,\n          content: json.encode(functionResult2),\n       
 );\n\n        final res2 = await model.invoke(\n          PromptValue.chat([\n            humanMessage,\n            aiMessage1,\n            functionMessage1,\n            functionMessage2,\n          ]),\n        );\n\n        final aiMessage2 = res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n        expect(aiMessage2.content, contains('25'));\n      },\n    );\n\n    test('Test code execution', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\n          'Calculate the fibonacci sequence up to 10 terms and output the last one.',\n        ),\n        options: const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          enableCodeExecution: true,\n        ),\n      );\n      final text = res.output.content;\n      expect(text, contains('34'));\n      expect(res.metadata['executable_code'], isNotNull);\n      expect(res.metadata['code_execution_result'], isNotNull);\n    });\n\n    // https://github.com/davidmigloz/langchain_dart/issues/753\n    test('Batches sequential tool responses into a single turn', () async {\n      const tool = ToolSpec(\n        name: 'add',\n        description: 'sum of `a` and `b`: result = `a` + `b`',\n        inputJsonSchema: {\n          'type': 'object',\n          'properties': {\n            'a': {'type': 'integer'},\n            'b': {'type': 'integer'},\n          },\n          'required': ['a', 'b'],\n        },\n      );\n\n      final model = chatModel.bind(\n        const ChatGoogleGenerativeAIOptions(\n          model: defaultModel,\n          tools: [tool],\n          temperature: 0,\n        ),\n      );\n\n      // Encourage Gemini to emit both function calls in a single model turn\n      final humanMessage = ChatMessage.humanText(\n        'Compute 1+2 and 5+8 in one go using the `add` tool only. 
'\n        'Do not produce the final answer until tool results are provided.',\n      );\n\n      final res1 = await model.invoke(PromptValue.chat([humanMessage]));\n      final aiMessage1 = res1.output;\n\n      // The model should request two tool calls in the same turn.\n      expect(aiMessage1.toolCalls, hasLength(2));\n      for (final call in aiMessage1.toolCalls) {\n        expect(call.name, tool.name);\n        expect(call.arguments.containsKey('a'), isTrue);\n        expect(call.arguments.containsKey('b'), isTrue);\n      }\n\n      // Provide two consecutive ToolChatMessages (these will be batched by the mapper)\n      final functionMessages = <ChatMessage>[];\n      for (final call in aiMessage1.toolCalls) {\n        final a = call.arguments['a'] as int;\n        final b = call.arguments['b'] as int;\n        final result = a + b;\n        functionMessages.add(\n          ChatMessage.tool(\n            toolCallId: call.id,\n            content: json.encode({'result': result}),\n          ),\n        );\n      }\n\n      // If batching works, a single functionResponses turn will be sent and this call succeeds\n      final res2 = await model.invoke(\n        PromptValue.chat([humanMessage, aiMessage1, ...functionMessages]),\n      );\n\n      final aiMessage2 = res2.output;\n      expect(aiMessage2.toolCalls, isEmpty);\n      // The final answer should incorporate both tool results: 3 and 13.\n      expect(aiMessage2.content, contains('3'));\n      expect(aiMessage2.content, contains('13'));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_google/test/chat_models/vertex_ai/chat_vertex_ai_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nimport '../../utils/auth.dart';\n\nvoid main() {\n  final authProvider = getAuthProvider();\n  const defaultModel = 'gemini-2.5-flash';\n\n  group('ChatVertexAI tests', () {\n    test('Test ChatVertexAI parameters', () {\n      final llm = ChatVertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        location: 'us-central1',\n        defaultOptions: const ChatVertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 10,\n          temperature: 0.1,\n          topP: 0.1,\n          topK: 10,\n          stopSequences: ['\\n'],\n          candidateCount: 1,\n        ),\n      );\n      expect(llm.modelType, 'vertex-ai-chat');\n      expect(\n        llm.defaultOptions,\n        const ChatVertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 10,\n          temperature: 0.1,\n          topP: 0.1,\n          topK: 10,\n          stopSequences: ['\\n'],\n          candidateCount: 1,\n        ),\n      );\n    });\n\n    test('Test call to ChatVertexAI', () async {\n      final chat = ChatVertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const ChatVertexAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      final messages = [ChatMessage.humanText('Tell me a joke.')];\n      final res = await chat(messages);\n      expect(res.content, isNotEmpty);\n    });\n\n    test('Test invoke to ChatVertexAI', () async {\n      final chat = ChatVertexAI(\n        authProvider: authProvider,\n        project: 
Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const ChatVertexAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      final prompt = PromptValue.chat([\n        ChatMessage.system('You are a comedian.'),\n        ChatMessage.humanText('Tell me a joke.'),\n      ]);\n      final res = await chat.invoke(prompt);\n      expect(res.output.content, isNotEmpty);\n    });\n\n    test('Test stream to ChatVertexAI', () async {\n      final chat = ChatVertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const ChatVertexAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      final prompt = PromptValue.chat([\n        ChatMessage.humanText('Tell me a joke.'),\n      ]);\n      final stream = chat.stream(prompt);\n      final results = await stream.toList();\n      expect(results, isNotEmpty);\n      for (final result in results) {\n        expect(result.output.content, isNotEmpty);\n      }\n    });\n\n    test('Test countTokens', () async {\n      final chat = ChatVertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const ChatVertexAIOptions(model: defaultModel),\n      );\n      final prompt = PromptValue.string('Hello world');\n      final tokenCount = await chat.countTokens(prompt);\n      expect(tokenCount, greaterThan(0));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_google/test/embeddings/google_ai/google_ai_embeddings_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('GoogleGenerativeAIEmbeddings tests', () {\n    late GoogleGenerativeAIEmbeddings embeddings;\n\n    setUp(() {\n      embeddings = GoogleGenerativeAIEmbeddings(\n        apiKey: Platform.environment['GOOGLEAI_API_KEY'],\n      );\n    });\n\n    tearDown(() {\n      embeddings.close();\n    });\n\n    test('Test GoogleGenerativeAIEmbeddings.embedQuery', () async {\n      const models = ['gemini-embedding-001'];\n      for (final model in models) {\n        embeddings.model = model;\n        final res = await embeddings.embedQuery('Hello world');\n        expect(res.length, 3072);\n      }\n    });\n\n    test('Test GoogleGenerativeAIEmbeddings.embedDocuments', () async {\n      final res = await embeddings.embedDocuments([\n        const Document(id: '1', pageContent: 'Hello world'),\n        const Document(id: '2', pageContent: 'Bye bye'),\n      ]);\n      expect(res.length, 2);\n      expect(res[0].length, 3072);\n      expect(res[1].length, 3072);\n    });\n\n    test('Test shortening embeddings', () async {\n      embeddings.dimensions = 256;\n      final res = await embeddings.embedQuery('Hello world');\n      expect(res.length, 256);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_google/test/embeddings/vertex_ai/vertex_ai_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nimport '../../utils/auth.dart';\n\nvoid main() {\n  final authProvider = getAuthProvider();\n  group('VertexAIEmbeddings tests', () {\n    test('Test VertexAIEmbeddings.embedQuery', () async {\n      final embeddings = VertexAIEmbeddings(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n      );\n      final res = await embeddings.embedQuery('Hello world');\n      expect(res.length, 768);\n    });\n\n    test('Test VertexAIEmbeddings.embedDocuments', () async {\n      final embeddings = VertexAIEmbeddings(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        batchSize: 1,\n      );\n      final res = await embeddings.embedDocuments([\n        const Document(\n          id: '1',\n          pageContent: 'Hello world',\n          metadata: {'title': 'Hello!'},\n        ),\n        const Document(\n          id: '2',\n          pageContent: 'Bye bye',\n          metadata: {'title': 'Bye!'},\n        ),\n      ]);\n      expect(res.length, 2);\n      expect(res[0].length, 768);\n      expect(res[1].length, 768);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_google/test/llms/vertex_ai_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nimport '../utils/auth.dart';\n\nvoid main() {\n  final authProvider = getAuthProvider();\n  const defaultModel = 'gemini-2.5-flash';\n\n  group('VertexAI tests', () {\n    test('Test VertexAI parameters', () {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        location: 'us-central1',\n        defaultOptions: const VertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 10,\n          temperature: 0.1,\n          topP: 0.1,\n          topK: 10,\n          stopSequences: ['\\n'],\n          candidateCount: 1,\n        ),\n      );\n      expect(llm.modelType, 'vertex-ai');\n      expect(\n        llm.defaultOptions,\n        const VertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 10,\n          temperature: 0.1,\n          topP: 0.1,\n          topK: 10,\n          stopSequences: ['\\n'],\n          candidateCount: 1,\n        ),\n      );\n    });\n\n    test('Test call to VertexAI', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n      );\n      final output = await llm('Say foo:');\n      expect(output, isNotEmpty);\n    });\n\n    test('Test invoke to VertexAI', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const VertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 100,\n          temperature: 0,\n        ),\n      );\n      final res = await llm.invoke(PromptValue.string('Hello, how are you?'));\n      expect(res.output, 
isNotEmpty);\n    });\n\n    test('Test model output contains metadata', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const VertexAIOptions(\n          model: defaultModel,\n          maxOutputTokens: 100,\n          temperature: 0,\n        ),\n      );\n      final res = await llm.invoke(PromptValue.string('Hello, how are you?'));\n      expect(res.metadata, isNotEmpty);\n      expect(res.metadata['model'], llm.defaultOptions.model);\n      expect(res.usage.promptTokens, isNotNull);\n      expect(res.usage.responseTokens, isNotNull);\n      expect(res.usage.totalTokens, isNotNull);\n    });\n\n    test('Test model stop sequence', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const VertexAIOptions(\n          model: defaultModel,\n          stopSequences: ['4'],\n          temperature: 0,\n        ),\n      );\n      final res = await llm.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order without any spaces or commas',\n        ),\n      );\n      expect(res.output, contains('123'));\n      expect(res.output, isNot(contains('456789')));\n\n      // call options should override defaults\n      final res2 = await llm.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order without any spaces or commas',\n        ),\n        options: const VertexAIOptions(stopSequences: ['5']),\n      );\n      expect(res2.output, contains('1234'));\n      expect(res2.output, isNot(contains('56789')));\n    });\n\n    test('Test stream to VertexAI', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n        defaultOptions: const VertexAIOptions(\n          model: defaultModel,\n          
temperature: 0,\n        ),\n      );\n      final stream = llm.stream(PromptValue.string('Hello, how are you?'));\n      final results = await stream.toList();\n      expect(results, isNotEmpty);\n      for (final result in results) {\n        expect(result.output, isNotEmpty);\n      }\n    });\n\n    test('Test countTokens', () async {\n      final llm = VertexAI(\n        authProvider: authProvider,\n        project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n      );\n      const text = 'Hello, how are you?';\n\n      final numTokens = await llm.countTokens(PromptValue.string(text));\n      expect(numTokens, greaterThan(0));\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_google/test/utils/auth.dart",
    "content": "import 'dart:convert';\nimport 'dart:io';\n\nimport 'package:gcloud/storage.dart';\nimport 'package:googleapis_auth/auth_io.dart';\nimport 'package:langchain_google/langchain_google.dart';\n\nHttpClientAuthProvider getAuthProvider() {\n  return HttpClientAuthProvider(\n    credentials: ServiceAccountCredentials.fromJson(\n      json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n    ),\n    scopes: [\n      'https://www.googleapis.com/auth/cloud-platform',\n      ...Storage.SCOPES,\n    ],\n  );\n}\n"
  },
  {
    "path": "packages/langchain_google/test/vector_stores/matching_engine_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:googleapis_auth/auth_io.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:langchain_google/langchain_google.dart';\nimport 'package:test/test.dart';\n\nimport '../utils/auth.dart';\n\nvoid main() async {\n  final authProvider = getAuthProvider();\n  final embeddings = VertexAIEmbeddings(\n    authProvider: authProvider,\n    project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n    model: 'text-embedding-005',\n  );\n\n  // VertexAIMatchingEngine still needs an AuthClient (http.Client)\n  final serviceAccountCredentials = ServiceAccountCredentials.fromJson(\n    json.decode(Platform.environment['VERTEX_AI_SERVICE_ACCOUNT']!),\n  );\n  final authHttpClient = await clientViaServiceAccount(\n    serviceAccountCredentials,\n    ['https://www.googleapis.com/auth/cloud-platform'],\n  );\n\n  final vectorStore = VertexAIMatchingEngine(\n    httpClient: authHttpClient,\n    project: Platform.environment['VERTEX_AI_PROJECT_ID']!,\n    location: 'europe-west1',\n    indexId: '6394355006866194432',\n    gcsBucketName: 'public_knowledge_base_index',\n    embeddings: embeddings,\n  );\n\n  group('VertexAIMatchingEngine tests', () {\n    test('Test VertexAIMatchingEngine add new vectors', skip: true, () async {\n      final res = await vectorStore.addDocuments(\n        documents: const [\n          Document(\n            pageContent:\n                'Updating your indexes is important to always having '\n                'the most accurate information',\n            metadata: {'test': '1'},\n          ),\n          Document(\n            pageContent:\n                'You can also add optional tags to your index to help '\n                'with diversifying results or filtering pre index query',\n            metadata: {'test': '2'},\n          ),\n        ],\n      );\n\n      
expect(res.length, 2);\n    });\n\n    test('Test VertexAIMatchingEngine query return 1 result', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What payment methods do you offer?',\n        config: const VectorStoreSimilaritySearch(k: 1),\n      );\n      expect(res.length, 1);\n      expect(res.first.id, 'faq_621656c96b5ff317d867d019');\n    });\n\n    test('Test VertexAIMatchingEngine query with scoreThreshold', () async {\n      final res = await vectorStore.similaritySearchWithScores(\n        query: 'Can I pay by credit card?',\n        config: VertexAIMatchingEngineSimilaritySearch(scoreThreshold: 0.6),\n      );\n      for (final (_, score) in res) {\n        expect(score, greaterThan(0.6));\n      }\n    });\n\n    test('Test VertexAIMatchingEngine query with filters', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'Can I pay by credit card?',\n        config: VertexAIMatchingEngineSimilaritySearch(\n          k: 10,\n          filters: [\n            const VertexAIMatchingEngineFilter(\n              namespace: 'type',\n              allowList: ['faq'],\n            ),\n          ],\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['type'], 'faq');\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_huggingface/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_huggingface/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_huggingface/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_huggingface/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nHugging Face module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_huggingface/example/langchain_huggingface_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_huggingface/lib/langchain_huggingface.dart",
    "content": "/// Hugging Face module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_huggingface/pubspec.yaml",
    "content": "name: langchain_huggingface\ndescription: Hugging Face module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_huggingface\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_huggingface\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_microsoft/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_microsoft/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_microsoft/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_microsoft/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nMicrosoft module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_microsoft/example/langchain_microsoft_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_microsoft/lib/langchain_microsoft.dart",
    "content": "/// Microsoft module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_microsoft/pubspec.yaml",
    "content": "name: langchain_microsoft\ndescription: Microsoft module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_microsoft\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_microsoft\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_mistralai/.gitignore",
    "content": "# Miscellaneous\n*.class\n*.log\n*.pyc\n*.swp\n.DS_Store\n.atom/\n.buildlog/\n.history\n.svn/\nmigrate_working_dir/\n\n# IntelliJ related\n*.iml\n*.ipr\n*.iws\n.idea/\n\n# The .vscode folder contains launch configuration and tasks you configure in\n# VS Code which you may wish to be included in version control, so this line\n# is commented out by default.\n#.vscode/\n\n# Flutter/Dart/Pub related\n# Libraries should not include pubspec.lock, per https://dart.dev/guides/libraries/private-files#pubspeclock.\n/pubspec.lock\n**/doc/api/\n.dart_tool/\n.packages\nbuild/\n.fvm/\n"
  },
  {
    "path": "packages/langchain_mistralai/CHANGELOG.md",
    "content": "## 0.3.1+1\n\n - **FIX**(mistralai_dart): Fix streaming tool calls deserialization error ([#913](https://github.com/davidmigloz/langchain_dart/issues/913)) ([#914](https://github.com/davidmigloz/langchain_dart/issues/914)). ([ec4d20bf](https://github.com/davidmigloz/langchain_dart/commit/ec4d20bfd966a6c04ab44d47fd9baa175343a990))\n\n## 0.3.1\n\n - **FEAT**(langchain_mistralai): Add tool/function calling support ([#888](https://github.com/davidmigloz/langchain_dart/issues/888)). ([f4a1480c](https://github.com/davidmigloz/langchain_dart/commit/f4a1480c787f53668569896933d0d9321600c20e))\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n - **FEAT**(mistralai_dart): Align embeddings API with latest Mistral spec ([#886](https://github.com/davidmigloz/langchain_dart/issues/886)). ([769edc49](https://github.com/davidmigloz/langchain_dart/commit/769edc4937ac611b9c8d4b65421e403012f565a1))\n\n## 0.3.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.3.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). 
([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.2.4+3\n\n - Update a dependency to the latest release.\n\n## 0.2.4+2\n\n - Update a dependency to the latest release.\n\n## 0.2.4+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.2.4\n \n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **DOCS**: Add langchain_mistralai example ([#662](https://github.com/davidmigloz/langchain_dart/issues/662)). ([eca7a24d](https://github.com/davidmigloz/langchain_dart/commit/eca7a24d50629b9ce7d61a197bfd9acfb74a1261))\n\n## 0.2.3+2\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). 
([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.2.3+1\n\n - Update a dependency to the latest release.\n\n## 0.2.3\n\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n## 0.2.2\n\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.2.1+1\n\n - Update a dependency to the latest release.\n\n## 0.2.1\n\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n\n## 0.2.0+1\n\n - Update a dependency to the latest release.\n\n## 0.2.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). 
([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n\n## 0.0.3\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). 
([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.0.2+3\n\n - **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.0.2+2\n\n - Update a dependency to the latest release.\n\n## 0.0.2+1\n\n - **REFACTOR**: Update safe_mode and max temperature in Mistral chat ([#300](https://github.com/davidmigloz/langchain_dart/issues/300)). ([1a4ccd1e](https://github.com/davidmigloz/langchain_dart/commit/1a4ccd1e7d1907e340ce609cc6ba8d0543ee3421))\n\n## 0.0.2\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). ([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). 
([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 0.0.1+4\n\n - Update a dependency to the latest release.\n\n## 0.0.1+3\n\n - Update a dependency to the latest release.\n\n## 0.0.1+2\n\n - Update a dependency to the latest release.\n\n## 0.0.1+1\n\n - Update a dependency to the latest release.\n\n## 0.0.1\n\n - **FEAT**: Add support for ChatMistralAI wrapper ([#262](https://github.com/davidmigloz/langchain_dart/issues/262)). ([1364afec](https://github.com/davidmigloz/langchain_dart/commit/1364afec6ea56043ae17d5460276b10bf19b124e))\n - **FEAT**: Add support for MistralAIEmbeddings ([#254](https://github.com/davidmigloz/langchain_dart/issues/254)) ([#264](https://github.com/davidmigloz/langchain_dart/issues/264)). ([1c6bb1a3](https://github.com/davidmigloz/langchain_dart/commit/1c6bb1a3089c94340267f1091d226c3696efc1f1))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_mistralai/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_mistralai/MIGRATION.md",
    "content": "# langchain_mistralai Migration Guide\n\n## Base URL change\n\nThe default base URL has changed from `https://api.mistral.ai/v1` to\n`https://api.mistral.ai`. The underlying `mistralai_dart` client now appends\nthe API version path internally.\n\nIf you were passing a custom `baseUrl` that included `/v1`, remove the\ntrailing `/v1` to avoid double-pathing (e.g. `/v1/v1`):\n\n```dart\n// Before\nfinal chatModel = ChatMistralAI(baseUrl: 'https://my-proxy.com/v1');\n\n// After\nfinal chatModel = ChatMistralAI(baseUrl: 'https://my-proxy.com');\n```\n"
  },
  {
    "path": "packages/langchain_mistralai/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Mistral AI\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_mistralai](https://img.shields.io/pub/v/langchain_mistralai.svg)](https://pub.dev/packages/langchain_mistralai)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\n[Mistral AI](https://console.mistral.ai) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Chat models:\n  * `ChatMistralAI`: wrapper around Mistral AI [Chat Completions API](https://docs.mistral.ai/api#operation/createChatCompletion).\n- Embeddings:\n  * `MistralAIEmbeddings`: wrapper around Mistral AI [Embeddings API](https://docs.mistral.ai/api#operation/createEmbedding).\n\n## License\n\nLangChain.dart is licensed under the \n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_mistralai/example/langchain_mistralai_example.dart",
    "content": "// ignore_for_file: avoid_print, unused_element\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_mistralai/langchain_mistralai.dart';\n\nvoid main() async {\n  // Uncomment the example you want to run:\n  await _example1();\n  // await _example2();\n}\n\n/// The most basic building block of LangChain is calling an LLM on some input.\nFuture<void> _example1() async {\n  final mistralAiApiKey = Platform.environment['MISTRAL_API_KEY'];\n  final chatModel = ChatMistralAI(\n    apiKey: mistralAiApiKey,\n    defaultOptions: const ChatMistralAIOptions(temperature: 0.9),\n  );\n  final ChatResult res = await chatModel.invoke(\n    PromptValue.string('Tell me a joke'),\n  );\n  print(res);\n  chatModel.close();\n}\n\n/// The most frequent use case is to create a chat-bot.\n/// This is the most basic one.\nFuture<void> _example2() async {\n  final mistralAiApiKey = Platform.environment['MISTRAL_API_KEY'];\n  final chatModel = ChatMistralAI(\n    apiKey: mistralAiApiKey,\n    defaultOptions: const ChatMistralAIOptions(temperature: 0),\n  );\n\n  try {\n    while (true) {\n      stdout.write('> ');\n      final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? '');\n      final aiMsg = await chatModel([usrMsg]);\n      print(aiMsg.content);\n    }\n  } finally {\n    chatModel.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/langchain_mistralai.dart",
    "content": "/// LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.).\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\nexport 'src/embeddings/embeddings.dart';\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/chat_models/chat_mistralai.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_tiktoken/langchain_tiktoken.dart';\nimport 'package:mistralai_dart/mistralai_dart.dart' as mistral;\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Mistral AI](https://docs.mistral.ai) Chat Completions API.\n///\n/// Mistral AI brings the strongest open generative models to the developers,\n/// along with efficient ways to deploy and customise them for production.\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatMistralAI(apiKey: '...');\n/// final messages = [\n///   ChatMessage.system('You are a helpful assistant that translates English to French.'),\n///   ChatMessage.humanText('I love programming.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await chatModel.invoke(prompt);\n/// ```\n///\n/// - [Mistral AI API docs](https://docs.mistral.ai)\n///\n/// ### Setup\n///\n/// To use `ChatMistralAI` you need to have a Mistral AI account and an API key.\n/// You can get one [here](https://console.mistral.ai/users).\n///\n/// ### Available models\n///\n/// The following models are available at the moment:\n/// - `mistral-tiny`: Mistral 7B Instruct v0.2 (a minor release of Mistral 7B Instruct).\n///    It only works in English and obtains 7.6 on MT-Bench.\n/// - `mistral-small`: Mixtral 8x7B. It masters English/French/Italian/German/Spanish\n///    and code and obtains 8.3 on MT-Bench.\n/// - `mistral-medium`: a prototype model, that is currently among the top serviced models\n///    available based on standard benchmarks. 
It masters English/French/Italian/German/Spanish\n///    and code and obtains a score of 8.6 on MT-Bench.\n///\n/// Mind that this list may not be up-to-date.\n/// Refer to the [documentation](https://docs.mistral.ai/getting-started/models) for the updated list.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatMistralAI(\n///   defaultOptions: const ChatMistralAIOptions(\n///     model: 'mistral-medium',\n///     temperature: 0,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatMistralAIOptions(randomSeed: 9999),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatMistralAI(apiKey: '...');\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatMistralAIOptions(model: 'mistral-tiny')) | outputParser,\n///   'q2': prompt2| chatModel.bind(const ChatMistralAIOptions(model: 'mistral-medium')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// 
customization:\n///\n/// ```dart\n/// final client = ChatMistralAI(\n///   apiKey: 'MISTRAL_AI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = ChatMistralAI(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n///   queryParams: {'x-my-proxy-query-param': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass ChatMistralAI extends BaseChatModel<ChatMistralAIOptions> {\n  /// Create a new [ChatMistralAI] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Mistral AI API key. You can find your API key in the\n  ///   [Mistral AI dashboard](https://console.mistral.ai/users).\n  /// - [ChatMistralAI.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Mistral AI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  /// - [ChatMistralAI.encoding]\n  ChatMistralAI({\n    final String? apiKey,\n    final String baseUrl = 'https://api.mistral.ai',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? 
queryParams,\n    final http.Client? client,\n    super.defaultOptions = const ChatMistralAIOptions(model: defaultModel),\n    this.encoding = 'cl100k_base',\n  }) : _client = mistral.MistralClient(\n         config: mistral.MistralConfig(\n           authProvider: apiKey != null && apiKey.isNotEmpty\n               ? mistral.ApiKeyProvider(apiKey)\n               : null,\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Mistral AI API.\n  final mistral.MistralClient _client;\n\n  /// The encoding to use by tiktoken when [tokenize] is called.\n  ///\n  /// Mistral does not provide any API to count tokens, so we use tiktoken\n  /// to get an estimation of the number of tokens in a prompt.\n  String encoding;\n\n  @override\n  String get modelType => 'chat-mistralai';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'mistral-small';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatMistralAIOptions? options,\n  }) async {\n    final completion = await _client.chat.create(\n      request: _generateCompletionRequest(\n        input.toChatMessages(),\n        options: options,\n      ),\n    );\n    return completion.toChatResult();\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatMistralAIOptions? 
options,\n  }) {\n    return _client.chat\n        .createStream(\n          request: _generateCompletionRequest(\n            input.toChatMessages(),\n            options: options,\n          ),\n        )\n        .map((final completion) => completion.toChatResult());\n  }\n\n  /// Creates a [ChatCompletionRequest] from the given input.\n  mistral.ChatCompletionRequest _generateCompletionRequest(\n    final List<ChatMessage> messages, {\n    final bool stream = false,\n    final ChatMistralAIOptions? options,\n  }) {\n    final tools = options?.tools ?? defaultOptions.tools;\n    final toolChoice = options?.toolChoice ?? defaultOptions.toolChoice;\n\n    return mistral.ChatCompletionRequest(\n      model: options?.model ?? defaultOptions.model ?? defaultModel,\n      messages: messages.toChatMessages(),\n      temperature: options?.temperature ?? defaultOptions.temperature,\n      topP: options?.topP ?? defaultOptions.topP,\n      maxTokens: options?.maxTokens ?? defaultOptions.maxTokens,\n      safePrompt: options?.safePrompt ?? defaultOptions.safePrompt,\n      randomSeed: options?.randomSeed ?? defaultOptions.randomSeed,\n      tools: tools?.toMistralTools(),\n      toolChoice: toolChoice?.toMistralToolChoice(),\n      stream: stream,\n    );\n  }\n\n  /// Tokenizes the given prompt using tiktoken.\n  ///\n  /// Currently Mistral AI does not provide a tokenizer for the models it supports.\n  /// So we use tiktoken and [encoding] model to get an approximation\n  /// for counting tokens. Mind that the actual tokens will be totally\n  /// different from the ones used by the Mistral AI model.\n  ///\n  /// If an encoding model is specified in [encoding] field, that\n  /// encoding is used instead.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatMistralAIOptions? 
options,\n  }) async {\n    final encoding = getEncoding(this.encoding);\n    return encoding.encode(promptValue.toString());\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n\n  /// {@template chat_mistralai_list_models}\n  /// Returns a list of available chat models from Mistral AI.\n  ///\n  /// This method fetches all models from the Mistral AI API.\n  /// Mistral AI models are primarily chat models.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatMistralAI(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? \"unknown\"}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/chat_models/chat_models.dart",
    "content": "export 'chat_mistralai.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/chat_models/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:mistralai_dart/mistralai_dart.dart' as mistral;\n\nextension ChatMessageListMapper on List<ChatMessage> {\n  List<mistral.ChatMessage> toChatMessages() {\n    return map(_mapMessage).toList(growable: false);\n  }\n\n  mistral.ChatMessage _mapMessage(final ChatMessage msg) {\n    return switch (msg) {\n      final SystemChatMessage msg => mistral.ChatMessage.system(msg.content),\n      final HumanChatMessage msg => mistral.ChatMessage.user(\n        msg.contentAsString,\n      ),\n      final AIChatMessage msg =>\n        msg.toolCalls.isNotEmpty\n            ? mistral.ChatMessage.assistant(\n                msg.content.isNotEmpty ? msg.content : null,\n                toolCalls: msg.toolCalls\n                    .map(_mapToolCall)\n                    .toList(growable: false),\n              )\n            : mistral.ChatMessage.assistant(msg.content),\n      final ToolChatMessage msg => mistral.ChatMessage.tool(\n        toolCallId: msg.toolCallId,\n        content: msg.content,\n        name: null,\n      ),\n      CustomChatMessage() => throw UnsupportedError(\n        'Mistral AI does not support custom messages',\n      ),\n    };\n  }\n\n  mistral.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) {\n    return mistral.ToolCall(\n      id: toolCall.id,\n      function: mistral.FunctionCall(\n        name: toolCall.name,\n        arguments: json.encode(toolCall.arguments),\n      ),\n    );\n  }\n}\n\nextension ChatToolListMapper on List<ToolSpec> {\n  List<mistral.Tool> toMistralTools() {\n    return map(_mapTool).toList(growable: false);\n  }\n\n  mistral.Tool _mapTool(final ToolSpec tool) {\n    return mistral.Tool.function(\n      name: tool.name,\n      description: tool.description,\n      
parameters: tool.inputJsonSchema,\n    );\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  mistral.ToolChoice toMistralToolChoice() {\n    return switch (this) {\n      ChatToolChoiceNone() => mistral.ToolChoice.none,\n      ChatToolChoiceAuto() => mistral.ToolChoice.auto,\n      ChatToolChoiceRequired() => mistral.ToolChoice.any,\n      final ChatToolChoiceForced t => mistral.ToolChoice.function(t.name),\n    };\n  }\n}\n\nextension ChatResultMapper on mistral.ChatCompletionResponse {\n  ChatResult toChatResult({final bool streaming = false}) {\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: text ?? '',\n        toolCalls: hasToolCalls\n            ? toolCalls.map(_mapResponseToolCall).toList(growable: false)\n            : const [],\n      ),\n      finishReason: _mapFinishReason(finishReason),\n      metadata: {'model': model, 'created': created},\n      usage: _mapUsage(usage),\n      streaming: streaming,\n    );\n  }\n\n  AIChatMessageToolCall _mapResponseToolCall(final mistral.ToolCall toolCall) {\n    final function = toolCall.function;\n    var args = <String, dynamic>{};\n    try {\n      final arguments = function.arguments;\n      if (arguments.isNotEmpty) {\n        args = json.decode(arguments);\n      }\n    } catch (_) {}\n    return AIChatMessageToolCall(\n      id: toolCall.id,\n      name: function.name,\n      argumentsRaw: function.arguments,\n      arguments: args,\n    );\n  }\n\n  LanguageModelUsage _mapUsage(final mistral.UsageInfo? 
usage) {\n    return LanguageModelUsage(\n      promptTokens: usage?.promptTokens,\n      responseTokens: usage?.completionTokens,\n      totalTokens: usage?.totalTokens,\n    );\n  }\n}\n\n/// Mapper for [mistral.ChatCompletionStreamResponse].\nextension CreateChatCompletionStreamResponseMapper\n    on mistral.ChatCompletionStreamResponse {\n  /// Converts a [mistral.ChatCompletionStreamResponse] to a [ChatResult].\n  ChatResult toChatResult() {\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: text ?? '',\n        toolCalls: hasToolCalls\n            ? toolCalls.map(_mapStreamToolCall).toList(growable: false)\n            : const [],\n      ),\n      finishReason: _mapFinishReason(finishReason),\n      metadata: {'model': model, 'created': created},\n      usage: _mapStreamUsage(usage),\n      streaming: true,\n    );\n  }\n\n  AIChatMessageToolCall _mapStreamToolCall(final mistral.ToolCall toolCall) {\n    final function = toolCall.function;\n    var args = <String, dynamic>{};\n    try {\n      final arguments = function.arguments;\n      if (arguments.isNotEmpty) {\n        args = json.decode(arguments);\n      }\n    } catch (_) {}\n    return AIChatMessageToolCall(\n      id: toolCall.id,\n      name: function.name,\n      argumentsRaw: function.arguments,\n      arguments: args,\n    );\n  }\n\n  LanguageModelUsage _mapStreamUsage(final mistral.UsageInfo? usage) {\n    return LanguageModelUsage(\n      promptTokens: usage?.promptTokens,\n      responseTokens: usage?.completionTokens,\n      totalTokens: usage?.totalTokens,\n    );\n  }\n}\n\nFinishReason _mapFinishReason(final mistral.FinishReason? 
reason) =>\n    switch (reason) {\n      mistral.FinishReason.stop => FinishReason.stop,\n      mistral.FinishReason.length => FinishReason.length,\n      mistral.FinishReason.modelLength => FinishReason.length,\n      mistral.FinishReason.error => FinishReason.unspecified,\n      mistral.FinishReason.toolCalls => FinishReason.toolCalls,\n      mistral.FinishReason.unknown => FinishReason.unspecified,\n      null => FinishReason.unspecified,\n    };\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/chat_models/types.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_mistral_ai_options}\n/// Options to pass into ChatMistralAI.\n///\n/// You can check the list of available models [here](https://docs.mistral.ai/models).\n/// {@endtemplate}\n@immutable\nclass ChatMistralAIOptions extends ChatModelOptions {\n  /// {@macro chat_mistral_ai_options}\n  const ChatMistralAIOptions({\n    super.model,\n    this.temperature,\n    this.topP,\n    this.maxTokens,\n    this.safePrompt,\n    this.randomSeed,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// What sampling temperature to use, between 0.0 and 2.0. Higher values like\n  /// 0.8 will make the output more random, while lower values like 0.2 will\n  /// make it more focused and deterministic.\n  ///\n  /// We generally recommend altering this or `top_p` but not both.\n  final double? temperature;\n\n  /// Nucleus sampling, where the model considers the results of the tokens\n  /// with `top_p` probability mass. So 0.1 means only the tokens comprising\n  /// the top 10% probability mass are considered.\n  ///\n  /// We generally recommend altering this or `temperature` but not both.\n  final double? topP;\n\n  /// The maximum number of tokens to generate in the completion.\n  ///\n  /// The token count of your prompt plus `max_tokens` cannot exceed the\n  /// model's context length.\n  final int? maxTokens;\n\n  /// Whether to inject a safety prompt before all conversations.\n  final bool? safePrompt;\n\n  /// The seed to use for random sampling.\n  /// If set, different calls will generate deterministic results.\n  final int? randomSeed;\n\n  @override\n  ChatMistralAIOptions copyWith({\n    final String? model,\n    final double? temperature,\n    final double? topP,\n    final int? maxTokens,\n    final bool? safePrompt,\n    final int? randomSeed,\n    final List<ToolSpec>? 
tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatMistralAIOptions(\n      model: model ?? this.model,\n      temperature: temperature ?? this.temperature,\n      topP: topP ?? this.topP,\n      maxTokens: maxTokens ?? this.maxTokens,\n      safePrompt: safePrompt ?? this.safePrompt,\n      randomSeed: randomSeed ?? this.randomSeed,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatMistralAIOptions merge(covariant ChatMistralAIOptions? other) {\n    return copyWith(\n      model: other?.model,\n      temperature: other?.temperature,\n      topP: other?.topP,\n      maxTokens: other?.maxTokens,\n      safePrompt: other?.safePrompt,\n      randomSeed: other?.randomSeed,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatMistralAIOptions other) {\n    return model == other.model &&\n        temperature == other.temperature &&\n        topP == other.topP &&\n        maxTokens == other.maxTokens &&\n        safePrompt == other.safePrompt &&\n        randomSeed == other.randomSeed &&\n        tools == other.tools &&\n        toolChoice == other.toolChoice &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        temperature.hashCode ^\n        topP.hashCode ^\n        maxTokens.hashCode ^\n        safePrompt.hashCode ^\n        randomSeed.hashCode ^\n        tools.hashCode ^\n        toolChoice.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/embeddings/embeddings.dart",
    "content": "export 'mistralai_embeddings.dart';\n"
  },
  {
    "path": "packages/langchain_mistralai/lib/src/embeddings/mistralai_embeddings.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/utils.dart';\nimport 'package:mistralai_dart/mistralai_dart.dart';\n\n/// Wrapper around [Mistral AI](https://docs.mistral.ai) Embeddings API.\n///\n/// Example:\n/// ```dart\n/// final embeddings = MistralAIEmbeddings(apiKey: ...);\n/// final res = await embeddings.embedQuery('Hello world');\n/// ```\n///\n/// - [Mistral AI API docs](https://docs.mistral.ai)\n///\n/// ### Setup\n///\n/// To use `MistralAIEmbeddings` you need to have a Mistral AI account and an API key.\n/// You can get one [here](https://console.mistral.ai/users).\n///\n/// ### Available models\n///\n/// The following models are available at the moment:\n/// - `mistral-embed`: an embedding model with a 1024 embedding dimensions designed\n///    with retrieval capabilities in mind. 
It achieves a retrieval score of 55.26 on MTEB.\n///\n/// Mind that this list may not be up-to-date.\n/// Refer to the [documentation](https://docs.mistral.ai/models) for the updated list.\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = MistralAIEmbeddings(\n///   apiKey: 'MISTRAL_AI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = MistralAIEmbeddings(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n///   queryParams: {'x-my-proxy-query-param': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass MistralAIEmbeddings extends Embeddings {\n  /// Create a new [MistralAIEmbeddings] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your Mistral AI API key. You can find your API key in the\n  ///   [Mistral AI dashboard](https://console.mistral.ai/users/).\n  /// - [MistralAIEmbeddings.model]\n  /// - [MistralAIEmbeddings.batchSize]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to Mistral AI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. 
You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  MistralAIEmbeddings({\n    final String? apiKey,\n    final String baseUrl = 'https://api.mistral.ai',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    this.model = 'mistral-embed',\n    this.dimensions,\n    this.batchSize = 512,\n  }) : _client = MistralClient(\n         config: MistralConfig(\n           authProvider: apiKey != null && apiKey.isNotEmpty\n               ? ApiKeyProvider(apiKey)\n               : null,\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Mistral AI API.\n  final MistralClient _client;\n\n  /// The embeddings model to use.\n  final String model;\n\n  /// The number of dimensions for output embeddings.\n  /// Only supported by certain models (e.g., codestral-embed-2505).\n  final int? 
dimensions;\n\n  /// The maximum number of documents to embed in a single request.\n  int batchSize;\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    final batches = chunkList(documents, chunkSize: batchSize);\n\n    final embeddings = await Future.wait(\n      batches.map((final batch) async {\n        final data = await _client.embeddings.create(\n          request: EmbeddingRequest.batch(\n            model: model,\n            input: batch\n                .map((final doc) => doc.pageContent)\n                .toList(growable: false),\n            outputDimension: dimensions,\n          ),\n        );\n        return data.data.map((final d) => d.embedding);\n      }),\n    );\n\n    return embeddings.expand((final e) => e).toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    final data = await _client.embeddings.create(\n      request: EmbeddingRequest.single(\n        model: model,\n        input: query,\n        outputDimension: dimensions,\n      ),\n    );\n    return data.data.firstOrNull?.embedding ?? [];\n  }\n\n  /// {@template mistralai_embeddings_list_models}\n  /// Returns a list of available embedding models from Mistral AI.\n  ///\n  /// This method filters models to return only those suitable for embeddings\n  /// (models with IDs containing `embed`).\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = MistralAIEmbeddings(apiKey: '...');\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? 
\"unknown\"}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .where(_isEmbeddingModel)\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model is an embedding model.\n  static bool _isEmbeddingModel(final Model model) {\n    final id = model.id.toLowerCase();\n    return id.contains('embed');\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_mistralai/pubspec.yaml",
    "content": "name: langchain_mistralai\ndescription: LangChain.dart integration module for Mistral AI (Mistral-7B, Mixtral 8x7B, embeddings, etc.).\nversion: 0.3.1+1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_mistralai\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_mistralai\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - mistral\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  collection: ^1.19.1\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  langchain_tiktoken: ^1.0.1\n  meta: ^1.16.0\n  mistralai_dart: ^1.3.0\n\ndev_dependencies:\n  langchain: ^0.8.1\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_mistralai/test/chat_models/chat_mistralai_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_mistralai/langchain_mistralai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatMistralAI tests', () {\n    late ChatMistralAI chatModel;\n\n    setUp(() {\n      chatModel = ChatMistralAI(\n        apiKey: Platform.environment['MISTRAL_API_KEY'],\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test ChatMistralAI parameters', () {\n      const options = ChatMistralAIOptions(\n        model: 'foo',\n        temperature: 0.1,\n        topP: 0.5,\n        maxTokens: 10,\n        safePrompt: true,\n        randomSeed: 1234,\n      );\n\n      expect(options.model, 'foo');\n      expect(options.temperature, 0.1);\n      expect(options.topP, 0.5);\n      expect(options.maxTokens, 10);\n      expect(options.safePrompt, true);\n      expect(options.randomSeed, 1234);\n    });\n\n    test('Test call to ChatMistralAI', () async {\n      final output = await chatModel([ChatMessage.humanText('Say foo:')]);\n      expect(output, isA<AIChatMessage>());\n      expect(output.content, isNotEmpty);\n    });\n\n    test('Test invoke to ChatMistralAI', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([ChatMessage.humanText('Hello, how are you?')]),\n      );\n      expect(res.output.content, isNotEmpty);\n    });\n\n    test('Test model output contains metadata', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([\n          ChatMessage.humanText(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n        ]),\n      );\n      expect(\n        res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('123456789'),\n      );\n      expect(res.id, isNotEmpty);\n      expect(res.finishReason, isNot(FinishReason.unspecified));\n      expect(res.metadata, isNotNull);\n      expect(res.metadata['created'], greaterThan(0));\n      expect(res.metadata['model'], isNotEmpty);\n    });\n\n    test('Test tokenize', () async {\n      const text = 'antidisestablishmentarianism';\n\n      final tokens = await chatModel.tokenize(\n        PromptValue.chat([ChatMessage.humanText(text)]),\n      );\n      expect(tokens, [35075, 25, 3276, 85342, 34500, 479, 8997, 2191]);\n    });\n\n    test('Test different encoding than the model', () async {\n      chatModel.encoding = 'cl100k_base';\n      const text = 'antidisestablishmentarianism';\n\n      final tokens = await chatModel.tokenize(\n        PromptValue.chat([ChatMessage.humanText(text)]),\n      );\n      expect(tokens, [35075, 25, 3276, 85342, 34500, 479, 8997, 2191]);\n    });\n\n    test('Test countTokens', () async {\n      const text = 'Hello, how are you?';\n\n      final numTokens = await chatModel.countTokens(\n        PromptValue.chat([ChatMessage.humanText(text)]),\n      );\n      expect(numTokens, 8);\n    });\n\n    test('Test streaming', () async {\n      final promptTemplate = PromptTemplate.fromTemplate(\n        'List the numbers from 1 to {max_num} in order. '\n        'Output ONLY the numbers in one line without any spaces or commas. 
'\n        'NUMBERS:',\n      );\n      const stringOutputParser = StringOutputParser<ChatResult>();\n\n      final chain = promptTemplate.pipe(chatModel).pipe(stringOutputParser);\n\n      final stream = chain.stream({'max_num': '9'});\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res.trim();\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, contains('123456789'));\n    });\n\n    test('Test response seed', skip: true, () async {\n      final prompt = PromptValue.string(\n        'Why is the sky blue? Reply in one sentence.',\n      );\n      const options = ChatMistralAIOptions(temperature: 0, randomSeed: 9999);\n\n      final res1 = await chatModel.invoke(prompt, options: options);\n\n      final res2 = await chatModel.invoke(prompt, options: options);\n      expect(res1.output, res2.output);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_mistralai/test/embeddings/mistralai_embeddings_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_mistralai/langchain_mistralai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('MistralAIEmbeddings tests', () {\n    late MistralAIEmbeddings embeddings;\n\n    setUp(() {\n      embeddings = MistralAIEmbeddings(\n        apiKey: Platform.environment['MISTRAL_API_KEY'],\n      );\n    });\n\n    tearDown(() {\n      embeddings.close();\n    });\n\n    test('Test MistralAIEmbeddings.embedQuery', () async {\n      final res = await embeddings.embedQuery('Hello world');\n      expect(res.length, 1024);\n    });\n\n    test('Test MistralAIEmbeddings.embedDocuments', () async {\n      final res = await embeddings.embedDocuments([\n        const Document(id: '1', pageContent: 'Hello world'),\n        const Document(id: '2', pageContent: 'Bye bye'),\n      ]);\n      expect(res.length, 2);\n      expect(res[0].length, 1024);\n      expect(res[1].length, 1024);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_ollama/.gitignore",
    "content": "# Miscellaneous\n*.class\n*.log\n*.pyc\n*.swp\n.DS_Store\n.atom/\n.buildlog/\n.history\n.svn/\nmigrate_working_dir/\n\n# IntelliJ related\n*.iml\n*.ipr\n*.iws\n.idea/\n\n# The .vscode folder contains launch configuration and tasks you configure in\n# VS Code which you may wish to be included in version control, so this line\n# is commented out by default.\n#.vscode/\n\n# Flutter/Dart/Pub related\n# Libraries should not include pubspec.lock, per https://dart.dev/guides/libraries/private-files#pubspeclock.\n/pubspec.lock\n**/doc/api/\n.dart_tool/\n.packages\nbuild/\n.fvm/\n"
  },
  {
    "path": "packages/langchain_ollama/CHANGELOG.md",
    "content": "## 0.4.1\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n## 0.4.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.4.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Add think support to Ollama and ChatOllama ([#801](https://github.com/davidmigloz/langchain_dart/issues/801)). ([553c7282](https://github.com/davidmigloz/langchain_dart/commit/553c72829073584b428770139939bd790da5c6aa))\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **REFACTOR**: Improve factory names in ollama_dart ([#806](https://github.com/davidmigloz/langchain_dart/issues/806)). ([fbfa7acb](https://github.com/davidmigloz/langchain_dart/commit/fbfa7acb071a8c2271a6cfb6506e9f6d8b863ca4))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.3.3+3\n\n - Update a dependency to the latest release.\n\n## 0.3.3+2\n\n - Update a dependency to the latest release.\n\n## 0.3.3+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). 
([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.3.3\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Fix linter issues ([#708](https://github.com/davidmigloz/langchain_dart/issues/708)). ([652e7c64](https://github.com/davidmigloz/langchain_dart/commit/652e7c64776d92d309cbd708d9e477fc2ee1391c))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n - **DOCS**: Add langchain_ollama example ([#661](https://github.com/davidmigloz/langchain_dart/issues/661)). ([0bba6cb4](https://github.com/davidmigloz/langchain_dart/commit/0bba6cb4ebe4386ad53b1aa02836d375d5f59cbe))\n\n## 0.3.2+2\n\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). ([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.3.2+1\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.3.2\n\n - **FEAT**: Update Ollama default model to llama-3.2 ([#554](https://github.com/davidmigloz/langchain_dart/issues/554)). 
([f42ed0f0](https://github.com/davidmigloz/langchain_dart/commit/f42ed0f04136021b30556787cfdea13a14ca5768))\n\n## 0.3.1\n\n - **FEAT**: Add support for min_p in Ollama ([#512](https://github.com/davidmigloz/langchain_dart/issues/512)). ([e40d54b2](https://github.com/davidmigloz/langchain_dart/commit/e40d54b2e729d8fb6bf14bb4ea97820121bc85c7))\n - **FEAT**: Add copyWith method to all RunnableOptions subclasses ([#531](https://github.com/davidmigloz/langchain_dart/issues/531)). ([42c8d480](https://github.com/davidmigloz/langchain_dart/commit/42c8d480041e7ca331e4928c46536037c06dbff0))\n\n## 0.3.0\n\n - **FEAT**: Add tool calling support in ChatOllama ([#505](https://github.com/davidmigloz/langchain_dart/issues/505)). ([6ffde204](https://github.com/davidmigloz/langchain_dart/commit/6ffde2043c1e865411c8b1096063619d6bcd80aa))\n - **BREAKING** **FEAT**: Update Ollama default model to llama-3.1 ([#506](https://github.com/davidmigloz/langchain_dart/issues/506)). ([b1134bf1](https://github.com/davidmigloz/langchain_dart/commit/b1134bf1163cdcea26a9f1e65fee5c515be3857c))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). ([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n - **DOCS**: Update Ollama request options default values in API docs ([#479](https://github.com/davidmigloz/langchain_dart/issues/479)). 
([e1f93366](https://github.com/davidmigloz/langchain_dart/commit/e1f9336619ee12624a7b045ca18a3118ead0158f))\n\n## 0.2.2+1\n\n - **DOCS**: Update ChatOllama API docs. ([cc4246c8](https://github.com/davidmigloz/langchain_dart/commit/cc4246c8ab907de2c82843bff145edfffe32d302))\n\n## 0.2.2\n\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n\n## 0.2.1+1\n\n - Update a dependency to the latest release.\n\n## 0.2.1\n\n - **FEAT**: Handle finish reason in ChatOllama ([#416](https://github.com/davidmigloz/langchain_dart/issues/416)). ([a5e1af13](https://github.com/davidmigloz/langchain_dart/commit/a5e1af13ef4d2db690ab599dbf5e42f28659a059))\n - **FEAT**: Add keepAlive option to OllamaEmbeddings ([#415](https://github.com/davidmigloz/langchain_dart/issues/415)). ([32e19028](https://github.com/davidmigloz/langchain_dart/commit/32e19028a7e19ef5fc32a410061eb85bc6e27c39))\n - **FEAT**: Update Ollama default model from llama2 to llama3 ([#417](https://github.com/davidmigloz/langchain_dart/issues/417)). ([9d30b1a1](https://github.com/davidmigloz/langchain_dart/commit/9d30b1a1c811d73cfa27110b8c3c10b10da1801e))\n - **REFACTOR**: Remove deprecated Ollama options ([#414](https://github.com/davidmigloz/langchain_dart/issues/414)). ([861a2b74](https://github.com/davidmigloz/langchain_dart/commit/861a2b7430d33718340676ec2804a7aaccb2a08a))\n\n## 0.2.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). 
([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). ([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n\n## 0.0.4\n\n - **FEAT**: Add Ollama keep_alive param to control how long models stay loaded ([#319](https://github.com/davidmigloz/langchain_dart/issues/319)). 
([3b86e227](https://github.com/davidmigloz/langchain_dart/commit/3b86e22788eb8df9c09b034c5acc98fdaa6b32c6))\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.0.3+3\n\n - **DOCS**: Update CHANGELOG.md. ([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.0.3+2\n\n - Update a dependency to the latest release.\n\n## 0.0.3+1\n\n - Update a dependency to the latest release.\n\n## 0.0.3\n\n - **REFACTOR**: Use cl100k_base encoding model when no tokenizer is available ([#295](https://github.com/davidmigloz/langchain_dart/issues/295)). ([ca908e80](https://github.com/davidmigloz/langchain_dart/commit/ca908e8011a168a74240310c78abb3c590654a49))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 0.0.2+1\n\n - Update a dependency to the latest release.\n\n## 0.0.2\n\n - **FEAT**: Migrate ChatOllama to Ollama chat API and add multi-modal support ([#279](https://github.com/davidmigloz/langchain_dart/issues/279)). 
([c5de7e12](https://github.com/davidmigloz/langchain_dart/commit/c5de7e12d14c7095864879c604ccd814c51212cc))\n\n## 0.0.1+3\n\n - Update a dependency to the latest release.\n\n## 0.0.1+2\n\n - Update a dependency to the latest release.\n\n## 0.0.1+1\n\n - **REFACTOR**: Minor changes in ChatOllama. ([725b8ff0](https://github.com/davidmigloz/langchain_dart/commit/725b8ff0dde5507378a6f2f54e5979f2f596aa2f))\n\n## 0.0.1\n\n - **FEAT**: Add support for ChatOllama chat model ([#255](https://github.com/davidmigloz/langchain_dart/issues/255)). ([5b156910](https://github.com/davidmigloz/langchain_dart/commit/5b1569104a3e31fcba078e05b81e7a61b67a24dd))\n - **FEAT**: Add support for OllamaEmbeddings ([#254](https://github.com/davidmigloz/langchain_dart/issues/254)). ([b69701c7](https://github.com/davidmigloz/langchain_dart/commit/b69701c720ba63269ca3541881df4afa4c75504b))\n - **FEAT**: Add support for Ollama LLM ([#253](https://github.com/davidmigloz/langchain_dart/issues/253)). ([23362fdd](https://github.com/davidmigloz/langchain_dart/commit/23362fddf06c056fb2f497a6d1d1648e21895eb8))\n - **DOCS**: Update Ollama docs. ([8161f6c9](https://github.com/davidmigloz/langchain_dart/commit/8161f6c99a6d5169e6df48bb0cfc95374ec4c664))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_ollama/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_ollama/MIGRATION.md",
    "content": "# langchain_ollama Migration Guide\n\n## Base URL change\n\nThe default base URL has changed from `http://localhost:11434/api` to\n`http://localhost:11434`. The underlying `ollama_dart` client now appends\nthe `/api` path internally.\n\nIf you were passing a custom `baseUrl` that included `/api`, remove the\ntrailing `/api` to avoid double-pathing (e.g. `/api/api`):\n\n```dart\n// Before\nfinal chatModel = ChatOllama(baseUrl: 'http://my-host:11434/api');\n\n// After\nfinal chatModel = ChatOllama(baseUrl: 'http://my-host:11434');\n```\n"
  },
  {
    "path": "packages/langchain_ollama/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Ollama\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_ollama](https://img.shields.io/pub/v/langchain_ollama.svg)](https://pub.dev/packages/langchain_ollama)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\n[Ollama](https://ollama.ai) module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- LLMs:\n  * `Ollama`: wrapper around Ollama Completions API.\n- Chat models:\n  * `ChatOllama`: wrapper around Ollama Chat API in a chat-like fashion.\n- Embeddings:\n  * `OllamaEmbeddings`: wrapper around Ollama Embeddings API.\n\n## License\n\nLangChain.dart is licensed under the \n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_ollama/example/langchain_ollama_example.dart",
    "content": "// ignore_for_file: avoid_print, unused_element\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\n\nvoid main() async {\n  // Uncomment the example you want to run:\n  await _example1();\n  // await _example2();\n}\n\n/// The most basic building block of LangChain is calling an LLM on some input.\nFuture<void> _example1() async {\n  final llm = Ollama(defaultOptions: const OllamaOptions(model: 'llama3.2'));\n  final LLMResult res = await llm.invoke(PromptValue.string('Tell me a joke'));\n  print(res);\n  llm.close();\n}\n\n/// The most frequent use case is to create a chat-bot.\n/// This is the most basic one.\nFuture<void> _example2() async {\n  final chatModel = ChatOllama(\n    defaultOptions: const ChatOllamaOptions(model: 'llama3.2'),\n  );\n\n  try {\n    while (true) {\n      stdout.write('> ');\n      final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? '');\n      final aiMsg = await chatModel([usrMsg]);\n      print(aiMsg.content);\n    }\n  } finally {\n    chatModel.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/langchain_ollama.dart",
    "content": "/// LangChain.dart integration module for Ollama (run Llama 3, Mistral, Vicuna and other models locally).\nlibrary;\n\nexport 'src/chat_models/chat_models.dart';\nexport 'src/embeddings/embeddings.dart';\nexport 'src/llms/llms.dart';\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/chat_models/chat_models.dart",
    "content": "export 'chat_ollama/chat_ollama.dart';\nexport 'chat_ollama/types.dart';\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/chat_models/chat_ollama/chat_ollama.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_tiktoken/langchain_tiktoken.dart';\nimport 'package:ollama_dart/ollama_dart.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Ollama](https://ollama.ai) Chat API that enables\n/// to interact with the LLMs in a chat-like fashion.\n///\n/// Ollama allows you to run open-source large language models,\n/// such as Llama 3.2, Gemma 2 or LLaVA, locally.\n///\n/// For a complete list of supported models and model variants, see the\n/// [Ollama model library](https://ollama.ai/library).\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatOllama();\n/// final messages = [\n///   ChatMessage.system('You are a helpful assistant that translates English to French.'),\n///   ChatMessage.humanText('I love programming.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await llm.invoke(prompt);\n/// ```\n///\n/// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion)\n///\n/// ### Setup\n///\n/// 1. Download and install [Ollama](https://ollama.ai)\n/// 2. Fetch a model via `ollama pull <model family>`\n///   * e.g., for Llama 3: `ollama pull llama3.2`\n///\n/// ### Ollama base URL\n///\n/// By default, [ChatOllama] uses 'http://localhost:11434' as base URL\n/// (default Ollama API URL). But if you are running Ollama on a different\n/// one, you can override it using the [baseUrl] parameter.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatOllama(\n///   defaultOptions: const ChatOllamaOptions(\n///     model: 'llama3.2',\n///     temperature: 0,\n///     format: OllamaResponseFormat.json,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatOllamaOptions(seed: 9999),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatOllama();\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatOllamaOptions(model: 'llama3.2')) | outputParser,\n///   'q2': prompt2| chatModel.bind(const ChatOllamaOptions(model: 'mistral')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = ChatOllama(\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = ChatOllama(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n///   queryParams: {'x-my-proxy-query-param': 'value'},\n/// );\n/// ```\n///\n/// If you need further 
customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass ChatOllama extends BaseChatModel<ChatOllamaOptions> {\n  /// Create a new [ChatOllama] instance.\n  ///\n  /// Main configuration options:\n  /// - `baseUrl`: the base URL of Ollama API.\n  /// - [ChatOllama.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  /// - [ChatOllama.encoding]\n  ChatOllama({\n    final String baseUrl = 'http://localhost:11434',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const ChatOllamaOptions(model: defaultModel),\n    this.encoding = 'cl100k_base',\n  }) : _client = OllamaClient(\n         config: OllamaConfig(\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? 
const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Ollama API.\n  final OllamaClient _client;\n\n  /// The encoding to use by tiktoken when [tokenize] is called.\n  ///\n  /// Ollama does not provide any API to count tokens, so we use tiktoken\n  /// to get an estimation of the number of tokens in a prompt.\n  String encoding;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  @override\n  String get modelType => 'chat-ollama';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'llama3.2';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatOllamaOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    final completion = await _client.chat.create(\n      request: createChatRequest(\n        input.toChatMessages(),\n        options: options,\n        defaultOptions: defaultOptions,\n      ),\n    );\n    return completion.toChatResult(id);\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatOllamaOptions? options,\n  }) {\n    final id = _uuid.v4();\n    return _client.chat\n        .createStream(\n          request: createChatRequest(\n            input.toChatMessages(),\n            options: options,\n            defaultOptions: defaultOptions,\n            stream: true,\n          ),\n        )\n        .map(\n          (final completion) => completion.toChatResult(id, streaming: true),\n        );\n  }\n\n  /// Tokenizes the given prompt using tiktoken.\n  ///\n  /// Currently Ollama does not provide a tokenizer for the models it supports.\n  /// So we use tiktoken and [encoding] model to get an approximation\n  /// for counting tokens. 
Mind that the actual tokens will be totally\n  /// different from the ones used by the Ollama model.\n  ///\n  /// If an encoding model is specified in [encoding] field, that\n  /// encoding is used instead.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatOllamaOptions? options,\n  }) async {\n    final encoding = getEncoding(this.encoding);\n    return encoding.encode(promptValue.toString());\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n\n  /// {@template chat_ollama_list_models}\n  /// Returns a list of available models from the local Ollama instance.\n  ///\n  /// This method fetches all locally available models from the Ollama API.\n  /// All Ollama models support chat, so no filtering is applied.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatOllama();\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return (response.models ?? [])\n        .where((final m) => m.name != null)\n        .map((final m) => ModelInfo(id: m.name!, ownedBy: m.details?.family))\n        .toList();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/chat_models/chat_ollama/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:ollama_dart/ollama_dart.dart' as o;\nimport 'package:uuid/uuid.dart';\n\nimport '../../llms/types.dart';\nimport 'chat_ollama.dart';\nimport 'types.dart';\n\nextension _OllamaResponseFormatChatMapper on OllamaResponseFormat {\n  o.ResponseFormat toChatFormat() {\n    return switch (this) {\n      OllamaResponseFormat.json => const o.JsonFormat(),\n    };\n  }\n}\n\nextension _OllamaThinkingLevelChatMapper on OllamaThinkingLevel {\n  o.ThinkValue toThinkValue() {\n    return switch (this) {\n      OllamaThinkingLevel.high => const o.ThinkWithLevel(o.ThinkLevel.high),\n      OllamaThinkingLevel.medium => const o.ThinkWithLevel(o.ThinkLevel.medium),\n      OllamaThinkingLevel.low => const o.ThinkWithLevel(o.ThinkLevel.low),\n    };\n  }\n}\n\n/// Creates a [ChatRequest] from the given input.\no.ChatRequest createChatRequest(\n  final List<ChatMessage> messages, {\n  required final ChatOllamaOptions? options,\n  required final ChatOllamaOptions defaultOptions,\n  final bool stream = false,\n}) {\n  return o.ChatRequest(\n    model: options?.model ?? defaultOptions.model ?? ChatOllama.defaultModel,\n    messages: messages.toMessages(),\n    format: (options?.format ?? defaultOptions.format)?.toChatFormat(),\n    keepAlive: options?.keepAlive ?? defaultOptions.keepAlive,\n    think: (options?.think ?? defaultOptions.think)?.toThinkValue(),\n    tools: _mapTools(\n      tools: options?.tools ?? defaultOptions.tools,\n      toolChoice: options?.toolChoice ?? defaultOptions.toolChoice,\n    ),\n    stream: stream,\n    options: o.ModelOptions(\n      seed: options?.seed ?? defaultOptions.seed,\n      numPredict: options?.numPredict ?? 
defaultOptions.numPredict,\n      topK: options?.topK ?? defaultOptions.topK,\n      topP: options?.topP ?? defaultOptions.topP,\n      minP: options?.minP ?? defaultOptions.minP,\n      temperature: options?.temperature ?? defaultOptions.temperature,\n      stop: options?.stop ?? defaultOptions.stop,\n      numCtx: options?.numCtx ?? defaultOptions.numCtx,\n      numKeep: options?.numKeep ?? defaultOptions.numKeep,\n      tfsZ: options?.tfsZ ?? defaultOptions.tfsZ,\n      typicalP: options?.typicalP ?? defaultOptions.typicalP,\n      repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN,\n      repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty,\n      presencePenalty:\n          options?.presencePenalty ?? defaultOptions.presencePenalty,\n      frequencyPenalty:\n          options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,\n      mirostat: options?.mirostat ?? defaultOptions.mirostat,\n      mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau,\n      mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta,\n      penalizeNewline:\n          options?.penalizeNewline ?? defaultOptions.penalizeNewline,\n      numa: options?.numa ?? defaultOptions.numa,\n      numBatch: options?.numBatch ?? defaultOptions.numBatch,\n      numGpu: options?.numGpu ?? defaultOptions.numGpu,\n      mainGpu: options?.mainGpu ?? defaultOptions.mainGpu,\n      lowVram: options?.lowVram ?? defaultOptions.lowVram,\n      f16Kv: options?.f16KV ?? defaultOptions.f16KV,\n      logitsAll: options?.logitsAll ?? defaultOptions.logitsAll,\n      vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly,\n      useMmap: options?.useMmap ?? defaultOptions.useMmap,\n      useMlock: options?.useMlock ?? defaultOptions.useMlock,\n      numThread: options?.numThread ?? defaultOptions.numThread,\n    ),\n  );\n}\n\nList<o.ToolDefinition>? _mapTools({\n  final List<ToolSpec>? tools,\n  final ChatToolChoice? 
toolChoice,\n}) {\n  if (tools == null || tools.isEmpty) {\n    return null;\n  }\n\n  return switch (toolChoice) {\n    ChatToolChoiceNone() => null,\n    ChatToolChoiceAuto() ||\n    ChatToolChoiceRequired() ||\n    null => tools.map(_mapTool).toList(growable: false),\n    final ChatToolChoiceForced f => [\n      _mapTool(tools.firstWhere((t) => t.name == f.name)),\n    ],\n  };\n}\n\no.ToolDefinition _mapTool(final ToolSpec tool) {\n  return o.ToolDefinition(\n    function: o.ToolFunction(\n      name: tool.name,\n      description: tool.description,\n      parameters: tool.inputJsonSchema,\n    ),\n  );\n}\n\nextension OllamaChatMessagesMapper on List<ChatMessage> {\n  List<o.ChatMessage> toMessages() {\n    return map(_mapMessage).expand((final msg) => msg).toList(growable: false);\n  }\n\n  List<o.ChatMessage> _mapMessage(final ChatMessage msg) {\n    return switch (msg) {\n      final SystemChatMessage msg => [o.ChatMessage.system(msg.content)],\n      final HumanChatMessage msg => _mapHumanMessage(msg),\n      final AIChatMessage msg => _mapAIMessage(msg),\n      final ToolChatMessage msg => [o.ChatMessage.tool(msg.content)],\n      CustomChatMessage() => throw UnsupportedError(\n        'Ollama does not support custom messages',\n      ),\n    };\n  }\n\n  List<o.ChatMessage> _mapHumanMessage(final HumanChatMessage message) {\n    return switch (message.content) {\n      final ChatMessageContentText c => [o.ChatMessage.user(c.text)],\n      final ChatMessageContentImage c => [o.ChatMessage.user(c.data)],\n      final ChatMessageContentMultiModal c => _mapContentMultiModal(c),\n    };\n  }\n\n  List<o.ChatMessage> _mapContentMultiModal(\n    final ChatMessageContentMultiModal content,\n  ) {\n    final parts = content.parts.groupListsBy((final p) => p.runtimeType);\n\n    if ((parts[ChatMessageContentMultiModal]?.length ?? 
0) > 0) {\n      throw UnsupportedError(\n        'Cannot have multimodal content in multimodal content',\n      );\n    }\n\n    // If there's only one text part and the rest are images, then we combine them in one message\n    if ((parts[ChatMessageContentText]?.length ?? 0) == 1) {\n      return [\n        o.ChatMessage.user(\n          (parts[ChatMessageContentText]!.first as ChatMessageContentText).text,\n          images: parts[ChatMessageContentImage]\n              ?.map((final p) => (p as ChatMessageContentImage).data)\n              .toList(growable: false),\n        ),\n      ];\n    }\n\n    // Otherwise, we return the parts as separate messages\n    return content.parts\n        .map(\n          (final p) => switch (p) {\n            final ChatMessageContentText c => o.ChatMessage.user(c.text),\n            final ChatMessageContentImage c => o.ChatMessage.user(c.data),\n            ChatMessageContentMultiModal() => throw UnsupportedError(\n              'Cannot have multimodal content in multimodal content',\n            ),\n          },\n        )\n        .toList(growable: false);\n  }\n\n  List<o.ChatMessage> _mapAIMessage(final AIChatMessage message) {\n    return [\n      o.ChatMessage.assistant(\n        message.content,\n        toolCalls: message.toolCalls.isNotEmpty\n            ? message.toolCalls.map(_mapToolCall).toList(growable: false)\n            : null,\n      ),\n    ];\n  }\n\n  o.ToolCall _mapToolCall(final AIChatMessageToolCall toolCall) {\n    return o.ToolCall(\n      function: o.ToolCallFunction(\n        name: toolCall.name,\n        arguments: toolCall.arguments,\n      ),\n    );\n  }\n}\n\nextension ChatResultMapper on o.ChatResponse {\n  ChatResult toChatResult(final String id, {final bool streaming = false}) {\n    final content = [\n      if (message?.thinking != null) message!.thinking!,\n      message?.content ?? 
'',\n    ].join('');\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: content,\n        toolCalls:\n            message?.toolCalls?.map(_mapToolCall).toList(growable: false) ??\n            const [],\n      ),\n      finishReason: _mapFinishReason(doneReason),\n      metadata: {\n        'model': model,\n        'created_at': createdAt,\n        'done': done,\n        'total_duration': totalDuration,\n        'load_duration': loadDuration,\n        'prompt_eval_count': promptEvalCount,\n        'prompt_eval_duration': promptEvalDuration,\n        'eval_count': evalCount,\n        'eval_duration': evalDuration,\n      },\n      usage: _mapUsage(),\n      streaming: streaming,\n    );\n  }\n\n  AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) {\n    return AIChatMessageToolCall(\n      id: const Uuid().v4(),\n      name: toolCall.function?.name ?? '',\n      argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}),\n      arguments: toolCall.function?.arguments ?? const {},\n    );\n  }\n\n  LanguageModelUsage _mapUsage() {\n    return LanguageModelUsage(\n      promptTokens: promptEvalCount,\n      responseTokens: evalCount,\n      totalTokens: (promptEvalCount != null || evalCount != null)\n          ? (promptEvalCount ?? 0) + (evalCount ?? 0)\n          : null,\n    );\n  }\n\n  FinishReason _mapFinishReason(final o.DoneReason? reason) => switch (reason) {\n    o.DoneReason.stop => FinishReason.stop,\n    o.DoneReason.length => FinishReason.length,\n    o.DoneReason.load => FinishReason.unspecified,\n    o.DoneReason.unload => FinishReason.unspecified,\n    null => FinishReason.unspecified,\n  };\n}\n\nextension ChatStreamResultMapper on o.ChatStreamEvent {\n  ChatResult toChatResult(final String id, {final bool streaming = false}) {\n    final content = [\n      if (message?.thinking != null) message!.thinking!,\n      message?.content ?? 
'',\n    ].join('');\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: content,\n        toolCalls:\n            message?.toolCalls?.map(_mapToolCall).toList(growable: false) ??\n            const [],\n      ),\n      finishReason: (done ?? false)\n          ? FinishReason.stop\n          : FinishReason.unspecified,\n      metadata: {'model': model, 'created_at': createdAt, 'done': done},\n      usage: const LanguageModelUsage(),\n      streaming: streaming,\n    );\n  }\n\n  AIChatMessageToolCall _mapToolCall(final o.ToolCall toolCall) {\n    return AIChatMessageToolCall(\n      id: const Uuid().v4(),\n      name: toolCall.function?.name ?? '',\n      argumentsRaw: json.encode(toolCall.function?.arguments ?? const {}),\n      arguments: toolCall.function?.arguments ?? const {},\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/chat_models/chat_ollama/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\nimport '../../../langchain_ollama.dart';\nimport '../../llms/types.dart';\n\n/// {@template chat_ollama_options}\n/// Options to pass into ChatOllama.\n///\n/// For a complete list of supported models and model variants, see the\n/// [Ollama model library](https://ollama.ai/library).\n/// {@endtemplate}\n@immutable\nclass ChatOllamaOptions extends ChatModelOptions {\n  /// {@macro chat_ollama_options}\n  const ChatOllamaOptions({\n    super.model,\n    this.format,\n    this.keepAlive,\n    this.think,\n    this.numKeep,\n    this.seed,\n    this.numPredict,\n    this.topK,\n    this.topP,\n    this.minP,\n    this.tfsZ,\n    this.typicalP,\n    this.repeatLastN,\n    this.temperature,\n    this.repeatPenalty,\n    this.presencePenalty,\n    this.frequencyPenalty,\n    this.mirostat,\n    this.mirostatTau,\n    this.mirostatEta,\n    this.penalizeNewline,\n    this.stop,\n    this.numa,\n    this.numCtx,\n    this.numBatch,\n    this.numGpu,\n    this.mainGpu,\n    this.lowVram,\n    this.f16KV,\n    this.logitsAll,\n    this.vocabOnly,\n    this.useMmap,\n    this.useMlock,\n    this.numThread,\n    super.tools,\n    super.toolChoice,\n    super.concurrencyLimit,\n  });\n\n  /// The format to return a response in. Currently the only accepted value is\n  /// json.\n  ///\n  /// Enable JSON mode by setting the format parameter to json. This will\n  /// structure the response as valid JSON.\n  ///\n  /// Note: it's important to instruct the model to use JSON in the prompt.\n  /// Otherwise, the model may generate large amounts whitespace.\n  final OllamaResponseFormat? format;\n\n  /// How long (in minutes) to keep the model loaded in memory.\n  ///\n  /// - If set to a positive duration (e.g. 
20), the model will stay loaded for the provided duration.\n  /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely.\n  /// - If set to 0, the model will be unloaded immediately once finished.\n  /// - If not set, the model will stay loaded for 5 minutes by default\n  final int? keepAlive;\n\n  /// Controls whether thinking/reasoning models will think before responding.\n  ///\n  /// Can be set to a [OllamaThinkingLevel] to control the intensity level:\n  /// - [OllamaThinkingLevel.high]: Maximum reasoning depth\n  /// - [OllamaThinkingLevel.medium]: Balanced reasoning\n  /// - [OllamaThinkingLevel.low]: Minimal reasoning\n  final OllamaThinkingLevel? think;\n\n  /// Number of tokens to keep from the prompt.\n  /// (Default: 0)\n  final int? numKeep;\n\n  /// Sets the random number seed to use for generation. Setting this to a\n  /// specific number will make the model generate the same text for the same\n  /// prompt.\n  /// (Default: 0)\n  final int? seed;\n\n  /// Maximum number of tokens to predict when generating text.\n  /// (Default: 128, -1 = infinite generation, -2 = fill context)\n  final int? numPredict;\n\n  /// Reduces the probability of generating nonsense. A higher value (e.g. 100)\n  /// will give more diverse answers, while a lower value (e.g. 10) will be\n  /// more conservative.\n  /// (Default: 40)\n  final int? topK;\n\n  /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more\n  /// diverse text, while a lower value (e.g., 0.5) will generate more focused\n  /// and conservative text.\n  /// (Default: 0.9)\n  final double? topP;\n\n  /// Alternative to the [topP], and aims to ensure a balance of quality and\n  /// variety. [minP] represents the minimum probability for a token to be\n  /// considered, relative to the probability of the most likely token. 
For\n  /// example, with min_p=0.05 and the most likely token having a probability\n  /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out.\n  /// (Default: 0.0)\n  final double? minP;\n\n  /// Tail free sampling is used to reduce the impact of less probable tokens\n  /// from the output. A higher value (e.g., 2.0) will reduce the impact more,\n  /// while a value of 1.0 disables this setting.\n  /// (default: 1)\n  final double? tfsZ;\n\n  /// Typical p is used to reduce the impact of less probable tokens from the\n  /// output.\n  /// (Default: 1.0)\n  final double? typicalP;\n\n  /// Sets how far back for the model to look back to prevent repetition.\n  /// (Default: 64, 0 = disabled, -1 = num_ctx)\n  final int? repeatLastN;\n\n  /// The temperature of the model. Increasing the temperature will make the\n  /// model answer more creatively.\n  /// (Default: 0.8)\n  final double? temperature;\n\n  /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)\n  /// will penalize repetitions more strongly, while a lower value (e.g., 0.9)\n  /// will be more lenient.\n  /// (Default: 1.1)\n  final double? repeatPenalty;\n\n  /// Positive values penalize new tokens based on whether they appear in the\n  /// text so far, increasing the model's likelihood to talk about new topics.\n  final double? presencePenalty;\n\n  /// Positive values penalize new tokens based on their existing frequency in\n  /// the text so far, decreasing the model's likelihood to repeat the same\n  /// line verbatim.\n  final double? frequencyPenalty;\n\n  /// Enable Mirostat sampling for controlling perplexity.\n  /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n  final int? mirostat;\n\n  /// Controls the balance between coherence and diversity of the output. A\n  /// lower value will result in more focused and coherent text.\n  /// (Default: 5.0)\n  final double? 
mirostatTau;\n\n  /// Influences how quickly the algorithm responds to feedback from the\n  /// generated text. A lower learning rate will result in slower adjustments,\n  /// while a higher learning rate will make the algorithm more responsive.\n  /// (Default: 0.1)\n  final double? mirostatEta;\n\n  /// Penalize newlines in the output.\n  /// (Default: true)\n  final bool? penalizeNewline;\n\n  /// Sequences where the API will stop generating further tokens. The returned\n  /// text will not contain the stop sequence.\n  final List<String>? stop;\n\n  /// Enable NUMA support.\n  /// (Default: false)\n  final bool? numa;\n\n  /// Sets the size of the context window used to generate the next token.\n  final int? numCtx;\n\n  /// Sets the number of batches to use for generation.\n  /// (Default: 1)\n  final int? numBatch;\n\n  /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to\n  /// enable metal support, 0 to disable.\n  final int? numGpu;\n\n  /// The GPU to use for the main model.\n  /// (Default: 0)\n  final int? mainGpu;\n\n  /// Enable low VRAM mode.\n  /// (Default: false)\n  final bool? lowVram;\n\n  /// Enable f16 key/value.\n  /// (Default: true)\n  final bool? f16KV;\n\n  /// Enable logits all.\n  /// (Default: false)\n  final bool? logitsAll;\n\n  /// Enable vocab only.\n  /// (Default: false)\n  final bool? vocabOnly;\n\n  /// Enable mmap.\n  /// (Default: false)\n  final bool? useMmap;\n\n  /// Enable mlock.\n  /// (Default: false)\n  final bool? useMlock;\n\n  /// Sets the number of threads to use during computation. By default, Ollama\n  /// will detect this for optimal performance. It is recommended to set this\n  /// value to the number of physical CPU cores your system has (as opposed to\n  /// the logical number of cores).\n  final int? numThread;\n\n  @override\n  ChatOllamaOptions copyWith({\n    final String? model,\n    final OllamaResponseFormat? format,\n    final int? keepAlive,\n    final OllamaThinkingLevel? 
think,\n    final int? numKeep,\n    final int? seed,\n    final int? numPredict,\n    final int? topK,\n    final double? topP,\n    final double? minP,\n    final double? tfsZ,\n    final double? typicalP,\n    final int? repeatLastN,\n    final double? temperature,\n    final double? repeatPenalty,\n    final double? presencePenalty,\n    final double? frequencyPenalty,\n    final int? mirostat,\n    final double? mirostatTau,\n    final double? mirostatEta,\n    final bool? penalizeNewline,\n    final List<String>? stop,\n    final bool? numa,\n    final int? numCtx,\n    final int? numBatch,\n    final int? numGpu,\n    final int? mainGpu,\n    final bool? lowVram,\n    final bool? f16KV,\n    final bool? logitsAll,\n    final bool? vocabOnly,\n    final bool? useMmap,\n    final bool? useMlock,\n    final int? numThread,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final int? concurrencyLimit,\n  }) {\n    return ChatOllamaOptions(\n      model: model ?? this.model,\n      format: format ?? this.format,\n      keepAlive: keepAlive ?? this.keepAlive,\n      think: think ?? this.think,\n      numKeep: numKeep ?? this.numKeep,\n      seed: seed ?? this.seed,\n      numPredict: numPredict ?? this.numPredict,\n      topK: topK ?? this.topK,\n      topP: topP ?? this.topP,\n      minP: minP ?? this.minP,\n      tfsZ: tfsZ ?? this.tfsZ,\n      typicalP: typicalP ?? this.typicalP,\n      repeatLastN: repeatLastN ?? this.repeatLastN,\n      temperature: temperature ?? this.temperature,\n      repeatPenalty: repeatPenalty ?? this.repeatPenalty,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      mirostat: mirostat ?? this.mirostat,\n      mirostatTau: mirostatTau ?? this.mirostatTau,\n      mirostatEta: mirostatEta ?? this.mirostatEta,\n      penalizeNewline: penalizeNewline ?? this.penalizeNewline,\n      stop: stop ?? 
this.stop,\n      numa: numa ?? this.numa,\n      numCtx: numCtx ?? this.numCtx,\n      numBatch: numBatch ?? this.numBatch,\n      numGpu: numGpu ?? this.numGpu,\n      mainGpu: mainGpu ?? this.mainGpu,\n      lowVram: lowVram ?? this.lowVram,\n      f16KV: f16KV ?? this.f16KV,\n      logitsAll: logitsAll ?? this.logitsAll,\n      vocabOnly: vocabOnly ?? this.vocabOnly,\n      useMmap: useMmap ?? this.useMmap,\n      useMlock: useMlock ?? this.useMlock,\n      numThread: numThread ?? this.numThread,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatOllamaOptions merge(covariant final ChatOllamaOptions? other) {\n    return copyWith(\n      model: other?.model,\n      format: other?.format,\n      keepAlive: other?.keepAlive,\n      think: other?.think,\n      numKeep: other?.numKeep,\n      seed: other?.seed,\n      numPredict: other?.numPredict,\n      topK: other?.topK,\n      topP: other?.topP,\n      minP: other?.minP,\n      tfsZ: other?.tfsZ,\n      typicalP: other?.typicalP,\n      repeatLastN: other?.repeatLastN,\n      temperature: other?.temperature,\n      repeatPenalty: other?.repeatPenalty,\n      presencePenalty: other?.presencePenalty,\n      frequencyPenalty: other?.frequencyPenalty,\n      mirostat: other?.mirostat,\n      mirostatTau: other?.mirostatTau,\n      mirostatEta: other?.mirostatEta,\n      penalizeNewline: other?.penalizeNewline,\n      stop: other?.stop,\n      numa: other?.numa,\n      numCtx: other?.numCtx,\n      numBatch: other?.numBatch,\n      numGpu: other?.numGpu,\n      mainGpu: other?.mainGpu,\n      lowVram: other?.lowVram,\n      f16KV: other?.f16KV,\n      logitsAll: other?.logitsAll,\n      vocabOnly: other?.vocabOnly,\n      useMmap: other?.useMmap,\n      useMlock: other?.useMlock,\n      numThread: other?.numThread,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  
@override\n  bool operator ==(covariant final ChatOllamaOptions other) {\n    return model == other.model &&\n        format == other.format &&\n        keepAlive == other.keepAlive &&\n        think == other.think &&\n        numKeep == other.numKeep &&\n        seed == other.seed &&\n        numPredict == other.numPredict &&\n        topK == other.topK &&\n        topP == other.topP &&\n        minP == other.minP &&\n        tfsZ == other.tfsZ &&\n        typicalP == other.typicalP &&\n        repeatLastN == other.repeatLastN &&\n        temperature == other.temperature &&\n        repeatPenalty == other.repeatPenalty &&\n        presencePenalty == other.presencePenalty &&\n        frequencyPenalty == other.frequencyPenalty &&\n        mirostat == other.mirostat &&\n        mirostatTau == other.mirostatTau &&\n        mirostatEta == other.mirostatEta &&\n        penalizeNewline == other.penalizeNewline &&\n        const ListEquality<String>().equals(stop, other.stop) &&\n        numa == other.numa &&\n        numCtx == other.numCtx &&\n        numBatch == other.numBatch &&\n        numGpu == other.numGpu &&\n        mainGpu == other.mainGpu &&\n        lowVram == other.lowVram &&\n        f16KV == other.f16KV &&\n        logitsAll == other.logitsAll &&\n        vocabOnly == other.vocabOnly &&\n        useMmap == other.useMmap &&\n        useMlock == other.useMlock &&\n        numThread == other.numThread &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        format.hashCode ^\n        keepAlive.hashCode ^\n        think.hashCode ^\n        numKeep.hashCode ^\n        seed.hashCode ^\n        numPredict.hashCode ^\n        topK.hashCode ^\n        topP.hashCode ^\n        minP.hashCode ^\n        tfsZ.hashCode ^\n        typicalP.hashCode ^\n        repeatLastN.hashCode ^\n        temperature.hashCode ^\n        repeatPenalty.hashCode ^\n        presencePenalty.hashCode ^\n        
frequencyPenalty.hashCode ^\n        mirostat.hashCode ^\n        mirostatTau.hashCode ^\n        mirostatEta.hashCode ^\n        penalizeNewline.hashCode ^\n        const ListEquality<String>().hash(stop) ^\n        numa.hashCode ^\n        numCtx.hashCode ^\n        numBatch.hashCode ^\n        numGpu.hashCode ^\n        mainGpu.hashCode ^\n        lowVram.hashCode ^\n        f16KV.hashCode ^\n        logitsAll.hashCode ^\n        vocabOnly.hashCode ^\n        useMmap.hashCode ^\n        useMlock.hashCode ^\n        numThread.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/embeddings/embeddings.dart",
    "content": "export 'ollama_embeddings.dart';\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/embeddings/ollama_embeddings.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:ollama_dart/ollama_dart.dart';\n\n/// Wrapper around [Ollama](https://ollama.ai) Embeddings API.\n///\n/// Ollama allows you to run open-source large language models,\n/// such as Llama 3, locally.\n///\n/// For a complete list of supported models and model variants, see the\n/// [Ollama model library](https://ollama.ai/library).\n///\n/// Example:\n/// ```dart\n/// final embeddings = OllamaEmbeddings(model: 'llama3.2');\n/// final res = await embeddings.embedQuery('Hello world');\n/// ```\n///\n/// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-embeddings)\n///\n/// ### Setup\n///\n/// 1. Download and install [Ollama](https://ollama.ai)\n/// 2. Fetch a model via `ollama pull <model family>`\n///   * e.g., for `Llama-7b`: `ollama pull llama3.2`\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = Ollama(\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = Ollama(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n///   queryParams: {'x-my-proxy-query-param': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass OllamaEmbeddings extends Embeddings {\n  /// Create a new 
[OllamaEmbeddings] instance.\n  ///\n  /// Main configuration options:\n  /// - `baseUrl`: the base URL of Ollama API.\n  /// - [OllamaEmbeddings.keepAlive]\n  ///\n  /// Advance configuration options:\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  OllamaEmbeddings({\n    this.model = 'llama3.2',\n    this.keepAlive,\n    final String baseUrl = 'http://localhost:11434',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n  }) : _client = OllamaClient(\n         config: OllamaConfig(\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Ollama API.\n  final OllamaClient _client;\n\n  /// The embeddings model to use.\n  final String model;\n\n  /// How long (in minutes) to keep the model loaded in memory.\n  ///\n  /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.\n  /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely.\n  /// - If set to 0, the model will be unloaded immediately once finished.\n  /// - If not set, the model will stay loaded for 5 minutes by default\n  final int? 
keepAlive;\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    final data = await _client.embeddings.create(\n      request: EmbedRequest(\n        model: model,\n        input: documents.map((final doc) => doc.pageContent).toList(),\n        keepAlive: keepAlive?.toString(),\n      ),\n    );\n    return data.embeddings ?? [];\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    final data = await _client.embeddings.create(\n      request: EmbedRequest(\n        model: model,\n        input: query,\n        keepAlive: keepAlive?.toString(),\n      ),\n    );\n    return data.embedding ?? [];\n  }\n\n  /// {@template ollama_embeddings_list_models}\n  /// Returns a list of available models from the local Ollama server.\n  ///\n  /// Note: Ollama does not distinguish between embedding and other models,\n  /// so all locally available models are returned.\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = OllamaEmbeddings();\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return (response.models ?? [])\n        .where((final m) => m.name != null)\n        .map((final m) => ModelInfo(id: m.name!, ownedBy: m.details?.family))\n        .toList();\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/llms/llms.dart",
    "content": "export 'ollama.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/llms/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\n\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:ollama_dart/ollama_dart.dart';\n\nimport 'types.dart';\n\nextension LLMResultMapper on GenerateResponse {\n  LLMResult toLLMResult(final String id, {final bool streaming = false}) {\n    return LLMResult(\n      id: id,\n      output: response ?? '',\n      finishReason: _mapFinishReason(doneReason),\n      metadata: {\n        'model': model,\n        'created_at': createdAt,\n        'done': done,\n        'total_duration': totalDuration,\n        'load_duration': loadDuration,\n        'prompt_eval_count': promptEvalCount,\n        'prompt_eval_duration': promptEvalDuration,\n        'eval_count': evalCount,\n        'eval_duration': evalDuration,\n      },\n      usage: _mapUsage(),\n      streaming: streaming,\n    );\n  }\n\n  LanguageModelUsage _mapUsage() {\n    return LanguageModelUsage(\n      promptTokens: promptEvalCount,\n      responseTokens: evalCount,\n      totalTokens: (promptEvalCount != null && evalCount != null)\n          ? promptEvalCount! + evalCount!\n          : null,\n    );\n  }\n\n  FinishReason _mapFinishReason(final DoneReason? reason) => switch (reason) {\n    DoneReason.stop => FinishReason.stop,\n    DoneReason.length => FinishReason.length,\n    DoneReason.load => FinishReason.unspecified,\n    DoneReason.unload => FinishReason.unspecified,\n    null => FinishReason.unspecified,\n  };\n}\n\nextension GenerateStreamResultMapper on GenerateStreamEvent {\n  LLMResult toLLMResult(final String id, {final bool streaming = false}) {\n    return LLMResult(\n      id: id,\n      output: response ?? '',\n      finishReason: (done ?? false)\n          ? 
FinishReason.stop\n          : FinishReason.unspecified,\n      metadata: {'model': model, 'created_at': createdAt, 'done': done},\n      usage: const LanguageModelUsage(),\n      streaming: streaming,\n    );\n  }\n}\n\nextension OllamaResponseFormatMapper on OllamaResponseFormat {\n  ResponseFormat toFormat() {\n    return switch (this) {\n      OllamaResponseFormat.json => const JsonFormat(),\n    };\n  }\n}\n\nextension OllamaThinkingLevelMapper on OllamaThinkingLevel {\n  ThinkValue toThinkValue() {\n    return switch (this) {\n      OllamaThinkingLevel.high => const ThinkWithLevel(ThinkLevel.high),\n      OllamaThinkingLevel.medium => const ThinkWithLevel(ThinkLevel.medium),\n      OllamaThinkingLevel.low => const ThinkWithLevel(ThinkLevel.low),\n    };\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/llms/ollama.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_tiktoken/langchain_tiktoken.dart';\nimport 'package:ollama_dart/ollama_dart.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [Ollama](https://ollama.ai) Completions API.\n///\n/// Ollama allows you to run open-source large language models,\n/// such as Llama 3 or LLaVA, locally.\n///\n/// For a complete list of supported models and model variants, see the\n/// [Ollama model library](https://ollama.ai/library).\n///\n/// Example:\n/// ```dart\n/// final llm = Ollama(\n///   defaultOption: const OllamaOptions(\n///     model: 'llama3.2',\n///     temperature: 1,\n///   ),\n/// );\n/// final prompt = PromptValue.string('Hello world!');\n/// final result = await openai.invoke(prompt);\n/// ```\n///\n/// - [Ollama API docs](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion)\n///\n/// ### Ollama base URL\n///\n/// By default, [Ollama] uses 'http://localhost:11434' as base URL\n/// (default Ollama API URL). But if you are running Ollama on a different\n/// one, you can override it using the [baseUrl] parameter.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final llm = Ollama(\n///   defaultOptions: const OllamaOptions(\n///     model: 'llama3.2',\n///     temperature: 0,\n///     format: OllamaResponseFormat.json,\n///   ),\n/// );\n/// final prompt = PromptValue.string('Hello world!');\n/// final result = await llm.invoke(prompt);\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await llm.invoke(\n///   prompt,\n///   options: const OllamaOptions(seed: 9999),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final llm = Ollama();\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | llm.bind(const OllamaOptions(model: 'llama3.2')) | outputParser,\n///   'q2': prompt2| llm.bind(const OllamaOptions(model: 'mistral')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Setup\n///\n/// 1. Download and install [Ollama](https://ollama.ai)\n/// 2. 
Fetch a model via `ollama pull <model family>`\n///   * e.g., for `Llama-7b`: `ollama pull llama3.2`\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = Ollama(\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = Ollama(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n///   queryParams: {'x-my-proxy-query-param': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass Ollama extends BaseLLM<OllamaOptions> {\n  /// Create a new [Ollama] instance.\n  ///\n  /// Main configuration options:\n  /// - `baseUrl`: the base URL of Ollama API.\n  /// - [Ollama.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  /// - [Ollama.encoding]\n  Ollama({\n    final String baseUrl = 'http://localhost:11434',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? 
client,\n    super.defaultOptions = const OllamaOptions(model: defaultModel),\n    this.encoding = 'cl100k_base',\n  }) : _client = OllamaClient(\n         config: OllamaConfig(\n           baseUrl: baseUrl,\n           defaultHeaders: headers ?? const {},\n           defaultQueryParams:\n               queryParams?.map((k, v) => MapEntry(k, v.toString())) ??\n               const {},\n         ),\n         httpClient: client,\n       );\n\n  /// A client for interacting with Ollama API.\n  final OllamaClient _client;\n\n  /// The encoding to use by tiktoken when [tokenize] is called.\n  ///\n  /// Ollama does not provide any API to count tokens, so we use tiktoken\n  /// to get an estimation of the number of tokens in a prompt.\n  String encoding;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  @override\n  String get modelType => 'ollama';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'llama3.2';\n\n  @override\n  Future<LLMResult> invoke(\n    final PromptValue input, {\n    final OllamaOptions? options,\n  }) async {\n    final id = _uuid.v4();\n    final completion = await _client.completions.generate(\n      request: _createGenerateRequest(input.toString(), options: options),\n    );\n    return completion.toLLMResult(id);\n  }\n\n  @override\n  Stream<LLMResult> stream(\n    final PromptValue input, {\n    final OllamaOptions? options,\n  }) {\n    final id = _uuid.v4();\n    return _client.completions\n        .generateStream(\n          request: _createGenerateRequest(\n            input.toString(),\n            options: options,\n            stream: true,\n          ),\n        )\n        .map((final completion) => completion.toLLMResult(id, streaming: true));\n  }\n\n  /// Creates a [GenerateRequest] from the given input.\n  GenerateRequest _createGenerateRequest(\n    final String prompt, {\n    final bool stream = false,\n    final OllamaOptions? 
options,\n  }) {\n    return GenerateRequest(\n      model: options?.model ?? defaultOptions.model ?? defaultModel,\n      prompt: prompt,\n      system: options?.system ?? defaultOptions.system,\n      suffix: options?.suffix ?? defaultOptions.suffix,\n      template: options?.template ?? defaultOptions.template,\n      context: options?.context ?? defaultOptions.context,\n      format: (options?.format ?? defaultOptions.format)?.toFormat(),\n      raw: options?.raw ?? defaultOptions.raw,\n      keepAlive: options?.keepAlive ?? defaultOptions.keepAlive,\n      think: (options?.think ?? defaultOptions.think)?.toThinkValue(),\n      stream: stream,\n      options: ModelOptions(\n        seed: options?.seed ?? defaultOptions.seed,\n        numPredict: options?.numPredict ?? defaultOptions.numPredict,\n        topK: options?.topK ?? defaultOptions.topK,\n        topP: options?.topP ?? defaultOptions.topP,\n        minP: options?.minP ?? defaultOptions.minP,\n        temperature: options?.temperature ?? defaultOptions.temperature,\n        stop: options?.stop ?? defaultOptions.stop,\n        numCtx: options?.numCtx ?? defaultOptions.numCtx,\n        numKeep: options?.numKeep ?? defaultOptions.numKeep,\n        tfsZ: options?.tfsZ ?? defaultOptions.tfsZ,\n        typicalP: options?.typicalP ?? defaultOptions.typicalP,\n        repeatLastN: options?.repeatLastN ?? defaultOptions.repeatLastN,\n        repeatPenalty: options?.repeatPenalty ?? defaultOptions.repeatPenalty,\n        presencePenalty:\n            options?.presencePenalty ?? defaultOptions.presencePenalty,\n        frequencyPenalty:\n            options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,\n        mirostat: options?.mirostat ?? defaultOptions.mirostat,\n        mirostatTau: options?.mirostatTau ?? defaultOptions.mirostatTau,\n        mirostatEta: options?.mirostatEta ?? defaultOptions.mirostatEta,\n        penalizeNewline:\n            options?.penalizeNewline ?? 
defaultOptions.penalizeNewline,\n        numa: options?.numa ?? defaultOptions.numa,\n        numBatch: options?.numBatch ?? defaultOptions.numBatch,\n        numGpu: options?.numGpu ?? defaultOptions.numGpu,\n        mainGpu: options?.mainGpu ?? defaultOptions.mainGpu,\n        lowVram: options?.lowVram ?? defaultOptions.lowVram,\n        f16Kv: options?.f16KV ?? defaultOptions.f16KV,\n        logitsAll: options?.logitsAll ?? defaultOptions.logitsAll,\n        vocabOnly: options?.vocabOnly ?? defaultOptions.vocabOnly,\n        useMmap: options?.useMmap ?? defaultOptions.useMmap,\n        useMlock: options?.useMlock ?? defaultOptions.useMlock,\n        numThread: options?.numThread ?? defaultOptions.numThread,\n      ),\n    );\n  }\n\n  /// Tokenizes the given prompt using tiktoken.\n  ///\n  /// Currently Ollama does not provide a tokenizer for the models it supports.\n  /// So we use tiktoken and [encoding] model to get an approximation\n  /// for counting tokens. Mind that the actual tokens will be totally\n  /// different from the ones used by the Ollama model.\n  ///\n  /// If an encoding model is specified in [encoding] field, that\n  /// encoding is used instead.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final OllamaOptions? options,\n  }) async {\n    final encoding = getEncoding(this.encoding);\n    return encoding.encode(promptValue.toString());\n  }\n\n  /// {@template ollama_llm_list_models}\n  /// Returns a list of available models from the local Ollama server.\n  ///\n  /// Example:\n  /// ```dart\n  /// final llm = Ollama();\n  /// final models = await llm.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return (response.models ?? 
[])\n        .where((final m) => m.name != null)\n        .map((final m) => ModelInfo(id: m.name!, ownedBy: m.details?.family))\n        .toList();\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_ollama/lib/src/llms/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template ollama_options}\n/// Options to pass into the Ollama LLM.\n///\n/// For a complete list of supported models and model variants, see the\n/// [Ollama model library](https://ollama.ai/library).\n/// {@endtemplate}\n@immutable\nclass OllamaOptions extends LLMOptions {\n  /// {@macro ollama_options}\n  const OllamaOptions({\n    super.model,\n    this.system,\n    this.suffix,\n    this.template,\n    this.context,\n    this.format,\n    this.raw,\n    this.keepAlive,\n    this.think,\n    this.numKeep,\n    this.seed,\n    this.numPredict,\n    this.topK,\n    this.topP,\n    this.minP,\n    this.tfsZ,\n    this.typicalP,\n    this.repeatLastN,\n    this.temperature,\n    this.repeatPenalty,\n    this.presencePenalty,\n    this.frequencyPenalty,\n    this.mirostat,\n    this.mirostatTau,\n    this.mirostatEta,\n    this.penalizeNewline,\n    this.stop,\n    this.numa,\n    this.numCtx,\n    this.numBatch,\n    this.numGpu,\n    this.mainGpu,\n    this.lowVram,\n    this.f16KV,\n    this.logitsAll,\n    this.vocabOnly,\n    this.useMmap,\n    this.useMlock,\n    this.numThread,\n    super.concurrencyLimit,\n  });\n\n  /// The system prompt (Overrides what is defined in the Modelfile).\n  final String? system;\n\n  /// The text that comes after the inserted text.\n  final String? suffix;\n\n  /// The full prompt or prompt template (overrides what is defined in the\n  /// Modelfile).\n  final String? template;\n\n  /// The context parameter returned from a previous request to\n  /// [generateCompletion], this can be used to keep a short conversational\n  /// memory.\n  final List<int>? context;\n\n  /// The format to return a response in. Currently the only accepted value is\n  /// json.\n  ///\n  /// Enable JSON mode by setting the format parameter to json. 
This will\n  /// structure the response as valid JSON.\n  ///\n  /// Note: it's important to instruct the model to use JSON in the prompt.\n  /// Otherwise, the model may generate large amounts whitespace.\n  final OllamaResponseFormat? format;\n\n  /// If `true` no formatting will be applied to the prompt and no context will\n  /// be returned.\n  ///\n  /// You may choose to use the `raw` parameter if you are specifying a full\n  /// templated prompt in your request to the API, and are managing history\n  /// yourself.\n  final bool? raw;\n\n  /// How long (in minutes) to keep the model loaded in memory.\n  ///\n  /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.\n  /// - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely.\n  /// - If set to 0, the model will be unloaded immediately once finished.\n  /// - If not set, the model will stay loaded for 5 minutes by default\n  final int? keepAlive;\n\n  /// Controls whether thinking/reasoning models will think before responding.\n  ///\n  /// Can be set to a [OllamaThinkingLevel] to control the intensity level:\n  /// - [OllamaThinkingLevel.high]: Maximum reasoning depth\n  /// - [OllamaThinkingLevel.medium]: Balanced reasoning\n  /// - [OllamaThinkingLevel.low]: Minimal reasoning\n  final OllamaThinkingLevel? think;\n\n  /// Number of tokens to keep from the prompt.\n  /// (Default: 0)\n  final int? numKeep;\n\n  /// Sets the random number seed to use for generation. Setting this to a\n  /// specific number will make the model generate the same text for the same\n  /// prompt.\n  /// (Default: 0)\n  final int? seed;\n\n  /// Maximum number of tokens to predict when generating text.\n  /// (Default: 128, -1 = infinite generation, -2 = fill context)\n  final int? numPredict;\n\n  /// Reduces the probability of generating nonsense. A higher value (e.g. 100)\n  /// will give more diverse answers, while a lower value (e.g. 
10) will be\n  /// more conservative.\n  /// (Default: 40)\n  final int? topK;\n\n  /// Works together with [topK]. A higher value (e.g., 0.95) will lead to more\n  /// diverse text, while a lower value (e.g., 0.5) will generate more focused\n  /// and conservative text.\n  /// (Default: 0.9)\n  final double? topP;\n\n  /// Alternative to the [topP], and aims to ensure a balance of quality and\n  /// variety. [minP] represents the minimum probability for a token to be\n  /// considered, relative to the probability of the most likely token. For\n  /// example, with min_p=0.05 and the most likely token having a probability\n  /// of 0.9, logits with a value less than 0.05*0.9=0.045 are filtered out.\n  /// (Default: 0.0)\n  final double? minP;\n\n  /// Tail free sampling is used to reduce the impact of less probable tokens\n  /// from the output. A higher value (e.g., 2.0) will reduce the impact more,\n  /// while a value of 1.0 disables this setting.\n  /// (default: 1)\n  final double? tfsZ;\n\n  /// Typical p is used to reduce the impact of less probable tokens from the\n  /// output.\n  /// (Default: 1.0)\n  final double? typicalP;\n\n  /// Sets how far back for the model to look back to prevent repetition.\n  /// (Default: 64, 0 = disabled, -1 = num_ctx)\n  final int? repeatLastN;\n\n  /// The temperature of the model. Increasing the temperature will make the\n  /// model answer more creatively.\n  /// (Default: 0.8)\n  final double? temperature;\n\n  /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)\n  /// will penalize repetitions more strongly, while a lower value (e.g., 0.9)\n  /// will be more lenient.\n  /// (Default: 1.1)\n  final double? repeatPenalty;\n\n  /// Positive values penalize new tokens based on whether they appear in the\n  /// text so far, increasing the model's likelihood to talk about new topics.\n  final double? 
presencePenalty;\n\n  /// Positive values penalize new tokens based on their existing frequency in\n  /// the text so far, decreasing the model's likelihood to repeat the same\n  /// line verbatim.\n  final double? frequencyPenalty;\n\n  /// Enable Mirostat sampling for controlling perplexity.\n  /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n  final int? mirostat;\n\n  /// Controls the balance between coherence and diversity of the output. A\n  /// lower value will result in more focused and coherent text.\n  /// (Default: 5.0)\n  final double? mirostatTau;\n\n  /// Influences how quickly the algorithm responds to feedback from the\n  /// generated text. A lower learning rate will result in slower adjustments,\n  /// while a higher learning rate will make the algorithm more responsive.\n  /// (Default: 0.1)\n  final double? mirostatEta;\n\n  /// Penalize newlines in the output.\n  /// (Default: false)\n  final bool? penalizeNewline;\n\n  /// Sequences where the API will stop generating further tokens. The returned\n  /// text will not contain the stop sequence.\n  final List<String>? stop;\n\n  /// Enable NUMA support.\n  /// (Default: false)\n  final bool? numa;\n\n  /// Sets the size of the context window used to generate the next token.\n  final int? numCtx;\n\n  /// Sets the number of batches to use for generation.\n  /// (Default: 1)\n  final int? numBatch;\n\n  /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to\n  /// enable metal support, 0 to disable.\n  final int? numGpu;\n\n  /// The GPU to use for the main model.\n  /// (Default: 0)\n  final int? mainGpu;\n\n  /// Enable low VRAM mode.\n  /// (Default: false)\n  final bool? lowVram;\n\n  /// Enable f16 key/value.\n  /// (Default: false)\n  final bool? f16KV;\n\n  /// Enable logits all.\n  /// (Default: false)\n  final bool? logitsAll;\n\n  /// Enable vocab only.\n  /// (Default: false)\n  final bool? 
vocabOnly;\n\n  /// Enable mmap.\n  /// (Default: false)\n  final bool? useMmap;\n\n  /// Enable mlock.\n  /// (Default: false)\n  final bool? useMlock;\n\n  /// Sets the number of threads to use during computation. By default, Ollama\n  /// will detect this for optimal performance. It is recommended to set this\n  /// value to the number of physical CPU cores your system has (as opposed to\n  /// the logical number of cores).\n  final int? numThread;\n\n  @override\n  OllamaOptions copyWith({\n    final String? model,\n    final String? system,\n    final String? suffix,\n    final String? template,\n    final List<int>? context,\n    final OllamaResponseFormat? format,\n    final bool? raw,\n    final int? keepAlive,\n    final OllamaThinkingLevel? think,\n    final int? numKeep,\n    final int? seed,\n    final int? numPredict,\n    final int? topK,\n    final double? topP,\n    final double? minP,\n    final double? tfsZ,\n    final double? typicalP,\n    final int? repeatLastN,\n    final double? temperature,\n    final double? repeatPenalty,\n    final double? presencePenalty,\n    final double? frequencyPenalty,\n    final int? mirostat,\n    final double? mirostatTau,\n    final double? mirostatEta,\n    final bool? penalizeNewline,\n    final List<String>? stop,\n    final bool? numa,\n    final int? numCtx,\n    final int? numBatch,\n    final int? numGpu,\n    final int? mainGpu,\n    final bool? lowVram,\n    final bool? f16KV,\n    final bool? logitsAll,\n    final bool? vocabOnly,\n    final bool? useMmap,\n    final bool? useMlock,\n    final int? numThread,\n    final int? concurrencyLimit,\n  }) {\n    return OllamaOptions(\n      model: model ?? this.model,\n      system: system ?? this.system,\n      suffix: suffix ?? this.suffix,\n      template: template ?? this.template,\n      context: context ?? this.context,\n      format: format ?? this.format,\n      raw: raw ?? this.raw,\n      keepAlive: keepAlive ?? 
this.keepAlive,\n      think: think ?? this.think,\n      numKeep: numKeep ?? this.numKeep,\n      seed: seed ?? this.seed,\n      numPredict: numPredict ?? this.numPredict,\n      topK: topK ?? this.topK,\n      topP: topP ?? this.topP,\n      minP: minP ?? this.minP,\n      tfsZ: tfsZ ?? this.tfsZ,\n      typicalP: typicalP ?? this.typicalP,\n      repeatLastN: repeatLastN ?? this.repeatLastN,\n      temperature: temperature ?? this.temperature,\n      repeatPenalty: repeatPenalty ?? this.repeatPenalty,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      mirostat: mirostat ?? this.mirostat,\n      mirostatTau: mirostatTau ?? this.mirostatTau,\n      mirostatEta: mirostatEta ?? this.mirostatEta,\n      penalizeNewline: penalizeNewline ?? this.penalizeNewline,\n      stop: stop ?? this.stop,\n      numa: numa ?? this.numa,\n      numCtx: numCtx ?? this.numCtx,\n      numBatch: numBatch ?? this.numBatch,\n      numGpu: numGpu ?? this.numGpu,\n      mainGpu: mainGpu ?? this.mainGpu,\n      lowVram: lowVram ?? this.lowVram,\n      f16KV: f16KV ?? this.f16KV,\n      logitsAll: logitsAll ?? this.logitsAll,\n      vocabOnly: vocabOnly ?? this.vocabOnly,\n      useMmap: useMmap ?? this.useMmap,\n      useMlock: useMlock ?? this.useMlock,\n      numThread: numThread ?? this.numThread,\n      concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit,\n    );\n  }\n\n  @override\n  OllamaOptions merge(covariant final OllamaOptions? 
other) {\n    return copyWith(\n      model: other?.model,\n      system: other?.system,\n      suffix: other?.suffix,\n      template: other?.template,\n      context: other?.context,\n      format: other?.format,\n      raw: other?.raw,\n      keepAlive: other?.keepAlive,\n      think: other?.think,\n      numKeep: other?.numKeep,\n      seed: other?.seed,\n      numPredict: other?.numPredict,\n      topK: other?.topK,\n      topP: other?.topP,\n      minP: other?.minP,\n      tfsZ: other?.tfsZ,\n      typicalP: other?.typicalP,\n      repeatLastN: other?.repeatLastN,\n      temperature: other?.temperature,\n      repeatPenalty: other?.repeatPenalty,\n      presencePenalty: other?.presencePenalty,\n      frequencyPenalty: other?.frequencyPenalty,\n      mirostat: other?.mirostat,\n      mirostatTau: other?.mirostatTau,\n      mirostatEta: other?.mirostatEta,\n      penalizeNewline: other?.penalizeNewline,\n      stop: other?.stop,\n      numa: other?.numa,\n      numCtx: other?.numCtx,\n      numBatch: other?.numBatch,\n      numGpu: other?.numGpu,\n      mainGpu: other?.mainGpu,\n      lowVram: other?.lowVram,\n      f16KV: other?.f16KV,\n      logitsAll: other?.logitsAll,\n      vocabOnly: other?.vocabOnly,\n      useMmap: other?.useMmap,\n      useMlock: other?.useMlock,\n      numThread: other?.numThread,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final OllamaOptions other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            model == other.model &&\n            system == other.system &&\n            suffix == other.suffix &&\n            template == other.template &&\n            const ListEquality<int>().equals(context, other.context) &&\n            format == other.format &&\n            raw == other.raw &&\n            keepAlive == other.keepAlive &&\n            think == other.think &&\n            numKeep == other.numKeep &&\n            seed == 
other.seed &&\n            numPredict == other.numPredict &&\n            topK == other.topK &&\n            topP == other.topP &&\n            minP == other.minP &&\n            tfsZ == other.tfsZ &&\n            typicalP == other.typicalP &&\n            repeatLastN == other.repeatLastN &&\n            temperature == other.temperature &&\n            repeatPenalty == other.repeatPenalty &&\n            presencePenalty == other.presencePenalty &&\n            frequencyPenalty == other.frequencyPenalty &&\n            mirostat == other.mirostat &&\n            mirostatTau == other.mirostatTau &&\n            mirostatEta == other.mirostatEta &&\n            penalizeNewline == other.penalizeNewline &&\n            const ListEquality<String>().equals(stop, other.stop) &&\n            numa == other.numa &&\n            numCtx == other.numCtx &&\n            numBatch == other.numBatch &&\n            numGpu == other.numGpu &&\n            mainGpu == other.mainGpu &&\n            lowVram == other.lowVram &&\n            f16KV == other.f16KV &&\n            logitsAll == other.logitsAll &&\n            vocabOnly == other.vocabOnly &&\n            useMmap == other.useMmap &&\n            useMlock == other.useMlock &&\n            numThread == other.numThread &&\n            concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        system.hashCode ^\n        suffix.hashCode ^\n        template.hashCode ^\n        const ListEquality<int>().hash(context) ^\n        format.hashCode ^\n        raw.hashCode ^\n        keepAlive.hashCode ^\n        think.hashCode ^\n        numKeep.hashCode ^\n        seed.hashCode ^\n        numPredict.hashCode ^\n        topK.hashCode ^\n        topP.hashCode ^\n        minP.hashCode ^\n        tfsZ.hashCode ^\n        typicalP.hashCode ^\n        repeatLastN.hashCode ^\n        temperature.hashCode ^\n        repeatPenalty.hashCode ^\n        presencePenalty.hashCode ^\n        
frequencyPenalty.hashCode ^\n        mirostat.hashCode ^\n        mirostatTau.hashCode ^\n        mirostatEta.hashCode ^\n        penalizeNewline.hashCode ^\n        const ListEquality<String>().hash(stop) ^\n        numa.hashCode ^\n        numCtx.hashCode ^\n        numBatch.hashCode ^\n        numGpu.hashCode ^\n        mainGpu.hashCode ^\n        lowVram.hashCode ^\n        f16KV.hashCode ^\n        logitsAll.hashCode ^\n        vocabOnly.hashCode ^\n        useMmap.hashCode ^\n        useMlock.hashCode ^\n        numThread.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// The format to return a response in.\n///\n/// Currently the only accepted value is json.\nenum OllamaResponseFormat {\n  /// Enable JSON mode by setting the format parameter to json. This will\n  /// structure the response as valid JSON.\n  ///\n  /// Note: it's important to instruct the model to use JSON in the prompt.\n  /// Otherwise, the model may generate large amounts whitespace.\n  json,\n}\n\n/// The thinking intensity level for reasoning models.\nenum OllamaThinkingLevel {\n  /// High thinking intensity - maximum reasoning depth\n  high,\n\n  /// Medium thinking intensity - balanced reasoning\n  medium,\n\n  /// Low thinking intensity - minimal reasoning\n  low,\n}\n"
  },
  {
    "path": "packages/langchain_ollama/pubspec.yaml",
    "content": "name: langchain_ollama\ndescription: LangChain.dart integration module for Ollama (run Llama 4, Gemma 3, Phi4, Mistral, Qwen3 and other models locally).\nversion: 0.4.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_ollama\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_ollama\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - ollama\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  collection: ^1.19.1\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  langchain_tiktoken: ^1.0.1\n  meta: ^1.16.0\n  ollama_dart: ^1.4.0\n  uuid: ^4.5.1\n\ndev_dependencies:\n  langchain: ^0.8.1\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_ollama/test/chat_models/chat_ollama_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatOllama tests', skip: Platform.environment.containsKey('CI'), () {\n    late ChatOllama chatModel;\n    const defaultModel = 'gpt-oss:latest';\n    const visionModel = 'qwen3-vl:latest';\n\n    setUp(() {\n      chatModel = ChatOllama(\n        defaultOptions: const ChatOllamaOptions(\n          model: defaultModel,\n          keepAlive: 1,\n        ),\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test ChatOllama parameters', () {\n      const options = ChatOllamaOptions(\n        model: 'foo',\n        format: OllamaResponseFormat.json,\n        numKeep: 0,\n        seed: 1,\n        numPredict: 2,\n        topK: 3,\n        topP: 4.0,\n        tfsZ: 5.0,\n        typicalP: 6.0,\n        repeatLastN: 7,\n        temperature: 8.0,\n        repeatPenalty: 9.0,\n        presencePenalty: 10.0,\n        frequencyPenalty: 11.0,\n        mirostat: 12,\n        mirostatTau: 13.0,\n        mirostatEta: 14.0,\n        penalizeNewline: false,\n        stop: ['stop <start_message>', 'stop <stop_message>'],\n        numa: true,\n        numCtx: 15,\n        numBatch: 16,\n        numGpu: 0,\n        mainGpu: 18,\n        lowVram: true,\n        f16KV: true,\n        logitsAll: true,\n        vocabOnly: true,\n        useMmap: true,\n        useMlock: true,\n        numThread: 21,\n      );\n\n      expect(options.model, 'foo');\n      expect(options.format, OllamaResponseFormat.json);\n      expect(options.numKeep, 0);\n      expect(options.seed, 1);\n      
expect(options.numPredict, 2);\n      expect(options.topK, 3);\n      expect(options.topP, 4.0);\n      expect(options.tfsZ, 5.0);\n      expect(options.typicalP, 6.0);\n      expect(options.repeatLastN, 7);\n      expect(options.temperature, 8.0);\n      expect(options.repeatPenalty, 9.0);\n      expect(options.presencePenalty, 10.0);\n      expect(options.frequencyPenalty, 11.0);\n      expect(options.mirostat, 12);\n      expect(options.mirostatTau, 13.0);\n      expect(options.mirostatEta, 14.0);\n      expect(options.penalizeNewline, false);\n      expect(options.stop, ['stop <start_message>', 'stop <stop_message>']);\n      expect(options.numa, true);\n      expect(options.numCtx, 15);\n      expect(options.numBatch, 16);\n      expect(options.numGpu, 0);\n      expect(options.mainGpu, 18);\n      expect(options.lowVram, true);\n      expect(options.f16KV, true);\n      expect(options.logitsAll, true);\n      expect(options.vocabOnly, true);\n      expect(options.useMmap, true);\n      expect(options.useMlock, true);\n      expect(options.numThread, 21);\n    });\n\n    test('Test model output contains metadata', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([\n          ChatMessage.humanText(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n        ]),\n      );\n      expect(\n        res.output.content.replaceAll(RegExp(r'[\\s\\n-]'), ''),\n        contains('123456789'),\n      );\n      expect(res.finishReason, FinishReason.stop);\n      expect(res.metadata, isNotNull);\n      expect(res.metadata['model'], defaultModel);\n      expect(res.metadata['created_at'], isNotNull);\n      expect(res.metadata['done'], isTrue);\n      expect(res.metadata['total_duration'], greaterThan(0));\n      expect(res.metadata['load_duration'], greaterThan(0));\n      expect(res.metadata['prompt_eval_count'], greaterThan(0));\n      expect(res.metadata['eval_count'], greaterThan(0));\n      expect(res.metadata['eval_duration'], greaterThan(0));\n    });\n\n    test('Test stop logic on valid configuration', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string('write an ordered list of five items'),\n        options: const ChatOllamaOptions(temperature: 0, stop: ['3']),\n      );\n      expect(res.output.content.contains('2.'), isTrue);\n      expect(res.output.content.contains('3.'), isFalse);\n      expect(res.finishReason, FinishReason.stop);\n    });\n\n    test('Test max tokens', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string('write an ordered list of five items'),\n        options: const ChatOllamaOptions(numPredict: 2),\n      );\n      expect(res.finishReason, FinishReason.length);\n    });\n\n    test('Test tokenize', () async {\n      final tokens = await chatModel.tokenize(\n        PromptValue.string('antidisestablishmentarianism'),\n      );\n      expect(tokens, [519, 85342, 34500, 479, 8997, 2191]);\n    });\n\n    test('Test different encoding than the model', () async {\n      chatModel.encoding = 'cl100k_base';\n      final tokens = await chatModel.tokenize(\n        PromptValue.string('antidisestablishmentarianism'),\n      );\n      expect(tokens, [519, 85342, 34500, 479, 8997, 2191]);\n    
});\n\n    test('Test countTokens', () async {\n      final numTokens = await chatModel.countTokens(\n        PromptValue.string('Hello, how are you?'),\n      );\n      expect(numTokens, 6);\n    });\n\n    test('Test streaming', () async {\n      final promptTemplate = PromptTemplate.fromTemplate(\n        'List the numbers from 1 to {max_num} in order. '\n        'Output ONLY the numbers in one line without any spaces or commas. '\n        'NUMBERS:',\n      );\n      const stringOutputParser = StringOutputParser<ChatResult>();\n\n      final chain = promptTemplate.pipe(chatModel).pipe(stringOutputParser);\n\n      final stream = chain.stream({'max_num': '9'});\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res.trim();\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, contains('123456789'));\n    });\n\n    test('Test response seed', skip: true, () async {\n      final prompt = PromptValue.string(\n        'Why is the sky blue? Reply in one sentence.',\n      );\n      const options = ChatOllamaOptions(seed: 9999);\n\n      final res1 = await chatModel.invoke(prompt, options: options);\n\n      final res2 = await chatModel.invoke(prompt, options: options);\n      expect(res1.output, res2.output);\n    });\n\n    test('Test Multi-turn conversations', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.humanText('List the numbers from 1 to 9 in order.'),\n        ChatMessage.ai('123456789'),\n        ChatMessage.humanText(\n          'Remove the number \"4\" from the list. 
Output only the remaining numbers in ascending order.',\n        ),\n      ]);\n      final res = await chatModel.invoke(\n        prompt,\n        options: const ChatOllamaOptions(temperature: 0),\n      );\n      expect(\n        res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('12356789'),\n      );\n    });\n\n    test('Text-and-image input with llava', () async {\n      final res = await chatModel.invoke(\n        PromptValue.chat([\n          ChatMessage.human(\n            ChatMessageContent.multiModal([\n              ChatMessageContent.text('What fruit is this?'),\n              ChatMessageContent.image(\n                mimeType: 'image/jpeg',\n                data: base64.encode(\n                  await File(\n                    './test/chat_models/assets/apple.jpeg',\n                  ).readAsBytes(),\n                ),\n              ),\n            ]),\n          ),\n        ]),\n        options: const ChatOllamaOptions(model: visionModel, temperature: 0),\n      );\n\n      expect(res.output.content.toLowerCase(), contains('apple'));\n    });\n\n    const tool1 = ToolSpec(\n      name: 'get_current_weather',\n      description: 'Get the current weather in a given location',\n      inputJsonSchema: {\n        'type': 'object',\n        'properties': {\n          'location': {\n            'type': 'string',\n            'description': 'The city and country, e.g. San Francisco, US',\n          },\n          'unit': {\n            'type': 'string',\n            'enum': ['celsius', 'fahrenheit'],\n          },\n        },\n        'required': ['location'],\n      },\n    );\n    const tool2 = ToolSpec(\n      name: 'get_historic_weather',\n      description: 'Get the historic weather in a given location',\n      inputJsonSchema: {\n        'type': 'object',\n        'properties': {\n          'location': {\n            'type': 'string',\n            'description': 'The city and country, e.g. 
San Francisco, US',\n          },\n          'unit': {\n            'type': 'string',\n            'enum': ['celsius', 'fahrenheit'],\n          },\n        },\n        'required': ['location'],\n      },\n    );\n\n    test(\n      'Test tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        final model = chatModel.bind(\n          const ChatOllamaOptions(model: defaultModel, tools: [tool1]),\n        );\n\n        final humanMessage = ChatMessage.humanText(\n          \"What's the weather like in Boston and Madrid right now in celsius?\",\n        );\n        final res1 = await model.invoke(PromptValue.chat([humanMessage]));\n\n        final aiMessage1 = res1.output;\n        expect(aiMessage1.toolCalls, hasLength(2));\n\n        final toolCall1 = aiMessage1.toolCalls.first;\n        expect(toolCall1.name, tool1.name);\n        expect(toolCall1.arguments.containsKey('location'), isTrue);\n        expect(toolCall1.arguments['location'], contains('Boston'));\n        expect(toolCall1.arguments['unit'], 'celsius');\n\n        final toolCall2 = aiMessage1.toolCalls.last;\n        expect(toolCall2.name, tool1.name);\n        expect(toolCall2.arguments.containsKey('location'), isTrue);\n        expect(toolCall2.arguments['location'], contains('Madrid'));\n        expect(toolCall2.arguments['unit'], 'celsius');\n\n        final functionResult1 = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage1 = ChatMessage.tool(\n          toolCallId: toolCall1.id,\n          content: json.encode(functionResult1),\n        );\n\n        final functionResult2 = {\n          'temperature': '25',\n          'unit': 'celsius',\n          'description': 'Cloudy',\n        };\n        final functionMessage2 = ChatMessage.tool(\n          toolCallId: toolCall2.id,\n          content: json.encode(functionResult2),\n        );\n\n        final res2 = await 
model.invoke(\n          PromptValue.chat([\n            humanMessage,\n            aiMessage1,\n            functionMessage1,\n            functionMessage2,\n          ]),\n        );\n\n        final aiMessage2 = res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n        expect(aiMessage2.content, contains('25'));\n      },\n    );\n\n    test('Test multi tool call', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\n          \"What's the weather in Vellore, India and in Barcelona, Spain?\",\n        ),\n        options: const ChatOllamaOptions(\n          model: defaultModel,\n          tools: [tool1, tool2],\n        ),\n      );\n      expect(res.output.toolCalls, hasLength(2));\n      final toolCall1 = res.output.toolCalls.first;\n      expect(toolCall1.name, 'get_current_weather');\n      expect(toolCall1.argumentsRaw, isNotEmpty);\n      expect(toolCall1.arguments, isNotEmpty);\n      expect(toolCall1.arguments['location'], 'Vellore, India');\n      expect(toolCall1.arguments['unit'], 'celsius');\n      final toolCall2 = res.output.toolCalls.last;\n      expect(toolCall2.name, 'get_current_weather');\n      expect(toolCall2.argumentsRaw, isNotEmpty);\n      expect(toolCall2.arguments, isNotEmpty);\n      expect(toolCall2.arguments['location'], 'Barcelona, Spain');\n      expect(toolCall2.arguments['unit'], 'celsius');\n      expect(res.finishReason, FinishReason.stop);\n    });\n\n    test('Test ChatToolChoice.none', () async {\n      final res = await chatModel.invoke(\n        PromptValue.string(\"What's the weather in Vellore, India?\"),\n        options: const ChatOllamaOptions(\n          model: defaultModel,\n          tools: [tool1],\n          toolChoice: ChatToolChoice.none,\n        ),\n      );\n      expect(res.output.toolCalls, isEmpty);\n      expect(res.output.content, isNotEmpty);\n    });\n\n    test('Test ChatToolChoice.forced', () async {\n 
     final res = await chatModel.invoke(\n        PromptValue.string(\"What's the weather in Vellore, India?\"),\n        options: ChatOllamaOptions(\n          model: defaultModel,\n          tools: const [tool1, tool2],\n          toolChoice: ChatToolChoice.forced(name: tool2.name),\n        ),\n      );\n      expect(res.output.toolCalls, hasLength(1));\n      final toolCall = res.output.toolCalls.first;\n      expect(toolCall.name, tool2.name);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_ollama/test/embeddings/ollama_test.dart",
    "content": "import 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group(\n    'OllamaEmbeddings tests',\n    skip: Platform.environment.containsKey('CI'),\n    () {\n      late OllamaEmbeddings embeddings;\n      const defaultModel = 'nomic-embed-text:latest';\n\n      setUp(() {\n        embeddings = OllamaEmbeddings(model: defaultModel);\n      });\n\n      tearDown(() {\n        embeddings.close();\n      });\n\n      test('Test OllamaEmbeddings.embedQuery', () async {\n        final res = await embeddings.embedQuery('Hello world');\n        expect(res.length, 768);\n      });\n\n      test('Test OllamaEmbeddings.embedDocuments', () async {\n        final res = await embeddings.embedDocuments([\n          const Document(id: '1', pageContent: 'Hello world'),\n          const Document(id: '2', pageContent: 'Bye bye'),\n        ]);\n        expect(res.length, 2);\n        expect(res[0].length, 768);\n        expect(res[1].length, 768);\n      });\n    },\n  );\n}\n"
  },
  {
    "path": "packages/langchain_ollama/test/llms/ollama_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'dart:io';\n\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_ollama/langchain_ollama.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Ollama tests', skip: Platform.environment.containsKey('CI'), () {\n    late Ollama llm;\n    const defaultModel = 'gpt-oss:latest';\n\n    setUp(() {\n      llm = Ollama(\n        defaultOptions: const OllamaOptions(model: defaultModel, keepAlive: 1),\n      );\n    });\n\n    tearDown(() {\n      llm.close();\n    });\n\n    test('Test Ollama parameters', () {\n      const options = OllamaOptions(\n        model: 'foo',\n        system: 'system prompt',\n        template: 'TEMPLATE \"\"\"',\n        context: [1, 2, 3],\n        format: OllamaResponseFormat.json,\n        raw: true,\n        numKeep: 0,\n        seed: 1,\n        numPredict: 2,\n        topK: 3,\n        topP: 4.0,\n        tfsZ: 5.0,\n        typicalP: 6.0,\n        repeatLastN: 7,\n        temperature: 8.0,\n        repeatPenalty: 9.0,\n        presencePenalty: 10.0,\n        frequencyPenalty: 11.0,\n        mirostat: 12,\n        mirostatTau: 13.0,\n        mirostatEta: 14.0,\n        penalizeNewline: false,\n        stop: ['stop <start_message>', 'stop <stop_message>'],\n        numa: true,\n        numCtx: 15,\n        numBatch: 16,\n        numGpu: 0,\n        mainGpu: 18,\n        lowVram: true,\n        f16KV: true,\n        logitsAll: true,\n        vocabOnly: true,\n        useMmap: true,\n        useMlock: true,\n        numThread: 21,\n      );\n\n      expect(options.model, 'foo');\n      expect(options.system, 'system prompt');\n      expect(options.template, 'TEMPLATE \"\"\"');\n      expect(options.context, [1, 2, 3]);\n      expect(options.format, OllamaResponseFormat.json);\n      expect(options.raw, true);\n      
expect(options.numKeep, 0);\n      expect(options.seed, 1);\n      expect(options.numPredict, 2);\n      expect(options.topK, 3);\n      expect(options.topP, 4.0);\n      expect(options.tfsZ, 5.0);\n      expect(options.typicalP, 6.0);\n      expect(options.repeatLastN, 7);\n      expect(options.temperature, 8.0);\n      expect(options.repeatPenalty, 9.0);\n      expect(options.presencePenalty, 10.0);\n      expect(options.frequencyPenalty, 11.0);\n      expect(options.mirostat, 12);\n      expect(options.mirostatTau, 13.0);\n      expect(options.mirostatEta, 14.0);\n      expect(options.penalizeNewline, false);\n      expect(options.stop, ['stop <start_message>', 'stop <stop_message>']);\n      expect(options.numa, true);\n      expect(options.numCtx, 15);\n      expect(options.numBatch, 16);\n      expect(options.numGpu, 0);\n      expect(options.mainGpu, 18);\n      expect(options.lowVram, true);\n      expect(options.f16KV, true);\n      expect(options.logitsAll, true);\n      expect(options.vocabOnly, true);\n      expect(options.useMmap, true);\n      expect(options.useMlock, true);\n      expect(options.numThread, 21);\n    });\n\n    test('Test call to Ollama', () async {\n      final output = await llm('Say foo:');\n      expect(output, isNotEmpty);\n    });\n\n    test('Test invoke to Ollama', () async {\n      final res = await llm.invoke(PromptValue.string('Hello, how are you?'));\n      expect(res.output, isNotEmpty);\n    });\n\n    test('Test model output contains metadata', () async {\n      final res = await llm.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order. '\n          'Output ONLY the numbers in one line without any spaces or commas. 
'\n          'NUMBERS:',\n        ),\n      );\n      expect(\n        res.output.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('123456789'),\n      );\n      expect(res.metadata, isNotNull);\n      expect(res.metadata['model'], defaultModel);\n      expect(res.metadata['created_at'], isNotNull);\n      expect(res.metadata['done'], isTrue);\n      expect(res.metadata['total_duration'], greaterThan(0));\n      expect(res.metadata['load_duration'], greaterThan(0));\n      expect(res.metadata['eval_count'], greaterThan(0));\n      expect(res.metadata['eval_duration'], greaterThan(0));\n    });\n\n    test('Test stop logic on valid configuration', () async {\n      const query = 'write an ordered list of five items';\n      final res = await llm(\n        query,\n        options: const OllamaOptions(temperature: 0, stop: ['3']),\n      );\n      expect(res.contains('2.'), isTrue);\n      expect(res.contains('3.'), isFalse);\n    });\n\n    test('Test tokenize', () async {\n      const text = 'antidisestablishmentarianism';\n\n      final tokens = await llm.tokenize(PromptValue.string(text));\n      expect(tokens, [519, 85342, 34500, 479, 8997, 2191]);\n    });\n\n    test('Test different encoding than the model', () async {\n      llm.encoding = 'cl100k_base';\n      const text = 'antidisestablishmentarianism';\n\n      final tokens = await llm.tokenize(PromptValue.string(text));\n      expect(tokens, [519, 85342, 34500, 479, 8997, 2191]);\n    });\n\n    test('Test countTokens', () async {\n      const text = 'Hello, how are you?';\n\n      final numTokens = await llm.countTokens(PromptValue.string(text));\n      expect(numTokens, 6);\n    });\n\n    test('Test streaming', () async {\n      final promptTemplate = PromptTemplate.fromTemplate(\n        'List the numbers from 1 to {max_num} in order. '\n        'Output ONLY the numbers in one line without any spaces or commas. 
'\n        'NUMBERS:',\n      );\n      const stringOutputParser = StringOutputParser<LLMResult>();\n\n      final chain = promptTemplate.pipe(llm).pipe(stringOutputParser);\n\n      final stream = chain.stream({'max_num': '9'});\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res.trim();\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, contains('123456789'));\n    });\n\n    test('Test raw mode', () async {\n      final res = await llm.invoke(\n        PromptValue.string(\n          '''\n<|start_header_id|>system<|end_header_id|>\n\nYou are an AI assistant that follows instructions precisely.\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nList the numbers from 1 to 9 in order. Output ONLY the numbers on one line without any spaces or commas between them.\n<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n'''\n              .trim(),\n        ),\n        options: const OllamaOptions(raw: true),\n      );\n      expect(\n        res.output.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('123456789'),\n      );\n    });\n\n    test('Test JSON mode', () async {\n      final res = await llm.invoke(\n        PromptValue.string(\n          'List the numbers from 1 to 9 in order. Respond using JSON.',\n        ),\n        options: const OllamaOptions(format: OllamaResponseFormat.json),\n      );\n      expect(\n        res.output.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('[1,2,3,4,5,6,7,8,9]'),\n      );\n    });\n\n    test('Test response seed', skip: true, () async {\n      final prompt = PromptValue.string(\n        'Why is the sky blue? Reply in one sentence.',\n      );\n      const options = OllamaOptions(seed: 9999);\n\n      final res1 = await llm.invoke(prompt, options: options);\n\n      final res2 = await llm.invoke(prompt, options: options);\n\n      expect(res1.output, res2.output);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_openai/CHANGELOG.md",
    "content": "## 0.8.1+1\n\n - Update a dependency to the latest release.\n\n## 0.8.1\n\n - **FEAT**: Add listModels() API for LLMs and Embeddings ([#371](https://github.com/davidmigloz/langchain_dart/issues/371)) ([#844](https://github.com/davidmigloz/langchain_dart/issues/844)). ([4b737389](https://github.com/davidmigloz/langchain_dart/commit/4b7373894d5b8701b6d00d153c1741931a49b3a1))\n\n## 0.8.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.8.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.7.6+2\n\n - Update a dependency to the latest release.\n\n## 0.7.6+1\n\n - Update a dependency to the latest release.\n\n## 0.7.6\n\n - **FEAT**: Support reasoningEffort, verbosity and other new fields in ChatOpenAI ([#762](https://github.com/davidmigloz/langchain_dart/issues/762)). ([9cc5d591](https://github.com/davidmigloz/langchain_dart/commit/9cc5d591e868bd5dd3e0a926e564d797dd602dab))\n - **FEAT**: Update ChatOpenAI default model to gpt-5-mini ([#761](https://github.com/davidmigloz/langchain_dart/issues/761)). ([b38ce320](https://github.com/davidmigloz/langchain_dart/commit/b38ce320971373454e10506e79ff75479b0391cd))\n\n## 0.7.5\n\n - **FEAT**: Make CreateChatCompletionStreamResponse.choices field nullable to support Groq's OpenAI-compatible API ([#742](https://github.com/davidmigloz/langchain_dart/issues/742)). 
([76fbbdc6](https://github.com/davidmigloz/langchain_dart/commit/76fbbdc6f78e83f1f622ed73ff4b27b37a4f744b))\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.7.4+2\n\n - Update a dependency to the latest release.\n\n## 0.7.4+1\n\n - Update a dependency to the latest release.\n\n## 0.7.4\n\n - **FEAT**: Update OpenAI model catalog ([#714](https://github.com/davidmigloz/langchain_dart/issues/714)). ([68df4558](https://github.com/davidmigloz/langchain_dart/commit/68df4558a01e872c73ad465f4b85f1b5c61ddd50))\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.7.3\n\n - **FEAT**: Add gpt-4o-2024-11-20 to model catalog in openai_dart ([#614](https://github.com/davidmigloz/langchain_dart/issues/614)). ([bf333081](https://github.com/davidmigloz/langchain_dart/commit/bf33308165869792446c3897db95e6ad7a7cb519))\n - **REFACTOR**: Add new lint rules and fix issues ([#621](https://github.com/davidmigloz/langchain_dart/issues/621)). 
([60b10e00](https://github.com/davidmigloz/langchain_dart/commit/60b10e008acf55ebab90789ad08d2449a44b69d8))\n\n## 0.7.2+5\n\n - Update a dependency to the latest release.\n\n## 0.7.2+4\n\n - Update a dependency to the latest release.\n\n## 0.7.2+3\n\n - Update a dependency to the latest release.\n\n## 0.7.2+2\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.7.2+1\n\n - Update a dependency to the latest release.\n\n## 0.7.2\n\n - **FEAT**: Add OpenAI o1-preview and o1-mini to model catalog ([#555](https://github.com/davidmigloz/langchain_dart/issues/555)). ([9ceb5ff9](https://github.com/davidmigloz/langchain_dart/commit/9ceb5ff9029cf1ae1967a32189f88c7a8215248e))\n - **REFACTOR**: Migrate ChatOpenAI to maxCompletionTokens ([#557](https://github.com/davidmigloz/langchain_dart/issues/557)). ([08057a5b](https://github.com/davidmigloz/langchain_dart/commit/08057a5b6e08ee2633c6be6144be1619e902bbc5))\n\n## 0.7.1\n\n - **FEAT**: Add support for Structured Outputs in ChatOpenAI ([#526](https://github.com/davidmigloz/langchain_dart/issues/526)). ([c5387b5d](https://github.com/davidmigloz/langchain_dart/commit/c5387b5dd87fe2aac511c4eca2d4a497065db61f))\n - **FEAT**: Handle refusal in OpenAI's Structured Outputs API ([#533](https://github.com/davidmigloz/langchain_dart/issues/533)). ([f4c4ed99](https://github.com/davidmigloz/langchain_dart/commit/f4c4ed9902177560f13fa9f44b07f0a49c3fdf0a))\n - **FEAT**: Include logprobs in result metadata from ChatOpenAI ([#535](https://github.com/davidmigloz/langchain_dart/issues/535)). ([1834b3ad](https://github.com/davidmigloz/langchain_dart/commit/1834b3adb210b7d190a7e0574a304f069813486b))\n - **FEAT**: Add chatgpt-4o-latest to model catalog ([#527](https://github.com/davidmigloz/langchain_dart/issues/527)). 
([ec82c760](https://github.com/davidmigloz/langchain_dart/commit/ec82c760582eed123d6e5d3287c24f82ac251df7))\n - **FEAT**: Add gpt-4o-2024-08-06 to model catalog ([#522](https://github.com/davidmigloz/langchain_dart/issues/522)). ([563200e0](https://github.com/davidmigloz/langchain_dart/commit/563200e0bb9d021d9cb3e46e7a77d96cf3860b1c))\n - **FEAT**: Deprecate OpenAIToolsAgent in favour of ToolsAgent ([#532](https://github.com/davidmigloz/langchain_dart/issues/532)). ([68d8011a](https://github.com/davidmigloz/langchain_dart/commit/68d8011a9aa09368875ba0434839d12623ba2bab))\n - **REFACTOR**: Don't send OpenAI-Beta header in ChatOpenAI ([#511](https://github.com/davidmigloz/langchain_dart/issues/511)). ([0e532bab](https://github.com/davidmigloz/langchain_dart/commit/0e532bab84483bf9d77a0d745f1a591eea2ff7c8))\n\n## 0.7.0\n\n - **BREAKING** **FEAT**: Update ChatOpenAI default model to gpt-4o-mini ([#507](https://github.com/davidmigloz/langchain_dart/issues/507)). ([c7b8ce91](https://github.com/davidmigloz/langchain_dart/commit/c7b8ce91ac5b4dbe6bed563fae124a9f5ad76a84))\n - **FEAT**: Add support for disabling parallel tool calls in ChatOpenAI ([#493](https://github.com/davidmigloz/langchain_dart/issues/493)). ([c46d676d](https://github.com/davidmigloz/langchain_dart/commit/c46d676dee836f1d17e0d1fd61a8f1f0ba5c2881))\n - **FEAT**: Add GPT-4o-mini to model catalog ([#497](https://github.com/davidmigloz/langchain_dart/issues/497)). ([faa23aee](https://github.com/davidmigloz/langchain_dart/commit/faa23aeeecfb64dc7d018e642952e41cc7f9eeaf))\n - **FEAT**: Add support for service tier in ChatOpenAI ([#495](https://github.com/davidmigloz/langchain_dart/issues/495)). ([af79a4ff](https://github.com/davidmigloz/langchain_dart/commit/af79a4ffcadb207bfc704365462edebfca1ed6c7))\n - **FEAT**: Implement additive options merging for cascade bind calls ([#500](https://github.com/davidmigloz/langchain_dart/issues/500)). 
([8691eb21](https://github.com/davidmigloz/langchain_dart/commit/8691eb21d5d2ffbf853997cbc0eaa29a56c6ca43))\n - **REFACTOR**: Remove default model from the language model options ([#498](https://github.com/davidmigloz/langchain_dart/issues/498)). ([44363e43](https://github.com/davidmigloz/langchain_dart/commit/44363e435778282ed27bc1b2771cf8b25abc7560))\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.6.3\n\n - **FEAT**: Add support for ChatToolChoiceRequired ([#474](https://github.com/davidmigloz/langchain_dart/issues/474)). ([bf324f36](https://github.com/davidmigloz/langchain_dart/commit/bf324f36f645c53458d5891f8285991cd50f2649))\n\n## 0.6.2\n\n - **FEAT**: Add Runnable.close() to close any resources associated with it ([#439](https://github.com/davidmigloz/langchain_dart/issues/439)). ([4e08cced](https://github.com/davidmigloz/langchain_dart/commit/4e08cceda964921178061e9721618a1505198ff5))\n - **DOCS**: Document tool calling with OpenRouter ([#437](https://github.com/davidmigloz/langchain_dart/issues/437)). ([47986592](https://github.com/davidmigloz/langchain_dart/commit/47986592a674322fe2f69aff7166a3e594756ace))\n\n## 0.6.1+1\n\n - Update a dependency to the latest release.\n\n## 0.6.1\n\n - **FEAT**: Add GPT-4o to model catalog ([#420](https://github.com/davidmigloz/langchain_dart/issues/420)). ([96214307](https://github.com/davidmigloz/langchain_dart/commit/96214307ec8ae045dade687d4c623bd4dc1be896))\n - **FEAT**: Include usage stats when streaming with OpenAI and ChatOpenAI ([#406](https://github.com/davidmigloz/langchain_dart/issues/406)). 
([5e2b0ecc](https://github.com/davidmigloz/langchain_dart/commit/5e2b0eccd54c6c1dc15af8ff6d62c395f12fbd90))\n\n## 0.6.0+2\n\n - Update a dependency to the latest release.\n\n## 0.6.0+1\n\n - Update a dependency to the latest release.\n\n## 0.6.0\n\n> Note: This release has breaking changes.  \n> If you are using \"function calling\" check [how to migrate to \"tool calling\"](https://github.com/davidmigloz/langchain_dart/issues/400).\n\n - **BREAKING** **FEAT**: Migrate from function calling to tool calling ([#400](https://github.com/davidmigloz/langchain_dart/issues/400)). ([44413b83](https://github.com/davidmigloz/langchain_dart/commit/44413b8321b1188ff6b4027b1972a7ee0002761e))\n - **BREAKING** **REFACTOR**: Improve Tool abstractions ([#398](https://github.com/davidmigloz/langchain_dart/issues/398)). ([2a50aec2](https://github.com/davidmigloz/langchain_dart/commit/2a50aec28385068f9be32392020d727fc9a1561e))\n\n## 0.5.1+1\n\n - Update a dependency to the latest release.\n\n## 0.5.1\n\n - **FEAT**: Add support for Runnable.mapInputStream ([#393](https://github.com/davidmigloz/langchain_dart/issues/393)). ([a2b6bbb5](https://github.com/davidmigloz/langchain_dart/commit/a2b6bbb5ea7a65c36d1e955f9f96298cf2384afc))\n\n## 0.5.0+1\n\n - Update a dependency to the latest release.\n\n## 0.5.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n - **BREAKING** **REFACTOR**: Simplify LLMResult and ChatResult classes ([#363](https://github.com/davidmigloz/langchain_dart/issues/363)). 
([ffe539c1](https://github.com/davidmigloz/langchain_dart/commit/ffe539c13f92cce5f564107430163b44be1dfd96))\n - **BREAKING** **REFACTOR**: Simplify Output Parsers ([#367](https://github.com/davidmigloz/langchain_dart/issues/367)). ([f24b7058](https://github.com/davidmigloz/langchain_dart/commit/f24b7058949fba47ba624f071a3f548b8f6e915e))\n - **BREAKING** **REFACTOR**: Remove deprecated generate and predict APIs ([#335](https://github.com/davidmigloz/langchain_dart/issues/335)). ([c55fe50f](https://github.com/davidmigloz/langchain_dart/commit/c55fe50f0040cc04cbd2e90bca475887c093c654))\n - **REFACTOR**: Simplify internal .stream implementation ([#364](https://github.com/davidmigloz/langchain_dart/issues/364)). ([c83fed22](https://github.com/davidmigloz/langchain_dart/commit/c83fed22b2b89d5e51211984b12ec126a3ca225e))\n - **FEAT**: Implement .batch support ([#370](https://github.com/davidmigloz/langchain_dart/issues/370)). ([d254f929](https://github.com/davidmigloz/langchain_dart/commit/d254f929b03d9c950029e55c66831f9f89cc14a9))\n - **FEAT**: Remove deprecated OpenAI instance id ([#350](https://github.com/davidmigloz/langchain_dart/issues/350)). ([52939336](https://github.com/davidmigloz/langchain_dart/commit/529393360b7643c8192153c3654e5482dfc299ad))\n\n## 0.4.1\n\n - **FEAT**: Allow to specify OpenAI custom instance ([#327](https://github.com/davidmigloz/langchain_dart/issues/327)). ([4744648c](https://github.com/davidmigloz/langchain_dart/commit/4744648cdf02828b9182ebd34ba3d7db5313786e))\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.4.0+1\n\n - **DOCS**: Update CHANGELOG.md. 
([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.4.0\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **FEAT**: Update OpenAIEmbeddings' default model to text-embedding-3-small ([#313](https://github.com/davidmigloz/langchain_dart/issues/313)). ([43463481](https://github.com/davidmigloz/langchain_dart/commit/4346348108dc105a1daaedc932641e725b648f3e))\n - **FEAT**: Add support for shortening embeddings in OpenAIEmbeddings ([#312](https://github.com/davidmigloz/langchain_dart/issues/312)). ([5f5eb54f](https://github.com/davidmigloz/langchain_dart/commit/5f5eb54f2b991c14c18abf785b873a677bdf7e14))\n\n## 0.3.3+1\n\n - **FIX**: Specified model is always overwritten in OpenAIFunctionsAgent ([#308](https://github.com/davidmigloz/langchain_dart/issues/308)). ([32dc37d8](https://github.com/davidmigloz/langchain_dart/commit/32dc37d8ca3e52929ab69d695f66627ff7e897fa))\n\n## 0.3.3\n\n - **FEAT**: Support Anyscale in ChatOpenAI and OpenAIEmbeddings wrappers ([#305](https://github.com/davidmigloz/langchain_dart/issues/305)). ([7daa3eb0](https://github.com/davidmigloz/langchain_dart/commit/7daa3eb052c32baa7473d7532c795b7f242ed9fc))\n - **FEAT**: Support Together AI API in ChatOpenAI wrapper ([#297](https://github.com/davidmigloz/langchain_dart/issues/297)). ([28ab56af](https://github.com/davidmigloz/langchain_dart/commit/28ab56aff35c93a6835e5f22397d47da9e45fe40))\n - **FEAT**: Support Together AI in OpenAIEmbeddings wrapper ([#304](https://github.com/davidmigloz/langchain_dart/issues/304)). ([ddc761d6](https://github.com/davidmigloz/langchain_dart/commit/ddc761d65154be2df1efc202d9e7e6b2e60e7ac2))\n\n## 0.3.2\n\n - **FEAT**: Support OpenRouter API in ChatOpenAI wrapper ([#292](https://github.com/davidmigloz/langchain_dart/issues/292)). 
([c6e7e5be](https://github.com/davidmigloz/langchain_dart/commit/c6e7e5beeb03c32a93b062aab874cae3da0a52d9)) ([docs](https://langchaindart.dev/#/modules/model_io/models/chat_models/integrations/open_router))\n - **REFACTOR**: Make all LLM options fields nullable and add copyWith ([#284](https://github.com/davidmigloz/langchain_dart/issues/284)). ([57eceb9b](https://github.com/davidmigloz/langchain_dart/commit/57eceb9b47da42cf19f64ddd88bfbd2c9676fd5e))\n - **REFACTOR**: Migrate tokenizer to langchain_tiktoken package ([#285](https://github.com/davidmigloz/langchain_dart/issues/285)). ([6a3b6466](https://github.com/davidmigloz/langchain_dart/commit/6a3b6466e3e4cfddda2f506adbf2eb563814d02f))\n - **FEAT**: Update internal dependencies ([#291](https://github.com/davidmigloz/langchain_dart/issues/291)). ([69621cc6](https://github.com/davidmigloz/langchain_dart/commit/69621cc61659980d046518ee20ce055e806cba1f))\n\n## 0.3.1+1\n\n - Update a dependency to the latest release.\n\n## 0.3.1\n\n - Update a dependency to the latest release.\n\n## 0.3.0\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **REFACTOR**: Make MIME Type mandatory for base64 images in prompt ([#269](https://github.com/davidmigloz/langchain_dart/issues/269)). ([2fe076bb](https://github.com/davidmigloz/langchain_dart/commit/2fe076bb8d2ddacfee6ec077c3f564bff919dace))\n - **FEAT**: Allow to pass options to countTokens method ([#268](https://github.com/davidmigloz/langchain_dart/issues/268)). ([4ecb123b](https://github.com/davidmigloz/langchain_dart/commit/4ecb123bd34f0b01d377045b97dace89676d5d16))\n\n## 0.2.0+1\n\n - **FIX**: Fix ChatOpenAI not considering functions from default options ([#257](https://github.com/davidmigloz/langchain_dart/issues/257)). 
([cd864783](https://github.com/davidmigloz/langchain_dart/commit/cd864783f7190f7e8aa8988ba5c2cb5f7bfb3fad))\n\n## 0.2.0\n\n> Note: This release has breaking changes.\n>\n> Migration guides:  \n> - [`OpenAI`](https://github.com/davidmigloz/langchain_dart/issues/232)\n> - [`ChatOpenAI`](https://github.com/davidmigloz/langchain_dart/issues/240)\n> - [`OpenAIDallETool`](https://github.com/davidmigloz/langchain_dart/issues/244)\n\n - **BREAKING** **FEAT**: Move all model config options to OpenAIOptions ([#232](https://github.com/davidmigloz/langchain_dart/issues/232)). ([16e3e8e4](https://github.com/davidmigloz/langchain_dart/commit/16e3e8e449790444f2c1370f08430d42f15b6f5c))\n - **BREAKING** **FEAT**: Move all model config options to ChatOpenAIOptions ([#240](https://github.com/davidmigloz/langchain_dart/issues/240)). ([dd6a21a7](https://github.com/davidmigloz/langchain_dart/commit/dd6a21a75de28a0e605b287f75d9770bce4bb706))\n - **BREAKING** **FEAT**: Allow to pass call options to tools ([#243](https://github.com/davidmigloz/langchain_dart/issues/243)). ([4a01adb9](https://github.com/davidmigloz/langchain_dart/commit/4a01adb9346b33cdb148d0f0aa7196e2b16867a9))\n - **BREAKING** **FEAT**: Move all DallE config options to OpenAIDallEToolOptions ([#244](https://github.com/davidmigloz/langchain_dart/issues/244)). ([c24877c6](https://github.com/davidmigloz/langchain_dart/commit/c24877c6bb1063a06d2be7320cbf8ef94fa04ae0))\n - **FEAT**: Allow to mutate default options ([#256](https://github.com/davidmigloz/langchain_dart/issues/256)). ([cb5e4058](https://github.com/davidmigloz/langchain_dart/commit/cb5e4058fb89f33c8495ac22fb240ce92daa683c))\n - **FEAT**: Allow to update OpenAI key without having to recreate the wrapper ([#246](https://github.com/davidmigloz/langchain_dart/issues/246)). 
([05739bd1](https://github.com/davidmigloz/langchain_dart/commit/05739bd1a43a82e1e5ba24543ccc985d48d48286))\n - **FIX**: PromptTemplate stream should only emit if it has all inputs ([#247](https://github.com/davidmigloz/langchain_dart/issues/247)). ([a56a2ec5](https://github.com/davidmigloz/langchain_dart/commit/a56a2ec5e084d5c140b0e8469707ecaa19dfdaff))\n\n## 0.1.2+2\n\n - Update a dependency to the latest release.\n\n## 0.1.2+1\n\n - **FIX**: Decode JSON responses as UTF-8 ([#234](https://github.com/davidmigloz/langchain_dart/issues/234)). ([0bca67f4](https://github.com/davidmigloz/langchain_dart/commit/0bca67f4ea682ebd5a8b9d3c7319c9511229b0ba))\n\n## 0.1.2\n\n - **FEAT**: Add support for OpenAIDallETool ([#231](https://github.com/davidmigloz/langchain_dart/issues/231)). ([541e8d77](https://github.com/davidmigloz/langchain_dart/commit/541e8d77d76246b25ffa8c4d3715b5ca728cfc3a))\n - **FEAT**: Support implementing custom agents using LCEL ([#230](https://github.com/davidmigloz/langchain_dart/issues/230)). ([625eeeb4](https://github.com/davidmigloz/langchain_dart/commit/625eeeb4ffa9d92c6fd8da003fa471f5d4752257))\n\n## 0.1.1+1\n\n - **FIX**: Fetch requests with big payloads dropping connection ([#226](https://github.com/davidmigloz/langchain_dart/issues/226)). ([1e771098](https://github.com/davidmigloz/langchain_dart/commit/1e771098d1090dd79846fca6520a1195efc5ac1e))\n\n## 0.1.1\n\n - **FEAT**: Add Azure OpenAI API support ([#224](https://github.com/davidmigloz/langchain_dart/issues/224)). ([333fb7af](https://github.com/davidmigloz/langchain_dart/commit/333fb7af4b1edbdc716221609f2dc8f3923822cf))\n\n## 0.1.0+1\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n## 0.1.0\n\n> Note: This release has breaking changes.  
\n> [Migration guide](https://github.com/davidmigloz/langchain_dart/issues/220)\n\n - **REFACTOR**: Align openai_dart client breaking changes ([#219](https://github.com/davidmigloz/langchain_dart/issues/219)). ([172db27f](https://github.com/davidmigloz/langchain_dart/commit/172db27f6da429e16dcda55678a73e1d885bb6d9))\n - **BREAKING** **FEAT**: Add multi-modal messages support with OpenAI Vision ([#220](https://github.com/davidmigloz/langchain_dart/issues/220)). ([6da2e069](https://github.com/davidmigloz/langchain_dart/commit/6da2e069932782eed8c27da45c56b4c290373fac))\n\n## 0.0.15+2\n\n - Update a dependency to the latest release.\n\n## 0.0.15+1\n\n - **REFACTOR**: Rename ChatCompletionFunction to FunctionObject (internal) ([#206](https://github.com/davidmigloz/langchain_dart/issues/206)). ([0f06df3f](https://github.com/davidmigloz/langchain_dart/commit/0f06df3f9b32e5887976936b5fd2e6aa5a4f4f5b))\n\n## 0.0.15\n\n - **FEAT**: Add streaming support to OpenAI ([#196](https://github.com/davidmigloz/langchain_dart/issues/196)). ([b21fcb38](https://github.com/davidmigloz/langchain_dart/commit/b21fcb387685af8706db62caf33b24e0ccf9c73f))\n - **FEAT**: Support seed and system_fingerprint in OpenAI wrapper ([#204](https://github.com/davidmigloz/langchain_dart/issues/204)). ([c31b6795](https://github.com/davidmigloz/langchain_dart/commit/c31b67959ca7ce3d42e9832669fd18de11f41984))\n - **FEAT**: Add streaming support to ChatOpenAI ([#197](https://github.com/davidmigloz/langchain_dart/issues/197)). ([2268da78](https://github.com/davidmigloz/langchain_dart/commit/2268da783703b76422448128ea929e6fb6f805b6))\n - **FEAT**: Support seed, system_fingerprint and JSON Mode in ChatOpenAI ([#205](https://github.com/davidmigloz/langchain_dart/issues/205)). 
([3332c228](https://github.com/davidmigloz/langchain_dart/commit/3332c2281b8a345ac7a6789202cbd5ac2225296b))\n\n## 0.0.14+1\n\n - **FIX**: Revert OpenAI maxTokens default to 256 ([#189](https://github.com/davidmigloz/langchain_dart/issues/189)). ([ab2ce6d4](https://github.com/davidmigloz/langchain_dart/commit/ab2ce6d4231ca0e2aff7aa3d9831625b10d0524d))\n - **DOCS**: Update CHANGELOG.md. ([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n## 0.0.14\n\n - **REFACTOR**: Remove dependency on dart_openai ([#186](https://github.com/davidmigloz/langchain_dart/issues/186)). ([273cfa2e](https://github.com/davidmigloz/langchain_dart/commit/273cfa2e84421161c979ddc41168d1b38981ca04))\n - **FEAT**: Migrate ChatOpenAI to openai_dart client ([#185](https://github.com/davidmigloz/langchain_dart/issues/185)). ([de8f487d](https://github.com/davidmigloz/langchain_dart/commit/de8f487dfddb10bee049539356a20b77b7556cad))\n - **FEAT**: Migrate OpenAI to openai_dart client ([#184](https://github.com/davidmigloz/langchain_dart/issues/184)). ([6c90b371](https://github.com/davidmigloz/langchain_dart/commit/6c90b37183eb47354df91fc4870065afecaf3673))\n - **FEAT**: Migrate OpenAIEmbeddings to openai_dart client ([#183](https://github.com/davidmigloz/langchain_dart/issues/183)). ([8f626fe8](https://github.com/davidmigloz/langchain_dart/commit/8f626fe8253f6b11fba0fcab0e143db93d3bce08))\n\n## 0.0.13\n\n - **FEAT**: Implement LangChain Expression Language (LCEL) ([#163](https://github.com/davidmigloz/langchain_dart/issues/163)). ([85ea41af](https://github.com/davidmigloz/langchain_dart/commit/85ea41af9f5e2ff42bba620a60f765ca0f67c86c))\n\n## 0.0.12\n\n> Note: This release has breaking changes.\n\n - **DOCS**: Add topics to pubspecs. 
([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n - **BREAKING** **REFACTOR**: Change embedDocuments input to `List<Document>` ([#153](https://github.com/davidmigloz/langchain_dart/issues/153)). ([1b5d6fbf](https://github.com/davidmigloz/langchain_dart/commit/1b5d6fbf20bcbb7734581f91d66eff3a86731fec))\n\n## 0.0.11\n\n - **FEAT**: Add ability to specify user in OpenAI and ChatOpenAI ([#143](https://github.com/davidmigloz/langchain_dart/issues/143)). ([457ab54e](https://github.com/davidmigloz/langchain_dart/commit/457ab54e45afd5aa382e284806dc73cfe0905c09))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n\n## 0.0.10\n\n - **DOCS**: Update readme. ([b61eda5b](https://github.com/davidmigloz/langchain_dart/commit/b61eda5ba506b4602592511c6a9be1e7aae5bf57))\n\n## 0.0.9\n\n - **DOCS**: Update changelog. ([b211ab47](https://github.com/davidmigloz/langchain_dart/commit/b211ab4739b8feb17d00089ecca548716766272d))\n\n## 0.0.8+1\n\n - **FIX**: OpenAIFunctionsAgent wrong prompt order with no memory ([#134](https://github.com/davidmigloz/langchain_dart/issues/134)). ([8c9dcf22](https://github.com/davidmigloz/langchain_dart/commit/8c9dcf22e5fb0229bb98e5fd22492845d44bc531))\n\n## 0.0.8\n\n - **REFACTOR**: Fix Dart 3.1.0 linter issues ([#125](https://github.com/davidmigloz/langchain_dart/issues/125)). ([cc32f3f1](https://github.com/davidmigloz/langchain_dart/commit/cc32f3f13240c28cf174a9dbffc7d61bc061f843))\n\n## 0.0.7+1\n\n - Update a dependency to the latest release.\n\n## 0.0.7\n\n- Update a dependency to the latest release.\n\n## 0.0.6\n\n - **REFACTOR**: Always await or explicitly discard Futures ([#106](https://github.com/davidmigloz/langchain_dart/issues/106)). 
([989e93db](https://github.com/davidmigloz/langchain_dart/commit/989e93dbf6b5d61f053550219d88842156aeb492))\n - **FIX**: Fix OpenAIQAWithSourcesChain returning empty strings ([#113](https://github.com/davidmigloz/langchain_dart/issues/113)). ([6181ff8d](https://github.com/davidmigloz/langchain_dart/commit/6181ff8df77653d38cd84cb066776c04c0ff74ad))\n - **FEAT**: Integrate Google Vertex AI PaLM Embeddings ([#100](https://github.com/davidmigloz/langchain_dart/issues/100)). ([d777eccc](https://github.com/davidmigloz/langchain_dart/commit/d777eccc0c81c58b322f28e6e3c4a8763f3f84b7))\n - **FEAT**: Integrate Google Vertex AI PaLM Chat Model ([#99](https://github.com/davidmigloz/langchain_dart/issues/99)). ([3897595d](https://github.com/davidmigloz/langchain_dart/commit/3897595db597d5957ef80ae7a1de35c5f41265b8))\n - **FEAT**: Integrate Google Vertex AI PaLM Text model ([#98](https://github.com/davidmigloz/langchain_dart/issues/98)). ([b2746c23](https://github.com/davidmigloz/langchain_dart/commit/b2746c235d68045ba20afd1f2be7c24dcccb5f24))\n\n## 0.0.5+1\n\n - **FIX**: ChatOpenAIOptions class not exported ([#105](https://github.com/davidmigloz/langchain_dart/issues/105)). ([dfd77076](https://github.com/davidmigloz/langchain_dart/commit/dfd77076dfb60cd71aed3654f78c562ce0bc88bf))\n - **FIX**: OpenAIOptions class not exported ([#104](https://github.com/davidmigloz/langchain_dart/issues/104)). ([e50efc3d](https://github.com/davidmigloz/langchain_dart/commit/e50efc3ddf0b13ece43298b2e3fee531e944601d))\n\n## 0.0.5\n\n - **FIX**: FunctionChatMessage not saved properly in memory ([#88](https://github.com/davidmigloz/langchain_dart/issues/88)). ([d7b763de](https://github.com/davidmigloz/langchain_dart/commit/d7b763ded1abd59a964afd781558b3559a65d9ec))\n - **FEAT**: Update AgentExecutor constructor to use agent's tools ([#89](https://github.com/davidmigloz/langchain_dart/issues/89)). 
([3af56a45](https://github.com/davidmigloz/langchain_dart/commit/3af56a45930fff84b11f6bec29c50502a490c2b4))\n - **DOCS**: Add example of using memory in OpenAIFunctionsAgent ([#91](https://github.com/davidmigloz/langchain_dart/issues/91)). ([898d5350](https://github.com/davidmigloz/langchain_dart/commit/898d53502713ec2fd1ecc93e76e7f941123b81a5))\n\n## 0.0.4\n\n - **FIX**: systemChatMessage was ignored in OpenAIFunctionsAgent ([#86](https://github.com/davidmigloz/langchain_dart/issues/86)). ([cfe1e009](https://github.com/davidmigloz/langchain_dart/commit/cfe1e00972d481f83b9dc9e225a32b7077aa5fd4))\n - **FEAT**: Support LLMChain in OpenAIFunctionsAgent and memory. ([bd4a1cb9](https://github.com/davidmigloz/langchain_dart/commit/bd4a1cb9101ba385ce9613f9aa0b7e5474380f32))\n - **FEAT**: Return ChatMessage when LLMChain used with ChatModel. ([bb5f4d23](https://github.com/davidmigloz/langchain_dart/commit/bb5f4d2325ae1f615159f2ffd11cc8ec4e87ed3c))\n\n## 0.0.3\n\n- **FEAT**: Update internal dependencies (including http to 1.1.0). ([8f3e8bc8](https://github.com/davidmigloz/langchain_dart/commit/8f3e8bc811df5c8bdba2c7e33b6c53ea0c2edad4))\n\n## 0.0.2\n\n - **FIX**: OpenAIQAWithSourcesChain throws exception. ([45c6cb9d](https://github.com/davidmigloz/langchain_dart/commit/45c6cb9d32be670902dd2fe4cb92597765590d85))\n - **FEAT**: Support estimating the number of tokens for a given prompt ([#3](https://github.com/davidmigloz/langchain_dart/issues/3)). 
([e22f22c8](https://github.com/davidmigloz/langchain_dart/commit/e22f22c89f188a019b96a7c0003dbd26471bebb7))\n\n## 0.0.1\n\n- Initial public release.\n\nCheck out the announcement post for all the details:\nhttps://blog.langchaindart.dev/introducing-langchain-dart-6b1d34fc41ef\n\n## 0.0.1-dev.7\n\n- Add support for OpenAIAgent class (#34).\n- Export OpenAI Client class (allowing to define custom base URL).\n\n## 0.0.1-dev.6\n\n- Add support for OpenAIQAWithSourcesChain (#53).\n\n## 0.0.1-dev.5\n\n- Add support for OpenAIEmbeddings (#38).\n- Add support for OpenAI functions (#35).\n\n## 0.0.1-dev.4\n\n- Improve API documentation.\n\n## 0.0.1-dev.3\n\n- Add support for ChatPromptTemplate class (#8).\n\n## 0.0.1-dev.2\n\n- Add support for OpenAI LLM (`OpenAI` class).\n- Add support for OpenAI chat model (`ChatOpenAI` class).\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_openai/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_openai/MIGRATION.md",
    "content": "# langchain_openai Migration Guide\n\n## New: ChatOpenAIResponses (Responses API)\n\nA new `ChatOpenAIResponses` class has been added alongside the existing\n`ChatOpenAI` class. It uses OpenAI's\n[Responses API](https://platform.openai.com/docs/api-reference/responses)\ninstead of the Chat Completions API.\n\n### Usage\n\n```dart\nfinal chatModel = ChatOpenAIResponses(apiKey: '...');\nfinal prompt = PromptValue.chat([\n  ChatMessage.system('You are a helpful assistant.'),\n  ChatMessage.humanText('Hello!'),\n]);\nfinal res = await chatModel.invoke(prompt);\n```\n\n### Options\n\n`ChatOpenAIResponsesOptions` replaces `ChatOpenAIOptions` when using\nthe Responses API. Key differences:\n\n**New options:**\n- `instructions` — system prompt via request parameter\n- `previousResponseId` — multi-turn conversation support\n- `truncation` — input truncation strategy (`auto` / `disabled`)\n- `maxOutputTokens` (replaces `maxTokens`)\n\n**Removed options** (not supported by the Responses API):\n- `n`\n- `logitBias`\n- `logprobs` (bool)\n- `seed`\n- `stop`\n- `user`\n\n### Multi-turn conversations\n\n```dart\nfinal res1 = await chatModel.invoke(prompt1);\nfinal res2 = await chatModel.invoke(\n  prompt2,\n  options: ChatOpenAIResponsesOptions(previousResponseId: res1.id),\n);\n```\n\n## Exception type change\n\nThe re-exported exception type has changed from `OpenAIClientException` to\n`OpenAIException`. If your code catches `OpenAIClientException`, update it to\ncatch `OpenAIException` instead:\n\n```dart\n// Before\ntry {\n  await chatModel.invoke(prompt);\n} on OpenAIClientException catch (e) { ... }\n\n// After\ntry {\n  await chatModel.invoke(prompt);\n} on OpenAIException catch (e) { ... 
}\n```\n\n### Which class to use?\n\n- Use `ChatOpenAI` for the Chat Completions API (stable, widely supported by\n  OpenAI-compatible providers).\n- Use `ChatOpenAIResponses` for the Responses API (newer, supports\n  `previousResponseId` for multi-turn, and additional features).\n"
  },
  {
    "path": "packages/langchain_openai/README.md",
    "content": "# 🦜️🔗 LangChain.dart / OpenAI\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_openai](https://img.shields.io/pub/v/langchain_openai.svg)](https://pub.dev/packages/langchain_openai)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nOpenAI module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- LLMs:\n  * `OpenAI`: wrapper around OpenAI Completions API.\n- Chat models:\n  * `ChatOpenAI`: wrapper around OpenAI Chat API.\n- Embeddings:\n  * `OpenAIEmbeddings`: wrapper around OpenAI Embeddings API.\n- Chains:\n  * `OpenAIQAWithStructureChain`: a chain that answers questions in the specified\n    structure.\n  * `OpenAIQAWithSourcesChain`: a chain that answers questions providing sources.\n- Tools:\n  * `OpenAIDallETool`: a tool that uses DallE to generate images from text.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_openai/example/langchain_openai_example.dart",
    "content": "// ignore_for_file: avoid_print, unused_element\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\n\nvoid main() async {\n  // Uncomment the example you want to run:\n  await _example1();\n  // await _example2();\n}\n\n/// The most basic building block of LangChain is calling an LLM on some input.\nFuture<void> _example1() async {\n  final openAiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final llm = OpenAI(\n    apiKey: openAiApiKey,\n    defaultOptions: const OpenAIOptions(temperature: 0.9),\n  );\n  final LLMResult res = await llm.invoke(PromptValue.string('Tell me a joke'));\n  print(res);\n}\n\n/// The most frequent use case is to create a chat-bot.\n/// This is the most basic one.\nFuture<void> _example2() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final chat = ChatOpenAI(\n    apiKey: openaiApiKey,\n    defaultOptions: const ChatOpenAIOptions(temperature: 0),\n  );\n\n  while (true) {\n    stdout.write('> ');\n    final usrMsg = ChatMessage.humanText(stdin.readLineSync() ?? '');\n    final aiMsg = await chat([usrMsg]);\n    print(aiMsg.content);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/fix_data/fix.yaml",
    "content": "version: 1\n\ntransforms:\n  - title: \"Migrate to 'ToolsAgent'\"\n    date: 2024-08-21\n    element:\n      uris: ['langchain_openai.dart', 'src/agents/tools.dart']\n      class: 'OpenAIToolsAgent'\n    changes:\n      - kind: 'rename'\n        newName: 'ToolsAgent'\n  - title: \"Migrate to 'ToolsAgentOutputParser'\"\n    date: 2024-08-21\n    element:\n      uris: ['langchain_openai.dart', 'src/agents/tools.dart']\n      class: 'OpenAIToolsAgentOutputParser'\n    changes:\n      - kind: 'rename'\n        newName: 'ToolsAgentOutputParser'\n"
  },
  {
    "path": "packages/langchain_openai/lib/langchain_openai.dart",
    "content": "/// LangChain.dart integration module for OpenAI (GPT-4o, Embeddings, DALL·E, etc.).\nlibrary;\n\nexport 'package:openai_dart/openai_dart.dart' show OpenAIException;\n\nexport 'src/agents/agents.dart';\nexport 'src/chains/chains.dart';\nexport 'src/chat_models/chat_models.dart';\nexport 'src/embeddings/embeddings.dart';\nexport 'src/llms/llms.dart';\nexport 'src/tools/tools.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/agents/agents.dart",
    "content": "export 'tools.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/agents/tools.dart",
    "content": "// ignore_for_file: deprecated_member_use_from_same_package\nimport 'package:langchain_core/agents.dart';\nimport 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/exceptions.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport '../chat_models/chat_models.dart';\n\nconst _systemChatMessagePromptTemplate = SystemChatMessagePromptTemplate(\n  prompt: PromptTemplate(\n    inputVariables: {},\n    template: 'You are a helpful AI assistant',\n  ),\n);\n\n/// {@template openai_tools_agent}\n/// > Note: This class is deprecated. Use `ToolsAgent` (from the `langchain`\n/// > package instead). It works with the same API as this class, but can be\n/// > used with any provider that supports tool calling.\n/// > You can run `dart fix --apply` to automatically update your code.\n///\n/// An Agent driven by OpenAI's Tools powered API.\n///\n/// Example:\n/// ```dart\n/// final llm = ChatOpenAI(\n///   apiKey: openaiApiKey,\n///   model: 'gpt-4-turbo',\n///   temperature: 0,\n/// );\n/// final tools = [CalculatorTool()];\n/// final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n/// final executor = AgentExecutor(agent: agent);\n/// final res = await executor.run('What is 40 raised to the 0.43 power? ');\n/// ```\n///\n/// You can easily add memory to the agent using the memory parameter from the\n/// [OpenAIToolsAgent.fromLLMAndTools] constructor. Make sure you enable\n/// [BaseChatMemory.returnMessages] on your memory, as the agent works with\n/// [ChatMessage]s. The default prompt template already takes care of adding\n/// the history to the prompt. 
For example:\n/// ```dart\n/// final memory = ConversationBufferMemory(returnMessages: true);\n/// final agent = OpenAIToolsAgent.fromLLMAndTools(\n///   llm: llm,\n///   tools: tools,\n///   memory: memory,\n/// );\n/// ```\n///\n/// If you need to use your own [llmChain] make sure your prompt template\n/// includes:\n/// - `MessagePlaceholder(variableName: agentInputKey)`: the input to the agent.\n/// - If you are using memory:\n///   * `MessagesPlaceholder(variableName: '{memoryKey}')`: the history of chat\n///      messages.\n/// - If you are not using memory:\n///   * `MessagesPlaceholder(variableName: BaseActionAgent.agentScratchpadInputKey)`:\n///     the intermediary work of the agent (if you are using memory, the agent\n///     uses the memory to store the intermediary work).\n/// Example:\n/// ```dart\n/// ChatPromptTemplate.fromTemplates([\n///   (ChatMessageType.system, 'You are a helpful AI assistant'),\n///   (ChatMessageType.messagesPlaceholder, 'history'),\n///   (ChatMessageType.messagePlaceholder, 'input'),\n/// ]);\n/// ```\n///\n/// You can use [OpenAIToolsAgent.createPrompt] to build the prompt\n/// template if you only need to customize the system message or add some\n/// extra messages.\n/// {@endtemplate}\n@Deprecated('Use ToolsAgent instead')\nclass OpenAIToolsAgent extends BaseSingleActionAgent {\n  /// {@macro openai_functions_agent}\n  @Deprecated('Use ToolsAgent instead')\n  OpenAIToolsAgent({required this.llmChain, required super.tools})\n    : _parser = const OpenAIToolsAgentOutputParser(),\n      assert(\n        llmChain.memory != null ||\n            llmChain.prompt.inputVariables.contains(\n              BaseActionAgent.agentScratchpadInputKey,\n            ),\n        '`${BaseActionAgent.agentScratchpadInputKey}` should be one of the '\n        'variables in the prompt, got ${llmChain.prompt.inputVariables}',\n      ),\n      assert(\n        llmChain.memory == null || llmChain.memory!.returnMessages,\n        'The memory must 
have `returnMessages` set to true',\n      );\n\n  /// Chain to use to call the LLM.\n  ///\n  /// If the chain does not have a memory, the prompt MUST include a variable\n  /// called [BaseActionAgent.agentScratchpadInputKey] where the agent can put\n  /// its intermediary work.\n  ///\n  /// If the chain has a memory, the agent will use the memory to store the\n  /// intermediary work.\n  ///\n  /// The memory must have [BaseChatMemory.returnMessages] set to true for\n  /// the agent to work properly.\n  final LLMChain<ChatOpenAI, ChatOpenAIOptions, BaseChatMemory> llmChain;\n\n  /// Parser to use to parse the output of the LLM.\n  final OpenAIToolsAgentOutputParser _parser;\n\n  /// The key for the input to the agent.\n  static const agentInputKey = 'input';\n\n  @override\n  Set<String> get inputKeys => {agentInputKey};\n\n  /// Construct an [OpenAIToolsAgent] from an [llm] and [tools].\n  ///\n  /// - [llm] - The model to use for the agent.\n  /// - [tools] - The tools the agent has access to.\n  /// - [memory] - The memory to use for the agent.\n  /// - [systemChatMessage] message to use as the system message that will be\n  ///   the first in the prompt. Default: \"You are a helpful AI assistant\".\n  /// - [extraPromptMessages] prompt messages that will be placed between the\n  ///   system message and the input from the agent.\n  @Deprecated('Use ToolsAgent.fromLLMAndTools() instead')\n  factory OpenAIToolsAgent.fromLLMAndTools({\n    required final ChatOpenAI llm,\n    required final List<Tool> tools,\n    final BaseChatMemory? memory,\n    final SystemChatMessagePromptTemplate systemChatMessage =\n        _systemChatMessagePromptTemplate,\n    final List<ChatMessagePromptTemplate>? 
extraPromptMessages,\n  }) {\n    return OpenAIToolsAgent(\n      llmChain: LLMChain(\n        llm: llm,\n        llmOptions: ChatOpenAIOptions(\n          model: llm.defaultOptions.model,\n          tools: tools,\n        ),\n        prompt: createPrompt(\n          systemChatMessage: systemChatMessage,\n          extraPromptMessages: extraPromptMessages,\n          memory: memory,\n        ),\n        memory: memory,\n      ),\n      tools: tools,\n    );\n  }\n\n  @override\n  Future<List<BaseAgentAction>> plan(final AgentPlanInput input) async {\n    final llmChainInputs = _constructLlmChainInputs(\n      input.intermediateSteps,\n      input.inputs,\n    );\n    final ChainValues output = await llmChain.invoke(llmChainInputs);\n    final predictedMessage = output[LLMChain.defaultOutputKey] as AIChatMessage;\n    return _parser.parseChatMessage(predictedMessage);\n  }\n\n  Map<String, dynamic> _constructLlmChainInputs(\n    final List<AgentStep> intermediateSteps,\n    final InputValues inputs,\n  ) {\n    final dynamic agentInput;\n\n    // If there is a memory, we pass the last agent step as a function message.\n    // Otherwise, we pass the input as a human message.\n    if (llmChain.memory != null && intermediateSteps.isNotEmpty) {\n      final lastStep = intermediateSteps.last;\n      final functionMsg = ChatMessage.tool(\n        toolCallId: lastStep.action.id,\n        content: lastStep.observation,\n      );\n      agentInput = functionMsg;\n    } else {\n      agentInput = switch (inputs[agentInputKey]) {\n        final String inputStr => ChatMessage.humanText(inputStr),\n        final ChatMessage inputMsg => inputMsg,\n        final List<ChatMessage> inputMsgs => inputMsgs,\n        _ => throw LangChainException(\n          message:\n              'Agent expected a String or ChatMessage as input,'\n              ' got ${inputs[agentInputKey]}',\n        ),\n      };\n    }\n\n    return {\n      ...inputs,\n      agentInputKey: agentInput,\n      if 
(llmChain.memory == null)\n        BaseActionAgent.agentScratchpadInputKey: _constructScratchPad(\n          intermediateSteps,\n        ),\n    };\n  }\n\n  List<ChatMessage> _constructScratchPad(\n    final List<AgentStep> intermediateSteps,\n  ) {\n    return [\n      ...intermediateSteps\n          .map((final s) {\n            return s.action.messageLog +\n                [\n                  ChatMessage.tool(\n                    toolCallId: s.action.id,\n                    content: s.observation,\n                  ),\n                ];\n          })\n          .expand((final m) => m),\n    ];\n  }\n\n  @override\n  String get agentType => 'openai-tools';\n\n  /// Creates prompt for this agent.\n  ///\n  /// It takes care of adding the necessary placeholders to handle the\n  /// intermediary work of the agent or the memory.\n  ///\n  /// - [systemChatMessage] message to use as the system message that will be\n  ///   the first in the prompt.\n  /// - [extraPromptMessages] prompt messages that will be placed between the\n  ///   system message and the new human input.\n  /// - [memory] optional memory to use for the agent.\n  static BasePromptTemplate createPrompt({\n    final SystemChatMessagePromptTemplate systemChatMessage =\n        _systemChatMessagePromptTemplate,\n    final List<ChatMessagePromptTemplate>? extraPromptMessages,\n    final BaseChatMemory? memory,\n  }) {\n    return ChatPromptTemplate.fromPromptMessages([\n      systemChatMessage,\n      ...?extraPromptMessages,\n      for (final memoryKey in memory?.memoryKeys ?? {})\n        MessagesPlaceholder(variableName: memoryKey),\n      const MessagePlaceholder(variableName: agentInputKey),\n      if (memory == null)\n        const MessagesPlaceholder(\n          variableName: BaseActionAgent.agentScratchpadInputKey,\n        ),\n    ]);\n  }\n}\n\n/// {@template openai_tools_agent_output_parser}\n/// > Note: This class is deprecated. 
Use `ToolsAgentOutputParser` (from the\n/// > `langchain` package instead). It is equivalent to this class, but\n/// > prepared to work with the `ToolsAgent`.\n/// > You can run `dart fix --apply` to automatically update your code.\n///\n/// Parser for [OpenAIToolsAgent].\n///\n/// It parses the output of the LLM and returns the corresponding\n/// [BaseAgentAction] to be executed.\n/// {@endtemplate}\n@Deprecated('Use ToolsAgentOutputParser instead')\nclass OpenAIToolsAgentOutputParser\n    extends\n        BaseOutputParser<\n          ChatResult,\n          OutputParserOptions,\n          List<BaseAgentAction>\n        > {\n  /// {@macro openai_tools_agent_output_parser}\n  @Deprecated('Use ToolsAgentOutputParser instead')\n  const OpenAIToolsAgentOutputParser()\n    : super(defaultOptions: const OutputParserOptions());\n\n  @override\n  Future<List<BaseAgentAction>> invoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) {\n    return parseChatMessage(input.output);\n  }\n\n  /// Parses the [message] and returns the corresponding [BaseAgentAction].\n  Future<List<BaseAgentAction>> parseChatMessage(\n    final AIChatMessage message,\n  ) async {\n    final toolCalls = message.toolCalls;\n\n    if (toolCalls.isNotEmpty) {\n      return toolCalls\n          .map((final toolCall) {\n            return AgentAction(\n              id: toolCall.id,\n              tool: toolCall.name,\n              toolInput: toolCall.arguments,\n              log:\n                  'Invoking: `${toolCall.name}` '\n                  'with `${toolCall.arguments}`\\n'\n                  'Responded: ${message.content}\\n',\n              messageLog: [message],\n            );\n          })\n          .toList(growable: false);\n    } else {\n      return [\n        AgentFinish(\n          returnValues: {'output': message.content},\n          log: message.content,\n        ),\n      ];\n    }\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chains/chains.dart",
    "content": "export 'qa_with_sources.dart';\nexport 'qa_with_structure.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chains/qa_with_sources.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport 'qa_with_structure.dart';\n\n/// {@template openai_qa_with_sources_chain}\n/// A chain that answers questions returning a [QAWithSources] object\n/// containing the answers with the sources used to answer the question.\n///\n/// Example:\n/// ```dart\n/// final llm = ChatOpenAI(\n///   apiKey: openaiApiKey,\n///   model: 'gpt-4o-mini',\n///   temperature: 0,\n/// );\n/// final qaChain = OpenAIQAWithSourcesChain(llm: llm);\n/// final docPrompt = PromptTemplate.fromTemplate(\n///   'Content: {page_content}\\nSource: {source}',\n/// );\n/// final finalQAChain = StuffDocumentsChain(\n///   llmChain: qaChain,\n///   documentPrompt: docPrompt,\n/// );\n/// final retrievalQA = RetrievalQAChain(\n///   retriever: vectorStore.asRetriever(),\n///   combineDocumentsChain: finalQAChain,\n/// );\n/// const query = 'What did President Biden say about Russia?';\n/// final res = await retrievalQA(query);\n/// ```\n/// {@endtemplate}\nclass OpenAIQAWithSourcesChain extends OpenAIQAWithStructureChain {\n  /// {@macro openai_qa_with_sources_chain}\n  OpenAIQAWithSourcesChain({required super.llm})\n    : super(\n        tool: const ToolSpec(\n          name: 'answer_with_sources',\n          description: 'Answers a question with the sources used to answer it',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'answer': {\n                'type': 'string',\n                'description': 'The answer to the question being asked',\n              },\n              'sources': {\n                'type': 'array',\n                'items': {'type': 'string'},\n                'description': 'The sources used to answer the question',\n              },\n            },\n            'required': ['answer', 'sources'],\n          },\n        ),\n        outputParser: 
QAWithSourcesOutputParser(),\n      );\n}\n\n/// {@template qa_with_sources}\n/// The answer to a question with the sources used to answer it.\n/// {@endtemplate}\nclass QAWithSources {\n  /// {@macro qa_with_sources}\n  const QAWithSources({required this.answer, required this.sources});\n\n  /// The answer to the question.\n  final String answer;\n\n  /// The sources used to answer the question.\n  final List<String> sources;\n\n  /// {@macro qa_with_sources}\n  factory QAWithSources.fromMap(final Map<String, dynamic> map) {\n    return QAWithSources(\n      answer: map['answer'] as String,\n      sources: List<String>.from(map['sources']),\n    );\n  }\n\n  @override\n  String toString() => answer;\n}\n\n/// {@template qa_with_sources_output_parser}\n/// A parser that converts the output of the OpenAI API into a [QAWithSources].\n/// {@endtemplate}\nclass QAWithSourcesOutputParser\n    extends BaseOutputParser<ChatResult, OutputParserOptions, QAWithSources> {\n  /// {@macro qa_with_sources_output_parser}\n  QAWithSourcesOutputParser()\n    : _toolsOutputParser = ToolsOutputParser(),\n      super(defaultOptions: const OutputParserOptions());\n\n  final ToolsOutputParser _toolsOutputParser;\n\n  @override\n  Future<QAWithSources> invoke(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) {\n    return _toolsOutputParser.invoke(input, options: options).then(_parse);\n  }\n\n  @override\n  Stream<QAWithSources> stream(\n    final ChatResult input, {\n    final OutputParserOptions? options,\n  }) {\n    return _toolsOutputParser.stream(input, options: options).map(_parse);\n  }\n\n  @override\n  Stream<QAWithSources> streamFromInputStream(\n    final Stream<ChatResult> inputStream, {\n    final OutputParserOptions? 
options,\n  }) {\n    return _toolsOutputParser\n        .streamFromInputStream(inputStream, options: options)\n        .map(_parse);\n  }\n\n  QAWithSources _parse(final List<ParsedToolCall> input) {\n    final arguments = input.firstOrNull?.arguments ?? const {};\n    return QAWithSources.fromMap(arguments);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chains/qa_with_structure.dart",
    "content": "import 'package:langchain_core/chains.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/memory.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\n\nimport '../chat_models/chat_models.dart';\n\n/// {@template openai_qa_with_structure_chain}\n/// A chain that answers questions returning the answers with the specified\n/// structure ([ToolSpec]).\n///\n/// OpenAI functions allows for structuring of response output. This is often\n/// useful in question answering when you want the answer to be returned with\n/// a specific structure (e.g. the answer and the sources used to answer the\n/// question).\n/// {@endtemplate}\nclass OpenAIQAWithStructureChain<S extends Object>\n    extends LLMChain<ChatOpenAI, ChatOpenAIOptions, BaseChatMemory> {\n  /// {@macro openai_qa_with_structure_chain}\n  OpenAIQAWithStructureChain({\n    required super.llm,\n    required final ToolSpec tool,\n    required BaseOutputParser<ChatResult, OutputParserOptions, S>\n    super.outputParser,\n    final BasePromptTemplate? prompt,\n  }) : super(\n         prompt: prompt ?? 
_getPrompt(),\n         llmOptions: ChatOpenAIOptions(\n           model: llm.defaultOptions.model,\n           tools: [tool],\n           toolChoice: ChatToolChoice.forced(name: tool.name),\n         ),\n       );\n\n  static BasePromptTemplate _getPrompt() {\n    return ChatPromptTemplate.fromPromptMessages([\n      SystemChatMessagePromptTemplate.fromTemplate(\n        'You are a world class algorithm to answer questions in a specific format.',\n      ),\n      HumanChatMessagePromptTemplate.fromTemplate(\n        'Answer question using the following context',\n      ),\n      HumanChatMessagePromptTemplate.fromTemplate('{context}'),\n      HumanChatMessagePromptTemplate.fromTemplate('Question: {question}'),\n      HumanChatMessagePromptTemplate.fromTemplate(\n        'Tips: Make sure to answer in the correct format',\n      ),\n    ]);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/chat_models.dart",
    "content": "export 'chat_openai.dart';\nexport 'chat_openai_responses.dart';\nexport 'chat_openai_responses_types.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/chat_openai.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_tiktoken/langchain_tiktoken.dart';\nimport 'package:openai_dart/openai_dart.dart';\nimport 'package:uuid/uuid.dart';\n\nimport '../utils/auth.dart';\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat).\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatOpenAI(apiKey: '...');\n/// final messages = [\n///   ChatMessage.system('You are a helpful assistant that translates English to French.'),\n///   ChatMessage.humanText('I love programming.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await llm.invoke(prompt);\n/// ```\n///\n/// - [Completions guide](https://platform.openai.com/docs/guides/gpt/chat-completions-api)\n/// - [Completions API docs](https://platform.openai.com/docs/api-reference/chat)\n///\n/// You can also use this wrapper to consume OpenAI-compatible APIs like\n/// [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/),\n/// [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api),\n/// [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/),\n/// [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc.\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// chat completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatOpenAI(\n///   apiKey: openaiApiKey,\n///   defaultOptions: const ChatOpenAIOptions(\n///     temperature: 0.9,\n///     maxTokens: 100,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatOpenAIOptions(seed: 9999),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final chatModel = ChatOpenAI(apiKey: openaiApiKey,);\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser,\n///   'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Authentication\n///\n/// The OpenAI API uses API keys for authentication. Visit your\n/// [API Keys](https://platform.openai.com/account/api-keys) page to retrieve\n/// the API key you'll use in your requests.\n///\n/// #### Organization (optional)\n///\n/// For users who belong to multiple organizations, you can specify which\n/// organization is used for an API request. 
Usage from these API requests will\n/// count against the specified organization's subscription quota.\n///\n/// ```dart\n/// final client = ChatOpenAI(\n///   apiKey: 'OPENAI_API_KEY',\n///   organization: 'org-dtDDtkEGoFccn5xaP5W1p3Rr',\n/// );\n/// ```\n///\n/// ### Advance\n///\n/// #### Azure OpenAI Service\n///\n/// OpenAI's models are also available as an [Azure service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview).\n///\n/// Although the Azure OpenAI API is similar to the official OpenAI API, there\n/// are subtle differences between them. This client is intended to be used\n/// with the official OpenAI API, but most of the functionality should work\n/// with the Azure OpenAI API as well.\n///\n/// If you want to use this client with the Azure OpenAI API (at your own risk),\n/// you can do so by instantiating the client as follows:\n///\n/// ```dart\n/// final client = ChatOpenAI(\n///   baseUrl: 'https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME',\n///   headers: { 'api-key': 'YOUR_API_KEY' },\n///   queryParams: { 'api-version': 'API_VERSION' },\n/// );\n/// ```\n///\n/// - `YOUR_RESOURCE_NAME`: This value can be found in the Keys & Endpoint\n///    section when examining your resource from the Azure portal.\n/// - `YOUR_DEPLOYMENT_NAME`: This value will correspond to the custom name\n///    you chose for your deployment when you deployed a model. This value can be found under Resource Management > Deployments in the Azure portal.\n/// - `YOUR_API_KEY`: This value can be found in the Keys & Endpoint section\n///    when examining your resource from the Azure portal.\n/// - `API_VERSION`: The Azure OpenAI API version to use (e.g. 
`2023-05-15`).\n///    Try to use the [latest version available](https://github.com/Azure/azure-rest-api-specs/tree/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference),\n///    it will probably be the closest to the official OpenAI API.\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = ChatOpenAI(\n///   apiKey: 'OPENAI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = ChatOpenAI(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass ChatOpenAI extends BaseChatModel<ChatOpenAIOptions> {\n  /// Create a new [ChatOpenAI] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your OpenAI API key. You can find your API key in the\n  ///   [OpenAI dashboard](https://platform.openai.com/account/api-keys).\n  /// - `organization`: your OpenAI organization ID (if applicable).\n  /// - [ChatOpenAI.encoding]\n  /// - [ChatOpenAI.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. 
You\n  ///   can use this to set custom query parameters (e.g. Azure OpenAI API\n  ///   required to attach a `version` query parameter to every request).\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  ChatOpenAI({\n    final String? apiKey,\n    final String? organization,\n    final String baseUrl = 'https://api.openai.com/v1',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const ChatOpenAIOptions(model: defaultModel),\n    this.encoding,\n  }) : _authProvider = MutableApiKeyProvider(apiKey ?? '') {\n    _client = OpenAIClient(\n      config: OpenAIConfig(\n        authProvider: _authProvider,\n        organization: organization,\n        baseUrl: buildBaseUrl(baseUrl, queryParams) ?? baseUrl,\n        defaultHeaders: headers ?? const {},\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// The mutable auth provider for API key management.\n  final MutableApiKeyProvider _authProvider;\n\n  /// A client for interacting with OpenAI API.\n  late final OpenAIClient _client;\n\n  /// The encoding to use by tiktoken when [tokenize] is called.\n  ///\n  /// By default, when [encoding] is not set, it is derived from the [model].\n  /// However, there are some cases where you may want to use this wrapper\n  /// class with a [model] not supported by tiktoken (e.g. when using Azure\n  /// embeddings or when using one of the many model providers that expose an\n  /// OpenAI-like API but with different models). 
In those cases, tiktoken won't\n  /// be able to derive the encoding to use, so you have to explicitly specify\n  /// it using this field.\n  ///\n  /// Supported encodings:\n  /// - `cl100k_base` (used by gpt-4, gpt-3.5-turbo, text-embedding-3-small).\n  ///\n  /// For an exhaustive list check:\n  /// https://github.com/mvitlov/tiktoken/blob/master/lib/tiktoken.dart\n  String? encoding;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  /// Set or replace the API key.\n  set apiKey(final String value) => _authProvider.apiKey = value;\n\n  /// Get the API key.\n  String get apiKey => _authProvider.apiKey;\n\n  @override\n  String get modelType => 'openai-chat';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gpt-5-mini';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatOpenAIOptions? options,\n  }) async {\n    final completion = await _client.chat.completions.create(\n      createChatCompletionRequest(\n        input.toChatMessages(),\n        options: options,\n        defaultOptions: defaultOptions,\n      ),\n    );\n    return completion.toChatResult(completion.id ?? _uuid.v4());\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatOpenAIOptions? options,\n  }) {\n    return _client.chat.completions\n        .createStream(\n          createChatCompletionRequest(\n            input.toChatMessages(),\n            options: options,\n            defaultOptions: defaultOptions,\n            stream: true,\n          ),\n        )\n        .map(\n          (final completion) =>\n              completion.toChatResult(completion.id ?? _uuid.v4()),\n        );\n  }\n\n  /// Tokenizes the given prompt using tiktoken with the encoding used by the\n  /// [model]. 
If an encoding model is specified in [encoding] field, that\n  /// encoding is used instead.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatOpenAIOptions? options,\n  }) async {\n    return _getTiktoken().encode(promptValue.toString());\n  }\n\n  @override\n  Future<int> countTokens(\n    final PromptValue promptValue, {\n    final ChatOpenAIOptions? options,\n  }) async {\n    final model = options?.model ?? defaultOptions.model ?? defaultModel;\n    final tiktoken = _getTiktoken();\n    final messages = promptValue.toChatMessages();\n\n    // Ref: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb\n    final int tokensPerMessage;\n    final int tokensPerName;\n\n    switch (model) {\n      case 'gpt-3.5-turbo-16k-0613':\n      case 'gpt-4-0314':\n      case 'gpt-4-32k-0314':\n      case 'gpt-4-0613':\n      case 'gpt-4-32k-0613':\n        tokensPerMessage = 3;\n        tokensPerName = 1;\n      case 'gpt-3.5-turbo-0301':\n        // Every message follows <|start|>{role/name}\\n{content}<|end|>\\n\n        tokensPerMessage = 4;\n        // If there's a name, the role is omitted\n        tokensPerName = -1;\n      default:\n        if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) {\n          // Returning num tokens assuming gpt-4\n          tokensPerMessage = 3;\n          tokensPerName = 1;\n        } else {\n          // For other models we assume gpt-3.5-turbo-0613\n          tokensPerMessage = 3;\n          tokensPerName = 1;\n        }\n    }\n\n    var numTokens = 0;\n    for (final message in messages) {\n      numTokens += tokensPerMessage;\n      numTokens += tiktoken.encode(message.contentAsString).length;\n      numTokens += switch (message) {\n        final SystemChatMessage _ => tiktoken.encode('system').length,\n        final HumanChatMessage _ => tiktoken.encode('user').length,\n   
     final AIChatMessage msg =>\n          tiktoken.encode('assistant').length +\n              (msg.toolCalls.isNotEmpty\n                  ? tiktoken\n                            .encode(msg.toolCalls.map((c) => c.name).join())\n                            .length +\n                        tiktoken\n                            .encode(\n                              msg.toolCalls.map((c) => c.argumentsRaw).join(),\n                            )\n                            .length\n                  : 0),\n        final ToolChatMessage msg =>\n          tiktoken.encode(msg.toolCallId).length + tokensPerName,\n        final CustomChatMessage msg => tiktoken.encode(msg.role).length,\n      };\n    }\n\n    // every reply is primed with <im_start>assistant\n    return numTokens + 3;\n  }\n\n  /// Returns the tiktoken model to use for the given model.\n  Tiktoken _getTiktoken() {\n    return encoding != null\n        ? getEncoding(encoding!)\n        : getEncoding('cl100k_base');\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n\n  /// {@template chat_openai_list_models}\n  /// Returns a list of available chat models from OpenAI.\n  ///\n  /// This method fetches all models from the OpenAI API and filters them\n  /// to only return chat-capable models.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatOpenAI(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? 
\"unknown\"}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .where(_isChatModel)\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model is a chat-capable model.\n  static bool _isChatModel(final Model model) {\n    final id = model.id.toLowerCase();\n\n    // Exclude instruct models (completion API, not chat)\n    if (id.contains('instruct')) return false;\n\n    // Match chat model prefixes\n    // o[1-9] covers o1, o3, and future o4, o5, etc.\n    return id.startsWith('gpt-') ||\n        RegExp(r'^o[1-9]').hasMatch(id) ||\n        id.startsWith('chatgpt-');\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/chat_openai_responses.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:openai_dart/openai_dart.dart';\n\nimport '../utils/auth.dart';\nimport 'chat_openai_responses_mappers.dart';\nimport 'chat_openai_responses_types.dart';\n\n/// Wrapper around [OpenAI Responses API](https://platform.openai.com/docs/api-reference/responses).\n///\n/// The Responses API is OpenAI's most advanced interface for generating model\n/// responses. It supports text and image inputs, and text outputs. It also\n/// supports multi-turn conversations via [ChatOpenAIResponsesOptions.previousResponseId].\n///\n/// Example:\n/// ```dart\n/// final chatModel = ChatOpenAIResponses(apiKey: '...');\n/// final messages = [\n///   ChatMessage.system('You are a helpful assistant.'),\n///   ChatMessage.humanText('I love programming.'),\n/// ];\n/// final prompt = PromptValue.chat(messages);\n/// final res = await chatModel.invoke(prompt);\n/// ```\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// Responses API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final chatModel = ChatOpenAIResponses(\n///   apiKey: openaiApiKey,\n///   defaultOptions: const ChatOpenAIResponsesOptions(\n///     temperature: 0.9,\n///     maxOutputTokens: 100,\n///   ),\n/// );\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await chatModel.invoke(\n///   prompt,\n///   options: const ChatOpenAIResponsesOptions(\n///     reasoningEffort: ChatOpenAIResponsesReasoningEffort.high,\n///   ),\n/// );\n/// ```\n///\n/// ### Multi-turn conversations\n///\n/// The Responses API supports multi-turn conversations using\n/// [ChatOpenAIResponsesOptions.previousResponseId]:\n///\n/// ```dart\n/// final res1 = await chatModel.invoke(prompt1);\n/// final responseId = res1.id;\n/// final res2 = await chatModel.invoke(\n///   prompt2,\n///   options: ChatOpenAIResponsesOptions(previousResponseId: responseId),\n/// );\n/// ```\n///\n/// ### Authentication\n///\n/// The OpenAI API uses API keys for authentication. 
Visit your\n/// [API Keys](https://platform.openai.com/account/api-keys) page to retrieve\n/// the API key you'll use in your requests.\n///\n/// #### Organization (optional)\n///\n/// For users who belong to multiple organizations, you can specify which\n/// organization is used for an API request.\n///\n/// ```dart\n/// final client = ChatOpenAIResponses(\n///   apiKey: 'OPENAI_API_KEY',\n///   organization: 'org-dtDDtkEGoFccn5xaP5W1p3Rr',\n/// );\n/// ```\n///\n/// ### Advance\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = ChatOpenAIResponses(\n///   apiKey: 'OPENAI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = ChatOpenAIResponses(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n/// );\n/// ```\nclass ChatOpenAIResponses extends BaseChatModel<ChatOpenAIResponsesOptions> {\n  /// Create a new [ChatOpenAIResponses] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your OpenAI API key. You can find your API key in the\n  ///   [OpenAI dashboard](https://platform.openai.com/account/api-keys).\n  /// - `organization`: your OpenAI organization ID (if applicable).\n  /// - [ChatOpenAIResponses.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request.\n  /// - `client`: the HTTP client to use. 
You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  ChatOpenAIResponses({\n    final String? apiKey,\n    final String? organization,\n    final String baseUrl = 'https://api.openai.com/v1',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const ChatOpenAIResponsesOptions(\n      model: defaultModel,\n    ),\n  }) : _authProvider = MutableApiKeyProvider(apiKey ?? '') {\n    _client = OpenAIClient(\n      config: OpenAIConfig(\n        authProvider: _authProvider,\n        organization: organization,\n        baseUrl: buildBaseUrl(baseUrl, queryParams) ?? baseUrl,\n        defaultHeaders: headers ?? const {},\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// The mutable auth provider for API key management.\n  final MutableApiKeyProvider _authProvider;\n\n  /// A client for interacting with OpenAI API.\n  late final OpenAIClient _client;\n\n  /// Set or replace the API key.\n  set apiKey(final String value) => _authProvider.apiKey = value;\n\n  /// Get the API key.\n  String get apiKey => _authProvider.apiKey;\n\n  @override\n  String get modelType => 'openai-responses';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gpt-5-mini';\n\n  @override\n  Future<ChatResult> invoke(\n    final PromptValue input, {\n    final ChatOpenAIResponsesOptions? options,\n  }) async {\n    final response = await _client.responses.create(\n      createResponseRequest(\n        input.toChatMessages(),\n        options: options,\n        defaultOptions: defaultOptions,\n      ),\n    );\n    return response.toChatResult();\n  }\n\n  @override\n  Stream<ChatResult> stream(\n    final PromptValue input, {\n    final ChatOpenAIResponsesOptions? 
options,\n  }) {\n    return _client.responses\n        .createStreamWithAccumulator(\n          createResponseRequest(\n            input.toChatMessages(),\n            options: options,\n            defaultOptions: defaultOptions,\n          ),\n        )\n        .expand((final accumulator) {\n          final result = accumulator.toChatResult();\n          return result != null ? [result] : const <ChatResult>[];\n        });\n  }\n\n  /// Lists the available models that can be used with [ChatOpenAIResponses].\n  ///\n  /// This method fetches all models from the OpenAI API and filters them\n  /// to only return chat-capable models.\n  ///\n  /// Example:\n  /// ```dart\n  /// final chatModel = ChatOpenAIResponses(apiKey: '...');\n  /// final models = await chatModel.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? \"unknown\"}');\n  /// }\n  /// ```\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .where(_isChatModel)\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model is a chat-capable model.\n  static bool _isChatModel(final Model model) {\n    final id = model.id.toLowerCase();\n\n    // Exclude instruct models (completion API, not chat)\n    if (id.contains('instruct')) return false;\n\n    // Match chat model prefixes\n    // o[1-9] covers o1, o3, and future o4, o5, etc.\n    return id.startsWith('gpt-') ||\n        RegExp(r'^o[1-9]').hasMatch(id) ||\n        id.startsWith('chatgpt-');\n  }\n\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final ChatOpenAIResponsesOptions? 
options,\n  }) {\n    throw UnsupportedError(\n      'ChatOpenAIResponses does not support tokenization.',\n    );\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/chat_openai_responses_mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:openai_dart/openai_dart.dart' as oai;\n\nimport 'chat_openai_responses.dart';\nimport 'chat_openai_responses_types.dart';\n\n/// Creates a [oai.CreateResponseRequest] from the given input.\noai.CreateResponseRequest createResponseRequest(\n  final List<ChatMessage> messages, {\n  required final ChatOpenAIResponsesOptions? options,\n  required final ChatOpenAIResponsesOptions defaultOptions,\n}) {\n  final input = messages.toResponseInput();\n  final toolsDtos = (options?.tools ?? defaultOptions.tools)?.toResponseTools();\n  final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice)\n      ?.toResponseToolChoice();\n  final textConfig = (options?.responseFormat ?? defaultOptions.responseFormat)\n      ?.toTextConfig();\n  final reasoningConfig =\n      (options?.reasoningEffort ?? defaultOptions.reasoningEffort)\n          ?.toReasoningConfig();\n  final serviceTier = (options?.serviceTier ?? defaultOptions.serviceTier)\n      ?.toServiceTier();\n  final truncation = (options?.truncation ?? defaultOptions.truncation)\n      ?.toTruncation();\n\n  return oai.CreateResponseRequest(\n    model:\n        options?.model ??\n        defaultOptions.model ??\n        ChatOpenAIResponses.defaultModel,\n    input: input,\n    instructions: options?.instructions ?? defaultOptions.instructions,\n    previousResponseId:\n        options?.previousResponseId ?? defaultOptions.previousResponseId,\n    store: options?.store ?? defaultOptions.store,\n    metadata: options?.metadata ?? defaultOptions.metadata,\n    tools: toolsDtos,\n    toolChoice: toolChoice,\n    frequencyPenalty:\n        options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,\n    topLogprobs: options?.topLogprobs ?? 
defaultOptions.topLogprobs,\n    maxOutputTokens: options?.maxOutputTokens ?? defaultOptions.maxOutputTokens,\n    presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty,\n    text: textConfig,\n    reasoning: reasoningConfig,\n    temperature: options?.temperature ?? defaultOptions.temperature,\n    topP: options?.topP ?? defaultOptions.topP,\n    parallelToolCalls:\n        options?.parallelToolCalls ?? defaultOptions.parallelToolCalls,\n    serviceTier: serviceTier,\n    truncation: truncation,\n  );\n}\n\nextension ChatMessageListResponseMapper on List<ChatMessage> {\n  oai.ResponseInput toResponseInput() {\n    final items = expand(_mapMessage).toList(growable: false);\n    return oai.ResponseInput.items(items);\n  }\n\n  Iterable<oai.Item> _mapMessage(final ChatMessage msg) {\n    return switch (msg) {\n      final SystemChatMessage msg => [_mapSystemMessage(msg)],\n      final HumanChatMessage msg => [_mapHumanMessage(msg)],\n      final AIChatMessage msg => _mapAIMessage(msg),\n      final ToolChatMessage msg => [_mapToolMessage(msg)],\n      CustomChatMessage() => throw UnsupportedError(\n        'OpenAI Responses API does not support custom messages',\n      ),\n    };\n  }\n\n  oai.Item _mapSystemMessage(final SystemChatMessage msg) {\n    return oai.MessageItem.systemText(msg.content);\n  }\n\n  oai.Item _mapHumanMessage(final HumanChatMessage msg) {\n    return switch (msg.content) {\n      final ChatMessageContentText c => oai.MessageItem.userText(c.text),\n      final ChatMessageContentImage c => oai.MessageItem.user([\n        _mapContentImage(c),\n      ]),\n      final ChatMessageContentMultiModal c => oai.MessageItem.user(\n        _mapContentParts(c),\n      ),\n    };\n  }\n\n  oai.InputContent _mapContentImage(final ChatMessageContentImage c) {\n    final imageData = c.data.trim();\n    final isUrl = imageData.startsWith('http');\n    if (isUrl) {\n      return oai.InputContent.imageUrl(imageData);\n    } else {\n      if 
(c.mimeType == null) {\n        throw ArgumentError(\n          \"When passing a Base64 encoded image, you need to specify the mimeType (e.g. 'image/png')\",\n          'ChatMessageContentImage.mimeType',\n        );\n      }\n      return oai.InputContent.imageUrl('data:${c.mimeType};base64,$imageData');\n    }\n  }\n\n  List<oai.InputContent> _mapContentParts(\n    final ChatMessageContentMultiModal c,\n  ) {\n    return c.parts\n        .expand(\n          (final part) => switch (part) {\n            final ChatMessageContentText c => [oai.InputContent.text(c.text)],\n            final ChatMessageContentImage img => [_mapContentImage(img)],\n            final ChatMessageContentMultiModal c => _mapContentParts(c),\n          },\n        )\n        .toList(growable: false);\n  }\n\n  // The Responses API input format expects assistant text and function calls\n  // as separate items (unlike Chat Completions which groups them in one message).\n  Iterable<oai.Item> _mapAIMessage(final AIChatMessage msg) {\n    final items = <oai.Item>[];\n    if (msg.content.isNotEmpty) {\n      items.add(oai.MessageItem.assistantText(msg.content));\n    }\n    for (final toolCall in msg.toolCalls) {\n      items.add(\n        oai.FunctionCallItem(\n          callId: toolCall.id,\n          name: toolCall.name,\n          arguments: toolCall.argumentsRaw,\n        ),\n      );\n    }\n    return items;\n  }\n\n  oai.Item _mapToolMessage(final ToolChatMessage msg) {\n    return oai.FunctionCallOutputItem.string(\n      callId: msg.toolCallId,\n      output: msg.content,\n    );\n  }\n}\n\nextension ResponseMapper on oai.Response {\n  ChatResult toChatResult() {\n    final toolCalls = functionCalls\n        .map((fc) {\n          var args = <String, dynamic>{};\n          try {\n            args = fc.arguments.isEmpty ? 
{} : json.decode(fc.arguments);\n          } catch (_) {}\n          return AIChatMessageToolCall(\n            id: fc.callId,\n            name: fc.name,\n            argumentsRaw: fc.arguments,\n            arguments: args,\n          );\n        })\n        .toList(growable: false);\n\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(content: outputText, toolCalls: toolCalls),\n      finishReason: _mapFinishReason(status),\n      metadata: {'model': model, 'created_at': createdAt},\n      usage: _mapResponseUsage(usage),\n    );\n  }\n}\n\nextension ResponseStreamAccumulatorMapper on oai.ResponseStreamAccumulator {\n  /// Maps the latest streaming event to a [ChatResult], or returns `null`\n  /// for events that carry no meaningful content (e.g. `response.created`).\n  ChatResult? toChatResult() {\n    final event = latestEvent;\n\n    final String content;\n    final List<AIChatMessageToolCall> toolCalls;\n\n    switch (event) {\n      case oai.OutputTextDeltaEvent(:final delta):\n        content = delta;\n        toolCalls = const [];\n      case oai.OutputItemAddedEvent(:final item)\n          when item is oai.FunctionCallOutputItemResponse:\n        content = '';\n        toolCalls = [\n          AIChatMessageToolCall(\n            id: item.callId,\n            name: item.name,\n            argumentsRaw: '',\n            arguments: const {},\n          ),\n        ];\n      case oai.FunctionCallArgumentsDeltaEvent(:final delta):\n        content = '';\n        // Use empty id so AIChatMessage.concat falls back to the last\n        // tool call's id (set by OutputItemAddedEvent with callId).\n        toolCalls = [\n          AIChatMessageToolCall(\n            id: '',\n            name: '',\n            argumentsRaw: delta,\n            arguments: const {},\n          ),\n        ];\n      case oai.RefusalDeltaEvent(:final delta):\n        content = '';\n        toolCalls = const [];\n        return ChatResult(\n          id: responseId ?? 
'',\n          output: AIChatMessage(content: content, toolCalls: toolCalls),\n          finishReason: FinishReason.unspecified,\n          metadata: {'refusal': delta},\n          usage: _mapResponseUsage(usage),\n          streaming: true,\n        );\n      case oai.ResponseCompletedEvent(:final response):\n        final result = response.toChatResult();\n        return ChatResult(\n          id: result.id,\n          output: result.output,\n          finishReason: result.finishReason,\n          metadata: result.metadata,\n          usage: result.usage,\n          streaming: true,\n        );\n      default:\n        // Skip events with no content (response.created, response.in_progress, etc.)\n        return null;\n    }\n\n    return ChatResult(\n      id: responseId ?? '',\n      output: AIChatMessage(content: content, toolCalls: toolCalls),\n      finishReason: _mapStreamFinishReason(status),\n      metadata: const {},\n      usage: _mapResponseUsage(usage),\n      streaming: true,\n    );\n  }\n}\n\nextension ResponseToolListMapper on List<ToolSpec> {\n  List<oai.ResponseTool> toResponseTools() {\n    return map(_mapResponseTool).toList(growable: false);\n  }\n\n  oai.ResponseTool _mapResponseTool(final ToolSpec tool) {\n    return oai.ResponseTool.function(\n      name: tool.name,\n      description: tool.description,\n      parameters: tool.inputJsonSchema,\n    );\n  }\n}\n\nextension ResponseToolChoiceMapper on ChatToolChoice {\n  oai.ResponseToolChoice toResponseToolChoice() {\n    return switch (this) {\n      ChatToolChoiceNone _ => oai.ResponseToolChoice.none,\n      ChatToolChoiceAuto _ => oai.ResponseToolChoice.auto,\n      ChatToolChoiceRequired() => oai.ResponseToolChoice.required,\n      final ChatToolChoiceForced t => oai.ResponseToolChoice.function(\n        name: t.name,\n      ),\n    };\n  }\n}\n\nextension ChatOpenAIResponsesResponseFormatMapper\n    on ChatOpenAIResponsesResponseFormat {\n  oai.TextConfig toTextConfig() {\n    return 
switch (this) {\n      ChatOpenAIResponsesResponseFormatText() => const oai.TextConfig(\n        format: oai.PlainTextFormat(),\n      ),\n      ChatOpenAIResponsesResponseFormatJsonObject() => const oai.TextConfig(\n        format: oai.JsonObjectFormat(),\n      ),\n      final ChatOpenAIResponsesResponseFormatJsonSchema res => oai.TextConfig(\n        format: oai.JsonSchemaFormat(\n          name: res.name,\n          description: res.description,\n          schema: res.schema,\n          strict: res.strict,\n        ),\n      ),\n    };\n  }\n}\n\nextension ChatOpenAIResponsesReasoningEffortMapper\n    on ChatOpenAIResponsesReasoningEffort {\n  oai.ReasoningConfig toReasoningConfig() => switch (this) {\n    ChatOpenAIResponsesReasoningEffort.low => const oai.ReasoningConfig(\n      effort: oai.ReasoningEffort.low,\n    ),\n    ChatOpenAIResponsesReasoningEffort.medium => const oai.ReasoningConfig(\n      effort: oai.ReasoningEffort.medium,\n    ),\n    ChatOpenAIResponsesReasoningEffort.high => const oai.ReasoningConfig(\n      effort: oai.ReasoningEffort.high,\n    ),\n  };\n}\n\nextension ChatOpenAIResponsesServiceTierMapper\n    on ChatOpenAIResponsesServiceTier {\n  oai.ServiceTier toServiceTier() => switch (this) {\n    ChatOpenAIResponsesServiceTier.auto => oai.ServiceTier.auto,\n    ChatOpenAIResponsesServiceTier.vDefault => oai.ServiceTier.defaultTier,\n  };\n}\n\nextension ChatOpenAIResponsesTruncationMapper on ChatOpenAIResponsesTruncation {\n  oai.Truncation toTruncation() => switch (this) {\n    ChatOpenAIResponsesTruncation.auto => oai.Truncation.auto,\n    ChatOpenAIResponsesTruncation.disabled => oai.Truncation.disabled,\n  };\n}\n\nFinishReason _mapFinishReason(final oai.ResponseStatus status) =>\n    switch (status) {\n      oai.ResponseStatus.completed => FinishReason.stop,\n      oai.ResponseStatus.failed => FinishReason.unspecified,\n      oai.ResponseStatus.incomplete => FinishReason.length,\n      oai.ResponseStatus.inProgress => 
FinishReason.unspecified,\n      oai.ResponseStatus.queued => FinishReason.unspecified,\n      oai.ResponseStatus.cancelled => FinishReason.unspecified,\n      _ => FinishReason.unspecified,\n    };\n\nFinishReason _mapStreamFinishReason(final oai.ResponseStatus status) =>\n    switch (status) {\n      oai.ResponseStatus.completed => FinishReason.stop,\n      _ => FinishReason.unspecified,\n    };\n\nLanguageModelUsage _mapResponseUsage(final oai.ResponseUsage? usage) {\n  if (usage == null) return const LanguageModelUsage();\n  return LanguageModelUsage(\n    promptTokens: usage.inputTokens,\n    responseTokens: usage.outputTokens,\n    totalTokens: usage.totalTokens,\n  );\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/chat_openai_responses_types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_openai_responses_options}\n/// Options to pass into the OpenAI Responses API Chat Model.\n///\n/// Available [ChatOpenAIResponsesOptions.model]s:\n/// - `gpt-5`\n/// - `gpt-5-mini`\n/// - `gpt-5-nano`\n/// - `gpt-4.1`\n/// - `gpt-4.1-mini`\n/// - `gpt-4.1-nano`\n/// - `o4-mini`\n/// - `o3`\n/// - `o3-mini`\n/// - `o1`\n/// - `gpt-4o`\n/// - `gpt-4o-mini`\n///\n/// Mind that the list may be outdated.\n/// See https://platform.openai.com/docs/models for the latest list.\n/// {@endtemplate}\n@immutable\nclass ChatOpenAIResponsesOptions extends ChatModelOptions {\n  /// {@macro chat_openai_responses_options}\n  const ChatOpenAIResponsesOptions({\n    super.model,\n    this.instructions,\n    this.previousResponseId,\n    this.store,\n    this.reasoningEffort,\n    this.metadata,\n    this.frequencyPenalty,\n    this.topLogprobs,\n    this.maxOutputTokens,\n    this.presencePenalty,\n    this.responseFormat,\n    this.temperature,\n    this.topP,\n    super.tools,\n    super.toolChoice,\n    this.parallelToolCalls,\n    this.serviceTier,\n    this.truncation,\n    super.concurrencyLimit,\n  });\n\n  /// A system (or developer) message inserted at the beginning of the\n  /// model's context as an alternative to adding a [SystemChatMessage].\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-instructions\n  final String? instructions;\n\n  /// The unique ID of the previous response to the model. Use this to\n  /// create multi-turn conversations. The model will use the previous\n  /// response to inform its response.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-previous_response_id\n  final String? 
previousResponseId;\n\n  /// Whether or not to store the output of this chat completion request.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-store\n  final bool? store;\n\n  /// Constrains effort on reasoning for reasoning models.\n  /// Supported values are `low`, `medium`, and `high`.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-reasoning\n  final ChatOpenAIResponsesReasoningEffort? reasoningEffort;\n\n  /// Developer-defined tags and values used for filtering completions.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-metadata\n  final Map<String, String>? metadata;\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// their existing frequency in the text so far, decreasing the model's\n  /// likelihood to repeat the same line verbatim.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-frequency_penalty\n  final double? frequencyPenalty;\n\n  /// Number of most likely tokens to return at each token position.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-top_logprobs\n  final int? topLogprobs;\n\n  /// The maximum number of output tokens to generate in the response.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-max_output_tokens\n  final int? maxOutputTokens;\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// whether they appear in the text so far, increasing the model's likelihood\n  /// to talk about new topics.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-presence_penalty\n  final double? 
presencePenalty;\n\n  /// An object specifying the format that the model must output.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-text\n  final ChatOpenAIResponsesResponseFormat? responseFormat;\n\n  /// What sampling temperature to use, between 0 and 2.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-temperature\n  final double? temperature;\n\n  /// An alternative to sampling with temperature, called nucleus sampling,\n  /// where the model considers the results of the tokens with top_p\n  /// probability mass.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-top_p\n  final double? topP;\n\n  /// Whether to enable parallel tool calling during tool use.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-parallel_tool_calls\n  final bool? parallelToolCalls;\n\n  /// Specifies the latency tier to use for processing the request.\n  final ChatOpenAIResponsesServiceTier? serviceTier;\n\n  /// The truncation strategy to use for the model.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/responses/create#responses-create-truncation\n  final ChatOpenAIResponsesTruncation? truncation;\n\n  @override\n  ChatOpenAIResponsesOptions copyWith({\n    final String? model,\n    final String? instructions,\n    final String? previousResponseId,\n    final bool? store,\n    final ChatOpenAIResponsesReasoningEffort? reasoningEffort,\n    final Map<String, String>? metadata,\n    final double? frequencyPenalty,\n    final int? topLogprobs,\n    final int? maxOutputTokens,\n    final double? presencePenalty,\n    final ChatOpenAIResponsesResponseFormat? responseFormat,\n    final double? temperature,\n    final double? topP,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final bool? 
parallelToolCalls,\n    final ChatOpenAIResponsesServiceTier? serviceTier,\n    final ChatOpenAIResponsesTruncation? truncation,\n    final int? concurrencyLimit,\n  }) {\n    return ChatOpenAIResponsesOptions(\n      model: model ?? this.model,\n      instructions: instructions ?? this.instructions,\n      previousResponseId: previousResponseId ?? this.previousResponseId,\n      store: store ?? this.store,\n      reasoningEffort: reasoningEffort ?? this.reasoningEffort,\n      metadata: metadata ?? this.metadata,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      topLogprobs: topLogprobs ?? this.topLogprobs,\n      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      responseFormat: responseFormat ?? this.responseFormat,\n      temperature: temperature ?? this.temperature,\n      topP: topP ?? this.topP,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls,\n      serviceTier: serviceTier ?? this.serviceTier,\n      truncation: truncation ?? this.truncation,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatOpenAIResponsesOptions merge(\n    covariant final ChatOpenAIResponsesOptions? 
other,\n  ) {\n    return copyWith(\n      model: other?.model,\n      instructions: other?.instructions,\n      previousResponseId: other?.previousResponseId,\n      store: other?.store,\n      reasoningEffort: other?.reasoningEffort,\n      metadata: other?.metadata,\n      frequencyPenalty: other?.frequencyPenalty,\n      topLogprobs: other?.topLogprobs,\n      maxOutputTokens: other?.maxOutputTokens,\n      presencePenalty: other?.presencePenalty,\n      responseFormat: other?.responseFormat,\n      temperature: other?.temperature,\n      topP: other?.topP,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      parallelToolCalls: other?.parallelToolCalls,\n      serviceTier: other?.serviceTier,\n      truncation: other?.truncation,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatOpenAIResponsesOptions other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            model == other.model &&\n            instructions == other.instructions &&\n            previousResponseId == other.previousResponseId &&\n            store == other.store &&\n            reasoningEffort == other.reasoningEffort &&\n            const MapEquality<String, String>().equals(\n              metadata,\n              other.metadata,\n            ) &&\n            frequencyPenalty == other.frequencyPenalty &&\n            topLogprobs == other.topLogprobs &&\n            maxOutputTokens == other.maxOutputTokens &&\n            presencePenalty == other.presencePenalty &&\n            responseFormat == other.responseFormat &&\n            temperature == other.temperature &&\n            topP == other.topP &&\n            const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n            toolChoice == other.toolChoice &&\n            parallelToolCalls == other.parallelToolCalls &&\n            serviceTier == other.serviceTier &&\n            truncation == 
other.truncation &&\n            concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        instructions.hashCode ^\n        previousResponseId.hashCode ^\n        store.hashCode ^\n        reasoningEffort.hashCode ^\n        const MapEquality<String, String>().hash(metadata) ^\n        frequencyPenalty.hashCode ^\n        topLogprobs.hashCode ^\n        maxOutputTokens.hashCode ^\n        presencePenalty.hashCode ^\n        responseFormat.hashCode ^\n        temperature.hashCode ^\n        topP.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        parallelToolCalls.hashCode ^\n        serviceTier.hashCode ^\n        truncation.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// {@template chat_openai_responses_response_format}\n/// An object specifying the format that the model must output.\n/// {@endtemplate}\nsealed class ChatOpenAIResponsesResponseFormat {\n  const ChatOpenAIResponsesResponseFormat();\n\n  /// The model will respond with text.\n  static const text = ChatOpenAIResponsesResponseFormatText();\n\n  /// The model will respond with a valid JSON object.\n  static const jsonObject = ChatOpenAIResponsesResponseFormatJsonObject();\n\n  /// The model will respond with a valid JSON object that adheres to the\n  /// specified schema.\n  factory ChatOpenAIResponsesResponseFormat.jsonSchema({\n    required final String name,\n    final String? description,\n    required final Map<String, dynamic> schema,\n    final bool? 
strict,\n  }) => ChatOpenAIResponsesResponseFormatJsonSchema(\n    name: name,\n    description: description,\n    schema: schema,\n    strict: strict,\n  );\n}\n\n/// {@template chat_openai_responses_response_format_text}\n/// The model will respond with text.\n/// {@endtemplate}\nclass ChatOpenAIResponsesResponseFormatText\n    extends ChatOpenAIResponsesResponseFormat {\n  /// {@macro chat_openai_responses_response_format_text}\n  const ChatOpenAIResponsesResponseFormatText();\n}\n\n/// {@template chat_openai_responses_response_format_json_object}\n/// The model will respond with a valid JSON object.\n/// {@endtemplate}\nclass ChatOpenAIResponsesResponseFormatJsonObject\n    extends ChatOpenAIResponsesResponseFormat {\n  /// {@macro chat_openai_responses_response_format_json_object}\n  const ChatOpenAIResponsesResponseFormatJsonObject();\n}\n\n/// {@template chat_openai_responses_response_format_json_schema}\n/// The model will respond with a valid JSON object that adheres to the\n/// specified schema.\n/// {@endtemplate}\n@immutable\nclass ChatOpenAIResponsesResponseFormatJsonSchema\n    extends ChatOpenAIResponsesResponseFormat {\n  /// {@macro chat_openai_responses_response_format_json_schema}\n  const ChatOpenAIResponsesResponseFormatJsonSchema({\n    required this.name,\n    this.description,\n    required this.schema,\n    this.strict,\n  });\n\n  /// The name of the response format.\n  final String name;\n\n  /// A description of what the response format is for.\n  final String? description;\n\n  /// The schema for the response format, described as a JSON Schema object.\n  final Map<String, dynamic> schema;\n\n  /// Whether to enable strict schema adherence.\n  final bool? 
strict;\n\n  @override\n  bool operator ==(\n    covariant ChatOpenAIResponsesResponseFormatJsonSchema other,\n  ) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            name == other.name &&\n            description == other.description &&\n            const MapEquality<String, dynamic>().equals(schema, other.schema) &&\n            strict == other.strict;\n  }\n\n  @override\n  int get hashCode =>\n      name.hashCode ^\n      description.hashCode ^\n      const MapEquality<String, dynamic>().hash(schema) ^\n      strict.hashCode;\n}\n\n/// Constrains effort on reasoning for reasoning models.\nenum ChatOpenAIResponsesReasoningEffort {\n  /// Low effort\n  low,\n\n  /// Medium effort\n  medium,\n\n  /// High effort\n  high,\n}\n\n/// Specifies the latency tier to use for processing the request.\nenum ChatOpenAIResponsesServiceTier {\n  /// The system will utilize scale tier credits until they are exhausted.\n  auto,\n\n  /// The request will be processed using the default service tier.\n  vDefault,\n}\n\n/// The truncation strategy to use for the model.\nenum ChatOpenAIResponsesTruncation {\n  /// Automatically truncate input.\n  auto,\n\n  /// Do not truncate input (may fail if input exceeds context).\n  disabled,\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'dart:convert';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:openai_dart/openai_dart.dart' as oai;\n\nimport 'chat_openai.dart';\nimport 'types.dart';\n\n/// Creates a [oai.ChatCompletionCreateRequest] from the given input.\noai.ChatCompletionCreateRequest createChatCompletionRequest(\n  final List<ChatMessage> messages, {\n  required final ChatOpenAIOptions? options,\n  required final ChatOpenAIOptions defaultOptions,\n  final bool stream = false,\n}) {\n  final messagesDtos = messages.toChatCompletionMessages();\n  final toolsDtos = (options?.tools ?? defaultOptions.tools)\n      ?.toChatCompletionTool();\n  final toolChoice = (options?.toolChoice ?? defaultOptions.toolChoice)\n      ?.toChatCompletionToolChoice();\n  final responseFormatDto =\n      (options?.responseFormat ?? defaultOptions.responseFormat)\n          ?.toChatCompletionResponseFormat();\n  final serviceTierDto = (options?.serviceTier ?? defaultOptions.serviceTier)\n      .toServiceTierString();\n\n  return oai.ChatCompletionCreateRequest(\n    model: options?.model ?? defaultOptions.model ?? ChatOpenAI.defaultModel,\n    messages: messagesDtos,\n    store: options?.store ?? defaultOptions.store,\n    reasoningEffort:\n        (options?.reasoningEffort ?? defaultOptions.reasoningEffort)\n            .toReasoningEffort(),\n    metadata: options?.metadata ?? defaultOptions.metadata,\n    tools: toolsDtos,\n    toolChoice: toolChoice,\n    frequencyPenalty:\n        options?.frequencyPenalty ?? defaultOptions.frequencyPenalty,\n    logitBias: options?.logitBias ?? defaultOptions.logitBias,\n    logprobs: options?.logprobs ?? defaultOptions.logprobs,\n    topLogprobs: options?.topLogprobs ?? defaultOptions.topLogprobs,\n    maxCompletionTokens: options?.maxTokens ?? 
defaultOptions.maxTokens,\n    n: options?.n ?? defaultOptions.n,\n    presencePenalty: options?.presencePenalty ?? defaultOptions.presencePenalty,\n    responseFormat: responseFormatDto,\n    seed: options?.seed ?? defaultOptions.seed,\n    stop: options?.stop ?? defaultOptions.stop,\n    temperature: options?.temperature ?? defaultOptions.temperature,\n    topP: options?.topP ?? defaultOptions.topP,\n    parallelToolCalls:\n        options?.parallelToolCalls ?? defaultOptions.parallelToolCalls,\n    serviceTier: serviceTierDto,\n    user: options?.user ?? defaultOptions.user,\n    streamOptions: stream ? const oai.StreamOptions(includeUsage: true) : null,\n  );\n}\n\nextension ChatMessageListMapper on List<ChatMessage> {\n  List<oai.ChatMessage> toChatCompletionMessages() {\n    return map(_mapMessage).toList(growable: false);\n  }\n\n  oai.ChatMessage _mapMessage(final ChatMessage msg) {\n    return switch (msg) {\n      final SystemChatMessage msg => _mapSystemMessage(msg),\n      final HumanChatMessage msg => _mapHumanMessage(msg),\n      final AIChatMessage msg => _mapAIMessage(msg),\n      final ToolChatMessage msg => _mapToolMessage(msg),\n      CustomChatMessage() => throw UnsupportedError(\n        'OpenAI does not support custom messages',\n      ),\n    };\n  }\n\n  oai.ChatMessage _mapSystemMessage(final SystemChatMessage systemChatMessage) {\n    return oai.ChatMessage.system(systemChatMessage.content);\n  }\n\n  oai.ChatMessage _mapHumanMessage(final HumanChatMessage humanChatMessage) {\n    return switch (humanChatMessage.content) {\n      final ChatMessageContentText c => oai.ChatMessage.user(c.text),\n      final ChatMessageContentImage c => oai.ChatMessage.user([\n        _mapMessageContentPartImage(c),\n      ]),\n      final ChatMessageContentMultiModal c => oai.ChatMessage.user(\n        _mapMessageContentParts(c),\n      ),\n    };\n  }\n\n  oai.ContentPart _mapMessageContentPartImage(final ChatMessageContentImage c) {\n    final imageData = 
c.data.trim();\n    final isUrl = imageData.startsWith('http');\n    if (isUrl) {\n      return oai.ContentPart.imageUrl(\n        imageData,\n        detail: _mapImageDetail(c.detail),\n      );\n    } else {\n      if (c.mimeType == null) {\n        throw ArgumentError(\n          \"When passing a Base64 encoded image, you need to specify the mimeType (e.g. 'image/png')\",\n          'ChatMessageContentImage.mimeType',\n        );\n      }\n      return oai.ContentPart.imageBase64(\n        data: imageData,\n        mediaType: c.mimeType!,\n        detail: _mapImageDetail(c.detail),\n      );\n    }\n  }\n\n  oai.ImageDetail? _mapImageDetail(final ChatMessageContentImageDetail detail) {\n    return switch (detail) {\n      ChatMessageContentImageDetail.auto => oai.ImageDetail.auto,\n      ChatMessageContentImageDetail.low => oai.ImageDetail.low,\n      ChatMessageContentImageDetail.high => oai.ImageDetail.high,\n    };\n  }\n\n  List<oai.ContentPart> _mapMessageContentParts(\n    final ChatMessageContentMultiModal c,\n  ) {\n    return c.parts\n        .expand(\n          (final part) => switch (part) {\n            final ChatMessageContentText c => [oai.ContentPart.text(c.text)],\n            final ChatMessageContentImage img => [\n              _mapMessageContentPartImage(img),\n            ],\n            final ChatMessageContentMultiModal c => _mapMessageContentParts(c),\n          },\n        )\n        .toList(growable: false);\n  }\n\n  oai.ChatMessage _mapAIMessage(final AIChatMessage aiChatMessage) {\n    return oai.ChatMessage.assistant(\n      content: aiChatMessage.content,\n      toolCalls: aiChatMessage.toolCalls.isNotEmpty\n          ? 
aiChatMessage.toolCalls\n                .map(_mapMessageToolCall)\n                .toList(growable: false)\n          : null,\n    );\n  }\n\n  oai.ToolCall _mapMessageToolCall(final AIChatMessageToolCall toolCall) {\n    return oai.ToolCall(\n      id: toolCall.id,\n      type: 'function',\n      function: oai.FunctionCall(\n        name: toolCall.name,\n        arguments: json.encode(toolCall.arguments),\n      ),\n    );\n  }\n\n  oai.ChatMessage _mapToolMessage(final ToolChatMessage toolChatMessage) {\n    return oai.ChatMessage.tool(\n      toolCallId: toolChatMessage.toolCallId,\n      content: toolChatMessage.content,\n    );\n  }\n}\n\nextension CreateChatCompletionResponseMapper on oai.ChatCompletion {\n  ChatResult toChatResult(final String id) {\n    final choice = choices.first;\n    final msg = choice.message;\n\n    if (msg.refusal != null && msg.refusal!.isNotEmpty) {\n      throw OpenAIRefusalException(msg.refusal!);\n    }\n\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: msg.content ?? '',\n        toolCalls:\n            msg.toolCalls?.map(_mapMessageToolCall).toList(growable: false) ??\n            const [],\n      ),\n      finishReason: _mapFinishReason(choice.finishReason),\n      metadata: {\n        'model': model,\n        'created': created,\n        'system_fingerprint': systemFingerprint,\n        'logprobs': choice.logprobs?.toJson(),\n      },\n      usage: _mapUsage(usage),\n    );\n  }\n\n  AIChatMessageToolCall _mapMessageToolCall(final oai.ToolCall toolCall) {\n    var args = <String, dynamic>{};\n    try {\n      args = toolCall.function.arguments.isEmpty\n          ? {}\n          : json.decode(toolCall.function.arguments);\n    } catch (_) {}\n    return AIChatMessageToolCall(\n      id: toolCall.id,\n      name: toolCall.function.name,\n      argumentsRaw: toolCall.function.arguments,\n      arguments: args,\n    );\n  }\n}\n\nLanguageModelUsage _mapUsage(final oai.Usage? 
usage) {\n  return LanguageModelUsage(\n    promptTokens: usage?.promptTokens,\n    responseTokens: usage?.completionTokens,\n    totalTokens: usage?.totalTokens,\n  );\n}\n\nextension ChatToolListMapper on List<ToolSpec> {\n  List<oai.Tool> toChatCompletionTool() {\n    return map(_mapChatCompletionTool).toList(growable: false);\n  }\n\n  oai.Tool _mapChatCompletionTool(final ToolSpec tool) {\n    return oai.Tool.function(\n      name: tool.name,\n      description: tool.description,\n      parameters: tool.inputJsonSchema,\n    );\n  }\n}\n\nextension ChatToolChoiceMapper on ChatToolChoice {\n  oai.ToolChoice toChatCompletionToolChoice() {\n    return switch (this) {\n      ChatToolChoiceNone _ => oai.ToolChoice.none(),\n      ChatToolChoiceAuto _ => oai.ToolChoice.auto(),\n      ChatToolChoiceRequired() => oai.ToolChoice.required(),\n      final ChatToolChoiceForced t => oai.ToolChoice.function(t.name),\n    };\n  }\n}\n\nextension CreateChatCompletionStreamResponseMapper on oai.ChatStreamEvent {\n  ChatResult toChatResult(final String id) {\n    final choice = choices?.firstOrNull;\n    final delta = choice?.delta;\n\n    if (delta?.refusal != null && delta!.refusal!.isNotEmpty) {\n      throw OpenAIRefusalException(delta.refusal!);\n    }\n\n    return ChatResult(\n      id: id,\n      output: AIChatMessage(\n        content: delta?.content ?? 
'',\n        toolCalls:\n            delta?.toolCalls\n                ?.map(_mapMessageToolCall)\n                .toList(growable: false) ??\n            const [],\n      ),\n      finishReason: _mapFinishReason(choice?.finishReason),\n      metadata: {\n        if (created != null) 'created': created,\n        if (model != null) 'model': model,\n        if (systemFingerprint != null) 'system_fingerprint': systemFingerprint,\n      },\n      usage: _mapUsage(usage),\n      streaming: true,\n    );\n  }\n\n  AIChatMessageToolCall _mapMessageToolCall(final oai.ToolCallDelta toolCall) {\n    var args = <String, dynamic>{};\n    try {\n      args = json.decode(toolCall.function?.arguments ?? '');\n    } catch (_) {}\n    return AIChatMessageToolCall(\n      id: toolCall.id ?? '',\n      name: toolCall.function?.name ?? '',\n      argumentsRaw: toolCall.function?.arguments ?? '',\n      arguments: args,\n    );\n  }\n}\n\nextension ChatOpenAIResponseFormatMapper on ChatOpenAIResponseFormat {\n  oai.ResponseFormat toChatCompletionResponseFormat() {\n    return switch (this) {\n      ChatOpenAIResponseFormatText() => oai.ResponseFormat.text(),\n      ChatOpenAIResponseFormatJsonObject() => oai.ResponseFormat.jsonObject(),\n      final ChatOpenAIResponseFormatJsonSchema res =>\n        oai.ResponseFormat.jsonSchema(\n          name: res.jsonSchema.name,\n          description: res.jsonSchema.description,\n          schema: res.jsonSchema.schema,\n          strict: res.jsonSchema.strict,\n        ),\n    };\n  }\n}\n\nextension ChatOpenAIReasoningEffortX on ChatOpenAIReasoningEffort? {\n  oai.ReasoningEffort? 
toReasoningEffort() => switch (this) {\n    ChatOpenAIReasoningEffort.minimal => oai.ReasoningEffort.low, // deprecated\n    ChatOpenAIReasoningEffort.low => oai.ReasoningEffort.low,\n    ChatOpenAIReasoningEffort.medium => oai.ReasoningEffort.medium,\n    ChatOpenAIReasoningEffort.high => oai.ReasoningEffort.high,\n    null => null,\n  };\n}\n\nextension ChatOpenAIServiceTierX on ChatOpenAIServiceTier? {\n  String? toServiceTierString() => switch (this) {\n    ChatOpenAIServiceTier.auto => 'auto',\n    ChatOpenAIServiceTier.vDefault => 'default',\n    null => null,\n  };\n}\n\nFinishReason _mapFinishReason(final oai.FinishReason? reason) =>\n    switch (reason) {\n      oai.FinishReason.stop => FinishReason.stop,\n      oai.FinishReason.length => FinishReason.length,\n      oai.FinishReason.toolCalls => FinishReason.toolCalls,\n      oai.FinishReason.contentFilter => FinishReason.contentFilter,\n      oai.FinishReason.functionCall => FinishReason.toolCalls,\n      null => FinishReason.unspecified,\n    };\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/chat_models/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template chat_openai_options}\n/// Options to pass into the OpenAI Chat Model.\n///\n/// Available [ChatOpenAIOptions.model]s:\n/// - `gpt-5`\n/// - `gpt-5-mini`\n/// - `gpt-5-nano`\n/// - `gpt-5-2025-08-07`\n/// - `gpt-5-mini-2025-08-07`\n/// - `gpt-5-nano-2025-08-07`\n/// - `gpt-5-chat-latest`\n/// - `gpt-4.1`\n/// - `gpt-4.1-mini`\n/// - `gpt-4.1-nano`\n/// - `gpt-4.1-2025-04-14`\n/// - `gpt-4.1-mini-2025-04-14`\n/// - `gpt-4.1-nano-2025-04-14`\n/// - `o4-mini`\n/// - `o4-mini-2025-04-16`\n/// - `o3`\n/// - `o3-2025-04-16`\n/// - `o3-mini`\n/// - `o3-mini-2025-01-31`\n/// - `o1`\n/// - `o1-2024-12-17`\n/// - `o1-preview`\n/// - `o1-preview-2024-09-12`\n/// - `o1-mini`\n/// - `o1-mini-2024-09-12`\n/// - `gpt-4o`\n/// - `gpt-4o-2024-11-20`\n/// - `gpt-4o-2024-08-06`\n/// - `gpt-4o-2024-05-13`\n/// - `gpt-4o-audio-preview`\n/// - `gpt-4o-audio-preview-2024-10-01`\n/// - `gpt-4o-audio-preview-2024-12-17`\n/// - `gpt-4o-audio-preview-2025-06-03`\n/// - `gpt-4o-mini-audio-preview`\n/// - `gpt-4o-mini-audio-preview-2024-12-17`\n/// - `gpt-4o-search-preview`\n/// - `gpt-4o-mini-search-preview`\n/// - `gpt-4o-search-preview-2025-03-11`\n/// - `gpt-4o-mini-search-preview-2025-03-11`\n/// - `chatgpt-4o-latest`\n/// - `codex-mini-latest`\n/// - `gpt-4o-mini`\n/// - `gpt-4o-mini-2024-07-18`\n/// - `gpt-4.5-preview`\n/// - `gpt-4.5-preview-2025-02-27`\n/// - `gpt-4-turbo`\n/// - `gpt-4-turbo-2024-04-09`\n/// - `gpt-4-0125-preview`\n/// - `gpt-4-turbo-preview`\n/// - `gpt-4-1106-preview`\n/// - `gpt-4-vision-preview`\n/// - `gpt-4`\n/// - `gpt-4-0314`\n/// - `gpt-4-0613`\n/// - `gpt-4-32k`\n/// - `gpt-4-32k-0314`\n/// - `gpt-4-32k-0613`\n/// - `gpt-3.5-turbo`\n/// - `gpt-3.5-turbo-16k`\n/// - `gpt-3.5-turbo-0301`\n/// - `gpt-3.5-turbo-0613`\n/// - `gpt-3.5-turbo-1106`\n/// - 
`gpt-3.5-turbo-0125`\n/// - `gpt-3.5-turbo-16k-0613`\n///\n/// Mind that the list may be outdated.\n/// See https://platform.openai.com/docs/models for the latest list.\n/// {@endtemplate}\n@immutable\nclass ChatOpenAIOptions extends ChatModelOptions {\n  /// {@macro chat_openai_options}\n  const ChatOpenAIOptions({\n    super.model,\n    this.store,\n    this.reasoningEffort,\n    this.metadata,\n    this.frequencyPenalty,\n    this.logitBias,\n    this.logprobs,\n    this.topLogprobs,\n    this.maxTokens,\n    this.n,\n    this.presencePenalty,\n    this.responseFormat,\n    this.seed,\n    this.stop,\n    this.temperature,\n    this.topP,\n    super.tools,\n    super.toolChoice,\n    this.parallelToolCalls,\n    this.serviceTier,\n    this.user,\n    @Deprecated('verbosity is no longer supported by the OpenAI API')\n    this.verbosity,\n    super.concurrencyLimit,\n  });\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// their existing frequency in the text so far, decreasing the model's\n  /// likelihood to repeat the same line verbatim.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty\n  final double? frequencyPenalty;\n\n  /// Modify the likelihood of specified tokens appearing in the completion.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-logit_bias\n  final Map<String, int>? logitBias;\n\n  /// Whether or not to store the output of this chat completion request.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-store\n  final bool? store;\n\n  /// Constrains effort on reasoning for reasoning models.\n  /// Supported values are `minimal`, `low`, `medium`, and `high`.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort\n  final ChatOpenAIReasoningEffort? 
reasoningEffort;\n\n  /// Developer-defined tags and values used for filtering completions.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-metadata\n  final Map<String, String>? metadata;\n\n  /// Whether to return log probabilities of the output tokens.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-logprobs\n  final bool? logprobs;\n\n  /// Number of most likely tokens to return at each token position when\n  /// [logprobs] is set to true.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_logprobs\n  final int? topLogprobs;\n\n  /// The maximum number of tokens to generate in the chat completion.\n  /// Defaults to inf.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens\n  final int? maxTokens;\n\n  /// How many chat completion choices to generate for each input message.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-n\n  final int? n;\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// whether they appear in the text so far, increasing the model's likelihood\n  /// to talk about new topics.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty\n  final double? presencePenalty;\n\n  /// An object specifying the format that the model must output.\n  ///\n  /// Setting to [ChatOpenAIResponseFormatType.jsonObject] enables JSON mode,\n  /// which guarantees the message the model generates is valid JSON.\n  ///\n  /// Important: when using JSON mode you must still instruct the model to\n  /// produce JSON yourself via some conversation message, for example via your\n  /// system message. 
If you don't do this, the model may generate an unending\n  /// stream of whitespace until the generation reaches the token limit, which\n  /// may take a lot of time and give the appearance of a \"stuck\" request.\n  /// Also note that the message content may be partial (i.e. cut off) if\n  /// `finish_reason=\"length\"`, which indicates the generation exceeded\n  /// `max_tokens` or the conversation exceeded the max context length.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format\n  final ChatOpenAIResponseFormat? responseFormat;\n\n  /// This feature is in Beta. If specified, our system will make a best effort\n  /// to sample deterministically, such that repeated requests with the same\n  /// seed and parameters should return the same result. Determinism is not\n  /// guaranteed, and you should refer to the system_fingerprint response\n  /// parameter to monitor changes in the backend.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed\n  final int? seed;\n\n  /// Up to 4 sequences where the API will stop generating further tokens.\n  ///\n  /// Ref: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop\n  final List<String>? stop;\n\n  /// What sampling temperature to use, between 0 and 2.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature\n  final double? temperature;\n\n  /// An alternative to sampling with temperature, called nucleus sampling,\n  /// where the model considers the results of the tokens with top_p\n  /// probability mass.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p\n  final double? topP;\n\n  /// Whether to enable parallel tool calling during tool use.\n  /// By default, it is enabled.\n  ///\n  ///\n  /// Ref: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling\n  final bool? 
parallelToolCalls;\n\n  /// Specifies the latency tier to use for processing the request.\n  /// This is relevant for customers subscribed to the scale tier service.\n  final ChatOpenAIServiceTier? serviceTier;\n\n  /// A unique identifier representing your end-user, which can help OpenAI to\n  /// monitor and detect abuse.\n  ///\n  /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids\n  final String? user;\n\n  /// Constrains the verbosity of the model's response. Lower values will result\n  /// in more concise responses, while higher values will be more verbose.\n  /// Supported values are `low`, `medium`, and `high`.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/chat/create#chat-create-verbosity\n  @Deprecated('verbosity is no longer supported by the OpenAI API')\n  final ChatOpenAIVerbosity? verbosity;\n\n  @override\n  ChatOpenAIOptions copyWith({\n    final String? model,\n    final bool? store,\n    final ChatOpenAIReasoningEffort? reasoningEffort,\n    final Map<String, String>? metadata,\n    final double? frequencyPenalty,\n    final Map<String, int>? logitBias,\n    final bool? logprobs,\n    final int? topLogprobs,\n    final int? maxTokens,\n    final int? n,\n    final double? presencePenalty,\n    final ChatOpenAIResponseFormat? responseFormat,\n    final int? seed,\n    final List<String>? stop,\n    final double? temperature,\n    final double? topP,\n    final List<ToolSpec>? tools,\n    final ChatToolChoice? toolChoice,\n    final bool? parallelToolCalls,\n    final ChatOpenAIServiceTier? serviceTier,\n    final String? user,\n    final ChatOpenAIVerbosity? verbosity,\n    final int? concurrencyLimit,\n  }) {\n    return ChatOpenAIOptions(\n      model: model ?? this.model,\n      store: store ?? this.store,\n      reasoningEffort: reasoningEffort ?? this.reasoningEffort,\n      metadata: metadata ?? this.metadata,\n      frequencyPenalty: frequencyPenalty ?? 
this.frequencyPenalty,\n      logitBias: logitBias ?? this.logitBias,\n      logprobs: logprobs ?? this.logprobs,\n      topLogprobs: topLogprobs ?? this.topLogprobs,\n      maxTokens: maxTokens ?? this.maxTokens,\n      n: n ?? this.n,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      responseFormat: responseFormat ?? this.responseFormat,\n      seed: seed ?? this.seed,\n      stop: stop ?? this.stop,\n      temperature: temperature ?? this.temperature,\n      topP: topP ?? this.topP,\n      tools: tools ?? this.tools,\n      toolChoice: toolChoice ?? this.toolChoice,\n      parallelToolCalls: parallelToolCalls ?? this.parallelToolCalls,\n      serviceTier: serviceTier ?? this.serviceTier,\n      user: user ?? this.user,\n      verbosity: verbosity ?? this.verbosity,\n      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,\n    );\n  }\n\n  @override\n  ChatOpenAIOptions merge(covariant final ChatOpenAIOptions? other) {\n    return copyWith(\n      model: other?.model,\n      store: other?.store,\n      reasoningEffort: other?.reasoningEffort,\n      metadata: other?.metadata,\n      frequencyPenalty: other?.frequencyPenalty,\n      logitBias: other?.logitBias,\n      logprobs: other?.logprobs,\n      topLogprobs: other?.topLogprobs,\n      maxTokens: other?.maxTokens,\n      n: other?.n,\n      presencePenalty: other?.presencePenalty,\n      responseFormat: other?.responseFormat,\n      seed: other?.seed,\n      stop: other?.stop,\n      temperature: other?.temperature,\n      topP: other?.topP,\n      tools: other?.tools,\n      toolChoice: other?.toolChoice,\n      parallelToolCalls: other?.parallelToolCalls,\n      serviceTier: other?.serviceTier,\n      user: other?.user,\n      verbosity: other?.verbosity,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final ChatOpenAIOptions other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType 
&&\n            model == other.model &&\n            store == other.store &&\n            reasoningEffort == other.reasoningEffort &&\n            const MapEquality<String, String>().equals(\n              metadata,\n              other.metadata,\n            ) &&\n            frequencyPenalty == other.frequencyPenalty &&\n            const MapEquality<String, int>().equals(\n              logitBias,\n              other.logitBias,\n            ) &&\n            logprobs == other.logprobs &&\n            topLogprobs == other.topLogprobs &&\n            maxTokens == other.maxTokens &&\n            n == other.n &&\n            presencePenalty == other.presencePenalty &&\n            responseFormat == other.responseFormat &&\n            seed == other.seed &&\n            const ListEquality<String>().equals(stop, other.stop) &&\n            temperature == other.temperature &&\n            topP == other.topP &&\n            const ListEquality<ToolSpec>().equals(tools, other.tools) &&\n            toolChoice == other.toolChoice &&\n            parallelToolCalls == other.parallelToolCalls &&\n            serviceTier == other.serviceTier &&\n            user == other.user &&\n            verbosity == other.verbosity &&\n            concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        store.hashCode ^\n        reasoningEffort.hashCode ^\n        const MapEquality<String, String>().hash(metadata) ^\n        frequencyPenalty.hashCode ^\n        const MapEquality<String, int>().hash(logitBias) ^\n        logprobs.hashCode ^\n        topLogprobs.hashCode ^\n        maxTokens.hashCode ^\n        n.hashCode ^\n        presencePenalty.hashCode ^\n        responseFormat.hashCode ^\n        seed.hashCode ^\n        const ListEquality<String>().hash(stop) ^\n        temperature.hashCode ^\n        topP.hashCode ^\n        const ListEquality<ToolSpec>().hash(tools) ^\n        toolChoice.hashCode ^\n        
parallelToolCalls.hashCode ^\n        serviceTier.hashCode ^\n        user.hashCode ^\n        verbosity.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n\n/// {@template chat_openai_response_format}\n/// An object specifying the format that the model must output.\n/// {@endtemplate}\nsealed class ChatOpenAIResponseFormat {\n  const ChatOpenAIResponseFormat();\n\n  /// The model will respond with text.\n  static const text = ChatOpenAIResponseFormatText();\n\n  /// The model will respond with a valid JSON object.\n  static const jsonObject = ChatOpenAIResponseFormatJsonObject();\n\n  /// The model will respond with a valid JSON object that adheres to the\n  /// specified schema.\n  factory ChatOpenAIResponseFormat.jsonSchema(\n    final ChatOpenAIJsonSchema jsonSchema,\n  ) => ChatOpenAIResponseFormatJsonSchema(jsonSchema: jsonSchema);\n}\n\n/// {@template chat_openai_response_format_text}\n/// The model will respond with text.\n/// {@endtemplate}\nclass ChatOpenAIResponseFormatText extends ChatOpenAIResponseFormat {\n  /// {@macro chat_openai_response_format_text}\n  const ChatOpenAIResponseFormatText();\n}\n\n/// {@template chat_openai_response_format_json_object}\n/// The model will respond with a valid JSON object.\n/// {@endtemplate}\nclass ChatOpenAIResponseFormatJsonObject extends ChatOpenAIResponseFormat {\n  /// {@macro chat_openai_response_format_json_object}\n  const ChatOpenAIResponseFormatJsonObject();\n}\n\n/// {@template chat_openai_response_format_json_schema}\n/// The model will respond with a valid JSON object that adheres to the\n/// specified schema.\n/// {@endtemplate}\n@immutable\nclass ChatOpenAIResponseFormatJsonSchema extends ChatOpenAIResponseFormat {\n  /// {@macro chat_openai_response_format_json_schema}\n  const ChatOpenAIResponseFormatJsonSchema({required this.jsonSchema});\n\n  /// The JSON schema that the model must adhere to.\n  final ChatOpenAIJsonSchema jsonSchema;\n\n  @override\n  bool operator ==(covariant 
ChatOpenAIResponseFormatJsonSchema other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType && jsonSchema == other.jsonSchema;\n  }\n\n  @override\n  int get hashCode => jsonSchema.hashCode;\n}\n\n/// {@template chat_openai_json_schema}\n/// Specifies the schema for the response format.\n/// {@endtemplate}\n@immutable\nclass ChatOpenAIJsonSchema {\n  /// {@macro chat_openai_json_schema}\n  const ChatOpenAIJsonSchema({\n    required this.name,\n    required this.schema,\n    this.description,\n    this.strict = false,\n  });\n\n  /// The name of the response format. Must be a-z, A-Z, 0-9, or contain\n  /// underscores and dashes, with a maximum length of 64.\n  final String name;\n\n  /// A description of what the response format is for, used by the model to\n  /// determine how to respond in the format.\n  final String? description;\n\n  /// The schema for the response format, described as a JSON Schema object.\n  final Map<String, dynamic> schema;\n\n  /// Whether to enable strict schema adherence when generating the output.\n  /// If set to true, the model will always follow the exact schema defined in\n  /// the `schema` field. Only a subset of JSON Schema is supported when\n  /// `strict` is `true`. 
To learn more, read the\n  /// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n  final bool strict;\n\n  @override\n  bool operator ==(covariant ChatOpenAIJsonSchema other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            name == other.name &&\n            description == other.description &&\n            const MapEquality<String, dynamic>().equals(schema, other.schema) &&\n            strict == other.strict;\n  }\n\n  @override\n  int get hashCode {\n    return name.hashCode ^\n        description.hashCode ^\n        const MapEquality<String, dynamic>().hash(schema) ^\n        strict.hashCode;\n  }\n}\n\n/// Constrains effort on reasoning for reasoning models.\nenum ChatOpenAIReasoningEffort {\n  /// Minimal effort\n  @Deprecated('The OpenAI API no longer supports minimal. Use low instead.')\n  minimal,\n\n  /// Low effort\n  low,\n\n  /// Medium effort\n  medium,\n\n  /// High effort\n  high,\n}\n\n/// Constrains the verbosity of the model's response.\n@Deprecated('verbosity is no longer supported by the OpenAI API')\nenum ChatOpenAIVerbosity {\n  /// More concise responses\n  low,\n\n  /// Medium verbosity responses\n  medium,\n\n  /// More verbose responses\n  high,\n}\n\n/// Specifies the latency tier to use for processing the request.\n/// This is relevant for customers subscribed to the scale tier service.\nenum ChatOpenAIServiceTier {\n  /// The system will utilize scale tier credits until they are exhausted.\n  auto,\n\n  /// The request will be processed using the default service tier with a lower\n  /// uptime SLA and no latency guarantee.\n  vDefault,\n}\n\n/// {@template openai_refusal_exception}\n/// Exception thrown when OpenAI Structured Outputs API returns a refusal.\n///\n/// When using OpenAI's Structured Outputs API with user-generated input, the\n/// model may occasionally refuse to fulfill the request for safety reasons.\n///\n/// See here for more on 
refusals:\n/// https://platform.openai.com/docs/guides/structured-outputs/refusals\n/// {@endtemplate}\nclass OpenAIRefusalException implements Exception {\n  /// {@macro openai_refusal_exception}\n  const OpenAIRefusalException(this.message);\n\n  /// The refusal message.\n  final String message;\n\n  @override\n  String toString() {\n    return 'OpenAIRefusalException: $message';\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/embeddings/embeddings.dart",
    "content": "export 'openai.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/embeddings/openai.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/utils.dart';\nimport 'package:openai_dart/openai_dart.dart';\n\nimport '../utils/auth.dart';\n\n/// Wrapper around OpenAI Embeddings API.\n///\n/// Example:\n/// ```dart\n/// final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n/// final res = await embeddings.embedQuery('Hello world');\n/// ```\n///\n/// - [Embeddings guide](https://platform.openai.com/docs/guides/embeddings/limitations-risks)\n/// - [Embeddings API docs](https://platform.openai.com/docs/api-reference/embeddings)\n///\n/// You can also use this wrapper to consume OpenAI-compatible APIs like\n/// [Anyscale](https://www.anyscale.com), [Together AI](https://www.together.ai), etc.\n///\n/// ### Authentication\n///\n/// The OpenAI API uses API keys for authentication. Visit your\n/// [API Keys](https://platform.openai.com/account/api-keys) page to retrieve\n/// the API key you'll use in your requests.\n///\n/// #### Organization (optional)\n///\n/// For users who belong to multiple organizations, you can specify which\n/// organization is used for an API request. Usage from these API requests will\n/// count against the specified organization's subscription quota.\n///\n/// ```dart\n/// final client = OpenAIEmbeddings(\n///   apiKey: 'OPENAI_API_KEY',\n///   organization: 'org-dtDDtkEGoFccn5xaP5W1p3Rr',\n/// );\n/// ```\n///\n/// ### Advance\n///\n/// #### Azure OpenAI Service\n///\n/// OpenAI's models are also available as an [Azure service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview).\n///\n/// Although the Azure OpenAI API is similar to the official OpenAI API, there\n/// are subtle differences between them. 
This client is intended to be used\n/// with the official OpenAI API, but most of the functionality should work\n/// with the Azure OpenAI API as well.\n///\n/// If you want to use this client with the Azure OpenAI API (at your own risk),\n/// you can do so by instantiating the client as follows:\n///\n/// ```dart\n/// final client = OpenAIEmbeddings(\n///   baseUrl: 'https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME',\n///   headers: { 'api-key': 'YOUR_API_KEY' },\n///   queryParams: { 'api-version': 'API_VERSION' },\n/// );\n/// ```\n///\n/// - `YOUR_RESOURCE_NAME`: This value can be found in the Keys & Endpoint\n///    section when examining your resource from the Azure portal.\n/// - `YOUR_DEPLOYMENT_NAME`: This value will correspond to the custom name\n///    you chose for your deployment when you deployed a model. This value can be found under Resource Management > Deployments in the Azure portal.\n/// - `YOUR_API_KEY`: This value can be found in the Keys & Endpoint section\n///    when examining your resource from the Azure portal.\n/// - `API_VERSION`: The Azure OpenAI API version to use (e.g. 
`2023-05-15`).\n///    Try to use the [latest version available](https://github.com/Azure/azure-rest-api-specs/tree/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference),\n///    it will probably be the closest to the official OpenAI API.\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = OpenAIEmbeddings(\n///   apiKey: 'OPENAI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = OpenAIEmbeddings(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass OpenAIEmbeddings extends Embeddings {\n  /// Create a new [OpenAIEmbeddings] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your OpenAI API key. You can find your API key in the\n  ///   [OpenAI dashboard](https://platform.openai.com/account/api-keys).\n  /// - `organization`: your OpenAI organization ID (if applicable).\n  /// - [OpenAIEmbeddings.model]\n  /// - [OpenAIEmbeddings.dimensions]\n  /// - [OpenAIEmbeddings.batchSize]\n  /// - [OpenAIEmbeddings.user]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. 
You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. You\n  ///   can use this to set custom query parameters (e.g. Azure OpenAI API\n  ///   required to attach a `version` query parameter to every request).\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  OpenAIEmbeddings({\n    final String? apiKey,\n    final String? organization,\n    final String baseUrl = 'https://api.openai.com/v1',\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    this.model = 'text-embedding-3-small',\n    this.dimensions,\n    this.batchSize = 512,\n    this.user,\n  }) : _authProvider = MutableApiKeyProvider(apiKey ?? '') {\n    _client = OpenAIClient(\n      config: OpenAIConfig(\n        authProvider: _authProvider,\n        organization: organization,\n        baseUrl: buildBaseUrl(baseUrl, queryParams) ?? baseUrl,\n        defaultHeaders: headers ?? const {},\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// A client for interacting with OpenAI API.\n  late final OpenAIClient _client;\n\n  /// The auth provider for mutable API key access.\n  final MutableApiKeyProvider _authProvider;\n\n  /// ID of the model to use (e.g. 'text-embedding-3-small').\n  ///\n  /// Available models:\n  /// - `text-embedding-3-small`\n  /// - `text-embedding-3-large`\n  /// - `text-embedding-ada-002`\n  ///\n  /// Mind that the list may be outdated.\n  /// See https://platform.openai.com/docs/models for the latest list.\n  String model;\n\n  /// The number of dimensions the resulting output embeddings should have.\n  /// Only supported in `text-embedding-3` and later models.\n  int? 
dimensions;\n\n  /// The maximum number of documents to embed in a single request.\n  /// This is limited by max input tokens for the model\n  /// (e.g. 8191 tokens for text-embedding-3-small).\n  int batchSize;\n\n  /// A unique identifier representing your end-user, which can help OpenAI to\n  /// monitor and detect abuse.\n  ///\n  /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids\n  String? user;\n\n  /// Set or replace the API key.\n  set apiKey(final String value) => _authProvider.apiKey = value;\n\n  /// Get the API key.\n  String get apiKey => _authProvider.apiKey;\n\n  @override\n  Future<List<List<double>>> embedDocuments(\n    final List<Document> documents,\n  ) async {\n    // TODO use tiktoken to chunk documents that exceed the context length of the model\n    final batches = chunkList(documents, chunkSize: batchSize);\n\n    final embeddings = await Future.wait(\n      batches.map((final batch) async {\n        final data = await _client.embeddings.create(\n          EmbeddingRequest(\n            model: model,\n            input: EmbeddingInput.textList(\n              batch.map((final doc) => doc.pageContent).toList(growable: false),\n            ),\n            dimensions: dimensions,\n            user: user,\n          ),\n        );\n        return data.data.map((final d) => d.embedding);\n      }),\n    );\n\n    return embeddings.expand((final e) => e).toList(growable: false);\n  }\n\n  @override\n  Future<List<double>> embedQuery(final String query) async {\n    final data = await _client.embeddings.create(\n      EmbeddingRequest(\n        model: model,\n        input: EmbeddingInput.text(query),\n        dimensions: dimensions,\n        user: user,\n      ),\n    );\n    return data.firstEmbedding;\n  }\n\n  /// {@template openai_embeddings_list_models}\n  /// Returns a list of available embedding models from the OpenAI API.\n  ///\n  /// This method filters models to return only those suitable for 
embeddings\n  /// (models with IDs starting with `text-embedding-`).\n  ///\n  /// Example:\n  /// ```dart\n  /// final embeddings = OpenAIEmbeddings(apiKey: '...');\n  /// final models = await embeddings.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? \"unknown\"}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .where(_isEmbeddingModel)\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model is an embedding model.\n  static bool _isEmbeddingModel(final Model model) {\n    final id = model.id.toLowerCase();\n    return id.startsWith('text-embedding-');\n  }\n\n  /// Closes the client and cleans up any resources associated with it.\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/llms/llms.dart",
    "content": "export 'openai.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/llms/mappers.dart",
    "content": "// ignore_for_file: public_member_api_docs\nimport 'package:collection/collection.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:openai_dart/openai_dart.dart' as oai;\n\nextension CompletionMapper on oai.Completion {\n  List<LLMResult> toLLMResults({final bool streaming = false}) {\n    final metadata = {\n      'created': created,\n      'model': model,\n      if (systemFingerprint != null) 'system_fingerprint': systemFingerprint,\n    };\n    final totalUsage = _mapUsage(usage);\n    if (choices.isEmpty) {\n      return [\n        LLMResult(\n          id: '$id:0',\n          output: '',\n          finishReason: FinishReason.unspecified,\n          metadata: metadata,\n          usage: totalUsage,\n          streaming: streaming,\n        ),\n      ];\n    }\n\n    return choices\n        .mapIndexed(\n          (final index, final choice) => LLMResult(\n            id: '$id:$index',\n            output: choice.text,\n            finishReason: _mapFinishReason(choice.finishReason),\n            metadata: {\n              ...metadata,\n              if (choice.logprobs != null)\n                'logprobs': choice.logprobs?.toJson(),\n            },\n            usage: totalUsage,\n            streaming: streaming,\n          ),\n        )\n        .toList(growable: false);\n  }\n\n  FinishReason _mapFinishReason(final oai.FinishReason? reason) =>\n      switch (reason) {\n        oai.FinishReason.stop => FinishReason.stop,\n        oai.FinishReason.length => FinishReason.length,\n        oai.FinishReason.contentFilter => FinishReason.contentFilter,\n        _ => FinishReason.unspecified,\n      };\n\n  LanguageModelUsage _mapUsage(final oai.Usage? usage) {\n    return LanguageModelUsage(\n      promptTokens: usage?.promptTokens,\n      responseTokens: usage?.completionTokens,\n      totalTokens: usage?.totalTokens,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/llms/openai.dart",
    "content": "import 'dart:math';\n\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/utils.dart';\nimport 'package:langchain_tiktoken/langchain_tiktoken.dart';\nimport 'package:openai_dart/openai_dart.dart';\n\nimport '../utils/auth.dart';\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// Wrapper around [OpenAI Completions API](https://platform.openai.com/docs/api-reference/completions).\n///\n/// Example:\n/// ```dart\n/// final llm = OpenAI(apiKey: '...');\n/// final prompt = PromptValue.string('Tell me a joke');\n/// final res = await llm.invoke(prompt);\n/// ```\n///\n/// - [Completions guide](https://platform.openai.com/docs/guides/gpt/completions-api)\n/// - [Completions API docs](https://platform.openai.com/docs/api-reference/completions)\n///\n/// ### Call options\n///\n/// You can configure the parameters that will be used when calling the\n/// completions API in several ways:\n///\n/// **Default options:**\n///\n/// Use the [defaultOptions] parameter to set the default options. 
These\n/// options will be used unless you override them when generating completions.\n///\n/// ```dart\n/// final llm = OpenAI(\n///   apiKey: openaiApiKey,\n///   defaultOptions: const OpenAIOptions(\n///     temperature: 0.9,\n///     maxTokens: 100,\n///   ),\n/// );\n/// final prompt = PromptValue.string('Hello world!');\n/// final result = await openai.invoke(prompt);\n/// ```\n///\n/// **Call options:**\n///\n/// You can override the default options when invoking the model:\n///\n/// ```dart\n/// final res = await llm.invoke(\n///   prompt,\n///   options: const OpenAIOptions(seed: 9999),\n/// );\n/// ```\n///\n/// **Bind:**\n///\n/// You can also change the options in a [Runnable] pipeline using the bind\n/// method.\n///\n/// In this example, we are using two totally different models for each\n/// question:\n///\n/// ```dart\n/// final llm = OpenAI(apiKey: openaiApiKey);\n/// const outputParser = StringOutputParser();\n/// final prompt1 = PromptTemplate.fromTemplate('How are you {name}?');\n/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');\n/// final chain = Runnable.fromMap({\n///   'q1': prompt1 | llm.bind(const OpenAIOptions(model: 'gpt-3.5-turbo-instruct')) | outputParser,\n///   'q2': prompt2| llm.bind(const OpenAIOptions(model: 'text-davinci-003')) | outputParser,\n/// });\n/// final res = await chain.invoke({'name': 'David'});\n/// ```\n///\n/// ### Authentication\n///\n/// The OpenAI API uses API keys for authentication. Visit your\n/// [API Keys](https://platform.openai.com/account/api-keys) page to retrieve\n/// the API key you'll use in your requests.\n///\n/// #### Organization (optional)\n///\n/// For users who belong to multiple organizations, you can specify which\n/// organization is used for an API request. 
Usage from these API requests will\n/// count against the specified organization's subscription quota.\n///\n/// ```dart\n/// final client = OpenAI(\n///   apiKey: 'OPENAI_API_KEY',\n///   organization: 'org-dtDDtkEGoFccn5xaP5W1p3Rr',\n/// );\n/// ```\n///\n/// ### Advance\n///\n/// #### Azure OpenAI Service\n///\n/// OpenAI's models are also available as an [Azure service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview).\n///\n/// Although the Azure OpenAI API is similar to the official OpenAI API, there\n/// are subtle differences between them. This client is intended to be used\n/// with the official OpenAI API, but most of the functionality should work\n/// with the Azure OpenAI API as well.\n///\n/// If you want to use this client with the Azure OpenAI API (at your own risk),\n/// you can do so by instantiating the client as follows:\n///\n/// ```dart\n/// final client = OpenAI(\n///   baseUrl: 'https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME',\n///   headers: { 'api-key': 'YOUR_API_KEY' },\n///   queryParams: { 'api-version': 'API_VERSION' },\n/// );\n/// ```\n///\n/// - `YOUR_RESOURCE_NAME`: This value can be found in the Keys & Endpoint\n///    section when examining your resource from the Azure portal.\n/// - `YOUR_DEPLOYMENT_NAME`: This value will correspond to the custom name\n///    you chose for your deployment when you deployed a model. This value can be found under Resource Management > Deployments in the Azure portal.\n/// - `YOUR_API_KEY`: This value can be found in the Keys & Endpoint section\n///    when examining your resource from the Azure portal.\n/// - `API_VERSION`: The Azure OpenAI API version to use (e.g. 
`2023-05-15`).\n///    Try to use the [latest version available](https://github.com/Azure/azure-rest-api-specs/tree/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference),\n///    it will probably be the closest to the official OpenAI API.\n///\n/// #### Custom HTTP client\n///\n/// You can always provide your own implementation of `http.Client` for further\n/// customization:\n///\n/// ```dart\n/// final client = OpenAI(\n///   apiKey: 'OPENAI_API_KEY',\n///   client: MyHttpClient(),\n/// );\n/// ```\n///\n/// #### Using a proxy\n///\n/// ##### HTTP proxy\n///\n/// You can use your own HTTP proxy by overriding the `baseUrl` and providing\n/// your required `headers`:\n///\n/// ```dart\n/// final client = OpenAI(\n///   baseUrl: 'https://my-proxy.com',\n///   headers: {'x-my-proxy-header': 'value'},\n/// );\n/// ```\n///\n/// If you need further customization, you can always provide your own\n/// `http.Client`.\n///\n/// ##### SOCKS5 proxy\n///\n/// To use a SOCKS5 proxy, you can use the\n/// [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package and a\n/// custom `http.Client`.\nclass OpenAI extends BaseLLM<OpenAIOptions> {\n  /// Create a new [OpenAI] instance.\n  ///\n  /// Main configuration options:\n  /// - `apiKey`: your OpenAI API key. You can find your API key in the\n  ///   [OpenAI dashboard](https://platform.openai.com/account/api-keys).\n  /// - `organization`: your OpenAI organization ID (if applicable).\n  /// - [OpenAI.encoding]\n  /// - [OpenAI.defaultOptions]\n  ///\n  /// Advance configuration options:\n  /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can\n  ///   override this to use a different API URL, or to use a proxy.\n  /// - `headers`: global headers to send with every request. You can use\n  ///   this to set custom headers, or to override the default headers.\n  /// - `queryParams`: global query parameters to send with every request. 
You\n  ///   can use this to set custom query parameters (e.g. Azure OpenAI API\n  ///   required to attach a `version` query parameter to every request).\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  OpenAI({\n    final String? apiKey,\n    final String? organization,\n    final String? baseUrl,\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const OpenAIOptions(\n      model: defaultModel,\n      maxTokens: defaultMaxTokens,\n      concurrencyLimit: defaultConcurrencyLimit,\n    ),\n    this.encoding,\n  }) : _authProvider = MutableApiKeyProvider(apiKey ?? '') {\n    _client = OpenAIClient(\n      config: OpenAIConfig(\n        authProvider: _authProvider,\n        organization: organization,\n        baseUrl:\n            buildBaseUrl(baseUrl ?? 'https://api.openai.com/v1', queryParams) ??\n            baseUrl ??\n            'https://api.openai.com/v1',\n        defaultHeaders: headers ?? const {},\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// A client for interacting with OpenAI API.\n  late final OpenAIClient _client;\n\n  /// The auth provider for mutable API key access.\n  final MutableApiKeyProvider _authProvider;\n\n  /// The encoding to use by tiktoken when [tokenize] is called.\n  ///\n  /// By default, when [encoding] is not set, it is derived from the [model].\n  /// However, there are some cases where you may want to use this wrapper\n  /// class with a [model] not supported by tiktoken (e.g. when using Azure\n  /// embeddings or when using one of the many model providers that expose an\n  /// OpenAI-like API but with different models). 
In those cases, tiktoken won't\n  /// be able to derive the encoding to use, so you have to explicitly specify\n  /// it using this field.\n  ///\n  /// Supported encodings:\n  /// - `cl100k_base` (used by gpt-4, gpt-3.5-turbo, text-embedding-3-small).\n  ///\n  /// For an exhaustive list check:\n  /// https://github.com/mvitlov/tiktoken/blob/master/lib/tiktoken.dart\n  String? encoding;\n\n  /// Set or replace the API key.\n  set apiKey(final String value) => _authProvider.apiKey = value;\n\n  /// Get the API key.\n  String get apiKey => _authProvider.apiKey;\n\n  @override\n  String get modelType => 'openai';\n\n  /// The default model to use unless another is specified.\n  static const defaultModel = 'gpt-3.5-turbo-instruct';\n\n  /// The default max tokens to use unless another is specified.\n  static const defaultMaxTokens = 256;\n\n  /// The default concurrency limit to use unless another is specified.\n  static const defaultConcurrencyLimit = 20;\n\n  @override\n  Future<LLMResult> invoke(\n    final PromptValue input, {\n    final OpenAIOptions? options,\n  }) async {\n    final completion = await _client.completions.create(\n      _createCompletionRequest([input.toString()], options: options),\n    );\n    return completion.toLLMResults().first;\n  }\n\n  @override\n  Future<List<LLMResult>> batch(\n    final List<PromptValue> inputs, {\n    final List<OpenAIOptions>? options,\n  }) async {\n    assert(\n      options == null || options.length == 1 || options.length == inputs.length,\n    );\n\n    // If the user provided different options for each input, we can't batch\n    // them in a single call. 
We have to call the API for each input, which\n    // is the default behavior of batch\n    if (options != null &&\n        options.length > 1 &&\n        options.any((final element) => element != options.first)) {\n      return super.batch(inputs, options: options);\n    }\n\n    // Otherwise, we can batch the calls to the API\n    final finalOptions = options?.first ?? defaultOptions;\n    final concurrencyLimit = min(\n      finalOptions.concurrencyLimit,\n      defaultConcurrencyLimit,\n    );\n\n    var index = 0;\n    final results = <LLMResult>[];\n    for (final chunk in chunkList(inputs, chunkSize: concurrencyLimit)) {\n      final completion = await _client.completions.create(\n        _createCompletionRequest(\n          chunk.map((final input) => input.toString()).toList(growable: false),\n          options: options?.length == 1 ? options![0] : options?[index++],\n        ),\n      );\n      final chunkResults = completion.toLLMResults();\n      results.addAll(chunkResults);\n    }\n    return results;\n  }\n\n  @override\n  Stream<LLMResult> stream(\n    final PromptValue input, {\n    final OpenAIOptions? options,\n  }) {\n    return _client.completions\n        .createStream(\n          _createCompletionRequest(\n            [input.toString()],\n            options: options,\n            stream: true,\n          ),\n        )\n        .map(\n          (final completion) => completion.toLLMResults(streaming: true).first,\n        );\n  }\n\n  /// Creates a [CompletionRequest] from the given input.\n  CompletionRequest _createCompletionRequest(\n    final List<String> prompts, {\n    final OpenAIOptions? options,\n    final bool stream = false,\n  }) {\n    return CompletionRequest(\n      model: options?.model ?? defaultOptions.model ?? defaultModel,\n      prompt: CompletionPrompt.texts(prompts),\n      bestOf: options?.bestOf ?? defaultOptions.bestOf,\n      frequencyPenalty:\n          options?.frequencyPenalty ?? 
defaultOptions.frequencyPenalty,\n      logitBias: options?.logitBias ?? defaultOptions.logitBias,\n      logprobs: options?.logprobs ?? defaultOptions.logprobs,\n      maxTokens:\n          options?.maxTokens ?? defaultOptions.maxTokens ?? defaultMaxTokens,\n      n: options?.n ?? defaultOptions.n,\n      presencePenalty:\n          options?.presencePenalty ?? defaultOptions.presencePenalty,\n      seed: options?.seed ?? defaultOptions.seed,\n      stop: (options?.stop ?? defaultOptions.stop) != null\n          ? StopSequence.multiple(options?.stop ?? defaultOptions.stop!)\n          : null,\n      suffix: options?.suffix ?? defaultOptions.suffix,\n      temperature: options?.temperature ?? defaultOptions.temperature,\n      topP: options?.topP ?? defaultOptions.topP,\n      user: options?.user ?? defaultOptions.user,\n      streamOptions: stream ? const StreamOptions(includeUsage: true) : null,\n    );\n  }\n\n  /// Tokenizes the given prompt using tiktoken with the encoding used by the\n  /// [model]. If an encoding model is specified in [encoding] field, that\n  /// encoding is used instead.\n  ///\n  /// - [promptValue] The prompt to tokenize.\n  @override\n  Future<List<int>> tokenize(\n    final PromptValue promptValue, {\n    final OpenAIOptions? options,\n  }) async {\n    final encoding = this.encoding != null\n        ? getEncoding(this.encoding!)\n        : encodingForModel(\n            options?.model ?? defaultOptions.model ?? 
defaultModel,\n          );\n    return encoding.encode(promptValue.toString());\n  }\n\n  /// {@template openai_llm_list_models}\n  /// Returns a list of available completion models from the OpenAI API.\n  ///\n  /// This method filters models to return only those suitable for the\n  /// completions API (legacy models like gpt-3.5-turbo-instruct, davinci, etc.).\n  ///\n  /// Example:\n  /// ```dart\n  /// final llm = OpenAI(apiKey: '...');\n  /// final models = await llm.listModels();\n  /// for (final model in models) {\n  ///   print('${model.id} - owned by ${model.ownedBy ?? \"unknown\"}');\n  /// }\n  /// ```\n  /// {@endtemplate}\n  @override\n  Future<List<ModelInfo>> listModels() async {\n    final response = await _client.models.list();\n    return response.data\n        .where(_isCompletionModel)\n        .map(\n          (final m) =>\n              ModelInfo(id: m.id, ownedBy: m.ownedBy, created: m.created),\n        )\n        .toList();\n  }\n\n  /// Returns true if the model is a completion-capable model.\n  static bool _isCompletionModel(final Model model) {\n    final id = model.id.toLowerCase();\n    return id.startsWith('gpt-3.5-turbo-instruct') ||\n        id.startsWith('davinci') ||\n        id.startsWith('babbage') ||\n        id.startsWith('curie');\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/llms/types.dart",
    "content": "import 'package:collection/collection.dart';\nimport 'package:langchain_core/llms.dart';\nimport 'package:meta/meta.dart';\n\n/// {@template openai_options}\n/// Options to pass into the OpenAI LLM.\n///\n/// Available models:\n/// - `gpt-3.5-turbo-instruct`\n/// - `davinci-002`\n/// - `babbage-002`\n/// Mind that the list may be outdated.\n/// See https://platform.openai.com/docs/models for the latest list.\n/// {@endtemplate}\n@immutable\nclass OpenAIOptions extends LLMOptions {\n  /// {@macro openai_options}\n  const OpenAIOptions({\n    super.model,\n    this.bestOf,\n    this.frequencyPenalty,\n    this.logitBias,\n    this.logprobs,\n    this.maxTokens,\n    this.n,\n    this.presencePenalty,\n    this.seed,\n    this.stop,\n    this.suffix,\n    this.temperature,\n    this.topP,\n    this.user,\n    super.concurrencyLimit,\n  });\n\n  /// Generates best_of completions server-side and returns the \"best\"\n  /// (the one with the highest log probability per token).\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-best_of\n  final int? bestOf;\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// their existing frequency in the text so far, decreasing the model's\n  /// likelihood to repeat the same line verbatim.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-frequency_penalty\n  final double? frequencyPenalty;\n\n  /// Modify the likelihood of specified tokens appearing in the completion.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-logit_bias\n  final Map<String, int>? logitBias;\n\n  /// Include the log probabilities on the `logprobs` most likely tokens, as\n  /// well the chosen tokens. For example, if `logprobs` is 5, the API will\n  /// return a list of the 5 most likely tokens. 
The API will always return the\n  /// `logprob` of the sampled token, so there may be up to `logprobs+1`\n  /// elements in the response.\n  ///\n  /// The maximum value for logprobs is 5.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-logprobs\n  final int? logprobs;\n\n  /// The maximum number of tokens to generate in the completion.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-max_tokens\n  final int? maxTokens;\n\n  /// How many completions to generate for each prompt.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-n\n  final int? n;\n\n  /// Number between -2.0 and 2.0. Positive values penalize new tokens based on\n  /// whether they appear in the text so far, increasing the model's likelihood\n  /// to talk about new topics.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-presence_penalty\n  final double? presencePenalty;\n\n  /// If specified, our system will make a best effort to sample\n  /// deterministically, such that repeated requests with the same seed and\n  /// parameters should return the same result.\n  ///\n  /// Determinism is not guaranteed, and you should refer to the\n  /// `system_fingerprint` response parameter to monitor changes in the backend.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-seed\n  final int? seed;\n\n  /// Up to 4 sequences where the API will stop generating further tokens.\n  /// The returned text will not contain the stop sequence.\n  ///\n  /// Ref: https://platform.openai.com/docs/api-reference/completions/create#completions-create-stop\n  final List<String>? 
stop;\n\n  /// The suffix that comes after a completion of inserted text.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-suffix\n  final String? suffix;\n\n  /// What sampling temperature to use, between 0 and 2.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-temperature\n  final double? temperature;\n\n  /// An alternative to sampling with temperature, called nucleus sampling,\n  /// where the model considers the results of the tokens with top_p\n  /// probability mass.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/completions/create#completions-create-top_p\n  final double? topP;\n\n  /// A unique identifier representing your end-user, which can help OpenAI to\n  /// monitor and detect abuse.\n  ///\n  /// If you need to send different users in different requests, you can set\n  /// this field in [OpenAIOptions.user] instead.\n  ///\n  /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids\n  final String? user;\n\n  @override\n  OpenAIOptions copyWith({\n    final String? model,\n    final int? bestOf,\n    final double? frequencyPenalty,\n    final Map<String, int>? logitBias,\n    final int? logprobs,\n    final int? maxTokens,\n    final int? n,\n    final double? presencePenalty,\n    final int? seed,\n    final List<String>? stop,\n    final String? suffix,\n    final double? temperature,\n    final double? topP,\n    final String? user,\n    final int? concurrencyLimit,\n  }) {\n    return OpenAIOptions(\n      model: model ?? this.model,\n      bestOf: bestOf ?? this.bestOf,\n      frequencyPenalty: frequencyPenalty ?? this.frequencyPenalty,\n      logitBias: logitBias ?? this.logitBias,\n      logprobs: logprobs ?? this.logprobs,\n      maxTokens: maxTokens ?? this.maxTokens,\n      n: n ?? this.n,\n      presencePenalty: presencePenalty ?? this.presencePenalty,\n      seed: seed ?? 
this.seed,\n      stop: stop ?? this.stop,\n      suffix: suffix ?? this.suffix,\n      temperature: temperature ?? this.temperature,\n      topP: topP ?? this.topP,\n      user: user ?? this.user,\n      concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit,\n    );\n  }\n\n  @override\n  OpenAIOptions merge(covariant final OpenAIOptions? other) {\n    return copyWith(\n      model: other?.model,\n      bestOf: other?.bestOf,\n      frequencyPenalty: other?.frequencyPenalty,\n      logitBias: other?.logitBias,\n      logprobs: other?.logprobs,\n      maxTokens: other?.maxTokens,\n      n: other?.n,\n      presencePenalty: other?.presencePenalty,\n      seed: other?.seed,\n      stop: other?.stop,\n      suffix: other?.suffix,\n      temperature: other?.temperature,\n      topP: other?.topP,\n      user: other?.user,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final OpenAIOptions other) {\n    return identical(this, other) ||\n        runtimeType == other.runtimeType &&\n            model == other.model &&\n            bestOf == other.bestOf &&\n            frequencyPenalty == other.frequencyPenalty &&\n            const MapEquality<String, int>().equals(\n              logitBias,\n              other.logitBias,\n            ) &&\n            logprobs == other.logprobs &&\n            maxTokens == other.maxTokens &&\n            n == other.n &&\n            presencePenalty == other.presencePenalty &&\n            seed == other.seed &&\n            const ListEquality<String>().equals(stop, other.stop) &&\n            suffix == other.suffix &&\n            temperature == other.temperature &&\n            topP == other.topP &&\n            user == other.user &&\n            concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        bestOf.hashCode ^\n        frequencyPenalty.hashCode ^\n        const MapEquality<String, 
int>().hash(logitBias) ^\n        logprobs.hashCode ^\n        maxTokens.hashCode ^\n        n.hashCode ^\n        presencePenalty.hashCode ^\n        seed.hashCode ^\n        const ListEquality<String>().hash(stop) ^\n        suffix.hashCode ^\n        temperature.hashCode ^\n        topP.hashCode ^\n        user.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/tools/dall_e.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\nimport 'dart:async';\n\nimport 'package:http/http.dart' as http;\nimport 'package:langchain_core/tools.dart';\nimport 'package:openai_dart/openai_dart.dart';\n\nimport '../utils/auth.dart';\nimport 'types.dart';\n\nexport 'package:openai_dart/openai_dart.dart'\n    show ImageQuality, ImageResponseFormat, ImageSize, ImageStyle;\n\n/// {@template dall_e_tool}\n/// Wrapper for [OpenAI's DALL-E Image Generator API](https://platform.openai.com/docs/api-reference/images).\n///\n/// Given a prompt the model will generate an image.\n///\n/// Example:\n/// ```dart\n/// final llm = ChatOpenAI(\n///   apiKey: openAiKey,\n///   defaultOptions: const ChatOpenAIOptions(\n///     model: 'gpt-4',\n///     temperature: 0,\n///   ),\n/// );\n/// final tools = [\n///   CalculatorTool(),\n///   OpenAIDallETool(\n///     apiKey: openAiKey,\n///     defaultOptions: const OpenAIDallEToolOptions(\n///       model: 'dall-e-2',\n///       size: ImageSize.size256x256,\n///     ),\n///   ),\n/// ];\n/// final agent = ToolsAgent.fromLLMAndTools(\n///   llm: llm,\n///   tools: tools,\n/// );\n/// final executor = AgentExecutor(agent: agent);\n/// final res = await executor.run(\n///   'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. '\n///   'Return ONLY the URL of the image. Do not add any explanation.',\n/// );\n/// ```\n/// {@endtemplate}\nfinal class OpenAIDallETool extends StringTool<OpenAIDallEToolOptions> {\n  /// {@macro dall_e_tool}\n  OpenAIDallETool({\n    final String? apiKey,\n    final String? organization,\n    final String? baseUrl,\n    final Map<String, String>? headers,\n    final Map<String, dynamic>? queryParams,\n    final http.Client? client,\n    super.defaultOptions = const OpenAIDallEToolOptions(),\n  }) : _authProvider = MutableApiKeyProvider(apiKey ?? 
''),\n       super(\n         name: 'Dall-E-Image-Generator',\n         description:\n             'A wrapper around OpenAI DALL-E API. '\n             'Useful for when you need to generate images from a text '\n             'description. Input should be an image description.',\n       ) {\n    _client = OpenAIClient(\n      config: OpenAIConfig(\n        authProvider: _authProvider,\n        organization: organization,\n        baseUrl:\n            buildBaseUrl(baseUrl ?? 'https://api.openai.com/v1', queryParams) ??\n            baseUrl ??\n            'https://api.openai.com/v1',\n        defaultHeaders: headers ?? const {},\n      ),\n      httpClient: client,\n    );\n  }\n\n  /// A client for interacting with OpenAI API.\n  late final OpenAIClient _client;\n\n  /// The auth provider for mutable API key access.\n  final MutableApiKeyProvider _authProvider;\n\n  /// Set or replace the API key.\n  set apiKey(final String value) => _authProvider.apiKey = value;\n\n  /// Get the API key.\n  String get apiKey => _authProvider.apiKey;\n\n  @override\n  Future<String> invokeInternal(\n    final String toolInput, {\n    final OpenAIDallEToolOptions? options,\n  }) async {\n    try {\n      final responseFormat =\n          options?.responseFormat ?? defaultOptions.responseFormat;\n      final res = await _client.images.generate(\n        ImageGenerationRequest(\n          prompt: toolInput,\n          model: options?.model ?? defaultOptions.model,\n          n: 1,\n          quality: options?.quality ?? defaultOptions.quality,\n          responseFormat: responseFormat,\n          size: options?.size ?? defaultOptions.size,\n          style: options?.style ?? defaultOptions.style,\n          user: options?.user,\n        ),\n      );\n      final data = res.data.first;\n      return (responseFormat == ImageResponseFormat.url\n              ? 
data.url\n              : data.b64Json) ??\n          'No image generated.';\n    } catch (e) {\n      return \"I couldn't generate an image for you. I got this error: $e\";\n    }\n  }\n\n  @override\n  void close() {\n    _client.close();\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/tools/tools.dart",
    "content": "export 'dall_e.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/tools/types.dart",
    "content": "import 'package:langchain_core/tools.dart';\nimport 'package:meta/meta.dart';\n\nimport 'dall_e.dart';\n\n/// {@template open_ai_dall_e_tool_options}\n/// Generation options to pass into the [OpenAIDallETool].\n/// {@endtemplate}\n@immutable\nclass OpenAIDallEToolOptions extends ToolOptions {\n  /// {@macro open_ai_dall_e_tool_options}\n  const OpenAIDallEToolOptions({\n    this.model = 'dall-e-3',\n    this.quality = ImageQuality.standard,\n    this.responseFormat = ImageResponseFormat.url,\n    this.size = ImageSize.size1024x1024,\n    this.style = ImageStyle.vivid,\n    this.user,\n    super.concurrencyLimit,\n  });\n\n  /// ID of the model to use (e.g. `dall-e-2` or 'dall-e-3').\n  ///\n  /// See https://platform.openai.com/docs/api-reference/images/create#images-create-model\n  final String model;\n\n  /// The quality of the image that will be generated. [ImageQuality.hd]\n  /// creates images with finer details and greater consistency across the\n  /// image. This param is only supported for `dall-e-3`\n  ///\n  /// See https://platform.openai.com/docs/api-reference/images/create#images-create-quality\n  final ImageQuality quality;\n\n  /// The format in which the generated images are returned.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/images/create#images-create-response_format\n  final ImageResponseFormat responseFormat;\n\n  /// The size of the generated images.\n  ///\n  /// Must be one of [ImageSize.size256x256], [ImageSize.size512x512], or\n  /// [ImageSize.size1024x1024] for `dall-e-2`.\n  ///\n  /// Must be one of [ImageSize.size1024x1024], [ImageSize.size1792x1024], or\n  /// [ImageSize.size1024x1792] for `dall-e-3` models.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/images/create#images-create-size\n  final ImageSize size;\n\n  /// The style of the generated images.\n  ///\n  /// [ImageStyle.vivid] causes the model to lean towards generating hyper-real\n  /// and dramatic images. 
[ImageStyle.natural] causes the model to produce\n  /// more natural, less hyper-real looking images.\n  ///\n  /// This param is only supported for `dall-e-3`.\n  ///\n  /// See https://platform.openai.com/docs/api-reference/images/create#images-create-style\n  final ImageStyle style;\n\n  /// A unique identifier representing your end-user, which can help OpenAI to\n  /// monitor and detect abuse.\n  ///\n  /// If you need to send different users in different requests, you can set\n  /// this field in [ChatOpenAIOptions] instead.\n  ///\n  /// Ref: https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids\n  final String? user;\n\n  @override\n  OpenAIDallEToolOptions copyWith({\n    final String? model,\n    final ImageQuality? quality,\n    final ImageResponseFormat? responseFormat,\n    final ImageSize? size,\n    final ImageStyle? style,\n    final String? user,\n    final int? concurrencyLimit,\n  }) {\n    return OpenAIDallEToolOptions(\n      model: model ?? this.model,\n      quality: quality ?? this.quality,\n      responseFormat: responseFormat ?? this.responseFormat,\n      size: size ?? this.size,\n      style: style ?? this.style,\n      user: user ?? this.user,\n      concurrencyLimit: concurrencyLimit ?? super.concurrencyLimit,\n    );\n  }\n\n  @override\n  OpenAIDallEToolOptions merge(covariant final OpenAIDallEToolOptions? 
other) {\n    return copyWith(\n      model: other?.model,\n      quality: other?.quality,\n      responseFormat: other?.responseFormat,\n      size: other?.size,\n      style: other?.style,\n      user: other?.user,\n      concurrencyLimit: other?.concurrencyLimit,\n    );\n  }\n\n  @override\n  bool operator ==(covariant final OpenAIDallEToolOptions other) {\n    return model == other.model &&\n        quality == other.quality &&\n        responseFormat == other.responseFormat &&\n        size == other.size &&\n        style == other.style &&\n        user == other.user &&\n        concurrencyLimit == other.concurrencyLimit;\n  }\n\n  @override\n  int get hashCode {\n    return model.hashCode ^\n        quality.hashCode ^\n        responseFormat.hashCode ^\n        size.hashCode ^\n        style.hashCode ^\n        user.hashCode ^\n        concurrencyLimit.hashCode;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_openai/lib/src/utils/auth.dart",
    "content": "import 'package:openai_dart/openai_dart.dart';\n\n/// A mutable [AuthProvider] that allows changing the API key at runtime.\n///\n/// This is the intended extensibility pattern for the API client architecture:\n/// [AuthProvider.getHeaders] is called per-request, so changing [apiKey]\n/// takes effect on the next request.\nclass MutableApiKeyProvider implements AuthProvider {\n  /// Creates a [MutableApiKeyProvider] with the given initial API key.\n  MutableApiKeyProvider(this.apiKey);\n\n  /// The current API key.\n  String apiKey;\n\n  @override\n  Map<String, String> getHeaders() =>\n      apiKey.isNotEmpty ? {'Authorization': 'Bearer $apiKey'} : {};\n}\n\n/// Builds a base URL that includes query parameters.\n///\n/// The [RequestBuilder.buildUrl] in openai_dart automatically extracts and\n/// merges query params from the base URL. This helper encodes [queryParams]\n/// into [baseUrl] so they are sent with every request.\n///\n/// This is used for Azure OpenAI API's `api-version` query parameter, etc.\nString? buildBaseUrl(String? baseUrl, Map<String, dynamic>? queryParams) {\n  if (baseUrl == null || queryParams == null || queryParams.isEmpty) {\n    return baseUrl;\n  }\n  final uri = Uri.parse(baseUrl);\n  final filtered = {\n    ...uri.queryParameters,\n    for (final e in queryParams.entries)\n      if (e.value != null) e.key: e.value.toString(),\n  };\n  return uri.replace(queryParameters: filtered).toString();\n}\n"
  },
  {
    "path": "packages/langchain_openai/pubspec.yaml",
    "content": "name: langchain_openai\ndescription: LangChain.dart integration module for OpenAI (GPT-5, o3, Embeddings, DALL·E, etc.).\nversion: 0.8.1+1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_openai\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_openai\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - gpt\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  collection: ^1.19.1\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  langchain_tiktoken: ^1.0.1\n  meta: ^1.16.0\n  openai_dart: ^1.4.0\n  uuid: ^4.5.1\n\ndev_dependencies:\n  langchain: ^0.8.1\n  langchain_community: 0.4.0+2\n  test: ^1.26.2\n"
  },
  {
    "path": "packages/langchain_openai/test/agents/assets/state_of_the_union.txt",
    "content": "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.  \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \n\nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. \n\nLet each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n\nPlease rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n\nThroughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos.   \n\nThey keep moving.   \n\nAnd the costs and the threats to America and the world keep rising.   \n\nThat’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \n\nThe United States is a member along with 29 other nations. \n\nIt matters. American diplomacy matters. American resolve matters. 
\n\nPutin’s latest attack on Ukraine was premeditated and unprovoked. \n\nHe rejected repeated efforts at diplomacy. \n\nHe thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready.  Here is what we did.   \n\nWe prepared extensively and carefully. \n\nWe spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. \n\nI spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression.  \n\nWe countered Russia’s lies with truth.   \n\nAnd now that he has acted the free world is holding him accountable. \n\nAlong with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \n\nWe are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \n\nTogether with our allies –we are right now enforcing powerful economic sanctions. \n\nWe are cutting off Russia’s largest banks from the international financial system.  \n\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless.   \n\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.  \n\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n\nThe U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.  \n\nWe are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. 
\n\nAnd tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \n\nThe Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. \n\nTogether with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \n\nWe are giving more than $1 Billion in direct assistance to Ukraine. \n\nAnd we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering.  \n\nLet me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.  \n\nOur forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west.  \n\nFor that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \n\nAs I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.  \n\nAnd we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them.  \n\nPutin has unleashed violence and chaos.  But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \n\nAnd a proud Ukrainian people, who have known 30 years  of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.  \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. 
\n\nAnd I’m taking robust action to make sure the pain of our sanctions  is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.  \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies.  \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. \n\nWhen the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n\nWhile it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \n\nWe see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.  \n\nIn the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. \n\nThis is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \n\nTo our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \n\nPutin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. \n\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n\nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n\nThe pandemic has been punishing. 
\n\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n\nI understand. \n\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n\nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan.  \n\nBecause people were hurting. We needed to act, and we did. \n\nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis. \n\nIt fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans.  \n\nHelped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n\nAnd as my Dad used to say, it gave people a little breathing room. \n\nAnd unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. \n\nAnd it worked. It created jobs. Lots of jobs. \n\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year  \nthan ever before in the history of America. \n\nOur economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long.  \n\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. \n\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \n\nVice President Harris and I ran for office with a new economic vision for America. \n\nInvest in America. Educate Americans. Grow the workforce. 
Build the economy from the bottom up  \nand the middle out, not from the top down.  \n\nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \n\nAmerica used to have the best roads, bridges, and airports on Earth. \n\nNow our infrastructure is ranked 13th in the world. \n\nWe won’t be able to compete for the jobs of the 21st Century if we don’t fix that. \n\nThat’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. \n\nThis was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. \n\nWe’re done talking about infrastructure weeks. \n\nWe’re going to have an infrastructure decade. \n\nIt is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China.  \n\nAs I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. \n\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \n\n4,000 projects have already been announced. \n\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \n\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. \n\nThe federal government spends about $600 Billion a year to keep the country safe and secure. 
\n\nThere’s been a law on the books for almost a century \nto make sure taxpayers’ dollars support American jobs and businesses. \n\nEvery Administration says they’ll do it, but we are actually doing it. \n\nWe will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. \n\nBut to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. \n\nThat’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. \n\nLet me give you one example of why it’s so important to pass it. \n\nIf you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. \n\nIt won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. \n\nThis is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. \n\nUp to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. \n\nSome of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. \n\nSmartphones. The Internet. Technology we have yet to invent. \n\nBut that’s just the beginning. \n\nIntel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from  \n$20 billion to $100 billion. \n\nThat would be one of the biggest investments in manufacturing in American history. \n\nAnd all they’re waiting for is for you to pass this bill. \n\nSo let’s not wait any longer. Send it to my desk. I’ll sign it.  \n\nAnd we will really take off. \n\nAnd Intel is not alone. \n\nThere’s something happening in America. \n\nJust look around and you’ll see an amazing story. 
\n\nThe rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing.   \n\nCompanies are choosing to build new factories here, when just a few years ago, they would have built them overseas. \n\nThat’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. \n\nGM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. \n\nAll told, we created 369,000 new manufacturing jobs in America just last year. \n\nPowered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. \n\nAs Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \n\nIt’s time. \n\nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.  \n\nInflation is robbing them of the gains they might otherwise feel. \n\nI get it. That’s why my top priority is getting prices under control. \n\nLook, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. \n\nThe pandemic also disrupted global supply chains. \n\nWhen factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. \n\nLook at cars. \n\nLast year, there weren’t enough semiconductors to make all the cars that people wanted to buy. \n\nAnd guess what, prices of automobiles went up. \n\nSo—we have a choice. \n\nOne way to fight inflation is to drive down wages and make Americans poorer.  \n\nI have a better plan to fight inflation. \n\nLower your costs, not your wages. \n\nMake more cars and semiconductors in America. \n\nMore infrastructure and innovation in America. \n\nMore goods moving faster and cheaper in America. 
\n\nMore jobs where you can earn a good living in America. \n\nAnd instead of relying on foreign supply chains, let’s make it in America. \n\nEconomists call it “increasing the productive capacity of our economy.” \n\nI call it building a better America. \n\nMy plan to fight inflation will lower your costs and lower the deficit. \n\n17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: \n\nFirst – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.  \n\nHe and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make.  \n\nBut drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. \n\nImagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it.  \n\nWhat it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. \n\nJoshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy.  \n\nFor Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it.  \n\nDrug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. \n\nLook, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. \n\nSecond – cut energy costs for families an average of $500 a year by combatting climate change.  
\n\nLet’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more;  lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. \n\nThird – cut the cost of child care. Many families pay up to $14,000 a year for child care per child.  \n\nMiddle-class and working families shouldn’t have to pay more than 7% of their income for care of young children.  \n\nMy plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. \n\nMy plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old.  \n\nAll of these will lower costs. \n\nAnd under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody.  \n\nThe one thing all Americans agree on is that the tax system is not fair. We have to fix it.  \n\nI’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. \n\nJust last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax.  \n\nThat’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. \n\nWe got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. \n\nThat’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter.  \n\nSo that’s my plan. It will grow the economy and lower costs for families. \n\nSo what are we waiting for? Let’s get this done. 
And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation.  \n\nMy plan will not only lower costs to give families a fair shot, it will lower the deficit. \n\nThe previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n\nBut in my administration, the watchdogs have been welcomed back. \n\nWe’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.  \n\nAnd tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n\nBy the end of this year, the deficit will be down to less than half what it was before I took office.  \n\nThe only president ever to cut the deficit by more than one trillion dollars in a single year. \n\nLowering your costs also means demanding more competition. \n\nI’m a capitalist, but capitalism without competition isn’t capitalism. \n\nIt’s exploitation—and it drives up prices. \n\nWhen corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. \n\nWe see it happening with ocean carriers moving goods in and out of America. \n\nDuring the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. \n\nTonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.  \n\nThat ends on my watch. \n\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. 
\n\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n\nLet’s pass the Paycheck Fairness Act and paid leave.  \n\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. \n\nAnd let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped.  \n\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n\nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n\nAnd I know you’re tired, frustrated, and exhausted. \n\nBut I also know this. \n\nBecause of the progress we’ve made, because of your resilience and the tools we have, tonight I can say  \nwe are moving forward safely, back to more normal routines.  \n\nWe’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July.  \n\nJust a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. \n\nUnder these new guidelines, most Americans in most of the country can now be mask free.   \n\nAnd based on the projections, more of the country will reach that point across the next couple of weeks. \n\nThanks to the progress we have made this past year, COVID-19 need no longer control our lives.  \n\nI know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. \n\nWe will continue to combat the virus as we do other diseases. 
And because this is a virus that mutates and spreads, we will stay on guard. \n\nHere are four common sense steps as we move forward safely.  \n\nFirst, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. \n\nWe will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. \n\nThe scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. \n\nWe’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%.  \n\nWe’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month.  \n\nAnd we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost.  \n\nIf you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. \n\nWe’re leaving no one behind or ignoring anyone’s needs as we move forward. \n\nAnd on testing, we have made hundreds of millions of tests available for you to order for free.   \n\nEven if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. \n\nSecond – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. \n\nIf necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years.  \n\nAnd, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. \n\nI cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does.  
\n\nThird – we can end the shutdown of schools and businesses. We have the tools we need. \n\nIt’s time for Americans to get back to work and fill our great downtowns again.  People working from home can feel safe to begin to return to the office.   \n\nWe’re doing that here in the federal government. The vast majority of federal workers will once again work in person. \n\nOur schools are open. Let’s keep it that way. Our kids need to be in school. \n\nAnd with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. \n\nWe achieved this because we provided free vaccines, treatments, tests, and masks. \n\nOf course, continuing this costs money. \n\nI will soon send Congress a request. \n\nThe vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.   \n\nFourth, we will continue vaccinating the world.     \n\nWe’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. \n\nAnd we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease.  \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans.  \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. 
\n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n\nI’ve worked on these issues a long time. \n\nI know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n\nSo let’s not abandon our streets. Or choose between safety and equal justice. \n\nLet’s come together to protect our communities, restore trust, and hold law enforcement accountable. \n\nThat’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n\nThat’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope.  \n\nWe should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \n\nI ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.  \n\nAnd I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. \n\nAnd I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \n\nBan assault weapons and high-capacity magazines. \n\nRepeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. \n\nThese laws don’t infringe on the Second Amendment. They save lives. 
\n\nThe most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. \n\nIn state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n\nWe cannot let this happen. \n\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling.  \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.  \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. 
\n\nWe can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. \n\nProvide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. \n\nRevise our laws so businesses have the workers they need and families don’t wait decades to reunite. \n\nIt’s not only the right thing to do—it’s the economically smart thing to do. \n\nThat’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. \n\nLet’s get it done once and for all. \n\nAdvancing liberty and justice also requires protecting the rights of women. \n\nThe constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. \n\nIf we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. \n\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together.  \n\nFirst, beat the opioid epidemic. \n\nThere is so much we can do. 
Increase funding for prevention, treatment, harm reduction, and recovery.  \n\nGet rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. \n\nIf you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. \n\nSecond, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down.  \n\nThe American Rescue Plan gave schools money to hire teachers and help students make up for lost learning.  \n\nI urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. \n\nChildren were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. \n\nAs Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. \n\nIt’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. \n\nAnd let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. \n\nThird, support our veterans. \n\nVeterans are the best of us. \n\nI’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. \n\nMy administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free.  \n\nOur troops in Iraq and Afghanistan faced many dangers. \n\nOne was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. 
\n\nWhen they came home, many of the world’s fittest and best trained warriors were never the same. \n\nHeadaches. Numbness. Dizziness. \n\nA cancer that would put them in a flag-draped coffin. \n\nI know. \n\nOne of those soldiers was my son Major Beau Biden. \n\nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n\nBut I’m committed to finding out everything we can. \n\nCommitted to military families like Danielle Robinson from Ohio. \n\nThe widow of Sergeant First Class Heath Robinson.  \n\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n\nStationed near Baghdad, just yards from burn pits the size of football fields. \n\nHeath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. \n\nBut cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. \n\nDanielle says Heath was a fighter to the very end. \n\nHe didn’t know how to stop fighting, and neither did she. \n\nThrough her pain she found purpose to demand we do better. \n\nTonight, Danielle—we are. \n\nThe VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \n\nAnd tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. \n\nI’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \n\nAnd fourth, let’s end cancer as we know it. \n\nThis is personal to me and Jill, to Kamala, and to so many of you. \n\nCancer is the #2 cause of death in America–second only to heart disease. \n\nLast month, I announced our plan to supercharge  \nthe Cancer Moonshot that President Obama asked me to lead six years ago. 
\n\nOur goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases.  \n\nMore support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more.  \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.  \n\nWe will meet the test. \n\nTo protect freedom and liberty, to expand fairness and opportunity. \n\nWe will save democracy. \n\nAs hard as these times have been, I am more optimistic about America today than I have been my whole life. \n\nBecause I see the future that is within our grasp. \n\nBecause I know there is simply nothing beyond our capacity. \n\nWe are the only nation on Earth that has always turned every crisis we have faced into an opportunity. \n\nThe only nation that can be defined by a single word: possibilities. \n\nSo on this night, in our 245th year as a nation, I have come to report on the State of the Union. \n\nAnd my report is this: the State of the Union is strong—because you, the American people, are strong. 
\n\nWe are stronger today than we were a year ago. \n\nAnd we will be stronger a year from now than we are today. \n\nNow is our moment to meet and overcome the challenges of our time. \n\nAnd we will, as one people. \n\nOne America. \n\nThe United States of America. \n\nMay God bless you all. May God protect our troops."
  },
  {
    "path": "packages/langchain_openai/test/agents/tools_test.dart",
    "content": "// ignore_for_file: deprecated_member_use_from_same_package, unnecessary_async\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart'\n    show AgentExecutor, ConversationBufferMemory;\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/agents.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:meta/meta.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenAIToolsAgent tests', () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n    test('Test OpenAIToolsAgent with calculator tool', () async {\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(temperature: 0),\n      );\n\n      final tool = CalculatorTool();\n      final tools = [tool];\n\n      final agent = OpenAIToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n\n      final executor = AgentExecutor(agent: agent);\n\n      final res = await executor.run(\n        'What is 40 raised to the 0.43 power with 3 decimals? 
',\n      );\n\n      expect(res, contains('4.88'));\n    });\n\n    Future<void> testMemory({required final bool returnMessages}) async {\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(temperature: 0),\n      );\n\n      final tool = Tool.fromFunction<_SearchInput, String>(\n        name: 'search',\n        description: 'Tool for searching the web.',\n        inputJsonSchema: const {\n          'type': 'object',\n          'properties': {\n            'query': {\n              'type': 'string',\n              'description': 'The query to search for',\n            },\n            'n': {\n              'type': 'number',\n              'description': 'The number of results to return',\n            },\n          },\n          'required': ['query'],\n        },\n        func: (final _SearchInput toolInput) {\n          final n = toolInput.n;\n          final res = List<String>.generate(n, (final i) => 'Result ${i + 1}');\n          return 'Results:\\n${res.join('\\n')}';\n        },\n        getInputFromJson: _SearchInput.fromJson,\n      );\n      final tools = [tool];\n\n      final memory = ConversationBufferMemory(returnMessages: returnMessages);\n      final agent = OpenAIToolsAgent.fromLLMAndTools(\n        llm: llm,\n        tools: tools,\n        memory: memory,\n      );\n\n      final executor = AgentExecutor(agent: agent);\n\n      final res1 = await executor.run(\n        'Search for cats. 
Return only 3 results.',\n      );\n\n      expect(res1, contains('Result 1'));\n      expect(res1, contains('Result 2'));\n      expect(res1, contains('Result 3'));\n      expect(res1, isNot(contains('Result 4')));\n\n      final res2 = await executor.run(\n        'How many results did the search return?',\n      );\n      expect(res2, contains('3'));\n      expect(res2, isNot(contains('1')));\n      expect(res2, isNot(contains('2')));\n      expect(res2, isNot(contains('4')));\n\n      final res3 = await executor.run('What was the last result?');\n      expect(res3, contains('Result 3'));\n    }\n\n    test('Test OpenAIToolsAgent with messages memory', () async {\n      await testMemory(returnMessages: true);\n    });\n\n    test('Test OpenAIToolsAgent with string memory throws error', () {\n      expect(\n        () async => testMemory(returnMessages: false),\n        throwsA(isA<AssertionError>()),\n      );\n    });\n  });\n\n  group('OpenAIToolsAgent LCEL equivalent test', () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n    final prompt = ChatPromptTemplate.fromPromptMessages([\n      SystemChatMessagePromptTemplate.fromTemplate(\n        'You are a helpful assistant',\n      ),\n      HumanChatMessagePromptTemplate.fromTemplate('{input}'),\n      const MessagesPlaceholder(variableName: 'agent_scratchpad'),\n    ]);\n\n    final tool = CalculatorTool();\n\n    final model = ChatOpenAI(\n      apiKey: openaiApiKey,\n      defaultOptions: const ChatOpenAIOptions(temperature: 0),\n    ).bind(ChatOpenAIOptions(tools: [tool]));\n\n    final agent = Agent.fromRunnable(\n      Runnable.mapInput(\n        (final AgentPlanInput planInput) => <String, dynamic>{\n          'input': planInput.inputs['input'],\n          'agent_scratchpad': planInput.intermediateSteps\n              .map((final s) {\n                return s.action.messageLog +\n                    [\n                      ChatMessage.tool(\n                        toolCallId: 
s.action.id,\n                        content: s.observation,\n                      ),\n                    ];\n              })\n              .expand((final m) => m)\n              .toList(growable: false),\n        },\n      ).pipe(prompt).pipe(model).pipe(const OpenAIToolsAgentOutputParser()),\n      tools: [tool],\n    );\n\n    final executor = AgentExecutor(agent: agent);\n\n    test('Test OpenAIToolsAgent LCEL equivalent', () async {\n      final res = await executor.invoke({\n        'input': 'What is 40 raised to the 0.43 power with 3 decimals?',\n      });\n      expect(res['output'], contains('4.88'));\n    });\n  });\n}\n\n@immutable\nclass _SearchInput {\n  const _SearchInput({required this.query, required this.n});\n\n  final String query;\n  final int n;\n\n  _SearchInput.fromJson(final Map<String, dynamic> json)\n    : this(query: json['query'] as String, n: json['n'] as int);\n\n  @override\n  bool operator ==(covariant _SearchInput other) =>\n      identical(this, other) || query == other.query && n == other.n;\n\n  @override\n  int get hashCode => query.hashCode ^ n.hashCode;\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chains/assets/state_of_the_union.txt",
    "content": "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.  \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \n\nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. \n\nLet each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n\nPlease rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n\nThroughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos.   \n\nThey keep moving.   \n\nAnd the costs and the threats to America and the world keep rising.   \n\nThat’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \n\nThe United States is a member along with 29 other nations. \n\nIt matters. American diplomacy matters. American resolve matters. 
\n\nPutin’s latest attack on Ukraine was premeditated and unprovoked. \n\nHe rejected repeated efforts at diplomacy. \n\nHe thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready.  Here is what we did.   \n\nWe prepared extensively and carefully. \n\nWe spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. \n\nI spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression.  \n\nWe countered Russia’s lies with truth.   \n\nAnd now that he has acted the free world is holding him accountable. \n\nAlong with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \n\nWe are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \n\nTogether with our allies –we are right now enforcing powerful economic sanctions. \n\nWe are cutting off Russia’s largest banks from the international financial system.  \n\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless.   \n\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.  \n\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n\nThe U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.  \n\nWe are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. 
\n\nAnd tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \n\nThe Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. \n\nTogether with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \n\nWe are giving more than $1 Billion in direct assistance to Ukraine. \n\nAnd we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering.  \n\nLet me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.  \n\nOur forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west.  \n\nFor that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \n\nAs I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.  \n\nAnd we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them.  \n\nPutin has unleashed violence and chaos.  But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \n\nAnd a proud Ukrainian people, who have known 30 years  of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.  \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. 
\n\nAnd I’m taking robust action to make sure the pain of our sanctions  is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.  \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies.  \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. \n\nWhen the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n\nWhile it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \n\nWe see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.  \n\nIn the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. \n\nThis is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \n\nTo our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \n\nPutin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. \n\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n\nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n\nThe pandemic has been punishing. 
\n\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n\nI understand. \n\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n\nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan.  \n\nBecause people were hurting. We needed to act, and we did. \n\nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis. \n\nIt fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans.  \n\nHelped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n\nAnd as my Dad used to say, it gave people a little breathing room. \n\nAnd unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. \n\nAnd it worked. It created jobs. Lots of jobs. \n\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year  \nthan ever before in the history of America. \n\nOur economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long.  \n\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. \n\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \n\nVice President Harris and I ran for office with a new economic vision for America. \n\nInvest in America. Educate Americans. Grow the workforce. 
Build the economy from the bottom up  \nand the middle out, not from the top down.  \n\nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \n\nAmerica used to have the best roads, bridges, and airports on Earth. \n\nNow our infrastructure is ranked 13th in the world. \n\nWe won’t be able to compete for the jobs of the 21st Century if we don’t fix that. \n\nThat’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. \n\nThis was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. \n\nWe’re done talking about infrastructure weeks. \n\nWe’re going to have an infrastructure decade. \n\nIt is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China.  \n\nAs I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. \n\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \n\n4,000 projects have already been announced. \n\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \n\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs. \n\nThe federal government spends about $600 Billion a year to keep the country safe and secure. 
\n\nThere’s been a law on the books for almost a century \nto make sure taxpayers’ dollars support American jobs and businesses. \n\nEvery Administration says they’ll do it, but we are actually doing it. \n\nWe will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. \n\nBut to compete for the best jobs of the future, we also need to level the playing field with China and other competitors. \n\nThat’s why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing. \n\nLet me give you one example of why it’s so important to pass it. \n\nIf you travel 20 miles east of Columbus, Ohio, you’ll find 1,000 empty acres of land. \n\nIt won’t look like much, but if you stop and look closely, you’ll see a “Field of dreams,” the ground on which America’s future will be built. \n\nThis is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”. \n\nUp to eight state-of-the-art factories in one place. 10,000 new good-paying jobs. \n\nSome of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives. \n\nSmartphones. The Internet. Technology we have yet to invent. \n\nBut that’s just the beginning. \n\nIntel’s CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from  \n$20 billion to $100 billion. \n\nThat would be one of the biggest investments in manufacturing in American history. \n\nAnd all they’re waiting for is for you to pass this bill. \n\nSo let’s not wait any longer. Send it to my desk. I’ll sign it.  \n\nAnd we will really take off. \n\nAnd Intel is not alone. \n\nThere’s something happening in America. \n\nJust look around and you’ll see an amazing story. 
\n\nThe rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing.   \n\nCompanies are choosing to build new factories here, when just a few years ago, they would have built them overseas. \n\nThat’s what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country. \n\nGM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan. \n\nAll told, we created 369,000 new manufacturing jobs in America just last year. \n\nPowered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. \n\nAs Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \n\nIt’s time. \n\nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.  \n\nInflation is robbing them of the gains they might otherwise feel. \n\nI get it. That’s why my top priority is getting prices under control. \n\nLook, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories. \n\nThe pandemic also disrupted global supply chains. \n\nWhen factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up. \n\nLook at cars. \n\nLast year, there weren’t enough semiconductors to make all the cars that people wanted to buy. \n\nAnd guess what, prices of automobiles went up. \n\nSo—we have a choice. \n\nOne way to fight inflation is to drive down wages and make Americans poorer.  \n\nI have a better plan to fight inflation. \n\nLower your costs, not your wages. \n\nMake more cars and semiconductors in America. \n\nMore infrastructure and innovation in America. \n\nMore goods moving faster and cheaper in America. 
\n\nMore jobs where you can earn a good living in America. \n\nAnd instead of relying on foreign supply chains, let’s make it in America. \n\nEconomists call it “increasing the productive capacity of our economy.” \n\nI call it building a better America. \n\nMy plan to fight inflation will lower your costs and lower the deficit. \n\n17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: \n\nFirst – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.  \n\nHe and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make.  \n\nBut drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua’s mom. \n\nImagine what it’s like to look at your child who needs insulin and have no idea how you’re going to pay for it.  \n\nWhat it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be. \n\nJoshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy.  \n\nFor Joshua, and for the 200,000 other young people with Type 1 diabetes, let’s cap the cost of insulin at $35 a month so everyone can afford it.  \n\nDrug companies will still do very well. And while we’re at it let Medicare negotiate lower prices for prescription drugs, like the VA already does. \n\nLook, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let’s close the coverage gap and make those savings permanent. \n\nSecond – cut energy costs for families an average of $500 a year by combatting climate change.  
\n\nLet’s provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America’s clean energy production in solar, wind, and so much more;  lower the price of electric vehicles, saving you another $80 a month because you’ll never have to pay at the gas pump again. \n\nThird – cut the cost of child care. Many families pay up to $14,000 a year for child care per child.  \n\nMiddle-class and working families shouldn’t have to pay more than 7% of their income for care of young children.  \n\nMy plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn’t afford child care, to be able to get back to work. \n\nMy plan doesn’t stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old.  \n\nAll of these will lower costs. \n\nAnd under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody.  \n\nThe one thing all Americans agree on is that the tax system is not fair. We have to fix it.  \n\nI’m not looking to punish anyone. But let’s make sure corporations and the wealthiest Americans start paying their fair share. \n\nJust last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax.  \n\nThat’s simply not fair. That’s why I’ve proposed a 15% minimum tax rate for corporations. \n\nWe got more than 130 countries to agree on a global minimum tax rate so companies can’t get out of paying their taxes at home by shipping jobs and factories overseas. \n\nThat’s why I’ve proposed closing loopholes so the very wealthy don’t pay a lower tax rate than a teacher or a firefighter.  \n\nSo that’s my plan. It will grow the economy and lower costs for families. \n\nSo what are we waiting for? Let’s get this done. 
And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation.  \n\nMy plan will not only lower costs to give families a fair shot, it will lower the deficit. \n\nThe previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n\nBut in my administration, the watchdogs have been welcomed back. \n\nWe’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.  \n\nAnd tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n\nBy the end of this year, the deficit will be down to less than half what it was before I took office.  \n\nThe only president ever to cut the deficit by more than one trillion dollars in a single year. \n\nLowering your costs also means demanding more competition. \n\nI’m a capitalist, but capitalism without competition isn’t capitalism. \n\nIt’s exploitation—and it drives up prices. \n\nWhen corporations don’t have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under. \n\nWe see it happening with ocean carriers moving goods in and out of America. \n\nDuring the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits. \n\nTonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n\nAnd as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.  \n\nThat ends on my watch. \n\nMedicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. 
\n\nWe’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n\nLet’s pass the Paycheck Fairness Act and paid leave.  \n\nRaise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n\nLet’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges. \n\nAnd let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped.  \n\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n\nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n\nAnd I know you’re tired, frustrated, and exhausted. \n\nBut I also know this. \n\nBecause of the progress we’ve made, because of your resilience and the tools we have, tonight I can say  \nwe are moving forward safely, back to more normal routines.  \n\nWe’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July.  \n\nJust a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. \n\nUnder these new guidelines, most Americans in most of the country can now be mask free.   \n\nAnd based on the projections, more of the country will reach that point across the next couple of weeks. \n\nThanks to the progress we have made this past year, COVID-19 need no longer control our lives.  \n\nI know some are talking about “living with COVID-19”. Tonight – I say that we will never just accept living with COVID-19. \n\nWe will continue to combat the virus as we do other diseases. 
And because this is a virus that mutates and spreads, we will stay on guard. \n\nHere are four common sense steps as we move forward safely.  \n\nFirst, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you’re vaccinated and boosted you have the highest degree of protection. \n\nWe will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children. \n\nThe scientists are working hard to get that done and we’ll be ready with plenty of vaccines when they do. \n\nWe’re also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%.  \n\nWe’ve ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month.  \n\nAnd we’re launching the “Test to Treat” initiative so people can get tested at a pharmacy, and if they’re positive, receive antiviral pills on the spot at no cost.  \n\nIf you’re immunocompromised or have some other vulnerability, we have treatments and free high-quality masks. \n\nWe’re leaving no one behind or ignoring anyone’s needs as we move forward. \n\nAnd on testing, we have made hundreds of millions of tests available for you to order for free.   \n\nEven if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week. \n\nSecond – we must prepare for new variants. Over the past year, we’ve gotten much better at detecting new variants. \n\nIf necessary, we’ll be able to deploy new vaccines within 100 days instead of many more months or years.  \n\nAnd, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. \n\nI cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does.  
\n\nThird – we can end the shutdown of schools and businesses. We have the tools we need. \n\nIt’s time for Americans to get back to work and fill our great downtowns again.  People working from home can feel safe to begin to return to the office.   \n\nWe’re doing that here in the federal government. The vast majority of federal workers will once again work in person. \n\nOur schools are open. Let’s keep it that way. Our kids need to be in school. \n\nAnd with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. \n\nWe achieved this because we provided free vaccines, treatments, tests, and masks. \n\nOf course, continuing this costs money. \n\nI will soon send Congress a request. \n\nThe vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.   \n\nFourth, we will continue vaccinating the world.     \n\nWe’ve sent 475 Million vaccine doses to 112 countries, more than any other nation. \n\nAnd we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease.  \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans.  \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. 
\n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n\nI’ve worked on these issues a long time. \n\nI know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n\nSo let’s not abandon our streets. Or choose between safety and equal justice. \n\nLet’s come together to protect our communities, restore trust, and hold law enforcement accountable. \n\nThat’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n\nThat’s why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope.  \n\nWe should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \n\nI ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.  \n\nAnd I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can’t be traced. \n\nAnd I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \n\nBan assault weapons and high-capacity magazines. \n\nRepeal the liability shield that makes gun manufacturers the only industry in America that can’t be sued. \n\nThese laws don’t infringe on the Second Amendment. They save lives. 
\n\nThe most fundamental right in America is the right to vote – and to have it counted. And it’s under assault. \n\nIn state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n\nWe cannot let this happen. \n\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n\nA former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling.  \n\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers.  \n\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders. 
\n\nWe can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours. \n\nProvide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers. \n\nRevise our laws so businesses have the workers they need and families don’t wait decades to reunite. \n\nIt’s not only the right thing to do—it’s the economically smart thing to do. \n\nThat’s why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce. \n\nLet’s get it done once and for all. \n\nAdvancing liberty and justice also requires protecting the rights of women. \n\nThe constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before. \n\nIf we want to go forward—not backward—we must protect access to health care. Preserve a woman’s right to choose. And let’s continue to advance maternal health care in America. \n\nAnd for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n\nAs I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n\nAnd soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n\nSo tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together.  \n\nFirst, beat the opioid epidemic. \n\nThere is so much we can do. 
Increase funding for prevention, treatment, harm reduction, and recovery.  \n\nGet rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers. \n\nIf you’re suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery. \n\nSecond, let’s take on mental health. Especially among our children, whose lives and education have been turned upside down.  \n\nThe American Rescue Plan gave schools money to hire teachers and help students make up for lost learning.  \n\nI urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. \n\nChildren were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media. \n\nAs Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they’re conducting on our children for profit. \n\nIt’s time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children. \n\nAnd let’s get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care. \n\nThird, support our veterans. \n\nVeterans are the best of us. \n\nI’ve always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home. \n\nMy administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free.  \n\nOur troops in Iraq and Afghanistan faced many dangers. \n\nOne was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. 
\n\nWhen they came home, many of the world’s fittest and best trained warriors were never the same. \n\nHeadaches. Numbness. Dizziness. \n\nA cancer that would put them in a flag-draped coffin. \n\nI know. \n\nOne of those soldiers was my son Major Beau Biden. \n\nWe don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n\nBut I’m committed to finding out everything we can. \n\nCommitted to military families like Danielle Robinson from Ohio. \n\nThe widow of Sergeant First Class Heath Robinson.  \n\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n\nStationed near Baghdad, just yards from burn pits the size of football fields. \n\nHeath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter. \n\nBut cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. \n\nDanielle says Heath was a fighter to the very end. \n\nHe didn’t know how to stop fighting, and neither did she. \n\nThrough her pain she found purpose to demand we do better. \n\nTonight, Danielle—we are. \n\nThe VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \n\nAnd tonight, I’m announcing we’re expanding eligibility to veterans suffering from nine respiratory cancers. \n\nI’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \n\nAnd fourth, let’s end cancer as we know it. \n\nThis is personal to me and Jill, to Kamala, and to so many of you. \n\nCancer is the #2 cause of death in America–second only to heart disease. \n\nLast month, I announced our plan to supercharge  \nthe Cancer Moonshot that President Obama asked me to lead six years ago. 
\n\nOur goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases.  \n\nMore support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more.  \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight, we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.  \n\nWe will meet the test. \n\nTo protect freedom and liberty, to expand fairness and opportunity. \n\nWe will save democracy. \n\nAs hard as these times have been, I am more optimistic about America today than I have been my whole life. \n\nBecause I see the future that is within our grasp. \n\nBecause I know there is simply nothing beyond our capacity. \n\nWe are the only nation on Earth that has always turned every crisis we have faced into an opportunity. \n\nThe only nation that can be defined by a single word: possibilities. \n\nSo on this night, in our 245th year as a nation, I have come to report on the State of the Union. \n\nAnd my report is this: the State of the Union is strong—because you, the American people, are strong. 
\n\nWe are stronger today than we were a year ago. \n\nAnd we will be stronger a year from now than we are today. \n\nNow is our moment to meet and overcome the challenges of our time. \n\nAnd we will, as one people. \n\nOne America. \n\nThe United States of America. \n\nMay God bless you all. May God protect our troops."
  },
  {
    "path": "packages/langchain_openai/test/chains/qa_with_sources_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:collection/collection.dart';\nimport 'package:langchain/langchain.dart'\n    show\n        MemoryVectorStore,\n        RecursiveCharacterTextSplitter,\n        RetrievalQAChain,\n        StuffDocumentsChain;\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/runnables.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenAIQAWithSourcesChain tests', () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n    test('Test OpenAIQAWithSourcesChain', () async {\n      const filePath = './test/chains/assets/state_of_the_union.txt';\n      const loader = TextLoader(filePath);\n      final documents = await loader.load();\n\n      const textSplitter = RecursiveCharacterTextSplitter(\n        chunkSize: 800,\n        chunkOverlap: 0,\n      );\n      final texts = textSplitter.splitDocuments(documents);\n      final textsWithSources = texts\n          .mapIndexed(\n            (final i, final d) =>\n                d.copyWith(metadata: {...d.metadata, 'source': '$i-pl'}),\n          )\n          .toList(growable: false);\n\n      final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n      final docSearch = await MemoryVectorStore.fromDocuments(\n        documents: textsWithSources,\n        embeddings: embeddings,\n      );\n\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(temperature: 0),\n      );\n\n      final qaChain = OpenAIQAWithSourcesChain(llm: llm);\n\n      final docPrompt = PromptTemplate.fromTemplate(\n        'Content: {page_content}\\nSource: {source}',\n      
);\n      final finalQAChain = StuffDocumentsChain(\n        llmChain: qaChain,\n        documentPrompt: docPrompt,\n      );\n\n      final retrievalQA = RetrievalQAChain(\n        retriever: docSearch.asRetriever(),\n        combineDocumentsChain: finalQAChain,\n      );\n\n      const query = 'What did President Biden say about Russia?';\n      final res = await retrievalQA(query);\n\n      expect(res[RetrievalQAChain.defaultInputKey], query);\n      expect(\n        res[RetrievalQAChain.sourceDocumentsOutputKey],\n        isA<List<Document>>(),\n      );\n\n      final output = res[RetrievalQAChain.defaultOutputKey];\n      expect(output, isA<QAWithSources>());\n      final qaWithSources = output as QAWithSources;\n\n      final answer = qaWithSources.answer;\n      expect(answer, isNotEmpty);\n\n      final sources = qaWithSources.sources;\n      expect(sources, isNotEmpty);\n      expect(sources.first, endsWith('-pl'));\n    });\n\n    test('Test custom RetrievalQA streaming pipeline', () async {\n      const filePath = './test/chains/assets/state_of_the_union.txt';\n      const loader = TextLoader(filePath);\n      final documents = await loader.load();\n\n      const textSplitter = RecursiveCharacterTextSplitter(\n        chunkSize: 800,\n        chunkOverlap: 0,\n      );\n      final texts = textSplitter.splitDocuments(documents);\n\n      final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n      final vectorStore = await MemoryVectorStore.fromDocuments(\n        documents: texts,\n        embeddings: embeddings,\n      );\n      final retriever = vectorStore.asRetriever();\n      final docCombiner = Runnable.mapInput<List<Document>, String>(\n        (docs) => docs.map((final d) => d.pageContent).join('\\n'),\n      );\n\n      final promptTemplate = PromptTemplate.fromTemplate('''\nAnswer the question based only on the following context:\n```\n{context}\n```\nQuestion: {question}\n      ''');\n\n      final chatModel = ChatOpenAI(\n        
apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(temperature: 0),\n      );\n      const outputParser = StringOutputParser<ChatResult>();\n\n      final chain = Runnable.fromMap<String>({\n        'context': retriever.pipe(docCombiner),\n        'question': Runnable.passthrough(),\n      }).pipe(promptTemplate).pipe(chatModel).pipe(outputParser);\n\n      final stream = chain.stream('What did President Biden say about Russia?');\n\n      var content = '';\n      var count = 0;\n      await for (final res in stream) {\n        content += res;\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(content, isNotEmpty);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/chat_openai_responses_mappers_test.dart",
    "content": "import 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:langchain_openai/src/chat_models/chat_openai_responses_mappers.dart';\nimport 'package:openai_dart/openai_dart.dart' as oai;\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatOpenAIResponses mapper tests', () {\n    group('Message mapping', () {\n      test('should map SystemChatMessage to system item', () {\n        final messages = [ChatMessage.system('You are helpful.')];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        expect(items, hasLength(1));\n        expect(items.first, isA<oai.MessageItem>());\n      });\n\n      test('should map HumanChatMessage with text to user item', () {\n        final messages = [ChatMessage.humanText('Hello')];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        expect(items, hasLength(1));\n        expect(items.first, isA<oai.MessageItem>());\n      });\n\n      test('should map AIChatMessage to assistant text item', () {\n        final messages = [ChatMessage.ai('I am fine.')];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        expect(items, hasLength(1));\n        expect(items.first, isA<oai.MessageItem>());\n      });\n\n      test('should map AIChatMessage with tool calls to separate items', () {\n        final messages = [\n          ChatMessage.ai(\n            'Let me check.',\n            toolCalls: const [\n              AIChatMessageToolCall(\n                id: 'call_1',\n    
            name: 'get_weather',\n                argumentsRaw: '{\"location\":\"Barcelona\"}',\n                arguments: {'location': 'Barcelona'},\n              ),\n            ],\n          ),\n        ];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        // Should produce 2 items: assistant text + function call\n        expect(items, hasLength(2));\n        expect(items[0], isA<oai.MessageItem>());\n        expect(items[1], isA<oai.FunctionCallItem>());\n        final fc = items[1] as oai.FunctionCallItem;\n        expect(fc.callId, 'call_1');\n        expect(fc.name, 'get_weather');\n        expect(fc.arguments, '{\"location\":\"Barcelona\"}');\n      });\n\n      test('should map AIChatMessage without content to only tool calls', () {\n        final messages = [\n          ChatMessage.ai(\n            '',\n            toolCalls: const [\n              AIChatMessageToolCall(\n                id: 'call_1',\n                name: 'get_weather',\n                argumentsRaw: '{\"location\":\"Barcelona\"}',\n                arguments: {'location': 'Barcelona'},\n              ),\n            ],\n          ),\n        ];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        // No text content, so only the function call item\n        expect(items, hasLength(1));\n        expect(items.first, isA<oai.FunctionCallItem>());\n      });\n\n      test('should map ToolChatMessage to function call output', () {\n        final messages = [\n          ChatMessage.tool(\n            toolCallId: 'call_1',\n            content: '{\"temperature\": 22}',\n          ),\n        ];\n        final input = messages.toResponseInput();\n\n        final items = switch (input) {\n          
oai.ResponseInputItems(:final items) => items,\n          _ => <oai.Item>[],\n        };\n        expect(items, hasLength(1));\n        expect(items.first, isA<oai.FunctionCallOutputItem>());\n      });\n    });\n\n    group('Tool spec mapping', () {\n      test('should map ToolSpec to ResponseTool', () {\n        final tools = [\n          const ToolSpec(\n            name: 'get_weather',\n            description: 'Get weather data',\n            inputJsonSchema: {\n              'type': 'object',\n              'properties': {\n                'location': {'type': 'string'},\n              },\n            },\n          ),\n        ];\n\n        final result = tools.toResponseTools();\n        expect(result, hasLength(1));\n        expect(result.first, isA<oai.ResponseTool>());\n      });\n    });\n\n    group('Tool choice mapping', () {\n      test('should map ChatToolChoiceNone', () {\n        const choice = ChatToolChoiceNone();\n        expect(choice.toResponseToolChoice(), oai.ResponseToolChoice.none);\n      });\n\n      test('should map ChatToolChoiceAuto', () {\n        const choice = ChatToolChoiceAuto();\n        expect(choice.toResponseToolChoice(), oai.ResponseToolChoice.auto);\n      });\n\n      test('should map ChatToolChoiceRequired', () {\n        const choice = ChatToolChoiceRequired();\n        expect(choice.toResponseToolChoice(), oai.ResponseToolChoice.required);\n      });\n\n      test('should map ChatToolChoiceForced', () {\n        const choice = ChatToolChoiceForced(name: 'get_weather');\n        final result = choice.toResponseToolChoice();\n        expect(result, isA<oai.ResponseToolChoice>());\n      });\n    });\n\n    group('Response format mapping', () {\n      test('should map text format', () {\n        const format = ChatOpenAIResponsesResponseFormatText();\n        final result = format.toTextConfig();\n        expect(result, isA<oai.TextConfig>());\n      });\n\n      test('should map JSON object format', () {\n        const 
format = ChatOpenAIResponsesResponseFormatJsonObject();\n        final result = format.toTextConfig();\n        expect(result, isA<oai.TextConfig>());\n      });\n\n      test('should map JSON schema format', () {\n        const format = ChatOpenAIResponsesResponseFormatJsonSchema(\n          name: 'test',\n          schema: {'type': 'object'},\n        );\n        final result = format.toTextConfig();\n        expect(result, isA<oai.TextConfig>());\n      });\n    });\n\n    group('createResponseRequest', () {\n      test('should create request with default options', () {\n        final messages = [ChatMessage.humanText('Hello')];\n        final request = createResponseRequest(\n          messages,\n          options: null,\n          defaultOptions: const ChatOpenAIResponsesOptions(\n            model: 'gpt-4o-mini',\n          ),\n        );\n\n        expect(request.model, 'gpt-4o-mini');\n      });\n\n      test('should override defaults with call options', () {\n        final messages = [ChatMessage.humanText('Hello')];\n        final request = createResponseRequest(\n          messages,\n          options: const ChatOpenAIResponsesOptions(\n            model: 'gpt-4o',\n            temperature: 0.5,\n          ),\n          defaultOptions: const ChatOpenAIResponsesOptions(\n            model: 'gpt-4o-mini',\n            temperature: 0.9,\n          ),\n        );\n\n        expect(request.model, 'gpt-4o');\n        expect(request.temperature, 0.5);\n      });\n\n      test('should pass metadata as Map<String, String>', () {\n        final messages = [ChatMessage.humanText('Hello')];\n        final request = createResponseRequest(\n          messages,\n          options: const ChatOpenAIResponsesOptions(\n            model: 'gpt-4o-mini',\n            metadata: {'key': 'value'},\n          ),\n          defaultOptions: const ChatOpenAIResponsesOptions(\n            model: 'gpt-4o-mini',\n          ),\n        );\n\n        expect(request.metadata, {'key': 
'value'});\n      });\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/chat_openai_responses_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/language_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group(\n    'ChatOpenAIResponses tests',\n    skip:\n        Platform.environment['OPENAI_API_KEY'] == null ||\n        Platform.environment.containsKey('CI'),\n    () {\n      final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n      const defaultModel = 'gpt-4o-mini';\n\n      late ChatOpenAIResponses chatModel;\n\n      setUp(() {\n        chatModel = ChatOpenAIResponses(\n          apiKey: openaiApiKey,\n          defaultOptions: const ChatOpenAIResponsesOptions(\n            model: defaultModel,\n            maxOutputTokens: 100,\n          ),\n        );\n      });\n\n      tearDown(() {\n        chatModel.close();\n      });\n\n      group('invoke', () {\n        test('should return a text response', () async {\n          final res = await chatModel.invoke(\n            PromptValue.chat([ChatMessage.humanText('Say \"hi\"')]),\n          );\n          expect(res.output.content, isNotEmpty);\n          expect(res.id, isNotEmpty);\n          expect(res.finishReason, FinishReason.stop);\n          expect(res.metadata, containsPair('model', contains('gpt-4o-mini')));\n        });\n\n        test('should support system messages', () async {\n          final res = await chatModel.invoke(\n            PromptValue.chat([\n              ChatMessage.system('You always respond with exactly \"OK\".'),\n              ChatMessage.humanText('Hello'),\n            ]),\n          );\n          expect(res.output.content.toLowerCase(), contains('ok'));\n        });\n      });\n\n      group('streaming', () {\n        test('should stream text deltas', () async {\n          final stream = 
chatModel.stream(\n            PromptValue.chat([ChatMessage.humanText('Say \"hello world\"')]),\n          );\n\n          ChatResult? result;\n          var count = 0;\n          await for (final res in stream) {\n            result = result?.concat(res) ?? res;\n            count++;\n          }\n\n          expect(count, greaterThan(1));\n          expect(result, isNotNull);\n          expect(result!.output.content, isNotEmpty);\n          expect(result.finishReason, FinishReason.stop);\n        });\n      });\n\n      group('tool calling', () {\n        const weatherTool = ToolSpec(\n          name: 'get_weather',\n          description: 'Get the current weather for a location.',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {'type': 'string', 'description': 'The city name'},\n            },\n            'required': ['location'],\n          },\n        );\n\n        test('should return tool calls when tools are provided', () async {\n          final model = ChatOpenAIResponses(\n            apiKey: openaiApiKey,\n            defaultOptions: const ChatOpenAIResponsesOptions(\n              model: defaultModel,\n              tools: [weatherTool],\n            ),\n          );\n\n          final res = await model.invoke(\n            PromptValue.chat([\n              ChatMessage.humanText('What is the weather in Barcelona?'),\n            ]),\n          );\n\n          expect(res.output.toolCalls, isNotEmpty);\n          final toolCall = res.output.toolCalls.first;\n          expect(toolCall.name, 'get_weather');\n          expect(toolCall.arguments, containsPair('location', isNotEmpty));\n\n          model.close();\n        });\n\n        test('should support multi-turn tool calling', () async {\n          final model = ChatOpenAIResponses(\n            apiKey: openaiApiKey,\n            defaultOptions: const ChatOpenAIResponsesOptions(\n              model: defaultModel,\n              
tools: [weatherTool],\n              maxOutputTokens: 200,\n            ),\n          );\n\n          // Step 1: model calls the tool\n          final res1 = await model.invoke(\n            PromptValue.chat([\n              ChatMessage.humanText('What is the weather in Barcelona?'),\n            ]),\n          );\n          expect(res1.output.toolCalls, isNotEmpty);\n          final toolCall = res1.output.toolCalls.first;\n\n          // Step 2: provide tool result and get final answer\n          final res2 = await model.invoke(\n            PromptValue.chat([\n              ChatMessage.humanText('What is the weather in Barcelona?'),\n              ChatMessage.ai(\n                res1.output.content,\n                toolCalls: res1.output.toolCalls,\n              ),\n              ChatMessage.tool(\n                toolCallId: toolCall.id,\n                content: '{\"temperature\": 22, \"condition\": \"sunny\"}',\n              ),\n            ]),\n          );\n          expect(res2.output.content, isNotEmpty);\n\n          model.close();\n        });\n      });\n\n      group('tool calling streaming', () {\n        const weatherTool = ToolSpec(\n          name: 'get_weather',\n          description: 'Get the current weather for a location.',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {'type': 'string', 'description': 'The city name'},\n            },\n            'required': ['location'],\n          },\n        );\n\n        test('should stream tool calls', () async {\n          final model = ChatOpenAIResponses(\n            apiKey: openaiApiKey,\n            defaultOptions: const ChatOpenAIResponsesOptions(\n              model: defaultModel,\n              tools: [weatherTool],\n            ),\n          );\n\n          final stream = model.stream(\n            PromptValue.chat([\n              ChatMessage.humanText('What is the weather in Barcelona?'),\n            ]),\n          
);\n\n          ChatResult? result;\n          var count = 0;\n          await for (final res in stream) {\n            result = result?.concat(res) ?? res;\n            count++;\n          }\n\n          expect(count, greaterThan(1));\n          expect(result, isNotNull);\n          expect(result!.output.toolCalls, isNotEmpty);\n          final toolCall = result.output.toolCalls.first;\n          expect(toolCall.name, 'get_weather');\n          expect(toolCall.arguments, containsPair('location', isNotEmpty));\n\n          model.close();\n        });\n      });\n\n      group('structured output', () {\n        test('should return JSON matching schema', () async {\n          final model = ChatOpenAIResponses(\n            apiKey: openaiApiKey,\n            defaultOptions: ChatOpenAIResponsesOptions(\n              model: defaultModel,\n              responseFormat: ChatOpenAIResponsesResponseFormat.jsonSchema(\n                name: 'city_info',\n                schema: const {\n                  'type': 'object',\n                  'properties': {\n                    'name': {'type': 'string'},\n                    'country': {'type': 'string'},\n                  },\n                  'required': ['name', 'country'],\n                  'additionalProperties': false,\n                },\n                strict: true,\n              ),\n            ),\n          );\n\n          final res = await model.invoke(\n            PromptValue.chat([\n              ChatMessage.humanText('Tell me about Barcelona.'),\n            ]),\n          );\n\n          expect(res.output.content, isNotEmpty);\n          // Should be valid JSON\n          expect(res.output.content, contains('\"name\"'));\n          expect(res.output.content, contains('\"country\"'));\n\n          model.close();\n        });\n      });\n\n      group('multi-turn with previousResponseId', () {\n        test('should maintain context across turns', () async {\n          final model = ChatOpenAIResponses(\n   
         apiKey: openaiApiKey,\n            defaultOptions: const ChatOpenAIResponsesOptions(\n              model: defaultModel,\n              maxOutputTokens: 50,\n              store: true,\n            ),\n          );\n\n          final res1 = await model.invoke(\n            PromptValue.chat([\n              ChatMessage.humanText('My name is LangChainDartTestUser.'),\n            ]),\n          );\n          expect(res1.id, isNotEmpty);\n\n          final res2 = await model.invoke(\n            PromptValue.chat([ChatMessage.humanText('What is my name?')]),\n            options: ChatOpenAIResponsesOptions(previousResponseId: res1.id),\n          );\n          expect(res2.output.content.toLowerCase(), contains('langchaindart'));\n\n          model.close();\n        });\n      });\n    },\n  );\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/chat_openai_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/output_parsers.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('ChatOpenAI tests', () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n    const defaultModel = 'gpt-4o-mini';\n\n    test('Test ChatOpenAI parameters', () {\n      final chat = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0.1,\n          topP: 0.1,\n          n: 10,\n          maxTokens: 10,\n          presencePenalty: 0.1,\n          frequencyPenalty: 0.1,\n          logitBias: {'foo': 1},\n          user: 'foo',\n        ),\n      );\n      expect(chat.defaultOptions.model, defaultModel);\n      expect(chat.defaultOptions.maxTokens, 10);\n      expect(chat.defaultOptions.temperature, 0.1);\n      expect(chat.defaultOptions.topP, 0.1);\n      expect(chat.defaultOptions.n, 10);\n      expect(chat.defaultOptions.presencePenalty, 0.1);\n      expect(chat.defaultOptions.frequencyPenalty, 0.1);\n      expect(chat.defaultOptions.logitBias, {'foo': 1.0});\n      expect(chat.defaultOptions.user, 'foo');\n    });\n\n    test('Test call to ChatOpenAI', () async {\n      final chat = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          maxTokens: 10,\n        ),\n      );\n      final res = await chat([ChatMessage.humanText('Hello')]);\n      expect(res.content, isNotEmpty);\n    });\n\n    test('Test generate to ChatOpenAI', () async {\n      final chat = ChatOpenAI(apiKey: openaiApiKey);\n      final res = await chat.invoke(\n        PromptValue.chat([\n     
     ChatMessage.humanText('Hello, how are you?'),\n          ChatMessage.ai('I am fine, thank you.'),\n          ChatMessage.humanText('Good, what is your name?'),\n        ]),\n      );\n      expect(res.output.content, isNotEmpty);\n    });\n\n    test('Test model output contains metadata', () async {\n      final chat = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          maxTokens: 10,\n        ),\n      );\n      final res = await chat.invoke(\n        PromptValue.chat([ChatMessage.humanText('Hello, how are you?')]),\n      );\n      expect(res.metadata, isNotNull);\n      expect(res.metadata['created'], isNotNull);\n      expect(res.metadata['model'], startsWith(chat.defaultOptions.model!));\n    });\n\n    test('Test stop logic on valid configuration', () async {\n      final query = ChatMessage.humanText(\n        'write an ordered list of five items',\n      );\n      final chat = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n      final res = await chat([\n        query,\n      ], options: const ChatOpenAIOptions(stop: ['3']));\n      expect(res.content.contains('2.'), isTrue);\n      expect(res.content.contains('3.'), isFalse);\n    });\n\n    test('Test ChatOpenAI wrapper with system message', () async {\n      final chat = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          maxTokens: 10,\n        ),\n      );\n      final systemMessage = ChatMessage.system(\n        'You are to chat with the user.',\n      );\n      final humanMessage = ChatMessage.humanText(\n        'write an ordered list of five items',\n      );\n      final res = await chat([systemMessage, humanMessage]);\n      expect(res.content, isNotEmpty);\n    });\n\n    const getCurrentWeatherTool 
= ToolSpec(\n      name: 'get_current_weather',\n      description: 'Get the current weather in a given location',\n      inputJsonSchema: {\n        'type': 'object',\n        'properties': {\n          'location': {\n            'type': 'string',\n            'description': 'The city and state, e.g. San Francisco, CA',\n          },\n          'unit': {\n            'type': 'string',\n            'description': 'The unit of temperature to return',\n            'enum': ['celsius', 'fahrenheit'],\n          },\n        },\n        'required': ['location'],\n      },\n    );\n\n    test(\n      'Test ChatOpenAI tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        final chat = ChatOpenAI(apiKey: openaiApiKey);\n\n        final humanMessage = ChatMessage.humanText(\n          'What’s the weather like in Boston right now?',\n        );\n        final res1 = await chat.invoke(\n          PromptValue.chat([humanMessage]),\n          options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]),\n        );\n\n        final aiMessage1 = res1.output;\n\n        expect(aiMessage1.content, isEmpty);\n        expect(aiMessage1.toolCalls, isNotEmpty);\n        final toolCall = aiMessage1.toolCalls.first;\n\n        expect(toolCall.name, getCurrentWeatherTool.name);\n        expect(toolCall.arguments.containsKey('location'), isTrue);\n        expect(toolCall.arguments['location'], contains('Boston'));\n\n        final functionResult = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage = ChatMessage.tool(\n          toolCallId: toolCall.id,\n          content: json.encode(functionResult),\n        );\n\n        final res2 = await chat.invoke(\n          PromptValue.chat([humanMessage, aiMessage1, functionMessage]),\n          options: const ChatOpenAIOptions(tools: [getCurrentWeatherTool]),\n        );\n\n        final aiMessage2 = 
res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n      },\n    );\n\n    test('Test tokenize', () async {\n      final chat = ChatOpenAI(apiKey: openaiApiKey);\n      const text = 'Hello, how are you?';\n\n      final tokens = await chat.tokenize(PromptValue.string(text));\n      expect(tokens, [9906, 11, 1268, 527, 499, 30]);\n    });\n\n    test('Test encoding', () async {\n      final chat = ChatOpenAI(apiKey: openaiApiKey, encoding: 'cl100k_base');\n      const text = 'Hello, how are you?';\n\n      final tokens = await chat.tokenize(PromptValue.string(text));\n      expect(tokens, [9906, 11, 1268, 527, 499, 30]);\n    });\n\n    test('Test countTokens string', () async {\n      final chat = ChatOpenAI(apiKey: openaiApiKey);\n      final prompt = PromptValue.string('Hello, how are you?');\n\n      final numTokens = await chat.countTokens(prompt);\n      final generation = await chat.invoke(prompt);\n      expect(numTokens, generation.usage.promptTokens);\n    });\n\n    test('Test countTokens messages', () async {\n      final models = ['gpt-4-0314', 'gpt-4-0613'];\n      for (final model in models) {\n        final chat = ChatOpenAI(\n          apiKey: openaiApiKey,\n          defaultOptions: ChatOpenAIOptions(\n            model: model,\n            temperature: 0,\n            maxTokens: 1,\n          ),\n        );\n        final messages = [\n          ChatMessage.system(\n            'You are a helpful, pattern-following assistant that translates '\n            'corporate jargon into plain English.',\n          ),\n          ChatMessage.humanText(\n            \"This late pivot means we don't have time to boil the ocean for the \"\n            'client deliverable.',\n          ),\n        ];\n\n        final numTokens = await chat.countTokens(PromptValue.chat(messages));\n        final generation = await chat.invoke(PromptValue.chat(messages));\n        expect(numTokens, 
generation.usage.promptTokens);\n      }\n    });\n\n    test('Test ChatOpenAI streaming', () async {\n      final promptTemplate = ChatPromptTemplate.fromTemplates(const [\n        (\n          ChatMessageType.system,\n          'You are a helpful assistant that replies only with numbers '\n              'in order without any spaces or commas',\n        ),\n        (ChatMessageType.human, 'List the numbers from 1 to {max_num}'),\n      ]);\n      final chat = ChatOpenAI(apiKey: openaiApiKey);\n\n      final chain = promptTemplate.pipe(chat);\n      final stream = chain.stream({'max_num': '9'});\n\n      ChatResult? result;\n      var count = 0;\n      await for (final ChatResult res in stream) {\n        result = result?.concat(res) ?? res;\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(\n        result!.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n        contains('123456789'),\n      );\n      expect(result.usage.promptTokens, greaterThan(0));\n      expect(result.usage.responseTokens, greaterThan(0));\n      expect(result.usage.totalTokens, greaterThan(0));\n    });\n\n    const jokeTool = ToolSpec(\n      name: 'joke',\n      description: 'A joke',\n      inputJsonSchema: {\n        'type': 'object',\n        'properties': {\n          'setup': {'type': 'string', 'description': 'The setup for the joke'},\n          'punchline': {\n            'type': 'string',\n            'description': 'The punchline to the joke',\n          },\n        },\n        'required': ['setup', 'punchline'],\n      },\n    );\n\n    test('Test ChatOpenAI streaming with functions', () async {\n      final promptTemplate = ChatPromptTemplate.fromTemplate(\n        'tell me a long joke about {foo}',\n      );\n      final chat =\n          ChatOpenAI(\n            apiKey: openaiApiKey,\n            defaultOptions: const ChatOpenAIOptions(\n              model: defaultModel,\n              temperature: 0,\n            ),\n          ).bind(\n    
        ChatOpenAIOptions(\n              tools: const [jokeTool],\n              toolChoice: ChatToolChoice.forced(name: 'joke'),\n            ),\n          );\n      final jsonOutputParser = ToolsOutputParser();\n\n      final chain = promptTemplate.pipe(chat).pipe(jsonOutputParser);\n\n      final stream = chain.stream({'foo': 'bears'});\n\n      List<ParsedToolCall> lastResult = [];\n      var count = 0;\n      await for (final res in stream) {\n        lastResult = res;\n        count++;\n      }\n\n      expect(count, greaterThan(1));\n      expect(lastResult, hasLength(1));\n      final toolCall = lastResult.first;\n      expect(toolCall.arguments['setup'], isNotEmpty);\n      expect(toolCall.arguments['punchline'], isNotEmpty);\n    });\n\n    test('Test response seed', skip: true, () async {\n      final prompt = PromptValue.string('How are you?');\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0,\n          seed: 12345,\n        ),\n      );\n\n      final res1 = await llm.invoke(prompt);\n      final res2 = await llm.invoke(prompt);\n\n      expect(\n        res1.metadata['system_fingerprint'],\n        res2.metadata['system_fingerprint'],\n      );\n      expect(res1.output, res2.output);\n    });\n\n    test('Test JSON mode', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.system(\n          \"Extract the 'name' and 'origin' of any companies mentioned in the \"\n          'following statement. 
Return a JSON list.',\n        ),\n        ChatMessage.humanText(\n          'Google was founded in the USA, while Deepmind was founded in the UK',\n        ),\n      ]);\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0,\n          seed: 9999,\n          responseFormat: ChatOpenAIResponseFormat.jsonObject,\n        ),\n      );\n\n      final res = await llm.invoke(prompt);\n      final outputMsg = res.output;\n      final outputJson = json.decode(outputMsg.content) as Map<String, dynamic>;\n      expect(outputJson['companies'], isNotNull);\n      final companies = outputJson['companies'] as List<dynamic>;\n      expect(companies, hasLength(2));\n      final firstCompany = companies.first as Map<String, dynamic>;\n      expect(firstCompany['name'], 'Google');\n      expect(firstCompany['origin'], 'USA');\n      final secondCompany = companies.last as Map<String, dynamic>;\n      expect(secondCompany['name'], 'Deepmind');\n      expect(secondCompany['origin'], 'UK');\n    });\n\n    test('Test Structured Output', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.system(\n          'Extract the data of any companies mentioned in the '\n          'following statement. 
Return a JSON list.',\n        ),\n        ChatMessage.humanText(\n          'Google was founded in the USA, while Deepmind was founded in the UK',\n        ),\n      ]);\n      final llm = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0,\n          seed: 9999,\n          responseFormat: ChatOpenAIResponseFormat.jsonSchema(\n            const ChatOpenAIJsonSchema(\n              name: 'Companies',\n              description: 'A list of companies',\n              strict: true,\n              schema: {\n                'type': 'object',\n                'properties': {\n                  'companies': {\n                    'type': 'array',\n                    'items': {\n                      'type': 'object',\n                      'properties': {\n                        'name': {'type': 'string'},\n                        'origin': {'type': 'string'},\n                      },\n                      'additionalProperties': false,\n                      'required': ['name', 'origin'],\n                    },\n                  },\n                },\n                'additionalProperties': false,\n                'required': ['companies'],\n              },\n            ),\n          ),\n        ),\n      );\n\n      final res = await llm.invoke(prompt);\n      final outputMsg = res.output;\n      final outputJson = json.decode(outputMsg.content) as Map<String, dynamic>;\n      expect(outputJson['companies'], isNotNull);\n      final companies = outputJson['companies'] as List<dynamic>;\n      expect(companies, hasLength(2));\n      final firstCompany = companies.first as Map<String, dynamic>;\n      expect(firstCompany['name'], 'Google');\n      expect(firstCompany['origin'], 'USA');\n      final secondCompany = companies.last as Map<String, dynamic>;\n      expect(secondCompany['name'], 'Deepmind');\n      expect(secondCompany['origin'], 'UK');\n    });\n\n    
test('Test multi-modal GPT-4 Vision with URL image', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.system('You are a helpful assistant.'),\n        ChatMessage.human(\n          ChatMessageContent.multiModal([\n            ChatMessageContent.text('What fruit is this?'),\n            ChatMessageContent.image(\n              data:\n                  'https://upload.wikimedia.org/wikipedia/commons/9/92/95apple.jpeg',\n            ),\n          ]),\n        ),\n      ]);\n      final chatModel = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(model: defaultModel),\n      );\n\n      final res = await chatModel.invoke(prompt);\n      expect(res.output.content.toLowerCase(), contains('apple'));\n    });\n\n    test('Test multi-modal GPT-4 Vision with base64 image', () async {\n      final prompt = PromptValue.chat([\n        ChatMessage.system('You are a helpful assistant.'),\n        ChatMessage.human(\n          ChatMessageContent.multiModal([\n            ChatMessageContent.text('What fruit is this?'),\n            ChatMessageContent.image(\n              mimeType: 'image/jpeg',\n              data:\n                  
'/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAOABLgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD+GqR5EK/PuYEZjH3QvQ/KMde3Pse1SQ3UcbK8rFPm4QHKgf3mx6dQB14x1NU5ZAMMGAdl4zgkOc8Z9vxH15xWV2TBJDDlXJAI6k98A4zjjAPbbisYQ5Vqt3fXztpp1vrfl8mfGud1rflWml1ftZ622abte3m2dG+pw7ZP3jxnBCkKQM9eSM7snpwAOPQkY8t68ijkFiCEZjyPTcegPJwfQ1WYl0bcxKEYQHAIwOOB1AzyCeeKjXZuCttzkEfwgkchSc9DjkZ54xXVCEYpq17Ju3f4Vtbz0TaWl7a2MZO+j1stEuytf1u0vysIoVQu5VOxgcZxuX0Y8ZU5wOpwPzcSEVsEgsfl2ncgLZOwDjDDpnn+lSHy2YgsVRlzwMMWA7/3s898cVWLswAySyNhQOGCDHUcZ24BPoOMEVutbppLRXv200em79fyRndzSvok4trvs1p5db+uo9QAckBcKMIzfM4JGR9OxX16nuJFJfCLzLuKqM4wB2IwcKO56DJ7cVAQXjUk4ZidrDjnOSScZxxxxn+swQxyZZdrYB37SCwb0wSAGx0xkY69qUrPTpZJd01Z6+fR220e6Qnru9rqyfVW7vVP73trYQqqvu25KnjOShZTgnPBAPbqevvQxAJLjaQxLEEbcHAKgYPOR1z6Z71IuX+RlLEEEIp4I9AMHoCeepolQnKeWy85RWAYBT/eHGWOBk8cD6Y5+VN3avaSTSWtlZ621V1rtv6kJ6pN9ldvpptu3vtq1rfYuWR2yRtG5UNko+P4zjp0+foAcc9DgV9ufAmSUJaQq4UQn7XIN2FWPIJ+0DacSP0Ud8H0NfDcDmKUZZlVAVOx8Mrnoy5BAxg54GADk19M/BjxNLb3sdurbWMyQ+YGDiQbvkHlfL5oxnzDkbeODnjxs4pOWGlKMUoxu21fsrXt2/4e2ylXTWj5bxcnrfVrl5r72d9Ol1o+n6eaZBI0HmT7RaTx77RGYJcHeAN4UZ4QgbVJBYE9MVmeJbSa8tJYQylbNJSVgA3RFQNwX+9K2Rx2AJwTUWhXsjQRss1u8oltnIL4CwAEzKowd2/K5X5RwAO5rpNStjPBgM0XnkySojDyoTJ/qC0X/Lu9wQ3mAs+3aucivz1p3i7rR3v2X9Kz013Wm/XZdNV081potN9f66/mj8bNBlE
kzou4RSb3QP8APEy/fVlAwwPy4ZSMkHA618iyIEMuOGUtIC2VK9iB3zx8uMg8nOeK/Rz4w6KZrO+VhLHMd++KOQSW6Zz9nYybVzMvz+YuPlyOuePz01u0mtb+6SRTlJyrIG3FSOu/5RuP4gAY7ivvshqqph1RlytwV773uotp+l07+m5cWoN036pvW3w2W27006N9L65KMVAEmTnuCOFIyAQOu78D9eafwEJzt3j5STwMnhQe3fA56/m0AqrEhiSQVxztQH5tw/hIzkZ4xxjpTSGcBVbCkg4wOo5DdcEHPoPccV9DFJJK1trrbVWt0+/z76mTSva7V3rpdNaO9rLstXdeV2JvwRuXnOTngFVOcc/3uSep/AGpAwCs3IGS3XkDgbQMceo9jnoaQgchgW24+Y8KR36E8/lnBpxC5IUqVYAqACxDD0zgZ6ZwO34VcJJbWSsk79XZWbfTR9vK3UuysrR30dnrpbVLo/yXqhokcEEHai5wc+vTdnkkjGACC2eBTxchVbcCG3AepIJ5JGeMHvgY+vVskeW2q2UwGAByisRgl89BwfpwemaI4HYHagJLKdx6lT1AHPQAYzgkdvTR1Fy3b23Te3z6ab9NvIShB63tZpaWj2ev+Vt9nbUUsw2lM+XjgDng54wOgP54/A0BxtAAxJncxI+bHIx16d+evv3sC3LOREGZmwQhzgDvhSOn05HIAqz/AGdIyM6RdOSBnBUH5j0yB045/KuOVaD3kkm7XffRpO7uvXXY0Stfok1717Nq99Ve2mqv521MnazSKu3cozyx4GOTkY5HB7g+nB5miQAYGOpUk8DnAOPTv3BGRzWhHYMybgFXLBVK/M3oRyc8cHrk5/Grg0/5hGHBY4GNuAM+p/vDpjqD0zk5xVSLbUmrq93ut1vur7dX/m24WtZd7pdbJ3dne3a3ntd2oQwO5TYH8vcRh854xuUDsvA784rstLsiAEVBnfw5PTOOOB/COvYZ9sU/S9IJEa5BLtk7iCEx0L4GBgng8/nzXYi2+zgkiJWCbl7ZVuu3HOTgZPQBc9wa5Z14u8Ek3urLZpLtprdLTd3Od3d7JO1mr9Xptr579l1vZZfkRQLwQzrztPAA7sBz1+v19s68lEcayAnOSShbKkHsBjvzz09s5zr3U8YVhhVkIwdvJHfLE9QuM5bBGTx1xxmo3bujoFykaHDY+ZByCCecseqjGR82evCpczav/Mr30926v2/r8JTvZ6bpyV9uyS11d7+vmYmp3W1xtlBU/MQMHcDklSeMMMDsc1iGUNn5thYsYyDuJQ/eDcDk9enGOeDgxTOXdQylVZjswdx2dmY45xkjt+Joj2tGwBTIJJJOGcKe3BwRk46Z556V60EoxUd33Wq2VvTTv3fkaqKjBaNvutlt1v33TS6rsWYypj2F/nDZwf4lPTrjnjPv7YApZVQPlGZhtAIzgZIOQPX/AHhkYwO1QRyAkFuCWIXngDqAeOgIOT79uae4wGdmJyAPZF6EdBjPtnJA46GjR8rbSenztyuzvfXZLytvqYtPnvtsl1u9F5PRX/LcqujsBhScM2MNkIq4JJGBkYI5z6+hqF0JVmwcYBUexzuBHG0Y5xz1HPWpivI2HEeTySQ2WwQAB0z1zn3phMrEJsGcgBVI+ZecjHOeB1/Q1vF6OzSXxaNPotlvprbt5q50pv3XdPvfRpXW+vWy8/m7lYKoAOzABbbzlsejEHp04x9OgFTr5bINuVfOARwc55PTJwOgyMnPTNMZeAApAyMkZznJyvYk8DnoePpT4nbDKAmCO4+YBcnKn165wPfPo5tJXvpZX20vbzafa1vmaN3V9bpp2v0006t73av6COVLoDgsnC5G1cHqD23HJ59icc1PGSqHDKecjJyeScKo9R0GPwx1oCFwTjKkcEn079Dkeox6c55qRISBkgZBU7TwSuMnHpjjkHv04rjqtWa/BtWd7O931XS3QyclaKvs1pdWeza8lra1rfpCu7K7l3L
klPm7dQrZB49e+Ae5qwVwy4IyrfcJzkkg/L7ZyCferqQqVjAU+Y7KSAQw7kHtwvX0qz9n2l2jUsVHzfLlV7ZB7DJyB/D3OOmcKtmtla19eullqmtLq1ul9TnnWSa0asmrXST+HbVpq76der60hIcAEDcCAyrwSpJyMc4wMe3TtUqooI2AEr1A4wpIJBHYc9ic4NSi3CspIALHDAnJ5HATA44wMZ4xnJ6VpxQW5iZSdswOWXHOM9COMFe2CME9+3XGrG6Wj5tVfa2mj006aafkctWqoqLinZ22dl7zV23/ACq76t3vdmfCgiIYqAd2SgIPynoB9fX0yMGrivHhgARKxACk5A7D5e2D/LOccU6RLdWk8suJAobDqdp4+Zs55zxtGOOpzUG0AGRdi87txPzhe5xnoPQ9fStHUTWiS0Vnr5WVvTRbednthKSnq+ZN2tb5NJrqr9dX1GySpGS25wyqUdf4cY6jHUkkZHOOOT0MPnqsfB+Y/MAx3EN2BOB938Dz0OaRjukCIxKuNyMeMDqwOQeDgjPPPbvVPIDZ8vaBnCtzuI4zuz93OSBj6msnPZNaKy07tLT836ebsuiMU4pO97JtKyvsrWeu622Vt7atHkZdpH8GV64JRicg8E88gnI54I9K+5QMM20htwDN8wQ/wjjAA7EjnJweMVPIUCqu4BmA5Ug5kOc7gOpGRknHX34qoQCFOGYDBJHyt16Ek7hjr057Von63SS1tZbJ/Pfza00OumtNrW7Jq6vo77Jb990mO8xnkCnHlgkKVO1dp4xjHTux+lRmQorYAdlGDgZYAnggcYxk9Omc5pzbckLyMcjqoPRgBxyevv69aEAk2RqBl9xVj2HfPoB278/nD8vRtXStprtfX815otWsm46aaarrfXtfXbs9OjrqgBUBQfmIDbtpIbsR2/2ieue3NKqDDZG0jIXrn5iOFwMcgcg4xkke82FMgcAqeNwIDA7cZIHHBGOOehOeaXCyOQpCnjZkbQwHfB4BA6HPToOpDdunVJfk3r8lfbVluevXZNt301V7/frZLq9t4E2qykqSQSOMgYGDhvU4POfpUysm15NwJ2swPU59AOpPQBu3T/Zppi2tvIIfdlueGGBggAfKCPcA96VcbwCvyk8jvz2xzgdDkZ+vpMrW02sr32ve2uu9tVZ9xSalrrsm7NK+qbS1tbt+K7MCuSq7fl5YHPGOOp98Y7+h7UNvKOSDhMk9Op28rjjJwOfp7U8hgz8dBnYcFUHcqMDk9z2H6MZyANuVbIDbeQy5zzzyevPPbPWsrLTRX91LdXWia7dum2tthrVq1tUn100T81d7N63t56sLjaoGAwGFAztCnuwGOMZ9O9NBXnHylQQD1LMey8cA4ORk/wA8IwOUfbgkEjcOdoPOcHA65GefzNLgMvm7yS5wFUYOQSD06D/ZH3uBxitFFfFZK61Xrb8rdfn56q1ru+vzs72tp5JvW3Ujj3ZVNoJZsjce3IGSOu30wMj8akRQrEYC4JX5ucE/3fQ5HPXHHrUZ2qQBkvk8HAAIxle5znqeB7Zp7MHcFV6cMFUhcn05O4jH3uOeMcVpFX6Kzspbvtd/d+SVrlPXo1dJvfV9+u+qX5oAdhLncABwnUHHVs8cZ7cgkDmpwyCNizjcV3DB3MD1A9sD1+mOtVy+xV2qdyMc+pUnoAeAR0I9O/akUjcoJxluvf8AE+g7/j7VbiuW3m2rLySSdrvzfrfqS1za2fyttG3rbo2+unawisPMQOoK7i2QSCwH8LHHQ9zjnjjriVWbbjGSCR64JIwF9ABx/T0VlA35JzgMjAMUYjljyBknIA9waWOLcjyEFS4JQnoBnDFhnGeOARyM9xWE0n6qKflolfS99dNtV+abjZN6Wa/m3utbdtW3bs9CDcxIVl4VjjBGVXP056ZzjPB4q7E0RiOc7ywEYyd3s31BBAOeecjiqzRhW5XJzkuxIG3nAHYbvp7jpTkLtmIHaCy4Y8EBehHTgZ5
xz3rlaaWkU++1k3bZaLTzu9bWuErNXWiVm7addd737vr5JmglwY2Q7j8o2BQcDgYwQf4s8k+uPpXaaFd+akaCQg/MWJPJYEHO7HO3/wCtXAMDuVWKs+UKgAlMY+bPA65HXoRWzps3kyFlUqWb7q8hBnksM8c42+p+hrkr0lKk7K0klHV67aO1l+PnaxnFuMk1q9HpdJWkrNbeeyXW+h7PZwtI8YUq5TnDDC7R94Yzzv4wRyCDkc11ttYhodpQFiSxReQuecAHgFR93vzxjGa4fRr+EpEA6tKNoODlDIw+Tb0+7g7gT1xzxXpGn3C7B5rIpZChU4yCcARqM/M5B68YGPWvl8TCSdrWt33v0fay7/f0O3dOzutE76q7d29VvuusdX0ZgXmihgAVJGSwkX+GPAwGH1OSBjd+VcjdaONzAIQo+YMQATx90ZPOe4+nTv7GIY5HUE5+TdjIAAxwGODuI7ism+05JQ4AJAwAQMKTzn5ugPqcemKxjWcbJtO1l36q75b2u/ku+hzyhZ3td/E7LVW7baev3bHhdzp7yOFWM4UlSpGATk5AGDxk54PJBHWol0tiqqFJkLEEgZ44wCBjHHbJr0i50hNyEQuHDtt39MrjjPHGfu+ozyKr/Ylh3F0VuOSecMMYKkABSeh6+g9B6lDGLlVuVtWs2tL3t3fpr5eTVRdlFPl1Sadtemr6Lf8ABpdzzl9JliZjsJ2nPTg9DgDkjJHTqTkU5DJGrArtAOCpznd0/DHXuT69a9FexjuHdVTczBQPL4C7gMknnPXn07Y61Sk0VCD8oUrwWODvBzgdPvdeMce1drxcZRtPfRN6rom1pZa67dOuhcZXWtm011to7b29dV6KyOWsrqfz4S3IQkDjO08ep4Pp6c4r3HwteNJFACyqx+XlgfnI4BAAIA5BHPDeteMTWT2zkAFdx3Dtzk9ecYwBznn0OMV13h/UBbTR73wy42dQQM9Pcn1479Qcj5/OKSxOGl7O1ovRR110dtHv57HVh6jjNrR30teWya7XWrurr70z6t8OuYVTzCrbSCDGMNjkKwIznqc/TqRW/rOpNsjk8wKoBDvt3MzqRv6EDcAV2n1yfXHm2ia3GEg2yhZCqK+BlmbnhD2JHAb+oIGvczyzyoqk7JE3+WeBIU6gnvjOSeM55r8urYKbxEnKPLrZ30vZpaLq2vKSvqz26d3T9xN81r3avq1pfS6vv110bKWt3kl0uQ6mNVwXx/rCepAycsD2GMZPavAPF0EmJTGxbOWOeqgA8H0Y55HYDvkV7pcxu0MgdAIyoaMAc4GQ3Qnkeh5Oeuc15l4nsUMZVA4ycSOSRhieg4zn1Ck9vmxX0WTL2FanZrlUkum6atdd+na9yMVD3JWV7Qu1ezT02S0aS8tvnf5ev4HjkY7eS524zlATyGJAxg4IH178VWjYKSrHJHzADuOo+uK6/WrBhPM+JF3gna4JDN3fPTOec4BB7nqOaW3YEoiqzEDJxwoGc8kdj1Pv6fe/WMJiYypQWi92L00V/dur7X/O7bPmKrjzShLluldtqKdm1bW7to0tvTVIg3ySOuGAK4AB659SOQQQOvr068dBpUchKEOWO4kAjA2jqB6Edh15zWYloyuzqmchSy8dM8nBIxj0zkd+tdNpiGJkCFQrYDORhlBHJXp0HynsQTk8CtMRWiqfLFp2Wt9EtE9dbX9fTzMpcvJaNtFdd03bpq2ulrvb7vQvD0SL5ez5lB5Unk5x8rDJ9ASR6DpXq1jbRBUZSwXAAjc5DA9Qp6rux3znBB46eWaEqRtHIQIyflViuSUcjzD1AOcDjA9jg16/owjuF2kLLJGMIsS4EaJ0UnOD1yemfXivFdVSndNtvleul4qKvrfpbX0W97nj4lXm3dJbt2ejstLbdLd9Pv1RaOLUsY5PMPXyiJWUHjgYXIAGRkDPPWuB8U6AJI5dpJyodcfKNh3Z3HnaCR05+lez2unSCLcgZiADtXhwDwxk4OB0IO4
gZI69ee1zTHMc5Xa3mYBiTlU3ZJXJBHY5bHXgdTS53dNStZ3ab3W9737Jfq9LHn2UJNq6as1bTR2/R2Teln56/G2rae8TyKoIAkKhxksoBw3JI9sZ9+/Nc4wAOxMkhiVJHBUDoTg89eO/senu3iTQMGWQ27qpXchjX5ZF5zvI6H6gk/Ma8d1K0ELMqkxsrFc4OEyB7YJ45I9uOOfZwVRVHy2SbSatrdtx3t2v1XZ+Z7WGqRnDktZpQu9LX0Wq6pXVlva+t9+fkyGJK/KCF2t05zn169frTlC7DgIvVtxPzAd0XjpzhR3+ozSuA7hGZnIAAZRiMYznkE7ip75BqWMxoGEqjGGVDtBLnPGT25P6nPTj346RT00bTduq1vZt211V9fJaW2k3y2tZ6OyWrWj0Ts9dXpqiHO1wGTByGUcjCdyB3xgEZP8A9aZcFBkHcz4TnDnPUZ6cehxkk80pOc5ztKgru5dVHTb0z19flzz7xqGJ2AYJLMpPDADHfoOSMfT/AHqtyuvPZO72037t6q/3rYxknLVLor7+Semy1Wq1++13speSNSQdrFQpyuNoG5DyQc5HXrirtvGHDtG4YxlQgHBAHUNnPHP/AALHGKoDar/vB83ByMYYHptwevHXPX0Iq/bzRsj5kEbYGf7ynj5x2YY6jI9BiuKtC8eV2atd2vdWastLq++m+41pZdFZWe121ro3/n16m7axyRTIQ6uPlYKp+UAffBbnB9OPb0z7r4LnniFqUUoRKDudseWhHKbCDhWHfJ3dABjj56tr9PNiy4KDKKFI3EpjjAyeepOeAcH29y8F3heON2kTHljJdcmMIeSw3fOxBG05HSvDxlO1NuUbe8pJ2s9bWW2q09WtEkbU5NyS0bT6aJrTlfk9LeSPomw1UQwu8pQF2y5I3KndWXkHcBksD0GMnBp9xdO43K5MTKccjlWAL59Og25/2s4rkI7mJYVw5ELxlo9xy5DA/e7qMgYBzx6cGkXU9sTKGUAADy1XLbhkbQMjg5559q8OMZc/NbVPs9bfpr30elh1aqTaevuqyXRt9ej03vv6WNK4mZ7eRdzAgMcplip7Ak9D6ZGev4eJeMIHZi3Tc/HUDLchT6A/XJI9+fXPOchSQTFIQS4GMccB+fvZxjv19s8D4ohabzgykucOnI2gDIwDjkc5Ir18NJxl71mpLZNNq/K9tVff5beXmz91tpO6cVeK07pvq+rXy7ni6hwxUjaqt/F95cc7ScYwTwPX26mVmYuNwVQO54DKQcrjkYOPUdD+FmaMRSyRONz5z0+VlOeST074yPU9qgKtgbR8pAB4DKB0Oc4x7H04969mElJJ+lrq2yittt+ncJatWXRXb80lfqlrfS+m68gxEIeMRkFxswRg4yCf4enA9O4qJEGcbBs7EcHt8rccA+vfPGODVzoArttBQ4Ucgjtxznvk9uO3V/lRkFizBsqcBuQOcsc8AdMDnsPpalda30W17W19PJr0+RitG1azWi6p6K709N7q/a5XEfyuABuHv8irznnGQB9OvB9aY9q7ISgJTAAPUk8k445yO+OnJwc1aUpkkoSGHykdsD+Ne5J69+v0NthCYuuGYA5X7gPft24z7VnJtNOy1ty33to/8/PzsK/e7baT0uktPS1m9e3m9+XdChO+MFVbnDfORyNp68c88fn3zJ1YrllKoxZVHVl6Hg9lPIz+WDxXUy2jYLqrNj+POPlOeD1+i9c88DrWRcwPuZFjdSQMAfwg+ozjHGc5yB2rSEurtpZ9lqtEr7vVN6X7X66Jp2fLqlG7u7a2urXe7a1S29DmHALDKcAsVw2wjB6OccjAz09uhOGhnCuQNwXqQemO3scdc8jjBHa7cREHiEjbn5jzjODhiOQCRx6YPFQKv7xxIcjB2rxh2wM4x/CvGc9M10J36WXKmtVv7qfm9NWzupqNrJJpJPrrs9PzfVau2xVjXaobYMbyyEHLFSeFJ/ug9enNWsn5iqkfKWPlqCA
T1P8AvDHIA7Y57Bj+ZmCMCAOv3QB1xjqTwV4yfc09EZHaQkKpUYGQC30Xvxj1HAFTNtO6V9L266WVlrpo/wAHfVs3vezWrS83va6v3StrrvrbUgERClgvRhtJBJKsc47cY6k45Gcc82VVQrqw3S/eAB3DPGAvfAHUnPvgCpBJHtdC3zcsMAE57emB19sVHhSRg5cnB3cEj+7nHPv+PXmsJ2ervZfffTy130fT8rTbS89dtdVtfd7/AKWKxVhj1JKkkY6n7vqSCcZ9/wAKARvwwY9+ec47HGeeWIwT6juasSRMiqcbiTnCn7i88Y4xxngnjk9cGoFAZRsU78thjx8vpyOT16n19qzUte+lvN7btLXbtcGt0m1one/XTp1Xls9xGKkk7iqlchOoJOc5PAA6nHOMDPFRPkrnG48kEYJ28ZPOCB+HH6gcMN2RtJ/h6fKPQd89cD/9blVHwB8x24IzggdyR3xwB26kVtBttLTlu5X69Gt9Vp2v16aGTVrNq+qv6dU+ltdPK/kiFNgAwFKhiSM5LZ6KevU+n9KmYEEAL8p7A8YPbnHzEY7U8Ku4oF59wVHHOSeg+o4H5ilbqTkgKACF/gAxuyO5PHf8Rg10J26LdP7v8/u8jOa53d3VvTy7fg3rsMUgxgc9c7T128cH0HXB5yfzD24Zdq+gUtnvnhj2U7fw7c0m5GZvmwRnBVcMw9WIzk46kYx9alMZYhiASyqqBR8wK55PPAwck55GOuBVOaa00at09NPvt5ad9DKzUtVbbvZX879PXdbkAAX5mwMN1z91eu3OOO2M/kAOZ9wJO4bXweQByuBgKR0z/u9uT1FIrRYVcgkZB43BiOxByCcD259aV1G9nGQjKGHdgPUjjDZA4HPfHUnCT3TVndWuls0ut/Lf0Xpomm7WaaWz000/4H43I1GCpYNuXIDDhdox8rHHzZ6ngZP6zKW2uQoYjO0g/dIx19BjrgYAFRgoxCnOQSUduVAPQseAOvbpmnqp3bicMDjaBwewLYzxg59xxisxtXv2dtNf8/663FQqsYY4yHzkdWJOQMcZ569McfhaDgjCjLlSzBcEb/8Ab6EN13f5xDuiUnDEyYUkKuUU4O4dQAR16cZ6npSqoGSPlLE45ywOOPrn/PAqJvp5Pt+XnZa91e4Wu7fPd6pWbt22VvPp3aVPygoWI9eAF5zuPdc9B9M9zT4ztJTA4JIBOTuPRRgYwPX9OeFyhXDOxfBJwMjd6AZGCOcYp21GUMuQScAscOR/eJ/pwTg/U5euv9afcU7NW9H5Pbe708+/VIVc7l3gZGQM8A9cqegIOfUZOfYU/bgE44Iz6gKT09s+2c8cnpTAq8nJMgILA5wMdQOeD0wOlTb8beTleGVeBsGQBg5y2DyeO30osu3p5EOKf4/e+o0AluF2gElWHBIPRT2IJz1/+tUoLMcYXO3OBnoM5BGDz0+Y479sgNyvBGQrAAEqT8xPH0Jx6/LwTnFOjwG68jvnlsnjAHUnuuRx6ZNK2t/6/rv6LsDinu3/AEku3kHlkFfl/hyoxnAPY+nfH1Gc9KlUYXgEAHcQcAjnoBxwew6j6YpolZ25OCAQdo2/KOxHPP8ATBzxT1dQCGxhhhgRljnpg888Hs2DjrQ0mrMfLpbpp06f8F6389O4j53DAP0OAPx65BPII/PGcuBPT+IgnB6A9lHtjnnofypx2MdzbtwTA9gOn5fnyPTlQEIZiQSBlCTt3egJAxnjt+nYsrWWi+/rfqHKtFt2fzvu76X/AFI0VgVDLnGeeoxzwTnpz1PXuanjJU5+YBe3HbHTr6gk89MH0qIbsZwRk5XJ+UD0Jz1x69+/U1JlgAOcgAnjCqPTGDlvTn3weMSov1+FeiVru78/T8iuW2+qvfdtdNt7LZ/0kWQwYMQGBx1B5B4557HIwMHGPzeDnPIJx9eMDg4/kO3r1MW7IVRjcQNxA6e57dgSc/TjgqowxY4yAcgDgjsRz+fI46cVpd2t0f8
AwP8AIFa3XTX5XW97/rq9dhyxgkDbnIyp7Fe4J6YPUL+HbiZUwxXaQR93pkDjgnkYOeRg+3FRq5AzwAN2c9FPbr3HQ46H2xVjeRg7hyPm4zknA68A/Q4/PAraKsttbK/y0EBUgjGTnjAIwB3HbOe2Pr1opMkEZ3Fs9D905zjkHj8sehopiSS2/rRfpY8pdCHbKnAwzAtkPkHJDdhznn26k5puGYFQQAW+8enHOWJ4yCRg/hz3syOgA+bBZQMj5stkgKehA9PTrgCoYyyhUkVSnZejNjox9xnk9DwOMVcVaPTR6vVXskltr5rs7G6qO19dLaX6Lta17dfXcHYxbiR+8AC4Gdqgg8rxjp/+uonK4VypwSQu4Z445x6dwfc8VNJIcs5IJAAPsMnjsCccDt354ppVWXcFA3dzjeDjrjHTpxnnkZ71Sa3d1ZW163s73/TqtluhKWza+d+/LdW1ts2r7jpHUjazANtABiAK+248YJP5EfWoOMKADvRtwk3HLZz0HbHQ85B9+acgUuG8wENkkYxggZA+vJ+XuM9akEakSDBHGVk3bfmHPPua1i0r97dfW29+u6v89mJWjZa7J9lrp1s7K23bV3VhqlwVZnBYZDxYA+UHGMdzzzjGCOlTnnKozZ25DqPlDHueefb9MZwKhYrhwgkc5yGGCpGMkc8jJ69T6c1KGdlGGARzjaOT2IITP3Rz+dVZtX3votdtrry31smn2SVgcb2acVuttE1bp1S1ttond7D1IiIZyVkAGWJO1weh+vB2gdOcdOZJZGUKwYF2UGU5yAh424/vABc9ewGajEhZkiAAdCRhMANgchs5B4xgjGDnrxhSFEgcDETkBo2cOGYdcHaNoyRgDOOnYVk1q97bW66JK7bd76f01rFveTe9k9t07WS023tv2vbczGzIQpb5izMRgsBggEHgY564zk5HAFej+ALsW2rW7ErbobiO4jlP8AiI34kz8rDcAqbDvJPIwc+flVZJhvSONyrkK2ThM4yAB8wBORkcHrjg6mham9tdQAYKbiFZTkvHlQCOByOct2zyO1edi4e0oVIpN6NrT0VmmtdHfa/TYers03aLV1fbZpO2nTttd9dP1z+H+p20tpbMXLRwwQrdTzP85GMxRuP4ihycqcx7hnO4Y9mvXtJI5LrzBAk6RJJPMpkkcsGDzyLlfOkbACYKhcd+3xn8NNe+06TY26eX9mMcAtox81wkkwP2ibrkpLsUSZJBUAcV9JwaqLq2RGW3Mdmfs4hh+ZwsI6W/H/LMPlXJbcSwA+XJ/O6tGUaktLWkl8kl0a3v0tpvpodEdo8qWsb6vWzsk07aaXtrb1vc8r+J9nbXtldwlZiHieS3lgICOijiSX0dzztJwCo59fzJ8bWRttUn8uSUl3VJ5JPvGcE9jycdM9s81+qHjHy7zT3kRVhUW6iAzRFpyX3CWQJkYLHbuOT90EAc1+a3xRsDa6neRNACUkEizbSp2ksQHfkdCQCQCenFfR5DNwny3aburOy1fLfray3fS71WhrUhaNOS5Xqruz5k3a27110T1XzZ5EGMeDvBLAo2QM7eM5PGD0J9O/s3axK7V3AgsMk4HfA9j0wPfHNLld+NjbcbVPRyxH3Tnovp0OPY4qxDC7HcdzDCs3QHaMnCDnBGcEf8CxnIr611EnJNq7ld66Pb5PTTbW/dMl02pbab720srPTu+33WE8uUoCSWBVWAUkleuR7Dnoeex5p7xPyQuACAAD908juO+RnHQ/Uit6202V4964YMwLKuflTvkY6njJ9RjHFah0hhhjEArAH5iSM9goOMnPUZGO/oOf63BO14z179rJrft1879zop0ZOKk42ejvd6p2V73te6WydlrfqcRGjhwzcRkYcA4y3UH7uD9Mde3WtGKM4ypYFgTnqB04XjIPvx24FdDJooMjOASMKxRfu4P8bDt06dP0q7Y6XllBVSGbhsfKcHue/U+h6fU5VsTCa5k9EknFK/bpu9de2
jvoKfJF80la2l9r7WVutvuW2uxjWtlO8kTKAd2NzHgIO4HfPPQnnOK7Oy0ZvKkOwuGyGXPy5JHIGORkDPJ/HNdHpXh7LkKnmD5WY9QBjDFuwXnAOTznAHArurbShCmfLQKCNjHC4wOQwxlvc8deRzXkVsS5WUbO2j2TWz2e7e+n3NuxyyqJcytay1baWl9Ek/VryZ5ZFoaxhg0SrggllAA28nABB5OMHHJqhdWsMZGEDMpIZsAMhB4APHAxwR6e9esalbxRxuyhQFwFVhiMsQc7T/AHenXpjnrx5ZrJ8t5mUk8AmNCCNp5PHQEDA65xnJrSlUnJq91zW1u9W0kvP57W0SMYzu7XX93q7vl7O+unV9NWWrGRYtu5ERAoIxkM+T95h1Oepxj6ek13qCAusfDAcvtyFJ6seu4D+70H1zXMxXoUqqsWXZ8zkFnDjOQM4wpzgN+OOapXF0pZnV2VACpVvmDvz8wIP3WOcegHqOeqNNOTctNul23utNW0+9+i6hLVyjrBR1slpry3ut7ffvraxLdXQjG4SFSQyuQMIxYchWOchiORjjBGa5K7upMShWYh05UA7FznIOON4z0P8ALkWL+4+9skWTCncVb5UHcYPSQcAnB7dea55pJDtBACqSRhgSQOhHYjrnpnjj09CjC8U3yq0uVbJtLl6PXouz2s1sVCKab0SurvraPLrbfW3zva4x9x6jgN/EDkKc4BxjGec9Sf5tRmVWBIUEjJB5wM8jgY9+v5VKwJBLMwMgOGGCHIOME5GCDxn6EjtTCgI5UEEYKkg9O54Ax7Z655rq5baa2TXbR3SXT77/AIvbVO6Se110WiVtm7K+ut79352Y2Vo0K5PbJOOnU47gdMZPUehxPlSSisBlVDAZOT/tD8uO3vmqZG5SAAMY2qc7lUAbjz0ByOefu4Awc1biRnkwCCG2BWX5gT8wODxwc89R+ORWXNa17aX8na6f9PpfzOeStd7X113Wq0vt36J6PoNkjXBG0DIXOecMO6kEbVbjHXOD1BqiUcSI5XBwSHJxkAD5cYyMEkH+RGK6mCzaV5JG3svyopcFE3DuMA4IBGFyeOT3qKbSZVLqFKoBuLZ3Kd3oSABu6Dsfwq4VYptc0dLLV6Wul5X3V/XRIUKyj8T6JeSbSTWjT6rTX79TnHWVcsWLs2SWjPAHTa3YNjjPOKakBk2kL8rNhWfI5xxnjAz1HXPPQVqLbTEhSmIxnCKwLnHZiccnjn0zW1Z6X5sf7yNj820Ho2RjJP8AeC56cZ5zVzrJQfM7X2ad3ZW1s/lons1ZFzxChHpo0rpqyT2ve1vTtpqzGS1kRSC4YqAWVTjCei5HU9wQMZFa8VlIYg6R72YqVcjO3HUrxyAO/wCg4roV08ojmNNzRoCSQQ23kfN/s+uB16j0tRWRCpkRxyDayIp2lQedvfIOOMkdT0BxXlVKyk3d63sm002vd3Stvt6aHFOu5JNPez7PotvvW6ul3SOfTTWRlZkWMYBLt8hIc8hQQck9eM4x3oNsQduXBbfsYPhExjhgBknHboCB711t8YFTzJArsI8gBeBIFAIcjPXoPcZxXH3dwoZBCyg7XBG/5UJyOQRw3XJJJB6HIJrOnUc9+r5U7N9U7/j1/Qy9pKatpd21eqW2j69fRvfe5A0YBkMcZ3qgbJPK/wB5unQkgjHC9aYJGiLSHaSwwGVfuMeoGT+Jbke/rGbjaT5jEkg5GdwJJ4xjA29eDzzwOTVZ5CzsvDBcFR0LD1wc7sdye5ycdK7Kd1dO+uibSutr69NfX170oyk7SSa6u26VrrfRdelt31JVumLFS4J2sVPBG7PBOOp9vxPU1BJcuhLRn1Zhg7evzbR3Jz+GMZqsyk/KqgNuYbV65BBwfYc9Dzz6Cmsw5ViFbAOM4UsPugD2HGRg+ua6k1HTXezuttr6rbXzs7W63e8aUbxfLddY2T2trZaWelrdfmPeYqAEcgAEMWxglsZAHZSe+e3TrUe7auV
UGTIIxz8vPI6fd6jrye2CSrgMCWYIrKCp3DII9SBjkjpgfXnillFZWJ+UknJJDNg9M+nfpnk8inu9N1rqtHovn0TXmbwgmla+mytdu1na672el2lttqOcbScbgW5IJy3P3cHnGeSV6cD1wEUbvlU7M5Kl+AR1OD1HPbvg5NTSFR8yNgsudwO/cTnKvn7px3PPBPbNQxschJFzGQSCMAknlenOM5BbqB2Pek7q/W6v53td/c/+HSuaptrrdJOz02tfR3vptd7X2eysGR2OWVsgKuSBnpuT2+pPbnOaVcsjEuOuCXzuU/X3J6/XikaTB4xkDaSRlVXkBQc8kDPPHbjihJAx2tsMR6kHk8+mCOc9c9vrTbsrvzV9drLrb5W8l3Q/e5U7aqzfZJJaJd32e+m/V37xGIk3KpXCkDjd/CVPOQcc5AzzyOtSR7JIwpB3b8kg4wM9h6475GO+aUIxVigDxhQ54yeeuckEY6Y6Hj2qSJFcqowFbkHlSAOhJxyuevrwMUrtyt5aNLvbV97d7O+yffKcla692zWy2sk9V6dLu9tbC+UC+0EE424P3Vxzh8/KcdznH5VG0ewsQoU84z93dxnaemPboOPatEooLlRuAGPMHAGMYJ5OAR06579KqygsSQgO0/KOdrAHoOM/z5PTPWHLpezva70vtfS+nV30srrfbGFRtpXfLonftpulpbZO6T6vS5S3AoQcHPJI5YknAAB7dQCO36QdxgbjnnfhR1zjdzn36Y7nNXGQNGFKEMxwpB+YE4GSepQ9jwe45GBTbajgkEnIGDlUbb2Hb5uen86FdvzV7PX4lbV32t6JeR102m5JeemmySXe9vS3ToRktuY4wM4fJzwMZK/3h6HPr1p/AVwjRltuAQvIJ9OeGPGCOc9R1qJspkryxJbg5A468gcgYHT170wMyyK7AZBDDr+8DZAIOOg5zkck59caRu1q1eyb106aLy6rd6aHQo6Jp+isnfbTV212Tv1Tt1UgTDAtkEjo3KH3B5yT2GOfWhlOWwWVOGwvJBB64yPX6AetPbGByQWG4heRuPYfgMZyM+lMZiVXLYIAHy9dvTB9T2x1x+dbR21X56baa6/dpoJNv1WlrN9u62vppv8AMXauBvyTIMqcjK9Ms+eSpwfw5FGEyFyAOMvnJ75wMDHpg+xyaN5G5gFIxhiV5CnjntuI9M96jUl3EZ+6R/e7Z4B46Zz149sdKBJvZtW1eu2131V0raL9biiZwwwVWNQQvPJGeRj8eOcd/WpUmJLh8EHngckHo31HrjA9ORVZo12YWM4HT5iQWXlm55AyQMYzjjOaIC/3lVfLYkZyMjBxjnuO2M9ccda55pp2j0t2S6effTq909WVKEXFySWluy7db6yet1pd9i62HZOSylRsJ5IHYfnn09uopV+XCldyrknIBbHuOxA6jPHU+tRDIOcnAx0/hA6kfmT154AqcB+SPmLDOSNzEnBzk49OvcevNYyW7/BrVLRqy6JO6v8AcYS006fqv+A9bb7jxL5jBSBhQQoGAdo+6O3T5iemfwqaGXa7OrBiRgggYZh91ieGJXpkY449xVYAHKgqSPnAzhfXntnvx7dMCo8new2/KR8pzyW7Ee2fzzxUTg37tr7b7X07277WdvxEtnbTRdr9E1bre/4eWne6VfyhSAUXDKyuTwBjgDGO59yT34GfTtL1kMI9zqGBXEjY5JyGAGecjgkEYx7ivC7CdSph3YOw8E4BYdCc8EoCQB75zXT22oyKVXep2DjggAd+B36ZxzznFePisNzTlCKSfVuzsrJ6XXR7rT8TrpStFJJx1vZPtbV3Vvwvr5Nn0Ja6hEWwskbOFCrzldvfnj5n7Zz6etaysky8qSrhioznDDrgcHr+fHtXjmkawGKnzMhkwRu24YEYO4g4+mMnknnp3UGob1BVxtAw4BC5HGV68kjjjBOAfavmcTSlTnZ6W6tNXV7bLTvt5OzLknaO2qd5bRi1ZX6PW9vW977
HVPYwyQjdG24qNoBAY5wSSccjPbnuMk1hXenqGEaqfmPygnaM44UjnK8demB0GK0bTUGkQLuUIYyUxyykD7mcdD0JI4/UWDLHLIpfbuTBQN/F1xtA6HJ657Z7V59KrOE31d07K683su1/n3MZLslpblkt9Uk9urbur/ic3DYAOZCNrAgEAFfu9M9g30/TtHdiNYmBKFt3zYPAPPPI6n/62Diuxe3UpuMijzctxhiD0OTxj3OPr61yOpQ/eB3A4yAvPHbPfI7Z9c4Nexh6ntdXLVNK26S0trez8159jOcnFpSWjvvzX6a2emj6+iOBvWMjsVAUE7VAOQcE8txyck5GBn2qO1fYA7D58g9OQAeCPTBPXpWndW0eOgBVtxJYLjsSQRyeBjr2HtWRJsjZs5UjkHIByeCBjoOBjgdOtdUoKcGlezs0kr66Lf1t1VtfKz9ta1k0lbra70e/bXS1+mnRd/pOryxsoJx0PJ/eJj7oB7DnGRnr+B9Osr6S4Cl5CSUCglskA4zGOgx0GeuOfp88Wt0yzJskDKSF2knocknjsOAfU85PQew+G7ozwgFiolUsATk7l4X72MEc/J6Y9iPnMywMYrmUFzXu9OySdlZt2bvve3lv62DxerTvstpNpWae6V9W93f71dehxh5g4ZlXYy5iHJ8vBBwvU4xzz09RWDq+mCZGJi+UD90rr8xA7DnPOfU4Pr0HU6dFHMinh5An7xyMKSMYITuyjkYPc/Wrl5beYp8oK5BLEtwTHx04wXHU+mec14MZOlJNNJ3juuVK1t3rq9rvf5nouopqLu0nf4ndy2197svvTv0R82eIfDRy7JFINyhgCp3c5JUgj5QcA4GcYBBrzOXQ7qKQZgeFSWxISQhAGVVh2BOck9wOOBX2fNogu1XcowxVm5+Zc7sqoIyEHGfTjIGa5TVvBUMvnbozCAAEBfaJGIyuCAdxGTxxgdOuD9Bgs+dBKnKzur6q7VlFXtZPXvbZb2V35OIw3NJSjGKvHe+j26bt2aaWq27Hy7bWRQndguAfkAOAfXOCQBgZJ9RWvFYzFcqFVAOd47dSQO+ONvPOSRjpXot94ca381UiVmR9ueC6gc7enAzyWwc84FZ4sZFVBhWclS6DJ2kHJUE/eXJ54A9Oten/AGrGtHni7aq6bTf2emvW/X07nnShZyatdLRL/t1db26vzMO2uHgKZ3xFMAlFI3gHiRT02t7DOOxr07wxqrLsBLKGV1aTOYwxxtDA4wzkE556ZGMVxF5bMjMFgVQSp3jB5XPQdNpzz2XHPAzUen6g1rMoMjEttOCMKrdMsvqO57ce9KFbnlFxWj31bV2lrfRd7u+ivbuedXpqcZL3U1q1bV2s7Xt5tbXsj6y0q8SW1UyNGW8sbQg35ZR2IK8Z/iPTI44rM1SF3kZdqMcK6kZV8DPykjgjnk8n64riPD+vytA2HjwjoMquGGOMgknKHqzD8utd+t1HcY3Al9iszA5V5sEgAnnyk579W6V1ptPR3drNd27eWzs/xXkeNOMlLvzvS1ultF2u9dum9tDzvWtK327OWYmYfOnEgPHIxgAHjtj6DANfO/ivSlgnlYRspOWjUrhGB6hiCeRznIzjIz6fWl/EhibeIlUK0kZLbMs/ocHzG2rkggYIUeleBeNrYfvNh8wqwBDcfuhncWx1B7dDjvyK9DBVJRqrlbTb5V0S1VrNW2sr6WtrpozupOyTu1J8t4tXTs431373t2vpqfPMyMCERdqKW+YjADfxHvgdgPft3iEm4hfvFdpGOjDPUDP3RjrnjOMd60LyNlmJYbY8sdmflPPf26ZOeenFZ3C5IA8vlcOOGAPPlnrtGfl9eePT6ynJezirq7UXLreT5ea3nfqt/vPQvzKN1fRNO8t2lo7b69F8/KSUAghgURuUUHLbu20Dtn3HI9KbwI0kDEPuwE3AsQMcNxw3TA79OtE7OmGRo2OwMWi5VUGQUIxgvjrzxxioVLhlzGu0kcfxhTn
5hn2GScZBAHqaOZtXurXavpq7WSaelnrfV+XQiPwJ6aLq7bJbXt63fZfOZ2RXGGJ2gAtjoxzuQgk4YHH+eKgRjll42SkBWzg98MevyjOGOecinA5bLEhT8oyMruB+UA8DnnPHvU6RRliCAN2GWPdy5HXjHHrjI6DnmnKGjvvZXitbP17L8LeVgVtbroreT0f4O/r8xpXDlQqHawIdMAK3UkHPGeM9ea7Lwzrs2mzKkjuyt8rfPkCTIxxjBx+npzmuTgKTBigUKQcbcJtK4OWzncw544ByPSmy4WRTE5kdsFtpKKoXkjvzgn5vbvXLOlGpGUJLRq2utrqNt9X1t/wRNu7S07Nr3k1a9ttOnbS+h9S6fr5vY9qspeONQgQZXaAedwPzHqC3r2rX+0hk8wosbFAUO4ozEk4JTB2DIPruFfOXh3W3ik+ziVokUNyX+QnK8545PYe3B6V6db68HTmRBJGBuJJ+6vB2DGGOM4PX0FeBWwroyaTbjzJ+6nbW1tklZaX7mc5N7pybd3pql7rSaV3bro9LW9PRItQcYEjBIkX7pbjd2J7Nuxn2+tcvrV8s5kUuFjRWKkjlSRncrE/Nnuu0buMHjmrc36eTEwmQAoWWMANmRuoxkZyB1OABgfTi9WvwFly/zFANpJG9Ocqy4+XH1J9z1pQi5Nct001Z2tqmrJd9l5aJGHLdtp2ulbu1ZK762/H9cue4DOwBwxwDkg7ipYg5JBOOeOnXnNVVnMjhQ+0HqEJUErwAR3wR34yBzWXJMXcqVC7sOvzEDZ0BzkcHPPAyBkYxUQlAY4wCNp3euOgGTgA+vJOOn931V7sL7tr4Vrr7uq9bdXpvqkW4KyvrtJX1a0Xqu/fVdEbxmGQ6cDcw3MMtkgcDOcZ7jHYdKiEjgOW5VuhJJYngfKck9/8AHqapu6sUAI+ZVOIRuJIzyw4I7ZPb1xSQyRnywccs2GHzjAI4YH9R27nvWrbk9nFpX6bvlaW+mu3p6GDgtG1dr4W1rdWvdu2zev3m2jqy4U4ZQGySQNo69+WU4H0xxmkEvJWRgIhks3QqR2APGT+R61Epi2lvMQArwCcbvocHHpgZ7HpVaeXkrGzEEZIcj7vOQFORz2OT345yE17+rW8fO+ye3z6fmgSu+nuvqrWfW99/y16uxovcqkeEc+WwBBJ3Ekcc8D72enUc9ehzJZd54bfuTJXOVUYOQ2Odw6D0z361WkuHwFUSEhcFUAwoPJwD09mHHXpVF5HJGw+WuPm9WPqTknJP4cDmhQu2+a9n1V9NGr6+dr6XLhFt3aUXdJvyVnt5vV69NxJgQ4b5tpAGx85Yc8EjOckjjjp9c1HIwWXO45JAIwCeCp465B5z6HAq4zHncxyFDZ6lyQcHJ+6F7+nOelVGVfMDYBXAJJzyM/3vp149eOOdlJO9+lrOOr+zb00T8t+m/VFKDs07JLVJ69kvW9vlfUrDzA5LkkAYIB43DGFI7557An64NOUqcbg2QScj7h9P0/A/ic2JYYgzMrghsOAjKxyByHOAQT39OMA9agRQxGD8jnA9wOCCfrgDB7Hihyva3TTTTXr+J0rZPbS2qs/+B6em3WaJCUd8glztIJHQew5DDOBnjIrTjtlaIBghkVgRgDO3g5J/l0Pf0qCKKMKSyDgnkdyP4hz29h7kCnCaRfuEoMYXPOcHjnt0GQRknHfk88nu1olyqy9PVbPq/wDI6OVaKybSu+mzS106380OkjUFlCkMV4J6lum3p06jHt7cV3t5NpY4ViQNxGAB6D2PPPJ9KttvOznL4DZwMDk8E4z39T/Wn7GP3yRxyuMZ9sY/H17elYqbjtdt6Le1ullvr8ugcsWneF9neO26t6N+tt33MKSHY53LuYEY5GMHJz9c5A9sjtTF4bcCFIGQvAI29MHr3yc8ke1adwmFJUgnGDns3JBJP3SD1OMdfasrDKVZtu45IJblsEcdOFx6++c8V0wn7rs7WffzV7dtrX9G3rcylCz
tZNNJL59lsmtm+l7dR0jupGGxuALZ+4pOfl3ewAB4GPxpi5LhXIO4khgcqR/Dz0wM8EgZHalkDEoFGcje2OwPOMd+ByeM+nOKbGqk4wCoyc9zj0OQQBn72P5VvGdo9W7q67X6ppbaq/QwULLbS192mrdNbvbpsSDAYnChRlcsOhODweu4kcdaUZAkfzG6DaFYKQB3BwCCB978OlPdQDwcIVB+U5ww9Rj5e305zUa4JHRtysASe56b+3Hfp6cDmtU9rP5/8N+hEtLN6p2vte3fS70e3z0vtEvPC7csSN5XpnsT2z9PwqdWZX3OwG1T8oIYEgcbVx0OT8uaYV2r8uF5BO37m7jqOxyeAOeOmc0/GW2kAEA5cj5WJxgqM/d6jPGPTqKie3p+rX+X5ho7Nel/L+v63HK/PDHC4BjdQEIPsCST05JHPTFTchXY4BYZGz5jgY5A+XGMdf5ioAByynDLjcc5C45LEcYHQZ5I9aduYndzhiOeNzBv4gMdMjseKhr3b9d+Vu7a7q2tu/X7hNrmtfVfm7fLb89ByFcoxddpZh82NxI+g575H0qVtp5XICDI2nq38Rb36AjjpjPSq2CrHCYIJJAG3G7GSeeWIHJzkdM1IpG47jtLAYAG4Me3GemR1wPX1rKab8/xaey89duw72aurq2rvbstl63b0S/AFx1xy7MEY/ezx97I6D8OeeasoVKuCUEgUkMchSexXr+eR04wKjVVKN0DcEDdt59VxuznsOOAfxYzYA3A5YYUEYz0GPx7ZPPQHtWa00e2/rfrs/XbuvIqyb6Lfro7bLXVXv1fez6EyAAcMN5zjeMjce2e44Jzx1NO5BZsbuDz7ng/iMHHPA+uaYN3l52qAcNk5yexIz04/pipMsV5O1cbgSRtPPBHX8/r9aLX0tbTX7lfdrzf/A2cU23orLTo1stLb3Xr6tu6Ggl8EnEYZs84wRjkj1weuePT0lyuMA/KfusDzu65J685PPOcHv1hAJzkYyc9TtPT8yfY49DTlDAk4DbTgLnjnHT2+h7gfUtb599P6t17PR+duCtom7dnrbrvp3fQmVdqkqcndjdnA98kgZPpx/KnbtmGJO48gdcHnkA4xjjHJ788U1Gb5xwAQCVHUEE8AnkZyOnpxxSkPuHAJ9vQ5wP89M9ScUhLVNtO6b3s72S02W9+2jT1JkyRu3E7s9TzknuMDOccg9vpT3kOAuFGAASo+8MAnI7+39aSLZlt393ncDgZ78cHH6Y6nFOKIMYJZR95gM9TjrkZAxn8aCdFbR3bWzTWrXkldPutN+w8FdmNxUEZxyRu9iTwf88804YPcDIyGzzkeoyeeuOwznuahyGxgYQdfl2ngHnByDz+P060pBwMEkZAPqAexGBj2xyB0xQU13dm3180r2/LdrZbvSZWdMgYbcD1GeuTweMZwfYnBqZc4yd5JHPA6Ht0/Wq6kZ4zlV478/zP3ux4xyRmpwXOCABwN3yjP5dxjHHp1qor8/ya66vrsreqsS1a+jT80rdPwv26WXUUqAAqkjJBIPPB6j2zxn8KnVTkZYDBAwO3I6ceucc4555ppIAXAUtwSVHIz1BA5yO/HX9QkgkqSCeuQfpnGOuccdhzg4zWxJb2qrEjJG0EFuSWORk8fln+pFFQRyNxnP3SD+fU8dO5/DvzRXRGSSStHZPX/hyGrt+7e27ul+Xk15nlhU/MPmAPJGdvHYnjgZPJ9+vNN2sCSQBz06445IPv9Ocde4lLoSAzAMqFmU8qx/hGePU5xxTdwYgbjgDHqAAM4HQEjJAGRnNQ/d2tq9b63WnxLrrprfSy3Zpd7O+q3tpvFfou/wB9wLKwACgMoCkKMDHUDGOeD19OvSk2AoxbjPTb97OeQcHuDn8OM9aViApx0UH5sYbHqRk5/MnpzUSBjIUBB3/dOThcgckZIwOPf88VN3ZpWWrbtp2WnT8dfzEnZ2drW3u9mm9X/wAFrsNyc7Y8AEswJBIzgZ65+mTn0PU
U797iQb2JZQCFUdP72eeMfeHf6g4anD5Yqvllg2T8rNxyR0wc8gZxj0p3mIPM45IPRSVfsRjIxtyOc8cHHeqjJKTSV7xgmnpba+t9dU9N/S2unVJJNe672Td1JPV+iT/Ha4hYKEDcsjjvkN6jgdDj8P5SLtCPKQPnyQTyx65C8jDHr9OnfMawkoSqhgvzMcDIX1JOOPbFOPyDMpBY4IVOAg7Zxxn2xkDqSenRFW01v207LVXdnb9VfYHZ2tq7626rR2Xq7ttvTvuhqqy5YqM5BDkYIBOFBOTgYzyBnOevSpSr7ZHyWyVd1HzCIf7C8A+5z3J+spCGIBsqH2sCWyS/JzjoB6Dr9aMqwILDGCf9psYxG4/iOB8p4A5z2rN23Ss2rvffTR9ra7XRHPfW2t7PfVRtom9tV0Xlfe6fxENygB2jpv3D5cHGSOCDnjOM9adGz+ZGI0jIEq4A+UbkJLqe6q3y568jjPFIysx4VtybSgYYyvJyecnH8R75GRzUaxsZd4BBMi7gW2oGPcDAKDjkc9vmODXDONvR6O+q6Kyte2i3W/4MVtbtJW21d3ZLo1uv1T6tfVvwj1nYk2+4aPzOIbVPlgLORtDNkmOM870G4AYBbkV9saJLbGzB+yu0qk7JEYrGSAMKxGdpQkmM4PmZPyjGK/N34dag6X9vEHCuJSrSg5Ro5MDy1OADnbgnjJHbGa/Rf4frPNYxxxqzSRrHM6RjEZCA7WnJ3bJwCfmyc55HGa+Qx1K1eTsls7pWWjit+uru+p1UFzK1tEopJ2bilyvfX5bene7r6zXFmoieJ5QpeYYG+GRP9YsUZOHcZXJyAeyrXwn8XdPlUyBYZVMpCTyy4Ilk+Yo6oQNoHJ4Y/mBX6GXenSO0/wC6WSNZC6RKAGto3GSS3ANwSPkUcyY7YxXzF8XvCFxPbswgLRgJJFJOf9KKtuJjdAOS+MLzwB6HNPBv2NSL21W7807eeqTWtt10udqheCi46adOyTWnZtW1WnkfnxJbyGZkfBLHJIJDO/TAb04PPHTA54resLWREiA/eGMkME+/z05x91e+O3P1varpn2W8uPlm+WTaVK4JBPDP12beQo546kVe02KJVQhxlQP9pmY9PnONwByBkcZyRmvcq4mXLZ30VpaO17J6Ls7233toiuVPl5kvy3StbyS0suq+Z0mmWaFIwY1UFlJQHeVZjwpO0FguD2GMmuv/ALKa7/5ZKxCl0wAqkY+fbng7sDA+vNZuiWjkoAiICwIc8MoUj5JCckE5Jx3xgZzmvWbbT0a0/evGuFL/ALs85IA7Dgjjg+vWvGq1+ScWnrdcr1d27b20frfe/Uvm5Vafu2SS2trZvfZd3f0uePPpsdpv3sNoZlMQBAYtjBBzzt7dMZ6ntRVka4ihQAgMT8uMZPXjAIHqeRkAd8HsdehMfmmMIT5b7GVcLtGNqOc8OwyA3qK4izspXuY5WXIHDBsgZPrjHI7evOPQ9tOompVJPlfbpdqyeml9LN663T7nm4ipHVJrRK923fbTXo+3nv29D0STYmXPyLldo4bOOCDj51zzjgdcZOK2LidslQQpALJkYyWwcgdOQM468Y9xn6ZCkUKI+QS2A2erE/Kp4+71A/P0z0TxIVLBBu2hePlKgD5mBHtgkH1zxWXMue9naTvdJPTRX7ddL+fq/Hq4h1JN81o3taK289dXr1d77dTj9Xd5Y1QvkLH2XOTjndk9iP8AJrzDUUkTPABRvmG7LODnBx3UdzxjsK9T1CKMqwcHy8k4V/mVh0UkA5HXPHIFeearCPnCIeDt3ludwzgnk8D3/Cu2k9HGyte6ukv5de680tt+xFKsla7d1aze62ve7u9duv3I4O9lkLnkLuLH0CkAEEkHoO5xVE3EjRsPPjG0ZDjjzAOCUOeoxxxzyDVm9t2y7AZUHkE5Oe3A/h9f0z0rLdG2FWAGSoO0YIGSCoz0Q9z6445r0oQTirNSt5JO9l1bbvpfVq3bY7+bmiv
eTXuu/ba6ta17edk9nqVTIoILsrBtxxkgt7uPTjnOO2c1WCgh2bd1yCoDAEdGI4yrcj2xnnGKsMmGbC52dWx0GPXgc9PXg/jWUKXZdzAHJGxtuMHgHg8dyOp9u/dCy1e6inunva7stmlvb8eusWldptKyd915pW6276u2vnEFLKM8YO4An5R6e+f0Hrnml2EByGAQlQxHDH1Kjj5fb2GD1pHkALAYManDEnoQOfx9D7evVIZC7hAFIz0IBLKORj9eeOcepqpNWur2t89e3bV+f3m3vWcklZJN7JJPlel9nps1pv1JoVDuoUhoxnIJI3HuNozjPfr2A7ite3gZ3xCQwUlvnG1VBI3BevzdOR34x1rMhVkbdhVGRlerHJ5KnsT3/PFdLYrGUQKzhv41X5gxJ9gCMZHHrmuSrLlu021onorLRX9NOmz7vU5akrSW7i7JW11u92vz9eyOh061YwKZMFCeERuQSfvKOrbew4znH113tvMDxomN45Rec8fKcEZLd2x+PJ5fZxGNAUVSCFQqBlkYA5AIxgkcjjA6dhWrFbN5kEkkY3YPzg4G3I+UgDllOOeOSa8p4huUpNpNP3V3+HRbJb99N0r6nJNt6ONktG1HXXr8nvbZLzaOVXRwwJEX71CMsy5DLngH0K4IB6nJ4NaaaaYV6qVA3ERjcyKehxnI3dx04OegrsXt4IbfdISDI33s5JYHgkcZ29unByOKwLu6gjDbRHuK7WHBBIPykf4c44wTRDESqtJXs/0tu7baX1Tvfd6mbajd88pNqzi1dpq1rNuyS1vf4dlqVN6RoiAMfL5chvu4OSGPBKDsvQZwTgiklukJlZdoKqr72TJJ56MSPmbsMHaBxxWTcXAjIdH2HG59+CCDkEEc5DjHBxgDGeTWRPqDbJkM6jcOgCgZA4CL6jjBHvya25OZqUd7X1V2r22326bb+ek8rtrzS25bednr103v6kV7fSEbFG2Jw4G0gbSDllZsH5iQMNgD2rn9/wC8DSrlc7sK3BQ9CGw2cYyTgZ9AKVpizjzDuAPCE4B9Mg8H6k8gduhapHksMbmJGDjIAycMMn5tucDgYGe3TujSUFZJXdm3bvZtf16eZ0Qioxs01zJXfn3Unra33+QrSqgzyULEyFjkFT0KnjJ9ScDgHHSofOUlxtHTK55OVzjcRwR15HABORzw2Z1DKA+RwNpXHJxx1O7p3xjjHrVZvmQPsyCSq7WBbII5OAcKeOD155+WtFFJ7aNpbXWllt13uzaFNNXd/esr/NWsna+1n5eiEH97cCOd55Gc8bR1OewHfBqAkFsckqSV3dArDPOckYwfrjnFPLIGX5vmPJA+6W7KB2HOOnBHpRuUneR8rLjYxyNy55yOSOeBx6ZreKtdXuvPvorLT835HVFWa3s9t+9rW+b00dkl2IWcgFXY7VxnHYDJG3Hbk5x04PUDCEDZnZ0Pysx+cjrkL1AJGBkkZyO6mnvtYliBtcEEL1B9x3xu9R168GmfuwpAkUyDGCDkNzwnT5cc4wDgZH0bsrvql+G/49zRWsmk903ZO1vdTvborWu339QjZVkXfyhGWTcQzegHYA45HOD+JL1KfvOXxkqvy5HTn64Hcc8HGe0HIcEqxx90sOAjZwRjk46ZA/nUqK25mweRg5wApzw2eg69O4J6VMdLWdk9dVs7Xave/TRtWtZ3CSW+14rqktGnZX8t3ZaaNEm0uVwchgVTnGNvQNwenX37HtSBFDMFUKq4LsQTj1OOxPYdznmhiUIZGHOQ2OVQ99uR/FjpzgHrjilikDMUBJZl3Buo5HBOQOmOTz+XNJ20vb3t+n8rs9evf5pd4aai2tYpba2VrXvbtstNbXtYnjlG04J+UnC9SwzwuM5IPQAnjB7mpN7kgk425yGUEAMACcg8Eeozg++MQogABUASDgnJxkHGc9gwPpwD+UkLJ5pD4O4H5S2VDKDjt0I65xz14FTG19NErddldeVtPP8AQwkk+ZpXtd2
dm9knZdU119NklfRgY4kkLEhRkAn5WOOCoxwBwB1HIqRwuwIykvkNuU5G7+FVbjIH0xnoPRsBi8tw2DngNj77A8HaM4I6Ak49u1K/mSlY942bgAA2Dj/ZH/oRolG3Zqyd9ktvV6XWtnvfXc5PtybVrNa2tsl6X630Wu9rWMzZIxwBuJzg4wxCnnn+8MnA/Wq0ygFkO5SoG3IJGBnk5yRjoOo4q6wIYlgRgkccc/pgEDt6fSq8igKNoLE5B5JKgnsDnrjqTkZ6Y5LTd0778qstFe0d7b31t6JW0Z2QlqtlorWta+luqVklora3086TbQ24tjb1BOAexB+7gHggdsdcmmqwy2MbjkqABtyOjZwBxnAJ9acyNKcBMsCShzgbeOScA9upB+uTmowkgLOoWMDaMDgknPAz0zj1wevvWsJdbdt/VbdH+fXQ7Y2cbXs1ZPVdGtNG20+ra6WFaNlBByGbaQOuWORkHtx1GfTkmlVVV8SFTjOfQnHrzkgnnp0+tDyE4xksuBJtBKnOBtzxyDjngD6cVGwDEZXDITyemDjB9/cnOeOBxWsX1SfW7vddN79tvTuHvNK7aVrOyX93X02tfvfctYj+Yh05TIGcgnsWwODz07ZGM4quRkA5AGCcdOfYf7Xb9feIb1eRsfLtwQCec9CPRRnBwQOnXOacxARshmxgEAZ+UkggHoe3PGOeCKd7pp7/ADV7q9t7+u3oNRs1ZvpbRdlv57b+WqVyONyGLMRt5AXPLe3bpnnp7nipoyCsiqONxYEj5RnOPoM89PX05gWNSwwhK5YKxbByR8pbGTnJOc5Pb0qZA67xn5NoLc4OBkDAP6g9jk1hN2l6xXfy+03purPW3lfW58tm09Wo6NJJWa6Lqr79+6JFDKBkjAHIPrntxzxnIPpxVyJhsK5XIwQWABBI7DOTg55+nHQVUJXcEcNkHrjKbcE5GSAcfhz6c4nQZQ7RwDjOcv35yedvTI5z1B71l2e93bX5dG1rq79NdHc5qiTSvo3y6pK3T5XfTq+u45EYN5nVumCCVbJI5OcY/wD1UrREFg4CnaXBByDwTjGPx7dOMZ4I3yV5wq5AQE5YkAcgccjH0x2FSu7YcN8qtjLIM89gT79/XjFaO2qa1/GWl+mului+/ZRd3t3S077fkrW738isgkU+Uvyh+Q3Q8Ht7HI4/PjkbFvMQxjIJcDjjCnjBJB6YPuc8845rI3ciNjtAJIJOWZs8Kvp05J6FfwqdJnJZw+zj5+x6noD3z1PB9jzjjxCT0sk7J827bdt977W872OqLtbm0dlq11Wi2eqf4nX2EzKyJGxB5LLjIGCCc9Dt9j9eld1YagVdjJJuAGQvABJ46duhPTj1FeSW9zIspZmI+XJIPy4PQAc9T16cgc+nSWWpLnYx2sB8voV9TjPHXnOT6DBz4eNwzmtEmmtNk0m1e+jbfdfcraG3Ny3Vm07J21s9NXvZ2bfrq0erW+ptlQjhTk5J+7n/ANl7Duc5rctNURlyxG8HOR8xJGMYPovXnrn6V5lbXab1lwQNpBRjlWIP3l7gj0x14ye3SWt2vk4RlLcEnGec9Rnp7Y/EcjPjPCJOLUZK7Sd1pfRvV3b87aNvYhpa+/26NJW6JPe3k+zO0k1bYNzPlR0Ckrn02nPbODkA+46Vz15ri7GAcM/OcAMdw/EH8unHNYF5cSqjFSS3oT8pBB3H3z0zjmuWkuJA56qCeW3cc9iST369a9DDYSHLdt7p9Ndtlrp573uZOmpWTb0stHqlp5vTXdX21OhuLoSADJ2soIzwckZyc87fTufp1yZpFw4xuZMqPlHI9ckjnjn6nmqKTF9ylgG6qCScnruAHbvj/wCvQ25mIzlSQAWG0Fj9OT0POR2+ldbXKkn0tv2VrO+1/wBUHJZJPa60fyezT/BaP5E1nKFkjI5bLErjA3EgAkH0xgHP49QPXfDczSeRvkDBWDEkbSW43A4Jz2zjn+Y8mgs3ZwQhJ34Vhwo7gfz
xxjIIIrtdGmks5EZwwClgQrEckAA9MAcE5zweMkGvMxsY1I2hKPNazvbVXVlda+XfzaFTTjP3VJptbO29rtW0a25tL6n0ppMcjYdWOxk3AAYXbxndz0PYfXPTjowFVERfLYqxyM/N8wB+Tg9OcjPJxyK848N61mNQxUGONQx3Z+90QDaARx1yOnTsO/hmSRVO1ApUtlhltpIBYHP9Bwc57D4nF03GcuaPvX001utm/sv0/wCGPWpVLJKV7dbpLWyutUr2fbytceES2Uu+FYqwGVzgEjA3AkM5/hGBjnrnJbqLRSRoAY2ypcsihsyEfMWPGWxjPIAxVS8vUX5YzkqMElcIf9kZJ2+x+Y8nPU1j/aZp90YChTvZFU8kLjBDdyxPBwSQOnWuKNOV1NvV6PXW2my3Vu7/ADHzy5eVWdlZST1V7c13daddfNmLe2UTrIVX5lRsjHyu4IK89dyg4xj8q5aLSyZ1ZY1IBILMMAHOQmMHJz/D39hXa3LuqYEe4Lw4DYx6qGPQ9c8EVUhhf5WwGTcTuU4O44wNuPvDH3iRkdutdNKrUipRjK7e2r6Wfl63899jlnGztyqS5k9E3e9r66+eyVrrza4rV9LaJJGwsmQJHIIACnOQEGTnI56Y4wBxXmlz+4lByAxJO0nLAA8r7/XqM9TivbtcLCGQLtDj7ypyfw6ZIyRjjHU8ZA8Y1SH94r+Vw5YoGGWAA+YkjqF7DA3ZxyRmvey6UpRcZtN23V7/AAx3Xrr8jycQlzKyfZ20Wy762WvW++nQ6zw9qcQZFPmEyKoKE/KznhWAwOAegzxn1PHq1pqTMirHIihciQcEqzYLEDIyxAABH3R6Zr55s7pre4GWC7f3cRcbW8tscYJOM45z049a6ez1xY3O1mDBdpDMdhfnLKM4xjtnv17178ISaVna0Ur9XsrNO/fXR3v0u0ePVivaXSVlay6p2V5JrRJaa26HqWr6kzRyJhQYhvXIxu6Bii5O7PBBJ49OuPHPEl7FLBIvmF5Cm2RQ2dzE4QgZGNuDgA8ZPPer15rTtGxV3d8cb0JVDzgJ830xxzXB39xkk4KZwCxwFAOc85OT0wMeozzx6mEo+8m1s04vq3bVN7/ck09vO6Vorvqul272TaaWi7aa39DiNUV2uPk2uMqG3kAlsHKyDgMO3O0D3OaxTGQjAlUAcFyp5IGf9WpHB7dRkdRnpu3rQu0z5bYx3IWz8xBILA9SORhecnJ4AzWMTs3Iuxg6snzDLscd+eo/hbGPQcYH0FF2go2fubX87Neb7enRWZ2qokkraWWu97We+qb11v8AJFcSqCCzLsVvuAhWUemMdsEnk9+mTUm5JNzKjEOeWK5Lvycqcjdnt0GM96jETBBMsYcKMEbeCDyWJ3DGO3HJPfmlcMQQHJDquzGFEbAH5WPRUJ4z2x9K2UEkn9lJSSaundJ6X/DvfrYdo2TTWlru9lZNNppfe9lvba5CitIY/lBBLbTnHTGA3uvr0559px5imdnaQgYRcDGRxwq5BUHGMYyMAnnioWBR1+UKxyCFPyhTxx2O72Axj6YlDxswjJwwIyydGU9Ack7tuD6deOtaczu+bRNJq/V9bvvv93mU9NtU0u+ivp33drvbW60JYvLKyRhe2VbOJVbng9ckg8559qaZEyh+UGJirKe47jOeensQcj0y7HyyyFWBUEo2ccL1d8LkZyB/F06CqoxuC7BhzuEmeC5xuyem09ATn061E1u18737200t+Lt20sS4qXX5trokt35f569NFZlMhUkojZKeWAvzfwtj+Iddx6fzro7DVPJt3iFzCHiG9pGAcu38TKeMMAAPTt04rkGIV3YjYoOUUdm6ZUZAXpk9eB+FMilMYOSYlcFdyks7DnJz6dcnAx0rjnRU4d3vZJf3bvV99O/3k8l00rpe7tZbdNHd6a2Wl+up28msSNGVknGxgTHjIPIGFABOC+ODnBx7VlPeSu6+azsqLgnJLYwNoYYy3fJyMjr
ycVktNFJkOzDcFClXwpcDhs9Ao6keuO2RQkkjFhng8qz5PzJ/FyQDz1OemD6iudU4xatHVLrF7+7p6972u7AoRXRPps07K3X/ACtfysahnJBVw2NxxjAyvBDLt5OfwA549Wi5yxCqFPbGCcjoWJOD9cenaqQDDJLIJD8uGPDZHzbR/DnA6Z9QeoEZZAwCg5Pykk/KCPvYHBx6knBOAMYq1G6ej25la3l5K+ru9etiuRO+nbW/W9+ul9N9W9fU04pSoWUsWdWy6beHP+weflznKg+hJGcVaS4ZfM8sgux3YUZVPUDptYd+v4YNZCspDH+IHhmPBPfA556Ec+vXJoMvzZDMGwMckAEcHAweg4HHoB6VUVazWl9LPrtqvxd3ovTUzlT10V0raO19tfK1+/XpobYuiEyXEYI+6FLMrA8kLkcHPK5/HIFNN2SoUyKzp82cnaWPTH90Dng5/OsYyb1LEkvyWwMKT3B9T6nv3XvUayMXQSAADnIblzwVHTGOuenrkdny/E09dLq9+1t072evR2vbawKklrbu21v062d/PXsaJldyN5B3EldpwcdQD9O59zjP3ajWYqV+bkEqc/dGOoxxn3GPbtVUnJJBc45UqM7j39s9OfxIGSaYEzljwAx5J28jqvcEjOPr3HFQm4u177J9ujtrey1/U1cE4u2i6Lfdx11ta1+itq/K+n9oBUtzsdQScHk9Bj2yOOnC568CHzGYrk8YIVfXPTjvjv1H8X0qIXdgisVX+HJAXg++cY755zyetTAAMWJIKsAVxwSOCFPOPyzxTTXbpr1urra+1ui7a621Emt3d3Vnt0Wiey6X8yRNwZnPA6MAMEnsEGee/bp6VJGSv7sgg9Qw4VQOhAPTjnPQ/hTTgKMkFm5UI3yqcnhiRweOR2HcGmEsQQ+4KcY5BOBycYxnrznr7UneTai97Jt9lbW3V/5mqd1qtbry6xs/Pp027aMuxzIh3Fz3Q8AgjjOOcn0ORz+eNBZ0EXIQtwR8vPt3/Tt61z+EK4KkjJxjAwAOMHPHqc5549qlSVguD8o7AsAQF68DtyP505w0bjvZKz7d+2mrfe50KV29Nut+ml/8/l8zajkUvuc5JUkDH3TgED+fPp0zipXYuu4ZO0AnAxgkkDjnjBHXoDyBWSsoIJB+YkEZ6Hjk/mPp+HNWRM2WLEYYH5R908DPtgc9fyrmmmrOPZO1npsrX01/rrcrfZ+7vKz321v023+WpXljcbj8x3Y4HfJ5xkY45xnH6CoPLJX5lYg9PU9+DngZ4HIGfwxYeX5goJJwec+xzgf3Tn0xjPfFORUkMYBJDfKy4wAwzn3xzhuevUinzJpLVKy20d1Z+TWz6/IzUknflt876O3frbZ6blPyuRgYB6fMd54BC9CcduueDgnsnlEudisox3A44GVz6N1yfTpzzuR2iM2QNvIwD1bAOCGA+UHPPXt6c3l00ONx9cADs3OTnGNvI6j8M8VUa/Kkt1dRd+j0vb9fwITV0te97+7r30fbrp0OWMbBjvRgPrx2xtGDx+AweM5pvlbmLKhxzychue3scHrn8M11r6btyFQNIAG9QT1ORzkeuD9R2qCXTWCu5UuSucL93dycEHJHqT/jWyxMet9dmnq78q3atbbz7GUrJJJS26p6vS7X8q2Wu97enLqCCC/Ck5IBwSRkYPXk+p9KGGCzIuOCDn1xjp+W7v8AicVujT1C7mUFhgDHTHPyk8Fjzx255HWh9OUozhWVRwCoPHY5JAz+meuO9aqrGztqn1dm9Lb77dPR7mUrLa+rSsu19Oqt17f5c8gMbZYEZ3Y292bG3d146546Y6irCsAm07SwywbORu4xtOQSO2AQDnjGatyWSclslh02n+E5+/kYJHbvkniq8kR+4qsMYAB42leeR2HofbAFHNFpNSTfp366Pu7bImG7T0dl8krer1W+nprsxd5GHOWUnaMEMcDKgDpxzn1x9RSom1MgAy7sb84ABHIzz8wz1Pb
PGDTth2nHzMOC4OceoHHHv74B60wB8clVXgNgYYnsAeM59snI9c1i27N3b1St0b7200W+3rfU0S0snZN3fp20aslvrpve4u19wX5g7AHB5z6cjPbjB/PipHB+6wIC5Bx8xDA8/wAwM/majDsCFAKbT94n17An0/IZ64p5yx6kb+p/vDvgY5PqR+vZNp2TWtrrdbNL56ei1fXdrSWulrO3daa6dVfb1bDdjAUkoOgPLEjqCOuO5FSq7bQQcgjay4zgHsBgYPYnvyPo3YNvXGDkZ5JGeScZPPOew59aVVwww3JwwHRe+QTjnP3uevSle2vz/rt8rGytZNbPW/e+t+/36j92BwO+MEdF44A9cZPt6+snBHGc7cZIIIYHjPoAeB+RGKTBzgYBIBOOcdOeB/QjpwcClAP4cAjacjnnJx+I56YxzTvfayVvPXb/ACuvz1u2EaFNrMTu5BI5DA9/fJ9Rzx+EysDkHsODg5JGO/t0xgAdfogXONrnaFycHAzzwQRyW79+nHNKqbcsOv3Qe4PXBGOc4P44HSi/Xr3/AF9fP576gCq4GNxJJ+YnHPIOMjseOg59DUqkbSowScjGepJ9eOOP+AilAO0kEE4+YHkg8dBjr078cfSmbTwcD5jng889D7e3Xoc9MBETjzWa+KLTW9t/W131009NpUX5QfukEgryQcdgM9f54xxmpRtI+6A2MrgdSD3z0Gcgg55GD1OWAgKAWAOARtweeOTn+I4PP/6y/wCUchgcrwfU5OevTn357jrgB3vd6Lva38t0nfre17uyW9hoOOSMPknJGeO3HGc89xj6ZzYDA84AbB6dyeh9PUZPt+EQUY3NyTyBnIPPQc9MZ7/1pwXClsHpx64z0/wHP1HIq4y6d9Oy9dLO/wCemt0jBqSnzbqVk0ul2nfq7df+CtXKCApLclvmPXr6E8cc547kZFTqVGV4PGdy/MT36+vB4x7+pqEYx8+QM8dcDPTHbP4/n1pycnC4LqCBuHBIGeRjAz0H15rUok80MTjkEcjpk44OPT0OfbPoU3aAxABBGSB/ASe/XPJyBx/jRXRF6LVv0a8vLyvrrtfUdr9vvXr3PKiwZtgA2q2V5xkc4ORyRn/6xzTwVAAYqGJwADgYGCCvPB9sD6nNOZFKsMYYqAAMjawzkng5B7jj9MU0o2w8LkMCXP39pIJA6/L6cdO3Wpmrp7NrW/ldX7PrbvtuXeLtrbZdndta9n56bokygUMWB3Y2hRnJ/wBruPcYPXtimHaHQA9icHoSew9Mcf06jDVwSFBXYBgkNgkk8IPvZzwTz/8AWDuAyE+X0ZucfxYGMDI+XPXIz6Yw62d72T100utOj26el9Li5bO13re21rPTy16ta36NBIqkjccgDkAbvmI4G0Y5Hr39aam0BEZFVW3ZYHnPBAbpgHqOTkZGe1LjdtPUSAEr1IAPAOen94EdenenvEsbMoUgjBViQ2F/i6gBSxx06dc1pTs0/KTt5Wtu+t7t69X1sx3SSi2+/Zpqye3aTdt7pX6EyeXsYM3IUbTk/vGGcDGOvoT3Hvw1jID90ttUFgyglYzgnnOQQMHcOnp6wnzfugADjaScZGflf1wcZB9uPWrKMDhBklFwzEg7m7EDI4JznJ44rpi7dnazVururXvtql013d+spcut79d+jS/O+1lZ+WhB/rFaNAwXJCgHDAj+LbxxnknjJHpUkYztUEeajYDMQqHOeGPPzdh16EccU1csS4wzn5WwMBcEHnHfoAeg9O4aqSI2Su04BJ75bpg8Z5z0HP5VL+1tZdujbXz2X+W4/wCZX21s3u2km3s9dr7PToWSxHmMdzEKDyclB3bjoFIGQfUcnJIXy1kCuXYmTJAztYkYwwxncoGeOMg49KjEgKFSHVgckIc5yTnIwT07D8uaexcqwXllK5jC43KMZ55z23e46ZrknG6krW6P1W613e+mv3ak2a30d7XvotvLTV6O7PRPBdxBHeR4kIULujO3ahZ
CBlz3lXdmMYYEknHGK/ST4Vagklhbp9sjKvDHa3hkcSAy/wAXnvgGZmGMkKoTA67jX5j+F5Y0njlcKPLkDxNnKsCOCVwNrgA54PrjvX2/8FdUiMdsphEisxeF4nZVzEV2YYj/AFvzEyZGWAXjAr57Hwje6V3ZW2tdpJtPbe19VZrS504edpJa2SV3dp2Vu27Tf4et/sl7Pz2kjk5t3DSQXSPtiaJMeS0pwSXBLbAQN2TyK8y+IGkPcaaCiQC6RRFchVyxaMY3I2QHkwwyuB16knFe7WNqktk1yDFcn7PG0qoN8bpz5lxPCCPkIwImDdQxx68V4t0u3W0vkaF0tpllvrSRMq8YmAAkeTkxtlcRRlTuBYZAFeTGceaDTvZrz00b7aW179nc9enBu20YtJ66aaPfW77fPtp+WPj3Q2sLu7uCskInDSrGykq5kJAyvRcYOT/Dn3BrzazuAr+W5IA3FQBjGcYXcOqjnHAOPqK+oPizpNwq3LhRJJE3lTqj7pgVyWhBCgMuCCGGD19TXydO7287cFCwHytlmD5OARxtB6l+/Bx2r1lFVIaNbW30Witrd6W69Fdb6GbjySTtdSs9b2W1/TW2vTtsesaHdRAIQ4ICoGZPm3PnhmGevXsMdfSvSoNURITHI0SAKCMHDBTnAC87mODk5HTntXzxpepPA4MuAoUMAGIzgHq2eeueg6jHIrto9XQ24BlYOylufmZWx2wQcDpjHA7+nFVw0k4px5kuWztqttVb9dH03aObEzkrtWaaSXLd7JaN72vdJr1fc7PVJ7a4X5GLNNGFIGDvYcDjA2nBAAyAOfpWbaWMIDZUszY+XPJJONzfTk4+ncnHNJfCVlWNs5x87HjJJIOM8AdOP5810NjcDchbhmYoHKnBPGCvJwMjsO/rTVOUFa7WnNu76Jdvu++/Y+fxNSpZt3vdLTs7LmVtXa2qT+86BURAvlqfM+XaueFGMZUc9eckdPqcDTgDsuTIf3mSYwCAr/xY/wD1dxxUFsBKQpBLEZzj2GNp4x7Z6d+1XsKjO0YaORcEBwR8vAyO24ke3B24701Utv8A8O9LLaz16eXda+VOqk1dpW82r3tpom3sn3vs+hkX1uv7x0jbcPk+6ShXnzDn/noccccHOMmvPdS00uztswrcrvPJ3E4HtjA47569a9ZeJpPlcs5kAl2JjCnHO4c46dOozWJf2Nt+8Lx/LwuN+HLezY5GDyMY+meOijXcdbNLbV+n9JNaeplGvyz0vrbppfSzSdnb1d+zPHJ9FVocmJ/NJGeAWYkjjHQKAD69cgdAeevNLWGT54AhAJXecFh23evTOetew3VoynagVUKHrgOi8cuo+8rcYPBXHfmuO1a2dpJQBuKAFWIxGNwO4L69AAc+v49tHET5laXuuSTs73TdlbX56bL8euliZttc1421ab929lbfZPproeY3UATzGVeCufkOVRec7h0LcjBz8vXqQaxpFQHcNoJXAPPygk5B9GIGe46ehFdJqCSQxkspXacEKcspPQc9B1yee/FcvPuJOAAob5QeG+YDGfY4OD1znNerTlzRulZN213tpv16LbbZWPZw75kve001v6Kz+f4P74CjZfYpOVymSAFwTub3GCCMck89iTEqMjbsBOARjGeeoPpyBxzx04qYZwqSEDYcsBnknHA78ckjgcnkZOW53OVIxgAjjG4Afwc/N0647fhWzfy+G9raWtpto7u2/l2O9OSTVk1b3t/hvFbt22s3pZJ6+VyGUA5BzJ1YHkEdgBnHHQ4xnv3rrtCikcIAgaZjkMTkFTjgZ6FR3POc546chHHleQvz9MNk4z3PbHBP59OndeH4pHClSUAGfMXIZAP1HsAM/hmuPFO1KbTSdra3Vr21v1unrbVWXc5Kiuvdeqknq3sraK2yd3dbdOx3+nWRDEh8ZQFgRwWwcspyeuOnOPpXXx2QWN5VMYUgBl43Y/iO7+Qx3PPpz9jMYYUCL8wwWYjIXHG5DkH
BHUd66OKdzGzLiRZF5UA53EAnYDkDpyc4B59a+UrTqcys3ppqlZpcvlo7XV31RzNxbu4tPZNapPTo1y7PU57U5B5LqNiBCQAi5OB1GeoPQAc5GQO1efXkkhcr8qopkAJJZlJ5UsOvr3GcHpgCvS7qISMRs3eYD8oGQMnkn1KnjOe4wBXG6pZRBpCEK/IrNnPzEZ+YnjaB/d78c+voYKqorkau7OSd72bstV23+fldEwVnquqSaS1vZPRu19tfwucjNMwjzvJcKAJCB83J5A/2R0/E84rnrlkLCMA7j82W+VMf3j9T1zx29Mal+8itsjwASVAPAAH5/KfX8SOBjnpZGLMMEkcEscgE9BHzyrd24+nr7tGNnd295Nrby101utVb1smdEKbV2no7qzet9LN9LX6LW2y1FJADM2Mg/MxPY56AYwc4xzxyDjOA1pZCCqyEDAYYADBRyeM5wcAA+3NVnlbAVBggkMoIJJzjgE/jkdhwBThKoiCN94EZ2/MxIPGM9Mf/AFuvFdPNFNO3dK+yS2vZdF5/Z9DVU3u0n7ySWl0nazs7pa9dVZ62CYs+3A3YJAz64HQ9lHc9CSc5xxGgk2OykFEKlyOCc5yMdSPXPoDxTWLl8nhV++GO07ccgj1PTA56d+KfG8aFwV3qc5HrnOBjjgZyTnPQ85FCVr8vRX087XXpq+ia+Rok1BRSTWjsrPqut9Gltp12Wyh53MBzF/Cx6DPt2PuaY3mkOArKFxt5+VRnBbvnPfjAzz3zZVmYkYBLAjB6KVAw2OMEc4PbpzUalVcLKAQFJAXq2OhGM9DyQT26ZNVH3nJJJ2jG66X0W7bV7N9TSLV9Ip2UdN3o02+mrtZ99lpciG/c2QNqqPmxg/NzkZJ4YDJ4HCjPfMCFVkUuF2jJ4JHI6AnpjPXI+mKuFFkU7nbMmQFXorDkMQOo5yB9OgqExoOMAnjLjo2eg+uOPpmtN4ya091Xjuul0tmumn/BNIyVpaWbXLppZJK+trXu36Nbai/6snawCsSSWwcg4yBuxjPryQc05CWUZ4ySQ3IDKO7deR+mTRKCTlAMFQwUHIGA2Vwevzc/QepGFjyy7AV2ty2Tg9c7QPX8eO9RfSLVkpW266JPXpbRW3W/ch2cE3a767NWtfTrpf1a0tYbjawbkgZDKSQpPGMHncOeSOpI6mlikJTkBQTndj5hjP3fQds9e+KbIG+UjLFASFYcYH3iR6D2I+lSxhiuCoO4k/7S9xjrtB4PI5Ax9Z5b9mnovkk9NN1bV+e19AdlHV3btqnrZavS3no/k+7SIkP8+MA8YOVAPqSMe5x3OPWrKx7W3A4APy5OcZ9BkfTP/wCump5LqVDbn4LLyOmcHPqM8cDjtjpJIzDbtGSTyPYdR+XfPP6lb2tu7J/gl699rowk25WS5b6NPRbLVJ30dnq1bX7tG1ErSbSf9ZHwCoC4Gckn+ED0x8w7gjI0fsucTKgCBVwHAO7qS5BxsUn7o+YkZ5JFY9rM6rgnaCwLkgEFs4XGCCMYPPTnBHeurt5iIpAT8wK4jjQu23PDDoNqZ+b6j6VM5e7e75rNb9NFe/S1/nq2cVROMnrFLRO1ls0rrfa6un69dMuSzhJ2gM7CMAFMHD4ztcfxeX059evpmS2jhiFjdmYE4ddg3dNx5O1egx2JxXcLZykq6KVicEMzABgG5wGPIJP14GMgUg00uJComZ1f+Iko+4joMYA44Ge5JrmWI5W9b7PVrutbPayenr6F0alt9VfS717LTRabpO/4nn32WQHe67VVQo3A4UnOMgckAfj7crl7WgKMZfvMFdWjb5cKMgkY79h6Z+ld9LpT4fK5lYqSq8qrAdWXjlM/ez0PGazrjTJVV1mjwdoCkHA2H7p6HOeSTx6EVaxEW9GltpfVpb9Vey009b9+uMlLVO3zSevZXu/6RwAiIwXCGMMSWzhyB90HI689+Op60nlqysWjfDEgEHGDk5I67hggkcZHpXWz6YFjLmC
R2cgoQRgLyC+cfdIxj1OfpWLd2nl5AdUXI2/NuCkrynQDce47Dn2PTGtB3tJ2aV7q2qSu1a/X7rPXc6FOOm6dlqvK120ndPr2/Mx2KrtQ4yGJJ68DqCRxxzkY4OeTTgYypIw2QQA3VgM5J45x2HbPSo3j2SDzFwjZAyeWx3UjoBkfU8GpFQbCMbWHRgfz56jPG3gZxzk1o5JWb1vqvlayf6bfkOSiktb3s1t39ejtp/wxHIBu3AkEjDBV38gd+Ryeg7+lNjfAAfBXsDyW55yemOvXnt2pnz79xzjGQd2MgdOx4P8AX8adGpZ8EKRlj83T8Dzke4A6/jUTle+quo9Hvfltond6atdL+hbtyu9notU0nbqtbO9tvJdHck3N5mwEFDgBWxghufl6Ejt+Yx6WYyPmRgC+dq7eBgehBxjnJPPbmomRcsyAjtn7wGDk84G09Og5wTnjl8SurAnaASG3YySOmM9s/TJyaxut9NNG0+6W9/0fRLXUym4tW2sl5SbVnd7+jT37rceqBTuCsWUjeQcYB6A+hJGevJH0IQBt5DdGYZAPI29V7YI6ZOetXnERRuWVmUA7PmDN935gcc564OOh6ZFVd21RjG4tj5hkhRzu9vr165Arp5lJN+Vvkkulvv3T0MFNyTdnfRWd/J76re706aWWwxowzsqAkksAcntjODnpx17nnigELIzsMfKAR2IHG7IPyr1+Xt2PNOy3yurfM4YMB0UDA684Jxnp1H4U5AzSeWFBOAR3DBsg891Hb8eK55ptaXskr7LRcvV/p63Lg2ru+iSTu7dUt3a+ytprb0I2kJICjIPLYOMkZ5/lwOuPoK1rB5JMKgIIHJ7nHUEjqfqPQdBU9jokk7q75wSUKjOCT3AyeR6474ruLDQreCJgcDLAb24YjHzH2z6fUdK83EV6UEoJpyvfa+um6V0tL7fhZ264tN+65La1tE3pppvp6re3cz7aOZV8w/vGIGFAxgjqMEknqPY9O9bcTSKuRgEgDA3Agnt147HHIPPbFSG1EYUR7chvlBbGRjgYxjGRyckfSp1D7SMAMepyOGXoRx78n6cenCnfd3vqtLadvRXWll/lVnfdeSsns1f+vMgmkJiCfNuUfMQTyfTnpjOAP0xWHcRSEgk9snng/Xg8nr25H1Fbwjk3ENlwc7iQVBZs9Pbpz64NRNalt2FYg9c9FB6jjP8AT2462pRirJcq67eSfltpt2BdErXbV7vbZPZf5HOxozNjBUA4U4w3fv8Anjn/ABrXt4Msu75iBgFuMDt83Y9cHHbPFWUswMfu+hwcgngZIwMDp6jGOK07eAKdx+9jOF559SSMD3HPbjArhxFe0XrdXsr9Wrbdev3bFxpxlKLlK9ldxS0le1l+LW+9tmalnbbwqrEGbA4GCfcYGc/y6EY77Jso0UsF5HBIPU9Rj0OM45OD064qjYI6MrZXYQATnJGT8wzgnsO3XAPNb8Wwlg2AAcqScKSe465xj36g+teZN3k1e20tHa999e2i2t+J2KEHFJRkrb6a30S272t+HQgtLua2J2PIi7124OCGGeSOcj0z19OCa76x8RvHFvaXK7VV+MsoPXABBC5HJAwPTpXHeRE68MMjBO0hcYyct14OeCM9+BSvC0YLxuUOVKqTuDFencfLzwe+a5q9KFVWlHrdNpXWitr17qz76WIUJeltemuqsuvrovv6egSaxHMEAZmikViPmwxOBxk9B+B4z1qva6nC58pnHmDBIJ3bT2yQRkY6Y47ckVxEk8xVwxTeVDfISCAAc4H97GOfQdKz4bm4WYMoIU52yAdcEfeIPGCeenX0Iz5UsEmpWbT0tG9tfd+dm/LX0Kbb1vZ2Vle9/kvTfXT5o9TF1EZFG/Kupzk7UwOcKMHccn1O3POQBi9DLGsQKmONWIbL8ndyAxORkjsMD8687hvnRVaRsnoATuBBxkKOCFHUd8ZPatC51SOJVUSIxZCUXbyA2CNnIw4weew+prm+ptz
S1vdJJNtaW0tp2+/Ywqzsld2WiutLarZa77FzWZEAdpG4KnODsDg55384+mBweOwryK/kRpNrNtXnJLElSCdqngAZxzjOR7810mr6usqhTO24JyijIGD9w8j5jwD0Ofc1xk00ch3dAFJCMeO4Bc9Oew65zyea+hwFCUFHnTaaUdn7vwtNbd13vqeZidbvRpptK9rbK3/D7aGfNcnzJAdoEeNzYwCvPQdCP9rPBHrSJcn5HUlVJ25P3938JUfwgZ4PO5fpVe8WMBsFWBUBthzkjOdx9B6dvwxVGOYbPkO1A3yAAn5hwCCemCfrjtX0lCmlFOOjtrrq7W6Xeie9nr2W548rLbW90767W011/rfY6GS6mkQruONpBIAO5SByScfNwefXPBrnr1mABdyIwGADAcZweRn73qe2OOmDejLsvlsykBWY4JGScYC+/U9eR6d8fUXYgkqS6nnIyWUggsR0OMjJ7Hrnt6VGOse7aeltLNW316v597EwalK0bNppWvd2ut099ba+r9Mq4kIUIDE24bYyuGbr1bgcAHk8dR161lSBAVCALn7zZzyxzjdjgMR0xxjPNWztJBCLuyd2NvGccKeMKAM8ZGOMHgVUcoJNygqoADbzkMy9SmQQOvYckivUpJK6s7y018uWzf5p/wDDHRGLutZbbtXTbUV6a6v77vcjZwm5pDkLwwUZOTn5VHA6AZH86ZuPU5YqSNvfyeqsRkEEckn6c96tNGgBHmxqXj3/ALsiTdIT92TGMN+OPU1VaNcswD7sKcNxuOTnIxx1GFzzj2rqVrWTvypLvorLv2062ehrHltomlt112WltF11V9vJiKoaNXRXMkp4LEYQA8OAR90A8c+vYDCuqrIwBSLKqS7ndyQclV9GwOAeMcUm6QAsWVlZSGBX5oVBGQuDwQD0GT39ihkyzrkSCMDyiRkMrc4XnlR3PY4461DfKkmrK2t2utn232s7X30e5S5le2sU97va8Uk29NPTztrqqySMWkLAxglcDAY4HKhMkYORjnqM9agEkhHl7QFVzxn7oPY8g89OeOc9KkjRWjZPKAnB3E5IGByeOmRwQTnHJ61F5TiRGKld6iQH+B1PUNz1B69D6DPSLtqUdVZJrW6s9Nnf/g2RcYxblZKyXuvRdI+8rPVO71t+pMW3ktlkRU2sV+8QOCAPUkj69O9QI2CyNGxIY7QQeFPY9geAT2PGelWCiskjSlfNkA8oR8KCOMuoP3euOBnGaRQ5icAh1HLMTkYHGMnpjt1656cUL3r2urKOjTWll2v1t/SCPKlZau6WzS201erW9/murICrlSqjeQMsqfNsJ6Ejg47n8Ogqyjny1RpOVwWAySvU+3sMdupNRLscBQSHJOGU4IB6Bv7231+vBzinROA2PNO1jskUgfMwHLb89R1xjnB9sKUdWrr1suy+/br+Y3azVtU77O+yu9nvtr6vW97zMXJYeilnUfNtPofcjj86iJcMDjBRyCC33wf4eny9eepOKuKU8plAXA/iQDMvI2tjOQByAefXFOMXIUjftAYEctjsCeOnuM1g1a2+vdW/r+ulm4jJbKzTe71sna3bVXfa2lvKvkbUyNuWLfuxkLkc4HquOvHU8ejsJhSN2XICOxywcn7xHp6DoB7YodAXBYfMcgRjhlAXG49myD+OOtRYJIVQAU4GDjb6k+nTjGe2B2pf1/VzRQWjv2dt0/Ty9bllVPnHzCTlQQpOEc4/Rf8AZ+nPpKsIKsNyjpjGG9yVHGfXHp+VQpvYlccpgA/wnPUhseuMcfiDxVxI5FVt427TtJBPOMYK8dD3HB9qmTsuj8n/AJdSWm2rLRre3u6WabvqtlZW6aXIxCX4TA7KQSMH6dATyc/4UxopEdg4yoGTt6DPXdwAc4z1+lbQhQouAFZghBH3gT3J7DoCevTGetRywNh8qcce+72Bx0PJ9jjqea51OL2fS+ummn+aB80Wtea2l1bS1r6bJejuYe9RyQS4ztGOAM5
z+Xp09M5qUEOFbaA2BggbcY/i/wBrH/6veV4ArNlCBztY8fl9M/iT6VCI5AdxKlcYGcg8jAHXBHPpzVLVdLatary6bp3X9ahZN91pt0vbezslulrdeeg4syOMklhyBgbcDqMA9Oc+ox1xTywcLgkByNxGCATnK44HXvzjFRH1O7gZbByAD1H8seopMBjgcJuOO2ApBy3vz1xg/wA9YXt+Xn677bLy6bCas9NVZeifqrb9/wBSXYFy2/POSmchs8dOMfQdT37BrBFDZwW6qBnH0yPXqR06CnEqCxGNnqPQcZB6ZIHAHQAkn0YZQ0irycZw3dlI/kPUjtnvzT2e23Xb5+RpCd7r72tkl87d+/XZCJtC7wzK2cd+COgJ7Z645z69amUswUFyM55OBj3+h4IJ/EdqQFGDAFQTgBQcc924HHQHI4Hoc5MgVN/B3EqMZ7nHTvjHp+PqRg/v2/T8ult7aFpp36fL0Xl63ulvpqx0QbdlstgHnHHqMf0q5EGyCAMHg54J6DHtnB6cdKYiEFeD7jkhR3A9s4Bq9EoAyUy2eCeSOxCgcrjr328c965ZNq1u9vXy+ffpbzE+unfT9NS7alQAWyQMqwz2GBnBHQH8OAa6OAxMirt2lV4OcdPXj36dDnGRXPRNEm0kASY5BIOM9SRkYIx3zjvnmr6TxryZCoIwBwAc44Gfp3x2z61g3qlaSsuZJrW7tdfO/Tay+WalK1rK1+W61ulZ7W6rsashUg4xnPAHXB+vY8Yxng55FQ+SjBeHJOcoD09VYZ6dwCSDn868c8Zcgk7cMd/ReRkjPIJ547/nitCF42TIkUEZ+c8ZH9T0APv0IFVFta62809rbN667WTM2mtVdNWvrbRtdPS+vmUZLeMBikbc9SSNoPfB6Anof6io2jRFPmfdK4KoTgnpjI7nqBj6kmp5dhbAclDksR2Ptz0yBz7ds8wIwfK7wF5wxOdpGME9OvOTg84ra+l9Xok7b62S29V+Zm1JrZtaJu3o97XWu9/nu0UZ4Izv2q+T9xuhAAOQSQeDxuGAO+ayGgQSAOhCkEHn0P3Tg/e7hucYPXIrom5VskEbSM9iDwGK44PX1wck56DMmjQNuG4kEY7qVJBOcjk9x364GcVpGVujtbtrrbV32v8AhpYEpR6NPdtdNkm9r/OVrPVGQYoyrBAcEnuRnPTJIHI57frmqeBgDDEE9xjaRn5f8DyfrnnUlXiQgDCgnC8EKOuc5O459vwPWiuAoB5UHd+XIBPHOc4z1Pv00Tunta/fo7Xev5dOzSN03e6avpu9ZaLTdbPp1trd7VCoLDAO0nBByOPY5GM+rDPYHnAlcfdAHQnkHseO3TjuORipHC5DE7QSA4HXJzkk++AQf506RFByvC4BBX5iRzncD06AHntn0rSL5dG189VfS23Xy01StuJyvZtaJ26vf56v7hgYAAEg8AM3HQ8gA+2emPp60qvkjOWxjaOq47DPGfqMYpqqDgqAUPXLfNnPJxn6f154pxVTwoPGBgcj3JPpjoMjBA6cmsuazs0990k9Gl+V/W+26NElqldp63at5W0+foT72GM7QSuTt7DsMZ5JHbr196dvOR0+7joBzzgkEkZ9T0P1wKgAweGHJwDntwMYIwcdMn69RUgwCc49Rznd0BI6ZA9M8fnRKXLa6ev+a/R39B320319Nr/dd/kPjYlidw4yD6c56gDrjrn+XW1tA5dgpxuAHPU8ZGDz7+mMVWAAJ2k5JztB+XHQ+gzg8D+dTIQfvDnHHfjnjA/X2J6ilz6tcstOy16a2ttqD01bb6Lb7vPbzfRLdignOc9cgHpn0yD25xxyOlSqGCHgYXLFu/XJx79Ow69aPlAwOmM9cEEcEZxyMnkfr1NIDwQejAgDqD1weo9M+3Qc81KqLS8ZJu2672v5tLvazsK+l2uWz++763XTXZX+9MRdxIJBIJwpIG3H5Z9fzp4HzYwRkc9OBwec5/D39KB90cZOMgKcgHtjuMZ
4z7Z56KAcBicPkDnnr+Ax6H9OxDlJt6aba+Stvq7rW9tLb2sRUTdmr6NPR2utHffXTXa+l72J12gbMgnOFJHXPIHTqB27fSnckZ4OAM5+6PqOzZyPpxScDAJXld37skt7844/T8qbuDEDJwDjngk9zz6dCecDA5xgaq19b+i6kN2Tlbp6fp62XyJA2QdyhgB0wMDqe5/zyKXzcncCAScHjoBwfU9f/wBRo2AqOeh+7k4IPcdfTnn2HXNGz0GQGGMjPXg9/Ttzzjk9K1T01VraO+nTTp6dr9EhJvqrPt3WmvpqSKffdkAkvg4GMAD0HI98/lRUgCA9Oo4GfvcDqD1GRgcj3ycYK0Umtn+C9P0Cy10+/Xy3PKSCh3Bx8y5ZV6AHOQccjPOV9uvOaQANyMHJ7kgHBxhu2F5xxkEnnmnHkYyAxB6ngt+RHv2HP4U0MQCMDGRknAOBn5h6Drx3/KtWtvXX07fN2RWvz82loreS+Wt9PmI8bblGPmxvVMcNgdc89jTtiFAG4IbcCMkjOc9eODjAJ6H6UpcqFIPK8lifujuF5BwAePfkjjkzkbhglgVLcEsDg4ycnH0A9yegybdvJbPTTbrbp3016heVo9Euqte62Vuia7tuzej6RqWY5BURksM8g8AYAAHBz0Poe1PEh3kSFieOPVcYAB53L1yeOp6UAxhg2WZucocbNwAx3IOByR1xng04bRkthAehPzcgjgHABPoeMevrSWuj6Rej1u7XdtdrtO4Nr+V/CkmlZ3ur26v1S1toxGZUPIGdxBBGVK5HGR168A5+nYOJAGUAUMD85+9k9B2JJ55PH6UwfM2c5RxhS3AJz0z/AIYyOOh4mKYUg8kg7Cr/AMQ6htowoYHoMgiuiNlH3rfF29H2fT79dri2a1d+vVO7j0fZvVN6a9NoIg6MshAAI5AOMkZwSTnBxwRxkVIQVQgLlsht2cgE9NjcZ2j6delMUhYl3kP824EdWBPIJ6jGOO3p05eSu4BSVVgAwIIwGGBtOee+WB5HXsDnyyu30avfrdK+qvfVJ9rq2qG73bt5dUrRatpe/lZqze9ugheMLM6lx80bSYxkHAAPrj04zngjg1IrRlgGJ+f5mAXCgkjCnHXbyePXoaRSwTBYbRyzkZLNztULn7+OFPGPTk4YGlZ1CquA5MYbIYgHPOc+3U8nuOtOeq21tHd6N6P18t97X3E0mui1376p6K3TeyW3Q6SwZmuII41Ty3fcWZSMuMbx2wT8pA749ia+uvhTdsnlNGfKSGL7VCw+RPlI8+e5GSVlzsCrghuT2r41geS3nO4/vC6MihgF5zlumIyT6g9Mn1H0V8N9YktXtWNyTvxazRyHmJHPEURxhxJ8xfps2g8g18zmlGTp86WqW/a3Ldd1162s79bhR+Ja7tK/yW/fV2s+x+nXg3U9sIEksSQyQoFt4pdyoHH7lnuMDMbfOZownHy/N2HTa7Kz2+pRssJG5EQrHviaNQxjlKZGIhk5O4FuDjufIPh3qVvc+VA8E8skarJBdD/j3jUgFJEPG9EwfOyBjKjvx7ndi1ms7oyG3t1+yRkb3wJMbibgLhv3iEDEeRjceSa+SnJ05RbUn7sei1d47Jb6pKzta177M+sw0XKlGyVlGK13VktE7dLJeenZHwx8UNJhaKZ3b53jmIjRNtvPI2DDKwySs2A2FJ47nndXwb4htpYNRmM4cHzHBJYZynZiB8wH93jIPUE1+knxOEN0tyNylxFO0L267UEC7cI7c4kQEYXqNxyTX57+MrURXFwwAkEcyiTJydoY58zJ6nPLd/TtX0mXSlPlUtfcje9007xWj8u9m33OGtCUeaWmqvbqtU3zJdteuy2OMtpgoKFgWI3BlyQWOcFc9lGeB6+9X1uG2hN7LkqcdOSTuK8n5enYD144rCOVB27o1J4PR1yc4Ufwp9c55Hfm5GclXclgFIbjPJIxhhxwe5GeePWva5EruKXNK135ad79tt16nn1LtPy
slr0Wi6f1v5HSW07mYDcuVJ4Awxz3z6qO+OTXcadcOnklmDBch1z3BXgHGQR2PTsM9uAsyreWAvXgsOrYI2knjgDPr2zwRXV2shjwsecjkk8lmHVcn+Injt0/GvIxcXez3Tv8lbZaW6v9O/kYtpxvZbNcq3d7au9l3+7z09Us7wYjKsq/Lzx1Y4A5z1OOTgfTNdI13bi3GVRpAOPlBbf1yG43D1xz615VbXcgKZJHOOuMYxxgDjpyenvXQi/ZYCfM3KBk4HO0dlYH5W9ucfiK4Ix0jve97adWrX21Wt769tDwK0JJtXdrre13ZJf8F39V3OnW7iRyyld5VuTztJ+9jnvxgDj+nO30yuzoshUMDt3nI3HnK89SemMYxmuUm1SZGZw5VMlQxzuye2CQCPQ8d8Vm3WrSO0ZZwXG/5SdsYC8cDJ5Oc5PftkCumnCU2opJ30bsltbTVa+Tvs/QmNCTS2ezSd2ktN/P17PXZrea4EZU3DqQgYbCQWJOANw79OVOPSud1K6t5DOJJFC4A4O0qQThs85H+yMcfjnKl1cIrssyhnxu3AO4YcAoMgAjp3yCOfXkb7VQ0gBMf7wFMhSAccZYA4zkkE5OTjkV6FCjOTi1G9lvfTRrVfjZ9babI7aOHk37vMrtNu2jtu/LW3e/rqVdRnSRjH9/cDtIcEMV4UuMc7c8c/xY5yK5J2QuN2X+b52JKqSMgHb14ycEHIGcnkVoXUyFicgYOzd/DnnlTxt3Z5HOMdSOay2PIkYDaxAOeckEDC8DGOnU5J4NexD3YpJdEnez2av5K9tt1r1Pew0HCPXa2u/Nps7Py0Vmnq7bikJuzkuPUgkkgdMnGR7cE/TFP2MUZhFhRtYPgDbjqSeSM5A24P8AWo3LKoKjJ5PyHOf04Y9yM9Bgdi5GYqodtwIJdQT19ifQfryetU2na9lql67W63e3l63Z0KEmrrVLR9XZWu7W107taklv8kqucFQeRnAOcfpntx0HNeheHpAWKhioJAU9VOCcgLznIJHHQ9ua4ZYfMkTYCVBU7epZudx7ce+B+td/4cs2DI+VUeYd6uxGBxlcYxz+XFceMcZUW20r2Wzte67Wt+vYlx5nZ7tJJd1pa666baa3t5npFhC0iqzAFMbSCOduMcdPXr15PUZrp0RUtvL2ojIASVPzggHAVu4Oeo6c9+DQ02NPJYS435yhQ/ICMYOcHcB+o7npW0Y4nXy/mLFQMqcevOP7p9/qSc5r5Go5Xatazu7ei1T0Tv2OWdJ3aSUWrfE1otG76LXVde22hy8kU/nFuAMFQw6jcPukevHJzwMcEVh3trPmT5SY2XnsBx29+ecduSOlegHTo2I3q7nC4LE4YnO3OB69D2wTj0yr7TyBIY0YlFJZWOUUfxEjHOMYB7V1YerytJtXlZXt5prV92uv6smNJqSjZcy1Ss7u9t9k9vzXc8W1S2CvgwkMucljwx9FGOjDrj8OK5C7jUNIUTbuJJbbgbxwR1ww9OBnnA9fWNatChPIMgGSDyNvIIB6HPHfvjGK87vIGQFgAsZZt6k5ZMZweV+7nr6/hivoqD54p8zTvZPdte736O91/wAHXpg3y7a2e8t9UrJO+l9rXWnQ5cxNjLbAcly4blgT8qqMduf89WKHLgHAyMn+8AOxGBnORzx27jFX5Nq8kFcEkkZwQ2cLjHyknpwe/UE1A4TacKSx4QA55z/Ee6jngkAD8j1JNp3XWzeyeqd31bdnZ+paqS1TWuydr8r++2nl80rWGAIS7SMecAZHyMe2Dnn6+ueOae4UKxAxleCOvA/vYxyDx3PHbkoykoVwFU4AxywPGBgZ3Z9iOM9cE1C0kmMZzj5SqgKcjPAyDgcc9eh961vp6vrfZWt5Pr0+WhNm2mnaz25tNLW20T9XpbfXWICRTv4UHjJ75ByGP/1sDvkU85TkEk8ksvzDB64GOvHbv9KVGLRHHzIWz1yQRxwfUcex6e1BxkBmIxhSeMBeRtHqT1z
wP0rZeWl7W116at9dfPtqa3u/eS0dn1vpHR9dNd73fdMiVcDzARhmICDh2wcZIJODj+HPPqAOUfJAVcjBOeMRhuvHXHPv/gJgiiMybSfmATkdTzkH3PpyCOvamrs81SxLLuO9CTtDAdOB0AB5PH4DNWtIt7W3s9W3ZJ90tNbWWnyLUldu17X6W1STXV6pO13ZXdtt048xUc5ZdpGBjPuQM4Uf4YzS7Rhs7kVsfdw2D/ezxge3Of5uPytIVICk7t2AQAQMjOOvQZ6YOAc8Uq7iwQOACPl6YIOee3HqTz1PNQrWS6au++9u+/3kXdk1tZfjZu2lr36a2s+isMLIQikj5QSRkkkk+mBgDrjnseuDTsszPIqkkqeQOAwxknBwCQev5CkwwJKIARlS3ULj75z0AJx2PbHqGqG35ONrDqGIJf3HYAnBPfjnAxUt26d7u9n0V/S1m7LTrsFlZ2ata7Td9bptfPSy3t0XRI1dc8BSSMuPu4OTy36np04FWwAc7iSR0YnAzg8qOgGPbnrgdTXO2NWXkuSCVHII4J7j7pPPufxqRiGjXcFAIxuVSGBHTvj1yT161m7brurd1Za383o9CZ3k078qfW2vR3Wrb0WydmtS1E4LopUhXZiWHbG326nPTOeOveu60x4RGkZZfmCruI5lbJ2EjGV6Ed++DXAx5LRx7flwQpI+9zk45yCTgds9q6jTpvLkDzZR0QrG3QgNj/VgZywwee3t256l3F2fyS0ez6Xeru02vNbXfFXjs0rabrV7q3dp2umt7rpZncqEd2kLRCRePLc/KSeCcdCxGMY6Y/2hWyEhWGNVCrM2wuyEkLIucYY9xnjgcfrzNrc2zhmIdpGIZGBIUMPv5zwCOMDoT9BV6K8TehZ90II3DftZcZynTIJ4zjjPPTGfLlzJvd8qVuqaaW2m17X106HNzWel+0na12t2r9/mtddbXtNbukwaTBVkO7AGSx5AY5wcnr0Htk1VuLUMHKqQQCZCV3qxPBHUdBjGOmfXpP8AbIcYR22KSPmwS+euDwWA5xxxzTIr1Xf53QJz/tFcf3h1yecfkCO1NSVn6X06aK6++1rvTdd91O1otXWiW/lZX2/PbVmLJaAKyB/3SIPk6EE5yo7AnHTt6DNclqUKl2Rk3Bc7B0wpHyhm7k85468V6XcC2aORhIFRwGPfPYkHAxkjpyFI9TXEaqI5MKE3ghtjKdvHHzH+9jPYjnPPprRbc7J6p32SS29L92johJxldNavfZL4bp7a+T32OBmhKg7VwULLuK7j65B9+zH3wPSIrIu7J2q4B+XILdBjGfTPzdckCtWZVG1GIKqGL4OMkEEdskAnjt+tZkm1pHXnPByRgN05PQEfXHsADmvTUXyLzjfTS212l06d+51Rm5JXWi1vZu+sdddnpfd2WpArKCqEsQDnB6HueBnjOe/rwM08spZtrDA53EEgscZAOST1ySAMj34ECKBlx94N8xzwQBx6YHXjoc+9ToqlSxJxyMjIxjgkeo59jWclqtU/hblpfRR2323tbS19jeSSvq3Za3vZNteVtdlr362HqxBDs+RjGwcemMYBHGfTv+NWYWAbahGTk4xk56knOOn6dfpWVRuRFXJzgf3ueuV7gY9eOasp13bQMnazKeFxwduRwGB5Bz2OeMVK0ej3s9e+nu6LRd/TXqY1ErPu9lou9rp9Fv1d9F2AqylgcBzztxlT6kHsT1xjgDOe1SBUKITyQQCQ2Tkj5scdAf4ceoyetMD8sHU5VgVIP3hnHH+yvfJHPXIqVQTGUAw4yBt6jqePUe/Hc+mOpTdkuX53W2ltEnb8H0tqYu6tfRpxb2S1te2t7a9erGDYHJYrgtygwOP7u3BHTv0Jx6c9DpkEcr4Ck7yoDkZy5BIOSM4GMHHJHYVhxxZkXjzH7gZIb147k+/AP4Y7TSIvN8uRiqoTvVSvzo4IHYjAXj13Z4xzWGIqqNOdrLmUYp6uzunsuq0727alQV5xSbt
p1snbldktX836enU2NjGhDmIqDtXcSRjjqFOev5nHsc7Ag+YqE+Tgq2Pl5zwem449OemT1J0rSFcCMoDKiLJlhjdtxjOBj5ucewJ461ZuY4lVSFCqADtz/Ex5I6Yzjk59fTNfLym5Td27PRd9Hbf8N/k7M9SV1BdXbmWjeidrO2vXddDKFttQnBbIzuJwV2kgPjk9+oOTwO2Kp3Fs6sCfvZVweACDxzgd/XGfxrQkuESPDEJtXDKpy4z0U98nk4Gc1m3d+AMB0JPBBOdp6cnsTx046iripKzWl3b0dlp2312s73WxhOULLm5Yt21jo3ey+71IWVwSO+OhxznjkY/EdenTpV6BFlG1oznaAQMkkgE54HAGMn15rMt5jcTKCAAVwCxxyMYGfy/Hnvz1FhG42hiAqsASvUnORtHrxyemTx7Kpzctm1zPVJu6fkt391ru2xi60ad7u6VtHo+ivZ2dvJdCrLZSkFo4twIUg9evUdOCfz46+hErIg3wYIIQbuSOvJxjjtnI4H5drZhHyjoEBOTt64xx0HP8hnNb8Wk200a/u9pJChWX76g/eBHrk7ATnBPHSvMquduVpSetrttLvf5ettdNRQx8YyTnGLutFHo/du7dXpfy8mjzeDMbqzqMAEkFvl5xlenHseuew76e4EK4U42lgScEZz274x+fU12c3hyKRmSMbTnACDcTgDIxjgDPJHHt0NQSeGJCo2k7R90AHJC9BnPGc8jqR0xXNFtJc8Wm9NvdsrWtd6u++mmtt9e6GYUpNLncE4pWkktW1pfum1bX7jlYG+YsGK7hyOq8/n144z3H43mYZVSxJZcBuSAexHoPbqeSKuf2HeRFwYyicY2ruGMZ+Y+nvgc9R0p39nTCSJHQYPG4g84PTgYzg8E8DHvTnOKbi32fyVrre7s9OnZG6qU5RfLK6+bfS1r6p3a/4bUrQWckmx8FiXzuI4JHU884x25B4zWmulsEb92So5ZQwAb6ZGAOM9+nToTq2mnmJAWZiVyACMbQR8oOf4uDnjrj5a0nj2IcrglRkL8w3EHAJ4zjsBkH09PPqVE37r6pJWs1a3d92nov+Dm5qFrtppXvffX87vXpfe2pyM0SRrsSNEYFdoU/MMgnJPPAI5559+tcpfXBxMyKRtG5HbAORkEqc+5OBwfXFdhf2sqofQD7ygq3XKBuTzzz0H58+f6mZItpb5iW2ks4zGvPAAHI4buc889cdeFUJNW1tq3pq7rz17tPzMKjai+ZLS1r632u/l1167bHNXcrK0gV8K53l88gnO4L1Gc4449MZzWQt2DhEIDBdpJPpn5iO5PoOCRz7mpOyyMV/ePneMnaoXJJBHOSfXP6AGudaVhID0ydvygZXnHTPTPOfTvX0eHoc8XO97qOnLdJ2Tel9E+34ux5VRtuW8duW+l07WXV2739EtTdaRNzks3mMoQIfuv1O7Ho3Q8//Xnt40YrlMjj5QflDg9COx6Y9ayYZi8rRhiHjKkSFPlJPGFP16c9e3euosoJPMySodQkgDDgEcEjtk5HP6dq76UNk1a1lf7m7O/6vS3XbyK8pJ2ckpN91rt8rWv5DhbTfOViC8AqH9+CqcE4x6noMYNZV/CiwOojcOedwPLOckKM87V5xnnB5GcV2lwqxQAsGXADMynO1scr0Ge5A4B4z3NcdfHcjAyYiOct0lCcbfqSemR83UYwK7qUW3ZJ6taPX+VNdt32VrO+uhz0puU4uOi0d7/ElbRvZ67rftqcVdxndsQMDy2GBHIPqCdy88gDriq7JMUJU/NGA5z95O33T/DnHr+Oa0bojcqAklXwG342B/ug8Z2Ae+fT2zclSckMoDAlxjcSeAuSdxPH05wfmzXqwvZR7JatXWnLbS9tvu087+tGTlGNtLNOzVr3aV+tlto156kUiyRs2eGcJjcNoJIILDtgkZPpxgnmmDcF3I7mbJT5WwuePUZ2qAf5Y9FMhd8OA208BQeUA/i/2R36Y54xUUQ
dD5rqoIlYdSFO7oFH8OMYPXjv6U3ZXTTfXbVLX79Omyf3bxTe9r2Ts7au2i6b9fN9SVVch7gAOAfmU9Cx43Acc+3cdSKfuCptRE8wkchP9WwzjOT/AA55xwQegwMvQlIpZg6l3Ulx97POAI1GMsv8LZwBnqTUTCRZQQQpCLgyfe2sDj8/oTx1rNy5r6dLO/bR9dP+HtuK6el07XT31t06XV9E+1vJKFDLG6ysQx2sHIH38nG0kdQcnJwMexoWQkSL85IIYd0UjGMccAZySDj17U8oChJjYSpknH3AO+euB0x8vPPAxUCRsp3ZCqwycnClc9CBnGOBj2OfZxfMm97KKa37WXS1rJWb1b26FJxd3orJR0Vk0mnpZ9rdr/K45d6BZdoGwkN0OQeAR2I56dM9cginpMyRuiBdrDL5AOWGSQuSNq47jpx+EgWNUkEhBbqjL8wyfujp1GeD7898QNC23CpjADZbGT17jHHpgHOPbnTdX799fwv5dPXqCknvZaqztZXVrXTs+1u1+gMhMatwVY44YBh3JVsEFehxgZPHXqRKvmhQAqyEFcnkgckNnjA7nvjjvhxJI3MnzjG9F+XI6FVUZ6AYJB/POasxplFbYgLH5ScMevBIyCFGMYx39MVzOS2a0Tdm+6a2W+m/o3puDdk10baVn1urWvvb8bmrbxoH3bvlJ2HA+UBe6j+HqQeue5zWisO8zBQAwVXU4yGOMYOPvH8QOe4IqKyi8zbhQFkjJRh8oAXjJznLevt+IO7CkasAgG8AKXOAhKdSpOc7iRjHTHc4rkqVLNytd20Td9L3Vtrbr08mQ58tueK10tazvpfZNed29tPM52TT5ArMqZcDGcADaezHPBIIyO+OuMVTht/LcuSFUEgpj7zfw9OoznPGSBg+/oP2ONo93lyP5iBnO75WcA5UZX5cY56Ae5HGYdOVl8zq33VUAFdvOcnA6ZA6c9j3PNKu2ne9uZLTS23ztutrK19TRTSvHa6010+ykk76a91ZProYUdsu5HHALfNgE59MjjaeBzzjmtcWny70XPJLZ5O48H1zn1GM8YGKmS0VJCoIIztYqcrkHnI7Adc+x+tdNa20XyAIHAIJJ+6QOfXqMcAdOvXrlUrWV020l9pq9vddt76Pf8NmTKbbXK20lfzvZfNv/P0vy39lzxruzhGKlTg5yeoB9e5H8hitGC3VQYpELArhiT3H3SOOGAyc9Mt0OM11l/EEh3qI2ULvdVwSu0cjn1yfyPrXKzzuv3QETr6sDjoQQNpPTPPA/CsYTnVWll0fbRq7asnfRO2m4lKz1s1JKzVrJ6Oz0s+u+rehh6laAO+zIIAIUjqDyRnIyACMYGOvHcZqwqYpA20ugDqr5IOB07kEDOOec9fW7eXpYNhiXA+fJBAHPzD69ySOme1ZJkYkZYYbCMSeW68Y/Dk9eME9K7KUJuMeZtW26b2Ta0tbZO3+ZabkmtopppPRtW1vr0d9G9O6IgBuwcEMDnPrn2IOBkkc9c5zimMACSpOAxB49R09ADjr7DipsKSCAxc52jB4HQdOfz74HpRJGwMhIwDGMBSCABx05wD6HOMda7ErN7tNX16OyXrftd99FYLpuyslyq8fmtb9EtVfTV6dCtvBJy3sB93OO3TBPU/jyaTc3LAEKQSpx1Pcj655x7HjIqJ8MQmwj5iFOT8319/85NOVAPmI3YbIQdiRwAOMEcZ/+vTXr0183pr5aLoUoJdXtbs7+VvTYIFfsRlj7kAg5I7H1z07e9bECkD5sYXGSDkZ/hxx1B5Oc+9Z8SIZEySFBO5ARkEn5c/Tr6enoNRHWMEAhgc/NjIz/D7ZH+P48zT5+bmb6NdNtLO+y7JWv6aatX5btNu3qtra7+vfzJtxJGOFwAWIx2PPvn1/TtTlZz2ZSASCRgHHRjjPX/8AXUDOSoIPDdenGScdOQOnf6HHFIspXksSQvfoeO2RzgcdifzqPZ33sl2er06aK2vfy18xxtu3r1e
mum/VPfXro09yYF1w3DMeGOOpJ7njg4Hv098PM7EYbqAMhcce/HA/TjPtVfzycgEbT1GcANnjjnkevIANQhnyCW6ttJBHHpzznt06/gKXI9Om1un3demq022dhxinfW6Ttp33f5/Pc0EuJPL6lcEckcg++M9fT2qwNRlVQm/HYsq8AAdevPbj+WKzUZyGVdpA5Jfjpjtjt07Z/SoXJBAPOBgqp4B/LpnnGRxnpxT5E91pZaO176X2Xa+/n3uN046uy0au3r2WuqWr/B6bmwNQ+f5nyAMf3Vb36noc49/Y5oe7+YhG5POFPHboWPHfP8/TEYsTnAIUAEZ5x/dx6dD6k9e1OyRzkjj5QDjnt+HHrnvz1quRbeV+l+nX9O7t5EvlV7Ja+e3na3o/w7o12u3K4yBkYHOORx2POD19/SlNyCmWdd4XvnqO+D0b9RWIWfjrjIGSe/YA89sZ/H0zUmWbBJzxtYAZJ45PX6c+v6jp3Td/LVb7ddn6K1l8758qty9La+b0/PXv0RMZiGDsMjuc8N6dPUHvjPOM1FvJDAAEZzjjBIJwSOPXqc7aYRzyrYAKj0AGMd+54OPqPdAp6gYySACeh7DGce+f8aORW31Vuqt0tZKzvvfXT1H30WlraLpt+vb9R2w7MhcgnnHUZPP1H4Z57UrZUlTkYxhen4fkSDzzx705du3DHkA8AnJJ6YwcHHX2OO54Q8lSSzMAAQeoUZGMY5VeBn3xxmrtd2srqz1tfXZau3RaJdETy3vdu17pO19l11b1/TshoBxkLtHPDdeewHv+fuc8IgIZlxgsDz2HGMZ75xk5Hfg0p35DFgQvzFRxx2B9xk5OeMc4o6EN82CCcDk5bPA9D7evJ9aQKNn5dtuqemre/fr01FUA5JySOMHIycHOD3IH0yOM+kwxuO3pgjA7Eg8e+OMnGPpzUOHAJGcM3Tbk4JOc5Ptx/wDW5mTGwkBd2OCQRg8DIGBjPrx15HQ0DvZK+l2kl2vsv6/yQiFV+c56kHr+HHGcdD6ZqdCDgA5yCTj1Oe/T5c88fhjrDtzt3/eyMAfdIPZe3v29e+akTIbCgAc4OcMDxkD0x6jrjHfNBRMVOfmckY5A5/MY7+vQdMf3pE2geuAduRnJ/Ec98/SgAqDlQxAHTng/iM/Tp0AxmnEj5ABgMPUj1+vAx+H8gARNpEh3EkgEHgc+mM8egGM8D2qTByzBj0BXAyB7/XtkZ7cc5pikgDghQc5J6tzjByfTBJ6dvZQzHDHjHHPbGcDH1I7EjHBHWs1vZq7er62va91bTr+ulmZNu9pJ3b0dk10drL5rR+T6DlDbgWPPYEjJJ746YI/+v1xU4CKCGGSuW3D6Zz09zkd/Y9YCxBB43ZAzjI69BnrjuOmAewqbI2gEc4wcDt1zg569vTk/XRPZ2e67PT8vzuZydtd0lrpbqkrd9NfuvYarnbv5wWIBPAIz6D1PufXjjFgSkLyB93jKsQDng57dzx6io9oIAUn5cbU/hwfb+nv6DhyI2SWyBkYBxgDuBkEZxn6AHArTm76Wte63+G/z7abdg0dn5afMcN24ZOcc8nqMZUbu35HHAb3KnCxkkDJPbrhmH948DPOeccD0zRWiVkvRW9On+f8AwLDTWuidn5/o0eSSowyATjGSBknJxgg9wcEY74GOlKroY1B3eZnkA5Bwe4wCMDrk85olDdepCgnYcgAckYxnj9Oc460iZKgjbyOSeCAOmOOT2GfWt39m6u+Zva2mm1rbPdeenlo3eKvunvone2vM9+nra3UewDDH3gflJB24Pt6gcZPHuKjIYARhRuBwp3FTx1zgcc9ec9fYU5+cjBYp8yqcjAP8R9e+fpjHTKHegDlgwyN27gY6AD2z+frwayS2ur3duq106+qenqJaWV/NJ31bSs+m6fZJ6LXdNxIkhBA38HBAKnqMnGMg4x04OMA0+TfublW4DlAAQqnGSMEZzxk84PXpQXVcgbkJIyTyGLf3R/dGMHr
3PtSggx5PylerDO5sH5QO59+MDuea1STcXolFpPS7asmnqvK2nnsF9YyaS+FbX3t0s1bpprfcYykqoAxsLYGchGPXJzxj8RkYHQU9Q6l5Gy4KrkAcnOcEDI4HrzwePSoULK6llBXknLYOMdSeR6n+dWIiArrjdzgsBnJJ+UKBgYUEgED1znIx0JW5tVsnv00013slbe619G5XSadmtHsr6tNrmvdW063d3o7aOQKcooJBBzg45ODgcdBx2759ckhTeCqhQAFO4hlDLu4HA65POCRxjgUu1jgAFBzlmALHGMAYJ47Hn24xUZ+WUqRgbAUySoLt1OSDlRnnggZOTjmocbP5Xd7a/Clq9r+rt07EK0nv9lu2uusb67b29LXY9dzCRMYVslXDFSpUA56c/mD7E81GxxguQGjYg4GFw2CAM5wcfj8vOaljztZZFUgHc56nI4XacDDZz+PXpTFLGVVA3BslWYDaQBkgnHIBPf196y5tGlptdJ6a2aWrs0un/DAr+8tNNdH5JPXXddPn5GoJ0eWBUILBVEknTzTxkE98Y68DIP0r1XwNMoMcgjzIJy4bBCr5f97k42BhsGPmJ6DFeOxx4bdjepK5jVsj5OrDpgcjAGSa9T8LSyQXAf7QLWJ0LErzI4bacFOMO2MHB4xxnNefjKanRcVqkt9ru6f39/xtuFJpVVZu6aeu3Ru+2qvbbp01PvLwJq0tuEeCcrFLBHvd28xIlf8A1yMoC7nf5cDOFx3JzXuc/iC5MawbQlu0sDmSRhh0cONiJk7oWxy24Zxjb3r5D8Ca020LduViQJJbxQne5iU8AEBeATmXnLZA4wa+hIbjzrVpI4VeNoA4jdDuCqPnkUbvmbkFGz1zgCvh8VGMKrul8Ss3try2dtm15btH12DtOEU7t3SUb2fRptb210v5nB+OZ3ZJ3gQMokkR2RTtdB/rDDDk488ld5Lc7Fxivi3xnbRNJOUjcead6Ag7G5J3OM/K6fwjJ4I6Yr7P8WG2SGdo5st9liCtIxYeed2FHA8p05Cj5up5Hb5E8ZQzbnyDGjSsoA6oCckE87jkDn5evHqPVy6pd9Vayv3V09t1vZvW99LbmWMpuCbSu3rJrbXl0Xbz162fc8TuYVExVQdzAcMeWPILEfy9OoPSpIoNoJGWHBIHPU8gjtjjp34NW5on85sgu4IwxHUDPPfCk4z68itOytWJHADEAsg5B9M8e/IyD2OK9ydTkhrs7ap6pPvfa3Xy0sfO1JtJ3to9/VpLTyu+r87i2cDnbgKigghc4IPp7d+vOfyPU21u4dVVt5IyGAI6HrjkAnnuc4qtBbxqQT1GMnHBIwcDsMnH1/3sV0UEaDYRkFgMsrc5/hxjtjOe2Mexrx8RW55N68u2yS0trrr3tu7/ACR5Vd82y01tbRt7331W346BHGVceYD82Tu9jj6Zb3444xya0xgoypyvRVOdpOAPu85PqecjBxngEsZBz1ZlUjjcNvPBHQEg8n6DGDUEiyxMrjKsowobPBPAOBxyAcnHHXgDNcDlzcrvs0+97Pa2ltr6enp50480tW1Z6qTe71vpe3W/rv2zruAlJOAu8KSB6DJOM9Ox5zx9K43UXEaOdqgcgk8kA45BPPVRxnjnnjjrL+WQJnknA80jjaTnKkdSMdD1B+lcBqLuz7NpKEMQ38LqvIBOORycn3Ppk92BjOrU3sk72Wm1m9XbXTTfvY1oRk9U9Ir8kl5dLd/TXXFvLwOrKcqOwjO3b6E8HqQeuPrxWFJNzjbwCR8vYjsc/hk9s89Bie5JfzCsZXOBkD5VIzknBwAc+w9xWcY2+eXcNo2h15+ZcYJxn24GT65zX09OlGMIrXo2ld+87abW1Wjvs35nr0qcFvdeV7e87b2732vr1Vgn25zkGLaFXapbJPqMggg9SOvpyKqYYoOFVM5OOSuD1xzkHPGO/wBBVhiGYIpwM4UB8MFx34OFB65z347VDGwAUMUwgKsCCQ7dsdMnv2+
vasZuzdrrlWmm/wAOulujvre1n6HbDSK06p2d9ujt3e1+99bB82dhIIALr83X0GQDgAdT1BPTA4dFGxlQqCBuABI5LHsOm5c5ye55HSlO0nh87mynAIJJyecA46YGeOeTkmtG2gzMu9wzZBG7gEDgAAcZ+9wOm3642gvdv0uk0tbN2X3X66b/AHWpJa+V9N9dLvTo73ttb7tOytHMhkkyfujHQKp64HYnv6D07elaRFDFEEdduR2PJPGAD3xyT0HOM5rBtoFAEi7STGCQAG477uRz6Ht1x2robcSIE+UKGHAxwRxwT0z1446A+lefWftOaOyUtk3d6J9O19rbblU4XSlfW9ldapKzXdLT8dHudCuotGu1UChcFdvB+Xk5zwSe/TOPY1s2d+ZZ7Us7MrMqlcdCeD0OAPT1HrxXMRxPLJgZIZl+UjIGfvc54x/hzjOO20jSgJFOxCEIK8ZJVs4+vTBHHHPY14+JjTgrNWe9097pW36X9L2u1sb08OpbLmerle3dW9dWtNE7P1OzdIPs8ZUjIGVZxjLHGCeeCOeMdOlcpqU8cLtvZRvjIZD2PBDZyMs3PynpgdecdPqU8UdoIpPLj8tTGgU8qSBkn9Bnv615dq+oqsmSC7BSQTyzKB/9fkjJB+prhw0XN6u6vsutmmvetZa6X3dle3SalDkVrJtWctU5xta9t3az0T6Kxh6rPHJ54ByxVir9dufU9CB/d4yeN2ea83v5IQoQFGcDluNzOc5BDYPygZGPoSCa3dVvt0TrEUiyMPtbLKCfnG44KjpkYOST3FcHeXAeVcoCoYskg+UkdRg8jGcgjk88mvrMDScoc19ErWe61jZ979E1o+j3t580+ZJXstLW6vW2ulldO2zto0Ncrk4xuA4Z+CT3J5xvGeOvc96pEENnd8wGcEYHOM55Ix0I47DtSySN1WUEnkbfmwCPm3k9DyCxI7dOuGtJjY4IK9Rz97PG5QR0GO+Afqa7npazvtHVdklrf73fb5ExTV/Rrbd9E7q9ntp87JXHEMAxDFhwQRwQe4JA7HgHngdB0qFk2sWKfIpwcknk9+/I6kHtx3qyZFVWUMNzYJAG4sei9uWHbB6dexqFy5YAMeYwRuGSwwSB64688EenShau7Wml7b7rbzfre9/O1RvrpZdd46e6ntoutn37EKvGEcsdu7kKBu3L6cemeo45PXilZNyhiBtxgYxlQOA3U5znn/61NjAI2qqmQnb97GAeTgduvfpn6UrcecwYxkKBs5LFueFGQceoycDAJNWpXbSVno9e7tv99nrddFsa2s2ou2t9bPqktFqujTd79rDgQqMM4BUbBjBxzxjGMdQT09DTGVQV2qwc4+UnKk+o9M9mH068iVeU2qVLHDfNjcGGSQARz14JPrnIxiI7llUyAEZVlJ/jXByM/wAJHYnPHHPShSVvPd66dFu/wXydhR3b20betm7K2l9dbfg2u5IAoYtgqEIymSTuORtByOfX0PtkBT1ZlBGQc/IAE6E8dd3HJH8hUhRfnIJG4blC9m9WIPOc5JIHXgY5pBgOpLkqVLYfIDMeMYxjHcA46cdajmfWzXffa11r6arTV37ENt66+Ss9vd0W/wAtd+6uxilvNBZwRt6DOGBGABj05B4/lkoqZ3Yxndn2yP7w65AOARwOnPZ4VwWbaSccMQBtPQ4wTx0weOvqSDDH5iTBioKg5PON2eh9COOf06U03aTi+W1k/XS9n6bave+7KSbUmnFWirdnZ7K1r777+QuJFYM4PK5GOuCOCD6cHOAPr6rw5w+TnIwowMY4f0wMkH26HFPwQMAZd23HBDAjrjoCNp6t93rxjmkRQ5VgoJyQx4G5ei8d8EnOcfrSupXtdWu9NbPS+vddd1pvroX0voraJqy2t92vrsvlMqqVCKQ2wcyZwSx6DBOMk9sj2x0rWtZWMsSOMMBlW4C7By2SerAkcde1ZJQDIUZAIIwcgEd+g6Z56g49KmV2CkEYAJfJOT7
EZ6DPXg+/HFZuF9VeXRJ330u9H2fTz9DnqJSSu10etk03Z30fV7xStfa251ltfrCJMzQ+WEZeFyzbsfcHGHbHP4VGbyPKSI4UpkLv4zuwe3GcjGcY6fSsF5k3qSwVnAIEY43AcEjjJ7npnpjipGkDKfNC7woC7Dt6dCQAQD6nABYcY5Fc3sktWn8VnZXs0kt+2r37b9+dU1dXTs27K3XTX0+9dLG22pyfLGrjzAwHytjOcHc2cge5z/KiHUJI2ZsjqAxIyDnqcZGCO/bnPfjlI5dsgDHKlu7HceTkZ9Afbn3wK0wAPmSRsTkMA2TuznAGOSSM45z1qZwULRto9b2sntdNd09Vf9LnSqShom3o2277K17atX7J9/u6gaik5dGkbDLlY0U7Qw75zxnPIxzjgjFZl6xfZncy52gEYOBk4/HPJI46kE80+0gZ2xgFyBtAHVR17cY79j7ZrRnsVUFirBQvfowI7ZOcDnJ7HHqKyjVpwktlZpteSS2u79vnvbW+saUpwvFWi2na1nZW1e22jaV9tNDip4wWwhLHJxkAkjOGXA7AcE+3sapSwnkhcKVUnJ3Hbz1OAUPT5R7cnNbU8YQgIQOQWYkEg55PABzjqOh559KysWYqSh3rlQFGSR/D0PHJJPbPsK9CFeCVlsknvs3bTtrurX316taqnJbLmUeVOStpdqz3a/Oy3vcyVtkJKgYAxnBwSO5Iwc4yMA4zzjtVgW28NHsC7R98LtGFJJBOep9T1x04rWitw5BEfzOQCRwV5O4j8f8Avk+1XYbYF5AyFiBwDyCccY4GQMnHP/Aa5Z1ruXlGLWu2ytb8/N66XNXrF/E7Ws9+zT66d9dehgC2G1nCEMu3bI34YYdzyfXjjnmmLaSK6kkAPngk4JGflyODkn5geRjiuuGnwmJtwwTgKIzgq5ycng7VznjnOfzQ2YEfCYA6ljliw+7zjgt2xnBB5qI1+XmUWrPRXWvrdNeb6Pvc5ZTkrvurfJNdb63Wtl/mc08ARCzxgBxuUqeMrjcTxxnA4P4Y6moCpA5UDAAcHDMDyq55GRyBx0q3fTOBjoD8jIOqqp5B5PzY7jkY784zEdWZcj5N2QeCTjgenynPPTkdua7KalKDu7uy2d1zaWf/AAyvfe44RfLd7XTbTtokrqzb/rbytxMyzLhyoRlZSXwckHKgEHOfU5xnOK6OxvHt3Vgyb1fdsBwqKpyQMghjzknHU468DnkSNVPm4G5hsOcAtkEMODznt6+lTKWVndWJVgSwJJAVSM49AOB06E1hVV7R1aTWultlbS2va9/JsuLvJPZK2vdpKyT7ta7NdPX2fTtXBhjIZfNZQX3HJBIwUBJHHHGCQcetXbvUIxGwZnVyh2oBlFGeADn5sknI9uOvHllhfAL5e4AbSULZyoGOS3U9R8p4A4p97qr7GVZzIFXaG242DByQc8HPQ89PavJWGvU101TWl1K7TVv172a62OmNZKNuZ86Wqel7NJabu7bdurvsdJcanENxzGJEAw5GGYZOc4PUcY9R0xXP3GoKZdxyRw4OThg3t6jB4x3H4co99OShPygng7sllb7pIxyOvbPemi6dpFB+ZVJGeAGzg9SeQOSPx7jFdX1d0rczbTStqklortXvbVaXa6WsYzcmteVpPRWei6dd/P03PTNIuIy8ZyW6MRnOCRwO2Ap+8e4/OvUNLjimwpKqudrsPU8HHqDz/wDWrwvRpdhik34ZmBOeA3QHGeg4B9B35r1vSLgRFFNwpQ7ThTk88888E44HPc5rkxFNJ6X0tq79PO2+yvtr6I8vEuSk7Nuyev2U1bys7LRaO726HqdnpIa3LIuChyCo3F0ByzMOwGeMdckfXpIbN441CFioKKRtwFfB2gAZOe/Oc9OnNc3oeqIwiRmCIT82HyQP7hHcHHHrXfWTq6RyhoyvIVGUH5eAXU5yWHG0nrn8a8icGm0/iWzfnvb1tq/PTY82pOSt7za6+vu7dlslpp8rlBL
RomMnIYEgMVyr5+90yTuyMZ9Bg1rQBY4gJVyWxu3DJ2k/exk4P+z0/WrzvblF2nEijZJ5RBBY4yOe47Lxj1PSmGBHjOVKA5OCdrIvJDEYPHqB059DnkqcySvfRNW162te22//AADbD1ntNc+qWt29LbLW35FBmtMyKVUAgoSOycDB44Bxg/e6YxUbWEDIpWNG/uqe/wCIzhefvcnpxTJLNjKfJRscsxP3d3Yt65Pf17VTaW6thgYO0jKsPmGSee3A+uPQDivKryk5Xi+Vrq721tbZppq/ro3019jD1U7N86SXLypvT4btLZLRN99VroaL2cQTJ25+VQEPI6Zwccjp+HHBrLnhQsVwiADaFJK/dzgEc4bB4z3yRmlhvmIUM4CMrAsf4lPYZ5wTgHgHI5p0lxDLA7IUDo24tx8rDOe5Py/UkZ6Vxrmi7PVO13ru7Waf4NO/Y7oyUktXa/u3bvql5aLzTSfmYOqQ26wOrsY9yqSQd244PPUYPYDJ4yc141rzxRkKyHG1im1iGyR8u4nrjnIA4z1716nrFywilBkjQOhYfNkj0HTliefb07nxDxFdM8pAAkSIEjOQQvG7IyfvcYPtyOhr2srpSdVL3lHtfZ3Vn5qztZ/gkKrJqC0abWi7LRv0vpbb8zkdTuQ+5VJ3bSpYcbiOAGbIxtzjgY681hJk/Oy8K+CwJ+bI6A9SfTvUk7M3mFTiNzkp1IC53Z6FQ2R65wPYBIwdrx4CqQWViuMsMfNn+8uMDPQE+pFfbQoqnBQVm9G2t2+qa2/B/ckjzaqcU+ui66/N/hpt16mnayDdtyqKW5LdVYY5U55X1U5/Hqez068iKOhdSV27VGXZj2OQAHVeuCQFz3rz5SiGJcgNweejE5JIOcE+oPr1ras7xEj4lSOZDhTnnBYAspwRlcEgnOeQO1dNOlotHa13q7X0309G15dTya9Nys2n0tpdptry+99Lu/Y7e9lkaFtmW3oAwVASOOWbLdTnBYfka5C+ki2KoIcqhzhTudD2kGTlhztxjGTzzTZ9TkJbFyxBj+bIKrGOMkck5OflJ6c56YrGlumaUAEbdrCNl+ffu6MG4I3EEOx5GF6g12U6but0k776NO1r+fonbtuRRoTjZOSvfm002tfRptv08+9ijchCTIittMeWR8s3A5YjjywD0HOe59c9jHKE2o5PJyPlAxg/MDnCcn25564q35xYMPOG8hsg4CknsBj5sAdOOP1rsxkAYOSCCuWG12Axkd856hepx64z2Rdo2va13s27aaX9Hr633PRjdeqtrqlay3vprfe+v5VkjfkgLuH3yc+Sy+5HKtz05yQce98WyrHM7hmGA6sCR2wScA7eoIGADzz61oSd3lqyhWblc85GeGGOBjAJz19QOdJXaOF2C7iAExH2j7jPfGQSOvSsJzlfTbRPRauyb/LdW00N23dJNXajpbVp8u129Fa910fTQzHDRoxRVUsOMjBQc8BTwAeueevPek3qFy2HmyvzKM4JyBsOeACOc9M+tXZ4Okke5sqpYvx5YIPX6Z9PqDis8xtvEUi7VYYDOMDaM9G67OCc4J4PGesxaleWl+q8tN09rW8tgSjJX03vvrsrp+Vul0t1u0Rx5DoZGQgFg2PvSAZ+VgOpHfI/PoJVEeyQMzBm2mMj5hv5wCMAHrx06HPFMZU5eNGYjgEMXQY4kJ4BAOVx6DPJ7xAMrlixVWAKkdCwJ+YE9FHfuT9M1pF/E1okoqz0s002kutn5LT01e9rWTVtGlq1Z2SV9NUr2et/k4M7Ha5RN4BDYGQMZUZ7Z57Z688AVZVomjkRzlolDiRW4Yn+9wMevBwD25wIgnBwpMeeGY4wxxyvHIBHWlkAVXUOrKV2ny2JIHcNwCSOPTH4805qLVvdvFLTq7K7bs+u6e3fcGk1tZpp6JK2yb07rS21+pHlgASA23JEhABVc8DPQHuMj8KdE7gqAWVQwPBwRz6t9373Jx6ZHSlcgr5auzLgblz
kM3P3gcYA7YNMXLOmQo5+5wF98jnB45yRnjGeKzm2t0nfe/nrqvMTV10XbTbs7WVvK2x1diXJWPBG0KEkAHzY6se4UkgMcEdOK7Swgd2UtHhSEMbMAFIcEYB9TjPIIz7Vw+kNIs6eYV+U9zncOyhcDBIzkZ6Y6fw+n6VHKyifcjhVbKyDIAOMsuG+XGMKwJAOeD28jFT9m76cqsr6rWyvtpf+ntYwktXreS1sm2mlq7W3v1bulbrsaEOmsRJsVpBjfG2Nihepz1wTxjHpyaifSk2ncg3kgpsz8q91PB59uh5/DrYEj8hVLD5QrOY8EDAJZVOMtjIySO/fpUflhyQMfc+V+chc5C9ep75/L18v2sndXd9H1T0tZ6+mum++pn7T3k0nqtbtuz0u9GtV0st77rU87vLSOFyi8Ng8Z4yepIxgAfiCSevSo1ufIRNsmCuAcjggHB29+ec+mOvFdNqlkxckITk8Zz0/HoD+HfjnFctexCLKhWBAA6HOffj34P16GuinLmXva3s0n1W7/wCG30vZF8/NpfVbP5K7189tXbzW0lxq26IoJDkgrhBlsdQfvc55z129wRiuXu7sMckjldjb1OCwzgrgjDe/UevNLPuXJXOcAHnOASdxB7N2Uc9TyDWJcEkncOjBgCSOOx+nXP06c1206MFqrrTXvd2um7WdraeXVOxcE3vJtppvW/Z2Wuz6q3a4eYJGIYgB84Yk44xkk9z3x3PHHFOWJGieQ4LKQ2/7vAyWIznnGABx1zn0pnbx8mPmPzZxtJ649gR17n2qczeWQqFWzkFQPQ9VGewPp+nB6op23fRabu3d/P8AXoauTasntbS71S5W9r79Frv6jACZFIYIrK3LDkD+5nPGec5z0zx0o3sw4yxXI3KeCo6gjqR9Qcds0srE7SpyoAc7SfkA6gjrk9RzyAeO1Qgg8qpGWA5Y5Y+uOwOMc5zyMd62Wy/rt31+/wDzDez13Tte112e5IFZ4woGMNwxwWAHv1HU9zjninLFtDsdpcAgYwOT0Azxk8kHoec4qVPLEbrLjfjcr5OVbnGB3OT9Mc1XbIwEU7iAFDEDPXvjnpnOABn61Mm46W+++m2r9dvx1ujWL0tzK69LW0fZO9r3/G2hKsaAblU5z1YZOBxwfw46/MDVzy/k3YwMcKOG7gg9xkcEEH/GrG5VFQcy5HyghgMdvrkdeOwx62N7BQXJLDqg+YgHvjjHfucc1Lhf7VtEtu1vPyKdVNxfLpo73abTt37/AHvTXTVkgY7cDYmMFcYzzxxngep/H3qNiD8gGTtwuCevJwe3BH157jkuY7zwGIUgscYYJ6gZ6t7Yzjrjiq/CsGKHcDkHOdw6jJ6c547YzyKOTz/4fr+H3+Q/adb6W1V18t3urdHfXuTKGG1SQGPUcc9COo9ByMcYwPUSMuc4bgEFh0+br6dT7EelRkvu3nBLKWbZyFBA9+Dxz9O3FN3EbmJxk4HctnIV8e+Dnp0/IcLa32W2i6d+j/LrclVLLTS+unLpe19bu/62s9mWFYhWIwOcbTycHjqO3A9SO3fKs+UwPvYBPGcHtyeQOmMDnv2pU8kIRIVViM5DYIPZsdex7cc9eMwsYwRsbOedx7jqepOR0AJ55P0o9n52f36dOzfqJTVmlbpdvdtW3t100XMrv8H7hswSOhLHPOV6Anjp7jnkYxUSgu2DnrwegwMYwecex9O9KWOOApY9hgg47L7/AI/qKZk4V9xByRs65GRjgcAg5J5POBnFJLy6q6a16aK7217fNlrVJu2yf/B8v0JOzIWwVYHuScc+nboP1GDSksSDnAX5WwNpCjpnk+nJwTznj+JRnBOUDFRnBAOf9rjAJ5/T8FIU8ZyTzyRgn3I5z6HPXv1q+WOq79NPL/gb3/F3E0/w/FX/AK9GOXoN3pgk8rntnGM+ufw5PFSbAATkYA3BhggcZKjjHP0P61ATjA7HG7A6dzgdD7Hk9OTjl8bhhjJxlgCOoJ7twf1
6jj2Jyq1tbL0b232b02XUY3Oe4BY8eucg46EY74HT1608ZIJABZckkjB4/hx3PQdBnrkdKmCgqu7GcjDdMEd+OvXp36g8cqPTqWHU9MgdDx0/X1rLX52W19U7W3690traFcstL2V99e700svu6NdtSvsYgnGCScc9c9vTHXn1puG9CAMKcdRzx+OPQcgnGDV7ZuQk/ePIIwApHOQPzHTPPQCotuMA53k8HtwMfMfUZ47dc8Ur/wBf16Caadn9/wDWv4dGRAg/KD7nP3ie2BjjGeCRjp9Aq8MdxwMYCkY4xwDjkdz35yaftHLFsDH8OMcY+bP1zxjrik2jO7GRkfN3J7g/z5/wo1Vn56eqJkk16NP0a189uitr5EgBOFH3cc7ecHnOO/p9eM04KFwoLBs+mSB756Ejp1+hNCY3AZKgAg468HqPX3/X1qYFCCoPYYOMElT36/y6j64DOTd16Kz2vs766LW1/wAX2FYr1x8w7gs2fcfUfy68UqhcpvzuzgccDd05Hr34zjv0wZVm255wAG4BB54Pp7+g/AGZQvIJHHKu5Bb3wfT04PTHXNJ/Pdbev5d/IXN6vT0/l13eun623IygJXnCg8DPIznPAGQfYfTGBmgAIMszHOMjJ5Htwc5HQde3UYpzYOSCSeASD0x3x+XOMdPamg5x67sAnJJI9Tnp+vGcd6yle/mrX7aW10enXta9+oXbTau7dNlpZ7+rWu9t2TFA2CCADggHnnnJIxj6H86k2qD94NxuODgnHXI79v5jpUYZuQQo9cjpwQCME+nJ74zknmkyS2GbHBA/PjGB0+pyOQO1aRbaV01bv10WvzJi011Wiv110f3a9evUcpwSSCAdwGO47e5C5Hcc989LCk8EthSuSOpJPXjgEnI9j/KEDaoC5P8AFkE4xyc9vTP8qcrls5GAD1APbr07YHX65rRNK6d3fb7Wumya7J6dvNCas3azjbRL3X016dOul/xJ0brgYAztL9eOmWwP7xzx3oqaNY225zyuRkcHt83fHGQO2OMjqVrsktXotk+223T/AIOwXS62vrq/+D+Wh5GD8+MHHzEYP3s9F6ZA6n29ckihZNoYsAOo2kcgdM9eWzz7Ht2qZ0QEuVdRsGwKQSHAwd+Dghsj/CmIgZQwC72OPvYPJ+6o+bLeo6fSulpPp0/yv6a9fTY00aWmmiu++7Xp59Ol0PZlMYHVuu4DdnrgHBHT36fyj3MzpGQjBiFIXgkD1BHPGD/+rgWNiSqLlmORk4GR1ByMAAdu/Tjufdc4BBAVju4Gec8ehwM5wfY5qLJL3bbK3Tey7Ly67v0FGy5n8W++6+FLba3TXp0FO0BtoxwVJ+82QeeucNxgfTgdaTO073DFFOARjcOmB34PoOeMnoDUzlDlmba7IAfLQEBumCpx8x6kjuST6UEgKFCKcgHc2SXPHHTk8cDnHNVF3k+ZWVnpq9bLTdX3u38k7bpPS7TeqTve/S7T622vf5albMRQhlctv3ZBGME8A8dBnuO/5Sk7mWJSoCkEeuOCCTxyOSenPfrhQV8wHeCrLyh/gI98/jtx/KntsI271BbG1j83PUZxwMdO+OMZ5FbK26VrWve91or+qS/FXs7oblqkovZNN3sr2va6eis3b5qwp+Q5kYMQcBsnGOuBxxnvjdyODio43XGCVcBiQDyT3+7n5gpzzkDB9uEbLqSwAAUkMBxgfic5wc9O/bGYNpA2hFVu5PBbk4HbC/ocZ7UpO62fz7NrX79LdmnsEYpp30bsn0Stbba3bR3113LrszLI4x5b5cgDOSMdAcZJJwOeeaiIYhR8y84P91STyAD91SBgn/CpEI2KjOSy8gA4Ulv4V6qAPfufdsKFd3GAzEtgqeOf7uMnOR0OfqKwdkmlve7W1ttbPRry1/IzTs2tEl12TWlu1+l9LbK3aW0DFjESQSdylTjccDbjjHHORkk/ga7nRJ38yD5i8sc21w6YRIgQCqjPz4HU5B+6RXFxwAO
QyNGwfAPIIU9QD23duwGM9a7DRkjeZG2MCONuc7ioXHQfeHqT9eea4MVO1KST0SWum7s3ru9rfqyOZcya3dmraJ2tddrdrPfTVWZ9K+CruJSYgsm9RGyytwiOx/dOB/CWAbjJ45w1e+WmoT+RvDiVlj8to2Y7Fx0MTDHmd9ygAjnJr518LDcMyq0bCICJ9vOARmQ4Pz444yMDp6V7Zaw77X927SyhEUhHKIXAOdy44QZycEk9OtfC42cJVXZtNXun2XLrfbS1ns9N+/1OBqNwTd9o6faurW39Lb9G2yrrVy2oQTRuUOWmZdn3o0BUI3mZySMksmB1xnvXzv4wsWWZ183eoOUdl2+Yr5yZBnhSRwD79eg921CJ4GclVJ27WljBEYDk4Zic7c4I6NkjBxkV5N4ihaZ7hZW3FMqFVs+Yh5+8BgHI5HOOOeTW+Dqcsk+b3dG113V9HpZbed79DfFNTpPXlbta763jfy0V9U/8n4W0Ba5ZCmCCSZASA209c/3TnpjtxWlAhRgQoZufqoGMnoSQcHp6e9T3MTJO3GCeFwAMAE8HkcfmPbNWLdd8iEgBgrE7uoC4HXA456Dn04r25VHJJ292UU153tdt9PlpqfNV1bmVna9vW1np2fZNb6bXL9pFG4UMMluMHGBn72M8/L2PB59zW5DaFUyq7kBGW6Ng9Tjv04xn3PNZCyeTzgBtxwzYxg/dAAxkHB5HQgelaCakSuxWC/KSdpBGR368f7pHpyOK8+rzt6fCravZK0W++unq3d6rQ82cLrq5dFvbazXW9vn67mkkuAd8eUULkH/WH+9zk9OOPXpmp2hjmUyhWUnphhkEHkqdvQ7s479ulUFfdhs/LjJ7ZB65zn1wBjuenYbUlWLZvxt7AYKY4APP3vccD1xWPsndON9ru3nZLdtPbV239dOZxcpaKy3u7aXSvd20vbX+mUby0CuxkJeIrvbscnOAxHcnpgD6ennmsQYeTygWKDIbqcc7yw4+bpzjHJOAMV2V/rCR8o6kuu0hzuVcdCCcYJ5zxwfxrzvVdROXdZADKu1l/gAPUA46tjueOevOPWy+hV54ykrWSaTW7XLutLaaPpt6lwi9bc2qSVtE17vZaq7t163ukzj50IZ2ViqtjPJwdx+beOcYIyFxjGSD1ql5iBWGTzwR05PXGOw9/wAOauTsXZz8p3A7ec+wGeTkdAeg596zQQGBkVSwBIOMErxgZ74/LOBX07lzRd3tZ2/8BWmn53231R61Nc0fe1aUdFa7tZr12td6rstR2I85UA7P4mBG4Hkd+Cef6ZpGjyC2FAzjgZPAHLDjpnj6kDvVo7CjbztLAY28Etxgkck4B7EewxkVWdmVMDJAGMAAPtGBjHPB9c8jsO3LKDk4tb9V32t2Se2mutrbM0hJtre+iu9XbrbZb36O29hibQQzkhSvH0GMbeODwec/XtW5byICuw7sYRXC5JcA556HqCT+ODisRThlIDP82MHnahPTHA5xjsc89Oasp5i4wRkthVU8Kw9O446j6Y6mt7Wg7aPS6e3M+W+3bb1uVJJ21ejvfvbo9L9nrZXbZ3+j3EYcszl1zhl6AMSMsOTkeoH3eldgbhXTCOAu0EkLli2OCeThQO/86830syJsKgszYDjI/EkEYwCeeT69c11duZmMp5cAAeWvTBx1PQZyeoJ9vTzKytUbVt1e2muiv2Tdt3pp6HdT+HZ3VrpJu8XbVdtd9dL30Z2OkxBnjfzWwQATjOQD8xJyc4OMn6cHofS7FjDAGA3cFlwdwzj5WzxwATjg+xrgNGtQgQnhVyrJyWCtjJIz93jjH3uOwrqnmmSBlG2NVwxIOAcA4BHJAI+v16V4+ISm27tpaaXSW3ff8Un2S19CklGPM1rKzum9VpZPpfy8tbbmfrd2SXTkRsMqQcHHcgcZx35AP4V5vqV2yF9rh3jyEYrljnOSDngDkkEHHXk5rotWeQybhkFc7S5+UKfvADnIYgE9ASO/Fed
6nPtEwE5aQH5ccjJyOPTHIOSeD35AqhRTcUtdItpJP3rxVk7afLztrqc+IXuyb0d7LRRbWj18u6d0k+ljlb6Z/MyDyWO7bnk5wRj0Hp/jWPIxJJJYgY45BAPXjoO3IPHPWlneQ3KDYAwYhmBwQvAIJyRg9jjpwABmkdyocyZ3E4dRySOgGB2PcjocHGTX1NBcsVbdwitNLd1pvt176Ox5c7e61Zt72etnaytb5p22av0tWjbDkkAL94qDhyONw6dcEcDOePQVNGyIZCpPG3B27mx3wMjHYk5x7Gm7ON+08nC5xnbkAE9AemBwOcnI4pzgYO5gWIACqc4YdicdQOCMenOaptXl2cYvW7u7J9LNLpr+W8ycX530aTvqnHte/m316Ea7wwYEAc85wS2eAPTJ9s+lPRuHBBZ8HknuvOCeSAB7nr+cWCxJBOV5bI4xxxjpn+nWhcqw3gbSM/7w7epx6gg5/Ck+uqtZJJu1rpWa1726v8wcb22uklbZ3VntrfS19vO1hixsqlhgHPDD0Y9Cfw6Yz1/CdNoV8n94MlX/AIc89vUds5yfWpXEWOpG5eNhyGcdCw9OevHU88VAZJCijcuOVPAJwONuOgxxyDxnIyeg3rfulfby7PfRXV72632LynrtqlqrbWdtb6abdW7dQACMCxEZxjkZ3MeBg+h5zx7cU5XO0Ku087gRyQR6HoNueevOB60YZ/nIJAXk5AOB3IPPT2GT0NRKoSRSyheeG6HJAxkgZA9+eM8jpUvS/wAtbb3Sfd66fnboG6ad7rpo1ePRddE77t/gTxiRfnIyW4I7kk8E8cDPJ+tSSJuUjpjlecgE8ng/pnPTjHSmoyLuVjuwc7s5UnqCp4IbqTzjI5PaneYzOHcbkzg9ATGvAA67sZ68cdM80pS1Wmj7d9L97K706eW5m+ZyckrNWfVX2cUr9ldemlrXssIU7XkkztGAgBw7dO5+XGDxznr9LASPa53dcHO3ncuMZPpzwOnrUPyqrEBgD9zgHHbceRkcY/HtjmxGYxJtkIYAcqDgKfU9snPTp1OaTdnJXbu0ndLZ289r+b9N75zbu5Lm06LfTldtEtr+S38iskRWQM+GUnLc4OMAjjBwDn+RJ6Cn+Uo3sgYIpyM/wqfx5/Xr24zoiOPLyqjmMqAobJLf3s+objAHTgetNlEQUAMA8igKqjAAVjgHnrz6/XNWkndt78sbPrZp6ff8krbmbqu6VnayTtta6vdLZpXv6fIoj5WBYgrwdpP3Rz7ZOT1B49+xfhQXKgnBJDHhSRjjHOVyOOnAHHaldIgQwDBCo4diTvyevHTIJA/KjJC9CVbAXnaRjqp9B7YGfX01tFq8bpJ9k7tJO/fr2u+4Np2tzX0T6LRx09Vrfy1YyNXAKgLvJ3biMFX5wDnpjjoT2PXgqUk3SAvguMYIxgNy23k8EY544+lA3feJIz0J5UDnAJ44zwO45OBTpMgsGDDbjcTwihs4Kk/THABH15qZw5UrJ+80t/NdL9NdHv5GkeZy0aV7X1Ts9NNdlqtNNe3RkcbOMIOc4ViNyhQcEjJ+73PGDkEZ4x0dnbI3zuxZUAAwoXK4+Z164xnoMnpz0zzkTSLtJBCI5Q/MMlD3A7+59x1PTsLBlEaqqlkxskZuSoJ6AemeOo6+vXzMZKUYuztqlo9fsvda99NbnRCN5Wk7JbddGkr3062e7v52LlmCXU5RUVmywOD2UZGOp7kn046Y3J1LQAph/UgZBU9vU+/fH4VDDaRqFlRHOcKRjOVP3io7A8Y7dua2I40dBEwDZG/5DgKRyN3rxwV6c8cHJ8apUTldJpJbLb7N9lut+23e53Uoci5G15W0Wy6Ws7+tnr3R55fWTRzFtpVW68kYLZIznPydj/OsxV2SMNnKHG5hnbjOCvdcZ5xnHHqc+hahb25dyyNtYYO4kbyBjCjHJzjI6Yrj7mKIFgFwAzDAI35H94kAkH0Pv6k11Ua0qkUmnayTejWjS1Wmttu
unyLlBRutOV2et0k7X2u1e3Xq16JttIlLLJ8xJyGVgCpHHzsQRwMjnnrxjqN5bOMbywCMBvDg8jjJY9yO2CBjvurnLScJIiIQCCwB4BPQFCD1Xvjr1OemNlL+NPmLgMNwYdQOMAKOhOQcr24z1zVS53e21rO3VJ6fh069+hwzna+mrtFrZ2ezWmyX9WZGY3R2AlREKhyzdsE4AB/MtwB05NW0dkguFH7xipbpkMQB69CM8Hn6Z4rKuL9MFy48tgRtPJyeMjHTpwM4xnn1oHUyIzCrlQy7GKHJzgghuRnnqAfoTkGripSu0m0mlslbRXb62T3Wtl63OeSVn2bVmrJLRJN28r/5pmJOpMrMyb8sxbPA+brkEnONoI6ZzzioFQFGPHIyM8gDgdOo9R6Y4FTzum4srElgUIf7jY4yCOu7oeAAR1PNRoQ0eQAGJGCfu5XI4HTAGMHJ6nivZoq8E/KK1Su1bX8PwvpsO9rdFdWv6dU9trP+mN3MwVSSVGchckD/AGlGeV46fnUqShdiEjan3xj74bpxk5A7jPp2qu5YZG35gMEA4O3HfA4GT26Uij92TgBgcc9QCRx1zjGcUqkVLReTW/k+23TrbWw+ifR9NN21ro33XrfyNFplbJUhEIDLt4O/uwx2Y4yPXqcjirI7tncz7SD8u4ESKeMEdNpODu9+mSMQvuOQRtzg46AEcjAHb6HoBj1pAAVX5QSG+8TnOc4A9geRzx1PNcSi4tK10rarpqt+/X0XQEmmrtv9L2et7Nrv30stEOC4RgFJPG3DZGRjGO5HXjA/TNOC5xxtbOchuR0OBnkDjuenPPNR8IysrbWB6PkKxPUHr144P8s1aUkrIGCliFYE4O4gZBHTBGeT6/hWyhffXRb69VZO3fd3366bU27bvXd+elu600d/PXqaNlcmEZdsYOEAXfzxnHI4Pc4P5V2lhqLFiTOmxlXeRxgDGNnOR0P6cmvPYXIaNRtPJJY9l/2v8ffOBnK3YrkxyO6yMoJGFI/iAIG0/wB045HbqM8CsKuHumvh0Tjp1bWnpq9f87vKcFJa6Sv1futaO9rvprZp+Z7ho2uRh4gHYDJxMGwNgxt3DB565+vbrXqWk+JEYgLcRudoClVGEK/dBXPXn5j14HB6V8qW+r+SYjI7IWyyvuygVcZOO5ORyTxznOK6iy8QFV3CQBWTgg4kwc7m4zhjxz39hXlV8I3eSSu0krJO8tE131v187bI5qmGjadk0rxtZK3TRu600f4+R9S22qIzIEdWJbDlW+Xcf4geeRzyAORwc9NtL1lBDSZGcsxf5huJwGHYEA4BBA9e9fMth4qk3IhIjVvufOSOORu+vv15x3ruLLxU0isvmOTgB9v3s8ghQfwOePTjFeXiMNUilolp59lvZWt0s3ociw0qbUm0l3i3JrVW0Wmuqte932PbYryNzlm2rjcqnIzngH3/AE+oqvdSxykEpGQTg8YcqDymeQM5HHXuOmB5vZ+JkLbWZSxQxAj5gicDIXj5gM5bI7Zrbg1WFmiA4DAByTkFlPUD1/EDJxn08WthpRbbj0TstV0e3b9dm2dVKdly80k9/ehZ3suvZ3XyfQsahG6BzCuSpMnykcoeSCuMgqMZ55yOc9eXk1ZrZ9zyFEK5Ea9Q3Qlh/Eowf6kV0V/eW4RnV8K5b5j95jj7p7q2Oi+nOeePItXvxDK4AOzkjBy2G7HPYce/PXvWmDw/tfcnG/bTV7Wju10007M9CE0o7JySV+qTbSTT3vftZa7FrXNdlfKI8TAMdwHMfzDkjnrjuOF6Y5NeV6nqAfzVDM0hyGRfmDE8rgnkbTkk+/TNTX90rNIQzKWXaQ5+UsckBQTzxxnPHr1rmZpCRNzyF4djtBK993VW5wMA9+RmvqcDgY0VG6Sul8NvLR20/K+9wlNyUtdt77XfLZq+y2un8l0IXcszMVMkhyMLgLuOCr9OWGe/A988NVpGEbsSRk7wOSCMcAE8j/63HUVVw5Z
QD+8bayOcgezFumPXr69+ZMNEGPDSBhlVPyqe7qP4QO4PqOcnNe2o8ui8raLVdNtH/SOaaUkk3ttbbovl16We9le7tENiVynzhl2NkZAz1TB6DnHX8qRt6ZjUMowuB1YKeSRnnae+RnI7imKCMMSAc5kYHGOB8o/2SDkjBJ4prSKPNCrnk5JBJKkdF56c/eHHfmt4rls9dbX62bsrWaXd/cYcr5mlv8vJbK9t7pfdqKtwPNAeQhVLBkGSsh7fOP4cnnI6cZJPLZZgrM0YbocFRlQeuA3G1yD94DB2noKpLuTgKN3zKSvJUHBAOccYz83XrnsKcBIymQsSJAFYA42gcDYOw7ED7xPTg10w6ptW0SbV+i1Vlpp6/fqX7NJ3bSaS3tZ7W2779dXqV3RtikKBljuOcsRnoPQKD2/D3tqpYGIsgkBCx4+7nPfrtA5+Y56VXXG8LIhQE/Kq/MSuDyTn7p4JzVy0WJdkhADgsjoxyWY9HGegUZJPfPaqb0tr2017JbvS2rutr9tDVq8dbrZp2W9k03vora79tLWHpa7WZjhpBgltv3lP8QOcf4Ht3rXto4hvkZlC5TfHng7gclDzyvZQPSqrTGLcYwQSB5jE7sHnOzoMdsDjnuKes8a7AyoGVSoCZOQejH1cjv2z05NclWTcd3fbR2urq9+n3K5C6S0bSs3ppqldPokvx87FqfyHlYruCkAsCcEk/cDYHzA84xjFYM+d5IGdueow4I6lyT82e54Bx171uOUGJsFHaEIqjGAFzkqQOGO4YODn0FYk27zUkdHCFN252+Vs5wDxznjIwAOBkDmoovW6tok2+6ejXRXsnZ91bTQO9rNWv2upcq7au91q1+JTi3qwmfJQgsVQ4y7jgBRkDG3LA+1R5bKkjeodhsJ4ZTgkMPY9DnAGQKkYlVkcxtgkljj5BtHA9sfwHHTIxkcwLuGJAwUKxU4yQ69lJ9+eSM9jx074xTu9l0Wum3TuumnXbe+iiru+iskr7bJpJXbTadr9PJIsNvO8bikbBX2qmSvUBsZ4DHO498DpjBro4VsMNrBiVIPLEj5mbO75PXPA4xVrcpTJcI0gyzIN0bf3Qwz93g8joc8HNQxW4mTKgMynJd1I3KCcruDAZ6DGCMHms5NWfRWv5Pvrtrr31ewRS1T6tRvs76JNtrVaa9browSBzG0iEh3IAUcg45LD+6MZ479iB1co2yqzuHxjdk4BIzyq44XqOMZ4zVi1tJijMFZo87VUHLA914HU544JODzT57OWMhijBCMIWH3c9VPHf09+DXNKopNRc9WnbVJ3VmrX69df001cdLtqzbSlbpfb1W6/zNS0mYzAK6gcMxwACSPmK8kn39+cZr0jw/dxrGscj5VMnapzuYn5AM9uuTz1ry612rsZsjaSCoGFPPJHsOMAcjoetdlpF1GjxopG0HGT2bI7nGevUDP49PNrrmjJWe6+drXfT77b6dTnnTS2u1ZXfW+l9bdF8m721PTRdzMoIIwCFwTgqOgz7EdSevBIxyZ4735wFbbs273xkF+c7RwAG43D8+tZFtJCVDmQMj7Qd3KHtleg4yPpgc9KmmSJFl8ssUcDhzk7c9Mccn5e/QcdK4eVSSsmmpcr7N2XW1vTu76Js4pQe8W731Tt3Vmttd9NL3Wt0a019C0TlwSdwByMjPODnuOTx+XbPJao6uZXXbktk9D1BwhOAQvTnAq3LcrFEEyMhRgKAduehJ7kEdsYyaxLu5jYPna7uuCoOQWx97GRg+mO/rVxjyuLs7pqzslqnG2/ZJ27W63HGLe71TW1lvZq99dLa30WiW2vMyrhuchCBkA5yTngnjOOAOn07VmTxrh125OcBmxySflPoTjp7+uRW0/luQSMtzgnJDEdAR6jt6dewxn3UbPkqhOEyFUcjHUnkd/fPvjNenGWqv1tq9H87Xvb9PNmu7ja+qV+lnolvZ6eV3q+2uLIpAOPu8cHJAIODkfz7np2qFwTjauCrHD
JyfmAGVPOQcdMfTnirjLhssMY5xnkED5lxjggjkZODj3qEhN0gXJPUNjgt/DkdAeSM9O4Brri2nZ6ta6316Nea+6/YaTaWm29tubReT10vr1v0aI0mIbccAg7diDGQMcHqM+uMY79KmeSMl22lGK/Kccb+CO+M5zzxg9O9VER0IIBMgzliPkC57nj36jPfvT9sxDsW64G0nJBz6Y6ew4PABAFXZW0tZJLW11107vTfc00vfRWvd23bsvO1nfrfvowJkYBScd9xGQAMEfN+vSnIxYMMEiIFw54OVIGc9QOSdo/PNNAIfB3FQPXABI5yPTvg+/enK0QjCl1KsOWJHL5B25GBn64/HpQ43V2vTS+mjfytbXpbux+0SvF31tqtu772utCKMmMhydpLkbgCSxkPGOeuR14wAeKuiSMqwDgyAAHHILcdsj+ZP1xxTdgHDAkhAMjHBJGCeMAnpjkHHI61EuVYgRlCScPnJGeVO4dB+GO9Ple90rrrrtZ/K97X0+eziUtrNx+Sd9ntft/WhfIAImEjAgBXicdSM/N1BOOmPz7Uwt91VJYsSfl7kn7u4846ZHY5zUYdkV92GKrgnbuBY4+YDIBGPTtnHXhCuUWUKxTggj5SgPUkgk/QHr2ximlro9bJ+i0189/010YufTVX01fy10/S46KQltzkEjKlQMAqDkDHcA9ec9KlLYMhILEn5SMkZ4Iwewx25weDVMKH4RWPJw7fLjJ6sfbt3+tWPuAhmwQSAVJwM44zyBwPr9KHH3tHvpyvo3Z766/mrpO+4m09+a++iW1tUk+q9PPYcCWU7+A2Pm5Jx2AweT2wfp7VOvlsNgxlAdrbRgj9Ac/X09OYACZBkfLwoyPlJ7EnuD7gED3xVxI1KybgobGUCcZA6++D6d8dqltx3a2Wq+Vttei39fMuLbfXo7dLvl2W76X3VtdmyuiEFcdWYkjk7iScgHseoHHqPpIY+CxBTBypOTznBOMcfX8x6OXzGQNkKucjA+fOeBzjGecHpz+UgUueOmPmAxgjHzcZ5znjof0FZ7yd3tZ2vs9NulteqV76dTZNrlTd23r3SdtNbtf15WrBQGGW5fcQcHn3J9OgJ4PpzxUoVhuAIPBAKgdOOe2QMYJ/wDr1KY84UqcqPl44wcY5znjuc+nanLC4yzDCqMHB55ye/8A+vkfWtFbW/Tz2fTTr0027lpNPXa23n0f3a2/O9yGIbQGYhjuPJ5wCcbR2GPp9eSTVpVBU5GATkYHJ569+2c4wPoKjAPOAQuSAM9+AARg4BPp04JPWgsyrzwoJUgd/wAOeenPTrSEk03dppbK1traNrb9V0AsDuA6HsMA5GPT6Y68ZzUiFccg5GB0Jxnvkdzzx/kxhFwGKsN3Tnk5zyPy9c+9TIvBAYDuCxOcDqBkeue3pWLVnrfXa7V1t29H338rF3ba100WqWnnr+PfyVx7DIGDgbQSB1zkYyMDJOR/gelNOdnQtwD0xyOg7dOCORj60wMoz82ACRnjPUEnjjuDj6GpFZdvUnPIJ7kj0zj3wcke3So6rfr2t/n1/wCBpcqLXK3K178qbstVZaJfktV6orndjBGBnbyTzzzkdPX36DnBxOhG0odvyoTnopPGMDnJ9+3NRsASuScdCMZGeckHgE9vzGKEww2jPcbiACQOueuOTz6Dn2pu3a3zdun+T+9mDik5O9r27u2q8291/VgXOFGTuJJyTlTkEgAr1/znvUyqVGSASM7epOSfmA7Ede/rnkGnhPlwMgDJB7Ejv/gRnqOSSaQBgQST2IH059wSeo9epx3CXJPXyV0029N7dPxTdn3d1CjbvII+bqOoYnn2I68H35NSc/dXDAggcAdOcHvjPOOnHPpS7wBgdTjggcnuQPw47d8YFKu3ORu3HIIHIHGM47H8Mjjtigm6TTfdW9ei/QYqOpHAyeuDgE84AGep74/H2lGANpABJznvk9Mc8cdOPQelNbaM7ixxyAMknp970J5JJPP
40HAAIVhkjGeD+RJ5OMc55+lRyat8z3TS6J97bf8ADrsTzcuyTv1vrffp1elvu2H4DEgE4xll77h755HbA6dqkC4TlTkYwcDkDHPXoM8+memagQvuJ24yMbmIOffnPH49+wPFobtrDGR0OeBg4zx2B+orNPmkk79Hbm6rq15WX427D0lZtczdnvott7+nR7fMYpyCxYlTkAD1x0Ax9fx6cVIAQvyggZJ3HjJ446nkE4xgZ59RTAAAAMhs5C9s56g+/bn6HNPTOccgFjlW7nqOOgJ/Pv246Fdtb3b6blpq3RvazX5u6Xnrq38mTKXyA3QAEYPGevGP5Z6c8EUUg3biOTgfdPKgZ+mO/GO+OgorVaJJ32XRvp5XIvduzaSduj8+ztvtf11ueYqQhKucDAOwHIOfQDoBx/PkchrKOkeQc5CkYZc+xAxknAB5HpVoqpLjAXCdQOFIzgg4O4nP3PxzgGnssLRYZ0eQclkPPGMb+MKAM4wSOOnr0tpLRP16WST3tre7S89PMFJJ2fVrzttq1bztun5amcUdQGLEMDyFJ+UjjZnsT/TjmnZUAEhuPvbfmJyecD8sen4CpnVgM/MVIzkkElRyDyOp4we/Q+pj3KcoBgDkkDJbHOSc/d65IBxjp2qHdR1fmk007WTVl3W/X80Vdytpez2WmmmmiurL8xhUEEdjkehwfb1Oe2eaC20EK5UqOCOw78cYOeO/Y46Ck7ZweWwePTjOe+M8jt60qhXzkg7eBg857AH6dfT8KUJRWnpv026+qW+y13sOzW+y7Wfa+j+W9t13s0XPmBlO7I5ZhkZPQ5yMjOATilKFWY8bSB8uflUjHzKcfL3JGDntSEEvtTcFA5GcKB1OR6+/1OAeisduGYsxXawPB3AnHA6FR69s++BspXju7PTo9rWfo+2lmm/IOq21SVtL26Xemr01WqWvQdEykAHlSWLYJIJ/hAAGRg8nn9etpURoW3Da2Mo3qcZDHHJYZP09CCTVZduNqhVJIwcnqM5J7c59OTT/ADHbAYqAuFwOBsHQDrk+pPtzxURbldWW1nf8Vr3t5dtTOV5O6bWq62ulZ7JNeT6abDjC2VjOfM27lLcE5yQSeRjqM5zx05NTxRs0u9wC5wypnAYKME9+B0H/AOqnDcwGSzbwFBJ6DJGBjqp6Ajrg8DFa1nasHiUKQ/8AES2QWydqg4GAe/YUVmuVy2WzT2aSWm9vuu++j0mcrJO61Tura20vdW0Xo/yLUdvI3735S7BWOFPfOU5Ptgnj6Hmuq0bTpSEMcQiRmysw+/u6gKOwOTg4+b1GMGK0s3lxI4Zi5BkVPmIAPUgDqOBnrj3r0LSdJYxodhQl1JYknbt6Iy4HJz24/Ovkswx3s4yTaV3ay3XwtettbXs/wOONRJxWjV1pa6unt2Vmr9N7nZeGXaER74yVj2hoYx5jshJBVRxgLjLHJIDDive9OjQW8E32csxG15Mc/MPlCoPu7ee7Z5zgV5b4f0zyXQIpLoqtgryYW5bcTgEscEEHjHfPHvejaVI9uzEhg0bEc4aNsDmTrvbsgBGTnrjFfLV60JSuk/eWu9ujs13676Pby+lwFSKTipe81zNaX6Oy6PVXVtLKyucTqNvJcPIhiQ7SzhFG1dhxwSTyBxhT93nnnB8p8QWcKtIGUxsQ20q+4HHViMcZ9M/ljFfQet6S9uzbJI5UaMIWj+YKCCCZDwQWwNvcEHOea8f1m1AmuYXjBbYXiKnI8tuj7+eD1Ixnp6nG2DqLmumrLSyb2stbrfpt0W+9u+rNTi204tWvF6dtbJNpaPrpd2t1+c9TgUXT8ElfuyNwFPJYcHB9h9eT1rMjk2Fizsp6DAyQGJweM/KcHoevTriut8T23lM8kbEbiVbjjcOhHoDz/nk+fy3DAjnBxkgcAqOjAYwPYfh9PpqEXUpxk/JWfpF7b9rLrrp0PFrW5pX3b0X3aP1a11+7Qv3N4wxtOANoYnAHfp154AJBwB61mpe
lTw/l/OOhJJDHB57jr9AM49cq8uMHtIWycZ/hPUAdnxknJ4x3qjFcxsoySMZBJHK7TwTk4yMnsevaumFDlTfLpKyej00S67Jen+Rgoq1+VpvdJNu6a310vr38n37ldaMcZzIPlAV888eq8g54z9SPYVVuNRDxsVc4IXkD5u+WPI4GfUHHcVyM8+AxSTeNu4g8nHc88gHHQjpVE30iIxDqUdQWUZ+X1wece2Rk49+daODj8UFazs76a6NpWSXV6ro++rx9i2m4uzbSd3svd0W6vtbv32NC9vPMR1zuVAQjfdKL3Lck9vqeRxzXPXMgkO0ZfHEfBBUcnjn7o9+uevPA08jMNzZQhsFTwc8qvGBjrk/piqTOxbguAAwYv/D1wAT9OgGK9GMORJJJX1W6erSevTzt1012NI0XFJbOKT6rV2V797pbb38yI53MxYALgkkYJBJJO3PGOBsHJ68YzTGw7MByMA78Eqe+ckcjt29RTFkIwGVTkgjuce3t69uenq9C5DHKhOjYxhueCB1z+WcDj0Fe+r1Ti9772tbZ26t9dDdQkm1azsknolraz072dk2ltYicuybSu0qPl4PPZcA9QAOSSc9femEnaitncudxzznPQHAIA/H+lWpI1IUBt7uAMx84brhxjnb0J6L1NQldr5yvB2v5mTuz2B5ySc5XjGDzniumKTvotPK7s7a7Xs+m3o2aRjdW0Wrab0d9HdeT9d+uwR4yVIBAycc5JHTpjpzzjjP5SqAsisTgEkFSDux2K454559eORUoOyKQlQOQSRyRtJyB0wTnpyOCOah/5aRtkfMenGSpO3aT2YYPHX3x0baUXZrTbpa1n9re3XuVGKbtG+jV5fZV7XSu9dO2t9L2Or0uSJjGg6Y+Zuec4K59e+SCB0616DpMO4hlUbWwjsygKAfTPXJ+mffpXBaVGC0TfJjAAjHHLEYP6HPUEdq9X0e1A2sDG52ggHhEI4Y7fUj7uCNpyPQ14OMlq1zNc2j33bXX+krLc9GipKyVtbNa69E+mqs/LvvY73RrCOOJndQPlC4bhZCTwV7sT2Xjgfe7U+/hgDEPuVDgFSNpHX5wcnoc4UcD19NezTNpGPuSMpMZU8oB0P487W4zycGsTVJkRlbergLtkPAyy8kHuzEHk8Y75zXjR5nNu7bbaVrt9F28u9/kjpk3eyilfpZ3SVlpba9076a2S7HB68YsMu4uVGFkYnoR90r/ABZxyMjt1PXyTV5AySJuKMo4dcL36k88jjA53Z68Yr0zxRcwvlvMXyyg2jHzFiMEsQTjnGQfzrxXUJ2bepJ2jIZA2So/oOckj8M5xXtZfSTjF6pqXNyvp8O1+qelrdtrHJiH73LF+609Xd/y3d/+Cul9tcpjvfO8ljyGzglARnIA556Dv24yasMwGCMqTwz7csAf7wzkk8enr3xTI/nComem8nthehHHUc8ZwPccVLKyltyyIwIXcBg5YfeDdDjHX8OfX3o2UemrTdu70X+Wmi26Hl1HeXLZ2Wu/ey1sle+rta2jvptWDuzqDs25zgcZx0J44H3u3pTBuVi7A5IZW4H3ePmQ5OM4AOQTwMDsXSRgNtTGGUOmSeRgE5GMrnAHv1qMq2M4IYEFlB7enIJwOeO2eO+cvtbJxdlrdbWvd9lo76drdC1y2VrJNJWaV7fJqz6PfVeQ9SpXflsOSFBYfeUHJPHHXgdWOefV6sAGVsDjk7dzZHYc8Ejoeg/HNNAYDbgbjwQo3EAHnHoemPp2pdvBbkZAKg4LEHqWYfjx/iabV+10/wAb8ttd0lbe++u4nZ31srtbra8bdOyte3TV2IcyfMSoKjJ3Zx8hOBjg8gZ6AY4xzTwGwAqrtByDuP3jgkdO+Dg59yfSULHgNvBY9Yx93PYs3JzjPH4ZORSkIrEEkAhWUjBPHTP908e+B0yMUW36WSvfz8rP/h9dNk+ZdIrSztZpJ6Lyve71Wmqt5OVlWM7yC/G4KMggDhQAc8cg45/
GmyBSsbFi2QAUPTj1IIxjpwO/NQK0ivuCgjJG7PzBSRk5/njnHUYpQSXG7gHJI/iOehH15zwenFS0/lZO99delnfbp5bKwuTW6ave+ln2vFJaq1u6d7bbkzbG2qq7UA2jHOGPGPxx16ntwKUFvLWLyz8rAswJ3n3x/COMnrkDPrUbqh3bTwQFXa2SDjr04Pr1575IAVDjC5O0/wB7neRwMn168jvwfWpdmm/5em2vztt6X8rXF0V77qVno7aa3u7vyu9ddGrq5GFJ2nClVONxGcHgBcDknkDoAc8Z6yKoYSFRgqNpB5ztxls+vIwKqOzglsAMPYYQY5I9+nP6ZpY2cFXZ22nPyd2b169P/wBfoKi7b5ktFv1aWibfnro+ttbGTg373MleyVm9007Kyva2t9k1rpq9JpJAmFdk2oOB0AHOARxkkjnHbI9aaPMLKcAoFB3YG4K2AW653HGBgcc9CTiBZQT8rnkEOMEjd0JDDGCOPxPTsHLMC23qqrgErncGHB68g4wT254OTnaLvfa115atJp7advRPyOfklHVLW127bXsuq37au6vp0AguVjGVBx5a44C9VIJ+px3Occ84UIN5ycpGcknjPXoADjPPH9cCmeYQOCPukMGXIU5z8hzxx7DqOpFKsmSVVjhk7AksGBzjnkDvwOO3IA05n8TduVvlae7fTW6d9OnbQpRnbytr3TfLd3t1V+r79RiSxjIIUqW5Xkk+oPAwAe/vznPEh2uTk7YmB3Ekkk9Qw75HTjqOw5FQR7VI2phxkEYJBx0bHv1HTsRT5UXDHdhWAZE6gYJwOhIGedufQE44ok1e19VZp2e/utrr8vmvW/dU1ZNN23tZ2cVe222qaffyFjdMIgIZmbBJHLBRlgcDhguMc8g89q67TWQtGFKiIgKVxk5I6kbs7h2/TiuEVmRTJtUhWxgjLHJ5PrwR37Ac56adneLDIr7vLDgKQcsVLcFkzxgYIODnkHJ5rzsTRnOLit+a7trfa90rpb2623sdcHyWbWl2999tNNFdaJt6WZ7EjxxoMuOAp4A3t1wrD1PfHQgfSoJZDvJjIVcEMF5ZSe7EHr6//qzxsWuK26IttESARykYLYzgjnLEg9T3HWlTVVZh5bkHa25zwGYeo7enPU9zXkRwtS7umne6sm7p23stOnRNvTa5vGpCW6Tbto3blatazbTaWy1u1ptodFdXLYKY3Mi7kDAbT7j2Hb/9VczcRsGkKxo0jKcAAAqQTn6NkZ4/Sh9QlDHkF+SpboF9VOep688D86qT3WFL7/mIBfPQHn5QfbueOnoa7KVCUVsndLW73ur3V7p9vnra4+aE01Zp3drpXtpt3Xfby6mc5KvnCgrgh84JDckZ9QRyB1659IJroEOpI4zgIMDjHbvu74xnAHUUTyE5dcFnx8pbIGcgllPT3+vTOKznJDAMPmyRjBGRzyTuyOvPtXb7GLirW5tNr30a31tbTr/kzlm+aVtXsl02S+aemuv5EhnMiFQ3JG1VxjaOTjGcY9Tg+v1iDEPlOMYycEAnvjPQce/HfFNzhQwHJA5xnAOMlc9Cvtnv68tBPmBWAJOGUnHzDqBkZB98jIPPanGld3je1veu7J35U3fZeS18uwrbtdtVdPqtdtrdra6+Yj4yzEEk9R2yR256Y6jvn3py5RCOVyMDYeQBzjjop9ec8dhViVEUDf8AIrruUqCeTjJPvnpnHbk9ahRFKBxh2Dcp/Ec5GSOenb1yfrXVHRKMbpXTdu1lprre2nr3ZKldX97fqm76K1r6aPR9r9iQtkBcHeMHI5BJ5yOgIAzxxT8ICxLKRgAqwG5hzzjnn8ajbaDgjjJXAIUAnBAA5J6nPTnjIp67ShUbVYdGyA2VPPJ5/PoR7VlJ2Vle6a1fVaN7eu2m3pfNqyTV0n10try372ta/k2tSSWONTu6goDhRu55yXGTg9uOnHXFVnAICjcFxlRjBwenBweSOp+917cyM7hw2WGVGRnBAHB
A5wQwJ9O3YVCXctgISwYDDYwVbr34Veec45rOF9E9byTTV1ppZf1592VBS6yTdlrfTpbRq3ZN3t8tSRlHlqrKA2Rgh/mJzxnjH1ABPJGe9L5km7DKN4IbnAyo/vEZwTnOfYe2YMOpDYw6sCVB3BeQRgnke/t+VTcbDyzM2N5xklj19OMA9/511QcVo0m7+vTW/wA3dvTvuldvZX95t6NN6bJ7N2Vl8++lk8SKGcglieSSPlJY84wfu8DnH0HJpxO5SSwHG3YOeOzAepHryc4zUROBtVTtBAOw4bjrkfxZPX6elJGVwyKy7mBOCSCM84Y85Ix6f405qPLe13v32tbRaWs0t9e7uxPX3lpsrrX3dNXppvd369dSx5hVAoIAU4A25JBxw3P3QMYHuRzxiVZ22AKzndggRgfIf74GeF9c8nB4BxmBsAEgg45DdenII9GJ474565wGKTJLs4AYfxEqAAc8+uDwMY7jjBrllCN27JO6knZ31svku+hMfPo3e9t0l+N9b+t73ua0GpvCsahziNt7NkkNjjBXOQBz3611NnrUu8Mkqt5gLO2M5GPmABP3yP1HHeuDCBGb5CwKh0UDKsM9yf065+gq3DKACN4VwoJG/aS2eBtweVHXB6EVhXw8Kiuo3t5NpvTVrTb7mu2pbjGSte271baltdO3RWf3pHqmmanKHhw4BYE5xtDKMbtx3E5A5HqS2PSu7sr2WM7zIAGKsF6lQ/uD8uMHjBxmvCbHUJIdrvIo2NwHJyEJ5OB1XocnH4du5ttc8tCyzKS2DkqCA3O0jJHGev15yQc+Hi8JUk7RjGUXponeyto2723vrd+u7wquN1da3tdbvRdH0d+t7a7Hp93qYaPYDtVQ33MYYkZ3EN0OevPTHXIrz3WL6ORHU71YRgqWHUnOM9OeOM4I75PByp9bnGWnkQMqlWYYwWP3Qy5PzP3PqOBXMX+qTTM3z7lydwxggn72OflPA6nnj8TC4GUZJ3i0rNtfZ1Ttezbb2308jOM52ceW2qXNppa1t316K27dvKLUJkdj5YJ2DIye/GWb6fjx9OMiWRxgnLkct82MDpx+HXsB0IFMeYv1LZQk5DYYoeeuDlR0zg+tR5DEuVOCADnqT1yT3AzjnuMZ9PfpK0VHTdaabadtVfy19TX3kldJ6vz1aT3vs73Vt9bkwYEYGflIO4ndgEnOD6Z/X8qnVBI4IBIcKHZhhix4GCSc4+g6iq4xx90Rsu3A67hjccjqTxnr2/G2MbduQoAA3KcsoAzl+hBI4z35x0rfle+ujV7u/RPpZu1rNLfTXoZudn5+uiem9lurPRfeTvC5MhVF2quSqoGYYxh3G4ZJGfm9MHGTSyWpaBtwRdoyrLxyOofjgdNo578kUiz+WWLNtTjHBY7j3X2OOBye3tSmZixWN8DyzkEcupH3Np6+ueCMVSlK1721urOy7de/yfS6ITd4q2t9Wuuq+5JdE7WXkZjo+Q5yASc7h8xLDGwj36qOeCeeuINxweDnhSq9dgzgBfUdzng9xirzlzv2kMWVDgEgKwzk46fLxyemTgE1VEbKVcqGJBVX5Kso4PcbjnoSBnrjvWsWpJc1k0o7L0vd6bX3a0Xc2TUuZNKytZrS/wAL6dr3va/qtxAjoGVXYuwAycsB2PPYc8Hp1znipkKO8aRhNxcoEUYOVPUg9fc9+M+tQhsbsHLMoCjGNrDqSM+nBOfwwMVGiiMiQgFkJLdwVbpjntz7fjT6dbaW3bvZJJ6bav1vou5drVN6Xt818vLXT1Wpsun7oqWwEBClTlhyM7xgkA9Pz9KArAYQKHBV84G9FGe5IGTwRzz69aZFcRMHRSAVQZQchmzznPOfu8dvWrbbTgrI6FogDj5gSMZ34wBnqMjAww5NYzTs01dtOXTX4fv+bfYq2qbSTSVm9n8OnRf1r1ZWlkkD+bvCrhm29GXGAFEfYnJ7g5GOSCKzJpWYKgDNjJycHJPOB1wvAyce2OlSzPIokk2
l8AdVBIx1OSclh64zzis9cebkgumN2Sx5478dBzk8dBkA1NKHLd3Wy0st9Fqn5vfXe/XVRgldpbLbdNq2+v66367CxEYQ71cnIKdn54UDnlRlvx7GnYjbcSSHwMxsMAHvgEclSOfTp600KFLONoyDgZztGcM5/QZPPt0qKMNvBIXaTwQSCc8n5+wz1AHPUYNelGSav2Sul0+XlrtfRfdaSk21pZK3TXp06fpo7Ej5MjcK5O3AAwq4HQHoBxzz1xk10elWjToXlSSVZBtTjAcjhsY6EAjGRyPoKwyh3JtIAYAMqksrEcuG6bX6Z9SBjpz2XhwvxCrqARuPPC7OVRsjq+ecdT2Oa8/GzSouUH1t8ly/NX17Nm1FxdSN/eSXz3ildej1fV3va2m9p1hEA0KLGSdryDrskU/Kqqf+Wg5zz1xxzipr7RoZWZ0G/DIrL0Qhc5x7/wCzjgjPbjYtrcRx+YIdgO0IcY+8T5jKCer4XHPGBya17S2infY0bSZAkbJ2bNh+U55zjqemBycHFfOutKMnPmdla6Vm27q99dmui8md/JCcFScUkneMtVd3vp1bvpa1jzi50ZrPfLGARtKkMv3VIHzLn1x1I4APbGcRUkicsPugkg4yVJyOTk9OePc5PevXNbgiRJPLRXKkK7A4HlrnBIwcHnDHOcj2ry64cGWePayKwLAjJ2kcAkHB2ds+h5HOK6aFWU4uTV9X9y5baO/6+ZyVYqMpaaWs7K+1vS/Zd7db3N2w1OVkjBPyxgxsPy+gyexIBHPvW+upo0KjzCu0AAfxHoNoGTlcHH4+1ebpK4Xbu2ooJeQ/xPxhVB6nqFwR057Gp/7TmEZTcNw2gAEbyCcAhugA7nAGeO9bypc2seV80uZpJJa2t0d2lv5Lfe3LOndxso2XS1rX5Xfaza3tpvudlcXAKELkbRleowOc7jnr781z0rMJNyjqPvMxIPXpwCD7ex5NVk1FyU3uDuQBlwCwPQk4wM8jJ/TjFL9pDM4OSCNysTwH6/LnHfkcfU01CaTT193s+jXS3m3oSo2TeiW918l+S3s2vVJK6rlVw3GFLccgZ54P6nr2wKhc5zl8gIcdsk84OOue3TI6dRVdZc5LPk45HBPXrwWwO/JGeScZolaMqSHw/ABB+9gn8Dk/wn9apWXVpxST066X/DrppvbS0ctryd1K65HstLb9Xe9vyKE23fwcsc7lYY3H698+vGMMOgqFtwjwUIU4ywIPPoeOP/HgWpz7i52gYPIzwMn0Pp1A69e/FNXcFYEAquCMYJZjnaoB65z1zgehzkdcJNLVX63Wl1ZWun3utv8AO75WkmuVttN30avo7p6XtutvxKruVOTwo4wTwC38JHGfc5/CmtKV3FAiuwO5QAAQBycZ+8QeufwNLJhhhWJZjjaO7dwR7Y5bOP6wEHbkD5iSBtPJ6YKt7Y4XjnAJPUdCV1HR3vs9UrpXelvLrt6kWXuuztotWrPZ2873s7PySH7yEVjhjuAZRkkhuBtGRl8g89untVdmZykTINoYqm0jCgEYHT5h6nr6HIp5U/NtO77qliO5z8pBxj2PtTGiUSISpYKchGJAf2DDp1zn6+nFtWst76Jed16LXT03ad2LRy06bO3krK17LW9+1tdmSo4+Ri2QpIZOcOw4PXOAOwP5kVLIyKXO4lGAYsg3c44DNxhgM5PQfSqygoxIC7WJBVfmEfqvbJxycj345qUMCzoSAAoAbI5Y5xtGPm6c9lxmpd09dLrfe1mu3Xr569SnHVWXTpbRXXRro3vby6pkasxUKv3SxXfnnrwG9D39+p6VcjciNt4V9g+dccM3RQB/FxnH1Jziq5RSCeNzYyQeARuySMcHIx0wD19aYplAym1d2QN7bmIJxwcDkY64OPQnFTZu9mtlrsui1enbZ7287hyxvsndpu/fvfW23XR6bal4MSCGyYgMhTgEA9CPf+77ZPSmK5Z9pACcgrgD5Dj7x9QR17Y+lRszqq9GkIH
mbfmU/wCznuffJGT6YqRR93OQXOGBPOD0JyOD1x64471V0t3o0tbJu6tut+l7bNbEqF7NJu1tU5LtrbS/W3S3mTKyqsnljuMBhk5Hce+e49RjvUyb1VSzlmwQ/HGD2z2Ixz702ONduWYBlYYUH+Ef3hg5PpnH0PGLAKM5ViVDKCox944GOffHB49xmoutLPs16u3+a/A0jHV266vd/wCb/wCCxy4EYwpYE5yegJ6EZ6qOo55/u+s6KuArxgsRjI5Gf4Sfz9+vvThFGVXKZG3O1HIJPckYzg+mee2OKsxxK42iPaxB2Hq23qcnHI7/AOTWSk5NWT6X0S031107JfjqzZQkldvRNP3lrq4rrd3+7zegiW6EbmGSQfvcgjqdvb04P6U8IjBlKbiwO3AwqsOm5fb39am2MF2DORjlCPXseoz/ABAZycd+riki7iNq4Aycnf756jJ/TA59df67fmN2W/lby1S0sr/L06Ge0BEbYXDqeewwOeT2x+fWqrRnqQcYzyccntk/e/Hg569c3ZC2G4b6YyAPRh7+/fjjjFdjlSBuK8hgDkBunTPTPr16ZpX7a+lgT38vlru7a/13ZXDEZ9ARtBPTkdBj646ZyelSbzyDtJK8qByAc8jt07/p1oK/KemcjHbj3wOvsRnnik2hSSQOgPIyecDqAcfhx0PIzWUnJ3sm7dOtnbvftfz372LpPZd/LRpO/wA3r8xpwAAoJI4Azkc/LkHrnGOfanrnAP3SCSVPI6dSefX1wce/CHHYfMem08DJ4yTyPQ8d+3ZoOOuMsWB5JHPTn8OT9fapG7X01Vk7301S1tfd23t0t0sPYqG25zkjbt7EdeR798e+BUqJ8gYBSA2TzyBnnPH3vUY56Z4ya27oMHIzyD9e/Qjn3/CrEJIDbucEjbnqSeOOc+/fmgie3+b9NUreuq6MnGBt27gBwc5AGcjAz2Pbj1455f8AMcc87Sp5GT3GBjnOeCOp4xzmmEgqd3JKjKDsc9ccZ46/Xp3pw4HQg4wvYEe7c+vtwKDG/bdb+W3zvvbToIRkcffBIBIwQq9+/wDnp6BUzuBJIJByTjBx3PUZPbrTSWBJK5YHt059x9OoHuPWlw45JyDnAPQE85x3zzz1zRo07tLXV3226d9U/loOza+G6T07u9r6PXS/53sWiwAbBXJXAcAEseoweeTnr9fXFRfMcNnJJwVPcDPHt07++Oaj5yvBJGCpyPl68Dn3544I5xg04BskEn1I7DuMEdz17DPGe9JWSWqsla/TsZvZb7cr0fl06fLd6a6kqlU5wOCWA7g8YPXoT2/DjvLv+U4Iy3UYGD+vH1Ixj64qPYeSdxwc8Y4A68DI5zgdyOgHFMxg5G7BPc9QePzHA55+vFZpappu+m9trRVn20fRP77ltvTqk0rJNNJW6rpZW0XfrckztKsxyQeVAyMegx3GP5nAqyAPvbhsb5xgfNk5xn07/p+FYbQOhBHIweR0wSffPXHr3zUqtheSwIPAI49sDH1981vBXd+1vz/VXRo9la6TfVW00td72V92SnhjgEAjPzDOenLddowPXA9eTkpGLA8knjBz29Qf/rc8iiuyCsvV3+9L9b+u4adZJXs/h5t0rvXXvo/1POZFIDHdgHB2g5ww4JOAOvp1P6CFFAG9cMjnbxw+ehI6kY9M8jjIxQXYOA4O3g7MYBXr2xxjJJySMdCeiKMKrAEncBjuQeFA4Hoec8dAOeYpLTW72evl0aWvnr23MY3SXqtrWadtHa13dLW/pcmkKoiqpLFPlZz9088+wHtzjHOc0wqjuUQBguNrsuNwOdwI446AHoT0HFSx/vJCvBVVIKgjgjnLZ7jvx0HXvW5bW6zQAugj3jcrBQGOCRuwCSOe3OegwOrrTjGLbu+/V2vFXt1/F9PN3GybTTblpfZLa19P1av0XTnlsyCJHJG5vlU5CSqxOVXGdv8Au9ORyKje1kDhBGyfMDGSuAQ3IxyR83rwOwxXbpZxtwQ
hVDuHGCAo4BPXcecduOlTvpiPDnYQjLgNnfIGP8ecDJ4GQO3GTwa8mWMUGlLWPNbTyt062WrWvXqa3b5dWnJWd0tHp0vb5+foeeNHJG/7wHOCM8AAr6evUcY44wRilzlpAFG4d8ZyeOe2eOvAyQMAHFdhNpTSSbWjY/KSrLjlMc8nqDx7jJ44xWc+klkwqFWUnYCMFwDySwzz045BHcAc9dPF05xWyacfRp2V3pfq1bzdyHKLXvaOyTs9tmrp202t318jBhjbKuVDE8PwSrkYHHpgE8Y6kHpWhFaK7OCrbcHaMDaWI55ye/8AM4qeKwlDrxhCSrA/e9wQQeD2PI4+ldPZaaZQo52qwUN91VPGM5H15ORkjjOKcq6ppz5lZfy9tO7fezWrX54zqRi73WmjeumzSTWl9dLL8mULDS90aiSPaVIwB94j37nHb25J9eyg0xQ6tjKlQJAQVbIB5LYIycYzjBOMZ6Vp2GmzIpXYqMuCpbBYqPu44Gcg8HAI7cnFbEcCxyr5wDoAMAEDBB7gds5656dO9eTise53UZJ21tF3v8Otnou9tdra9POxNWX8z5tPd12aTve7iklpsvTs3R7UQhSUj8kMduOWy/8Ae9Tx+Wa7uxVVCsv8TEASYOGbALRrwCTwc9ue/Nc9HGqquFKguMNnKkjlQBjjPI6dB61vWhR0DFcyR/Iqp8q/L1K9ckdSQDnnPXn5nFxlUk5u6cvNO3w26/k099O+EXzO999dUkk24rTydktdUen6HdxqsUeQsylUfywHLMDzyP4VyNw9/wAa9/8ADsxSAQ5jKFfMQqoLLMoBiDEnqMsX/ucEg54+ZvDEqJcwoqko5ZmLDATaRvjz1DtlcDHOD0xX0FpPlrDBNGQQoUgu3ygPnedvZ3wOR02j148DENUpum3ey5rytq5NWT6LRevV9Ge5gqju227p2W1re673XT+tLmnqkaSQTySypI7sVEUXGQ2N8yjB3FMLhcgLk9TivGNfgUzNtX5FIZJyCAoIO1QpB685BPXnnAx7R5RZZWChg6M29GxGzHgKSc7MjhSQenOK808UW43Ax5ETArMrjOx/4ucg84GDtA4B9BW+BqJ1HHS0ko3eyuo6paW0X36Hs87trK6fKlJt3WzavbZJ7W210ufN3i2zVGmldWIfayqBhTkMdw6gDJzgDqc5Irwq9nkgaWPapXfgBxllIJyQc5HJ6jI9OnP0V4mhkaKdC7bEDbEU7gqDH8R/9B5z6HmvnnXotrTFfkUuGwTwDk7sk84xgKv1yfT7vLXGUeWScrWd23ayS26pu2v5s8+r/E/mutetnor7baeVul7nNzS5KyMxYtkBc9ScAOMdgO3XH60fOYnYnykllx2DD254Pue3eo523A4BVQ2A3RRjOfm55PbjtzVUKQ6kEjkEnqGPYDPBB4BOM5478+9BWS0Xle1mtNtFZO3r8xctrNt7W1XTS1u+npt6F8TMxZnkI6KQPlJ/3fUZGT7Yx0pCQ7sWPyblb73BKjuMfdOeR79fSJQ7BwR8ykHG0YIPBOT6YH5/Wr0cCON24YO0H5ec9Dt/3vrwB1rRxSTtotNFs9ndLy1e+/UtJN3SaStvve6vaz/DX8ijJFhRtU7Scq2c8k4wMDjOB9PSqLRsHJCliyhdrH5QQOueh/3fbrwc7NwjEMMbfLBClB0UdCR1DN+pPPWsoo4cggqSoPflT1JPucc+uPpSUb87TWl48t1ZrTVXfnvra297DXwtpaPTd9LN3v202V+3cp7QG3FQrZwBjjjGB6gH35yAeeasxYbzAuFYjeoxgDZn5mHToT0FI0YYtuRkwOqrlST0Yc87uoJ7cgejo1Rc7mAKk7dy/vHXoQffnhevvzylrpdLu320087W0Rm+l/K2vzX+diMx+W6u5ALZPA2h8nsR2Pf+VSKkbuyopZz8wbdlFIznJyN238CeKe7KzBcsQqdGHyndn5f93suce3OabEsi71Bjj2HGMfMM9OPf0yO
g7E0Nta9Va7fZuNmk7XemnrruaQu7vqoqyfZ2t8ttu9yysLKsjbS5HOOPmHA3AdAFxwvOOfrTGAJZeFkG2QFsKQyk9+R+nGB2qXzfKRSpInTkhTkDP4duc8VRmeU84HnEkgHgmPsQFzjHOTnjIOOeM6nM23e+j9LK2tu/Xv5m8JJ66XtZvZbLp00tbzdvTd0+RklXhgsmAjuO4Ocbi3A6knB5x1zXqOh6isU4LS4XbsYMcBW4IEZIOd2DleMe3NeN20rhEUt84KnO7cBz0wAOBjnnvxXSQ3zRllKujMoMbZwMDo2MfMeTn1/AmuOtBzvtZ6PRPtra21vR+mh1U6iVrcvTWSVl5XTT1vta93rc+ibfXv3LqoAdFO35csQvvkADBzkcjHTkGuW1PVCJAABt2bgCuCxbkjPUK2Mk84wOCCK86TxBNHCqLJhlHzFOcAdwvHBzzzn+Zz7zWmZJJBdE70D8gkqe6g5xxngDuTjNcKwsYybsldvo1rdJJpLZ9r9Xr1KlWW6ctUlfe+2itZ9PR720JddvxMLhWkRCQdipyEA5IDZ+YD6DPbFedSuZJFwh+b5jITw6ckZ4574zjvzip7q6kmkLMpVH5VlbqhztBz1z9f0qkrPyDghGwoLA7R1+UYHHPtk8gA16uHpeyim1fmjFKyt8Vtd9nb7+nQ46km29FdL3HdaXUX00bV1otNr76TfMGd1yMcYIyQTweBjg8emAMY6Uzy3DHKgZxkEAYI4Az1HTkde/1eWKgEYDOMttbgeqrx19+uePenbScOfmU4Lk/M2c4OenOeAO36ntjqm7b6f9u2Xp07aXOXm721016/DZfhv93kilM7mY5QdAfveip6Bfy75FOJQhsE5YgnILPkEH5uvfPPTnocDKnaWCL8oGQPlPQD+I9yD1xj0pAyq5O4LnjGMqSO+f4e2Vz6c1LTSavfW9rb7bu638+q3Iff3u9umjVltv/WiIdrblOFC9dxPG0g/KT3zgAHk9enBoRmXeMAEZwcZLA5AKnOMd89uvtSttJ+ViygZwQcAk/dAPQdOc8+nBpuNyhRkc4LA44JyBnHGcH/PQ3WumiW3bl1evnp8upejSTStZdNra313erurb2sMRZF3PgAElSwGCcevPXnr6U9mYs2WJA4O3ADD/AGc4yP8AHJpiBgwEgPB7fePofT06dexp6lQx+Un+HJXPyt/d6YY9z2wOKL7aJ3UVuvJ7db6a66fcre7ejaWjS3+Hq99NO9mra6Jx8toggyX3AkZ5yO59sdD29eSaaygkOoJwOSQeD3GfTp34zQqlRJ8nGAS7Y3AeoJI9Tx6d+tL8okAJ3qNpyQQpyM4Ge3r/AIUOOm21rN20btd915rVX9BJWbs21q9db3tdLby06ddGyCHKvvwPLLcjIBZiOgBJwB3B9ueasLJs3hsMC2eRkBsdFAAORnI6jp+EezBJAIAyRjJCg45GBj05x7e1KEO4ggjOMbjgcjgjOeDz6Z7cVDi73i3ZpK1tL2Tat2fqu6v0cuWTu72slvd6NNfNX1tvqrWSRJvJcMTuzj7vylkHGD1zjpnj6dMPHy7mdeFxjjLYPQAjGCMjJHrjg5qIB9wVsAjJOOB6jaOMg47H5eoBqZWeR9g+YkgHAABQZB6+x4OQfxpJWTTd/eu+lm7aet+nlppciS9Eklez6JrW67ry7dCGJihWUqCgJAJP8XQHGPzHcfSrCzAFkG0uSMtj5fmB+6Afp1yD6VDsAaUFWG08LzgkdTjHBORgfz4pX3MysCpXBwgP3TxgE8dOuec+wpuF9Lu0rXVrvZdevR21egSUZPZWtdO+mjUkrbN79bN+Y4SeW6u4weev3WI4G31U4ycDtnPcSiUhGJ27mGUYYOHGenoQTgH+nJqsN4UFMlAR8oyo7+mTzzk45O7HPKhJGjI6DghgcEHgnA5IDADI53Adaetrdns1orW11td9PX1E4RaTemqTd10fRLdd11TW5KPMPzNhQBhmIAy
DnHIPJ/u4A568UO+S4JKlSgGCRuU9CvJGQeSR9MdwwOudsnKY9cgnHCjkdT3PTrmnAx5ysTs+7G5m4zkYySCCRzgnHUHA61N7vqmkn1TVvX/gr00Fazuk9tLWS3W7vfo07baWs7gF3IFRSHzkndw23o3I56nA4/HiklHzjKFXGGAIwB7gZyACTzjg9TzinKxRwGYc5Un2AGBnpx0wemD04oZvNkZSxLBRsKj5SoHTHQ479znOMnB0jGOqfVJrbr+Tu++t9hq99E7cvMmm9n69vvt5EZclFjPzDJC7Scgk9W6ggn6dO1WIZHX5Q4jbOHGfvEHpj29en9IFT5CwABDZyeCQOCAPX2zyMjNNO1GRkJYs2SWGACAOT1zyMZ9ge+ApRTTTSVlo1e/TbS3k/Xbc0iuZWSv3TSfVXa1vpprve1rHQRzcskmS20MD2ZTzwRzjjnrgjpyKfMyFCRx8oAx03f7XH0zx+GazIrhQx/eYBIYgHI3LxgcZUfQEA9uTUgmZmIZxg/w5xkc4wADweuRnPt1rkcXFdF100TtbdPbzV1e26uzRdOltvLvt6a27EflbW3sFwSN2Dy3bI6cZ5P8A9ao5Qu0jA6D5gOWb2P8Ad9eg6DBJqR3AOMEqDg5YnrnkHsDzyRz9Kpu5wflIIyoAxwD3/D1zuHTA5qIRqc/Na60e/klZrfz+WthSjdXTs1ZXvbe2m7vt63e1rkZbG0qeQTkcbQR/CR3+p4+lSo67CAG3kg8gHJ54X0A44PTrVaNeAcAsrMSc/LkDuMjkccHjP0NTqrMhbdhmJbc3y4AxznHXpgcd+Diu+MElZq97PZJrql1u1Z3d0ZzirLvf02s7uys+q6Pye5MZHbA+UNnaUAwQoz8u3PJPc565601VOWYHa/3SOAGBwev3fT65weaibKyBiSz7hl+nX3zwTjkHI470RyMrEnHl8jbnlc9OBg8HOT3yMjFS1Zuz06ee19uyfp066Z8ujtay1s9G22rpbp2W2nrayJ3AVjkMGAV1yPl2ngsR0B+71yOaaqttypG3Izx85OT0A65P09cjmnNNgghyoKlWbbkMvpnvj8P5GpFOSSu35kILbcscdGbnGccDA459ql/h1/D5Wt8XkTdpLTtvd7WXbR9bK7XYr+YoAXqytng5JI5xx0x68jsaFCsFk27gzEZP3uMZGeuRxgYxjv0pFBRkkChhgg5HBHcsc9c9ABzn2qZYzucyYQOPl3cKG5JIAA9+entk1z7TvZOyVu99Hbfpfy/GxT5U9Out7q7bajJJbrvvpYikCqcqSWzlsg4x2XjOW9WyOw+pvYgBAu7OG2gAnGc5JPJOfmOB1zipOAxK7nYZABOPmABJzjheeB2556VB5ZULJhF3BioYktjIzjB+UdOox24xxrHvtZ9ls7bXa7W3v+Y1qrS6JLVXve1tN7ebVlbyHEY3Pt4PBPUkHqeuBk57ccjnsxUIYEKEGRhm7Dk464JI9up7VLnYiosiliSM8EMeqhcnIYZ+YYx1PtUTmQ8Zydu7aMDI7kf3gBy2Rx0wTgVqleyu0mknbRq9t/Nd9fPXYjfRKyTb6vZbXTW2+/nfZj943MzEADojHHI4O0HOcA5IPJ/QzeaRGqMwJdgcY37iATkZxjkkYA9emcCrtygIUZUnLAncCT1Yc9BwDx1595Y/vBeu9RjccfNjuQp+Xjke4yTznNwfvXd7+Semm/l6ddtdAajo0tVa6va1kuvnrfa/W25Mjs5GNqkcRlCQMDG0NxywJIznIz06U4RttbKMoYqxz9/PJ3deh549Mc1Gu1G++E6gEjCbh12n37jAAxnNSbpQvmYDsSNxBDMRzvX1BXgE44z+WSn7NuNtLR9Nd3fVW/N2uQ7393Z2tfRK/a9l66XvsupaidWRYSwJXq2/5s8HjI6DOMcegrRE5dZFBCjCoMZUKB/Ew5yp9c544rCAUPsRDuyuCwOSMH7x6Y9T17elWgXjZnYgbcblA4HUZ2+g54P
1o5V0Tbt3vfbey083q9kVa9n13S5bWej6X3elt99bbaclxIodTLuYkKjbcguQSGBzwFA9M81mSOygu5yQ5MpAIBB4Py9mwO3p9TTHmZnjVSAi8HawzJk8nnO3npnOAOpFQTM27AIbJZyVbIQDorD+9zz7/rlGDve+6d15aWvs++1/PRBGHNvra17aX2sr9+qTeu/ZCmbCs65JJDAHDBlGeSc8gHHGPlB6DpRFvKH5lO5gQCcEkH+H8DxwAcdckCoA+U2ALjOVyAWJH67evGB2684ed3Yc/KVAOCO2OMcH25GO3SuyCstop23tezVrfdbV6vtfSzlG2nw7Oz1vaySeum36dS1uJXb82zPy7s/Kx/hGB1z16Dj87duxUmST7mQmATyxHU8HgdcDHpVQj5QSxOMZKnIAzwOoAb6inROPmTnaW3kbSc7eRz6jnnHHfOcU7NvTs2/Rf8P/AF05pLR+7bra13ZtL79nrvZX6GsZQgGHGCRlsb4ww7ZwvOCDwMUM4KNIU3Ex7kbAwCvoB0Jycnnpxx1zzJM+SMlUHzRkDgD+IDoeBy2T646U/wA4FdrEpsQDA4yOcnHfHfnJBxjBxSttdXV7Nu1ulu2r7Le/lrCVtGnquj81e+vpe2lhEV4mVyyBQcHnAb/ZLZ69fy4GOrWG0MB5m5QXfaNyMp7r16Y6jpx1qbEJgG923MxCYOAxPTPUAA9unXJpkjEgbQflUjKDDEr/AAuewGRzzyTwOK12fu63t8lp87WVnpp11LUrqzTetvd922q1+fW6uVkXa0btkg7vlbJDKcAEnpgc9epPtTwxjYlOrHCHORuOCUHTp+GDjHszYWA8okYY71IJJ6HrnhT645PboaXlSWVfKKspUFuSQfvBcHGcg4OenrWiXTu1ZPpbRfir/wDBuXJddL2tayXZ7t39Wk/MmjWSIMQipIGVmbHIBJ6Y6Me/rxnA6z+a0cj8MGaMfKPuAMCSxznHPU9eh75MBkXkmUiV8FxGu5S/cHkfMe3cenNMe43Bi7soEYTaqffXjI6n5gcknnHoKTinuk7J7rZPf0ISu9tbp3s9Hp1Xy2+7ujSAIiFgctksxypJOCBn+EYGD2qOQZPm7TjB+YHIBHDMp4z2xjHOelMcF1UFcorMpG3a4U9CCTkg45HX3HFIGTCpJwoPEakjc3TA6kAe4x171LSTVl05Xtt0WvTS367FJdr6vVJX0627N/NfiMQESB3YFT1VTwxHZj6YHIB445FS4aPeVX5cBsj5gh6E5xkE5xnocdOtRMVQhgCqsDjLbxk8HnAIbgHvxSRSEFvMY7SwACnkhcnA4I7/AIn2FNttqz0fKrW3V122VraPfsaa7pu1klbom1ot9Ut339C2jFJlmO0x4BKq2HY44KYz07847muv0ad0eNYWWN5C37w4dyV2kA9BuIBBP6ferjFJR2dflBwCWGSASc7TwPmx7dORWjZyiKbzJFfjdtOSBz3DcjI5x0z9M1hioKpBwSVm0rPsnG3z28k3saRfLOMrPl0Tt5NW2tte7/E9ttw7pG28McRMyscqrDIaPGfmPI2ngAnp3roI7fZtdSLdAMEOfnCt95gOyyYHHPT3rz3S9UiSCNd7kGMKyn70foykn5/qcYPPPSuvbWIWtUK3CMRGqu5+Z1fnchBA3HuPYnivlp0pxbVnrNpaPZpNLTps1p1T7nenC0feSaV221fTlv1v52fX8K+pzxLJJEQzJ5bMPmJjLHlSxxgE459xkjBNeV6k7S3DHZzlnDHA3HGCpx1XA5+7n0HBrq9c1ZpWUIyZUElcgbRxnOB95uOc5HTGOnCXUrSKznHI2NzkqBnjp1J4zjkA8cCvUwtBxjF8qu+W/k7x1269fw6HFUrKUnbVN2XXW0bvTz1Vr2/Aq7mK7VO47uMEkZB5z/u5/XjoanCuMgqARtYBcjGORgf7PcduPxzEZo3XIUhWP3cknP3cY6jg5PXP1zVppnA37gWYMzbV3FScDGD
07AH/ACPUjSV3ZRfd631trrt121/XCUuayTd773328r91v2epN5gR8kgMSxwx4GOcKPxGfTgZ9XCYNuwSSSSpycE9iQOMdc+vesyRgdrHJJAI3dR25HYe2T3zSJuLkAbVxzjhjjtj1Oc4x0xzV+xW/VJXvqrJrZd9k7bjVHmvdpPTz0Vul189NXa99TTiZl5DgsWO7d0K55wfYAAen5irKuDkLJ8pz6np7duvHT1rLjkw6B8lMHjg4wOCR2A+pH9LpdAWCkBecNjJzySQeOST05561lOjdN2Wtlvuvd8+yfR3RTgkpX6Ncqf2krdPVXV1prbUkMgXarNho/4iMA4HB59uue/Xk4pm9JC+0kOV3gkFUZlBGRyc9eCfxHFV3clgPmLDJQnhSp/vHJ+nr04ppZgyq6qp6luhKrn5QRkLnPpk/Wq5Nu6Wnm1a2n5pdzJ01LdSSs76ddNfNeXf1RGEwY8HJLEsSMK2c8dehJ9v05CjhyqtjnIK4AX1JbOB6HGegyBjNSkkZwSV68HMYBwSOMc9P6dKjwVGSTxnJbkgE8AKOvtz79c11Jp2f32/G1yX+i1Xn9+33L7xShDFzIMEBvL4K7h03EYwRkY4z/R0iKEcsyCRVDLsORkDq/HykZAyQeOhqIs+SFwqqRwBnAxkFsHr6kc8ikBcSYZAMkHhgS6HK5UgdMjnjOPWpbSad1o32fbTXs7O2789LZyet07PS6dk9Lbadr66321GRqQFZsMrMT1JLZ7EEcj/APVzTjECcopQID1BG08Zb1OeBu5Bx2FTBSEdCuTlXySCFIyQeO4GCT9fSpcO8bOWADqMtgD5B0Gf72TwMYcZ5HWlKSa06rz3Vr/q+2g1PmeiS21d2m3aySSs+tkrLe/lQUMr72KjfkkAgLuA6An889yOcYqwhjCMCN5X51cEnLg9ADjG04+bnHvnhGjVpEwCyhTtDcBg2M54xnjI6HvTioCEPG4XG1CnBAPOMDPHv6Z/CVB3UtWuXbTraz3T66fhbc35OZXV1eze2iurq176Pp208yOOIsEUsATk5ZuOvAPq3cdgM+1XfKCuwklwPlKHduyOT90AbSfUHjvnNUnOeAAAec4zszg4zg5z2PPPUHNS72kbYGLOAE3Y2qy85yMn5SAMc8gjmraffTT5LTW7vfS++6fUn2b0eq7q2ydnfazv2Tvr906S7WZwF2vhAMktkcH2GMgn0x0PbQhkB5Q/Ku4bmGcY/QZyQOf/AK2dHFk7nAHlMQqsPvA9Wzk8jHPP6c1oo8KKyjbhhngYy390en1zjniuWo0mklrZbNNK2m13pfe2nc2jF21vHa0mkk7W297XTffr5IuwL1ztAPPHGW9Pp7/kK0VYrgNuyoGOAcA9DjPI688YJPPriwysx3EEksVjxnAx90Hjnvkn6exvhyd2RmULhdoySR0/nxjr3JOahuW+ivZOPa1vlpv+HcFrdKSlyvvey0uk1dNJ3tbtuXfNRQRGAecBnwWDdcg8YOc9On845HJUksCSMsOxAPXnpgde/wBehoyM4YAqw5BYc/KT90d+OD15HH0pjTEMSx6lQRjAHZRjkDtxzz7UvaSTSSej3urWXLbTXv8ANb9SuW8W/L7LWl/PfS/btqSM+4HoQThQOT/s7m4IB57dsmoXOSApIBBJ2tnGM5GOBknoOhwRzziMzgIxyu4NkKOhI6EegPYc46UwMoH3wFJUnkfMDjPfkAgcHjjGOlN1GlrrqrbJ7pXfRrrtpe9tBOPLa6WykuVd2rtu7S0ttcViTnOV6HjIOBnA45wSD6En2pdobkvgd1Y4zjIOfwHHPPH1pBICFOQASQOB16jjP3TyOBnPb1cykjIHyk89d2fbocd8e/XFTz763WjfZaK172v16X6WBRespOKSV0km3Z202V+nfW/mJ8vJBfjjPTnt9QccH+VA2jg4PO0nPf0x+QzxSnATOM8DsPmU9gfTOSO+evWmkLtxjAJGOSMdD/PkY5OfwqH
K90k0mtW2k1qrLzWt3+mhPdLa1m1bTbz0s9Nt+y3eAq5zwD/CcHPOcg/05OT361LleSCOSCAuQMjqcdMnjPb+RhwQpzyRjBHOAenJGMHnjp1HenAuBldvIxwRnGeg9MYJ78+xqFzJNXulazdna1rarr5X/ElwcvhWqtpdLe13+Frbab6u7k8zJPChxgk8DvxzwM+3vwOTVmMgZBBYKcAHOckKQceg7Yx1Hqar9CrE5CjLDOcg9Bzg5B7jJz09DMrLglcknPUcZGAQDngg8Z7emMVcY6qTcm7aJ9Nr/wDAv3MZatNRSSdlolouVerb7+tuhI7bj6f7OMfjj8u/emAllIPY5BzgnHQ5/wAMnjjjoxm5AbJJByB0I+p4GB9PTIFKhUkgBmI6k8Accgn0B9Dg9eKJQvzPdu2j2W3k30/yRtTfKpJpu8urtbbS9r9Vp5W3HxqUwWw2N27HdjwM9M47+oK+hqTecHBAyx44xg98dRjHf1zzniPY25scgjkdVHbGeAcMeP8AHOEMbZ6t97kAk49uBz+GePQ4rKUXf4n5K2nR3v162+/UlteV7p21s9Fdrp33TutS1GzBcuQ3fGep4x9QOv50jOx/3T0BAzkfyzwenGO2KjUAZGSM5wMHgZGO3HGfox54FLtPDBiVzyckkkZyMHr26fmc1rCL5bPVt3Wi6pW7rv06hG7vr/ndNWdn0/P7iUBhjnliR2OAffjgADn37VPGwHytgtyTjke3II6foeeOagwucgknp1PC89B3Izx6de1PAXG5QcgbcE9OxJxyf59iefm3pQatd3ureTWlrq7t6JdNSrJW91Xsld23utdL99dVqtXsWCSWznKnBbJz7HHt15yP54KhBUHIJY4IIwducdOTwcc4x9TzRXVFNLf7kl0Xdf111uTKHM23Z+q/4c82Lj7iDBDE7sAsxHQE9SBzgD1pAzcsq8/xjHII6jH8Iwe2aVo8AnBVjgjBBC46knGMEDjGeRnPWo1R0/f8su4DII5BPf2z6jOM5yaI6OWm1uiWtvu/TrszKPLJbptqy11b03eu+y7NdrD0RxllJUY+ZwMDkngnOScj0znr6DpbKWRRy4YbVwvXCZ55PAGf5iuaL+XkbsAkB8D5QxJ+50xgnpjHbgVet9Q8new2uygHBG5pAOjKM8gc89vQ9uXESc4uNknZWTbXa77dWnvf11LS5nqtNNVvpZa33to9H112PQ4FhYZwxZwqjad21sHOBxg5PU5x1x63kiYMoIKRBDtQDDEgfKo5/iJ5Pb1PNcpp+oxqUkLFXdCdgGQQ3UnJwCOMDtzn1rqYLiF48JMhIUBjuBYMCeDwOgOOnpXz9WE4u0k9UtXd2WzV9EmvMt2d3e91dXdrJWTVlbd39bJ7iCPc5DR9AzEk/KqnHGOMZwSe3QngVOLKKWJ9wyHTHAUMwIOQDjPHGcjoeaYu75iPmLZGAQPlHZhyD7EdfTpV+BXVEwQ0RUgKBtK9OOc+/wA2Bn+bhGcU9eik0nLmsrX07vr0MZOykr6t3eqv5Wt1S89epixWMLNhQCGXhgCwZk4Ax2ZcnHUE5JrobaygJRo2+6SWUj+P1PTB7985p1rblXClQNuSrAAKQeh7/MMcuMYx06Vv21mrRyMNpB4YLyDjq5b/AGc54HIPOO6xc58rcG7pK2r1T5bvruvTbsjy3Vs/eacNW03bq/J3d9NL2ur66hBGBCCAS3AIHznaP7vTPA9uAB7VYa1XerCPbuwpLjLHd1CnOMHHPpj351LW3RlRDHhyuYynVh1A75OAec9/rU88cAhJDIrJGQQeCAoJPU8npz35IxznxFKfPZr4raLRWaWt0r6fh53MJyV0+6s27t9Hs1otnfy3MF4G3Fd5ID4C8lccFR/s45+ueO9bVrlIiCwAU/KVOHB6Y2/3ucZ79cd6wo51dlZSAoZju3fMMcLwRkE8nPOB36bZft58zBZQD+8Rz97g52+hL8YPfBx3x3ypS5LuOyXTfbb
fZX0fr0M4Xbjto4trXa/y3fRdO6O/0a9a2li3kjYG2ucADO0ZbHU8Eljzj6jHu2ia3A0Ef7xHaJQEUKAoHqrc72OckkDH418oHU5YpF2SpIWwxTO5SP4s+4GBnqOw9PS/DOs7oVEbFWILIW5VGGNpbPReuSBg9eMYPyuPwcnJzSUrySttp7tnfy1tse7g3q+Wyvb3dF1Ttq76/n5aP6UGqRNHKiNCHfaZFjG92cA4JGVHT7p7ZPFeYeI7k7pXEq7dhbYRgKsXH3sndIS3oox2NQW9+yrJIW2lwscrR/IsinO51wW2oeMjp3z64OqXsREkbMrFo3lSQS7lTONpBwN2zn5cAjOOQTicFTlTnrrflTW978q0bu72s1bpo3c9iFktm01FySTVnaK07eVlfrfoeZ+IrvyonACJuUGQNghXOdrD2PP5Hrzn5917e4lLEEZB46OAT3PJHOBx05969e8VXCN5qq0cvyjPI/djnJJ7Rnt/dx3zx4rqk2UdmUEHjYDkEuTjOM9MZH1PpX3eW+7G/wB65dbrllfXV7Ndb77PXncG5K7tJOy1Tulay318/wBNjkpNjNhQSueF6DI5KnJOcDpz64PGREy7VAXhT0I5P0GORyOn1/CaQMCM8nPr174I4wB26Y5+tNCMxPDbhggA8ALngDpk9/oOK96MtLyur697LRdX3voaSilZb6Ju9t+2i20X3LdD4Qo+bBJkyMnnj2HGT/h65raVVcHPy5HReASMfNnOcdB6nGQayEG1T8oByWBIyVGeOeOQD6cgDjnNXot+0nOF6ZHG455xnkk+g5PJyMVrZO7uuu19b2tb8fP5amajKGy3s3d73um1o9Oq2IpnUOechUYHB6sMYyMdeeCRxz16Vkq+GIf5wcnn5gMZPynjIwVz15GPXN27cEFULYAG7jHY5BPOCCcEe/0rJDFmJ+XyySu3pnHAGAOB/e4Oevar5b2dtrJ9FbRfP187JsGm223ZLTRtafa9fLReuhYYI4V2ZwkygAAEuGXo+MgHnOOmR7ZzEz4RQSHZWAAbkkYztY98cHH5nFD5VVLbflGMRHcpIPA65B55PoMEcVVyN8W+MKGYupJ5ccZwx7A57Z5PborO7+Wl9FdpJfhra2iWupLtJppuyjd2tf7L1bau7eWnqXo3hMv7wgIq5KEjG4dDnAzjLYGOM8nmpx5KkhmAVlDI/LEsBleegJ5A9PfmqTL95gwSNwGAj+cBe+7oQSRkjB/Sldi4KMW2ogETZIzzncexHtnPuBVWuuV3u9LPurbaarv6erITV9L/AA2av1Vt3qnZt31vt1IGkctgqgBYgMThtp/hJ6/iMHjjoaVMSF1KkuhLySA8bAOiHueQB2PrjNKIQVO/gZyGJyuezLxk/QheSTkU7ZtEgLgqyjazZDtnHyjnPbI6jrUONm0k7tK+mjacdU/zvp97NfaRVrR7XS2W17b/AHPo7vuV4WdX3EgRhsgbsNj+EEZ4x14685rS+0yAb/P++MblBcA5xjHGAemR/wDXqlsUZCNvK44xhR3Jc5HsAO/amvgMGTb8+GI7KB29lOefXAHbFZSgnf8A4G+munp590HO79lppZWlta++tuv4F4XLrkmUKQvBI2sd2QMYJznqPTjPrVWS4kYZdhjd90kZK9gB3xzk57jij5XyobngFgvytjtjPIAz+mOOKiKKuGKEgfK+R949wD26fh654qVT1d9b6bJ9urV3v5+vQbqX3TW23nsunR2Wt1tpoIdrFXYv5ZHCtz8wztI9B64I7YPAp4CvHtOAxYN6FsZwScHgDOOCMn6UyRe+3AwCoH8Iz/EMcdOTwc44FJHkMAcAdiM7ySOBjH3eM5H0PtpGOik9lbR90km7Pe2m/wAtyJNtb7NNa9lp1Tb1fN3ew5wQchQFA79PoD79scc9xT4pBskVgvIyOM85GAoAHQgYPXpgcU1gzEKAxcHgHpgDJ5zz+nX65UK3GPlK4BGDuxn1Ht1GMfXNdCs
1e+i0X3K29vS93ZepHRXfbZ36r52b6iksSMknaMMSOVB4+XHIJ56cio0EnmMobCkHqMnHPI9eCT169xzmy6xlSd/JAOxGGCcZO7sGx6+oHpVfAjyWGGBJVSQflP0PHTk98YxUc7b91fJ+bS8uu3e/RBF3Wi5bqyVl1s9F116rvtew8KoLkEnaM7cEKTjnp39B+tMfqNrEjJMiEcL0xjkDPp0JAPQ9JQAFLb1UugAC8jPTHPOcHBHGDnJNVpCA4YZ3ckIxwH6Hp0PT1wM9ccgUr9Fr0d7u9lrpb8NtLWQ4ttteqWnWy27a31vo/K4oKFFC8785yeMk8Z+n4Dk5x0DpMcbcD5RkKOMgc5PUkHg56fzYhYq6hUUuctyOMYJx6e3yk56daGDgnOQygYBwFC+in3zgdh1PGaSldWajvy3TbSWnVrs3108i7NPdaO/vO7V7LW3a+6XS/ZDlJCBi4IBGEbryTjcOnHp+XWiQk7cEH5RkYwqbu3ft357dwaSNnG8fKUPYqMgfXPPTj88DpTyEDCQMShAPlHJUnGNyn6knGMHOCcCq5ru29ra3v1Vn1/F9LeqdlJ6J6uzSbT200Wn5d2hiu67d+Cq54BJJz93HXgYOR0HHrTyeS+4lm/hI3BiegOCMgcc/lzjJsRcBXBOAwKnIz1+fPQg+v/6o1yp3nA5yAO/pnjjOD/LPejlf2Xqnfuk1/Wmnr0s93okttNua9nZq731b/To5wwb5slhgNn1IOBwfT0PNQqzxuvy7Sc4PQdiM+x46+1WGEh+Ykk8Fghzt9jjow9T6+nNIUzg9ieAxyxHY4GOmM8H+lDjvrvZPtpZN79l3v5ijJK6aTT0f4b7+tu41WK5ZmJPIY4B4OcBRzk5AwO/SkUndiRRtySGBO7aRx9fQ9PrgYqYxow+8u7aMLGeA68/vOODjOR68dOKaYyEZ/mHzDarYywzjJH16etJxu7XeyvvfdJ9dOj737vYuvnp/dtslbZJPW2627MYu7JK52jhuOdp4IIJ5z/LJ46hRIp+QqM5wyj+JT225zkHoc9SeOuZSisGJkUMUDAKAAHHQH1OCcHuTx0AqIhVdcAhyNzFxjfkZ4I5we2c5HWm4uztv26W2381a/muuljR38lpbWzVtW0vLR669twdFxGQrFcnaWxx04zxycdRj2zyaQbwN53BSMgA/KPUnHAznBHfHXvSzEl8oylWAJTPygYIIOcEH/E/g2LcE2PsMaEtgZ3noFAPbB6nHA79cyrJJ9W7afypWW29n0t99wXwpt76tNu9r7a9brV9XrdCPtIx8xQHGSMAFupXOcr+RJB6c1IucKobKx53SjIAHGFxzzknA579c4ocEsm0/u5M7kfDYGAPYD13AZ60+NWIMKujcFgGyCGGNvPHQZwD2znNarpbRNrXZLb01t/S0LWkVtrru2t0tdNHfXsrrzIXlZmc9NpyxxtG09TjnDZAyc++OajRlDLI+fKyRg/dbsCOhGDzyOuParbxRnnzN7H78YGUyOpPY47A9s44zVN13HG0kEll+XjjG4cd+nfnpxk0f1/Wq/rqjaCvd6p7bNOzs+q6pfh1NCNhh41CvuJKPjjH149OR3zU6xLuWRm3YwCADjng4PTIHAHQc/WqkW9UyAFC4BBBBByM4x29R+AyM1cEzKOejYGRllAPTHpjHoQTxXPNXurdb7+avro9V1tftru5JN3uk9m9ddvyv3/W2hNaxLG0g3EbdwPJ+oIGMEehKjNY8kKN8xVmDfMuflJBzy3XI44/vc8DFaUtw6RKsbK3DZVTkAd8AHr374IrNkkchACpLKFK9SAc9B2HAJOOP1rCDnHnV3Z26tWa187a6a6t9SZNOzV7tq/8AK1pfzbS7fdsQKgBwnTPJBx8wGdpGOnfnGcUeYyqeUbklkI52jhsc9BuBPYn2pwDbgjAhiMnGMHIGHJ6DPT275NRshUsUTkDJ+YltxPXp908ZOe3cHB6eZ2SbT0jrfW+mz23
vqyGov4krdn8ur2Wum7vb1crnKrI+TG6ceWNx3dmIyMgdxwcDPPUx4AUAjdggnH3jngbhxjGenIyScHg1IBI4O1Tu3ANHEMEgdgPT1OckY444ux2cjDeImI4DAHLg47pjjtls9Og6Zyc4rRtJ7b6X067rR9t9OqMnyxttvayaV1ZWts3u7tb722M5w3AwVDD5A2Qq56EdiM9emeScdTMjfKwZ1wF2gg4bJ43DHb2z2Gfe3cWMoQyqjEEfMgBLIMd07gd23fgc4FTyJtuFXHIXaDhg3cMPQZz1yPTApc6a3Vm7XTvZu2u3RvuvuQrKSVmtGr35dNVfW71t20e+4xXb5EZgFHzID1Y5H3sDoeMnPsMVIxZ2yo3FAC+4YALZzzk8DHXpx2qSK3lAXKMDzjdwG68HjgDjrjtn0rVismdS2HLPkMsYyAe5I7gexyB+VYqyfNdNO1m7X0to0vlpp+pnOUY3fu32dmrNtpXat1tvfdmXEkrNHuQgDcA2crIw/gU44XoRxx071cjt98cw8v5ozv3DkHAOVzxgjgZ7fQmtZNMPBQqCj8Ic8HuCOMEYBxz+NbAtg0M0WMSyKQwjHy89d3TkYBPPpk9q3jKNlZPo2rLrazV7eXbXXrpzuqm1ZaaJ8rasr6339bXd9rbnnhgdVIIH3iAVGWDdvb5vY8468iq7fIoBU70OXIOTuHy+3A7g59+RXWXdnCA6oSzRrhkwQh5++T3wCccE5P5YNxAkW7Y5dQoI65A9MjpgnjqfXmuiKvspauzbV1dpO/Xfvr530OqnVg7K7vzJ8qVm9FZbvR3/AC9Smu87MAGN85yCrPjtkDPXPUY4PsBKrsrMgbJBxtVdwBA5bggDqMnBxx14qIqAoYEhuOCefctjHsAfUdDniQB0kDNtUdTkYDBgcEEdjjPT8uaU42stLWtd69b2009Pnc0fK77WtorW1uvVfgn3tuPYPvAbeSyh1iLfKAxBLAAf0yPyqdI1KGVRjB+ZOQTkkMT/AHAMccNnrmq6uwfLBVZWBznlwQflHXAGOvPWrS7syOpdh94hEGNje3cjGMnH0545uVO3dPW9+llbR7ar17oSWyatotuuq22Vv+C3rqlZ9jBoyC7kK7O3AbsMkDCgHhhnpycUgkOcuvzk4dg2VYZGAB6fjz3OKVbdZgWAJxlYwfvbhjhDjnHOeOc+leg+EfhZ468d6nomheC/C+r+LNd8Q30VlpGl6JZvd3V7PLxHFHGm0eacHcTxxnpzWdfGYXCU/a4qvSo000pTqTUIrRNvmk0tm2/n1E7JJu8Y6JydkoWaV227JJaXenby84nkcSNsAKlQSxXgeqg5x7k45PHuIS5YZVizAkYB4J7jkYO3+FeM5NfY/jP9gv8Aay8CaE/iDxh8CPiD4f0aG/trWS7u9ImCKLgt5DMVXCRthvmyRw1eb/Eb4BfEHwGfDyXPhDVZY9d06yvEutNsLiS2gS7LCKG5ZEKtduVKyMdo3IFA5xXHTz/Ja8lChmGDqN3aVOtTnzcvLzO6lb3bpy95dbNlRxOGaio1aVRNuPNGcXFNKOkpXSUtdItq721sfPIkKyAk5xggkc5/ukdseh6fpVjzS5Lh1JGN4UZY5zliPQev519+/Cr/AIJhftm/GnwP4i+IPgX4H+Kr3w14atby71TUNQSTTHSGwjEswt4JoGaeQIwbyty54wTnNfA9/pF/oeq3uj6nbz2eoafdXFneWs48ua1uIXMc0MinJDI6EFe/HzV24XNcsx8q1PA43DYqrh3avTpVoTlTdlo1CTaeunS99dzVOlVpRq05KcJ8yjUg1KnKUJctRRmrxlyS92VnZS9167zqMoGOSpwcgYBbAwu09B6flzjh/mOocDG2T5Tn+EepHbPB74qEZKKV3uVQbiOCmP4SMnLHkE8de1PXP3T0Ks4Gcknt+g69uvfjq5b+vZJ9Etfutd/cnocU07t9H+iX9fJilioAPccnk4HUBc8EDqM8n2FSKH8
vf5e4AkGUj5ivU45G4g85PTJGeOIWALBgCIyQGHdSO/8Ah79McgOQ5JiVhtUMRn+PAGAw6Dv9c1UFo3v0tp/X6Lr0It/m7adl9/Tb70TRv1QsPLZCCAOnTgDGAxHU8bvr1lWVAGAO5QoJYrguT1xnp/tH1+pxWwmMo53IQHj25Uk9CH3YJ68EfT0qXdhkGNihWKY/5aEfwnvgg8jnt+Gi0+aSdrK6X5Ca0tteza67319fLR6b2I1370dtu1c/MeFKN91TzgsPcZHP1p6cCZyhZlUybhgBQBxjryMnnkHAPSoXLYWRN3zlgyA/IMcEbexGeufpxTkZ14cqQFx6uynpuHHA7kg4zwOlO19U1ayXpt32S0WnR7vpemtrNO11ezWqvd2/4H3aQRh1C5AG+TClnA5PAD5HAB7Nj8qnQ7Uw+Cd7b2LDbu4+4McdgvbjPfFDhGRi+4swRtqD5Rtzlj1554xwDuquGJwoAVGJ2FumBjbk9evOcD8c5otd2v0S2d79VprvotPnsVbn1Wj2b+S77Wu/J9S2w3CVj5jBo8ocgBSOC7Y6kZwvY8g1AwEb8jLEZDlcDY3cdQDxycEjOMjJFOErxqFfawXCyEchRzgq357gB/ICmMzM6gsWQfd3fxB+Qp55Ax6cehot28vx9L7PTv5Cinez212e603tbye333Kz5kfG0hAcAZIyAPlYdeuSSe/uOix4GEdCdpJLZ4PcdgB+HX2xmnlAfmCnIzk7iR15LDA2jj356etEZypABEbd2PXHU7iOPYdRzyc4EtLpp6aa6a+t1c05ko3VrK1urWy2d/mmrehO3PmMXYo20EKMtxwcDIyORkA/Q1KHTCLuDAEAA4BwvX5fxycHqcDmqxcxgKTtYYXA+6F54HB5POfXJqVMsMqo+ZhtJ5IHdsnp6j19aiSWj0dvPV7er2v6LW/eW3pfa61uldtLfe+72fc2YbsxOGDhUcKqjO7CkHcyjjYXxgj5sYAPU1ei1UlXtzK0cbFnVUY5JGM7W6hc44x688VzW8u6KQOM99qlV5yD/dz1zg8e1So5D7VUMy8Iw4GBjJBIxyOvbjrxXO8NFJuSu07pWV1Zq2uiv63vpfbTRPmXM5Lm7ellfrp9+iOhkuUl3HLs4Xaz8Hd1wWP8WCPQHqSfSg/l7gAd2X5RsAFufmA5yuScgn1yeM1U8503dN20gcAqgbgknsPUgcdD7xiRhIBt7nsSWJ44bPCjtx9O2GoqOzsrKNnpta/Xy0+TIcLy02aVraJp22uttX69bXu3zFULYyCCATjI3H16EHpyRjjvkVVWRgwyMAkkjvkemRx/+r8NEwCdFcupPRQoy+Qec85+XPTnnJ5yRQ+nyoylAZRkE4zkE5IB44J7cnPPQVUa0YtK+t1q9E3pt23W9/Ip4eas2k1om76q/Syd7/fd6LVEEcYkywxwRjPLYPUkkdsDkdTj0p0i7CwzyBjGOrdzjocjPQ49MZqcWd6rbhbzEOQOI22rkZBLZ/i5/kB6ktjeH95JBIrFxlWVgyqvfpwpycHnkADOCapV43V2tfPVXsv1Xk299jSFOq5Plp1LR6KEr2XL0ttv69CoDz8ucnB5HBycnk989Pw6YNTqxV2L+ZnCjYPu8dCBwOvJHXimiK4BO6MpEWwXb5VVh6tye3BweM+1SMx+cOVXcMCRfuknPORjJ9TwPz5p1E+na/VW0e9ra/gvvVSpTSTlCaWiV1s3qk762WqT8rWutVPzBJFBJyQVwSQeDwM8Dt1Pce9PMcgUsNoBwSPvEjux6YPPHTpUKO24RZK7gT5ikKOe24/3ucnHHv0qWNzuRHAYgkY6k5PHGOdpB6H0xU81m7WX4tXat/TMJc0Xbl1W6a9Nvv38rrYcdm5gSVTYD6ZIwQQh4JyfmwcgdjgUjYKuFCsCBh8biX55PA5/wPJzUjYl2lgducLn5VLt0GffAHTt09I3g5C7SATtRVkwC3csQOB0Pt+FTzXWnWy29N9fO71
316GLi2lvpqvRtN6db6X3sV9pVlZ1DDkMMYD4988gdxx0PP3RSKyAsqp8wKtlQcYOeozkhOM45Gc/SZ0KM27gjA6fKAOw6Y4+vQZ6VGFKmORRtQZDBsZJ9B02qD9c+uRVP4dW76JeW2trve9l06vsCp3SSV7rbXb3bN2V32vt9zEUPsYq2xZMoxPzEnI2henLAn5cdRnNT+a4XaAqrtbegwTgdSB2PXHpzimlE5IYb8E5iORuA4EgHQjnJ5BJ6VAEIAcb8H77EnkdOT3HGAO4+gp04qUrye1rpa3vq15apW/PUpUo7aJJpq/e63euyvro76bWLLSKqKcsVKjhfmwOxAwMNn0z9MYNMALGP5i5bIAY4AXGBkf3R2H4ZNKoXICggjgbhhcdR/8Ar56dhihuJNzYO3J2dy3AwCO2CenIAya6Xy2VlZdUlrpy6a7ednY1UbdHe2rs9dm7/g9RYo0aTBUEIjFh0G4Y59wc5IyMcdc09isLbsKcknbnhQCOw55P09etKUJZSpyhU8L8xye2eCCD16/rk9H4e8H614kuo4LK0d/MfaJCp27TwCTjp1B4HtiuKtXhFtzkoxilvs7W22fa/n12PQwmFrYqpGnQoTr1alnFQ1erSs7eeum2qsjmPPlcZRSd7EDGdxJxjj09h15/HqdH8Ia5rhgaxs5pgxDEIjggZAywxgcnjk+vGa+tfAP7POmW0SXHiORZriN1dYcjA5BJYj7q8jAIO/jkYGfsHwh4Q8NaT5EFho9mjKI8oUVACpPOcMXRvvKeOh9BXzmM4gw9KMlh4OpNP3ZOPLDS2uru9+mnl0X6tw74U4/NHTqZtW+q06ii40oJynduG72i7S01e2rPgjwz+zb8QfEBto7TTCszorMrnYIwfusWK4YN3JAIxjuTX0B4Z/Yg8UXKCTWru0s2ZwGQOLhxu6OIwF8sjkjDHg8HmvvXTCQP3ZVJSURY40VAMfwxbfvZ9VA57enrmjy+VEFIEbhd4Yjf5rADKyHPylc/IADkEnIr5TFcTZm2uWVGMX9lR95JpJ+83a+ttk99WfuGS+CnCdGMKlWhVxUnFJwqSbpuVo+84xilJqzaXMvNXsfBkf8AwTysbkhX8UFXeFH3R/6sSNjYqn+LnIPTBAHOeM/WP+CaGuyWN7PoXiqzlliUtbW91hWnC53l5NxCnpsGD1IB4xX6laU6OyQO2clGKrtCwg5KqinueeAQeMYyCtesaLaxtGyM7lWTbG2MNOpxlA3Z+g28ZAI9c+VHiTNISTWKdk1eMowei5d7patJfdpd3Ppq3hBwfOlaWVUoyUUnKknCytFX5b2vfW/XRn80vj79jT4z+Bbe5urjQHv7OFmbz7CQzq+COhVFIK9s9c/XPyvq+ja7oM8lnrGmX2nSq+1xdQuigbumWGD/AJ9q/s4PhLSNTi+xXllBcxSxMkoRVffIQuFdiPl2jnp8xPtz80fGL9ir4ffEbTb6NdHhivpLQqCsKblkjBw7FQv3s/eJBXAODmvocLxapckcXTjG6XNOOl1dLmXm9db/AJn5TxD4AKVGtWyLGyhUTcoYeuk4tOzUVJK9krJN6vufyoRTqSME8AbBjAJHHAyeMH3+o7XkkXa29ix44AxznPHcY449AOeTn7j/AGhP2HfG3woluNX0DTrnUtBhk3uq5mmto/m3EfKN6KBkNgHtjHI+GpraazlkhuIZImVzG6SxmOSNsgMDG2CrKRx1yK+rw+Nw+LpqpRqxmna6v7ybS0a+7ze+x/PGc8N51w/ipYbM8DVoyTajVcJKhNJ2TjU+F37PW+tm7FlSpAIUnjAxk5/Lk446DjJ5PNOwNvIxkEHIzggcAds/y754qJCmSN5O1AVUDj3zz+Prz6nhc4GMgHO/d0HBOAW9CTwM9fWui92/K35afgeNPVRfKtGk23bS60fTTvrv8xdoGCeMrj0DH6dD7ce2B1p2Qq4UDjnGOd3PAJztHPHPAPFMOOd2eACuOgz0yw7
nqOCOuD2K4HBUnGQV55GB1zzgc9B19sE1ok5JXeie1rp7b3va3Rfmc05NJxj8Lau9L30bWy80/wDg6qoAAJbLbhu9D16fjweBnABHSpMnqQNpPCgdsnHHr0PUjr7VGMZbnqenp074wPw98DrViPAUEkN+uR2IPTPXJ7/rWl7fevnt/nb9UKnSjN+98KtddXtprZ9Fd3ttv0Y2eCCSuPmxwQeOP0+vXtSj7o25BzkgYwyjufcDkn9RRJ8xymQCRleMcZzx0x255B9RTUJ35YDaMcc+2R6A9MYJo5t9G7aOyTs7aLft2vbQb0k4xs+WzX4aP729+qaZKjEOWckL0VSRjIHt/LJJ4zirIcENkYOOgHHt0785IHQfSopFUEfMMH5gFOfoOnXjn6fWiN1LN83TPBHoAMn26ZxkZ/Ok1zW63667OytbV6rtbbomyGmteqXn5Jb6p76+WlndJPqx+9gEnnB+7zjp9Pbg1YjKlCuV3AE5Pc+oz19c4/nw10T5fmzwWKqQR9c9QOvOT39qiQn+EHaxK56E8g4x1OO4yT/KqSX/AAztovXd2083bskDummo36t66Wej7NNJ+r1ukWAECgsWJzjPOAB2BA6eh+tSA5zsZQcZAPJzjnPpxjpjjr3xFkDGccfwYwOQeMfTrycc1IgDHPA5xtJxgcDgevGe/OM9cnoVnbtp8v6TLUlbZvbbW7Vl167eXZtjRhWYtnd1IPKlj024A7cY9BjnOaKeVTJJy/AAJHGcZIPqBnH+B6lbxd116bu+6T367/0gbi+ifn7v6u55sDtk3Mw27clexyCcKe/UZ/Q01WXDjaSGPyLnPz9uB125IHPAIAB6USKzAnB+QDaBhgqc8nHPr/nktAbZhNq8/e6EAEA8Z4PfP15rKT5ea2qtZp+VrX6Wut3ouqsc6V0td2o6uy0s90ul/XfXZjGVdq5BYHkAHJDA8H1xzyo9evFICRHsBVWBDFgcSEjPyr/dHPfOcntmnuCGYndv+UlcYABByykkgjuO4Oc4OKQICRgZ3EY7dOmT0BBxjscHismlNK+z6fcn172aff7jVaWXmn89N07aLXXS/wAghaaNt6kDDELzyxI5C88jj29OprZsb64haQb3bcxUoo+Xj7xzngA4I4JPbAyazHjCMACGZcMpXBGfRu2Se/fpRGVV0ZFHmEsJATxjoTkdPxP0zxUSw6qXvs4JWd/JO780vnve6Jck05JbpWtZbNd9lo18z07TLtHjjzJkIf3jgnLljlSy44xjnr1x712VqYZHxnewAcKRhCQBkAjAb2zgf18WtL1rcGXzXRskCMcqM4w47cAfj0APQdVba/5abvtKsyIpchNxyOWYHcBu5GQP51nUwaSUoqz5VG6v5aNpdNNbpW08zmqS5r2TVl0W+2l31fXbTZ2PU2igDeZwMAHy+25sA5A/pkAdRT7eQROOfJj2MBk43ng4JO7cM9P05zXnUPiJpDHvZRFIzNG6sOijhCzA4LHttxx6VYGusRgOXZTk4Yncq447DJ43H6cevn1MNKb5GtXZJJdHZXbta3rt0PLVGbbTTburJ6p2s5Xtprda9L6baeh/2r5IyCBKBldwBVRjgrxwfft+AxjXWsOWwWBV/lZtwAAOBtYY7kHJ+uK4+fWFki2s7Jv6lMbgwPUjuOTg8d+mKqS38DAElVjKDAY4diufnJyeTnpx6Gop4CMXeUL2VnazvyuKT227JFqjJXb5rO101omrO1vy087uzOqkvmRBtyzFslUIwABwQB07kn+fWqkmqzKpQZAXDjb94EnkMeMDOCRgjqK5GXU0OY1kK4X5XBwC4Pyr15xgg/TBOazJL59sj72bswzg89wOpOORnpnPbFdSwspKzjHdNXV09VbV2ab9H5m1Og2/ejo7Ldre22tn5vr1R2MetFZA7FS7HAXO4lSRk546dfQ9OMAj0jQtZRlj2uwLjeTkBlJHIxxkHB4J5GecjJ+dhdkSJIfuhyAScDB5K9c
nBBzxx6967vR9RZ0B3OMptD4x8w68DjPYn0we+a8/HZZF0+ZKzte+ln5K3na1vybPZw9FQs435laOjva7V766WfnqvmfQ9v4hiGRLKG2rnZGSFjA6ZXBHf5sHA9++RrOtyMNsLo4ILFQ2SpIBYjgYaTAJ4wNowK87h1J49wICyKoIkPAdD3BzgscdeDwcdBWZe30jNMXlYfJuzu2Kf7uwDOXP64HQ9PHpYGMZ83Kn8NrvrorpJJL/AIfdo9X2Vo3b92yu9U+b3dEr/JaO3kQaxf8AmStLIXxLu2oAWTk4IBHBKjGRgY7k5Irhr6QMsgzxwcLyGI6ZJzjg5wR/hVi6upMscNtBbaS+4pnnDejHHXr7VhSzKyvvciQ8gD7nGTye7fUnp+X0mHpKnGKSTsr3W9tO2y0eu/3C5Lu7Ss1o/kku97vXvukuhQcFn5OCzDO3qOpC578LzxzyO1dFY2ytb4ABJBBbGS7fw5zySuf15Fc9GVkkRfvfNlj3Y56c9xwTx2454Hf6db4tUPlqAFLKcYc5xj8OmB9OetdnO3FX0s1ayXS1lvf7n030Maia5eRXb6XW3q2tvXytokuMuYJY5dnToeOR1ORkjkHn+dOR3VSCQwAAA2khTj7w6Yx06ZHTFbeo28iyjg4zwWYdPfjgEH6detc7cOyllUbBnBycknsOe3v7ewx1U7v71fTu1+d/LS62Mua695uLfLd2ur2169F8/PqVZCmWyWLc5VhtVj3wQTnpyeBx1ql8pjOwAMCcBuAoHtnscYPfPSpZmYAFlzkgEcEY5GAD19xn0x61EhwUZtpOclR0JPr6A9c9OPbI6bPtptdvrp6b69LESnp7sm0trq7aS1t3ctX5WK7K2/5gVBKuck4J6bh1AB/Ht60BcArt4V+COWBOOg/ucdc/j2qeRtgkCsdw5KnkFWzke/bJznpjvQER4zwC7A7juxtIIIxx7n2Pt3lvro+lumrV1vpovlq2Q5aJt2Tsl+Fr206pX/4BDG7h/wB5gRgkEKwD44wMY7n/AA6ciVWXZLGykqmWUn7o6kD1wMkAgjJPTBqMx7WDAZ2nkuOOQBtznkntnGSDTXMjt5X3c4K4AC47+uOAOe+enFLld001aydn0TSel/8AgdkFubZRW0rrRpJq7td3dr32eut9xHJO0DhBztHJz3AGeQcj8AeOKYxO3GMgcjkZx646Y/I9zTynHzMQ27CoAe3QnH3foeeD2PDSAqY+9tySwGWIyCV4Ixg5Hf8AWr89fRq3RPfrv0ZSe1nezWmqvdLW/Xte/wCFxFDEFsgZIVgCRuzjgDqc4JNKu3cVJJByoz90eg9eD+Wc9KZjdtw3ykhwTxnjjn+8M9PUd807Y4JG1hjG0nIznJGOnXqD6j8axaSe7a+Xy1v17/ch2b263TSj5qyta7139HvbWxHsRZUVS7YwD15HdTnqv/16bMgBT5jggZU5IDHO4devTnH8qRPNAb5iq98n7uOp4HHbp7cHmkcNk91UA4bBJBzlh7e/H6UtX1+/psrv8CWmt9b2b9fN7XsLt2ENI2QDgDrkfgegHbjnrilULvJXcxYkgkkjcRzxyce/fr15pmQyhUGWwQDnlec552jOM4PIGT0pVyG35JAUHB5B25BYLxx/30MflT73bd9LLXqtvXu9LIl7Su7O1rbJ7cunS/dvXqtgyytlwAcZYkDoeAF4I/Dkfhmnoyl3KZL/ADMDjCknGeAcEfgPUYpvGSuW3NyCOVw3f12g5znnHahegC7Vctj6kdQeOByOP/11rLrq9lZLV77aO+q08hPZ6a2UeytdN36taW30Xpo1coxBZSzEgqBgnPQL1yCD2xg9BmnBQY84YMclGBxg5GSTg8dev6UxsrJkJiTGQCAQBjkhu3OO3BHB4FKGdVcyZIIAfHUA54A6DPqDnp0rmk7Nt31svK91dPqpW1vvraxWrSacbtxutE90tFs1td69tUyNomJ2bsF+hz0wODnsOvf8elK
EBRw2C6EFWzgt0BPTgAgeo5OD6xEneMAhM9f9g+nqO5I/Eckh8bZ3qScqCoJ43BuQO2QOnT8OaG07Wfbpq7ta6ro+qVnYtqVle1kk7pWa1V72d9VZWbW/VakcTcK74KksFUHByDwD12j+fP4TOVOSARzy3JAYjoCfvZwfz6dqFjUKco28kbFUYXHO4nHc8YOOeeTkUNGcsCGVSoKIWyc5xkYAx6/UYq9G9N7q911fnt22W26G2pSva1r6aWteN9NU7Xbvbv5iK58va4wu4byQCzMvQZHQnoOn0yMUI7+YPkGFB2gjgDsc5wR9RxzxwKQgkMASyDlMkLg8ZDdcL64Jzk9qHWQMp+6OjdCDkY6DpnHUHrnjHFUou7+Ftqz2Vtm/LTyd7+YlZ3Xu+8nZX0+z0799NWraK4seEZ5HODjDKQCG7BkHb6DPPcdatKIxGfmyxUEFckb+27p0BOTj14NQrlR9wh+qkc5U4HC9wMHJz/PNKvJOHXJ+UrjbyOg7k55J9Se5xWqd9VpfRa7pWs/K711/QiervqrW9LK1tk7Jdm9HpfYF3BkfeASCSxHBJ/hJ6En0OMEE8mlUNmRsE7VLK5GDxnnIx1J9zyOxprgMxUAhRjq2cPxkjJHB44/ljhykAAvgxqvQEFnIPygj0B+mPyFJtL70tfXv6N/j2Yuia1b6eWjXq97K9u5BFG6DPyhHYFnHJz6HJ698Y5PoMYshkjZ1LbiucMBnk9No7EjPJ44HTGKD5TF/nJfp5ajjPqTznjPTB46+rXH3QAAAgGVyMMM8NxyxGM59evHD3d77pRfXXRt6dW7PZXKb53eWl7XVrO2nlqktPO6torETNIACOXAZRnA+QYJyOxwWJ/r1px3MAfmKqPmOcnjqQODjOM85+g5MeMFWkX587mORtOfqMAH5vTHPPWnqQFYBQ28bMNznJwQOQvHrjjJ68gF7O72t123T/C1reeg2rWsk9VqttWrxvdaJK61T/Qi8rducghcll6Z56Yxn14H6VIwjLN5ZdEcZUE5Bbj7xx0IOMYAwB3NQlChyEOVABUjOQ2MHORkAYHXkHvTi7jcxYY27SABgeoUdjjqRnkc+5bl72aVvuTttsumvXXXca966bd1azfmmklb53va2iuGUWRC0u5hwqZwNqg8Zx6dOnOeuOHglTJIpKq4JwOWUZALDPqcYz+gqIAMQPLzuYhWJwQeMZYHOOvb154qaNgX2s2QARg8BmGMAcdOv9OlNfPXf00tZPrfp/wAEtfZd3ppJO2i3ey02trrda2IkLI2SAYnzh+77u+eM465/Dp0swbVGwDeEYn3PIPyjpgEDqTk4PuBgAMAnaP4W7DuQOnzcYIx6dDToo8ghVA2nIc9V54JOc5+o549Bgf4JtK9r9Fb8tDW6aXLq21ve1tPTf0S08ky1IMZkUDLgEoAMK3HJ925xjpjPvUaxP5Qb5QHZcgY3DPQ455GTnn0zimr5kchdiOBgg8KT246ZI9MDtU0bhB1TLAlt+DtI9Onr6Hn0rOVrX7a+en43vZPr0YnJq1/dkrX0dpK+trXW78038gdWwAgJCKGBGBj72Sx549+M9uOKzmUCQSNgAMAVznIPbJ6ZPT+XNahZmjwpLod2Oc56EA4PQdQffmqDpGSGBY4AEisvHPYHJ98HHQ46ZzyJN80m3tb8r3Xb0WiXlrLkk2ktF212S200v8ttSNiyBsMAGYgtww5wQpbPHGc+nJPGKsW6GRTEEZnba4IHDYyRg46rzjkBvWoQpz8qjaMhRknJOOCOhPPXt64re0y3eSNYy21HGd27GCOFXIxt7knnnORzWdSo4xb6qN2rb7Wbs9ey+T7mUnyxve7vHprra11u7Lz33vuT6bpztIC6ZVtw4Hl7TxgdGyfQnGfrkV6HpWgOFctF5gJ3JGq4YFvuEDHBHzDPPBzjHVNCsF8yNZArHgiTjad/+rDerHBywwQRyPT1nR7WIROXUyThRGAI8KoU/u2X5iC
4yQzYyOOK8XEYybdla1lrtvZ2aX3Xva9rnJVnJNuUnG7h01WsbtN7X0e1uiTsedf8Iy372QxZYghYY1yEk42kjrlcnd164xWRN4N8sNMbfAiY7lx9/oVckdDwcjuRkdDX0DDplvtaR2JlAwcLtUkkZDHJBdSBwO57dBWms7ZIbhTgvuUFB8yqwyI1AIxvXLbjzg8k8iueGMqp6Pyd7rT3Vv19Xv36HLKrq3GTd5KLSTjora9FrZ3vt08/niXRmiWRioWRiHI25KuvBDMf7oP4enpFbWj2yh3RTDtO7aoV3ZiMEnncTj5jgYwPavR9ZsAjnzE3REORInytlvubz/FjBDHA/h9RnhLgRq8cSBmZnILO3BPYbTke/UfTPNd1Cu5q0pdFstbO2mvpv007jjzVFqne6evlZq+vX7isgQGfCoCQJIjIMmNhnlDwN/UZ5xjp0xlSyyxmVwyZK7S5Xayq5GQW5yTjrjjHvV66VcqrDaW3tAVIAATAO4YyW55yQTx6VnXMkYUiVgCykAAlt2TxuUD5W4ICknHIzzXrUJJrWUndWWmnTrpd2vvrcSjay1961rLVddE29Em767b7mVMwLEO4C8u0m7J2t3U474wRjnGQfTAlbc4TIwOmx8EDnAPGexyeh7VpTyoGEYAJKYUkZ47bx6jrnqO+DmsmfYW+VfmxtwR8pwOvByMknjPY464HoxStutlbtfRpavd6qz/E6aSd3dNJpcrsuiSTdm9rW3t5FZWwxLYYA4YMMBgAOmOSR0z34p48pnZckbsAMMgBuxxjlR+H6ZppQBQcYBzgE5bIPIK8nbwCenb0qQgncF2jpwGz5eM5AHAXJ64J7nOaipJq1trWbtfZJbeevTp0Oltb6rRLdJfZTbW11rrrckRAoXLKZiwU8D5yeRtHGBwc9uBVtZCfMVQCz5zkfxj7qAc4ZcnHXt6nFFMtKVKBs8oQflY4x1/hHHJ9F5HU1ZIkUEKTGoYFQvrjlm5wh4HruBJyMZrilrO2zXpqm4rq12dnrvrZWLhG79566NarfR6W1S7LTZWXUt2a75Y4pp0ii3mQsVO9VBHG8ZHJ4ORzjsK/dX/gkx+078NP2e/ibb+J/G3gDRfGkumRSz6JJe26TPBfRBTDFBGykLM5LYl3DysEkMGBr8J7C5MTOrIrhlZPmAYgDHIGeT+PGe9fUHwT8UWGj6taiaWWwgmgcyXCKS8cx28pz92QgbumMe/P5Z4rZNVzjhrG4WM60Jez5oSoTcJprleko+8ndJO19NG0wxOHdSCUXGT1vGSupRe6d9JW3XbRn+nx+zB+0v8AA79vr4dy+Hta8B6TGbywlk1jwtfabbPp9ukY2RyWkrDc0kIbdG6RqUzwMHn5f8V/AT9kb4WeIvFNr4h8GReKm8LXh04aXrtjDPB4c0uCVZtO/sOAg+ZHOTLmUFdvljgk4r+aL9gX9tLxf8FNbOt+HkkvLlYbmGBo3Mirp20LNbRQAqA8wKkzknyjHkIwbFfpZ8Ovi14o/aT8Z3l7bz+JdTv9aNx/bNxKxun0ucgtDd3agoHsrVgyz5wArL0r/PnFZvxXw7mOOwNb21HBwkoUsTTrS5lSk4J06j0kpe7pN6yu07WPhcXk2Iw05VIRxFOEqs5tQnKNObaguWcbKDSabi3fSVltY+sf29f+CrPwr+G/wg8X/DT4GeDV8IeJfE2mwQfbI9HgtNKvFu4ZIbiaBYkijkmCRoGlbLtjBJwc/wCfF8eI0vPiT4i137RFcy6zdvqF7JEoQC/nkkkuAVUkZDMM449AOlf1B/trfDb4heJPhl8Udb8TTeF72D4TidkuIJI7XUoxGW2RWrKGeSOMAeYpA7DIya/k68QXE+oajeySys6m7mky5y2N5DE8kkHGAe4Br+vvo94WjGjmeaU51J4qp7OGLnUqSqKTcYSgoKVoxSTvpu22+rPayP20afI1CjGHPzUYwcIRqT9nKcowSVOPO7SlyfE1d63OfCrxhiB
tywLEBiegHHIHOB1zkZxStGWbkHKjIJIU5Ofm75B/DPtUyoAeFIC5K7sDgcAMeuRngYx6EdmyEgghgzMo3ZbJUejZxg88n2/P+oozco/cla92rXWvk9rd9L7nszet+uvz9f07p+RABhjuIx1HOPmHOTweDnGO/PSpAiusjhSFILI+cEY6ktg8dOOM8AcimbCBuAHB2s+QduegAxj8Tnv2pPMYB12namVZU6sp4ztzx23HI9AMdNovs0tr9det97q6+duu7hJ3XK9VZdElsr2tvtvpr2GJkkMGBRjt4++zDA2+gPoeePbg2Sp2MSxUIB5cmDuz3bOQMntxj8ua21wrELgI+T2ODwMDOcjI/PGOM1Y8z5BtA3uoWUqAVC9z24HGWznnoccaO13Z6WWiVtetuyae3ltsNpXTS0t5Xeieum9rX/XpEQI5I5CwZSS5HYseuD2DY6c44GadGylnKFSxDOJCMncOSF54I6AgAc9DuNLIEWQOjblKLmLOVJAxg4498eoqNg2wgBRvAKsrcp/eQccL0z1I/Ck9NNL27K6vb87frvdJ6NWb6JeSV07Wtq/wa3EjOzDmVd7hlaPnDEEY5ORkZ5H3cj2qUbChgUq4JBJQhgHGSMdOntwO+egroFVsmMMwxlwCUAfd90g8DjJ4/XOUWN0kYEbABkE8fMT8pBGcjsDjPqRmhtptWvouzfR2s9n5a+t7FuKk3Z8rVmtraWSvFauyfa+/kTSxktgA7GUN5fG8kZBJPYjPI68mh4xgHbtVEym0/Mqn7zOAcD/9fWgsyKNxZnUjc6N8yY7ISDxjsB1HODTgSZAjHKYxuJJLqeR9V9W6gYOOcU9o2stbvdbaba7trr/nZWkku291Zfy6pfelfV6orL8sm4n92wOMdWYg45HJHGPwqVCpjdCCfmDpxkE8Hbjt06g556UrKoGQrbskAdUBAJyOmCeOc446c5pUjZ4/lZU8wfI5ONoU8gnqDnoOp9RRJdtLpNp+Vr2vbzt133T1trZp2uorfe1rt9W7XV9b62uVnTlQR0O0rjaVPpn26dACe/SpUR2hfL7VQjHOHUZGVHXnjp+OeMU94yHJYFv4mJxtxgZIHbJAxzwOKmiMOCSCSAMxkYYkcBupzjJ444I5HOcpd7X17eit87b/AC6g03F23jZ20turq70Tts7X/IYFBZVIOVGEDHC4x6jOc9R69MVNFIsZfzASyjCjGSB359BjnP8AjTJZf4RgnGSDzhv6jpyKdCxmYCMZZhjpnJ5x6cAH72ePQYzUzqWjd99fla/e6673fTRip0p1Gkk222krNtvToujbV31fnqWDKzEMFADKFxjBwP73OMEepwT+JrastJv7+aJbeEymXaAxUgLnoCf4cDkkj/Add4S8JS6lNbvNbF0MgEjOcLyQRtGM465Az7+tfZ/gL4e6dF5TPaRyjaCsTxjdv4O5ZD/CTyBjHv1rxsXmNLD3TXNJ62VtOlt0+t3+C3P0Dh3grHZw6fPeirrTld2t07cr1d9+nofMnhT4KaxqUkEk0L+XJIoclSEO8naVXnrznGcDnOK+rPB37M1vKYTqcQMcsqACVgqCTqFDAZ2qeeQevPrX0joGgWUcCEQW6SwARmNIgCrjG9pmH3s8YUAAc84r1bSNPt7dkVbRXmhCyQi5bCEHpgcAYwdvQkDHGRn5HH51Xm3FTdNJu3Lo7e7Z7pWbau0/TY/fOG/CrLsOoe3w6xKkoylOty1Hze7eEk4NJO7s153PO9E/ZS8M3sf7y1hlCwAOwjCvvHVg4GZUPc4XBA4Oa9Bs/wBi3wleaY6yaTaL5pLKQBLLt42EptUkrliozyC3QCvdPDmpx20cILESKVLs3RG4wijjk9xz2r6I8M67DK28JEs0wXypmAZZJcYU7SACMk4UkEDpnjHzrzjFwk74ibTas3J2T0vpe2m69NNz9Wp+G/D7oway/DJxScm6UNYyUVe9rvladk7JXv2Pze8Q/wDBOnRtRMX
9lWwdZQjoC3lmRlB8yNjg4IyNvHr0xuPy58Tf2BPEPh2KSXR7Oe4WEKXUoy+XknG7g5ViMDnjH4V/RboMC+aJruBJ1PzhIwBEVB5YEe5yABkYPJzXp994L0HVNMk+02sM8NxFGkrSbWZWYNsAGCUdMncOoyOecj0cFnmOU4f7TzxXK5wnLRLRtvVNaJd7dddvj868KeH6sZxo4RUZ1ItKTSUr6Plg1G6etk1dWa8z+KDxh8IvFvhG9FvqOk3MBidVeR8+WCd205Kj5Dg4bPbkevnz2F4ZpFdGjUph2ZNuXGc8fxE9Mt36A1/Vd+0D+ypp3iZbq5TSJZgbNntmjgVAwhRvMYSgHzQm5dhKofvAcZx+H/xo/Z6v/DN5O1vHP5TFI3aZTHJFIWfEMp2sWJxgEdMV9bhs3jW5Izdm2ttUr2/F9F0/E/n/AIo8OMRlDqVKP7yly8yTa5007We2ltE01q7NbX+Dfs7OFMiIAmUCgcEDgEYzkg5I7k+wNK1t5LO0isUXG0lCC5YHDEZOen3cggkA9eO6vNDudPkeynikjdZiZMqTGwT7skTccEk4OOx4z0ybyN4nYxtH5hADKVBGcckN3Y+wXBAOD0r1KeK52kveTtfXW2m+jtbz20vd2t+Y1cunQUnNTjJK3I9Nfd2u3ezvfq7dzlZIQSz4JXZn7oGG7seoJ/D2561SdGB4bb8o2FlwcH+8MnkD37nGO2zIkqt8zKi7DkLjBY4wB1AY85FZUgkY5IBwcbWJzgZAA6dM/wCB9PVo+9F6326beXZvz1XrY5HCK1taS201s0m9Pus9fQrhHRyxXPmDnr83PLLxxkdRk+vWplACMjZ+VsnkgN6bFIHGeAeT6UpKhQpVsgnJGcjOCFzx9Mjt655VQMnsSAASAdzd8cHH0PTtnmtUkraLazavvdKz1te99UvyYe7tZXlZXdmrry013V9N7CMHcqu0YPAA+QgnPHp3GMf/AK7EFtI74wQRgZYZBI6gEdQc9fxz0NOtoS7KAG3OSMMTjPT5fQjnPJxnrkivQdD0ZBKkswDxk7ipOSM/eJBHT1/w644rEww8GurV7rXlb5dNH3tdu+nrd+5lOT18fVjaMUm43kv5LpXSu1J9Wm/ky/4N8Dtfywz3KDyWkDNvJPJ/uIQD2xgHFfXnhTRNO0aIG1higdYhsfYCzYA3sOPlBwNvXqSDnmvJ9C8uKFY4gsKluMH5tw5JY9gAMKMEnPXivS7C/lTywfmccKhOAU/hA7HPJOT2r4bMsZVryalLkirxUU3Zp8rV3rd6K3m++37vwnw9g8BGM1SjUrSlFucopuL91K10uVW1tc9h0m5hZ4lU4VgSzK/LlvuqwIGCMHZj1PXt61oY81XmkZN6JwHADqijqGGMlecLjkHA9a8I0aZZXjfJjy2FIG4qRggjoFA5AI6g/SvXdGvZFaNAo/fEKDnkImckYJGccjPpjmvlMViJNctuVRXVJX2u/W6187aOyP2bKMLG8ZVFfWLjdJJtpWi09FFX6Wf6ew6W6usMnMbru8p+f3RXbtaTH3m5I6/pjHpnh83ABZJwDIwAZjuzk/OGOOWz+Rx1PNeSaZOEjgO+SRAzK+zuGx8oxkrk/wAWeg6V6Zot9GmwmMbhKrbARlc/3h3wR26Zxx38fE17p3bvo/PVrdP1v0tr2Z+iZdS1je8WuWUHF2jKzilFtWa+Wi++/s2lQRF4tsq+bG6ALID5bs2ec55JxxnjuMYr3Dw9KkLQxyhI2JxLKWykJJxFhQuC/wB7jIx7rXiWhzLeMimFgCVUOq5K5GA6nIyBgAf3Sc85Ne16OLIKsbK4AQIkjkszyDGFYdy54AzkerdvFnXd07u7euq0tZ201d9eq1Wp9IsPeFpc3vJttpOzdtX3007Lytc9l0mwSIRvGxSSUBsS8xF2OTIT1ycDHpznk16haabaxpJNLCym4QBoZHwrySgASbdp2glfu84/EZ868NQo7QGa3nEgCrbwud0
ZEZBbHB3ZzlDlccnJzX0JpWhSSWKOIiHZAI458iVJG6GPg7lU/cyDx2ycV2UZzqr3Y3cU2tL220ejuldb2t3Z5GLUaUo/vGotW1sm7cura6Wbd9dPM8H8X/CzQvF9nf6fqcMbwTRTRrGqK9u0mMwnG0AgHeGb5eTx6V+CH7aH7DF7ps+o+LvBGmskoJlmthGTJIVLltoCDzkYAFZQqkf3TX9Px0Seybbf2e2LysjzT5exuqysecux5Rc44x0BFeS/E3wZaeL7STT5beC4nELRTsFUNKhXB2Lj96yg5BG3ntxXu5bjsTg6sZQadrXTTs1dNp7Lfd7vz2PieLuGcq4my6rhsVRpTnOm1TqwSUouyXNzRV7rTfXbzP4Pr611HRr+407UoZbW6tp3gnhlUrJG6MVIYMMgH1wcjBzxzPHP5gY8MBgYC5UEZ2kdDkdQPU/n+uP7eH7GMvha+v8Axr4as5ZftIF2USMhyCWLpNGARGxA+QZbdzk96/IeOB7eSSCYGOZWdHjZcbXQhWVhxhsngd+eDjn9QwWMoYujCpSb5rLni1aSkkm1Zata6O75l11P4T4v4Wx3CeaVsFXi5UeaTw9WzanBvSLbUWpKNrX0V7XZbJLuNxXpjcvcjsR6Y6n9BnFSbdq5YrwT0HBJz39e5ByAevWoY1C4AJyOQAMDBPPJ/Ak44wcdTiZwxK7cFc/rz0GcZznnpjmvRUk+VK/2VsrPb+rrr1Z8cqale7as7t3W76O1vu1Qqrxv65/I9M8YI9hjn8+ZDkKSBxgEAAYB5yc5P4fXGTzlFLYI6gDkYz0GGA7/AFPoRzzw9Au35txJBOD90kdvbGT+WOOtU9WlfZX+5rV2tfr/AMAqpL3VFLbl6N3tZ3d+tl0f3ojHAJYY+Y4+vfHb8c9D9akVMgkYIySPY8cdPy7Anr3Ll/iBHB6Hgqp/P/8AXnr0p6soXaGHfJxjJ/ug9eMDJwMemORlqnNpLVX8nJ2to+ttd/l2KCVpt2SbT7NvTTXXz+T0erGbTjJBPCj1wMc46cg47dDzSbAGBUYJByPQE9efwyM+uO2AbjnJyx+6O3sQemcdx+Hu4bgBkZbvnJA4P3TgDk4z7elaJfDq9Vff0/zS0tp17zyr19dX011ur6duy0HKAG+Y8hQBj3/LI7EHnIA9zKuzA5BPJBzxkHkjgDPrngnjrUPcHnd0OQQo9u2fbnr39ZEU5BPTO0qMD6/TI9eRnvVXSS33t1d7/j/mtvOWlFbuzdraK97K1/W1/LfuS7duGYH5umcc49h2/njjnFWV2kEAYYjIz0yOMjOcZ+pH0qqwOQvJbIHHYE/XB9CfTg84p6nHAznGOQfy5A59CPTnvi4pJqzvq93rbTVbaave+jtqNPljrqm130vp3t5bel9gdSSAcYbdzngnBx+GevGOKKkMRDAsCSQCV5wg9SOg9u/bPqVsJ8kdLJ9dLPf5/M8yaUv+7wF4wNoUAg9MnoB6Y57VEr8jf8wX7wxjORx3wccHjB+nWpJowDw2SyBvlO4qe+48YB6n09h1hRQWUh1OWIIyAGYY4Oc4Hcg4zxngLnR9dfs20WiacdF8/u6mcUuV2uvVb6Lqu+tvNb2SJGmbfxjamRgr13cDBH3gMcdAOfU0olxG6thWBBbHJ6HBJ45Az8w655yeaUw7g5fEZKeYBnBODk+pHsO/AHNQCPbIm7CiT5kJzluzAsQT349++ODkopX+T2ei0W787NetwXJK9rpqz03dkr6q992nvbfRLWVWYuCzcKcZB5C/w5bjPfoM8446mRWVwzEjgEZGAMDuTnkj04/DGC0xx7XZ8FsqVCng9eW4yT0HQD09oHJ2hVwm8kqOjAD2PY4yfX0HFdEG/wCVK26folr3eqsr/MElLZtbXe1tE9H6XettEy8sgjIKZyDgFhyc8cDnafXqT374ckzRyByowQxIH8Stj5sfxduuOfrzSLt8jbwxAGG64IJAA6AAHqORnjNAYF2IBLc
EMBkOTjnrzkZyR3PTnhOUZLbR2XazbTb7Pr6bbEKnvpvHV3ts1otrb76u/TvpR3YAf94QTtIKkYD8/OV4x1wBkA9jxVhL9vLYGcbgCwGPnLdMKARnGM5wOuawlTAJGSzsA2Mc7evOfujPIxg+vBp+WXgjaQRtY53Y6D6L7c8/pg6d237zs0430ta1l3d9U9NncHShfRt3s1qk72Sdlo7J/N7mpLeSk4XeQq9MYYbs5J9M8Y9f5wG7mKBcsQuMoAARjqDz905PTP6cwFlKkyNucgZwcgkZwcDBDDJGSeM8VEXYuFcL8xOATyVHbI7+hxnnnnFbR5Wtk+VJu69Lvbf07bocaS10i1FXb1X8r0269rbLXRk0kzFhkDKZGTyOcfrnknPUbvpNGzOoctnJ+YE8knGNo5z6gYwcnk84rMPusFwrAFthySehDegPbP4cjNKsmUCqxiXjJxgg9gM9z6+3QYGdJUbxX2dU7W9Px27vbq7FqKeySs/ztq3tulZbiTMzOFQkMCDjBCnIy2fc/wCHHIrotGmfermQgLkojE5zgfe5yBnIPrkY71zE0rZwckrwWJGCB90g/wAXTPYjFbWkyCSUBf8AlonzE4y7oR90fxH2+UH86566XsmtOiu7aar87av8DsoXU4q9+ZqNu/w3vpp89tdVsdx9pmRTh2Yg/MWOVXp93sT6enPvUMs7OpV8sSQcoDtb1I5zjI6c5J44zVoWxMYMZdg6puBO4McYYkDoB25PPY1cFlIF2yRrhlDccnP8BzjAxk579uDzXz7lSV+Xl5lJ36W5dO/Z9NHe/Wx7aheNtLbNd3p/n+Lv0OFvmYCQYH3gQei7gfyB6YIPT1GMYJdvmLhQ+SoB54J4IPQNgdhnkdO3WaxZGPe2JEOQQz4C9eSOecZ/H2rkCribYx4BDnccfKSeo6s3HHr+Iz6NG0ouSd37vTb3btX1S87dHcwkrNprSyaS10S2SfS+3k187VmjtMu3BwwIJ4X6cfxNjPTPf2r0mwEiwIHGADnrn3wMdQOvbOfqK4ayhLTRBWUBgCVA7HOckdOg/wDr16HajEBXKKSuRuPO/oME85HPA/LOac3ypJdXqkttrPyWj+44K8oqSUnJLWzTa1uubXXd2er2WulzG1JY2BGHDE5wCOBg4znsOR7d+1cPeKqyuCSFZsDJzlenPoT26nPHTp3l0paRyfmYKcsRgEdx75P0/rXGakiCYurEKOCvdz7YHPv09utdmGk/dik2pON7bdF8/PTsc3NKOl73d/e+XLZLRPXfXW2xjTDG4KSSCOCc/KfXpg4GAOvpniolXgNlQW/gzlieee2BzkZ+nrUzycDYwDYALLjk84D8dQDzxkcDuAK6l1ZA+0Ac56sQTxk46cH6e3Su5yTV7a76aWd1dK99Nfu01Iu29LaPVN6u1rrZ23tf8e9h/LQAbyBgAuBnc3Xgc5II4Ax36iosExEqxIcBRJgq3y9SzZPB4yOM/rTZGc+YThgACNoGE9x05BPJ9+3QorNglv8AV4GQBlmY5wAMjGT1b07A4rHb8r9buyT23/r0lXte6+JOz1utHZdb6Kybs7K90SNJ8qxlxtDK+exkxkdORnn1Ixz1qsXZ5I0kYMCxCg44UdR0AAGfz70pZnYJtLbT8ingBR1z354wc49OaYwZSFdCrqTggghh2AAxgepycDnnNCWltL2Sd7eV7/L9Hsi4xS7O6ckrptLRXSXqrrbzHhVDMysQCSBxuI9TknpweR1/Wo/MLDbg7wwACnHTOMnoF59/Tns/AdWYbUyAAE4PHGSOgJ68dcjFN8smVAyAhsMARhmHPUde2B36imndXVk0lu2mrWv87ffv2NI2bXXZdrOytu0n/wAASIkspcrsjOCM8kk/KAQOSOf55q227kH7m3g4yQD0Gc+/J6dasJZK43EAK4DFEGSAB8xPfA4yD61OlnI7eVtKptwpHG4ds/e4P5ggZrmnUp3fK3otU0ui3Vnqvx+R1ql
NpPkfK7JJPRO+l9dE7u3rpqY+CqhmClSSGOTkMemR3z/j7UjSgA8KDjoBnJ9c55IHHbHvXSLpZCSgRBwuCz5yQexOQRnjIz78ECsS7tfKZQUO1smPjaT6qSBkgYAUED6iiFaE2oJau+qStvF6rS3lZ36+kSouNueD10unvezbata9+umxS4wCAQvIz1J/HjPB/nSq20/MCVOQRnHT27e/9cUbdw8vlTjaoU4AYHuMdu/bvninqApwxwUGdzHmXuvy4OCO4OCP5aO199kt+j0SV76a726vfcwcfdbu33WnN067231TsreWrgV3MVJJOMEqSB1BPUcDu3bPTkgooKy7m4YdsHBJzypHcnPBHbGc8BxZxltx+YlXBA+UYGeCTzgjkHn0yDSblZmTJBXBQ8kEHPGOM47DPGSe9Gz020Tasu2t/J6v5b3Rlbfs1rbXT3dOj37Lzuh5dAGCllJ4ZxkjJySBjk8j6ewpgKuAinO4ncTxuIHc9yPw7ntQ8R8rngdVVGyykEZ3cdD6c8ZOcDBaI8qFAVTywLH5mGTnPHvn3+tNxvpo7O7to/N6u3b59rCSjbSXXrqk0lrfS/e2trdLDmSIuM5CMuOfu7gP/QfbrxUgji8t92GIAZHQ9APvBuD83PTv3xio5eGwCeACo3fKo5yADwAO4z69qEYYKBhtdSu7HRupOPQnAOOg9KLK9ul1fvrZfOz1aTtqtR62TTdtLrXbTV26/N3ttsNUn727CcgA9T0+XHXJGOeSOtOZyAMBt44bj5AD91QMdeucn04qby02MCxZwVKFT8nH3sgfxHsMH1zzioWQNIAu4FhkbyM7h2OB04459h2pqK3Tuk1b8N97PTZW7bbNSTu1svu0tvt9+l3sMDRGMKQQ6sGK9WbOMAgehzkk55xk9xy0jx4OQQA6EEcjgDPT15xn06U9FyJOFDrj5mI4CjnB6bjxg47EnkHMZBLBycZ+YBv48defc984601G3X00XkvW777N9Nxppt2897S3Sbtp21u3ulqhxZowfl5BKtwN+xscZ+YAnscEDHGcmotrACQIGwSCDwNpIwc5z2P4DscVZkA4xyXGMpzubn5X9Og6fQ+tRjKtGWAyoB6HMgOcEgDtjHOD9eaV00u1td1qmra72V1qOL0W2u++um3Sy1s9bq6elgUJ94sWOFbaB39PfH4YFPZowcRhQ5wvy8gHnrn+I55IGOhxzTiSS8gwGKA/KfmAA+Yt1wx+mOO1Q5O9C4wGJdCfvMoOeo6nJ/Hik3bor6PXXW0fO6+fYlJvV306XVtEtGra210e9nutBIw8bEsFVj8rMPfOMHt0/AZxwTTxuBZy5fGSY+MAdBz0I/iPHGB61K6R7NxOzcu5QpyHb1YEZBOOOeO+KgLFQrAksSeoAIA4wQCSMjHA6/Q0u7Wlkrve+ze76dvJ33QX5tkruy2t2dldu2qtdO6/ARlQkEKzqSRnkAHrlhn5h0A6c84xkUhRMrHgBicKQ3y5xkZ7jPXH68GnAuWPQx4Jb5TkDB46gAjnHGeelGwCTkErjOWBGV6ZB9Bng+vatNWl5a6O2rST7Xt3TbvfcpO1lfZX0d9ez6J66pLbp2CFBOXZtrDBxyWOPl6nHPY8+5zyhQyZBTLgnGPlwP8AaPc9yf8AHNTtHFkqCVUAeW6ZbLAZO/pyScZ68dOtRjchEh4A5+bOTknlu/8A+rt2p67apW3tokkr67vuuvdiT00vdPS+munbq3o1Z9uhGiyYJCgKpAJAw2TkjC88nkH17+71ZidhUhyxIGMja3X6FcDk9M4pcg71Vh8+CwIyC3JAHuCeD1Ge9NXcjrkDPUh+je2fwH05PtSSWt1fre60tZaevTR+RSd5apbX29Hfq2+3X01LKtEMszMBsK4zkqw6EA9RnAHqBVtFWSIqD8w+fJJXI65PUHpg+/GPWg/zspQKXfOVQ5RcYPI6Aj+97e4zKkhXzJJXIARUEe0jLdsc446Ee45PNTstHe8
ldWd9LJb6X0er69rmsbqNtXqmtP8ADGzVn69r7kkrHcisCAAARg/d7gjPzNxwQeOe2ajKsFYCL5RySScnPIIGe/cfqO9hpFMaIGO9kGWX5iv90NwfwHbkE4xThGWTJYiQKTuLBWYDpkc9+QB1/OmtUm01dtW1fbfp/l31HzOyTVld2erd093bbfXt5shR22bQAucBgeoJGMAdh3/Ee1M2ruAztdzwrDCZ9Sfw7j+lKpYxliN53E7yAGUfX8Mdff1qLcwdmfG0dMHqDjjPGSOpK8dOwxUOKs+lr7avur+i/wCHJaTcrvS7v7177a7rb5aeQ8AJJkupUFhgNkMzYxjj256EYHPp0GlXAUqzsowWYgjA2DGRjPLdMdOcYPWudmYbtw2HcoGQcqDjgkY4J7/ltp1lcyQZbCtg8pwS2ehxkEAYHf8Awrgqw507eiv1tZa3t0Xnv6Xl07rvpflVuZfPbd3fXbRWPZ9NukBVomXorF2BJBI5GTjGQPQjjPJJz6J4d1Vw+xSoDhwsiuGXGBhGUgAOSWwc8+ua+f7LUgy7HdkWRSyKoAKEcAg5+ZeuRgHoOnFdrpWrw42FmBjHLjjzcDO5vUqc4xwQTn0r5/EYao1K0euyTdtuul3rbS+rT8ny1MPKcb2b1tqte6u3rtppbQ+hLS7hMfzFvOQFA8Y+8zdc8kMeME4GB6d4JLqHzUjLJIF3B5E/gIwSoGSHYZ5xjBz615rYa/lCqzAbEIWVBgIBj5XyTlvUgfLkZzkU9tfhViVkVJUX5n6hlY5yCf8AlocDPvzz0PEqdRN+69tvPSztuvPfVO9zj+r1OdNRa1VnbdNJLbW1tHezstdDodcEDK8sbnO1cK43KR828k5AQngDJ4JJPWvIdTURzcBIjEGChyGLtwR6YKcgdc7j9K3rzxEJlkhMwX5H27CAi9MZbPJOeeBz15rhbvUoZ1m+fMqglnPCtgHoecHkdM5PPWu3DRk2rpx20XVaX7J+bvpbbU76eHlFNtLW110d7a7a77PXqkQ3t3IyxBnVZQGDZG4qxwAi4I5PY5ByeDkVg3l88kqqyKpBCgAAZHc45yxzycg5qlNdmVZAzs4IJXnsvG0DI5GeOpJ4GMYqsuJAQpDNtJ3McZxjOWxgnqNuAO+a+jw0Eklduy5rbaaab3vZ6r+nTowirtbXS0tZ6K197aJfcPkm34OXU8ruPJLZ6DjjOeT2AGeaq5Ekn3gThQSTgcf3T3I6c/hmlbD4SLBIYgDOFXHUN0B7DOST27EwuNrMw2owG5QMkcEjjHQN6d8AH29JLmtZ+d7a6W0v2t2vZu9noxqEbvldm1dRaV7aavZ+q+evV5Yl2OCm0cMQS56jeeeh/iYdDg4NNUgP5jMpA6oSFDuORhuc4zkjAzwMjrSkkJhjklRuYHeQf7nbAPHzHI9smmZ2AM6BipIwRjcpxjIHVhyc/U+1ZTTa5Vd3tdrdbO1+/e77lKHV2as17tm+muuybsm9LPyELvHzGygB2B6FCDg4HHBA6nuTn2q5ukcyKjKWbaHCYO8HqCp/ug8Hjrz3NVrWJWXchQOxJYudoUoedp+h445Ocj0sNGqTho22u/IOcEqOpHUAHsOpx34rnqRUNrtrW/W77v8ArX530uk2tfdWt1ZvVX3frsnt96vbuMRqD5y4IAO0qDyCQR2PGCOP59loGs3tg0SoGnlVhE0RIDJHn5uCMliMeh7DniuUZsFArMZxkyS85f0UDH3gPcf0rTtlJaKXcAzMHMgOTu7En3x36/WvMxqpYmg6NWKnGfuyi0+tr+d0tLX301sjow8lKcYyjzKUlZtN3d43/C3bZdLn6MfAb4o3Hhu4sdQ0qdo1aRLWWCSTIikm+RjyDtWXkMCONoz3r9ZP2a/jr8TvgJ44vvEOnSvokniTS57O2vLoCfTbKPUl/d3d1EyqHtn2ffDD6ZBFfhD8J9E1CS383cLM3lylxDJO5SBmiIZCrkEKznoPUcda/aH4J6t
pfxP8KaRp/iO5jXX9HEOi+UUw4tEIRGjYE+e0IAKqQuQ3Wv5C8QuGcsWY1qlPDU61GdV08Q5JynFRlGUGore0m1J/K+x9Ysro4igo1aSnHljKzSa51bVWWitulq7vR6I8a/bg+OPxRHhLxJNeeL7WC18W6jcQeJ4dMuPMt9ce9OVlSMKgCvggAEgc81+DMk7NNcHs7uArEcBjxnj1PfPJNftx/wAFILWz8GeGLXwZHo4ju3khkbWI4swaguMw71JzFNyRv3Nz0Hp+IZXbI+85fJOBnGD1P09xyMDiv3Dwmw2Hw3DUJ4fDwoutWd2oxUpqEYRjKVtdEna930baPlMdQhh8ROnCmqcVGDUeWyV0nay66O9tlv5SqzKwLjKYKHADDcc8LgDp6gHGcdMmmKSzHcpI6ggAMFB/hPp6j6Y9S8qMKVYHcAzYbcuSOhHVeo4xkH3pChAZvmUnG3IySOcE452nsOe3Jr9fhrFWTX+FJ72t3XX8PKx5U0k1e7vfrtqm2lovz3+9pCPvRFbcc8nqxHOWHAZ+uOOM1GI0B5JaQ4Gz/ln8vTJ6kn+EdQd3pU7HgfOnmDGCvJXrxkAbcdjg496qkkMGyuc4DNz5hzyFPGfrxg5rqhdX2vtt0Vle76PTy+8iKdt2tFuk73s0npr3W1+vUdtUZYByDyedzArj5ccbc5PUnjvjOIVZ2bbgmPceTw2PQnp9ev4ZwJt4C4yyyZKkqcKSeCmB2X1z3x6ZRQD8jKcbycn58g469CM8/MT8uDgHGBrfS6WyX/tqfXy6JNaPqVa26vorN/LVJpX8rW+aIUGHcsDnAIwTgADAI4PyjoTzkmpkX52Y42rsJRiRy2cYPQ9uT6+mKeixD5nbEi7VEQHBAzgk85/3fl5xhiBTnHzOY9wXGSxH3cfeB9zxj9MVm2nunFpLdvbR3avt5Lpu7aEt3emm3Tta9r69N/Na7gVtw24FSGX5ogSf3mCNpPYdwMY9O1Mk8oruO4EjhVGdrf7Q6Erxjp396RPMEjMAu3jA6EA9WznGRwCT1yMZxkSKjFmydqdck4IPHRiD1z17jH0rOLlF3dmuaMr+Vttr3s779NrDTs7t3ejdm3ppdWtq1dX8iIKdyE8xgdR9446Ky4GSc8rx6AjBpBICxxtjBJwqjC+gBXkHoS2DwCM5zUxRtzrGzMMhlYnAUfxFsr34A6Hn2xUaqEcOEAVT+DHuMnp298fnW6eje91pfXqu/rf1NlZrS+2nzs02t7rz1u/vjHmIwcDJ3YcE8AYOMf3c4Oc56deKdGSMxlFKkliT1XpjByAD0OfXt6ySCMEsrDszKjbhuHUP0wfcdOc96rHeVfIBDnKnvgHjB7DPTPXpxyTL97r1W77NddPTyKSTbT00fyt+fp2v2JnOAWVywC+udoBIxgZOT05xj8RUG5gI32biSVA74PYj+6OevT1PFWAoIHJGU5C8kkjjOAemOhz/AIX7XT5LmQAIGIIODwH7Dtyec9iB054rGpVjBa2VrfmrbPTXR+i6nbQw8604qMG78t1H5LVrolfTS7elitBbSTyIQHbfgHPKgnGNpxwPU+mT9PWPCPhJZpY2kjXAO52ADMvTJI7Kc4J57Y681NJ8OJCYnlKscqVjH3VJ67/TjHPUY5z0r1bRUWwZfLwq8FgmSQmOeSTwPx4/GvAxeNnJNQtZNq6dk1ZOzW/fda9dj9M4f4foUpQqYmimpOEuVu8k5OGqbVrt9Fp1SPWfCvhWxtI0JSNYYvuhsF/9sluCADjaPm5PYV7no720KJHHIU2kKmCA23giMnJ+UjI6ZxXz9pmsGJhsLBAPvHgEDsBn5sk4J4+mOnomk6mZZYC53jgDc2ACcAY56DjvyemMV8ni6tR83NJtpXd9tLWSv0XVJvVXufu3D1LC01TVOhTik4xVkorlXI1qkm2l33tqfTOiX0G6IoSoC4lZDuZzwM8/dYcc88+xzXbw6qrg4Du6qitvBAwQeAeeQMbfxzw
ePHPDrTRtG33iyhXKAcqej7R1ZgTkf4V6fp12qLte3dxkMGPAVlPGAOQhzg9e3oK+XxdS07t63vu27qy+/e3fTS5+x5bGDhCEUqdrSbs3zfDt0bV1p0t8j1fw9flzCJgV8v5I1J+dTkYz6pzw2cV9QeEWimtI/MUhowjqiHDrgE7uPvD64x7ZIPzBoEtozQyvBMrzhg5yS0Lr6R44B6jnscZ7fQXhKO8SIG2meNmRVLEAkoT8wdc8diCM4API615dSdubZu97NdXo/kn5Wu9D6rD0HyJOTdla8rLm+HTztvZ9dtEj6e8F3MdxcWLStuhl3K5Mn7to+BsII++T75GeMk4r6K0jTDHGXlO7T2cGKOQZlUH7ro2cg5yACDjBPbn568G2DKkQMaXDgCQBTkquRvZTjh8cg+xGK+tPD9lauljEswllMcZlhaTPl/7MmQASnQEYwTxkUsJW5Ztys27ct+Xqo+7Z23/pWPOzGCUqTVpKzjKPLzNar4XbS6SV9e+x0R+Hg8Q6NLMGyv2bMcEhAmIlGGLegTaMj0I+lfmJ+1L+zna3T33nWJ8uztHuLaVIdjFI1bJMwyFUOwwhRi/PIxX7P6FoEVzZpFC0kVsjRH5yVmZpAcqW58yLK4xj3FedfGH4SXmu6Dqs8EHmzCGRXCjKGBV+cEkEeWwwHY9cAAc5P3GDpzrUFKnGcZRjF3j1aSdtEmnd6pd3vofk/EOEo1YV4zlGUJOSSnCLstNE7LRdmrtL7/4tviv8Om0Ga8eGExGKaWKZGBkSSWIny/MOF2yAMdjZ4+YnPb5I1S1kguriCSExwkmRSBuYKuScHPKrnvg5wO1fu/8AtJfCC4tLjXrVtFkto5XuJoXt1JQgEFFJYKVViCUcjnByBxj8bviToEunXdxLtlaSMvHKETJQIQG4GAVIIyR09Dzn18FXnGUY1I2lJK1nfX3VqrtJ3ve/l0dz+aOJ8odGpOrTXuwk2+SNoqySuotK6as1bq9+3g15EmS0Tl1JDbX4LPx29cjqe2SOnGUyZRsKoYncMt82T0H4DOccHGRmt29hiUMzSYcKuEjOQFJPzKdvTAwVwTnvWFKfmZSMKTkOT8wJ9cckkdM9x9a+vw0m4pXl03WvTySfmvSx+c1qEnO6s1KKu07dnturq99fve9V1JGxvlc4bIH8PXqBjPHGM9uDSom7aWOPmGCv3sDjjPTtyQD1qb5icBvlxgtt5xjgD8TgDt2GOKt2kOTyF24ypJ5Pt7HHJ5HPcd96tdQi3fbVJddVppZ7Pp/wXWFwbqVFdpp3W6d3po1ur30fT8S/p0GSu5AqhiDn7+DjoDzhj2BOe+DxXpGlFUQKpAwu35cnB6jOT/COw/pXG2kYBJPzDAOCB94dCT6E54A6Dn36iyOGTyxtc849SOwweOD1bg889a8DFVnVbfvLTVJe69l16Lq2n+TP0jI8I6PLGPKlorxSV4+63fTytur/AHHpWlzCMx78F+nC8MD0P3j8wxzuOf4s85rv9PnMzKCxU4Ayo3MCAAMkEDAzyOOo5wa8w06V5HUr0QYbAySw656+2PwPoD6DoiOG+R8IW/eADnafvA9xk4xyDjvnNfN4zZ9HHS6um0lFJ/je/wAnZH7Bk0U/Z01FtcsXKW6s+W6drJWu/XrqeweH1ZYo2LqUDqSQMEgEg4B+vI4J9yK9e0mWMAFpCgTCvwAFPU59j3xzjjjnHjeiTeUilkXC7RjGeOCGAHpnj6DnmvVNKkWRYzGC0TdUxk7v7wBPvxzwM/UfJYqTTers3fdrb7/ndL/P9UyulHljy2k76xafaOltNdbaK9vuPYdMuIysQTaFfaikDCFuB8319T37da7rRoWSfzYiAquuXJ3ZY9R2wMgAemc59fKtIuo32w4+WMqgQMQwOT27MOuRXqvh2dJJEjhG+JUaRmkO0bx0AODvcZwOg69+K8SvKSbTS7K7Vnezt0emurXfofoGW01KUY2TUbcqaldRSjslq3p
Za3t1SPZNHnmjLyyXbwQl/ljjG5ohxuYcgtnIPbjpzXtfhvUHuJre2ilDCIAeYTlJd3IlBOMOm3Pfr6jn5+09pxLFNGYG4YhZBhiowGCjkFRnAbjOQAOteveHXm/0W5IiUEssqoACxYj5o1Byu3qDk5ycjg15ji5TT03SdlJNJ+6raWW/VrW2p9aqcXTblFXivcb92N9LX2d3Zd0lp0Pt34bxC/XT1O52YSMW3D5UTbhSxB27iSVXkkA5wMV9baDpoksYGlO64iVVNxtwfIU/JEzf89yCQOD05Ir4e8D+I7m1udMhRIkdHRJD0aaKTG5VXHMmAM8jHvxX6KeBJNMvrHTpZLqEQ+QjGCZxCIZB94u+DvYHGRwQOOBX3ORYGnXiqSVpy5buWm/K9G3pdvZ62T6aH5bxPiq+Empzj7rbtGm7uLdnrp2tptdXNmbwbDrFra30rSSxSwqgtpjiYOON/wDugrkHAJyMZwTXlHjPwQunfaGjiW3aDAt5ZSd0isDtDDA2FcHIy2e+cGvrLRrWxuLm3ttsUkUkoKiIk5gjxllIyC3zfKOCc+nFbfxD8Breaf5yWrTQzRRSWheMCWQqp3B1J4dMjH9/uRjj698OwdOVSEU5Rs3brok973vbpZa+dz4jD8R+xr0aNWo1Cd1eTs0nZJa2XR39LrofiX8avhynjK11XS7u3jvkSKUxJEA4WEIdxLFcIvIJwpwT7HP8u/7XX7PFz8MvF2o6zo+mzLol7KJzIIyIYZZGfcAMEIGP3PXH41/aV8S/CaaROsqwLDb3QKPI0gjkGQRKjjaSszjAKgEjbkHtX5OftVfBOw8d+H9fhurcNGVuFsBGokWdYBuSSOYhSSGbMmFyCRgda83DVp4Guop6qahOMkrcjt6LZaN2PD8QOG8LxVk9Woow+s06bqYetGKbdRRTXvataWT6Psz+UVBLGZPMU7hheAcrjqSCAcAEcdDnsQatYQKPUgZB5+bB+4cggAjnrjt7ejfFHwRqHgPxTqWj3lvJCYbhjGzsWXyyzbCH2KHBAOCVGMDOc8+csqlCzYbHII9D1LHnP0wPc19jRqqaTS0lZq2ujS1a/PtsfxpXwVXAVamFxFOUalKpKMrppXTtfW2+jemvXYaqk5YgDI5bkAH0Hb6dckYxSkkLgA+p9SM4688EY+vfmnD7qgEhcZzyAeuBnB6c/WkBPXqOAB147cdD0Ppz+vfFp8t+v39L938nr1scNSN9Xbezs21ZWs9rO73sk9XoOByPQEY6HAwOSfc/4ZpwCqB6knGOnI+p5OO+B+Jpw53E8DadwxjOAMED3+ozxQqqSo+Y9xzjjsccjA6H/Z+93yWXZfcjNPlVlpfTXuuu29l5XdtdhgBDbjnAPQ9eR0x9Tj2704kkAAEbhg5wCee3fIGDx9epp4TIIypIAY4bqQenA5IHY4/XFGxjghWyCTjkYOAOR09cfjxxklkrdLaLz8vPoK9lq/Jvz8+wqrnGcsBkleOT6n1A7jjH0IxKQPmK8bs5UHG08Y6dunc9gexMaowPJxgkkA889jx6HBA7ZxTgGUtnOB8oGTjn/Prjr2pp6fL7/wAv66IluLSTd9Vte99Oyv5PS2+w4j7pJyxGQR1JOeAe+OcZ4GPYVYTgAMMc5zgFuxzyR+tVgpYjI4z6nO7nHPOOPTp+tWE3BmU+hPPJGcAAHj1xwOa1glpfrrr8tVv9/ktDOUruy2X/AAOnls/O/cslojxkFiB855Bx6/Tp047ZoqsCA2WG1cHrwcnOcHPQ/wCSeaK3UWr6v/wK3ReTvr1+XS4ON3drm87X/Gx5YV27iThjxtHfOOg6BeR2wSOOeRX8v5gF4wc+vJ9DnH169cfTRkhcOpMeQPuluhHfd3wfX69OtVnXaduVDZ4Ck7VPoCSRz1/uj6dIjKyktrtdej3va/W6ff5EwqdndteTS2dvknpfvvcVQ6uWlJ+TAwAAenAGT90556+g5NNyQMgcAkDPIVj0x0O
RjP1HtTndWzjJcYDlQQhJ99x3HJAJ4+bjGMZhkWQk4BAGCAflZh7egPTHPQAH1qzbfm99erVlZ9uz6fJhHV+9aOiWva0bK3Va9dk/NjUEq7mJIDnBx3xz1+pOeufw4eVBxleV5Uk4GO+MdQcEjgdvQmkUkKA4OQ2Smecg+3HHH0zT33AsU6lVLAfNhDwQcjhjgAnsOMYNaRle+muzTvt+HYvruk9bNabWV7p3tbS7ult6QLtDkjO0kn5jtbIA+U8nI5zjpx7VMucPjkA7hJgA8di3OCfXB+neoSCEBwWO8nrkKOOTjqCCeegPHJqeIs0cgBDKV+YD5QewGCM5BzxkZPpgVX4J2+/bd2118l57Dkk1fdJpNvVtq1mu/mno97dRq7gySNgJndkdDjgKT/LHHFSnKliTyBnco3DpnOcgjcDycfhnOImYqrYJ3bvmjZThAM4xkkEnv0xyTz0k3qyZyQ5Xb8vB2r0LAdMEdvrzQ1ZtdhPW11dbdV0WmunW93e/oMG5SNxBVsjKn5ieu1sAZyPfsfrQuCpGQGDZDk4PGcleOByOv07UZBIKBQyj7zD+Lnk8nkfh2600Rur4kABIBHOAw6DaRyQcZI+lJR+LTSUY9N3dPbvqrh3d0tFpbe6XTaz6/itrPjLYaTcChJUKDhjIeAhXsRg+meR60nz4ILbhkKysM8An5R6HHOfQ4xzUgVGjkG3a3BVtxAO3PXP6HH4cVGQxZSSiIcZY55PQLk5+ZiMfUe1bT1i7dLNpat2tpt13/wCHKVnpqrJXuuulrNdd10d/TRs43NtKrsCg9/mB+9npy3b8Px6Xw9bB2UK2wIwbcRllI7NkjAHQ4zwfwrnGdvN2NjcPQAhlHUIO+BnJ6jrgcY7zwxHGGSUxl1wVC4OGHy7nAB5ZTtxnOM14mLnKFGavqrvTZOSW99dtVstbva5tSUnOnHW6avdtK11r1u306u72ud7YWDj70RZQBJtTAHzchuN2c+nHQfjuJYPJI/yBVDfvMphlzj7jE8rx0wcnB6YrW0y13IGikwcIJCpziQE53DjK849B+OK6NdMO5XdGAdA6BvlDoP4wRksV7ZxyeADzXxVTFyjNptJqNrtPfTo9L2X3+p9RSpPlhK+mibburaa+q00W197njniHTAqEfOEXI6Y4xldxx94kn1wOvTFePTl4pZ1AzsLYLcnI5DbsKMj/AD14+ovE9hGsDhFEwCAnH8RHzMxHdmyMsT1U8c18065bqk0hVSqGTLgNhupBB9OmB6+4Az9PkddV6Uoys72tp1Siru/a9tX0fTVYYqHJJyXw7JrrFJW3e+jTt8y1o06hld2BYBgcj5STt+7/AHgP04HHNdfJOVhXyyGUZJJP3WJGcAEHHQe/J+nnFjN5cuNpBUkAk9sAEEdCvQ+uMc9ztm7QK37xiMgNGG+Ut6pjBHrjnHYk8H0a9Nqpo9Pd2tZp26pNeSe/4nh4ppySvslZO923Ztq2i3fZX0udAzu6mQkAMBhm+8Rzk46FfQj34rm79ArSOgEmB1XspyDk9gOMHsc4x0qeO7GTBIWAC5jB+YBcZQsehXBPJHJ6980554yr/Mu9VKkOp2SMejAZGCMEA46Z7mt8O5Rb2UUuZW11Ttq0r3VtdPO3bkdROL91XWm75mnZXSaWia6O3oc8MI5YfdzllJPJycjPIGcjnHJ6mgjO4qSVByzfwknrj9Mk9cdjTudx3KCufvHGAPQE8ntjj+LnGMGRRGqkkE4z8qZwxz3HTjg4wSM9a63USd11td6O767W9X5eqQ3K1nZt2inbVO1l338721V9COPgq0hAjz/CfmOcbeB1HqcGpN0eZMdHIG5hkBvpxnHTOQcnGO1Hygsw7gDLDkADntn9OMcZzxGoZ5FjfaFY5HOMqD0JwQFOOTjP60nN9lstNdG7dLa62uvPbZi3u9E1q2na+qbsurXr59rRn5Su8fNk/kemB6DPI6e9NIV2kcAkoASzZC84wFPVs7c
/l07zhACzEYGQgBy4YdeCOMce/Q4xSMqONo4TJVQuVO//AGh1OPqM469am6k9vh77rb/J6b7LuaRaunZp23V0le22+vfft0I4oynzlhtPUDh2I6AAk8c8Dvg8jvoxlwdoOCqqxLDuAfmz/PAPU4HSqiqiFFb5pVbb6bmONuOuM9xg4PfJ4nWTDRuS3DFSucH2AwOcHg9RjI+mdRu1276K2i0fu7rv1+a3sdUHFSUktb682qaVtUno9Ou1723RsafbyAh2wEPzADg5JyTz16HjI9fatQqYskoQWOVd/urnptIHU45B7jg+ufBcpHGuT+8Y8Y5UA+v93A9R649KtvOZvlfYNqLtwPl56dD8x9TgduK82XM5+9blbto9Xt3et2mttdT2E4RprkerSacpXu1yqzvtZtuy9Ni/AMJL8+C2C6jGCewXHfOcDHXOTjGOZ1WN2eTGCFIAXHABB65xgdDyRg889K2RcCCPc5HmAbFAG0kZPK9QScnj9CM1j3U5lMyhvmkUkBug7EAjHK5564GMeta0FJTlJRe667Ncrd10u0122d+pyYmpCUbNpNJenS1tFa9n+L6nNqNhV8hVwwJLZJzxtIxkA/w/jzxUuAGK5cupUhxynOef91eMnpz9QJAo3AHY7AfMW4V/R9vO7GCc8c9j92mvw2EIO7AzydrrgHHTaOQOc/liu93b20ly9de7S2089vQ82pNSVre9pfdaaWet3ff17pCpt3b87AMhgx+V2/2emAeuOcYxnB4VCGU4Kq2SwboQV6Ec5HBx16c1EQ7ELtBIwwU8cYJ5HH/AjnPTjrT0yhCugQFiCQcEdxjvjqM9DwOgGYaevTZXdt9LWV9f87O2pi1o2m29LWa2XZbvfbrtp0RQ4kGW3E5JUHGeDgenPrx68HoAFS5JO5TzwWUK3cMOR25x+B5pzEAh87l5yCcg9AMcZxnPIP0IB5R2RxtUYJOPkUAMc559ce2CfpV7213tHrfo2/v+/wDES1tpvZPS1ndJ3vfRa6W876DMgtl/mJJGQMjj09R749e3NKyNufykO0Df0J2r0YnI4OQB2OAB0ppE2VQIEdVwBgDA67iR19SQf5YqwrsWl3HcyIAcYxkjjaeBnB54OOn1auk029ezWmy1e9rr8/RW01qrOy1SfRuO6XRd+lu2pBCGBMm4CJlBxzzyRzkZGDj1z6+jnLxkFeZMBsqchBg5JOOCc88Ejgck1EFc5A6AkEdGPQ8nP9DnOM+sjE/ICNrDAO3j5D1BIzyOPUc0+ZJvR7RukkrJJf1di05m9GmtY9krJXe76pvbVL0jDOqly3DEKygli3ToTjge/Xj05lJVQq53HhgQ25Qx9R1J9TjrjpUYRHDbFcBTuO4Ejb3JOeucZPoO1NACyjJBXgEFjjPYqxzgY9v8RfRpWs1dWS6pa2+er6uyd9yrJ36bya2dtGk7XV1drr2eg5UdQXywU4BI65J9TyMZHAzznpUqFVYj5lbPEhXgFhkAHn69PrxmkBQKVbDHfkYJKknueADjPAIAIyB6Adw24hwy4HygcE9BnnnGOvXpxUt3T00Ts7eVm77b6/pdEN30adrJX6O1tOt9Hr/kIGZJDI/3tp3DcFU9ACpOQSeSRwM4BPFCsQmwbWbduRzzg/whPpnBPHXoSKVgXVW25JAyV6qc4BZfQ5GPqeRjBj+YbchIlB4UHYSBnPHJGTk5/Ssm7J72bstdrW11Xol+RaaavaKa5dVotFokv6ut7aMGEmFGOccqTyRzgqMEnnOT+OOM06PBdUcZ27h94EZ5IzwRgc5x1496VwmBnJ7bl+YsSTx156Hn2JxTAARtChMZwxJzu44J4yc9QBnGPTNEZXfu2ta2vXbW3V67b9LMV7xta107O1lq1rvf9UtG7ljcql2wyg/KQRkMRxkHHGfTB6YJ7iMytu2qWYD5vmXCKD1CtnofoO/GTyrM28uz7vlAK5GAV42qAPTGVJ5OeeCaNwd1RlUKANoHyhg
SSFPJ4Pr9OuRW3MtNd3b8bX815/LclRS1avaOr6K3KtrJtrXr+A1WdTvbAXJGAQS2OhHTI9c9eSKeBhVVcluSxBzz27dgTz065pqouxvkBLMVR+oJHfnjPoR9cDHJtKSAnOGX5uMAgjBxzz0HH64pqV9L9H89vWyd9PXs2xvlvorPdKys7JWW+tr7X3XSzsYZXBAwxG7B6FTnJbPckfiMH1pxRmUnhiqlvvZC4ySWHfH8OOeuMcZiDKd0bZGCSCMNhT0B7jaeeM9eQO1hXiGY2UMFGCTkmTOMYHcg9gen1p2aSfS367L8/wDhikmrO22vXVaevn6K97leNdkgl3AAjDDO0yEnjAwfQnGeDg8dKmQxpuaIsQzsCclgCeoI45HqOnHoMNYuzByu5VLAgrgKpxx34xg54788UkYxIT5SeXncWBxv98nORnrgYOKTve6vbS6t3SV15Xs/vZumpLd3a3ulZXT7q7VlbXy6ayGIKjvHJ8x28dTnPLA8YUHtz/KrBYoE3OsjsoVmU5VSf4Se/qRzjHB5qKMo5faAj4yGZyBuHXbn0wf8gVG20AkqWZmGAWyepyy/3h74pNq/L17b6eb22/4YSitY819bNP4tUun4dFpuSs4UOSytGxHGMg884/o39M4idQxJQ5Veq9MZ64/vD0Bxx9cUqBdwManJIBGQwyOoPPQA9uOlWWVEBLdGOeOQOMAHGMHrx6Y9zWM9E7K8k1ZPbS34PVd2uu93C0Xa0mu+i0sr7Ldd0/nuiiqsXYlsgkcBgBuHZeoxg+mOfXpMkf8ArcqRkqSQQcgdMeg564xx0pGUb1wACDkZ4znoDwRz/n30IkYr1JZgN4AzwBzgdsADvyPXiueUura76NqzVvx1S63vpc2dnbS7el1203d+nZK1r9CKBH4AAClxsPcYPTp09fU9a3YZLiMhI3yVYMAox8ueme4I69CevHSo4IEGG2joABjK5PUnn5egx14x34rWjijVQeQCNrHOcHpgccnnn+dc88RFXvHms7a97rd790t+y3RpClePNa6e8Xo3trq7uyd3ZNX7aix3NwiZV2QAgYA6k9yOPlX+opJ7y6KOPNfJwoOPlIbqTycjjqO2e9XktlEeTEWVwNvfJzyx68H075HpT5rVTHIAEZvlwg9fQHHuMjBI5BrjnUpSt7ibbS1i0tGt9r9b3XSyWlzpdCErK3vLdW6aa23V/W1vOxxF3NKrMctuBOGLYAB5JYde/XPNVGncqwEucqEyepBzxjoOOMe/4Vv6pYkEbE3OVPyEnkdcEDkKfz46ckDnGi27mbCjOCNwBx3AyOF6dxnGBgCtqbjKMXypeaSvpyrpq9l5d99MZxUYv3V1V2rdUlb8U991uQE7nXaNx+U5IIB6/Uex4JPHGOkjCUMwJC5AYiP7q465PYnPIHpzjtGACSN2EHAOcHqcKDjjuD6n86kZyrruXG0/M24/MvpnHPbJ7++a9Ki7X00iuVWVna0bvTd6dPPY86qpaPd6aPVttJ219LeXmhioVywCjkDP8RJ6MDzkHHJ+nQ08iMx48zDnIJwSRk5yB/EB68cnPXAoZgm5kB2tnLKN+c4wuOOenP8AQVXdmCYC5UkBsH5wOcc5Py9Sf59a71JKOie+je/Szfpp033d2ZRTk09VqrbK1kkr+na/Wz3JCzSnYCFDKQAM8hTyxBG0swIOc80jISCwywG1t+CDt7lhjqvbrxUau3EedmwkliwwrdQcYyBn5SenHI6VZR2IcllY7CDxkv6BRx05JP6ZxWbtHp0/LS2y1vsvzNeWUdv11vbyta1n5PduxDGNpYKwVGwDjhlPfGemcjkntzjFaGUbjBLIQu4jO4cHCHocHvxz+FU4sBwHGWIIGQSmBjG4cZbngnHcjpxpRIeQiFQ6hQR1IGCzBscY47dPUGuSum1K/SKej6+7566adeopN2V73Stfbzv3aaW70shYow7gopDK38RG7ceijjjHQc9+9aVtbTtMo4A
Zt2DwEzjgHsee449eeJLe1XdG8isGA5ccBc/dDcZDnnjnJHPtuQWwIlYE4D4fr8q/7JHVu5PGOM9cV4uJquNGTTslFb73ulfzad01vtojswMHKd3q+aPK766JN3to02mtXp2Ps/4E6bF4u8LapoF8ILK60+1nnt9QD7CDHg4dhyWbI2tkYKnjFfqn+wz8HLzWlv8AWoPEOkSQaQCTZ384+1zzwFvOlVTyJYiUaPJJkycYwK/FP4ReMdX8NXl9DA4eDULZrRopF3GQuAEKLkEkjdkgjGMmv3B/4J42Emo6pepb6slnLe2U/lpfP/oKXxU4kni3LuVdxDtkFcgYNfzR4gxrU6+LaqKlTrSpVIKKU3zScVVdrXTfbZ36M/QMNUg6OHclOXs4yhNxdnK6SjZWtLZrW3k1Zo+Wv+Co0Op6nK8w1i2voLJYozKpVZWkgyGhYjOSvCjgZwK/DIyP5jFsEhgDnO4jJG4n0+p57jmv3G/4KOfDbxJoF54pe61O0ntkka4+z2EgmtpGlLGR2JI2xnGRweQRwTX4fuoiZkYgnkZI4U5wrEegPGc9/wA/13w3ahkNGhGaqKnJJPl5ZLmhBtSaS8u+226PjM0i/rCnJOLnzW5rq6SirJdtr66K/mNXIHmHIBzyB0PGQF4IwcYwD7Z72Oqrl8MULKWJwAOhb/a6Y/LjBqEAhlLE5IIwDkFz2AIHB7Z6Ypqu/JdQTgqwPVQMfgMA89a/U4L3d21o7La6XTbvo7LbpoeBWu7NXdm11V78tttHte77rZJCMpV8HAdsMCwyCT33Z4U4OPzNOQKfMQoXkUlwyH5B+OMLgk9Ae/HFNaQEuAW+QYDFeCH6BCfvYI5PHrTw7lGRfmMsZVmVR9wjq3Pykde4z25rqT0V1/Kvys/66/eZ23TTWzV90tG300162W/zrQ7I1SVnUFWbcrA5kPQDgk5x1/HrVgbSpcsUeQCSNxkYcDAyc445xxxjpUZUJGAc+Yq4C7QVbPTBJ6n6ce9ISdoHoMbFOAm3sgweTxu6f0rV6t+a0311Xf8ATRFO8mpW1tbyto0vPbV6J2svOMBgwJZdzklufmHGMDjHPJA6Y9wRVlSG3QF2KoCcqc+ZI3T1wOOc9PTpiHMY8uRgQGzhWOQxGBk4zkDnGQO/pU7BEDMoUsy/K0fIDDGQ2cYbrj1/Cpa1bvpypK999LabbJ2f3vZkuzSbvorJtaXbWifytbRadXqEe1kCjDSoxLIwPIPTvyyc44H3sfRHUtgbthILbGQnB7jrk8j730GO9MTIAZRtD/LuDEOzZPyA9vf1BzxUsksinKlS6DOFORsxggL1yO5zxmsr2nbo7WslsktdvN3tbT0FTjLnXLdu/q9d1sla6vbpbzBN+wNncoXlR/Ex9iM4BPAxzjv0pJMkALhQQAVI4LNwSw98A57nt0NPRoyuBjGMsxz98dMZHHPQ49z2qIs+9N+CMMdpGGKgAceoA+nt3qoybcrq1mr+elns/L899zq5Yt3Voy6929NP61VrdLEW1o5GdwFJOMLyDn0AHQ9T3+lKgz0BLE5H93PTGM8Ad88HrjjItFfnLICcruJxnBOeDwTxx6Y6DtmxZWZkZOCqkkmTdznOQCCOBz69eg5zSnOMU25Wsrt20S06dX02877nTh8NOtOMY3vKyvbo2ktE3bbfbuyTTdOeSRCy4UnJwTyoPJGQSBwBgYHB45Jr0jT7RIkVRGu5TkNkEkkd2xzxwBgDnjpWZp0SxKEK5IJQMcFcfw47fLg87lzn0zjeEm0ghsdjgHI6g9emT2968LFV3OSu9LXi0n5NXer16+isfpOSZbSw8FKUL1Fa6mk1q46paJNa2enrfUsrIIyqsOSeAeM55wG46Yx0zjHtXRaXfqAFbtuBB53AgfKPXHOTk8Y9K5Jj5rhDuYkAoegOeR83ODn8s/WtrTIHDKQAGVuCf7vXBJ/i549x9K8mr8N2ru9/K8rLV6bLXys+up9vgabVSMk
pcqSXvaW22aTv5a9Eutz0iymSZ1bDGMjBC8NkdTu54GfQ8Z55rvdFcq0bK5YbwAGPzgccg+g9SPXHYV5hav5SqSzfOMnByV6AArwRuJGevTJBrqdOv1R4s5JDbcDJGDk4PQEDHHHUnB4rxMTBy5oq1nrqm9V0V+z/AAt3P0bKKsIuHPa6V1e+usbf9vKzt6vS+/1DoeqxiOICZUMeEkP8Tbs5443HjnGD36V6voty10QY3CKijYQdqso68fxMe5ycntivlzR7zcm5SwYYdC2cEnoQP4W4Pr9eRn2fw1qzA24up/KSCPO0DmRj0Q4PIPf044r5fFUWnKUbvm1176aO3nfd6X11R+t5NiIzUefsrW2t7q5t101baS/M+pfCwuPPBkjLQFlkRgQoOByVJzg8g7f4iBzkCvo3wtf2SQr5jvblQrm4LELuborccqSDgdDz6CvkfwrrzXjIjTpEsIb92PvFT/s5G4kD1znnp19/0C7nu7aNVIMDoVR3wAI1xlgODnnk55x07V41WEnKSt5NtWetk/u3S000P0nBU41KUbOL0XLJbdNtLq17/gfaXgbVFj+zAzYjlZEjCkOdpzuJHGR90qT90k+ox9deDrAT3Nk7O3lFlbaFILlsZbfnjaQOMYJxzX5/fD1VW8sJ3uhtSaPcM4XKkblXriNcjC5/HpX6J+ErywtVgle+V4PJjk8zh2LsBmPgghx09s98kDjUOSrBSfNFTi2rpPSSV9dXf0troePm9H2UVyK7knZpXTkrddru7dul9z73+GfgSTxFHZRRsLiKGFdzSsIlDKAGdnw28wEjHALFj9a9a8SfD+58OadNpt1bCS2uoWjkmlXeoWQExySKRkkY4UHn1GK4j9mjXtH/ALRsRe3YWxYl4mkbMSQ5GU5wTI59cbMAndmv0D+LFl4P1TwjcarDfWyfYbM/Z4pJRncVGZJCCMbgMBcMewIr944ao4PFZbz0nClKnS5pxlKN2+WN7K99dNbb+asfztxNmOJwmc0MHVo1qlDFVLKUYOUYSm4pKXKnazbu37qs/I/mZ/bF+CNvrdxdXOmWmJmgaFpfKEVtdTsrYQOMhxIBgDaoUjqc8fzEftG/DG88N6pq8U1lJb3MMszskYHkKG4PmHHGRj5v55Ff2wfGfTtN8RaDO8jRxSwxXZhiQCeQGHpNkbdr5IManOTnniv5hP20fCsdtf6hcxoz3N08kN1h/NMe3ftOCqB2YHnBBHAzivCxa9njE6ekea+jsnrG8t+/R6Oy6anzPFGRTlhp1XTk1y3Sja8bJNt3Vt2tb6Wsz+fTVLP7FdTrINqIzKBtPBYkkDk7iMfQZycdRz9yI3lBBLIMAgrkuxzksPRSeT2zkmvTfHdgbHV71oyGjEoBGAghLFh+8GTw2OeDjHcnjzV1ZmkYgbWbBCkcrxgqf7pxwW64Psa+ow1ZuMWk1aCXTXSPNp007aba2PwXEYRRqzpNJpTbV7XtpZOS13036WGRwtuJXAVSMMerbugx39wOOn46EUbbuRgn5gBwct1wM8ZI55P0qKNclQMgA4HcjpnjG446ZLepxmrkZLHGACOMAYJB757epwe2adWctXZWvq7bKy7Jrq7ra99jpwuGjCSk1ZySjfl0b0tfTslr36Ppp2SNuHOMjnuAR1z3zg5bqBXVWSMHU7SxAHOBwme56jvjg4HXrXMWQcEA4HXJ64PbBPOOmT7/AJdjYAFlDEkFTuOPlIx+WBnr2x7GvIxLab13j0skrWdtN9fX77M++yWgpcvdtNau/wBn5fLbV7HWaVG+EKqRGWzndk7upBXHTHXnkZFenaQBtDYJY/6wAZXf05HXjp6fka890+NUaMohUNgk5yScAEj046DtzXomkbw0aZCqckFTzs6jJweexPPHpjNfNY2b967fz3Sdr+u3RLofrOS4flUGlq3Ft221j9+2zvvruz0rR0dFVwNu0gfMvVCO47sOmc5zkj1Ppmmz7FUgKuSAWHBXsNo
ycHrj868v02XZsG4Pk4ZQcgEdG7Y4zgV21kZFVZEO4Mqsykgnuckf7PbHr35r5fFTUrpW8r73dtG93v669en6ZllNKyvZtqT06tRv911otvOx6bpphS4gmjYkhgzBgcGTHA3cgZOecY5xx39a0maeRU8vaiIhMqoRtaRgCQD/AHxjOM14pptzC6xOAyOoXcnUuB12+jDjBweuB3x6boV7GCrJKwRCzGIud25cbhICOG6ZGOw9Tjw6l5trXS29rJ+V1ey1t2ttc+6wEo0num21FNJ3WqXTyu9/K6Pb9HuVWGASGUrEVLdmG7gqB/d45HfAzXqegXa2wVJ23CErIkZBSRCxJUPy2STxjqcZOM14tpN/axyRO0yFDsMibt6N/skgDaw52g5244PIr1Szu7NIBN54gSXDIXO8FxjBfoWHseB16VdOheUZN9baPqrO109ddUn1t6n00Kzcbay+1f8ARWS1etr9/I978P6re291byvcosiMp8tzh1jYZUHJ+WQAYHQc5wSOfr/wN8QL+aCy0+SSKOJpI4oYyMNLJyTIQG5VRg+bkBsH5eK+BNJ1ezlh2+ZvmZlkV2feJXUZPJAwRxgZ9u4r0Lwp4ouItS06eaRUhiuwWSOUmaSOPgAHGNqbsugHORzxg+vhMRPCzjKMnyzadlvHbe+qXp83qeBmmXU8dTnzU4OpG7jdXvZLaz6WXy3d7n7r/BHVpJ4bYzqk8AEkjliBh125w+CRuyScZzgeua+hddvHu7GSIyMk8GWskZfMCROBu5AUA4C8k8ZBxivz9+AvxV0/TdN0yG5uUMTxgXdxM4JiyR5fyn73QggFcHqTgV9a3vxK0t9PuVtLqKOGe1dreS4VWVXcYSUgsCD1OASFGOOef1nLc2oVcEk6kfaRgrq9uZuMfPZtq2l0fz7nOS4uGZOcqD9n7RWdtFaUb22stNvTyPk34x2/2q4LzhJkSeQzRrguJUIxtI+6VyevXPOBXw/8WtKi1LS7GKwt0jvIiTfRBh5kcecOUjAIefGMvkZ544OfrX4n65pdla3c2oa1aWmd80ssdwqpLJLlleU84jbB7fKRgnkY/PXxn8dfBuhR3sUGoW2r6jdzNFGsDGea3RCQVHICJyCTk54PUV8fjKqeJlJygnUs7JqTVrW26L/g9Ln0OHwtSWFVCPvKKirOTW6imlzWvr6p39T8R/8Agor8NodIe21iwst16RCl/cSHHnOxJUGMJ1UZ2yFueePX8kUR4SY3Awc7lAzkt1PbOOcf/WFf0BftOaLqXxtSPT7BEma8ZPMnLBEghGfLU8H5ogzeZgg4IAPNfHi/8E8Ne1Kxlv7PxHZeZHu/0UyBpRjk4bjbg4AGCSO/Fezhc0wuGio1qvLKTUYWXRRjvZPTydkkrdz8A424FzbNM3q18twU59avLFxgpqSvdJa36PT/AC/MBkcMOpUEDB6DOcDHACjq3r0A9JljG07gwZunGQCBxgccr3wfpjrX6CXP7Avj61aZTcoSWzHu2shUdWVuvA5xg5yeRzXFa7+xz8SdHWUw20OoxwxqS9ucOxOcAjDckcHGQcAj0r0oZvg5OKVem72aUZaL4eulrX169nsj8+n4fcSwTlUy2slFvTllo9FtbbrdWXTpp8YvGw6SAjcAQMEnPUAdxkjIPHA5HOFU4B3A7wRtDJ1yTzx2PYkfTuK9O8RfCPxv4dmkTUdBvbcI7YcRs0eAcklwMfNzg4HccA1w0VvPaMIrq38siUqyyo27KkEAMejc9OR16V6EcbSmvclGbsneMk1Z29WrbX229D5nNMixOBm/bUa2Gnf3lUhKKbXKnpJX1ummmktNDPjQZwRhjkFcHB3DjJORnucdOOoOa04LEykEEK3GYzk4H949iCcnv+gNbEdlFdsskcZyCE2jjc4/2hwFJ7/Q1vWmjmMK7Ah2zweQqjp82OV5OTxyAMdayrY2nFJqVnpp8W34363387aHgOnO9m2ouF+Z31u0la19tN/
XojlTpzJnYiuzAkHGAgHp1z37g8cVVksZ/vFTt+8V6dO2enqByDjnNerLo+9TmMkKuAzttw5AIVTg9TnI6DGPQ1nXOjSqpXbkN8hRuHVV6huD0zwR+PFY4fH8zs2m7rRtWtotLNK+mzt0XmU5OKaTk3a13fS9vu0vZej3seZFBtwYzjcMEDPPPQjB557fyxTSCM7SM9CWxg+2eOeenr+ddbdaP5RDgOpOQFwQG9QD2GOo9/rjnZIREWBRwQSAp+79e+MfTHXrzXrU6qdmnfm1fo2vyvq03/nzKeqTV2nqtNXddbevl53KOzBG5sjIyDywBPp2wOAfTsKKsnaCCCckYHqW7gkYHbjjrx3orvhUi43un849l3idCqqytFfh+Huvp+f38FdRxhmClQxwNj88c53HjngYwPzHXEmQqwcqVIyPm5UDoNo98Egn3966e6RHLK20uIvvgkKSOR8+PmPLZX/d57Dn7hdzDAO4YwCeuOpI5yT68HPHNc0NdL2lZejj7rS829X37dEcNJ8rS5nez5rrd3StddHvf779aQZ1cAptzyARgkgdMf3e5Of51KvllG8xj5x5Ugkg8jOScZxxjgfhxTGDplWUAqeSQNyk9BnP4468nNMBbflwVDAEN0LDHOOOcf16d60TvddFqtdr2Vte7d/JLzub8qeq0sldxvd2s130a3XVIRFIO7eMgEFtuC2f4eOp44HTjr3p5ffnkiQAZKjAxggFhnn69PbilIXBIJYkAjYdw4Jyz8ZHb29xUCkHHyALKSEkB+bI9SQfyx+uKd7vR/Dbd7aJP+rWf3Gi967tqrW0Xl0dr2XS19PJXd5ahdylRjhlcZZxnk4ORxjj8QRinjYishZQz8AKu4qeACOmGx+A6nGacqrtZSVMwYAOpwcnHPcZ6Y4HU/SoshXDlQSF/jGCSccZ6Z44BHToRTlLVWvpaTWuq09dtPn660lfdt236X1XfdbX62toravGDtYyhiRtO4YJz0KjPLLg9OBnB6mokZnYsdg3A4fABwCckgA/N+oJzgkimYDFSOWLEgA/ID2yOcLxz09MYzSKWjcFk5QsMEcZGOBz15OfqKpy3avp301ulbo+7+evYrkaT72TV0tFe6203fp5WRMNwfcxIAwcMPlPXGTnPJHII9MdKeCuWA3b1+YttygyOcfkMnucYxjlQqhSzNgPjndu5zxwfoB1HPSly5AUbQM7WzjoOp57jqCOgz9acZLzsrbrTorf53f4bYt+Vvs32/lem9/nu7aaaEQfbvyvlnCnIw5yfQ8Aj8cj6UxyM4yWQZzHjjceMD3PHsOOxqdA205Kk4GWOCWOeCvH3h2/PnkVDKW3ABhuABCYAyOSG3A9s/XORjrW8mnB29U/W1u/ZW0trfqVFpyun8ujScd7LT1WzfQjPmK6qoVclSCxLFW/2ifuM2Dk4OcD3Nd/4SWXzlaPaX8wAqG+VsgcZx8ue559MDjHBKpkmBbLKwVuemAecYOeo5PX17Y9Z8IWiTRlsmJsBehJLrjDMoPAGc9c857Yr5/MJpUZ2a87+dl6tLV6dnulp3UIOU4tpXvFppvTVPffZLR9O7vf2bw3bSMvmlAilT+4PGWBAyHyQAvB6fPnqMc99LabEGwZZQrM8gJHzg5QjjGSB6AY6VheGLFolxKMR7AySgna8fBjjc45JO70Pc131yimN9weUtGEbyl+7jujg/Oy59M9cD1/NcdX5cRo/dfKvdb0TS2ur6b7vbVs+rpVY+zh1VtbaXeiSs+9m/8Ah1fyTxFDNFFJtDGRt6OinKIQPkEZx1bnJwAeAR2PzbrcMbTXPm7AwDOsbj5c5O0t6svO0HHJB64I+nvF0caRN5UpVlAV/myxb+9IONzAc57c+vHzB4lkMV1MHQFS2QRwCoz145z1Ygc9K+t4am6icU0m1ppZytyvTz79dVr34cZUXJK10otaXel7a3skrtu23kcgmd3HUgsHON/XgEjGMYIHX2qQPJvDY4U
gkkDI6rn0weuM9BjtxEx53ZBDIMKPmOeckg4xjIB+vTvRHkqFyCWyQSckk8DJ/wBkfKPTPcYr7SvBpXS2srXVl8N9NtLfO/ZnzdWV/Syjqr21XvLXsl66FkzuN3luA245xj5s8YB7AHk49enWo5mMiksDuYAFlwAAOhHJ55OTwTn1ByMAARjaR2BzycZ9MjnrjjOKYFI+ccKSSQCDknoCvUA4OBx0PbNYUo2Umlyy00S7b38una+r6nIrXvfbrZavRx1Xyto09n3IiX6bBjIOTjke2Tnnn1x/N6ZJCgruHTBAB46c9x+HfJJ6KCuSuTuOMAdD7k/l3444OeGn5TwoDjHGCDhj1PQ+/XsODWmju7dr2urPR9rW6Lut9TTV6Wt2dnu7a+npp+jsp5jbSd5xlGX92rAZ4IPKnOdvHucUjHAIGCBgk4z0zk9ARgEkg8gdeKbjDbunHJ688Yx6DIGSfyoDqZBz+7PLAHuM/d7cDPPp71V9Vvryryvpdelt79bDtfzsk9tG1bS9u2y19dx45JKn5SGxk8kDp7Hvk8A++KjA42hvm6DsQSB0/oeo5+pkKqAxQMoY5jZSM4z/ABHnk4+Y89MAcVAQ6yBskBgWUkkK3BHUcYGccD26mne3NZrZ/hypb39PPvtZw62fS6Tsn0ev3tO127aX6SgMJcMCTgMGbv8A3iGxwOuBjj17kbywQVQnIxhTkFh0wCOCSeT9cDvTEON67iCuXZj905xyv0xgZx9KRflcsxO0gkdgcgcccAkjp78HGalrRpJNK13vvy/rt6nXFqzstHq+q1Sfo9PKy8tDRs5UWRG5PyHMcjZBYkc8jt2/Q8VoCRmDhQApBOFXO0kg7uTkZx+PXpmufBkUllTbjO13HIzgghsg46n8z3ybSzukZDPknkledpHQDI75Ix34Oec1hKjeSnvtZaWT0t7vzbV1f1ZtGtJJKdrK65Ut9kuvp08urNCedljKs+SqgqxXOeoznPzDkkdBycd6yp5pGbcdm0DI+XY2D0LKCcluMnv6YOCyS5kd1UsuOFAYbQVbsB9B1B/So4yrAqcbtxfdk/e9h3APf8xg5rWEeW92+ZtS0V19ne2tra7bmFSq2ru/S2i20s5W32b76dBPvbNgy6A52gbSOeQD0A7n69McKCwck/LwCQD8oHGAuCcZ5yO3ryRT1ADZ5YjIYkYHfAGODz1GB2PplqFCXHJIwVwCQRjowyDhT35+nJNU3Z6d079b+nlrZPVd+/PzXe17JO9ubdrv2sraX3fkxmQIwx83UnuwJ4z2OPXPQ9DQ6FgoQDG1t2OScjkknBPc59Mds4ei5BGVDAjJztHA5AODng8e5I6k00f6zqw3DIVgAPlyCOTyPT1xkn1Ot73s+bWyvez28rb69Lbtk33avo23fVO6jbbpva/btqIOSFKhDjCgHBbPQ89F7t6Y61ErBTkBe6k8kZA6qeAA3H5VO2E3EEsfl3EAHahyM8kYb6dOM1EAC4IbKucFWGCR0zzwRnvkcjJFJWV183rez01187rfXa/V3HVPS6su77N3vql+OvZpDc7dzbt20AjceADnKgdTjtzj26Ye21htxtLBgzJlVzjCt3yffg56jFP2oNy4wSnylSShPXL91JHbkcCq7rkqCG4wFDEjnnJOcnbnHXp1oUVezbaTTT8ny2TT6Lzv3tuapJ3adrW/Jb7Lo/Xr5gBKKQBIysVODy2DyCM9PcAd6dtAYOw4PDbgC5z02nOSM9vXqehLYyEZGZSD1cAj52HVV4xjHTgg81NJtU7lLHkN8uGwpz98cfNx+A6elarV+8n2tfS2jeq/rffZJqzstb9V1TWqWlvmk0tNRPlKumSxcYUbsBSvQuOnGemepIBpr7dyLGpY4AweckDkEnGAe3X655pnI3ldu3gDdxJgnnJzgtj0HHB7ilVtshJPzAIRuJ2ng8r8o4HbPNaJrR2ul+Ksl+Glu9+gJO290rdk+bTe3rbdbboj5wzgBcj
lCQzBuACvQqB3znmpS21UAYq6hd7L0Lc/IoGM9cDGe/pgK2xVLMSJDyCuTnn5gcnqc8HIA600qpbBIOOi7gBu6Ak45ODnpk465qHKyWl3r+nl3t9+i702tNG7NeerS69ei1303sT+YSX/AHjFHjXzMKC7+zYI5yBg8ccmmtGu0kFSSB/eLgc5JBPfjHt9BTfMdTlMZXAyPmbjIyWwOvHbrTPmj+fbyASxONpDd+vTGc9McZNZyd2n1t520s1a2z79u99TO2qaVtLdk3ZWfSzS279L9VCiRSoBCkEq33cDPBJ5+br9fUdpBht8YfCqFAY8HOcnaADk+2ccHmmKzhQPvDcQqFsgHAO7ZjnAPDZx7YFKfMKhQ5DgbSFI+VR0U4By69/T36iU9d7Wld6ddOmnktOmwOL7aXTV7O2q12vfppa/VNXQgdCQrgdSFBHDY7Nyc/l2ycU7C7C2AgYhRzuOF/iA44A6DjODimZMZAOVkBzub5gxYHB6DGRnDe3rmnIGAbY4wVwdvILdMZxjHOcAHPsBVq6XW+y3t9nR32/J6rsNwfn066N3W3RdN9X5a2RJOVG8oigk49T1CjHBP1Pr1pwLNtRd+TuVieQwP+AHXtnJ605gqKCgV2IwzBsop9CpAw54AyTiomXOSDkY3bWbBTODjb6HscjI/A1Sb6pq/k/Ls9NuunXyDkTemvr2Vnum+6/NaiKFLARxsHK4Yv8AdKjG/DE9enHH5DiYooV2H7sAgjnJyOGJ6cdML3x14quflRtpbkjax5C45II4H+I561MHX5l+YuVBAAJ35+82MjAx3x6D2qm7aPT/AIFlr1Xz0Xlcp6WfxXWtndq3K29ez6PZp+gsbBnUkloQSxOercD5xj9M8545yKCVTeqtno2XTdtz2BBGHGOPT8acyIG3R5CEcKful+/HbPrzj8c0LFI+4gkKc7lGQ4P8QBIBxk8YHIyKnmaStfWy16+Tvvf7vuNIWbUbttLRK7bd0unV/n5b1y+XMasdw5Vcc44PXJycc54P4AVMrDyy5YFlwORk89h9Ox9c47mrcdlcTEIkb7iB+8KsuT1wr7fmGBzhTjjA9Zl0q9jRQ1pOJC4BHlucqDnIGMNH6tke4zWM63I0pWTa8k/JdHt81fV3endh8Bi665qGHxFSG0pQoTnyyfK/eSWiaXXTbVLUzEH3QMA5+Uk4zn34BH1Hb87Ksxdo2YggBQAMK3f8vU5z0OMZzObOUCUyIVKuflCsu3GOASuApPy59Riq7tnghVIbAIBLZxgg+pI5Oc889axdRzTs2+z63+Wj/LzJlg8RRnyVqdSMrOcW4crabS2lvZrzab6W1sxxKxUkHJwMsMMTyRjrx+HA7Hdita3iIUHABGSH79fX1PGeD24zWTFIOPmDbRwQMMOuD14Ax06j3FXorngMG3DkYXHvgHn88E4yfYnGabsvxe1+/TXtZ3XRd7guTum1a2iaW9lvbvp2Nq2hwQMAKy5GTgHvzweT1z14PvjRIVQMAYxt6ZH/AOs8cY/Ose3vFG0OOo+XIzj8z0GMf/WNXxcKuHLE5AO0DJAAwcA+vbPGBmuKrTno9lpts2mlZp66Jb9OvQ6YqF042sk3rp1V+9+qel79WtC8hIEYJ2gDIAJ+XnIAByO4wR7dsCnB9rbpCBxng/Nk8gnAA5Jxxj+tVopo2cEkbNpOXOcdeCuBkjPXI7gdaSaWMbsyZXPK8gEDuB7jPH5A1i010a0W672/DX/MuMbqV3Zp73fw6beTV/xs+pW1OYMG67wCGfp3PGcfM3vgemK5qWONs5BIIIAyMkc8txg9sfj3JI0byQOxCtnJwADnA7lt3XjHfn61RYRjdvKjaDt4IJ9OD1I756c881rRvGKd+VXvZd1a6s+t++ljjqSU1LVpLq9F00v5vazW3mr5zRxhtoXPTjGFypyeT6g8cdO1Nmj24yv7t03hUy3HRt/A29OR1/LhrFwcKwwwO0sRt7nHuT2/HIpWkkX
CIx3oAGCkEMCQTt45GMAkY9sGvSpTcWne60biu2i9Vrulp6annVHflu72euuq217Le13vv3IOoXBKxOCihSFZQP73B68jJ64PGBTSuS0ahsYDAtyGx1PT6+vJ6809tz7SFDclXA4PHUY7cY5yPyFR7tinY5+Q7cFdxIJ9QR1AweOM9Dmu+Lum1ZdGna7b7P03fbt1iKunrre68nfV/wBK34h5PyvIFLDapIOA2eQWPJ46ZXv6gDNKS21gBwcZwfugckH+nXinl3AJBwwADKML8vTaQSR25Oep56U3GclFUgNkEtlip647lR+fPGRxRNppt7Ky100TV++mj/P123avs38tLX9LaW/DYfHhwuQfmO1WHOOflx0J5BJ9Rxjity1aPeoklDbFB2kFVyB8oAyck85GRnAycYAw1A+UqMM7gHaTgepx2K88jOMnAOcVpW0iAeXxkMGEgGeM85bjdk9cDvz3rnhHnhO9007vys9t72fbSxM0te6bS7q22vb5/jY662jBZWUEo6ktGQd3XBbrnHp6c499VHgRIwG5X/WAcxgnnYRxl/cj61gWlyAMiRHKL5ZA6jgcx56MeOvpg5zWtG3mKi+WSsm4xMwAAVeTz3bnoccDrg185i48sJqS0vd3ST19dHbR7vyPTwOrj8Ka3to3e2vn8/K256v4RtV1K5tBbmONklXeygLIW7SEk4IHQk4HrzX6y/sXR3em+NVge4vIVNtKWeGZhFFIyAody8NuIORjjg8ZNfkj4FeG3vbMCQMjz4mETEnGQGyvJA/uqeRzjrx+nnwI1eXQ/EOjPomqRxSSSJFH5r4V2nAVxMOr7sAKDgrgjocV/O/HE5fWnS0jGpFpKUG7NTUr3e1trW+W9/tsvSlCN730badk1tazS3u3p6a6Gh+28up6/ovi+Wa/SNrFt8NwAXjurePcLiAXJJzIAU4KDknHAr8LLgxm4kA5UOQOcMV3dOpGM9uwwO9fuH+3FaeIvBfhTVmuYzb22rWqTz2oIe2mS7BYXLIVzA0hBOMsD+NfhrHLGzvIwbe7FkAOUALknJ5zgkH6cjJ6fqnh0nLJlVTThKdOKaa1/dx5l0fp/SPm88pqnVprVv3mnfo+WzSd9En8tem8jq2N5Qg9VUHG0dMsBnDHqSSCMjNQhdvzAH33H5NxJOG9fy57cVYds8gkNtG7+IAjj5vfAHPYDpURByQTtB+fOfvk424BH1wfqMdcfqtLbb7La1eluVff5vbc+Uk7yl2vbXy1+7a3ZryGyKrjBPDf3OAHHJIHdT0znHHQYpv3FcrLhgBgDglfQg5wFz90ehNJvO4bwFQHoThgeMH0wc9+wqaPYQ0ZAMinKuT1/wBoDHbOAenf2rrg/dV11jLTytv/AEuiu7GTbWttFbS3TTq1ZNaXS26a3EdkAUqzFggwY/mUuc5U8jkcYPbHQ5phRCFJG5nJ2jOApJxh8Z79v1ohO7ZuZSrFsHOGyDjYeoBHr37dhUmxh5rgsF4b1bOeWI9fb9e9Xe72vbXe+vupJfq9b6tbjTV+XZ9lfo0uq6aJ666uwijYxUhABwrZyx4+6jdFBA44P6mkjO6QBNrZPALcjaPmz7gdTnP4U1zlhHl8qUCseFwc5JJz17Ht6cZLB8hxtCurcksd0gJ6jjCqMcD8umaUtdfJJW2vp002V9Hr26Dcb2W99Fto9PSy01S7kxXOQA0Ue8HYfmJfBLFRgEbumM9uvFKjxlTlmbqVIBByOoI5ywPcZz/JzmRQjoQ0u3c7o3y7WwMYxyQOjZO3jjsUQ5OxVU9SSe2CM89CexPX15rOTWj3s0r9tFfs3p016+d9EnFx02tqmraWTVmm2tbarTzaFk2vhEZsFgXJBwGYHqe/T+vFKEYfey7IQdwxyvcgep4yBj8qSQtvwg2q2Ayk8KQDkY7D8D0z0FT2rb2Kkc8jOSS2O3T1IOPX04xDldJpXWl0mrtaa28raP8AK510afPUjGz6bfhfp6Lpt3L
ttGsjA5OzHKkAYPGPTofrj1yK6GK3QDcRnaMgEcH1JPYdDjvye1ZceFRFG3qeARwSTk/j/XHXIq+k5CgbucAbTwMDIyOeT747AHNcWIk5JKDaSlql1tbztrva92tNVt9dlmGhG14rmkopuS1srbXvZtvdJ7JbOxrRk4ULgemeAM45HPHA65PGPxvwl2JDOcHAGfU8fLwfx6dueKzICSVbPzYBGecg84APfHPOfWtyBBtUtnDc5GMjPIOOM4Pf17d68evPWTbTta2l9VbTst10tuj7rBUkkk9Ektr6W5b22T3XZeRehjIIBJDqQV4wCCTgA9z1Jbg5HTrXQW0qwkgFHckMAeQew47E85HsB3rBRjtAGRgdMZP4/wCPuDg1ajL7sFTkEANjGDjnDemOnT2HOTwzaldtpWsu3Vaq9rq9r6316vb6XC03BL3bppa2u72VrvWz1T++zep19vN5hUhyCwxkAZU+jd+MEAAHvnkcdNYxtujkADBT8yv0YZHOD/F/dz1+nI4/TnCYy4RlJYFiBuHBzk8Ejg5966m3vVAVVdZOu7YMkYABAOenTdwf6V5dfS73WvR9e/4Wte/ft9bl+8G9003bRO1rdPm+t/Kx6ppV6jCNPMCbfvqFOQMjkAZJJPp6cc16lpN3bOqO8z7VACqF5cgYAI7Dnrn8RXgNhdP8rRjMhHTg4yRzkdG9scfofStGvJphEuCGUbSP7xz94c4A7Z59Mevz+Khbf7769LO33tdtkfp2R13Jxi+rV7vVfCk49N1qk9btWtqfR/hjUYXeNpspHuwnlkrJx9zPGeucg8ntg19JeGtetntoYDK8YVVDsvRnHG3pwDk7j9c56V8g6DdJII41VmdMMzDgrjO7cP0GOenSvadI1IqioGES7dvzDaduVzhwD8x67up6fXw8RFcrlbe1/O1o32vtd9bP7j9eyf4IJprSKXZ2UHeOjs79Pnc+0/Bmt+QYAlzFFAXVZy8gZgGPzAuTyxwCDgdO/Wvq/wAL+OJLbZCNQUJFLD5IPAYDOCnOJZDk44Xp7jH5kx+JUsUhit5opriXauxHJQKMY3ngBxu4yOCPfB9N0LxzdvNbxzbUW22lcDDTlwChDg5AGCQ+OmeK+cxVaVKaUY8zbS02uuV2TS6tv8LapI+peW08TS1cdFLeN2r21ld76PV/fY/ajwN8Y/7AktbuK6aEpEknmufkM7cFHQcFnx8xBHQYHQj3K+/aF17XdHmtJtcaTT2lA+xjIjReMyq4YHyxkY4x24wa/IHw34ynljtXmu3lt5IljVHG5S6gbmTDDLLkYfv2FeyL4+t7DThGbhGmMbZcvlChxt4zwzYORzyoJr6XJs3rULKU5QhJKyjN2W3uu3TvfyXY+Dx/CeEnUjOdGnUq+0S5nFPlTkpJq6vGSaflr8n9peIfG/m6PO0q4smUq7QOPtF2HDARxDkgSkHdJnPyjivxC/bWMeqreiOzgVt7vE2zDoAGLrG4PLsMEN6rzivtnWviE0mliOK/WOaaMO8asGbPzDHJA3KCMNgdTx0z+cH7TevXE+iec7brqViYnEuUeIhgCAQCkj8licnjnivpqGY/W69OK+K2zas7KL1d+ur1trbufCcWcOxw+V4mclZWm+WOjtpe6aaXdW89z8Mfina20WqXEm5zkMW3YCy8lQsgBONhB2rznJ5rxVR8rj7zEEDdnaOy4wOo+pIJ5zzj1P4j3cc2talGsgljWVvkQgqMNyo54Cnlvr+fmAQlseXtUgFArYOOxIxyPfuOoyK+7wfN7JOctlaztdaK/Lq72079vT+O80w0XjKrhThFRm9lq9V2W71bdltpsOVMHAdS2BnIPBPpjI46Z/DGasRIqkjBDkgZHAIwc/MMn0xznsKYFG8bgFwp74OQODk9Rz6ZPHPerCH5l2nAHfrn27k4x16Y7EnjWT8uiTv127vr06aIwoYdKUbJa2stU91pZ27PpfXfdmraIcjjjOBkA8emT19c4/G
utsAw2AHAGAMcgknkY68EDnooI965a3LAJ1B+9npgnjjjrnocdM9K6myDjyyOSM9TzyBkJxwR1H1ryMS372z1fTVW2/p9+p9vk9OKcVHrZtW/wp2031/HyZ3Omn94jsTt5cpn7xAw3B6c4wD3J9SK7uwYR7ZASQckMedqnrjplj1HHJ9Otef2EoBXkFuhAOQcgcY6c7fXHHNdrYTLtRfMJYLgAdFJzgkZ6cc8YxjFfM4tt3er3Tvqla2jWnay0XX0f6rlEeWCaV2oqyfe0PJPvpb0O80+SRFLKADj72T8pYjAOOT9c9z9a7uwuwFjT5mYLh1HO9myeOg2jHXPHHrkeb2TxGPLzKXUjIBGCvPVRnB6deRjpzz01rdRARv8zBgAzb8MoB+6Rgj5uvB5xivn8TTc72T1tbpdrzTv/S32Pv8AAy+D3XGSSd9Gul2no9dOnpeyPWNJuSqwlWEpAaIKVwQWztY5IO8EH8xnvjvdMvhDhjFskGIwznJcsf3hU9iRjBwceuc14hbXxz5iMEkJYqC2MrwCy4xyB0z78V2dlqJjWAOZHl+8eSUYuc4H+7gEfXnOa8x0Jc1217ve929FZO1no9T6jD1bQi9Y20lJu2vuO+3Xy9Lnvek35G5ImVUJX753CQgno/HKngAcnJHtXpthfTvDEXAlAQKYYxkIR/GEz0PXOegz2r530jWY9ixM0QAImJLAOh7jt04yvGeucivQbDXnWPcJl8tE3Dja5UDAQNz82eQMHOPStNLPRRd9eq15de3bTpZHt4fEy5UlKKTtZvVtaLmV/V2beup9C6Tq8LG3hcFycBGjYZhbJ+aQADAGfvZ4Hua9R0Oe0AkeWVIxAm9pS4KjP+scPnBduAOAcde1fH9j4ojtAJCq7wSVYk7SJMcyepIGS3YjPQmszxH8ZLTQYpSNQjLzq6CGF9wTbjKxjP8ArH7k8gAcUczlJKMObmSSUdXfRWaS+7Ta+tr3rF4vDYen7SrW5LJuWqbe17K9k+uzP0c0z4v2Hh2y2T30NtFFI0duDMBcSNGchd3JCJ1Q9wT3IrnPHn7cMWlRx2+l6xPfXMPli2jKELHKoYfZ9wfBTnPTnqMd/wAfNY+Muu6+wjhl+yWpmLxyKSJmiY9N2fl6cEj5sngYFYy6pJf3KPdSPNLFtEYYE4U8l1BJwVA5PPWvVwtPEqPvNwi1FNJtaLltazbe6vfW9/O/53muc4TENvD04zlooTmkkrWbldO7vfbbc+2vF/7Q/wARPHL3NvLrd6LS6mZnKuUSOCU4SOOHJ2KMMMbjjHYGvOlvriydZ1Mk827ZLPKS6srY3y8nh+fcD1NeXaTqiQKBJJkhWXchGQTt2k46jjnGPwrpYNTnupI4T8qyjAB6Nzlm3854PA4Gcg9M10X9k2resnd3fu6Xsn8tLX6nLhnHFqLSTacX1iubRvbRdLPVXPcND1mKKJllVyZwJIyDhncgD738KjJwcevc17T4WcXEESQXTLhg9xsbaxUct8pzznAA6enHX5c0pHTyGbc6sCFJbJCqcsA3Yc5AI65yfX2Tw7qZs3ja3V/MJA80EmLoNpCng98jOBjB3HNcVTEct18TVk29OVpLbW9ktei7J6n0WDySNVqU48rbTbuk2lZWk7PdK3prpex7jNGbqaVzHtlG0HcmQU93yBmQZ6AHpzTrLw9azTiO5tDgbpgNoGwHBVCxBzuP3UPDEdetLpWuxyKkUgUsqofNcBUEoGQrMRyvcjJAHU8V6FpslveFLhVkNxJhjHs3Ru0eMs2MZjwxKMSBycjPTy6mYTpO0brVL3fJxu33su34Xd/c/wBWcM4WdPRpJ3UORv3Ur3Vm35btJ7nE6n8F/CXiWwuPt9hZzNPDtmeWBHaV3GAZB/ARg45OM9cHj4p+K/7AuharBe3vh60FpNJ5txDHGQbeRQMsGwAVZSRt64yc+lfqlpGk/aLMgxPHHc7WZkPztIpPAfPQA8qAC3qK7VfD0U1qIwd
xVY4nD4LYIbegGM4YY3HnOM+x78PmWKhGTp1JU/d+KLlfXktdu60s3s0rs/OOMeAsszKjKlXwlObfNyzjCLs4pJ9NlupJrva+/wDJ38QPgN4y+Gt7PBfabdw2EMsipMiE25DEZ3S7f9ldpwRjOAAa4nSruNQ1vPCxk8zH3fubMcMTgAsTwO4BPFf1S/En4FeHvH+j3On3umQNm3AJljVNyhW3HftJbkjIHJ45GBX4o/tD/sj6l8P7q91bw1ZPNp4YSGNR8ibizKroATHgg7GDN1OQeK9/AZv9aSpV5xhUTioyk1abXKm7rvrv10fY/lvivwuxuU+1xeXJ1sKm6nJKPNV5WkpRp2i1KMGtnZ7t72PiiSRWAMOxoziVs5JUk459+ORnAwOOantraG4CM6KQ7FvnYBjxzs4P3eBj/a9sVztzNf2U93a3cElvcI7K8anCx7CAVZyB972Aye9Oj1Z4XXyyjOgWTYWwyjoVUd+OrEjPtivpcPSdk1aSaTTjJNNO1/eXyv8AefhuIozhVlTkpwSlyyhKPK1ONr3Ts7t79NGttTS1HSYJPN2qQU5Ax8ue65Hr1646CvMNV00W7ZTLDBViwOFJzjPPbHp6e9eqNqCTKFLAs6qSvBALH5l5P0yxwDxwBk1zeqQwS7zgkYA2NxkHOfmyAQCDx/OvWo1HB6rS6Vmv8K36X38nr3ZjKMUlJJrR6STT3i7LXTW2nR+tn4/JG0b7mTacHnoN3boD8p68AYNFdDe2iF921lLAhS3G5QcEHsecAHb6iivWhJSjFuNnZaJq2y8n+noc08XT5tY62V728v5l6+XS7TbPMtRErKUCKqMhaMqADlT8xI5OWyO/POMYrm2ZAyB/mwGbGduW7A85UdfUfjit3VZ/mIWQSFlC7QPlXOcqOhBxjnkDjB5GObcM7KdpOMgA9MADncD0B5H5jvWlBLlT721fR6JrXdK3W36DpRWr1SsnppdpK7be+r66LfzGyn5yVyVY5YsM5fv1xnHY9+uagZCyhscAkhuo7dR6fXqCcEHmpXLAsWBDDOF6ID/CFznAHYc8Dr1pBjZlsFQ2DggPg88cducE5xyDW6te2ujb0/7dte36O+1t0bq8VFrV6Xe7eyv5vbV6PUhWN03udpBAyo6nJwAAP4eTn0GOvNPVNqlf4t2VYHdhscKoGAMEkZHA54zVglGQjb+84A2knDfNtJGcEjkn0J/CoGLhkGQCSyuW4GBnKr0PPOe4/Okr+82rP59JLR33s/z89dIuUk3onfW6tpFKztrdbpu2t9hiCRiRlV3AHceDx155HQenXr3NXRah4nLbmZTmNgp+dRyS3bHoOcgVWtinmqsgJy5zgnAXtjnDDjjoN3cV09qI9quThZP3TFgCNpz93p8xAyB9eoFZzmo9bOyu/PRbapXbb9NO1tkknqmtU9FZtK2ie9ntorX+8wI7AjY3yMrAlmxtZ/XI5wwHA5/HHSc6ad7lFYeUMfd3Kf7xJ3Dnpx26+tdhBZI0hlQb8FY90gJjUNkHaexfAwexB6456Kz0bex8xGHmAkoOV4xtJYD7pz83HpnrWU6ri1Zp9OmrTV9tVdX/AOAr2pqT03Wlt1aL9OvotLdkeUnS7hYt3lEhm3oo5kwDzk4GFORhcEtz6Gop7Z4mAwGQgL1JCnHIbgbPod2Oe1e8R+HoRGXVYmZysTN/rQ82Thl6ZYZPPAPOBxWRqHhLbvkELtIyEKgTbuQgbsYJzk4/ungY9aUcWlJRkrJauy62S31Vlr59bkuEr81vdWtmlpe2m/fro/yXjKnchQAxYJBKjJwM4A+boegxnHv3CxEgWSNCVAbC8k46H1Oc5I9+R2rvZPCohXmOZQFZgCpAcHoG5OCnOFxzknjknOfQpNzKgLSI3ygKSp/vAnPQYHGPbmuh4qm0ul0+mi2V/Nequ+yvrUaS5trpuL7auzvfTS3TXdWt05qGOQypIgJMpJI/i5IJ4znJ559u/Ne5eBrVyUd
vLEYJd1wSyqduWHuDj5e+e2DXmMWjSxTrKxZAG48wbRlsbVwchSefl/XivbfAyi2UIq75ArZTIZDjG0BjgNzkkDuMHjmvns3rR9g1CUZWv6pOyW2j1b1v5WR1UG4NJJebsu6totXa1vme76NGphjWNdvllJpCmP3jKD8pGDtyM7R0Azg9K0CJQjMnlwB1kMceAwicYwwbI+cZ+c4xkhfcWdCgR7FzKq72XKYG1S2csXI7JkbepOTyBxVHUSkTHa4zGcbmO10XOMMMkOzY46ZAJOMcfnFf95Oa+1F6Nq76a2b1ts7Ju21mewpNJWvaSStdJXVmtE1orfC7W62PKfFyNH55fBYr5jAA7wvaRcZzu54wPWvl/wAVTF5tkTKy5AZ8ElpOf9Zk8n8R0+lfSnji9EIkMTu5IRXd+iswPynrz6EEjHPXFfLeuGSWadmAA80liOcAjqGGNoHYjJJJ6EYr7nhOk06c5KzhbdWTdoq1ntfs76W01OPEy0kk2ldea6aNXtZa2tdaWt1OcQMGyTxlg2D1BPt2BHTA9AatqGR8dD1U+59B2Hv2z071GgVHO8YUgkljncM8e46f4dwJpABllOFKgqV5w3OSw/vevrgehr7qtPRqy1u3t3jfz6d7+SsmePVavbV3jvpq7rtd3V9ra2dtEmIHO4sX+bHMZOFBHGVzwRjkjg/kBUsW2QyICQ2CwbPyuV56Y6jPA/UEVTUMJFZkBz34Gfqcnjjp7Y7ZqdS4DFl4I2jIwRgnnHUA5H179q56cfemm+ZWTW2jdk9ra6ee33Yyjo2nuo2ael01oltZLo90vIWMspDblJAOcjGeccY/iHPOf16SOyMh+b946j5lGdz84LdCTz1xx+dRZUkgE7lwMHoAckhugODjkj14ApAgVmYZ3EnIxjae2PqeSOuR7A0Pur6W6bLz9enS1u1id3d30s7WtfVSs9Nuvyt00jZGbb3AOXxnkA5xjgY9z+VJ5Z8vIVVAbGeGkOe4HpxjqeMipBwmHGWVskKeWyQVG3Axt9SSO9SqOJGUKSEON+AR2+XH8QzkE9OcA9i7tba2ut73029Xrb/K5fM4pbfFbtfb5WaaW2it20iVgHy8mVzxGPlG44OBgsQM+uccc5pu8JuALsuej/cJPUKMnoc8nqfemiMqw+Us2AzFgBlWP3icsOM8nqe+DQUILZLMcgeXnKjrntkZ6YHQYq1FrRO6cY3W+9pW379fPrdlWSe71Svay1uui29Xo097vVfMUI+FGGO1sfe47LkdORk8/Q93b1AK7QcgBUPJyenygYJ9yfrycU1lGdmBHgqFO3pnOS3uc8nnpwOTTWTYWcAEfeDK2SF5z/u56kc46dKbgnfS11azXVWtf0stLGkZaNa2lay891o/08/kbmYONwUZ5B4xg9D1yV9c4/qgdnkVJF25APHCsuDj1HY5PXp2qNMI288oc4HfI6Bhk5Hcdjz35p4wEcMzfMQQwG5euQOCMEdOvH1OKmz1bstFZ30a0vp9y2ttYbd7X1tFW38r972s09tnsxNgI3gHGcbSevq2c9PyyehpdpYhcCM/w54BHcHqOuOSf/rKu4/JuKgghdpwwI5BxzkdyO/TPGacDlApZcq2AT82SpyB6j1HPXOM9Kd+VJJNLz2W2+7X+fRkttK/+bsrJXfz006DsH55HJRm2gLgMzHoO3APcdvX0cETyyFCktja27ARx1JxyGAxgHpz1pJZDyQD5gYbiAAGBHbkkkY5P0qL5/LJAC9N+37wXPUjqDnqev0qWrbXs7a94u10vPd9PuISbtd2V4rystt9fS736Lcc0eAqA4Y4dc59+QR2Pv0/m8sm1S3EisAVHJJJOGUcYGOw/PtUpAkCKofIGDJgDoBgrk5A9PTBqAK6h2RC20ffIwQo4Ykdjgjkk44+lD00l01TS13Wi+d+l1uuw0nK173vfonrZbq1l0s9fPvYaVQoRWVXZAnyDkkf3x3Izzj1+tQZZWUjHAUMWXJ
ZuO/U9OvHvmoo4iXUhBtLccjnjjPHbv0yMc55qyIW+cnciBsk5yVX+IBR29Rn/wCumrPVvVaXV3e68+i01t5bGkaaV7ST011i3eyWlrNpdL+mwqpKWl3uC0ihThRgL2br90d8EHoeD0e8O2NhtD7RuMiD5WAHI9Rjnp6HjniaCMs7EMPKwOT1bOckDnk45GeOlXY4m3u2SUCjC9M+rdOc+hIycenMuVne6Wi7PotbW01002Nowum0ox6WVk7+6uuln079ErHPhSse1R3wrEE+uBuIBP07c9anMhjUsyDzNu3CrkEHoD+RJ9Op5rSmVBGY1AOWBZVAzuUk4B7Hpz6/nVOVVY7h8rcBQudwKnABxgEccpz9Tg1UZ7vdPRu2ia89enXr+dOndK6Wm7d47pbO776abrvZlBjyrkHcM5D8hs9PquM89/yw8lShGFVxiUFRncBnoc9Oc55wM4qWVDwTy3BYqcAZ6KRjknjmogdhDMBleAp52jgBT6dSSDnvk81smrWWqun3d9NbvVW38iGkumr6aKyTVltprq+rT07Ea+YMblXGD0wd6Ng4Levf26HGacYQ/bbuYFSx2sM9Gz3UgdMdR2HFSuNhXZyHAZg3KZ6mMg9BnHOQcCmuxYgjAAyQFXCg8Dle7DqCe+OPWOZP7Vrb3Vuiem/XS2t9SfO6Vkr2TSSdnpfr301vbsBQiRiTv6BiGKhdo6KcfKTwSO2OoPIaNrMyFQScgMvRjgFSwzgIvOfXOfXEhO4szO5cKAodeuQeCBg49Dnjp1owvCphRjD7gdoIxk4XueMYIwKhtbyV9bpvp20aV/nbTRWKi0/s3eyTb/utvRd7WGRoqxOhdTMGB3Lld6k8lV/hbsCCffJFMAVHDBgTuJwc9OOH7E+h9R06U8nEioPmZmwARj5QOfpjtycdRns9EDo42upViUyOJQpOSCcEkdl4ye/rLmtdm+rW7aatp12S29S3Lmdo2b3tJrlaaV7Xa06LTe9n0GswL4CkMD8+AAJM5HJyRnjg8Y/GkiQ+Uw4DpyCVIDP2PJPzAZ56ZH0pzHcCScFSFBxhg3Qkjnj1z14xjinMpygR9wJBZzxkjnDei9O5II5NNTm17sbrby03s0lp21T8tDPmWkbNxdk9L6rRpLW9nfr9+l4VyhPmNk/xAj5SSQMjJxu7jjtTnVQjjzkyORtGd+eu4ZGGyBkZ4HXmntBdOwWOCeZ5CAqrGzZ9AoxgDPHtjOOhr0Dwr8HPiJ4xuIbfR/DWqObjkSSW8iKiN/y0wRnYMgFuMZGMd8KuIVFXrVY0Y2u3OailZK7tq3pa9rPpruduDyvMMdUUcFg8ViZNqLjSoznbZK/KrRSvu7eV0tfM1d2VVCswLkkKcA8dhg/mM88YrY0/Tb6/uI4rO3ubiWXgeXCzbecMdw6HkDp1+lfo58JP+CcPxN8WvBdaraSw/Z5le4tMNzbk5WZsLhY2AOWyc9McAD9pP2df+CYPhrSU0q91nRIr24fypWikQCKGU4JUMynz4m5PIXBPI9PnsfxdluCT9nJYyqtHGlLaSSSu7bN2v010W5+scM+CfE+eSjWx1CeV4Sym5VYtVakZcrcYqzSbTsm/O/Q/nY+Gv7J/xX+ITI1j4av4rV2/czuj7GiYj9+Ds5Rc5OAMcD6/pZ8Gv+CTHifWLvTpvEyb45Ns80kjlLV2faY4vLKkHcA4wWHK9K/qY+E37I3hbwvHE9ro+mWFvHbskLNZoZnfAE6h8fI/CCOMg4G47gM19CWvwu0TShJaxaZAVgZJPKVRFEOu4OQGPP8ADgcHPTNfF4zjfM8Rzwo01g1ZpJNc7i0knzNvW2qfd7K1l+25F4McLZbKEMRTljcRBxk5Vkm94t8y5btOWi2dn1ufgR4U/wCCP3w/Vlu721t7uyt4oisITZLDdDmWSGXkrFuClk2EN0GADXs8f/BLT4P2Fvvk8L2l7PIqRSXIUSG3fJBDAqBIQPvD5cnHWv29XwppGlW
LXUAuGtgy+bArFgnqisBnuMnGOO1YU2h/b5pGsGa3tJkO+1mPEchAy5X+JjjqCCPxNfF43O80qybq47EckdI2qTTvpezjLR3d9NP0/Wcr4UyLDpqhl2GpRp+628PDkv7tmrJq6S3kn16aP8Avip/wSZ+DuoaXexaPp0Wn3t3EGgaJdio5HDiIfdJOcoG4PQkV+H37UP8AwTa+JHwZmv8AWdCtp9X8PxRm5SFVZ544zuJ4AztUDI5bA+or+27xdpstl5sWoW6bo8LExIVEjGQkgbDdck9Bj0zmvmb4i+FdM8d6fPpmoxwXNvDA0UiSIpSRSCCASORg5wSPaqyvifM8BWpznjK1WlpzQqN1HJe6tXK/vdOZa7+p5XEvhXknEOBqSll9FVrt0q+GiqM0202704xTd1d6e8n5n+fzd6e2l3E9pdmSK5jYxypIpUxsDtZWU8rtIAxjB9fWKNjCQAcrtGCM4AxwR1x6Z/XNfst/wUc/Yal8BXd38TfAelSNpt3tu761jjO1UYsZXSIDbEqlRg5Oe2K/GWPMJdJtwcZQoB8yNyNjZ9CORz75r9yybM8PmuCpYqhP2jkoxqQk7ThUaXNFrrZvR3ffXQ/iXi/hbH8J5tVy3HUnSTcpYStPmcK1JtJR5npzrRPRNWvorF5X+VS24EjIx6dSByOAOhxz/KxHcqhIDkgLnb6t1J9gO2PXg4rP3sQMg5XjIHOTwM8jjn6cjJ6UA4bzAAGIxycDHoeTgDOMe/Iwc17MaSkldXsra7Xum726/wCXkfI+1ktVpyuz1VnZpuzV1ZO+zNX7WOdrjOCQuOd3ABABxj16d+1D3bvGw8xQrAbcjDD6HOQRzx349KyepkO7HzcsT0J4+X24z19ODik3PkxnCpjGRxwMEEYzge/ftXHWpRSai7Nei0sm7X2dtX5LTcU68ns3ZxV9ddbavbToumnd3JxIoKEnKkMSS2APqvUYxzkj6gACmSS8M5bfG27ciqM84+ZfXB68ZOevrC2CclSQTsUY+Xd2weDg4Hbng1GWZcDbtYghQenfj2BHXOQPXtU+zTs0mm2unp2v5JNJeu5xTq3vbZtPZdlfV7Ws+m3QCyny4xktwBhgQWyep9u57d6TPJZEBKklmzzuGM/N6DIzxjoe1R7cFQFAYnjnv1x7L1x3/lT8KrFiSCucgjKlm2jjnjPOR9Oua6oxaTtrdXd9XstNNfTbb0vi2tnrdPfu2l0v0stXp5ojjV1YSsBhSCQvDMGzjGOOD97jgDORVltnIVRvbBHl4bD89R6juecE96auFLYZixKkDjauB169cdc8D8c1EA0bBmAEh4475ycg5PPQEDsPUV0873SvaMbq76WVkr20v57X1e6vzNt2VkmrbO9raPezW/RX7DFXawkZcHkNleCT0Gc85wM5qwpBUjK+ZjeDjOGXOMnIAIBIIx7dagI5yN7S7icA7gCTx0xz1zxxketOAAZ2Zism0bR7c8jnA9jgnsD1qpq6v5RjZ/LfX5u9zdO+i0asrabaeS89bve/YjgUoySOVKh8yYP3x0Cgd+vIyPX6X1YRnOT8/AKruAyehAxt4zk4wOwqsuwKAoLyk/K2dw3H3IwcdAf4fSpFDLISwADYDhjglsn5VJPA/wBk9eucUqbUOZyVm7Kz1Wl97aLo1bR6+qpzTdnvZW89VZ9dL9ravXQ0IZUScHeQrplQch3b+8jc5ABPGO+Mjg1uW99cbABICI8KAVwdy9DwcAEHBI9OlcqXXcduAyt8o6Y3DgDuMZ5OeuOOlTRTGN4mLMXDHepbCoDwOADg9c/yFePjaKmpvRtWttyykmtXd3tZa367noYS6bdktrebfKtbNPu1rf8AT6H+Ec5TxDaR3lvAY7mQhJZAGiUsQFkyeA3rwRnbgZ4r9OPhz4M0rw/4h0DWL7UXgnlMGq2NpMC0NwUYO4XBAWEErhcHIJ6d/wArfhba3msazDaW0sayxMzptfG9gAwERwc5wcjP0IW
v1T+Eetw+IrLQ7XUogdR0C9FpcLLGcR2MZUFhKSA+e6YGemSMY/mfxMhUo4+NSM3FKKhWjCysmvck272Uno2tn26/cZFerJRT91xfrGyWse9r2e7to9Tuv+ChPxIg8dfD53utGg0i6tdGtbKS8soCE1JYkZYyWwAgkGdoG4qNxycjH4BxuibjsTeAwUY4Rs889jnv644zX9CH7aGr6db/AAjubWTS7eJZbEeUhgDfbWiUZcMSvkq24FQN+MHPfH89U9wbm8nYQx26GQr5cX3N4J+ZT2DcfL7d85r77wfxUcXw/VcIyjCnXcEnO651GCfn5LRNXutWedxhRVLEYWMeRR9m2klr0u2r31eq1tZ2te95YztbncF2sc5A3HI6HbyP73TI5PUmkJYfdw/UnIBDDuADjJxjJ7YFM+Yff42ZB5xgdjx2Oc55OPpyhJVD1Yr83GSVGRgjBxtA7gkcjABxn9qpO8U99V87JL9D4KaSlpt1S79/LTbTrsObA2nDfvMDceQG64I4yvHH8+KcFZUkY8ZABZTzsHXt8vUZXnPHNMdWKEqf3Zzu3HDAcZdRwMZ6dM4JxUqFSxQ/dVSVbOFc4yCV/iU55JIxgda64rRO97fhstrq+vdeT6mbdtttdtbbLre1tdF27XIVI2/KcLuZl3cEMf4WGMKTxzznuKsK5QAttbaoU4wUY5OAnqfYD880BY2jblt7FRwcorLnLnA4HTHUcZ5NRld6fdPyhjGT8qjb1ycEnscY6Hg961STd1fmstNr/DfTtZdmn0BcstU1o0rvV33td66307X3HeazEFyCocqqvgBkX+HHU4HQ59eKkGxkZSCpbJDrwflyQOnK8/SohGSr8ZfIYMQe/VlGeAf1x71PGq7DhsyhtoGdo285x15zgjAHQc0tbpJaKz10SaS1d0vLpfqrXHZaeVuvZq1k+vd72+ZEqjcrHDk7vUAHoqZHBx2GB3H1R0bLAZXOW9wT19cD0/Ekc4qQ7d52kqgJznghsEEDtjI5PHI4HJqONtxCNgenOTgZwpJx7Ent6emU7vaz6tSetk116aXV/v6GkEr3e17O1tHdWe+1+19hQGzuZmbJA29Rgdcf7IOOB79+urFHhC6gKeHBAPTvk9s8AcevfNVIEwyMoYDGHJPUk4IGe3Tv6jkVfRpPmDEbGGMYXAB7ZOACPX6elcc29baPbs3JrRefl/k0exg6Evdmk4yb1uk01dXcXto7O29732IU3EsxyAWxjJ+8ccdOeRnjr9K1LdiAoIY4JHqex446DHQds44OTT2gnLEgFRtHGCwzjHJx7e9TxZVhjC9MEjB+oPv647Z54rCo1Nau19bdOb3fJebfno+iPqMC3TknLW6Su3dK/LpqtHfS61vf5dJBICAOhCjJABIPUn/Ht29a1YJnJC8YwQpHGDxj8MH/APXXO23H3Scn8RjocnOR9MdCOeBW1ACpU8jlWU5GMHsenOfz46HivMrQ3Ssna66au2j03tor6W83c+0wDk+S6WjSb02stW0ld9dtV53N63Ri6ONpYc4K4U4xkjJ45Py/e54HatvYuzlcbsMWzg7gc5B9SRj0ODzxisS3n2iPPJXhjnk4HIGPQ4x7cj1GmZldSSSEboOck8bRjII79B17+nlVFJyvJbW22VrddN2r6LXToj7XCuPKkrp2SburOyjprrrdeeqJ0YI+Fy3sOpA6DryD0PYitexuCr4B2bTuIDc7j0AHJyefbOB9cJHQDILFwvJPHOOefUfp7drVrvJiYkDJxuyQd2QQCe5AyMHrkA1y1opwd9NFpvK/fdrW2+6XVs9vCJqULPW6662su7s+tlbX72eh6Zdsp3SAgD7rehPG7jqDkg/kTXqGkagyKjQsACAWkI5DegPAxz2H4AV4tZSyEquNwXBKqcjn72Rn72Mc8ntzmu+0ucqg8xjEpAVUJwVx8wOBnJOMtyPxArwcYk4tpdlHVNtXSv1b6bPV67H6PkUrTj/LZXb3Xwu
z69F5/ee8aJevCBK0+x0HJGASrYyWH8WcfXoDxXrel69ElqifaFeVcED72Seck/L16gDrjtgZ+b9N1JUjQjDvgLu7hxnBxnhcHnPfkA12Gn6qkUsAeJWMitkqfmfbyQOxKgjBOCc/THz1eLk7+bW9npsrWTXXW/8AwP17KKkHFe81yqO73a5b2WlrtX7adNz3+x1mL7XDNKYzbo5LrvBJc7eOnIJHHTHbNej6VrsKSJPArqk7vI28kqEO0YTgY4HyYPdsV80W2qwKkUp8wxSb1G7kFgRhRz8pBPUZ69RXcWmuNFHEqyqXCKUETYAJBwCegHByeTjtg14dVRbd9Xd28rW0Ttu+vR237fb4erGMG5N81kleWrTt66Xe1vK59heGvEkscYUXghjilHl24f5trYyV7sARk9ASDk8ba9Gh8ZRyJLFLO8kyxZtwoyobGGMr55wccADH518WaZ4hRvKkaeaOZ/kxGfvrJ2LbuV+X5jtBXGAea7ey8SwsySNPMUSJkuGU4USKATzzu5OCOh+vSKTcXZbK27vdX2+WiV9t15urTjJuUkrqcbpK921Gyuk+++jeqaPom3150QPNMSuJJSOdzK5GZCCeowuADzyc8YPxj+1p40iTSfKiuUQx267pCf8AWTgMHyAcFm4A5G3BAznj0W68aRCOVjLMyRKWHlvtbuAMkcEgDIAwoB654/OD9pbx1Jq8r2EUmUik2tAh+WLBPBIOGkIOZPlGT0Pc/VZDTnVxlJ3XIpxV7O9rq/S1t/k7n5X4oYylg8hxblJxqSpyhDo1JqNklp0Xm3slrc+KdRuxLqF1MXLNLcyMw3blZ3PJX16dPUdetVvnZhITkADJwOp9AOBjjnOG9uQYJeGdioLb9654JDH+LGe/b8uOaejHb/dVlOMnIBPv2Pp9MjvX65CK5aaiklFJdNbJaPT7t2931P4UquU6s3Ud3OUpX+07tbu7e17JK1/kSHdnIBwHKgng4J6+vP1wOcj0tQEcYzkDlSOx5Ps3Xr2B61SUN35bcCT/AAgZHPbJ98Zz7YBuQBcjqDngkcgHk4P156fhSnomnqt7arqrX06r+ty6aalBx+JNbpa3srNX189e7222rY42ngZJ6n5s/T/634nFdVZMCEG09Dux13D7uTnoPyP8uUtyCcj72VwW6YGeBxgDJOCOxB6YrpLVmRwPu9GwRxjGTn6/Q4+hzXj4pXTs0ndvp0fyfz00+Z9llMXFxcXZuyaVlq7O6trZ6a+W/frLFgiKVwz7+e2d3Vcdj7dfwrqbCVV7YYsV68jOBtHByRgg/SuNt5UK9SrH51AOOemT7D075xkVrwXG4AjOQcgbsAY/Ec9QOcHGc8Yr5/Ew5nvo9769t9/h0T6WufpGXVOTkabbuuqu/hdr6Nt6vr5XPQrW9QFFLfMwIYZxkjA2hcHLEdOeK6SC8DCNSQEC4Ut038AHjqQcn27YrzSzu/LGCON4cEZJySOTk9PTn866CK92gBD8jHIAYlRxwOg298jn2I5ryK1JXtq9temrWllt8t+p9lhcTKUU5TUZW0TVrL3d5d7fjroepWUyLb5Z0MockuMhCO5I9QMYGeenvXS6fqAjhCxyiXaxbLZDqxwcAEnhe3IB7EV5Fa6wUjkYOApwPL+9nPGQM4znPT09K04talQP+8AwMADlzkD09Om7dwPXOBwzoSu+uzSd9vd0W/TTR9d7H0VHFUuSKlLmiktE+ZO1nrrdW3+foz2NNVSNoiWUMHWQBsk55PPIz69wPyrpR4t8iMRhsSwlVldm3RkYPABwDxgfyr57k8RiCNQoJZeuTuIyPvA8YA+nHvk1xGv+NOTHDduyiMeYE4DP/FGWz0BwGbHXt0zvRwFSvKKUWr66pOzfLu+1v/ArXd+uOO4joZZQnVc0rRtZySSVk7K7vd6aJ7o+gPFHxZlgjltreWOOSISKuxBsmcYyC4bjHUDHfIznnwi78W3d7dCS9eQtI5IIYlGDHjaM4xg
8sOTxgcV5XJrE13IJZJXKsxABO4KCfTORjgnjOfQGr1tcSO292LqdoU5wwYcZU46E9QB+lezQyqlho3aUpNat6NN2bt00t6JPofmGYcW4jMa0pKpPkbtFOXuyjeO6vZb7Lz9X7Dpmrq4kIyQMBS4ztKnjAB+77+4z7dfDrssi7VAVlAAZl+6McgcjcmOfr156+JabcszoRIYiGcgK2CoGMAjpyBz7HtjjtrK+MrNFtOc8MvcHrjrweP8A65orQjG6SdkrrZbaaXX5dOjuxYapPEJO7ettGk7vl0aXNp1vqvyXruk6nKrJJ5pVBlJGLcNnGOo7kYwOnHPNekaPqUrMrtJtg4EbsMqw5yQcjAGOWyM56YrxDSp4VCpKGfa2JFOeSPucc4YZyevWvStEuwYxGWDRliDGei/7IOeGX15GeO/Ph4utKMWlFWSvG1lrdXTsut9b/efovDeETcOdykuZSld6KPu2TV0rbXdt9lsj3jQrmYln3ExDG1SQQEOc98nB7DoCOmK9j8NvIYUkJVoWcKFI5wcYZT0yCAAOnPevAfDsscYhdl82OJijbMgMjcANz/rAQM8D8K908MTRooWUGWMJglhxHuPBOD97jg54xzyK8iVV7PqlJJaPmsr3fWz+7XrY/T8LRUYXUXbmvd6tQ0jaz0s7q3bW2+vtGnPcXU0J8sKkRCgONm3eBkBSeS2ARyc47Y5908LLNCtskipNGMqyPx5bNgDB53HuVxyOMjrXjegxCdrYxYkeeRFUE71IUZ4PHHI5xx2PWvozwXZo9zbxSWzIsbNtBO0cYw7nBOTzg4+XHfJFefUi51FytqTe67Plvd9L20fl0R01anLTsl7qTfLzJNOys7NN211a0WtrO7Pc/CPh86gLKIKJWSQy5X5SRwfnOOCoyNuCTkV7Qng1IpikdlLIfJEjSuu1GYj5NhwQu05wcnua5nwbazJeRfYY1nVQDNxhYSAAw3YO4txnHAA7819mfDjwlfeJYZ4ZIZGSSOSQKVIjTywvzebg8LkBRjkEj6fc5LlaxsFRhFzqtq7UXLrFu+9r312V36n5vn+avDydapOMKP2k3ZrVK8b6X11ei38rfGPirw9fwWgEKwh5GYySFtwgVSBgAAAvg/Mc4Hvnn5p+JHhC18TWs1hfwJLBDD5VzLwN8mDtlVtpyvJ/XHrX7A+Jvgdd29lcm706dRcr5ts0SEqIuSd0hA3MeMLhSeQTXyD4++GkWnLJEsUqwMCZS7FS8pA8kSAqTGQQ2Blh1zXVmHDuJy2Tq1KSipaxur22aW+l+vT0PHo1cvzXDShGdGspw5HG8W1e127X2u9GrfI/mB/ar/Z1k0J7/X9EsWiZlDzrtPl3boXO/OByVPA5xjuTivzImubizuLmOdykkbsrA4fYw4BL8HIxjBBA6fX+qr4x+AY/F+ka1oskMLvE0qOuAZSqq2CpIwzAfe5G046gmv5sv2gPh1N4A8b6jC1pJDbPO7Wx2lY3UscEg5ywIyp78816nDWJVaM8NUn78bumrpXj7vu6q766a7X1P5X8W+BP7JxqzHBUVToVpKVXkVoxbV27KLSTu3d2XbRs8nhvWXlm+Vl3KTwWyBuZTk5A79DxjirM1+jRbROW2psJT5mOOwO4Ybk+uMdjyORkugsjFZGYgADdwQxHoCM5xjtx39Xo8ikjGNyg/KRtGMlvm9cnn1P0FfUuk7rVK+uuzSte2l16eTWiPw5UnP3VqlouZ67rVbXu/l+Bfu5GkZWGCOnJBPPXjrnPQ/UdqKz/ADTyM5KNyAcnnuRjjB77u/TrRW6ckkl+bX4Wf429EjmqZenNuUOZuzb0W9nbZ/m9+17+QXgzIxVQMbSe+CcgkgdD29f1rMUYYOxG4nmMng5PA74zyR/MZ56a/hthbuwkBkwSQpyGyeQ3IGRxg9ACT3wOfYIC27GQo2A4yVPTIxnGOfbjscV1wbWjVndNu9rWtvfr59dFfZvlhLmjZK3u2atdtqy
s32u790tE9yvLwcDgEsWIyVyccd8kY6gZ5oGPLYYUvwdx64HU9sMMgc/zpJA53bQxUBflLBgAccjjt+gPvkSIgaNkHAY4DKDuXHpzz39uK6VJKz0d30fTTW1rdL930Rpa0Yu99VtZ9nd21tbbXWyfYgVTuO1gpAxuOFIB4KnIPJI5x6UNGxLKytwA25uUA5yU6c/TNTqsZYfMSygDYeA5HRgBxnnpnueRTHLbnBYKOjLg/KBycAnnnqc9T7GnJt+7FJq2q23cdtHZq7vdaP7zaG71W3bfVbP0729BI0I2ALtDMQrPj5lHfqfzPBPvmtyBxuYiMOgUkDOeB1KLngrnj1zmuf3nLKhXMXAByGkX1U9we569OKuCWRIUyVGMF1U5fOTkD0A465yO3SuWqrtK6unrdtrVR00tpf72vM2jpZ2vrZaXe6vvo/vvr6HoGn3VuViB3ArgiPkYA4VpO4IHIHP5V3dhcW8RjaIoduxJJZeWAYHcq54y3HODjHsa8RttQkinPIGxfklCkgrIBheM5JIB3N0weD0PS2GtNGzicnDfvFQcjap+baR6Z449fw5qkOzWydk297Pttbq9uz0N46yUWrJLf7TTSemj26q1/I980wQS7giqymTfCT/CR/FuPcZOOBu9RjnqBY2T2qhyplQ5kkV/m8w/3uMBVGeBnaD1PbxvTNbiMMWGFsuzdGZHKsC5GdygHJGBn1z7YrsrfX4wyQ+bEAwHJHzhAMkJ1yWzz+f0427N2vq1ZO+q0dtdN/uNoxVtFbpaW7vZ310731Wt9Ls0L/S4g0zKECYKK64Rdrgjyj1yXI+Z85XAGDmuROm2xkAEYGwEbzgKp4ILccv6Enrxz0rp7rUI5oyzys6PHmNdp+X8M5U985PA5x1OAs0TyqqLglDuJPBPBQbSOo557k98VjUmlCVpa2Wm1np138tPm++nJduzUlo0no00l1uuu1t3v2K8+k7wzyR+cpAYH2759eMHoOg49djR4ZInjSNUiiQs6kEbmPGE46HrkHJOeoHBHleVMphsr+8VeFAHGAP7+DnIPXtWppcMMh8vJ5XcP7oY4xk+nUHt09a8LF1Gk9fdXTVpNJJ7vS3363WxMIfAraptLVq7um7vV67L16XaPWfCt88UM/mLhBHtRFG45YfcUH0xy3Y4qXVGASV4xvCxY3bQSMclm9SOq9P51j6cqQxNgkqDH5jRjczA53s2OiMAAFzwcg81narqqxtcKZFiQRggRkbWAzsXP9/k7m4A645Ar5yVJ1K0mtHLlckr7aJbJvdvbTS73PVinGCcopKNn05ru1rNpdP0dkeQfEC6iYyBZP3caHgLlt2DvDYONqnG3g/xcd6+er0j94N25WYHB5BOTgEHoRnOcccdcV6t4ovWkuJ0YhkJLLsPGBn7xx8x5Ab1wOPXyy+I85RuQlgXC9mP0x644znsK/R8go+xoxp2unHmXryqzTsm3o997Wsefimk5K6tdN2aS15dLa2fZv5ambsI4yD0yT8xUdQA3GM9hjnngYqNpMZ3YUcHbnB2jGCOvryc4yec5NWGCFSWIBZVKrGRtZwTzJ2zzx2PrxUQAG0kKScgFueAO/fjIxn3Jr3azTjF7vqmru9o6O6T1fbyaPIm03d3fpurWe9l03f3dRuw8FCcDqD1BPOMdx759cAYp6gqhYqd2MMFG4kE8HAI56j8+PR6IhRiWw+7OAQM9egxxgAbRnjPpzTs/MMgHPKAnO5GGADwPzxwcjrwFRTtJu93Z62ul2b/AK1dkjFy3Vm7NXsrN/C7X3ad738ttyCMAElgFPJJK4PPKnAxlj6E9s1YCxbGYknKgqQcAt3yMcnjgccc5700plT90M3zDadwUDd1Gfy56Coy8hGw4QFV2AfxL6jI7cbufbPShtyb5dtOa+nRfK//AAy3dizlqtL2svLTVLVvqtLrTa1gyFYeWSXJ+UYweODnqMgH3FRs8gUOAN38YHIC9OgGB2yRz6giphk
hWZsMMZOcdAeV6Y6dfX36xkoyHAJIJKgElyDwFxgdOcnA69O1JLVddVf0ur7/APDN6XKjum43s0m9Xddk3bSz0v17bDPNCoDyR1cY3cHP3emAOo59TijcpMb8tsyQD8pZeiZ64IOeen49V2oVK5+YjBVT8wx1zxyfbHGD7Ui4w4BGAfkLL+8IGBjHY4+vfoMY6NHfeys9dOqS366/f8y1bWye79NbLSy09U7tase5WRgWKrlQsgRQdpHfqNxx1zjPJyABUYdHKx7lGU2DaMEgdm65PIG7PBpoUB8KSznIIIwqkYG5jngAdePp2qNk252xhTnDNk7QxHJB4ABz1Oe3tR0fkr9O66/1b7y0r6c0nolF2Wm1r9bJ6ef5ybIypYllVfkXuGYZ+bgYOTxzx6ntTRGWXbkqrgNGScAFeu7GTzngc/iBzHlyvBGWP3QcHg/Tjrk85PvipRhGOVOVYAbjgAnuP9oHv3OMkVlJt7O+nTfdKz8rfltsh2a82nst1t+r/wArgFUbhkAIcmRjhmbH3R1wD/8ArPNMXCupcZBOc7uOoxk/mc4981MwdyHwrYP3e2B/eGeQPUHnjpzSERgNwQyjcFIwNp4IwSM4z19OPmyRST1at0s7PVaR2110W2vVMaTva17rp56303t+L07EzKCWwSqtgK685c8jLevbI/I0ifIpExBKEk5+844yM9SR1BxwfbmogUbevmMkbFdhwAQRnH0Xnt24xip4yCrhn3SYxEyrndjgn36jk5yfWjZu7vfvfTRPbX0tbRpFRpPZa6XWiukrb6+u3kx0shYAozKGA3uBgqevJ754yR26gYzTQfnCyAM3Lbf7+eg/PB57Y5waDGSYyJA6MRmIkKokHTI54J7dARxz1mjgLTZ3rgEbeeAwzgKeRwe2Mf0ylVUbu99Fdu6s7rTvp38ilRnaN0024tNaxV2rapu+9neybtYBEXbOCvy42hRnJzwBxjHfPPORVmIbVWPG/aPm284wc4YkDGM9MdcDpnMghuAvmscys+GRW6J/eAHQ4x2HJ5FTW9lcsyEJ5cJchy7bSCCOvcD0I6kcgjri8QnBNOPLe6V7u+/Wzdr3Wi7dzdUZOapulVu0kuWDbfNb3lq76tO6euj8mhUk5VPlCZAxyB3bOec4G3p1PXNTrIAnljaWdMKNuSH7MT2GMY/GtKC1dTtdXUd3GQoUjjDBeh6HAx+oFZ4blpSqxHGcRvtOGzwBnoc+v06DryvE3atay68yas7b9UvLfV21udscJUVmqVa6kqb/AHTclJ8r1jpZNbN/g98ooI23HO/HzBRjdn72OTk9j7Z49KTBcOwyCDhQvOR3JwQTyc474z2FbZ03UixIs5mLZHyxsy5GcqpAOQSOAAO3UGqDaTrS7CNKvdzOQP8ARpCGViQOAo4HQ+pPryNI4qndWqQi7rRyjtZbaqz29L6aLTaOWY2V3TwWKnG6TvSqvqr3aTW7XZdUmUyQPlMnbcX6HPbP054BBH0qrLKZJMFVbgLvEeD7nqQw79uOckZroYfB/iy9kP2Tw7qs8R+ZylpIVQdSWIBwCT15z7mti1+F3xFup47aLwbrZnlYeQsllKgcMQV2Eg7lbscYOD+L+vYaLaliaEZJXtKpCLS031Suvv39Hcchzms4ullONkpytGSwtdp3suXm5La3Vmn27JHBmL5d5kypw68Dc2ezc8HOD/EBnvTo8jg7hG7Hndn5uCBjGcA49sHHOMV9J6N+yj8dNbgjktvAOskSOoKm3kysbdwNn3OhJyOMcHFfQ3g3/gmx8fPFKPcHQZtPiiUM4lUmVQcZcAr9wdCe3TjGK8/E8R5Lh43rZjhkotcyjUhPVWTT5Nb+ujd9Op9XlvhRxpmzTwmSYylFwi0q8JUk7qN5Lmeul9NG7et/zpkd5G34BlBK7iBggDgH1yOh/LtSpZyM+UJKNtcttIZiewwGxgj7vfviv2x8Gf8ABHn4o3dxaTeIpzHYuUklPmeVFsc
j/Vtg5VQDvJwRxntj7u8Bf8EcfAmlxQPresxTBCk0ZmcL+/BBcRSENsQHjZt+bOMgdPHr8a5JTipQre3TV709VZWTW2r6Wt1WrPu8q+jpxpjeT6zWwmCi5RTVSTlUTlyvSMU3rfvbrpa5/LtaaHrOovClppV9PcStsiENtI3mYxuAIHUA5OeMD1AI9W8L/AP4o+JxG2leE9Sbe3lqxhkBXkBm2lSeMrnnpjt1/sX8Df8ABO/4NaDZosGg6VJeFFRrh4EeRpUGHmkbA4bA6AZ7dTXuum/sz+A/DQT+zvC2mQXNsqqji2QLJn77sAMYIUHbn5sYyMHPzmI8SsPdwwuFkpQb96pqm7RvdXXfS2nfS6P03Kfou4WlKE85zmVfSLcMNBw5WnFuMpOLbW+q1s3roj+RDwX/AME6/jN4nntzcaPc2sUjoZkkUqHWTGGjbHKrg+YQDgkfj9j+DP8AgkZqlzcRx65esyRRx3UoRihRe+98MI1OTgDc3yk9MCv6V/8AhDtH0UobaxtZJI40HkpCqrDDypCJtOM5+fnIOOoxXaeHNLsFljRdMeSB5C0p2hwrcFVK7STEnO0EkfNxgZryJ8c5viFz06sKcVf3OVRvs/Xqldb7+Z+n5b4EcCZbShJ5asXKHLzVcRzVJacvw8yUYxk1ro3d77W/Gv4a/wDBLL4eaMLMX2mw6hOoikLuwA3deJmQ7ySMZKjBxxgmvvHwT+xr4F8Mp9qi0y1tLzyDaMFt0klRV2iTOFAdZBjDYH3c4GMn7o1DSLQl0iTPnQq64TEhlGTuOMFOAMjnbwST22PCvh3U9V8m2ihkillVglww8x9pK5ViRhiAOOn8XPr5dXOcdmLtVr1ZSdnyKTa6PRXsk7du+qufWYfhrh/KqUamBy7B4aNNKCapwjdJx5XNKNpPqua9+/Q47wJ8GvC+j28Wm6XpFnbo6hJ5Bs23EmAQXUrkKhBKYPyknrkV9NeHPh3DLdxrJHFKEjQ29usQjbanDAMvDgjGTx0GM9a6nwZ8IbqfU7FbI/bpWKtLGXKFJFALRspBADEjav3sg5J7foP8OfgPq9xobSXWlWtvIPuXFyu64hRfuqp2g/L0IPUnPFeplvD2YYqDrqnZSW9Ryv8AZ97t53d9tHY8DiPifA5PTjz4uhT54pKCSgk5ctrp7bWurbrc+c/CvwrYadEImUK0gVlOJGUyZJbjaVCYAx/CMjnPLvFPw9g0a5CwWy3LBPMvXBzGIyDmXOMLkEnbgkEY6Zr6R8VeC7zwPGwE3lRuGZSGwkjtyw7bGJA9ccjPzV80eIfGMq+fbXN0y280TLMpbG1M9C3J5wMgDsCOtdOOwFDDUEsRCSqwWrdtXGy0vv8A10R8flOMx+Z4h43B1418PUqacjunG6vqrK6vq7XXc81nstPtVnEMUgij3GKEofLuJCCWVmOcY9SCQuK4qP8AsyNxPFmVo5XMsL/KglJG5EH8RX+E+5GPWS/8XztdXoWSJ7RVIhQIAdg3AEHqCe5xnFct4b1GLUNQaW7eOOzjuWcCXhGYHJ2f3/Xtk/Tn88xeIpucoR91K602veKVtlt+R+n0cLiIYepKoraRlZO99Fptq9XZ66/I4/4nXunXtvLby26wuYygmQbcED5Sw/iI5wMj0GTivkm/ktLexuIV27hI4kO7a0pycSAgfLtHQdDyO9fUvx11TSVltUsZIJkQqzSRHCqSHBV8Z3N6cDJB+o+Gtc1ArfXEY5jdy3mo/wB0uTgS8fKox0IxjrgdfLq4haQUk9Um10irWbem/e76o+zyPCe0y+DcKi5pczhJtv3Ur32stmt9/u8O+Mui23xB8PatoGpRxahb3Fq1sLXarp9nCMGYErw2CNycEEDknNfyW/tc/BJvg78RNUt7GyeDQ9TujPYO6lVUuzM8aqR8i5I8vBbOD0xX9c2uaomnm6jkVHVkkUKPlQvLjZOnBJkGCX5A4HHavxW/4KKfDf8A4TLwtc+JIY0mvtH
ZZEkZAB5MO4yjzQcAxgrv+Xkkehr7ngfPKmEzGlSqTfsK0lSlFPRt8qUraLezvZtK+mp+M+O/AtHOOG8RjMNh4yxmEh9YpTcY3i6a5p6qLlflWvK+mvRn4Hb9jhd5bcBlV+6p7BQeg988dKmQqxyyk4GDjj5R0GeckdyR3FQtD++dANkkTEZbsIyQcnHQcYBzn1Hd/wAgYou5FAJDOeWY9cHuPUDpx26f0NFqdOLdotpJPfRpO6XR66t9fJ3P86JxnTlKMnJNSakk9pJrm0eujWjSu9VtduwwTc2DtjA78hmXIAbPUkHke3PNVdxZgoK7Cdu31I46ZGOo5/ng09g53DO4ZUjYRt45J4P/AH19MfWs6gHbjDZyG6DA79O/QfiRjNY1IKUdLNXXS6eiTttttutNyXNy1bs9Ntl0u/ne6emunckOAxKllyAAPvcjqRyAqnIzwenB71EXLbCoJ2ZB5HOPvdemM+xJ+vLTJhiQGA+6QuT14BHbBx19h2oT5W5DAId2SeDk/wAWB1IxgevXjoJJ276Xvbsn91k7dX16mdmt/wA1qlbtvutiU7SA+AvyjKgfOeT84ye2f84qYtCOFIDKg+ZR1YcHeTjJPGeKj3fOzo2TgLuCAsoPf8M8/hnjNJkpL865GMkkYY+pyM5B289j+ZNJcqtq9b/N2X3dbbLVkNXVuiWi2bs1pt+N9vkMjwHWSR8bs553Bz0wSMgdTkdeAck4px8kccqDkhlOTvIzg9MA9BjqB0IpWCg44QNkpsORuP3hjAwpwMYwMgniqwLvncAFAO0hsE7OjdMEDPPr16ji13T6JdOny6Wdra6rqacl3vqkk1tZaW9bLa3mAZldd2AXBAY57fxe3Xpg9QR6VaRo/LfcEL4LK5BySBxtz3GSB256d6gAdozwNoO7eQCR/tZ7A9AO3OMmlZnOEZFRSuFbodhwcgdMnHBBGMdTjFEk/spWsrvbR21Wtrv19LsvdNJJO9m15NdL9U3f0toCjJGcgE4y3yhc9Ac5x6++TzgDMgDBmDZJOGXnPGeSMDBwcYIxzQu1grF95OFEZOVJHBLDgcYAAzyeMipjxHjPCtlWIy7AdFznjJJx69fplKpZrlab87+VttrL5K+pmndqLu1p+FvTyfyet9RMop3sCAyAYPTcDxgenT/E4ocq77tpK4BxjBzjqfUc8g//AK2FXBYjBC4JGM7Qc8tnggcZI6ccc0M+AVPzHocc8cZOc8D8OfxzXNUTeqd/ebaetlo7N208nbv6v08PLVJNWtbv1SutL7O9/TfQ+j/2coDqHjjTLC0AMk0hVw3Kg5VC5xgggHBx69BX7xeGPgXBonh1NbgsbnRb+KCC41Cz1CMql5BIQ3220kI/exOBlMdQMcYr8SP2RPDeqSeO7PXbYNbaekk8ZvLpCLSOWPYdyEkgujAA8AZI6DNftDov7S/jDxlqtl4J1ye0Fpo9tb+H7NFjQQ3sMJCec82BucgjYGBI+bn0/knxlqY2pnUlgJRdGhh4yxjcrcri+Ze6170mla100tWfonCtCVaU5qPMotQjra8lJOVm782jaVtdHfYzf2vvCcvjv4NX0guraxj0rTEnsnSEBbgWwYXKtMGHlpjy9w2sWbgdK/m8uYYrfUL63SQOsFxJ8x4X5WKlh/ewckYPJOea/qn/AGkfA2qy/BzxE9nMkWjJoaySW8SBoxceWxlcyA8RuSN77SAcda/ld1i0lg1bUY5YVgZL24QxHpGqyNwCeoGchjgksDwAK+k+j3mP1vKc1oOupxp4mMo04pJxclFyVnaTaabduZXlbyeHH9L2NfCzdNpzja+jtFW91PRu3fayd7lcLGxfDOFK8Pz8zDuBnnJHHT6dRT0wcIpAODliQNxHQdDxjpxgEcHmqqZIwBhQSNo44xnjrx2JGRwCMCplz8rIoLA4HXOe3HGB1zz9BX9N0rqK87a7W217+r20ex+ZT6a97L7tX66/cSF9zksQRwWBHLZz8vO
M5+vp1zy4hkyQMIRksoyQpHQjHBPcYzwM4ximscpggAgAlkGSpzySBjPXPp6dAKeAxYkSF8YKoTjeMYxtxgj2z2rsimrNq2u9nbpd9nr+Onri1qun/bt7+afyt5dehEhK/Oy4RyVwpALk/dz7cckgZwDyDUqNsB8xQwUlRHnOV4wQuMnb0zkYOO9BSPk5xghmRDlcg4IKnt+v40i4Us+CHJAUMCw2HqeuAeB9KvtryppavrtbRK6Ss7O1++4cq1t5Xs9dLdemnlrp6k42lXCls7CdxPGV6Zz1Iz2A7UgJjCkkA8KSQMBW9T0J/Cg9Gw+0sMFSAT+BGevp+h4pWAJCBspgbkzwr4OS3H0z/Ok3fpbTXpdaW02WiW25SW33Xfy3f3XGtkDOWJYZxwTg9M+m7tg/4URqCyZUKecMeAo7KRyST9QMH2pyFejMRjO0jOG7AYHUjBx/Uc1bjHylmYFsZ5BHOeoIJBx/L1NYzb1S7Wu9tWunV2atZ9T0MDQdSaevKtdOruvdd77WfR97D0TaN2dxJJwpBAGeT83TOORyOfekJds54GTxnqf6n9MilycZGQcAAkDBz24xx2B4HP1FRgSb/vELnByev+73BI/n3Fcmqu272tJNqzsrLp6vVau3Q+ljCMVHlVrJJpvXzStaza1WittoShsBRkjnafUKfbpz6Hr+tWYTjaOoDYz9R1JwT+npxUYUY2lgCQBlRwfQn0OPQ9hxUyKFBDHJByDjqDgYyOOgyD2yOAahrmjZdbW3+Xn/AJb9DropK1lro1vdvRr8/Pz1uatq20xtuIwCNuSeenXv09eM+hzW3BLuyqkZHt1/D16cZ6dM81z0W0YORlug5yM+gxwCeOfT2q/A5RwQNp5weR05ye3I4HTOMdRx51amuZvW6s9b2bSV15PWz066I+zy6peMEml7sVJX1btG6vvdvbS9uqOlgYKikj1BY/rg547dBxkdauGTlsE7TyF6YU45HTGDz14wPSsFJXBHzBgQCQB1zx8vccDrnv64xcWUklQSTjgnkD1+o4PbI54HfzqkG5N733Wltd+r2a8mfW4WsmtHazS31SVr76/PTbRG1FKM7mfIHIGcHnjB7Ed+30FW7e4cMiIyhSwIBx8p56jPB59cH+WNG6EHLHcFJ2nsONwHJHoT04HvxZhkUMpUgYzksCQR6DqA3pg8YrjqRi1JNNvl009Nbrz8t/mfQYWpFyhfW7jZ3+K7TVtF/S2STOx0+Z1kUs3YhgvPTHIPfp16dOuRXaWFyCVOXbncWIOATxyAeeR1A454rz62k2vGSSMqc7RkMSQcnHPbA6fQ11unXRYeWWIzgjaM8g/KXH90c56ZyBx38HFw3bvo0utt0t9e66PbofomUVlHkjrqlHmfd8qd5bXT389T0WxuGCABlz8vyjhmGTyxHboduMn9K7Sxkl8pHyDvdc55YKT0x1UAcA4HU815zp0sQjjLBd/G5s/eXGA6DgjaOg5AyeDnnrrO6lUeYm1kJ2gtjtxnaDzjIx+vTn5/EQc3JRaTum2763drdlbVa7+h+oZVVhBU5Sk3skk305e1762VjvYbtVJQ4TYFEaYyGHJ4Uk5kByd3AOfTmtaG6ugRu5icBoizbFB54J/hBHY+mfpxFvdMJEZ2D7Bk5PzZ4wM9sZJIxnv3GdsXAM8J3nEyb5I2+eInspyAMP6YGT7k15lTDJtJq2j5uuq5d7vfdq9vyPsqeNSXM01rHfe14XSjra+m/nbY9KsNRcuwEis9uf3ZU4V0b7464zwu0DGcdRmuwtNZNlCzyuzRNGUKqgPmuehePPLk8788fTNeS2V8GinhjliEhGcYxtZejDgYZfXJ78HNJLrr28aYnV5trBAwwGx94uNxBdgBg8cA5rOhhJTnyqyTtbRJ/Z7bPVtettdTepmVOlTc3LTmV3dXSilbV+SSVuj9ToPHPjCKy024dbgwpHE0xP8AelIOYmfcCADjK8kgjJFfmx438RyavrF
w5uHcmVyxHC8/dAHIJPfPTj1Ir2z4r+L1aKWGK4VVkcq8MWdseeuwZ4ZiMueTyBgY4+UPPa5llbAHLPjOSAOnPGTg5x/Tr+l8O5YqMY1akdbJRTvf7Dvrrbr9/wAv5K8X+MZ4/GRyyhVUqcJuU+V7pa62flbZWd9LWHvK8jtwB820nPUDnceucgfMO3T3qVHJxzgE5IHQH2HP+OPXpVU4VwSThuvJGT3xwMdCOe2e1TKQGTcRg846ZyOenOR0zx3GDX16SUdLpX8l0Xy30ej1vfq1+Cr35ufM010bVm7+b7frZssbm99p44GM9ge3XjnjmrVvztByTvIxkdPTg9fpwfWqecEjPy5zwR7HgdfQcenUdrlvtOOfmHbPXPBJ7Hp1+p6HnCeztqrrbrrb7/k+ulzpoS1V9G7JX6PSyt0t6a/JmtBJsZcHGMjB6DkdO7HsfToOnO5BM4YKG68Dpgt14PTHPPXpXOxlScccDrzkEDrg9QMD6YHbFXI5mYEAZXOFXocjtj35BP0HArzqsFJvZNJ2XS6euttn1Wn+X1uXT9ny3lFN2Wj7Wu9f63SOut7nC/OwJwVG3jjGeOmSMdRjJIrZt54sRtksdoyCcZAPVh0KjuOx9BXE2s4EeGOXyw469uP4eRg88k9xmtq3uU2g7sHHIGR2AycEnAA47+hryK9FK76Nqzte/wAL07fjd33sfb4DEWimm/WT32ttptez379LdZDdPuyhAIJyAMg9+BnhuR34PtmtNLyQH/WMMYBDcEjvj3x0GOMdc5rkIJ8SKQ2ARlTnaCeckjBH5HnqOwrUFyQC0jZJDM5wThm7cdD6deOmK4p4dNrbpa1ldOzej77N7dPI96hjWk25c1k0+Rtrpbfrvpv1736db4BMNuILYDL/AAEDOX9VzkDGOhOemZ21UIuPMCkgMrufmDEfKcfw9OnOD6k1xk16I1wsgVSoJXqVbqSSMc/y98isHVNUIZdswkyBjA5RQD1OeD6Hr14GaVPAurJLpe2qdr6bedt3ez/FmK4ko4KnJqVnyqyum7vlfl2s7f5m9q/iKRQEikWVjnccFSoHXqWwcd+vfoK4mS+MjM7swZmPyMeD0BxjGCD0789qy57p52LsxJO4kkZ+XPXOQOv6ZzntXjkLMikhVzhXzwSOTwM5HP4/hXu0MHTpwXwt+Tu+mzSXVfhp3f5lmWfYnMKs3Oq3Dm0jdtNdLa3v5bdPI3obhwzEAbQcjPAB6YHPU/rt45rp7OfKIwzuIwdvIIPdR3xjHQZHauJiGMOWbBKtkZOcZyeoPpjPPftXQWRZXChyM9B6Dt9cg84/DsaislytbPo9dLK1l/lfRvRameDqVJSi0nq1bW1r8u6vd9nbX1Z32nsjSKd7HaTvXoCxIxkjJzzg/wBMivRNNiU7fm2q+NhI+YKB1zzyce2eteaaYRuTLlGcKOTkE9Q2OADwfXHGOevoOnO6hUZt3BUL3A4G/wCmB69K8TE2XMle6sm7aX02007aP8tf0XJoW5W9U0nLRvRcvzbs/VWVr2O906SL7REmARuIMhPQDGBwOSf7xPuMc16PoxiF1aqf9W7gllHyswwOOTg+vT69K830m3ZlVowFwRkuOMcAsc4/DpnpzzXpmjW7loW3oqrs3PINo3DPAHJPseP8fmMc4p9b2tZNta8u9tG3v089tf1jI3yqLUdJNWaXR272b835ao9s0QwiWCN2Bt5CzBVGN0nBUu3O0kg4GCMg8jFey6KCcoqLGjFQkJfKFs8Oemcnqc846eviOibt6gZwVCqxTAymOig9RnhsjOenSvXtC83zY7kEbAEwr84IBy+Mjj8ehyQOa8qTSSt113u7NbNdrW+75n6PQfMoJWf7tKzTb5lZaLRa+f6H0j4Qkt08vz3gZ1cRuEBWFOm0o3IJPdccY65r658D2UV2beYSSMyhSAMEtuwDMSDgqcAAYHIOeMY+KvDty7SQBBGsOXyHGFkDFd7x/wB3oOeduel
fU3gTWobN0ZrlXVC9v9mgJKKi7Qu1udxc9RgbSO+QK5HVVKSdly7tN6pJp227W69UtycZhpSpycX71tls27JR06/evS59+fDjRYBqsFvJdT3IuPKZU3ZlMoH+rRwPmVs8qR0HJr9sP2X/AIcaMNO0y8uLeJ1kPnz29wqkEg8rID1WPgryAS3fFfil8Er+DUb/AE8LNHF5CeaglGDG42gxI+RlyDkAgZ9eef17+G3xF07wppsLLrFvK1tbx/JFJuVVYYKyAEEN2I7dz6/r3h3jcNGp7Ss6cYNrlcnFNS92276WWiVup/OXiZSx9al9VwzrKsmk4w5l7rtFK0bPqt9T7I+K3w/8OXWm3F1bWVuLlI1DtEi4eIjmTYOGk4GFUgr9Ov5efGj4VWf9lXlxNbKk7RyXGmpsAEsSg+YjnrHtJXIO4sT0HGfvSz+Lmia/p6G71aG3cyyMqNMF8oDGZt2DlDx8u3GSB3r5K+OPjXTLhporaaKeFZnjN27qB5JHKKv/AC0d+wG0AeuTX6xxHHKsZl2tajNqFlL3VzPRr5200+97v814InnmVZjDC1YYiUJVVz8/Oo2k4Jp3Wytq0uvoz8OPHnhqLT9Ruo5IWitJVna6cLgCVjiEFO0gIO1t3rkZ6fhf+338J7rUbC8161gWa70x0Mk5ADvBljvJCgMxXG8/wgL3IFf0o/FLTNPv2u9Shmja0U5MU4EYkny26XjOUBI+bnOc44r8sf2qfBdtq/hPUIEcSwXNtNHeojhlkldSYWL7fvxlTvTHyggluc1+FUG8Fj4unGN4VI2adrrmVm3G1+3VWsj9k4qyeln2R16Vam1J4eUXZ7TUbpR5ovrbR79Nj+U+4inWWSNoiDC7KCRiQbSflb0GeAxHzAHgd7EDkfJ8wKr8wY5GcfdXIAzx93v9cV1njvS59D8VeINNmRIxbX06xgOWjIRvkIYqpYrknO0YPBGM1xMLFsfvDnj5sYPXs3J4xwcZPt0r9EoTVaCqNNJxi1ZKyclG6v3XXS769D+E8wws8uxlWjPmTpVp0rbNqm2ktldvR311v5Fxy2TzgEfKWXa5ye4ycjnrwO3PSio8tuBZgWIwTnLEYPX645/LgZorvppcq2e3TyXdHOsRFbxae75pO99OyaW332PJPPYkM5U7sqVzx354zwPTn045NRvmRiANwAOGIAJ77QPQYx7j8KtQWhJLt8w9MYVwevPbHHHPUVfNlgOMEHGV+6oOeeTyQff09OKyqVIJtqyatfdau1tdfL+rHkqhZppXeib3itUrPtbTXbTUx4U+QK4bIYcA5JOThenAA64J6k+lTyQuu9kQvgZ3D+EY5yOzdtwyR2Fa8VqYtrOilh1c4GFPTjnOMkZ64wfYxToqh2RmOR8wPKDJ6eoyO3X2rP20rrlV31erW+zaf/Dt+ZX1e7clo07WWzbtqtdt2kr+tzm1jlB+Qrhhxj19zkgjPfg9qmWNlBdw7jOGCnoQRg7cZGQTzn6jIrRW3T5CpJLFs5Py89FwRg4PQ960bazZxtABmdN2RgoG6DHGAVGe3occ10fWEt0norv3ltr2vdfpuaSpO19LaJ7JvZraSu/XX5GAI8iT93nc+QSOUI4xjrg/iGznvSpFhhI4XZzu5OSzEAAcc4x09Ociuvj0JiFk8phsO3AUlpFX+I8n5ueBzn2xitNfDkpVfLTe7OEYhQwjcjJZwSBxwCRnk+1ZTxdONmnG0lqkrb20ettu+7Wmhi4yi2rataWf2bJb3tf8brbXTz9lcqyAbACN2wYyVydx7jqBjgD9KZDNLGpUEYDbgc4Ydeh7Dv36816hF4QdjveIxK4RAzLhZmcnYVXOdwwd4JwOPWkn8DSu0zCOV1jAIYoEVsg5eMjIZcjheOh54rmljcPJcjlFLSz93e8d9fNdd99VY0jU5X711a15Lo3pbTfdWV2cHBfzZSNpAMfOpIzhh02jPIOc446ZzmupstXdWi3yeZtGA7HDtnoB12g
YPfjoCc1zt/otxYbo5FwdzfOxIw2RhcYHI7cgH27Z6PKXEZIzHgAKCNxH93kjb6453YyOc0SiqkVaScdUmlsnbs3pe2nr6HRGSsnF3V03u0raa76d99rvey9cg1KWSM5YhXDKqKAAoXBwFyeTkHpz7YrR+0OwQx55QEAY3AHqT/nnPPrXmFjfNEyu7ufKymM4G44IOO/OQQR+J6jp49RyiyGUM5XDHOcE9mPHzdMnBwMDI5FefVw842cbPVau9lp53td+f4o1jUT0kk0t+rvpZLrp07LfodxbXyKyK0q+XHg43AcnHPcsfUfWuittQjaTdG21l6uwwGyeec4OSOfw3ZzkeRfbSjKzDBU8MTjcD9wE9+h5x9Se+xDqcqqdp+UseC2McDOzGSM/ieDz6+XiMI/i1lf0stVe+jb3+9fM66Uou8ErPdqS9HfR38tbXW3n7XY30qLNK0nyYBYKflJfO4qO4wv3TjGDzzXK+IrzdEfmzGjlwehZs84XPz9Tk8Dr0HTBsdcIQpvaPb2XJ3AdOeg456HGT0GKztavDKMoVKsCdueuOSMf3m49O/pXHRwso1k5LVyWtlqtLNJrr5bfcdDqaJpO7krK2m1vxv6ee7OJ1qRA7YJIbGWbgkc5452k964C727pGXJKliueSuevJyOnQkDI6AV2mp/Ohcg5xtX1XkH1H59+prjZlQlgSpfBJyVG7HAyMHGOw469wTX2uXe5Ts94q6dnbpaLvvfp1tra1zzcRLmTbu772u2rNX308r3dvmzNSNgpORhmBJBBJ3dhwM4IyPx6c07HUbsMMlWPGO4wO2M8H8wSaVc5UkFlbgNngFcgcdMDnnI9wepPmDEsc44P3SNuRgDjkEYr1bdXb4YvoktE9F93du1+h5lT4t1smtF2Wmz7W17dBQWSQMcfKM4JwxLZxtPTB5zzwevepg5w4ZgSqhlOAQWAPI6gZB6d/wBKgyu9s5OzGCRwwbnhc8gEHuO2OtPAADkhhuG0ID8xA5Vuc4zzuA7ADPes3e1la1o39dPP5321e7MmrrVa2i/us7d1dN9PJkEeVcFmUAk84OWP93HQdecHjqSAKXIjBVgS6uPmHIwc/dPHGDgHBA4GOKf+624O4yEkgcEeoBXuB6DA5xzSnaQei5XaGMnLSDGQeCME9AOQO/INYzTTdkmrLS2693ponZPdK9r6XLve2j3WjVl0s1bdNXWl35K1lFtIUZOwMpOPvZBzt78DsT26npVjbhSMAHgpIv3MgZwzZBHXr68njBquxcDcFU7SclhltoIByeBjkenr705BiPc8m1Ocnd82Dn7q4JJJByOAeoxV07qzsrWSffR3vbrte/rvYcotpO6eultddN7atJd73REsTD5gVyxwWHOeQCPY4wfrgdOkpSNRIGk/f5wpyME9SOmAScBT2yT35a8kaYUBt6OrBcYVVOeR/tdMHB9KrfMChVfnYsMscjnoSexPzZyBWrvdtPonZu1ldLS+l36aGkYylq3Z6O2ib0X4JdLLy0SJioJLEOrYBVnGFJPVSeQeTgdcgjIoMRYODIAZAFEZJAQr0Jx0xnj8eaeu9WKkHBChtxyMt0KH3xjOM8dOTSgMCzsoby1AKHAZEP8AEcHOfXHP61Sbs9Vra+zs109Px8yrtaaKzVmrNPZ9V3sum3mQLthIV1SVlY8DhWGB8xxzng8cH881ISJnwELk5I42knPU+oC9uB9MihXjkJQKflDFW43OT0OcfMO+MjHrikeeUSKyKHkPy7MZKjvjHUnryCeABx1l83Ra3Su92tNVa7tvdbm0ISm0rP1VnK77Le7fTbVpvSxKNsMwDtvU4G3dtC49wPXrkcEDnmkkleZ8hc5ynyDjbxxnBznj6c9ecen/AA4+D3jP4m6haQ6RpN3cw3FyIBIquDnIDBMKeEyCeRkN0xgV+u/wR/4JP+JvES2+o+IgLezEKXh3KSyk4LjJwWBGFMXbghjnjws1z/Kclh7TGYhKpe6
pxceZSXL539Wlp+J91kHAPEGfKnUw2GlQw1SUYrEVYySkm0ly6avXo9dLtvU/EXTtI1XVpxDbWE1xIA20RxOTgYGWKhhgZA5HOfavb/Bf7O3xd8WE/wBjeCNRuYZCFNzJBIAiyYCSJ8hIUHPPzZz+X9WHwa/4Jl/CbwJaw3Gq+HrSaadlkNy7CaSXIBEbI0YMasQeCT0+tfoh8PPg/wDDTwbHFbab4M0y3nt5F8uRraJ1IjxsDkqAQCMqcYHPHBNfmGbeLKpzlTy3Lo1bLSrUmrO6WrSel3uneyWu5+z5H4FxlGE80xs5VFaVSnTjGOnu+61KLbTXX/M/js8J/wDBMH9ozXb7T473wjeWlvcmO6Pns0cElvKSV3EoMIcEjjOBggDivqGx/wCCPHxWvoLt5EhtHtbf7QYYQV3ygZcrNjv8oCBeeQDzX9dl02kzOJf7Msi8UMcaQJCip8gx0Uc9OOgXv70o7+GOImOyCKr/AL11hGEGeVJxyucDBBA6dya+HxfibxHieWUY0KLi1+7hB2teO8nO97JrVadT9Cy/we4Vw9J0/qtSvOcovnrOE5RTt7sU4RjBJ7aN33T1t/J7on/BIj4jppbS31hGLksWEhk8y7AJAVWTYPlQDOCfmz7DO7p//BJTxVaalbi/R2ggVbiXL/u5mPKq42/KufvAk47g5r+rO3utOeKadbKNGmkCtIUBLN/sg428dMe+aaPDCaheboLeNEaMFFZBtcsOQ3U4HYEHpwcgV5FfxG4kdR/wIRnG0nBTbV1HVe89Xr2+R9fgvCrhWnGDqYGD9mou81Hm5VyXTfI211t7rs7b7fzJ2v8AwS8sr2Z11cR2yYWIQ25Fujznh2abBEoGAOEGMHPJ49k8M/8ABI3wVqljNcSSQzpaDKrGxWQyr98mXBBH3fl24bP3gBiv3f1r4YaaZwJrVJJ0fzUfOIuckhug49z7dcVtWGhWmj6ZNFH5MNsR85PHzHAIU9evQY5P4Vz/AOtWdVoJxx+IjNyStFuEVflb3k3bl3u9t12+owfAHC1P31lWDkmkrSoxcpKyaWqd1ddb9+zPxf8ADH/BMj4ZWBsrO70aFpYpFkmnLBhlCNsbgpwDk5wSCeozg17PcfsA/CaV5seC9OkulgEQlWBSj4GBL9zDOQOQCMN3JJr9QNP0q0t7qK6kj84SDzDG5+WT0xgHJFaEk8N3fSweRDaxEYQBcBMDk7gM5P8APA7CplnOaOcZfXKzsldqctXdaP3tmv6s2fQ0+HMhowhTo5RglFcvM3Qp6bXu3G7S5dns9kmfmh4N/Yd+G2itHaSeFtJisGlWSeZ4EDxOrEgHKsQQfcgD1Fe9j9lP4VrqUWpR+HdKu5LOGONVks4/mKA/vVfaAWOBj5cADvivtay8FW9zYzXzSq0MYV5oS2DKozh1POfoO/c9lmbQrSBLMhEM4EayBcuhJ+/7HpyMZ+nVzzfHVXzfW6vM9GnJt68vVttJ2tLXt026vqGWU3CNHAUWqco3UaUGo25eWyceW0X67aHhvhz4b/D23heyTQtJtrplEJLQRbgUA2uRtHbp35zj03YfD/hLQZvIj0uxj8kEPPHEiiVGwGycEHPBIIyMgAnNa/iLw9pVldLqUN3K8r7SNp4kcZyuBwAOOenPfNcY15PdzPbqySRAMhiLDcrDgDPJ+bPv0xg1zLEzjJ88+f2lm1KV7NpbXa0baSv/AME93D4KjXip0koRSSlJQSs1a6aVk1ZJJfkjp9U02zu4HuLGJJrBIlf7PGmHX++ygc7c44x6151qsV/JGlvb2vkorlkYnJePIwTx8uMEAYOefSuqbWr3TNKMMELRuE2sz/LsGTwvB3enT0zis7Tp9SuPInkt5NjMQzuCTszkFhg7VbJIPchjj19GOJdOnBqm+VLRpXSd46tW3a2u9Lb9XpDATU/acsJcsrxaik5L3XHts077/JFXQL+50y7RtRtjJbqCJAHI4foy8YyNuTzz6ZFb0ms
G8vGjZ2SBgVUsoAjUAYBcnnrgADjHJ6VB5ktzdSWrxo4ZiGIUIqk84Bx90/TPrgc11yeDCtrZX/lSHzA2Y1yyEHsU/iVRjBzwDwK5oVudym3Je8rRSennpd28jvcaUXB1YOnzrlS5k7tqNmrrRrouvVW20LDw/ZXtt9ovYiYREVEkQO9nXncMbiy88Docjt06HQdCMUri1h2ROgLKUy2VyFLkjhj69sd88df4H0azjWKK4Li2WI4Qg/fOOFz1Geox2xX0z8O/BFhqV4tqbUywXkgjaXZuSMj7rqB/Eu7oSMHJOBXv5bl9XGypUqMZSlWaitLtKXLvb19e76Hx2dZ5Qy2GK9rKbp0k6l7e6opK972T2e33Wevzlofwz8S65qMW2Am0nZo4SoyUI4K9PmLE/MOMYHXIr7Z+BHwE1W0kuU13T9qPE4hlnhLpGsmNjMTjY+AdrDJHPDACvrr4e/BjQPCS2OqXEcNxAGWRFkAcNIcEyMCOhJAPAx8uScV9KXN34ettGup4jZwbouQjIGAAGCwHIHGT6c+pr9xyHgOjl0YYnHSi5uClyO1uS0Xd33e93pbu9j+eeKPFetilPL8rwrlTqShT9vCLShLnjtbW9t3tfTqjwjwN8AtN0Ux38kVuypOsqeWcyEk7ixbOTg4wMjuD0r6Avtf0DQIBZzz29lIsSgmQhVIAGPvYySCcjrxnjt4FffHrw74Pku7TUbuJIkH7jeR5bHnBySMkkDHAx9Ovwd8cf2iV8TanM+nX6fY4SI7eJD5YMhyDJuDkyAcEcACvRzXiPKskw6p0HTnUirKnFR921lZ9dE9bX1tqj43LuFuJuMsxg8yhiKeC5eaNeUZqFny2STupNJv8ddj6I+PHxL0O4vPs0d5bXdrE64CyBf3h3cBucgEZ6YHT0z+dfjbxTYzaq8cUiyorGaSRWyGA+6pYYCqOmBwe9eY+KPGGoXL3V9cX0lwUYlFVy5ZDwzj5vlK8YyDnpmvFp/Et491dyyO/2E5VHJOZMgnHPJDHOc4zjHXivwviDi+WOq1UoRppu6V7720smtd36a7o/o/g7gSlkWEo0KdSpUjSt8VopuTi29evW/l0sd74j8WIIriaNZFM0+3zUH7oqCAQp6KwyBk54B74FcnD4qaNCh1BIreMB1QEb1YZ3OMEFsk88jt9a8p1PxpExksZpSLUszRY6pIckZJGRnkevfI7+Z6x4htfs0skNyybCPMTzT5jygn51PaMgrxg54GQK+DxGInV1f2mmnbRWabTSv8Adsu5+rYbK1yxhKCa0tpdacvV6X0fl+J6F4x8WtqEssLXIUSOUSRjlWK528EnnOcnA645FfPutXghNxctdqJAcXABBEiDIMiqTxwOnAPHoK4/xJ4qlLSrHI2/B+ziY4DFsZcd25Ay3HOOB38n1zxTJFaXDyTF7hWTMMblw6ZO6RQf4FB6ZJHXk4FYU4STTbavqmnorW3u20m2/W3Q92jhYUYRUXyxSvZaLpq7rrtv2LvizxTbSFlicyPwY5ifmRcH5CTn5jghRjK9Dnmvh79o21h1rwH4kgeQx2t1ZXE0cWzIklVT5gLbhsySvmjBLDaMCvYvFPiCOUwSIu+GJTst/wCM3L4MchwflWPDHaSQfWvm34r6pLPoWqiWZbpWjeY2+75IIijCVMAf6xgFySecDj097Kqs4Y/Dyi2pe0gkk7Ne9BqWivfprf8AI+T4yoUq2TY2DtOMqNW6dtbwatdaq/VK2l9r2X88Ou2jWmtarZoiKlvezIpVtxVVc4Bcgdc8n6ccCs472G3G8RgY29FTnIxkZX1Pb2612HxBKxeKNalghVIpryYrGAAEbcf4ee/Q5HB6cVxyNIQS65zzIFPHPOOMdOB6DPbNf1dl85VMLh6s95Uqbur6Pljdva/bm31P8j88oqjnOZUoRjy08dWjFR1SSnoktG91v2d+rb42CDcQuMEFCSASehUDJ3DnvxnryKgEkgYlyGUfdDdT9QR
yBnrx16cCppFJQlQQhAb5hgj1JAJwD26+1RiMtyNo3EFZHI2hR1C8Egk9PUDGOa7VFa62vo1bZprr1+/t5I8ttLZK7dntdXtr39dU9LDXJK4VQd5ydgyvT72OMAgdfpx3p6qfLIwWCsDkEHcD0OOvY/TPPpUYXbMWMgHHO47QCOy5BUqT2P0yMGnodokBPXPzY3A56bTkYA6Dj165Fc7jb3U2+r0a3tut9PS/Uhr1drWst9lqr9NLq22rTHyylcBREHbgqFwNoHRuc575z+pyYWYttfdkFRjKkKSc57/NjqM4PtmmGMKUIUEgjcT9zLDjDdlPfIGO+OadEAHAc5jXkrk/kMYG1fp6fWr+Ha70utNbO2ifb9LByq19Lq7u7eSto9Gr6XSvs9hGRxukZyAcAAgZOeAFUZ2+/qO1IseBjaQqO4LFsDHoF4wepzz74GMWdqYkyCxOQjKc5bsCc8kY5PTPamKFBVpBkgHqMhiMYB45J5yfy5xWtlK9trWXXXRtavvdfN6iU2tVr00sn079Uu/npqMDgsQxARV4A5Dg/dyMnIHOSfUdSaccvjAQlQQAQMqV6c5yobr17D0FPIzkgAEgANjgNnpn0G38R0G2o0RUfL8E/wAJ6N1x07H6AH+d220Witt0Vl8n1S167dGpXWzW1tVe6tf7KadtL/8ADKGFAuOEwxIPzd/73TgD8M+1XRhUy27llUFCCOT/AA8DHTnt0qMKuwjb+8JBU9AM847jjt03fmKQF8hf4sZUZOTu4Jzz/XqemSK5p09ebRPTV2Wl9Xvq3f8ANX0KjFzlZavS602e277db3vp3ZIWQhgSRzlRyTxtIxjqOvHA4pQdzbgQxOM8fMTnDYXpuGOxzyBnIzTEULjOFKYA7j5uB6ZJ6Hn045NClo5AQcA4bGd2DnLD2z36jGADzXLVk4wlyp3a1cej0d/Xrve34+jRgoy5dVyu7TVtPdu1a/yV2ux/QD+zR8C9F0j9jzSPipFBZ6tp/iYTte6hauHvfDGpq4V4NSUDdCtw2AmSQPLbnvWH4e0vTbnxfo8tpMLXMUEzFyVBmVzulByP3hyOpyfwr5a/Yj/aV13QPB/xA+CWuarPJ4T1zRbifStLdfMg/tCMBglsCwAkcnPAHQnsa+kPh7aTeJtesmS78uxGE8rAWbzFJG1fmyQhOCBzyMkda/kDxIo4vDZhm0cU3TTnKtGqk3GpSrxXImnvKKTi1srNH7bwbCjVyzCOCjGf1utCbi+aai3Bxd7RtdNN3T006H6YfHW0mf4Fzy6VqYazn8Oiz1YRzDddSNE2xwMHeCVIYjBxx1xX8kXjNp18T69FcnE0OpXMMiE52mOQgHGFLKQeMcnk9Biv65v7J066/Z78R6JexXFzrWjQyXkIiLFptOKsyu0gDAoAvTggE5Ir+U/4ytaXfxJ8VXFsqRK+pSmXagX98rsHAUZyF457n6cdf0cZqlLPMOoufLJyVXlUb8zg6baV21KLlrteK7Hh+J9H2P1Go5Kzk4KDdpy5VFL3dUls7aqz3ueTxKV2tgkqRuPVQG7Adj+fNTMRt9HbB6ZAY5xnHQ4OMZBHGRTmBDBVcFSFLFPvO3OC3TDduOmOM8YZtZcEghWLZJOSx4J2nvjsepya/rnDu6Tlo1r32a+e3TR2Vuun43JuW6W3VN81mrJduq6ke1lXAwTwSVJJOeCSe46dOnHc1NGdo28FkGcnIO7nG3PQjJxwR6CmgrtCD72flI5Jz2I6+vHr6cABQ4BGCRjI43Z6ZPfAx05/Xj0YPms9Nbb7p6bXta3S62012ePMno1fX3X6v02uu+m1nqJDvXLsFyCQQVxuz3HJAI9uPYirSg+WFOdzfxerc4wRjPoPx9KgxHsCkkPkZG7gtzjPrnsO/rjNSNuIJBwBj7p+ZT3OOeeee36Vcm3fpZWV93qtnfpZd/xNI3091J7P3VqtLW6WS69e6sNxkAqDlCRk4GCPvA57jsPelWM4ZwdyuwD
DqTnr64HJ5GQaQbiSuMKTndnAYnpn2I49uPoZY5APvKAMZC9sn243f5GD1rGcnFKyUrq9ld9VbZf07anTTpSnotU7XurpbdbeWj0S0uSonCgIBjgZ7N253D374PQHjiUoQTkkE/kPfpgA88duOMUqoSitxhup/HoAc9MdfXI7VIchsEEIVA456egGM54zj2PHGee7ve91JJ26pu3fpfq2l87X9ulQ9lGCjotNe70Su11V7+d+4FAAM/M3G3DZBJPrgDsPpj60FQck/KeCRu5HXnH+cD1600g4bAY7SADjp0xx69cHH404Kxweo6En+WMDgepH44qXJJK77PXV7rrprq/XyvZ9sHJK6WrSeivtbW672/TpdRggKeylsAjsc8jOefrj8+9pH2qCM8Dpzjnpk9CRnsT681D8hGCnXIIzgZHRh3z7j2PGKlVcjBIIUHIBAGM9Tgc+w4z69awlPd8j0aTsvTbv33stHe7O3DqTaamlbW78rW7r53287FqNyGG7AwOSTj6ED6cYx+PStCKY7e7AewycnA+b1PPHcc4rKDKcDqTjnOeOgxj0HGPbvjAvxEAAdsAEdcYzliM847Dvk8iuarNNaxbulrbXdLvo35baLc+iwU5JrVOzV7rW7au1frorX6dtTUilZQCxwQMdsAE9OR1PHbk84GK0I5sKeAWIzkdz2I56deD269qxUIOAW4HOMZyCfQYyc9sdfxq9EdpP1XqMED1Jzxz2+n+9XDKCSur6Oz89tNXfS2i06Ws9D6vBYnVK105Xeq0elv0Vnt0NWMnO5jkkYcqOgwDgZPPp6j276lu4ZVRTlfvByApBBPByPlHJHIOe/esRWGEy2WbP3Tx9PYnsMevvV+Fs4HzY4BIxkDtn0zgY9ea8+tG90tFbS/ZK23ptr33tp9VgayTi48raUUlf3ltpZaLfdpWu9NNeitiMIS+WIHBJ5BzkjsQMdsEcHIyK6mwZ/mZcNggK3cDoQ3bHbt7elcVbyYYblIUH5Tnr2x/F07/n1rrdOlQqUVsMQSwJ44wQTwPmA79sV5GKp2i+t7PV3slb79d++2yR9xlFb31K99Vo3dRa5btt316/i1qdpaz+UIsYJxliTlVHHA469ASDx9K66xvUZFTI2KucEZO/nB3dwOu3+XFeeQTIuwbiWJAIx82PYnPcYyPXFdJZ3aKjKXjThSRkb8c4IP8AEeuf8a8HEUrpuMbvmabS/wAK217uzVtbd2foeXY1xnG7i1puk7axtazVu119/U7u1ZkjR8GQEYJK4O0n7+cnJxxkj3Pvpi4VZHaOd2JVUWJlwsbf3m9j/TGeQK46HUR8p3HAU4QAhecYAHPJx2Bxj35mOpySSYaRVCqcbhkYOMbmBB7E+3TJziuJUJSlonp3XpdrT7tdkfS/W6UIxUajk5TS1k2k5ct+t7a7fiunYTXaRiRHdo5mTcig5DBgfnJHIzg4/pxXNa14hS2s5pTKVmSJljHDK8wHyjPG3Azk9upBzmsO71uNd4YyrLFHgMP9X0OMSYwQe56dAOhNePeJtdk8iZFJBJKEq+SoYnPToW9ec4xg9K9XAYLnnD3bap3fWLcdFa+nmkfMcScRRwGDqtydnGUUlZu6XK2039q6e1nbY4HxVqX2y7udz53MWIIwmWOSBnPGe3AyOcYriolBOQSCDkdt2CcAY44yfc4x1GKsXczTyMxYkl8kE5O3nI6cAYHB54/3hTY0UgAAYByAM84xnHGcjjj3BzX6PhaSpUYRs7tRfZWtZ9NH+f4n8gZ5jauOx9TEyle8mub5p2731e3Xd6jyEOAQS3HI659B7jHQdKcAMkDPTjPDDp9QAcnpz69qGCg4AIOeO+QQeSPT14P68PAXbnA3EA8fKScHIGfw4yMjvkGtpNefT81rfZfP70eRHVLVtPVK/oradd9W0/Pu6NcucA577jxzgEDt6Y7DoSTVuMFWwOvQLjgAgZPGe315xVNDtKtknJ2rnoD7gdfQnvV
tGORkcnOW9D2AOfbpz9BxWEr2a627f1v0/C510bJq2qSWvkmtX18u92aEbfXLDHTkfiR0xxx1x3xzOrAYAOAOO+ep56k57AcjPSqSsS24AYIwW6HPsAcdMc1IhZCGBwRkZ6j+vb6598CvPnG+mselv6tql8+/l9Dg5/Alskm+Z2u1bTVr0s9dzTjLHIA6D/64B9QOwIJ6Z7VoQS7cFgq8bQRz7ZC+xPqM5rHEgUAqckj5iTgEknPJ6HOMnH/1pYpGBG8g8nH8QGMkc9SOcnjPrngHllBu91ppZWelvuS6abPS/c+nw2KjT5WpSWqUrapbaWk9Gm/Lb7+siuVVUTduKYwQOTjuP9kehx609tQYbiJFBOdq9QCcj2ww/MHuawY7gDcVkVWIKhewJHzMO/OD+uec1UnuApxvywO7kdfTHHTj0yc9TWUcOnLa7utHa/y07rTv6Ho1sydKhJqXK4x8k9EtfN/g915Wri9kAZcsR1OMZ2nqxJwOcYPToPSsaSdnz8xJJIYDqFPOM+nX6DkE54immLZ+YscH6Z6c8e5z6ZGBVdRuHPUg98gjIxk45Hr3HSvTo0oxi1yxu9W4rXRJWk38r9rW8z4XGY6dao3zSleSb3kldp31eiS007tdmWFk3H5uTjoucHge/IHTHTqB0pyk5GM8A5543Dr1yceuCe2agHykYUAjjGeMfTHA7dOv1qZV24YcEMCy5yBnr26dCT9MnNauy7NJdtNtdH/Xl0OVSk7O1rarXta33/LY0YGZMOzHa3KqGGQT2OeOemO5PtWzbSMpUn5j3GeAp4wnGQRjk/jjmsSIncq8YyTgHGT04/HBPpj8a0rYgsBuIJJVCRx3zg+uOT9Oa4a0Elqk3o20+9ndLf7mut2evg6nM0nK0r8qe6b0aTs+nz9WdtplwzmMrj5SVCjk54wcjoRjrxx7mvUNMuGWKIjDOAN+Tkld3C5xzjknoR74GPINKBRkwDgHBfp0OPXPPf2/AV6bpDOkgjJOXI2k8lDwcrn88EY6e4r53GuGyfqnp89l5vzWump+o8OKryw5o30S5na9nytWd0nt567nsGjOjKrMcEkPgE8gYx052g9u+fbNeqWEKvsaOQhGVd0Z7ydeh/Qg55/LyPQGaOMfvQ0iNgsQNxQkdADjjPBx68V61o8kIkjZpVYZXeudo3gcAHt2559+eT8piYtyck27WV1d30T7d9L326WP2LKVBwheLWyad018Oqet+vk7PZ7+naNDLD5Um/YjKBtc7257BeMFsDnJ5x0616hot7GisjkJGh3SZO50A4AXOPlyckDOMe1eX6ayyL5is6hx8z/e2gYxx3PckYx2Fd5pqRmE3JkUlSoXrzn+9wW49gPyry5zSi7q1kldJOzdtbdLu91profeZfTdopcz2s42d3pot2n1V15dHf2zQNXkVvK80yxpE0kY8sFyqY2AMSoA+Y7hzz69a928Aa01u9oPkkaV5WIbC7nbaRs5/wBYfywMV8n2moIgRDJ5qMUDiN9vlNg4jfA4J7n26Hg11+ka9cWtzAiyBnEissscmxooged4wdwUe4z9Sc+VV5pSlbXmjp5qyd76JX8+68j6P6oq0LLqk25J6tOKu9F+lrdEfrT8LPGSWNmHS4VL8xOIlIAR2JGc5PyS4P3gTnHTGMfRWlfEjULZJ4odTxkgyBH4j3cM8isx3A+gwOOeDX5F+FviG1oygajvdZUieUZCwRkE9M8vnOQCCSeor2zQ/iBIGiuluJzvj2szoSilupK7vmQ4+YdV4+U5ruy7MZYXl3Tg9OW8d+VPTq1bvofJZhwoq9apVnThLnSl70feclytataK13a3T7/09PxZutNhZItWhF3HtRXVixCN18vDAEEZ+X0ySetc94n8fTasYpLu9S6imiVJo43JcYGWkX5sIFzwOSxP4n4lsPFV7Ksly10yxMFl8qVxhkbO94Bzk9ADxjHYddCDWriWQwQXMsME4BhMzMQ5GcAnoo6
5PYHvzX0Ec+xdRKnKrJptWTk7K1ul1d+e2vZHgLg/D06ntHThzrVyUU+2iaSemuvkesa1r1vqdk9puP7vMSRAkllU/IZAc52jPzDOT0NfL/xa0m1l0ya3IjutyzMxhyqMqrnCKMjzGzgDBzg4Ixz60t6MNINs5iRjNIhwmxBl22hTh1xwcnOT0wRXnHiOeO8t7pl8srJE8lr829QwU4k3HBWQDILYO3OcHPG+HxMqk053UtnJbpNq6V3Z7aPztr0ePyhRwlenGDt7Nq1k9oxTtdenVbeZ/MP+2L4WttH+IV1qNlC0dve+WXdk2tNPl/MLrxvcnA4AwPXmvkUMioyIdz/e+X7p7gke2D/Lpmv03/b68MWsbxa5EmWjuyFYgKqszEPHgDgkgZHOD04NfmJGFEQbdslYErt67fQnoenHT1wM8/pmTVXPC0025RTcbtu90lJXtvp1XV36K/8Anp4kZdLCcU4+HKuWUnUgopKKvZNrZJ30eujV7O5djZAFycuAS27k8456/wAI6D359KKhRgCDgDoTyAVHof7wPcDsRnFFe/Brl1aXZPSysv1v+R+fyhJu+uy+01by3/HrucFY/M6BhlMZwCeSORgjr0+vY1vmNAgwrEnA3NztweADjvnp346Vn6ZbFsBUJyepB2hc5wcjoD0I5HOR3roXtZEjIZTGwwyg/dxyOvZeOAQOhzzXlSqxfrfVLbZa2+e3S2q1M41L2SVk172qTfbp9+l+7MWUB32sFCgZKgFQ2ep6nk499p9cmqksabOVxhSiquDkDpk8bjyewPvWnMApLbWG0lGbGdp55HAwW7dfpjriuzp82fkyehDNg/wjpljxtHc1dNu109mn1s31082uz7pXZV1dJLTVOTadtt7pfLrqMZVTbgAE4IBP3eDkEY+U8HjJHqwzXR6UiySQrkBW3DeeMlgMHpwvB44xjtXKeaox8+8gYOTj8Qw6MPT6ZzzWvbXSK4Ik+RNrkBcuQM/MBuGSDjrjI64wac1Nxdrp6293yXqt/OzvfsjKVTl5dLPRO+1nrrp6Wut1229n0WxhlSKMxKJi+Mr8wZhwuQQMgcgn3Gc12dv4atvPhdbdQOEd3JQeaeEXGDlZcHrgrs5JyK838Na1DHJDGGPmRrugnZdw28b0BJ+8RjrnOO2DXv2hy2twvnyXkbKyQLtkYEMz7s7l/gUEfMMnbkcnNfPYyVSndttbJ3vrtbVLW61tdeZLrOSldWu9G9bJWdov+9Z7X6a7GZa+HvMw8kUbSNIzSJ5eVwMbolwMOwGNjfLk56YJrfXwra/ZJY3VbeM4Ux5IZVb+6MZV4z/B3z1GePTdF0a1kjVFaGKNw7xorhVkDAeXvbB+ZMMQT6+4qXUrBIopI4CsuSynzE2bY05685abPyvjnaTgYzXh/W5qUoqV2nok+1te3q3531MZe6uZxbSXvczV3e0ltvp56Ws+x8o+OPCkc4eO3gDFVMiuFwCR95W/2z2GSeCR2z4XLoEkc2zyZAP4HdNpwpwSVPQg/dBOePavuXUNKOoKsZiceaJXZY2B2lMZZvk/dsuRgfNv57DNeQeJfDwt5JyYQtwGDrlg2Yxk4X5eSw4J4J5OO9e5l+Y8kI0pWbf96/Le2t+ttdnu/MiNSULct3Gb+y3ZJ8t7pu6a2tptr2PnG501rVmdW8xdo5YAZJzkP/dIOfU8c+grxSSc5yqAgsueRt7gds59PTr29A1q3gZJCU2koAsYHzKw4PPPA69+CfoOFmhdN6qpGPlPdz0wSOhHsDk4xk5r6GlUjWj9yd3be1nb1ett/LpvTqPRO/R38tNVpfTZrTW9hGkDAbg23bwTyc/3ic5yDwMde5JBNXbeVVjCbwVUFsEkMcenPbrx/WsKeR1woOBgbtp+VR3GccHtkH9KfBdZKK3zBe4baWUe+BgE4B9gMHk1jVwrd1H1sur01t3td99Nex008RayldtWXNq7Ky+J6dd799NdX2VhdSBTsZWLDhy
v3QuMljn7w554Pbns+7mjJLeYSxXJB5G8YGR0wM8gAcA9TkYwrW4kBYK4Ctk4xnb7A8lRjv1HpzkyTTMHIOA235W6AjjIHOfm7H8s1hDDtTTW6jqr2btZNNWu9t18vPtVWTjazabd22mls1Z2vbs7dWilqEm5Hbdu4BPONp6EYH64xwOO1cZcH5hlBjOBgksR6ngdc598HHGa627cGN97bS4AGM7dx+6fc9wOxGD6VzFygALAsSMZJPJUHlyR1A/hHI75Ne1hUoppx0aul3btFW+/a3p3Oasrculk0731d29Fvt016LYohcAddvJyDwM9sc7SQMEd+1IrhiUYbQM/dGMgYwcd/cnr97PTLn24Yo/HGFI56DIK9ME+hPB6dhGVwepDnkZGRtPIUemSD6kdR0rvvzLyat+FvwPPmrt9NGk7PS1t9LXV3d2tfckDZRlXj5gWLAncBk+vXJwACB1608EhS+V34YEkYIH90erAAbcdPTkVGpBEgV0VxyAxxkjPKjHBGcgnAwehpm+QuA+3OAQCQCVwe+ORnPJ6g1Du012Sb77Rte67a7vtfYz5W9VZWu9d2tPJXVut0rr1FTcGyEXBGAzcHJA+XOOvryO3TGaWQEjYUBIBO0DGTxluvTgdDj0PNJhiozkFmyMNlTg4+YYBz3ByAOnJ6ODKr/vAzMF7ZPykfKB059ckYz0ojHmaturXW/4t9ey2W+w1uu67Jtu1rdbNej+++jPk+WKQlNvJZOBluACR0yM4GD7+86eSwkZyrDZtVSCWkboAuBkuD09ME1WIZiMLnDEMCMkADIOR0wM+4/Wnt5myUoAflDDZgFVBxkDByR/Ecj6AVU04bb2fKrPa6u3vp2/FWNIpScdbSvs5Kyva7Xqlou2z1uSYExXA84sjRgFtjRuP7zbWzwMle+PpmBMBkV1UspZsk7VkKkYwueQMnp0x6Hh3zA7kG0DGSvBy3GW44ye/J7UxgQAdpLhjkHn5DjKj0PArFS16Was1ve1knprZa66am0Xy2TWl04tO3LordOl7u1k7krkcyYBRm+ZskjzDwAffI4IA57VWldgQxDPIcI4H3eeAvGOfX05yDzTtgZmQnZE5GVJxiRc4254567ec4HPNaljplzf3kdrZoLmaR0UYU55yNx6/Mvfr1GMVd1BJuyStd20UdHffo36O68jSFKVVxjCPNKcrJbNvRWte99dbK1+mpVs7G71GeG3tI3kuHkCRrGpJznB4HXGcY46HjqK/Qj9mj9iPxF8StYsdT1ixmbTmkR2aXdHBvO3ywxKn5WAI6nGMemfV/wBj/wDZLh8SX1jrWsRRSM0scifaRiNHY5j/AHTA7mOTlMjnB3dMf0cfCD4V+HPAejWNjGtpLbiGMokUSCRCoBYOQSUBJygJY9cd6/N+LuM4ZfGeEwM17RxadSLs76J2s3Z9tfwP3vw88NXipUMfmtKKTUKsKVRqScZOLjaEk02+qd7Pv18v/Zx/Y78FfCvR7S9m8PadFcwss4TylkuGkbG+XOAY+QvljB3gtkjHP6B6MdP0azQWlrHb+UyrDGqDHljAAIwMoBkknpjHOM1zel3FsJgsUBCyOV4YuCIwNiuMALgE4wMHn6nrZLi1VzPJEGWNV8yEAY4zyAOp6Z44Jx65/AsyzWvi6k54qtPESqSu3OTbTsnbV/JdX+X9QYDK6GDo06FChGjTglGEYQhFfYt/DUdnHVpaa2Wh1NxLNeCMxqjSSqpRY/m3MM5OBjkcY44OeOproNI0aa7iEjTCGRfldSfnLADJI9V9T1zk85rldEmkvtt5DiAAYhyAhUDpgc44z1ycdc4rt9KkkhvY2llAjkBztHBfjqeePw9eteG8VGKagrOWt9LK9k9NNO13qtNWe1RoTbl7yul0u3KSt7qeqa8m9TSsdDto9QDzXbSKoyyMSOB3bg89ABxx39dnVZLS1syYII/KlJj3BRuc5wcjoeeCR6HFVbxowzG1k8xyuZFA6bu
3qQMNzgEnrjrWSxu5jGJGHlLnMXQgg5JB6ZznjPHY1zV6/MrN62TUtla8U9u/bbRM9HC4dTlGpOTSi0nCScW7OLSsu7dra6dNWyi6lrVhCGUgh4yi8KR6jsw56H/6/TaXcXljHFczZeJ0EYDHO0nHPHc8denII4zWWmpRCFrNSqMxyG43Dgk59Bz3/LAAqF7id4fKjkJKsGMWPlYA8lTkDnr19OTXLCqpu0rJJ6t7XWt36Wte1kvx9mFKNmnFQV7Wtq1ZWWre9/wW50kt01/dNDIQqEcYGDzyuOTz1+v1pLjwXe31qfLnLAKCYmbAIXkfL0PU8jGCe/ejo1oJLpbq8LpHG6suSQpOBz3yAOp75xXsF1c2llp0UkDq67VJlUjjOOGPQ7fTqcHvXq4alFpyjUjFKzdmtHeNrp2v62122ZU8TPCypU8NGU22o26KzStd7aafPrqeVWlh9ngS2vVNuYX2rI5wSB1wxx97r/TgYwNbhj80vZShp8bAd3yAd8jucY+bPGPpXbeK7m41O1h+wxxykAZaIc7Wzy23kH3PHWua0vwpdyQtd3RY7MnYuWIPOSx9u2Pb6Vu4uFuWTm99EtVpo7dO3R+Wp6NCvDk9tWlGjJ3i6Wkmm+W900ktdPu1vZGTBqet29ksDXEixZCkKCSVz8y54zkY/XnqKyLnVrZZczzBXCsY0kGG3AHHqCCemfetvVNVsdHbYUEsQBDNIPnRlHOTnAGfUccd815Brl619em6skbYchCxwqkYxg/mB7g84Oa4ajcG2m072lsnpbdXVtbW6W03evuYPDqtO8qUKcZLmjNKyk5cr11au73ve/RK5q3F5qGotPH53lphzB5hKrIB0VWJIBfOec8L9a84tF1SDVhIxk8tJWEcpyFaTjaOM7l/AA456cdFNeX0VtAsgHmF1DMH3bUz/eC4DDndnhePrXZW3he8ureG4twpil5yZAwG0ZYRjGSGJxkkdOfQqjSrVrz5ZJxd01d30VtHu3fffRfL2oTpYKm4SVJU694xdrpzVuvRtWuvXbd07TUZHaSDUoRJJvRlQjOxMZYjpuC7hxkE888V7ppMuhmwsokgia1lKfaCVGQwGApGOACxwn15PSvn27s7vTL9WvZRiXaquRgQAdQDydw6kDoDnnt7H8HY08a+JYPC8BjFx5xIk5jtcxFfnMh3BiQxJbbgEAYOc16+Ax1StWp4O/NOpOMIXtpfl0fa+zT0trbc8vM6dKGClj5zdOjQi6spU5SUVFKLcu9ktd36anU+LvCGnaXDb6vparHDPEjyIJM+a7csyxgZQJj5vmOdwwDWx4Ktp/EJi06I+aXbyY4OERBwBufBJ3nlQF/hIPavu3X/ANlQ3HgR9RF4JbtLRpLWJQQJwqZEsb87SedowQ2Dk8Cvz28Na/8A8K18dnRvEY+xNA82yS44iKq6iMrkLvd+dhx1B6V9fjMkngJ0ZYimqUKqhz8rfLq48z10Tav/AIe3b4/KOIcJxDl+OWWYmOMxWAcmk7KppZR5VZzlyNJNrbrtY+kf+FZ6xpy2NxHbmMbll78RDrtXA3Lg8EnnAwK+nPgvYGy1K0+0SQAZDIsgACM2A+5SeBwOvTnBPbkrL4peG9c0CJEMAkgEahE2tIqgYRyw5YPyTwDjHFeKfED4x3XhDXNP1LQhEfLeFZtp3qFzkmSIEBioGGbIxkcen1WCx+WcP1MPjKVRV4fupTju43a0stFZ9n5eZ+b5lRzziCFfL6uF+rV5qrBOUdGnHRt/FZu23oftAsulxeFZ8zQrPHbERhjgBtpPyZ6BsDaOPQ8c1+dfjr4q69pGr6nZWt4TbRu4aANlSi5+YktgDgH8OnOawZ/2qLHxX4TWza6FjeG1jWQW+bUrKqEMctuBznkZGBXwj8Q/i5cw3b+XcJL9p3x5kOSiISA6nOS75JbBGex45+g4k8SMNWw1NYKUoy5ORz5rXVorlUVqrN23fR7HyXAPhtjsLj8YsxoxrRq
T92FaHMqdpxblGT117qzWmpu/FL4u3usyXT38rssTssIDbFUgkMwIJLAHAPAAPTOTXyjr/wATY9PghWdVkWWZwzpIT5hBG4xjJKYyMqSQc8dzXDeNvF13fQ3Zhu/nljlYfOSmU5MYBxndnr1PUkYxXzReeJHv441lVxsnMSgOckHO5lIxtLEAng8YHqa/GcdndbHzlOU5e89fe3va+l9LdWtVbtY/qfJeGMPh8NRg6UIQjtCEWnoo2Sf5u1vU+n2+KVlHLK8d06wSjdLHLyEZh8wXPDE+g4FcvN8T7IS3sQIkWWNljjMWU344dRnrk5HIC9+TmvknW/E62bmSG4YLBIUjc8bZlGASM4yTweSGxwRXM3Hjm8t4GuJbxWluPnKIoldFAwwGCNuMg4wST7DFeJUhzyTc7b2Vlu7f8HtrbfY+kjklGMLwjL4bJXtZrlSd2m9tH1ffv7R4j8ayyXpJ+aTkhX+RVi6lh/eIBAU4yOcZwK8w1nxdDJbTsb8wO5PloHOXcE5IXoF6AZz6jGBnynxH4084mZLiSeeSNSpYEYDD5lP93GBxnPOODXk2q69NIiLv8gFy2ScsGPVevVT2PqDzkVrSpJ2Wis7JtX7W33ej06ab3bOpUadOnFNKMkraO60t0+56K/VvU9R13xg7JFiYm4jywkmfO2MEblVcZw3G05OfmwK831fxpGA8lsGhlVArSAb/ADZWBAIz1UcnHQ7ucY5861XxLLd3CRByjiPYzHkShfvHH3dw42gevU1xGpaqFWS3uLgRQMqyI6cMpweQwzhvVccDHPNdHI+mrStZrZ62eysn6aK2tzza9dQurptJ3ael3Z8unZrz1fR3NbWNZvjtKvsU7vtJJwW5z5gznbtPYZ4PUda8h+ImvpFoOqwxvK8gtJYDO5xDK7p+5dAc4YfOGXJAGPmyar6v4gkdw7TCZmdUjUSku8S8SMVAw3l4AYFh94deK8q+KevxjwpfmWUIotnKkgGUMo6IQRjbu5JB9AMZz6+U4d1MZhkvi5o2cYu61gtW0t3d672PzDjXN4UsqzC81aGGqu6ve6pu6bvdaaPs9Vsz8ofGwlfxNq7OUybyYYJyoyx5LjHA5AOO3Q1yqBo+WORzwoB4J4Pv7Htz65OrrtyJ9UupCzF5ZW3ktnfhjg+hHTPfI5PGKzMMCMFSCASc8ng9eew68dscHp/VWXRnSwOFi7v9zTT0Ss1GKt530fTXs3c/yszerHEZxmNaOiqYqtNXd3fnfkk9Xo77a+Y8ksGOWJ24Kj+62MA9iOm7nn3p8Zba2djgKQcrkKO+0gjaenPqPaiNSwB+Qn5zgEjPI2rjGcjnnv7VGCQDjCk5GB90Ecbfoceo6Vu5KKWmt7p+i218/wAdkePNaySWj63urNJWVturvqv0dIgIUkkqV+VSSCMZG/8A2h147/hUSBcqmEx93Jxu2989cH06fzzO5ByCNpGACBwR2wOpyc5OfXj1TYu1WwxbqpUgAqMbiRgY5xg5PI4xzXPKV7X2vZeunm3uvLrfoZx5uVXu3ZJbdlZ267u+vrtohjQnPIXABz8xZlySA3tngYOe5qJ0QNlAfmG4A5JGcggj19uO/wBalAcvwpXPzIG5yOcEegHf8u/EqLltpb51HZsHP1xkgHgjGQT7mtabvp87vW17WTWtuun5mkYSsvtejV1tp1T9dX95UJIO4/KeOADxzxgdQfXnrjgjkPZG2BhzgBtwwxUd84+77cHHJ9qumNSzFsqoH3nwTvByCp4zn2xjg4NM2hASHJU8FRyCD65A4OBgex6cVvGSS9Xpbrt8l00uaKnJ2933usbpNtpWt0dlv1KCgFwpJVWwxIOQc989hz1x/TEpSNR0YhT8r43ZHfnnaT/PGeop7rg4O7oCM9Fzk/gB3+ucd6RFy5UEYIOD1x3xzkY5OT29OM1pdXWuj1b1022v2vurr5sfs2pbW5bXb2T02st7PV21vrqiNfv7s/IAcAjB3DjB7Hv
wevpTk8wHcCu0AhsgMuT1A9Occ547DNKwwreg5B5wMdSeep6YIPYA8VEDtyxUlTjJwcBm/wDQT+ft0rOok1ZWbj131bjZa6907W7Pax0U+VSTSfwxi4rR2une7W60a79UTMMryqkHcVZcDDdn6kN1Oc4I5565hYjJb5SBjOPvA+nbnpntyfWgtkkHIUkjnI465Ixz35znpn3aSgORnHBz2yRwCM4BJHA659TmuPkk0+VWaau91d2Ts/O7XkdcG5NSs72Sbd9U3da2089FovOx6r8Idam0nxnpNxEVjxOYwSvAVwATyRg9skkHHXGa/Tr4a315ofiXTNVMzCG31CCWSCNyyNbzsuJSoGHj6mTt05zX5G6BMF1bS3SQxbb+FTKpKumWHzD3BCg9M5ziv1y+EviL+zYNKm1KzS7BvLKxl8xQ7yeU67ChI4Rwx3cHBHfiv508aaCoRp16dKNWVfDzoyhpeSjrfrfd6Wbu/M/UOAas41KtOcvcjONWCbtFStC7S033fTT1P3B8C+ZPDHonlxPb+OtNfS1tpVBgVbu3byiytt8uJ8N8y7hkcY4r+Tz9qTwQ3w/+O3xL8NGNbYad4pvoUtVBZYlaUsI0Y87Mk7SAM8/j/YdePpGv/wDCm9Z8PWkennRtE02fU1t2AdJGUIAyr/rnALYztPQYr+fX/gst8B734W/tHzeLba0vDofxF0nSfFNvqNxbvEjXGoRykwGTDIrMUOxWYMME4BOK/Kvo9Z1QocQ1sPUqqhLM6FakqE3Zzr4apFqMVKy5lG9ratdEel4lqnicuw1SajGtGtCUJSi25PSLinZ2sle3W2jd7H47g8gFSxB5xwSOSBnpgj26+hxlzqdqkA7jkAE8AkcrjkA4IPU5x0pnKkhgVIPzfU9iOeOe3Tg8VIpyfnyV6gcYO3pzz159AenXr/cVBrlVunmmm7rrvdvpc/EXTTV2kpdeVtJrTTZp7dUr3YxVZCQEUEkY53HJ5/I4HPbNSAKVkZtzSqQxIICAY5GOQcYHHHvzxQdp3sMgNyGbJ6+vbPOOnbjpmmKMA7+p468tz3PIPfp7V2xUVazV9Hv1089HrrbbuZ+wjpto9+rs1a99W7rb1GgBlO3G1mJDE/NkY4OfTqP54wRcUIQFZjvUHJwSrYHf5unTPzd/zgXaVwQSdwwOmCO+PTHcH1xnFTsSckHqMN1G3OMnvyQAeTz04rSb2s9bW72ejutXbqtd/wA940XZXTWqcXrqvd6qztrfRd/O6hFbO1mJGdyn7uRkdPTn6DpU8cRAJxkZUBgc8c57Y6Dd/P0MS4bAA6g84+904B54x1xycZ6g1bjYcxhAoVeT6t6+mAfwHqO/K3JN3astdGraW321VtraeZ6dCnFaRtbRNq+rVutvLtbvuPAGOBhgRgLjHY89Qc85JJxxjAFSbiB/DjuT1BzyBzn8OfbimIFUBiSOQvTIxzgjnr2Pr/JD8x4I2BeeCBuH3iBz6881k027tNpvS3TSPXa/k/8AgnoQT2d219mXRO2u1ut9PvY85Y5JIU53L3J9cHjgDgD3I5PEYIK425GM4GeW9cdR349eD6hxKnCgjp8uDjOf73HTIyT9MUg2jsAQc8dxj644Oe/68U+VdXd+fTZdHfX/ACNlLlVoxW61vr08u/d3t3I9pwCV3HtnnPJ4z6j/APVUiK3HB4IGOOhHQ+uPTHPUc5NOBzycEkdTzjOOSOgxj5uPTikHQgMCD1HbOcA9O3Hrg+mc0pRXLo7rW/XR2vb0+XXU2pVGmk0+mza22vZN/l5oeoy23aD8xAwB0H0J5x36ZxmrMZAIOOCeOeQPfkjsM5J9DjmqytjGeBj0OT1BPuDz169xirCsuONp6gYzx6HH8Rx17ZyeMCuScFdOz6easnrpdb37+mrsvYwsndW0vtK/TTy6K+jfqW4pMZOABnqCOv449Ofx7A1fjckZ5BI4J6Z6nnp379zxxmsgMQNuRnqAOCx/P0z3/TgXEky
qqWI24J2+nocA8Y69z6muSqknotr/AIWWt76J9La3ep9Jg52srLTaz1smv1+710NSJypBYbienOM89MjH8uRj3rYt2YKBnjltuRjqMdhnb29eM1gROp2ckKT948ZPHAOeMHr053HnoNKCZVByTk8Dv0GQV469snA9q8+sk9Wtdtt/PpbTXTZep9TgXdx1STs77tvTt56738lqdDCVBjZj3PDcKfp9f/r49OhtpkbJDCNhz2zjuRgc8fh65rkBIDsIIIK9OvzYGQAT3xknsPStC2ukRVZsjHAwOgHoMnd1789BnjNeXWhzWtrZ6rTfTRb7912vZs+xy+tKnOLbsnZ7NqS0Wy6233fnc7GO4MbBgxZiBweyn5SRxn64PB6e+jBdFSjKQDu5YHueMdefTPv9K5YXiBU2kEhdxbOTt9DwMdvzHfirCXhA3DBGcg9ywxwOwHTB7ge3HC6PNpy3d7Xbeq+Wltf16nv0sxnCUZKo0lZ2V1ba9lr3XW+ne1+9W8yEBbn5dpQ/KD/Fk9BjuD15pZL9YWViw4LbmJ+Q9OQvGSeeCffJ78RHfuGYs4UbSVwc5Pcc9ff7oHUAmob3UUdXJnBCIRtALbjjkjBxx347cGpp4TlnZpNPR2V73tdJ2ldu9u2jPQlnzp0ZTU3Fxs1zSsm4pNbvW2uy/BGvq+uII58SONysMYzl8cqBnkc884+pznx3Wb0yzbRJv3BQ45Izk8HnPHpjuepPF3U9ULptLYIYqrDG5Rz7d8kgnPcelcfMxMhc8rkfe+96Hp9fxHSvo8uy/wBklLlas4a9Xtu7LS2u769T8a4w4qqY6TowndNtv3rO6fk722W2v5OA3sfmJz95ccZGM7h+hGRnoDnmrAVd21cLjBHPLE8DAxxnpgg9OTjpWRjkhRk5BH8R46s2Oh4/AYI9pgTkn+IYIbqTn+72we+M9MjrXtrR8v8ALpbyXbbT0trofmsqjnZu/STT6uyu2vN+fr0HkEHODnoDnJA9wT+vTnrxTgCyqO5GGyOR1zj0H5+pzikUknnAHOcDnPHXP8v1Bp3b5c59++c8ep/MdsgZ5zktNW001a1lfRXez/4FkvWqbUrqWyW9+r0XS92n/XWQbe4wVwRxwFz256gEn1yamGDtYfc5BJHOfXGDwOP8nmr8wPUYGcqT1x2/X09etTpI+CuQFAJGcdR6ex9Rn07Gs3/Vm1267r/h7XOmjJJq91sl6u1r26tp7aX1eupdGDgHJXlhg8Y64PUkg9BgdQak+VQPvHGF7kH2+uO+Dz0qvGSANzZwDnGcHPA2kA8/lg8gZIqZWYg5Hy59uM4xn68Y7/nXJNO+iv1ad73sr6769u219D3KE0oxd3daNJO+6/FJapPv5onx8pLfdIyoBxjpnIxtz0+U5B/E0m8pwAep255IGOc9Bg9F6/7xxUIcNlSexGQB2PJJzyOTngc4PXgsLkEYywxk46cdznGDz/8AXIqIwcrXStorWtfb8O23X1Ot4uSScm1bolZ393W1/np5XLyy/KWDgE4JA5YDpuGOCD2HfHSq8sxbAJ5IxnGARzgHn0GCc4PX3qoJGK/7OSCepzkf5478+gpWIYLnIGMjJGe/zYBzxjoDgevprToWknZaNOy1VtLJPVvvZ9bO+pzVszlUj7Nysu7V3JaWTs0r316p2+964JAYAYyWJJ5PPA3dDz6Y49qXauSVBAycE5+8e2fRcgj65OM4Mfy5yCR6nPAxwW9McfL36njHMyqcKcnaB+JzkE4GOCDwelbcttrLy11/He+21r33scEajnpd8ru7NJa+dvu7aIeo2sS+N2OGJ7dguQAcc8YHI/Gpxkg4ypxlhjhm45J5yO5OB2xUGWAwMDnqffuMd+cDse/XNWI8ggthiOoGeQf9noccjtkE59sXJa9LWdrPayu/v+7rsawd2079HffRW8vwv6W3J4MhQxAJ3EE45Ocdzjjt2xzjmtiBNwDEkAAHAALA56kdSSeCMHd+GKy
4+wG4EYYgfdHv+v6+hrXtnYBWTB2gHj16YwepOTnpzjnFceIT5Hs3eystVayVn83vZux7uXQvNStfRJqys7OP36dr31VrJnU6WI28sM7MQwkYY+8COQRxgjHGcdcZBr0rTVRgHK4J/hyS23j5snHJ6nHsBzXmlhNKWjVSihuq4G7d0yAMcemPyr0HTJZNyglUKMFDdQ4OM4BHJ9ec5HXnFfKYxNycnpfpu1blu5W899NOl1ov13IVTjGCi2lyp69/d102SXrvsenaFcIGRnjZUwUTbzu6AhgQSTx1PB/CvS9OuULKqxFgrDcMcc9Nxzww5zjJ5GO1eaaOw4G0sSQFOMeg5985IP4cEYb0CwMkbHylUDAcE98clV59+B2z1rw6rXM9L2829mmuu+m3mn2t+p5X7RqEeaLTcWny9NLa3vf1Vmvnf13SbhwoMbbY3zuUckEY4x7ZPBx713+n6pIHEarGiui8gDzFODk/TGTt7fnXk2nXbrbglgHdQzkAfuxjpk8Ajq3sQfaulsrvYoc4LjGJF6Mvsc9eOn+FeHikpN9Gk7JvVu6fptrfzdn0P0fATcFBO0bRjeV9HbTTZ3e99Vo9bnqdrdQRuGIZQ8f3Tn52A4dH5wWzjG38Tk437a8jEsDDMSzoVlJYkFlAwhIHbPBxzu9hnymHU3SSVv3oO0JHCxPyM3dgRjpzg+vWt2x1G4y2ZAIuThV3uo7F1yCFz15I7da4o09Ek/wvv6b+q3t3R9BRrwlZc/vX0cd0tNb+bfe3oj2qz1aO3kaOJ0WDzMNIylVllUDaCOSrjkbgTnJ969I0bxXNCYt8y7vmIjR8EBCAkQ46HPIPqMHpXz9pt7Nc7HhmwixELHgNlk4DEYBL9ePfvgCu60Z5GeORlMY3r80gwXyRuZSD1IxgjqcZBHXiqRacbOS3dr7u8dO1vJN2W57cY05w96PMnFWbs5O6V2mm7qVr6LTo1rb6tsvEpmWAW87KSq7gYyViLYLAjd8pyBg8+vHQ9vY+IJUmCSxpKkSL5gTqzYbZJu6ZBJLdz3xxXgGl6k1tdxwtLEFlCtGwJUyqo4Y9cs2cMv8AEQOR0r0rSp7czRzfaGMKshnjZcDnIY9R8vHB7fz7sO5OyveWy2ur7228n0/E8TEUqcNoK3K2klq3suZ2vez0flrY9c0jVJPnD3W2KVJAzKvD7sERuDnbI3QjPOOo5Fc14ruZZQbdVTCKXfY211jbO8xgDcJCANnI7nBqRNRtlidPkSDcrLKeFY8lDMepLclcYwR0JrmvEOpxwxySLcI/2mA7RJggOAdkm/OFXBIAxwcnJ617uGk4OPM9NL3e6drO3dvRLu3ounzuZJOhVk4pNx9260TvFW08tNV2vY/J39vDT7S98M3MwLMIpE8uUjBkclgyqCThs9Wx8x5wK/HiAqwaLBMgAAYjIAHdhnB7dMZ/DNfs9+2VbC78B6iGjQSvcvNbkfvI1jU5bkYKyMCefX6V+LaKElkSLKDeSp6lSDwSc8ZOeMdTz15/VOG2qmFbfNbmVrO6V1Gz6669bb6I/wA8vGjDxo8TKUYtRqQlPmXxL3veTTVmk3pv0tsXGChxgFcgAjPG4dT7A5GOfXJxRTdrrIN/zuwX5s8qAD8xPpjj8B1PNFfScrWiTdtO+y9F6/M/HeRLvrrqo3/J/mQ6FEJHGXVlUrkHgBhk4HXOeecD0JrrL6zjMJcxlmYKylu+AS2PbB449+9cz4dAU78KNoyzEYUeh752/hiunv52MKgOBxk55Cg9McEZ/HPtnJr5rnbnorK669m77WW2/foeTQjFuLd3ZuLts9rN7ap9tbXVjhL+BI5WwX27VYgk8gZ5I9Ofu9ccciuZuG8vIUHuQWH3W6AjsCBjn8eldfcHL4POVfJzkkjkHp7HcP8ADNcrqMbOCVGQBu+cgYPJx69s/TPHNepQknZN9VG7sr7XSWz7ro1bzZ2OmlBvRSdkrbNOzt56W07edjni7DgYyTy
T9wscnOO/6epIpUmZGY7uFIO0DGQfTnkDAwOuDVeaOQSFjGQvBBJzknIAx2AHOOAc0IJMFQgJQjDYznHbIPIGeuM8nkjNepyxsr+9zWa7W0s9Gr7q/wDw5wzej1XmrJt3a162tptt95rwatPbujJK37uQhRz8uCCdgGMFuhJyOM44r0/w541e2khZpQQWQi3ZisgUcExDLcZwQODwTnGK8ZaNN7HO0HGAwIV36ArnHAIOeM1etJPLRyzoHBDnuwXqSO42/wAI7g9OorhxeFp1IONkryXRdk97W+VvLZkKMeW+rs9to3su7tdabf8AAPvvwv4+E0NmizKJjsj3keZ8+OYlG5cOwwC38Q9MYPqH9tLe+XIziWVoY5JYWYLHHHDu3GMDILDfwuRyepr869G8RXFo67JAsBLMjhiCoOCdpOSGJ4+pJr23QfHh+xmKS4kVkQRvsk3FkH3QSQMgZIx15znvXxWLyupTm5QhzJz1sukno3Ja2XVLR9fPFzs4xfvNr3tW2rWtr52ffe12j6XnuoEVpGZbWF4yQ0h3Ttu6FRxnzCNpHBGOcnJryHxTqsLtcjYu9gGRkwwV1ztLHgjbk5HRSfmPNUbrxUJ7dDHMxiWJdquufLHP7tSThiePnwMenc+d6vqrTxM5EcaPuOCcyFD95lXIZGXACDJzk4wBWWFwk+ePNzJRfKm1tK8X1ere2+ybMnPnVle1k001fXe/47dHrukcVrUoEkhJ+VtxT5cZOTuwd3QcfT1rn8AxNtXcdu8tkkhgDjcADtYZIApuqX7O6jA2qzgnf8xUYwpGARjoT9D71mpe5RlRtpIJZecu3YjHoOp9a+xwsKns4XVmrd1orW007ddOpcbqCVn021aWl0+2mq73My9GJSxGVfJAxk5Gck88DnJ9MVXRwBtHJIwuOST17npnr37/AEW5dpJCzMBjjBAxt7nHXHI3N9D9Ki8PyQfnxuB4GRwo+nY16saacYvZ72ettb6bf8E64SbSd9VZa6bW5U76X2vv0udFYOdm92BYHBTGQcnnJ3Z47fh36XJnGSCxGMDCjOO2Qc53DjHJHfrWVYSAfLgk8nJA+Y55+vXrx7VcZ4wxCnkjBHJzg9R+PHOOwPNccoWqS6WsttbaO2vZ/id1KalG90k38Nnvpquy6aXe+3SjctIQ43ZAYY3DGB2P8+hxntWLPkFiuGboyk8AsMZ5Ax7EZAz09du4CEP1yRyCB8pAILH1I4wOw96xHEbBicb8HBY43EdCxI5xk4HXHSuulJXT1drJ2X+Ffcuu2w56vV7R0ve3n18uz0voigj7Rl2UMpbKjGeOBtB6t1yeO3NDSIGYgktgHJXIJx168nOBnABHXsKT5cckMRkMAMFsE4HcEc9eOuOaYcNy3yKw3KFGXBHQn06/N0J9etdaau3p036arZaaLe7s9ttTkqRvZ91bTS97PTrr8999iJCynLBSNxDNkFuvABHYkn8vpicHaqRkAtu5K4JYdcjvjrkYGOtRYLERkZZvut9OdzdePbp6mp0R13sFydnOf7vfJOcE+gx9fVpXtvZ2unbW1td393lrfZ5ta7J7O219rP0V30189GEb7Wy2GCk5CnBx2De/cn26VIWOWdNzBhywUHnHKnPfpyOM9M9qpA3AR5I68joWySPXvwfUgipwkhV8sVbaNoztAx26E57855x060K0bK2/XvovN9fJbmcoJNO6u7R2tu10Xn6W8yErICDwmchweGzngDknnvxjjFTIVjiQBVZ0fc7Kw+cMR8oPBAA4PYHB9qlMI8sysOSAQWwW5zncSB8pA4HPftzTQqMMIgJcZRixTHrv65I6jjPpjmspzv1dkm+jTTtur3eytrbbzNYu6Stomls126rVp+d9OmgowxfJy5JZQTlQMYG/1K88j34qsMq5LfMy8OAOCHHykc5OME/XjjJqc7Y3UuMycRkrwu3HBJ6HIGDjG0Y61NaWdzrN4lraRvI5PlBEj4dxgZ3DJBG
7jI+nWog4JOTajFaylJpJJW0v+KaSunudFOjKrKMIRcpyajGEbOUpOyStvq76+liBIZL+aGGCMysGCqsYxI0hIzjAPfjOeSBwCK+/P2avgHqGq3+malfaY8u+ZZXikyJPJJBRo8oQVHdsjaMDBzXY/ss/sc6j4xu9P1PV7KSK385JizAkKpIKSNkcRknPAGe9f0F/Br9mKx8JaXYW8OmQSzDYUndV81VbG0MpALL/AHRlfXpxX5rxjx3hMvpyweEqqdS7U3HRX0WkuZbu17t6dz948POAKmIqQzHMaXLFWlTp1Lx5NYyTlFwa9GnZ6HlXwd+Hs+kWFlBb6YsEMJVUwnlTtK+NyBwPnjj2jDYHDcAZOfvrwN4dljt3S4kkMpDKsb5YKxCleSeufujPB79SO50H4PwWNsjtb4jKoYfLUfuieZMgcsDxk+3vXdR+GBp8GyFNrEYMwblkbGGYdguDwTkHPoa/nvN+I5Y2UrJRcpfad3f3db7aa9391z+m8qwFHD0oqMouyioxhFxilaKtum1on0V3ddCjpNhb20W0Iv2leA3XpnDMvZsDlsjP05NlYlEk2wCdySrHO7Yw6hV7jpn0wOo4rbs9GkNwikl9wCvOnVYwTuC/3gScD25yc5reg8N2QvFCzqoTBCbiGkJBJHctnoQe+D3zXiQrqcuac7aLfWzS0/pdHuj6FJQW7lezjpa3Ly73urO+3fp1ORtrqWG28uOORSrdR2UE544AU/TPHHv1+ha4srpAwB2ZQtIOUcgYOD06HucjngcH1bwX4HstbFwJ7bICusWRgttU45wcBjzgdSOvp86+KtRi8MeJr/SY3EfkXLRsT8rAHpyQDyBxjj24ArixGIgqjeqTdna7/l7aW9623zPdyqEcZKpQhC9Sk1Luvet71+r120+R7vpH2YG5uJpkD4JCA/KWIPKjj5eCDjp0z0qiNTsRLcESRuZSyCPjcrKT24/P1yevFeZaL4uS8gktEkVroKwhYcrt7hiTgkDJPHHHXk1WtjNd3rvE6tsbMqoc9zuII7jH4E/StITVV2UuZLRJvSz72Wz0s1tZ22PTp5dJVaqrc0LcrTaVre7ZLd9vysjqJWP23zoxwTtYAfKMHnB7g8YGBzntXpWm20NxBbsyAkFWdmGNoB5Gc/ePoOc5rD8IaSNSuFErbo/4gy5PykcnoeQPwyOe1d54mvNN0a2kS0aJZolCGJcN5jEfmpyM84568Cu2GElyuUrxSs1dPXRbbdHve/e720nKPtYYf3pVkmla/Ko+6lJ3vr1vtsYPi+UWtnE1myx4VRtiGPMc9SVzknjJPLDjgrkV5pe+L7+2sGspLgtESCW3AspH8BIHDDsMcdDzVHV/FcmrmCxtnaK6ExEoUbiI88gKSACByeeM/hWJfaMbq0kuUuB8smxoFYb2nj6FTk5Uknf29yaylBp6Ofvby9LLo27XV99bbn0mX4ehSp044qnB1OZOMpR1u2mrtJ2bd2v89/QvB/jBkmaC4JZDuBEoy65xjg598Y9ugr2PR/EtvBFeFYoJ47iCRAjoP3TsOSuf4xwQcHB6ivhDUfGE+g6rDazExsQA82eEZeBuGMcjrzng+leiaX8Rpby6is7WSOTzo03MEC4OCCV+Y7m754B4GMVpQqVqTineSTVr32Vlo+r321e2qR05jkMMTF1I0lCE4KbtdRai4u60Wruvueh1XjC0vLpruaCNmRmLMFGQgDEljnr1/UmuUXS7tdGMkcZAiJklf+JyvO1fRTyB19u1eneRqF5ZRvADNHMmWQDDKRyQwweWxz1OMD1zzl1aX8qzWltHJCgU+bGBj5xnKj1Q456Z+uc90YSqzUnB8kvttNpvRJPRaap21X5LPD13ClToKVJSpSje91aKsnGz1ulbXZd0eT6b4jgbUEstRsyIPNTYpGWMZJXcwIyI88g+59K+p/B8vhqC3jhlbMLfIDu3Fy+DiMdepAAGM4696+RtVimj1qMzW5hKSAG
YqRGqqRgcqOB65HPr1rvrHUbmG5ikjuGWFUR4AVJBZfvPG2/5WUkEKVO7kkjGT62DnHCNwlyy5pxVnZWvby10WnTU7Mzy6WNoU/ZTlS/d3ShK6m9Gkn0crdu3nf6H8U/DKTV5ra6061e5EuXiLKcRrwQpI6cc5IHTIHFZfhfQNc+HPivRtYu1ktoywVZ4YyBPGzJ5lvKRjy3OAGPJHHrx6p8NPilbtHptpdJDdrbNGkqzYdpAOGJ4B29Mksc9sDivrvxH4T8N+OPBf2qytbdbtoDJbxxBRLHIBlUiOP3YB6rg7s8HIr67AcMYPEVo5hhqq9tTSruGnLdct4q1766afM/J844pzHKXTybNMHP6nipfVHWd3aErRjKTbW61d0k1vdn3X8NvEuj+KfhlYXEgjSWLSVVrUOJZARHgfOP9aD1zgZyQRgGvx0/bY8DWmsy3mu6JZfZte05HkMlqMyzxwk7WmHBVtpJYfMSRzjHPvXww8afEDwtot54bEFxGtpI8NonlMZGjUnDRzdwVwPu4I4GMCvnf4q+Ltb1rxO1hq1nJE2otJFmTKCDgqzMhB3LJu+YcB8Y4Ir0eJs2o4zL8PQlRlCpRSjOa0vZpN3vdd7rdfK3yvh/w/ish4mx+Nw2LpTw1atKtClGre9GXK2nHVSTWlmtH0ufnN4Y+OPibwpLLZ3N9cC3jIjkR3L7ZlOG3ZPyK2AAQWCcjocV9GeHPiDB4vtY7nUUDrcSMUCNvYMACsYm4zjuhUdfvA4NfM/xt+G0vh+8a4s49sVy8h3p+5t5J3IIPlYbYH6hSxAIzn5q8J8FfEjWfCt/c6fPOXQqYw+wusBGdrbSyg5yQZBj05BxX5rOtUouUY1HUptXavztWadle9vR7a+h/ScMqwGZUniaNOMa7ipXWkvs32Wl22mtnpsfoL4h16SWE21hujIdyY7c4kI4w7BSM4wcrkbcDrnI+cfHHi25spVaW5+0Tqo8syEkgJnCEchCc4wSSOck8msKL4jwmGO8a/dbli4C87HV+sYJ4Zjj2/DFeaeJfE9nqdteGbcWdSE2gh4xk4ZWz+LA4IAHU4FcVWtKo1e/pe1rcqtbbt1189DbCZRGhJS9neKcU5Ja3938PRL8Bmu+NIdTeEMrW8u7dKiDCHI9M9W6knA46c15TrGsw28jb0BieVmLDgFcdFIPDjjHXoea5bU/EIgfyIZ4ZJI97vNIcbiCcoBzyg2hQCcknmvI9c8XXNz5pN3HEAXURsQCEXGSgz94cYPUgng9amKkna7abV1LVdO6utWu/fzPfp0qdJbJRty6u/vaa7aJ7u9lp8n1HiLUmNw0IkZraXZsjxjkk8Bt3z7CQS+ACe3TPD3GupbPcQHzWWAGQNGPlDMASQevJx/skjgdK831nxk/ysz7ZoEPkeY3y+U3TevRZGx97Jxznsa881Hxq4iu38+SMlW80x/Mh3fwjptJI4PP6110oVJ2UopNXtfWL20v03VtO/Y56+MhSjL3rJJWWzT0u35W0Wv3XPStT8UoI5jHPESz7hG5DToFOWGMjBPBwc8Z5x0828QeJxPdE208ciIomlG7KtkEMpTjLE4ycggjr0x5Xe+KI13M2AG3DzVYqc8Y3Ekg5/iHHTGetc7eeJIY4GiWVVuHQ4kRiV3H7uRxgL3GcDIyeefUp4aq0pWX2bedknvpqm79lo1fr81jM5oU4u8tm2rtK10tvJ2320d79O5uPE0tsrqpQpI25g43SQhujK2flwc5GDkY64rj9Y8Rt8vmTs6vEWcEgDa33gB/fHHQ1wM+uzsFie4WQvuHmIuHck5K7cnJAztOR1PtXMahq63EjFWndgohCbQEVgcZIB7556AYIxjk+hSwLqOMmm78rbe1rRu+71ut9D4bMuI4RTUajjKbtB9Fs972fra2zsmdefEEZt5o0CyMgMsJ25lWU/cYdcmPByMjqCfUfPvxu19rbw9K8tzG/nHbOikKRv4IC8klgPmIPboBgV6E
+qRWqSwkxhwuEnTGwS4yoUk8FsHIwQMEHPb46+M/iGO4ujp8crO7kPKu4mNV5Kkc4bHO9flwcYOa+34UyiVfNKCUPcjOLk3f4U46W110sm/Q/AvFbiqGC4dx8/a8tStSnRglK0ueovdtZrmb3b7ddD55k+eV5Aow8jMo+XKgnp0zxwcZPPTingKCDyCRnn1PA+XgD3Hb37IuMtg8YyByDzjgnnkZGAcc9KeAACRy3B555weh4yCT1IHQ81++uEYKMVraMY2W9rW769LPXWx/CMJNpzqWc6k203u5Sa3b6eSvdvQftf5jxggHI42D05OBn1xzz6U3GCpwGVTgYbgr0ODnPXvjr0znl4YMoUDAYBWC85I9u3u3H5DlrLyoX5cnGDx1P8v8A6+T64SVrvzvbV62t1ukvK3bUVu8bK1rd7LXp0batYUlS2BwVySdvysTnBODz7kdfT0VVdXZySMDawU5GDkfgvcjrgDr0pykAnftAHynOcnGCFBI9c5PJHcU8KjbiA3Tt0z6twM/59KwT3vprvfR9dNFpfpdvuKME3a3TvZu2q6df8ugigBcAZyATnuR0xnoB0Ht+GHLEWMfyqg5LMQdzHsqnrgY44OePrSopYdOpKgjI288DPbp3B6Dvki3GoCbSvzFsgn7wI7KR0zk/X2wTWkZON7Ja76Gqdl8Ld7ap7aJdr3d9NV+N3HJCx++eVwVIGc8YJz/Ttj0zUBQh8LhtxwRwoz3P04zjHParjB8HIyM7QD1APOTyeepyfoDURjxg7SW6jPOOuBx/PPf8aftO9lfv+Oz/ABW22u5XKml5WtZtaLa+3n+REsYMhLjAAIwckA8ZIGcduPTPB5qOSMAsQoXALAHoAARlRgdegHvwauSAkK2FAA3ErxyemfqByBUTowAZgdpPToPf8Tnnnjp70KrG9k+qSvZqyaXXXVevqWoxbbitGtNXv7vVdL99O+7vl72IO4ZVuF4yGJ6ZIH/6s8U3cyhsjI/ujBXP6e/b07A1OyrnZsIA+7g4XI5HTpjHI7D8ajyFBABDnIHXB9VB55P+PQYrRyXuu9tFdNXula2z+Vrffa5cVKOsktZbtO7Xu23d109Fr6xkZww+6VBOTkg/zAXoD09AM1C52hifusy4JGAByPlOfmHqBgEccd5WLhGBxgHr6c9M+hye3GCe5JrM7bQOSoOD2BGckntjn5cc8n60lOO19OjXfR3skt/Xz7Gq1adlupXWjeytZWT6b+ZuaJPH/aNjvwpju4JFbGBhXAJx1brjj8eMk/sVpdlaQaD4OnsZ0AvNN0zUVlICmK8bAuHm68/cCD1BHXg/jVpVtLNeW4QiPE0bE4G0ANkk/l0BHY1+uXgUJc/D7wPqZkBSyAtpy3VY0eLEYGQXxyQOM8Htmv5x8coe7lFWFRxj7atTkrOSk50rwTffmjZb3V73vr+q8EtQp4h8l5OjHlWnPKzi3ytbW1ve2nnZn7W/DzVNT0fTvCpgm+0XGoPolusYb92YppAbfGchWKpLu4I9q3v+Cr/7VnwR8IeB/jn8C/2j/hJYeO7D4j+AfDbfAPxjY6Sl1rfw/wDEptblvt9hdLJFJbxwSlBEpYgAtgEnA888PeLLX/hGvhRNBH5FzeWVvFISS08d5bNELKdY+NphDy5OSMMMdMj89/8AgufrN3feOvg5aQ3a3SXngnTHv0IGfMtIj9ll6kkuZJNx4wRgg5wP5u8IqSzHjrJ8vqKdGMcXicW6sJqlUg8NK8JU58r5eZwalo1JXjpdM+y4vy2WK4cUo0KdSpRhDE+0qRU5uTkqXJFNpXSnpbWPxdD8CEk8yWTeSEErMh6My7m2FxzzsK7gehzg5xVpBkL6BSGPOWPbIP4HjGD64ADFs5y4LIxLMBgAhjnuB3B9f61ppZTZK+W21QMkDjtwQP1HB71/oh9Zp0koxnDRRWkvedra3snf89bbH4NPK8ZN3o4ao9naNN6bXt83tdrd9jMKjJBbgDPXg9e
Pqf4uemPTlByB0A7YGTk9ST149frweTWo2nTFQVjcsSMgLnbGejAEdOPmyO/fkVej0OfYzjCKuPvjBIPBIzxgcZAHXpzwdoY2LXuyV3pa6d78qdkuuu3VP0PQwnCPEGMn+4y7ETi4xk5Spysue2ra0X4d+pgLGOMHJ+mCc5wD05A78564Pe9DHgMTkAY3HPXJB6Ac4+vbtW2NDnd1kKNggE7sqM46/wAXPqRV+30GcYPlO0bHLOvGQOhU9wTndnpVTxkYRvzWbtdO2l7XSVt99bb+un0eD8POIPa3q4CvKneK5+VqEXZXt0SSemtrK3mc2zBQSEJYbccYAB75PYZ46HI4Oaf5bNtK5wQRkctu6+3y+gzz+ldFNp5bzAIyrL8hAO4krjJ9zyM/TvxTotCu5IS0SERgZaUqVOf4iAATweuT6dgTULFRaTlJRT0TaWt7W1VrX9Ltnof6jY+lVdONCdZvVqnFqUXpore6leybfS7tffmsMfkPVTgDPcc5bpzntz9aUgqxBJIwclemOMYJ7E/eHPse9dzpvgLXtTnjjttPnk8whjIQQu3PDH5R8pHJPoCcZJrvF+DmuQxO91BJE+wsyqPl2n7vlNj94rAHJ+XbjPOeJnjKNLlcqsU21ZKV+2r7726PTudNPgLPMZBqngKlNqSSc21ZRcdZc6+7o+m54RvyRgDJwo7HbgjJBA7EHP8AI0qg8lQTyQOcAnnOD/snr+VexyfDeePahh2uHKEiUApIOzDGOvBz0/SqM3gGbDFJSphG0JIcZbuevIPGPUnqMVySzjCwbjOolJ91d9LPRX3f+R1UfCriqq4unho1E91G9947XVlZX+7ayueWqrYILZYdBtBH4Z+XJ7ZAzk0gj+fG7JIycDkY6DGemC2M9wTmvSU8B3CSq0jsGfJ2nOFOflPb5Wwe5wRntVu38GBLgfa0YKR8xXLAgDIIIGQCOScfhjphLOcN9mrzpXvZau1tr287eb2SaPpcr8FuKMXOjCrgJ05SrKM5SmlaDUbtWd0rPs9fmeYiKR/lSNnOOqgEEA8kHHf8cGpRa3Hy4iYH1wTwPQ8nH9e/r7XB4RiBaWJG8uI5fK4KKMfMST8ykkcY574q/FpVlEp862w0h3CMJuCqeh28AKecfN83XjBry6nENG7UI82qV7u+8b3Stq03t2trofq+X/R4r1OWWMxnsbKK5KaaS+DeWqbV9Xo7W6ng/wBnuS4xC+Txnbn2/DP4/QVYWG4QnfFJtOQTs7dRnnK4P4+55z7/AAaJYeWFjigVS5ALKDIoB53DjC89iM/XOVuvDmnKCQwnwoLLENqqDnIUYIz6A8+1ck+IaTnGm6bu3rZNvdXeu1315utz3aH0dJRpe1hm03LVxg7WSvFLazfeyvu/I8FUTDAaMqv0O4e+c+nOOMHj0NaMUrBCAMHnG7oDzhunbnqP517bb+ErOeNPIt1RZGIXecsDnjggYQZyTkZPB9Bcf4ZpMw8hVaMqB5q4AyRzn72ACfcg469uWefYNvkk3Bvd2XK7W2Sb1emj1va1j08H9HriGEVVwmJpYq9k46q6fLa13dPWz2t3R4rGxAXJOOTtGQW4zwecdfyGAasQzMCowx5KjGSBn9M+/Ocdua9bk+FV64fYGDAExqoLEqv3iXyOcEYAGSM89TXNah4D1SxEm3lIur4K5P8AEABnnjAxxn06iaeY4OteMasU2la6t2S97/J97dDHM/CHjDJ4Os8sr1aUUneF5NbXslrbW6/4Y5pJCUZtw2naGwcPg9SevHqMDJxyKti5QRqoy65wCOzHoccZA/zgVkPa3doGilieMZIbIOcDrgjkn3wMZ56ciSkqwYlAoyuRtLBR82Gz97kYXHIzyMYrqWqTVmu6d1a2lmtOq9Ln5zjcFjsJN0quGr0ZxVnGpTnF8y5U91Zq77vR9zVnuCoUo5wVycjjJ7deje3pj0zjXlwwSQlyCwztUbuDx0zyMjJAI6ADNMnuy6kF9qr
8q7cKOew6kkd+cgZ4rCmu2dTlgu0E56kgHo2fbueQM+9duGpc0ou3Wzvr2fy89Lq3VanxecZrLC05qSbVpWTdkp3itVdeW+71XREd3MpBwTlhhSwByMEp7Ark4Hv6VlvnfzuPGcscNnoWOCTyDnntnnrSzMZNu4Ac5Vs4wuT05G769vU9KROWO7nAwADgsDznH3SO23vkd+n0tJKMEtFZW0eyurWfVdG/u1dz8kxOMlia7lJLV3dl7ye13srK3TbR31HQqUcsBjj5sAkHOBgk/TP9auAYUgglt27cOMDjGT2xnuPfjNMUKeSMDrhuo9AvPrnjnAzjnkSK27KkDkcdl46fj1HHA68dKct/17qytr1+75sqNlFRS87vfpvfq7a206WuJywOW5YkHBzgdmPyjJJwM/h1xUu4bcbslB2BAOOCB7r68D86QqhB2ggnaRxnHOD7jJ7dvxqMjcem4YxnIUkZ54wPTHqcnDdKzkk99LJ69P6WmnXz1NIJN727LRq7tZvW9l+HrccMuwySMnnBOO3Gcdffp+YxIqsCwGcBiCT1Ax0Hr9OuR6jlASo9AMZ5+br0UYzngc8HOMcinjaBkBs8EkdDx1Y5PXHftWTtpp0623VtV9689WbQWq2urNaXSat+G+/e3kW0kOBgg4Hpkd+3Un1+vHarJdGO3B6Z4GBkev8An+dZqlsAryScdcgYJ/E+nv8AhirSyEbh8uFXvyd3scde3IyR7GsJRs73bu++ultL+t9P8z0qWJlHlTsm3rZtXat6v7rduop4zg4wCDkZwMnqc4/L9O8KltzZGePl5GeffpjIGSBzSLuA3H16ckEnsenbPPcjj2RuM4Dbgc9eBnGM5PsPQ81UIrW+qsvvsnr39NtVfYVWve6s+ZSur300W6e7f+WiE+bldrZzkksMD1PP9ev6UvU8gYA5IA598dcE9enT2JEWG3kspBI7Ej+Rxz789OlSqBuIIJ4Ock4HQ9M8fljn1roik0m9elr7bL7/AJ+i2PNfNJrmum3tdq19el1q1+GqutXJnBIxj7uOcZyB09Ksq5ACgYOAAAOp+vBxyvQf/XiwMAbuODtBzyemeOn9MfWpV+Ug5AweCeSO2CeeR1Ppwe9RUs1dave266afhppfXod1FOKil3V3qrK6Tem99/x6Em0kKVxg53Z5bdnhQPQduh+mKlVipzjkgKCeuex9QMjPfnNNHJ68MSRt46dCc+uPfn2yalVAOTnHJzjJP1GDj+vfNcblHVrR6brS1+637O3bTY76cXzeiW2ut47det9ieAlCHZju6cjg56YPt1rZgkVm4OCATx/E3OMnkAEdRjr156YsZywIUsM8A8YHIxg9xjHJ+pU5rUgKq5JxtJIYDJIJA9sdcA8nAOAKwrpOMvk9NNNEtbb27eWp9DlsGp2Surpu93ZPl63Xy77WOgsVxskLrkHBHOMg8ZGByO+M/hnj0nS2CpGMKxA3bgxxnAPVuQOmenoetcBp3kY2vyeGXA4z798+/OO/Wu509gTGjKdrALuJ+725xzxjjp3r5bHz5m01ZLq/RPTVfj6vdH63kFOMIqS96TSS5ldbR0W6V1b/AIY9G0e8aTCMUhXcdpBz0xkZAyCOwweO5Nen6VKvlRgyEFTkOCN2T357AdznnHrXkumpHCEwwZ94DEYyVOBx2zx1HTv2B7S1u5I49scbMSVCEH5QO+705HU9c5xXzOJa1VNWbe+99k79fX56d/1rKaTUFOul9lpJNJLS2zd9trW/J+nwE7ABIXLYLMOSR2AOcHtu6YHJxXQ2+qNDEi7UjKlVydu5CM4bo34g9OmcZrzmzv5Aq7ydpwpI52OeTg9TxjjP199NrrOCCWyCdrHH3cZGfQ9c4498E1481KUm5Jcy3vs3ol0T2XZdFsz7OhKNKKnG04ycdHK6itFe2+m/Xd33PUbe+lVhI8iTOwBY4xlflIDAAg9enfnOMVoJfwvcwyo2yQAMy7sRsf7rflgL69+
K8w0/U5cPI3+r+4gDbhg/wgYBDD8OvArpYNRi2LxtY5UZH3QvR/XB7kdSR0+auaU3HS1ndbdNddWr6fP9D0qVZRtomm0lrpe8dd/h0f3eZ6xYatE8mFlMV1EdwZMCEluirxgv2wcAjAOO/oFjqUxWA+YzqY97HHRj1AYnknGRjHf2rwvTNTgaHB5kUblkJwdy8EBhnJGRj1yfTnt9J1NJWiCzMsiguIyxVQTxsPqT1wBjjk1wVJSbajF2tdaau7inptbf107n02DxN0knG/Kopdmlu9HZeuu/c+itH1d/NjWdtjxRoIzu3IU5wynA+903cYI5Gc59T0zVIHVv9IhijCqFiZ/mJGcOTjlG5wCQTggEEYr5h03WElYOGWJlG0gLgO2PmZWJ5VuM+nHJziu10jWHMbAnKeeMogLO4TqIm42EZzjnPUE8Vrh6jVuZXu1dvTbbyXla6fQzxVOLTcpcrTS0d07pabpt9rJWPoO81dJSsEFws+1QFi3kNGQvIDdGb0Jxg8AciudvtVmlXzruN32I6KqkCZEXAyg/hWPgg45yQMYOeOt9Tibdm4aLashkkBwSRjBVuckdyAM46Vk3GuImDBJ5zSkxBCxaQrnDdRyM4J9eenWvYg9FJ3TutkpPpbztp067aHzOOt7CrG6tZr3t18N7aq+ltlZadT5Q/bKu57jwLeG3YwQwxmGWYkAPK+c+UBjfK2AO2CDg81+LqLMJHUDgsQz55YKfmJPABweeO/4j9av2zNVS18EbGc4u5XcQr8iMhwYyBk/vFOSW78ccGvyZVh5AfJO76jqejEfXrj1Br9h4QjzYD2lr++k7WV7Rho9XdLtey6uzP88PG9xfFDp86cadBNtSu1Jy25Veyb0f5tE5YDA552ozDnOc89skY/xHSimI3l5zgIC2DjLegLH9AenPHNFfaxpq3wr5vyXdP8z8TnV5WklzKyXRbWXVrzey/NiabM9u5UH5FIDAYBBHG0A9jkZPf6Zz1MwkubfdsLKY1wu3oBnLdQCo7H9QDXJui27ZHySA5KqcAk5OWz0z2/8ArV0mnu0sC5aRg38I5IA7LgZC9cD8++Pg5NqzWrurd91p1XTr0Vranh4eu4JKSbV72ule2r1tona2u+3UyL2AxkbIsNtwHbOec4xx8ufTBHoa5a6t5pJOY9xIJAPAAGPY5xkHHB55HOK9NmsHu5FVd5LR4Ix3x2yeozyPr061v6V4JluIo2lixIx3Io5dlGeWbAx3O3A/LptTxapWlOyk7Jpt3S03tulfotjeePpRjzOStayjezTaWjXX70r99T50u7CRTvVM7ySxYn5D6HgAZ4GeOcd+KpJazDBwMMNp9m64OQMY45685HUV9L6j4A2NKViaRsgNGvGQOmOC3P8AEvbpznFc43grYjTMrbAwWSNjkIwOPvAA/j6fUV6tLMKU4pq9mkkvNpL9XZ6Xtpo7LgeYQaas5PtbV7NLR3utk79tOh4NJbuzbSG3pgoXBWNcjjaf4h1xgYJ9KAJULLIRHkFTwAx547thTyARz1x616rrmgiJXBhRflBjlbH3uoK8Antnpjrz281njWNzG/Gf+WpHLMM4bk/MPy7EnnNdVOrGqkre7a9tbrVXa/4Om7NqOIVZO0baNcttdbO9reeq9H1IVmcN5YU4RgVC8KygDc3bHUZIx9Diup0i+MAzuIbBBXBZWV+rDB6rwy9sE8DNcsqgEnZ1GWAYBAF+8SQPkznhcN9R1OhbGVQXRcsDhEOOYTxnJ/gGck/y6DOpRhJSVrp6JXW/e2r00Vtd7im7OSV1olo7dFqtrfpf5How1WTyWC3JMWzYY0B+QDq6kHkfMPl7+oAxWbc6oyQsqOzKFAbfyQRng/NweSe+emMYxlQzFYgFwpYE7ByCWPVB3QEHJ7evpWujyHDkAABgylSzc4YnOVA7cY9a56WFhezS1a05eullpvdab+reqMd7Je6k9bJ81nFPpZ9LevntnXcrOwbYDgl
txOOCRwR/j9T0BFFZmQvnaBwMAHemc4xkYGMc8nHHU0+5YliSTu3YO0cPgH/HJPqMADGarkviTIUgqGbceg5wzN/eI4GPxxnFetGklGEW42Vkkr33suvdprr26nTBrls7XVlq7N7dO97+evVq4hdd29nZjnaY2GeD0Kk4yO59RilIRWZflbdyrDB3FiTgseA/ABGDgEDmoCeCflCkZXJwytxhVP8AI5/rUckrMx5UEHIUd1HdRzzz3PI9RV6p+W13dWtba19uz17aGtOLk0le0nZv7umltHb0b8zXsX4bA+bsWyNwycg5PbjHp6cVZZyrMVyHIz34bjOCei/TPtnFZ1m7FSoJ3MePXPofQdsev0rVWBzJyCGwCOM8f3vXBycgfQVyVLKTulzPzeyS1er19H8zvpvRJQuo6aatLS2ttfLV3v5Fa4LtGxBbc+AxPJDnIxn/AB7H6VhSl+UK49ABg8cfNjgjGMn/APXXoS6Ws6csRvA4A5Lcnk/X+nQnijP4fkZWAjbOdwKqTk+pOBjH+PFYUsXTi3Fztqnqtr2W+m3TrvqtTqdCXKpQtKLja6TvFu26bs9LJ/d004NhhSduDleR+J2cdOnI5xjnimsjMMKDx1GDhevp1HOe49wevRvpNyhIaPCnIGcY3jqOgyR6gZBPUZ4qNYzFyhibA/i+ZQAO+QCpHv6dBkCu2GIpz1U0r8vVLXTfV3a3dvX156kGkmul0993y6J3e1rbrtoYwXByThhgK3UAkgLj0APHJyevOeLDFkBbltynzdpGBkc7c44HH9au/ZGyY9g2oQfMJ56nHzEYPPTPqaY9uGd1KOwAwAvykjb1Ixg57dT17HNbKrBP4k7LVdGuj33at59uhg6b0SWivd2Sttu9Lu2y31ffTPRkRAxUthmxjlhk8HqB8v8AXirCo0bKzEBtpDA8kbsYI6ZPHP16dqeIDgr5XPXZnDbc5IY8bTyO1OaIbsqrBeQQ5+4vZD3JxjHf19KmVVNWvayd3da7O2t9bK/n3sL2b00d79E9U7fgrvXVEYl4CMu32U8HHP3emOg//VxEWIYMGB2DBygzGH6N1HzDuRkjsDnmQRoszbBjaPLwykhiwP3TnqQPX5TjPrWnp2n3F7dJb2qGaW7k8oRKpYnkAKuMgEbupz61zynGN3skldvVK3fS3rfe62NadKU5pRi73a5bat+7ay0bTbVtb32K1nYX+uXNvaWVs0zlhCCinMjZGSSAcE5+Unk/NxjBr9Tv2Qv2OtQ8RX+mavrWmyQpPcLJIZU3bEYjy38oqMIedxDfUVr/ALHX7JF/r91ZarqlkYIrqUSNJIolCElTCTEVX7uXJ54JFf0hfBj4IaZ4UsdIhj06B7q1jjhScoIllifGQ8e0iR2A5yRjjuefyvjXjingaMsHgKkHOV4zknd3t72ztrdfPRn7ZwFwNHEuOYZhCaceWdKnUVotRcGnytK17tq7Xm9DG+CHwQ0LwNpNjHFpUHn2+yIsyBYsNggFcfP5hBbcTxjpk5r7P0XwwLm6Rlt2jQqHZeBudeyMAAoXtj1JwO3f+GvAcctorm3jYOqEI64VGBOWU8ncMg46EnjFetWPhODS7LcIWdHl3AKMk7uAEbnaBjAbjvxX84YzNZYypUnUqSnOU5NXldPWL1s973Vttt9D+g8PGlhaUKVOEY8qirU1ayVuq00+a31PJLXS3stzShmto2AGTyNrMGTPQjJHYD0yMCsHxE1nbsVj3MsuCHDfKB6AdOO+Dn15zXoPiG3eK52xStCrM6+Seg3gYXJyM8YB/DHSuHvtNaYxlguIw2Q4wwC54GTwcn5icDoeOceNXqLk5o2c01smrK6WrdttXu18tD6nKE5yjUlK0Wl7qS5m3Z6X76J6elrGDpt3HCV2TFYcgOWyWDjOFPQjIJ4yVz1rqI4BJcwTIcyM4xkZyCR2xjP6H+768Zd2LQyFo3QQSPhgvJL54JPUYyQOM9upzXpvhtLWeW2EwZi
m1BnqWOMY/wBkdj346YGOelOWurunq20kvS26TWt2umy1PqXSjGEZK7WmktXG6Wjf4vz6XR9OeArAWthYTAo8bqxlVVG8sykEuvZicZ64P51+b37RdtPH8WdRSSPZbysrW7JwC4zkSY4IBIxxzzyBX6rfCPRUvG8iRdhCHaGUlcHtuJwGIPAx26+vwH+3/wCE4fB3jjRdes3EdvfWhjlOQFa5ZR820nls9M4BGT0GK1xFKrUjSrJL2b9xta3laLbfXV/11XVwjjMPHiGeXuX76rRc4SUXbRwdr2avbu+6ufJEeu3Gi3L25ZVk2kGSTgKjdcMDwWA4IAzg9e3rnw51uW7vhGJnxMu3n5kcnnABGQDgh256DHXn40n8WvJdOl4ZfPd1C7mB2xLnGAAM5UnJ4HIGPT2Pwp4y03T5LZrd5IbkbN0rMSmeOVUABAec8nt0xmvRyynCDaqys7rdN3d09JLtsr+fofqWMwMpUWvZt1JqymrNaJaq26dtPLztf72t9QXRrcziaO3nyMBCDGVPTnjO485+vHc8ZruuyazHcLyt85IjeIbhtI+V5Mngqcknocj0rmbPX9O1qwUw3c1xeSJudSDsBAG4rjIYKdoB+XaTjnJxVWDULaUXEbK9uFYSEt90kDerjbxgYBPbjr1r6T2lOonClyygkryTu3pG0eySfXW7300XzdDCqlN1Kq/2iMl8aUX9nRaJtO2i/pYWnaVqUWozykmWZ2aTzExyzH5lGCcDAHHbI7YNet6HaaZN4duxcQNFqNsZXiVnKOcZJwcNgNjg4PfpwK5DQ1SW8upLqeWMGFhCqNtw/HAAzwx6nP4cc6Mj3cCS5mIR12FyPnC84zjvzye/Ue3PKj7SHLpFuzv81fW/S/fpY9apz4pxg3ySi4O8XJfDa+3TX9UfJnxRbUvtEt6oMLJK21C25toY/I2Bx7dODz0rD8E+JbuO5tGnaeO6MigxD52VcjkAn5YjtJVvrxk8e+a9pei6jDfrfKsoiUlQX2FnIO1gSpztK8jjGR7V81pI+ma1fRJiGOJ2YTyjIJX7qx9Dtxnt125ByBWLag+V01F05LXa8dLPbVbNdF0PsqOKVXDPDSopSpRhaW7nG0Vp1aW7XdJn6L+EPE9ve6XZw2qiK4WONJmxh3kPBIPqecvjnPIHGfUdPs4ZL5fMtkk+UDey43M/OW4OQSSSuQDweOK+E/hp41vTOqSgLB5o8lxnhV4I3dMng5IzxwO9fZGi+JTP/Z8Vs8csjr/pAzuYs2Bndxjb1HGevcV7mDrQlyr4tFeNrvTld2+n/A3PzTiHAV8HVmqadp8zc4yfu83LJK6ta3e++6e5mfEfwKt3DO1rFG8kio7LDgqjDOACOVK9xjuDk5r5c1W18R6WrweTIqxuEVtpLKueecDgjq2O/THNfeH/ABObNxd3Vl9t06dW/eRgN5C7Rkvzk4UjIwMgZ+vE69qXhu7leMaTCwaVA6iIbuSRIGGOVJx8vYjuScbY/BqqozozlT6pLs7a6J9e36i4f4mxGDjHCVcP9cpws+b2nNKPwq2r6JbefofOXgXWru2vBAGuIGIMqs2V3twQFJ6LkcDGB0B5r6o0H9ojXPDyW+nzFBBG8azMqZd4lY/OecM5BIY5BHHYZry/xJoGn2ZjvtNsmjgdULuvBiI5Ma+q56en1AFeaa3NbwwSPMHcyfOVRgAuOh3Y+VkPLx4OMjnJ49PK8RicFSUYVZR011bk42Tel7Wbtrt5atHdmeGyviCrGriMJCUXblpz5ZSp1LK+qvrZ6bao/XDwL8YvBer6VZ6hJcQiSeBfkkRRcB2BB3fNllJ9QCD7ZFeAfGbxV4b1a9u76zSKW+geMyTgKCoG7y1DDHyrg5IHy5HUmvy/uvijqnhjI07VZIYiRFiTH7jJPMXOPLJ6jHQH1zXLal8atbluY7t76O5aZSk26YmJYU+9IIsYkY54XcpOByTwOfMMxqYmm4z5ZO3uuyS+zpJv7XX
bfS6V2eRlvh2sHj1jcPUqKFTZSk1Zc0ZKmrpJ6aPXqltc+hPil4nHie3ubKeO3kEWBERIDMpjBIaJ9oOecA7e+K/Obx7MlhftcyCe3uGBL+YeGlQnbGcKMIw53k84GBkYr3q9+ICho7mUqyXrZlZpSq54ykUe0lD227iB75rwn4uXpv4ri8sfKuWuGV5nK5dYlByYl6fLkAtxkjgc8fO0KM483M+aMp82tmld8rs9t+v6XP1PLsI8C4Qimoye8pfa9zmiui317nJt4tae3ihEbJJGgLiNNqM7jJZvmIdhtGWG3qOBiuVv/Et1C8rGTzN6AFXyqmMgjgZ4J9upGe9eFXXiS90y5RpZp1gZZAN0mB5uQHUpgkg4HUgA5JPBrKn8WXU8rKLhHlTEiW7tl2XHz+WOhKjbknGAQADnjpWCn3U1a70fRRVm9der6r53XrVK1KmuVcsV0T95uWmm+lnr8PTTXVdL4p1m5ZLtI3bzMiSPGYyyc5+bJzjuuPmHcda+fNZ8bRadMsN3IGRTIhlB+ZFOPmZQehIwB0OM12vifX5jZyMskZaRFe4cHcVHO+IdCJU+XA75zXx/8RtVlhkNxGc+aokdWO/eATjKgqFSPOSDnOR6c9OFwbnUhCcUrtavV7pW8vl9/f5fO86eCw7qx+ze6uk3Gye/VWul1/X07UfGdtKkrPOGWSMALJgOMfdbAOSw5+XIOD+Feban4v8AthkhDqnmjJWDCqMDjzcZ59QPbpnFfPN54tuJFlRZ5Y1eUKPLOHRY8ZZcHo2eT1ODwcGsqbxSUnKYmDouGkJOBjo27kMXOc5xwOvavp6ORSik1FybUbNtpL4XdNK272dtfw/KsfxrCqpNVLJaWcvebsrp73W9lon63PZ7zX0+484jWBSGCDKuTnlTkZXsR0+mBXPy6sJ4yxbbDI6mN8kPhcktknODlcA4z1zxXlU/iCaVmaNmYOpd2cjaOnyxjrkY68Dn2NPTWy0B2/OuzAI5+ZeoU5GCM+g6Y9K9GGVyjFLkum4pJbK/Lq7Jq+v6WtofG4vih1ZNKskr3s3dJO3wro1fRXaV3voehy6msrBUdgYwNkwbAYjIIf1PTAzg5PTqcOe8lFxmDKAKGcFyACfvSMcHKrjIHXJ6+nJRauyqRK48gBs7Pvuf4Ay5yHPILZ4A71QuNWeMEuxYuNgt85aKMg4Z8YwCOCeT6813UsA6bs0m2krWb1urrb5abPbc+bxec3g5KSlacb8ztZNLZ23atZdPQ2PEGrx28NxI7lkViYrhsAqAvITkAAf3ueTxXxZ4n1SfVtVuZ5QuRO4jkGciNWwMe7A5J79O1ewePvEkj2rWcUgWTPlZBAIXkkDp16bvQdPXwhtzFsncSxzk8MT3yB2J4I//AFfqXCWWrDUXiakOWUlaCs20rRfra99n5+v8t+KfEUs0x8MvpSf1fDpSqq+ntXayUU2m7K+vdWW5EMHjDZbgnrnPUc+vX1GfTGZsAjAGMgZ56kdenbHJH0AzTlQDacAkehznHbJ9sY9cHPFKVAJ2nGAeQAPboPx5xjPJ5Br6+U+bdWva9l10v+Xd3aR+RxaStZJLVXejkrO/l03a679GDAOE+bkEEcAEdvbpyB0PJxSnP1HUjPQZHygfiM+nHTubQMsck8EHpgd8HI5z0B5Oc8rxQeQdpIIPJPUev59OnueMVnJ6Wto3FP7189fIvSSTfWz13u11216DirMVPDAgscccnkgj8OuScdBzy5FG5iOAwZWUEjJHQjr8vbHPPfjBRMtjp1PQ4CgZ6jgfzPX0qwqqOi4I5GRgd8nkkHvgd+gPFc8pWdktN030v03t0vve/a43FykrO2ytv1Tvf5a6fctCVQpjKDh1YNnI7dcZ9uR0756YqYEHYC+WycjHBOOh4yfz7e1RRqq84Y9mAB5I9ANxA9fUdTVlIxyxZlU8qNvJ45x9T1/HvmsXUUVo/nbW6s00/krdk7NnTSw1Su3GnHmauna13sm
+Vv8AXZJb7v2AIpJ+XJIxyfx4wB7dueOtIELDhRkklT3IznnIx09v1GanjRnUBs85PynB69+Mn65J+uKvQQh3UBM9hnnOR05OO3U9MZ4rGeIS9523d1dK1rbJ9N9EtbJs9fC8P43EyjGFGqk+VJyjbmd4uS1b2v5u3RmUIuqAjOcsccZ9u5Hv68dDUDKSxV8sM8AAnHPvjoPXOcV1SWKHeQ6+mOeO3BOcDOeMHoOakXSomLMrhj90KvJJ5547Hr1zxntXM8XFNvVJW97Xvo9vLV7O3ofRYfgHOK6g404XlK1uZRaj7u7t1v0/Q4eS25DDPlkdSBnPcH07Yz7/AI1TA+3AUkhiRgdh0yBnjJ7YJHTHWvTIPDTXLAKykFumfun1Poo79ccdsVsQ+BLl34TZlflwOW7blXIDKeSWyCOOOc1jPM8NSS9pVinvaTfRxtrq7arZ7rs9frcq8E+K82SlQoe42oqUpLlveN9tdnpvdvTRO3jZgfY2FLFsBuM/jx0xk9CcZz0xVT7FcZVREecHhT29D79jyAc8EGvoOw+HUkxRWVMbyhcNnYc8ZGPunoTkEYwM9vT9K+Cy3SPcF4GWNlDRFwGCnrLnHzRnrgDsea8jFcTYPCNuVXzTSclvHZxvZv1fpY/Usl+ivxbmMKVWfsqMbxs3J6O6u7Wd1ZN2vr6tnyfolncxzllYIVByjA5bOMcng8D2/nX6cfs9a1b6p4FtNEuYJZb7S71ngUZxgMpJyeACMhsDIwOpIrzrTfhX4csAVuLKO6lOR8qhiCMcocfdIxgkdeg/iHYfDK9h0P4hWPh20spNPtJmSMCRdommcEu7sRwjED1x3zX4/wCJOMhxDk1dUaEnPDpYqlUclFxVONnpH3rOMna2tru9kz38z8Es14CpUK+LrQrKouWfsoNSTlyLmu1btdd9UfsD8PvsGq+HdFX/AFd7pUFjc6cgGXt4wx+0qwyPlIEZ6AnGeAOfgX/grTBbav8AEr4c3UVx9q+weB7SKUYKyxmaMhE43ZjYodjcFcHggjH1l8HpL6Oa5gu4Sky3yRJHCS6i2VsllfjcjjGAAM4OOTXyl/wUIu7HVvilpthbQiZIdAtCSv8AyyZ0YRRFTkoF2sWbJxkcHOK/nrwrpTwPHeGxSnUaoYfEqUIyUotVbKTvvopW7XV9WfVcN8Jwz6EcurK9GpFQmpU1KUbTpyi02rpptXs7WWnl+RdvoF3ePvCAbcgZXDf7JUjr78YxngmuktvBTxRKZUdpGO9lbhArH5WU8kqcc8fL6c17bpuk6fYxR3E8SyEkh53wCpX+BVx/CSBnnPU+tV7q7hmaRQiooJW1VeBx93Jx8yn5ugHTOBg1/X6zedWq1TbjHRNJK/S6W9un32sfs+WeA/DWBo0quOprEVXGLUNFG/uyT2bbdtVqtWrHmS+G2VUCW6NeNIQyk5WNFx85XG0Rgc5z3AxnruweC570LHHbRT3IKuqREtGSeisduELAHC/NnHXIOe10yylvb+2gFss0s0kaPgne0ZOD0BwVHBP69j+iXg34a+DNL8FWut63Fp1in2ORbfIzdXN2uzO7jdHNEzDysg53NzzivQjm88PGnpOU5tJcqTd2o21XTXXRNdU3t9tQ4C4eyylGdbAU6dKpGMKUeWFOTa5byScbuys7K1311Pzu/wCFdtptlBc39rGblgTJDExZbdFA2jhBtL5OQSeQOT25bW9LFw5t9MsBHtCr5aLu3s2dzN03EkZPTofx+5NT0nU9c1G/sNB0oS2dwkkdpLLEMXCf8snZSMmdAWwQRtznB6Ve8Ifs+zT3JvdbiNo4Kxo86ARzzAnDqSR5hyTyBjPXtWqzVRXtcXUUZQV4QUk735Wk0vO1+uu/V/NcRYbhyhh1CFClRUUqbUacVNuyV2oxdnfq0736nxv4N+BWveJnWZ7d448lywXDSYA3MG44UEAr3z1GK+mdC/Z60TSLG3XUYke7ULMY9wZ3IwcqCPlD8kq
S2SM5OK+q7/S/AHw60uzWTxTYWs8cXlylAoZJiuGSZA3GTgd+g5yK+UvHvx2nE8un+GoUumZmSO7kizF5LZAYMTnK9VbgfNwOTjkjmuLxlVyjPkopu0WnG6011+en/APyzGyyrDKosPhaM4zStUpUl7RX5bOU+W101bpv3O11m1+HnhazhmWOzjlWIRvCiq4FxGPnWZ+AhYkYXBB5GeK+fvFfxM0++haz09YEkVmigIQfu1BwVc9T8oGeOeOhGK8qvrL4geLr9w91LOsmGlgjytuNxJ3N1G7HbnGO+a9R8D/s56hqaTalq+oRWoiZX2yvkurZ3ujHAyMDC45B9jTqYzD4aClXxV5a2ipc073i1FLTRvXXTqeFQrVsTV9nhsvUErwdWs3Gm23Fe0cFHXTXWVr+e3iF9b3V7c/aLe7Lsx82SGEsBhuWz/dPU46+55Nb2naKY4kvnd3gDFm89isxkAGEYc5VTngD5sk8HivpjVfCPgbwgkUZntru+Xat5GjrgnBwFBJyzc5HGB9a5i28WfC23eK+v4gUt7jY1qQSw2EBiVA5QE5yeD1578ss0nWgoxjJpq6vFObjaLtLTRt7O3k9T9AyPA0ISVSVOrUqKylFO1JNKKvSjfVbrm10d97HkDx3F3JsWw8ydsLAkMZCDqAXbGGz34HPJq/JDPpdrLPPpN5JLCqNKI7ZpIokAIUAkDaTn7wz7jPFeueLvjn8MbURxeGdEt5ZdkZlnigwySqDuLNnBZxjC8Yweec15Jq/7Q2sXunXen2NjaWqTyFQz2qFmgyRtZuofHB5I789uGVapVtCFC2sVLnk4WV1dpatqyvbr1Z9hQxVGjFOMZc0b2mnTlytcr96UoPmt8NrL1PK9V8WFJ7hYYDbxBnWON+CWYj/AFgxkrkEgH7vORgEHFXxZNNbSwS7PMkwRL5ZO4gnADfwkD7oHXJ96ydSmuNWvXvbmNFkmYsEThQexAHIAznGOv1FWYIpliaMCNw+NzlPmIHcDt79Ac9q74qEaUV7JKSs3Lmva1lrvpo9G1fbpp5dCrmuKr1408XVdKSlyr2KUY35YuMlaKSTdo2+7qNg1W6Mi8FiCQMEAMMjaDj+Ic+n54rsNOvb6RSGRVDgMUPUDp068jp6nPpk4selQsglUlGUBgvGSwPRc9MYxjrnPOa0oY7qBkYjLnrvI4XgjdjPT9Cfy48RUjJXhGKkmtZKzuvn5NXVmtD6LJ8Bm+Bqwq42pVq0JSTgovnSTaWttt2rNPU6CKd0lWMeaWDKeQQvPJIHQAduPbAHFdnpOrqksCgkRRHeUyQTJ+u5Qc8HGfbOa5K02TunmoS7DgIOD6sDjkY6/QcDFbiRWgf92QGLDeOmCATjqMt6c8YOR2Hj18VF354PmStzLVX0V0u29rLfTdn61kPtoS9sk40m+XklHkbiuVJLS3k3u79j16y8RWyqqukUjO6LLcOgMm05zx/eIwCB0wBWyNK0vxAxijhwrhiiqoypA5Xf1ZG+nGCK8z03TvOCyRzpIpK/uWY7gB65JPHqc/TByfRNKtNQsbiD7KhlIG52DjYucbc8cE4PODnHPOK8qeJnSlzU5Si9HbVdItK2yX6K66M/ScNTpYumqdalT96O1TlmpXtok7pfPf8APL1P4NJe2rzxW4gLozK0i8yjn5cfw7M8ZPO4nJ7fPviz4cXmkyyqiI4RwjpGPunB5WPHcdGzz1Ar7ss/E9wunyJfwvBI20mYL5jLgkMpGBuDjGGyOhz0xXL+ItP8NakTMszrcSBwYJmAiYuB8wz0kGCV579BiuzBZ3jqVSKqScqb25rvl26p6rsuvZLR/F8S+GuQ53RrOrgKVOfK5KpTUKck0leScYp9dnurp2R+a2q6RLankMFA4DKVZDnBVgCevQjOenNchIHRmVgMMTuXGcLkEHPHAz25r7H8d+DbS2SZraWO6jUAFkC71BycSYJ2gnjj07EV81alpLxO5MTR4LYOORg8Y6Ag9Qe
K/RsqzRVoRcpJvR7pNP3dGt7u+qtrfdn8B+MHg3VyrF162XqU6FS8+WClNJJrXmTsuZWbs2rvTc4JgdwUg7Bjr2Hfj09c9MjnnmWHAUkdAcgnJIyeeTz34xkA9+eLtzbNG+7lhnkegGBjryPc9e5BqJV2jLDBJyF9uP8APp+BAr6+lWjOKtZpx3W3R2ettfX1tufyhX4fxOErVIVIzThr8Otrpd3dpN6dFbsDDdgk8Y5xwQeQCdpzg888H+VAKrnglhwp7j0B/rn1593kDacHPQlV6ZPU/QentxRgnH8R4JOcHGPu9AcAdevXJ61t1S77a9NNbvy163Svc4ZUpQdne1t2tWlo+mjs9tdvQcCSF646EDucYyfoMd+hHoKUAhhlcKBjPUnvjg459fx700EgYAPHb+Eew9yOnXPX1pwLEeqjJx6Efe5JA45x65468q9vit5Lpta97X3fXp5XNKbhGLulfuk22nazd9EtbPpuKwKseMAjOBz34x6ZyPc/jSqAAWfPJIIHUnsSB+GcdT9KVRyc44GMEg8jHY49/wAO/pJj5Scgn72B2Pr0/TJGeMnrUS2W2q0+XLrdq92r6eiHDTo2klfy1Vrva90uvUeoTbwRkDIBYAbRzzkHOOgUHrnnpQXJbI4Bzwc84xgY9CMk9ce54MKkggscbgQPQY/Qfz4zgDmnnkDAOO4J9D+fPJGO/Jzk1m7d7PZd27rzWnfy289G9LJ629XbZ9VrfbuxQ7BWHPXIGc4J4BAB4Pr+GMjgAJGGPJ2nPTn0yc89yOgwSOBjLV4bIJA6FSMkj2BPbt16d+lOznJwTgYII6+/bnn8eARzmiCWqjd2a3Vrt26PS2q6v0JjKy953631u7WVtb62t1f3Buzk7j833gOeeOmOuOPbPpipgFGTyOjA4PIxznB6+w9utVkBz93IyMkjqRnAx/Mc8+va5Gdp5xwvIYZB4xwBycHbnn8qu9rdVpotHdJO/l2u9+jWhdP3m29dXpZppXVleyutb3v5X6iKMncTxnqeeATgLj1x1PQipjtyOuACemcg8Yxnkn1yfxzTByQFOVHOBnGO4x+gHPXHUmpl2k7u/QDnOSckjtnBPf8ADrWM3Jt2uktLLola2vW+76rU76UJK3Ns00kkrpXVrpKytrre/oC7g2QvUAKCe544A4wfXPGO5Iqym47RuxzhweARwce+OPbA+mGgKMEnJChsgknI65IPAOegHp9aMtyQRg42knkdB6enQ+5GMVjK1n96el9Glbf/AIa+/Q9OjTaava72bV3q46vVXtsn+K2L8YUH5uAO2M4I/DtzjPbHIzzpR+USG5BIyMDkn1Pr1OB7fXOSrMAoJ+YgZ25PBzkc/Lkenfj0FWYmZjgE55AGO4OcHr1zk/8A1q46mqs29+lm7NJa3277H0OXWhNO+umlr3272tot/K2qOnspQu0HJL/dAx0yMggds44PoO1dvpsjb4dr4z9/IBX8AeQTzjjn8K4SwGUXIOVIy3cdDj8Ox78/j2Fm/lgNGM8ZPOfTBHT73cZPTgCvncbDmcrLXXTtsr7206L7j9RyNKPs5NLlbjK+/wDK/RW10tfz6L020eNEUbiWJBUqMggnPJyMNx78dieK7CwltymN4DLglGYg5Pccdcc5xyO9eVwau2FiCCNlGAy4XPruznrjkjHf0JrXt9WfcqEhNoGWbHKjuenPrz6V89VwtSd3rF377p21216+f3s/TsDmVGny3k5JKKSSWklyq7d+m3fXVM9JmuYt58iZ8Y3FGbC7z1GeeuODx+tWbe6cqsjSBxvJVCwYsDjbxxwcZ549+DXH29/GwaQzKMJllIwTxyV9TjBPbsfWhtVSNV2MpYk7VPy8dCV5+9+PPXjFcrwslGzi5NaXtZu/L3SWvlv1eh7azWirXnGK6W1bXu9ra76aLrvv6PBqVs6Sw73icgMDnCbl7Y7Nk4HI9+TU8WsTARcK8edvB2geqPwTnjPfOPrnz+2v5Lg
REtlpASBgZHIzzxwuOCeeenAx12n2D3F3bq0iPFIyysxO1dqdVAGcvz93jHqe3n1MJyuzi9WmnK9lfl09N0u7Tvc6oZvGUVyz0SWt+aWnK72bbVujte+3Y7TSb+5uo1WCRo1yA0gjJZAxG4A5GQMDBwOcnBxx6poenapdmKSTpLsjNwDgKhyDvHUleN7epwR1rN8PaNao8YkjRoG2mPCAKzdMOBycY+Y57gcdvoDQbOyhkjPlxMh2D5lyhUcY2jI3L079TzWKwXO1GonyuVrRezbVnHfTfe2u56uGzOdPlcW78qfM7PTRpNq93dO3a1iLRvDdzGj3EkxkjjChdilY3BGFcrklApzvUdQQcjFdtb6RqlvbSOkKSbtrbgvywu2dpAzyQM5PuGwO3pGj6bbAW5d4vLZo3jGFGCc8Hj5AvOV559Otd4dFtMgRxQySOkbEKNqhMNlwRwrEEc4ODgAZNdCy2jTXuyld2SUm2nZR100advPpv16ame1G4xcVPo3LZW5Vv263s9H0Z88yRarBbskqiIOpViF+dYx97d6J2L5yc9BjjNM6bEjhY7Uj3mVl2yebzwDngdwcZwxIHr9FahoFs1tI/kYcERHIyZozgMvAAI9yOeuPTkdW8AQXFmi2oa2MJEzKnMp2kkoRxlRnhe4PHTmo0+XWaTit5LTWyutX0Xo+6dzjxGNliaE4pJVGraN7+7ffRXatZbed1b8lf2yvEBmt9P0sKXYfK7bsKijJ2x5z83Jyc8cdc1+ddm4YSwEqxiVm5xtL5yPqV5wB0PSv19/as+BGq65ZJqFrFLNND5RiVIyHVfmyjKCccctgk8ADmvyd1Tw1feH9XvNPvYnt5Lecht4IwDkdcYGSOOT/ACA/XuDcRhK2BjRjUSlq1Ha8vdV9Netrt66H+fvjHgcxocUYjFYqjVp0KkkoTlFuMoRWnvNa3u3Zeq6GW2N4VWyuAT0LZOckjv06npjpRUDOonY7SOcKMdMZwAc9D1wBz656FfdxXIlFqL0Vm5R1VklbV/8ADn4vOUua8dnqnyrVfNeXl103vqSabM7sfQhWLD5WLd89QBjHfvjrgemeD/DZuIGdgAWPyBjww7c4OwDsTzz0HBrInt1g2u4QqRl88DHq3oR+PfivV/AF3azr5JwE2gEgck54XJIyCegGO+DgmvzDETkqatorK9rO23bS99lp8tWfPYiEvZ8qbgpNNSjeyS3V9NVr2uvS5UHhSZbyHMTKxcssxP7qP0zx82ecNxz6mvSNL0W4WONPJRWDoskhGSo5y3QcNkE4OCcHAFeg6VotrfXEUpQsCPJOSCjNGB8+eMbNw28ktk8jHHq9l4YtQilwgjdUVncDc7J951PfOR0GO4JzXD7d6Ker2d1zWta+mnTfbvqzwKtaouVXbjZpSdr3TitbO7VtHqunVnzvc+FpVV7gQzSsyPlWUqu09WHJwT2Gecdua4K+0I2UTM8e2JN26BQQwmblCBzuIIJCZ9STX2rPo1rLaOqgIYwpdrYfM6x5x8+PvvknAA6YPWvJ/Enh4FrobQIGDPEUwJC4xsRuM7yWOTxjJ6110MXaSUnaN15StGydk9Nm0nbbXY0pSk7Xa5W4Xeiabtt21srPXdPu/hzxjbzbnypy7FgNmPmPB2rk5zwQeOR07V4Vqts7XAhCcxk7Gc4PPLBhjqQADz26dq+tPGeh3RkkHlqwZ3AhLhWi2ZyBkE4wc7u/pjJrwHWdFlDufLcJICxLZZm65HQ7SCMjk4698V9Pg69KUIpSWukbP/DZP17WS+SPWwzVNq+iundp9Ut/et321X4HmccbI7zEkMTjywuVJPQ4z27jPoeTWvCFEKgZL5G8L8xJ4PHTAUHkZ6H8aQ2jDcChDglVVQTwOu/IHPXJ/HmtfTbFyIwygjJZwpwF6AqwxkEjGOe3OMV1zkt3p81a9lorbvbz6drd1S2krK9r2suqS01aeyv6WugS1dgGXAYY2jdgqT/ADg5XOSF
75HI7RXVsy+aZWOVKjGeNnQheMYxjC4xyec813kOmo0Jd0UK43RkfI23uc4OecADAPPBrBv4Yo2UYyBvGejFh1JUjDn0bIwcEZrGlNSd7Ws+2ulut7/f95yqT5t3fZqN1bRPo9d3fX/I4SdAGOFCoGO1mJBAPIBb35+719KpB2RXDn5FAOwjc/Ge/Tr+BJ646dDfwR5LsxETqCAB8xZf4jj8v/rVgvtGFVgTnazkfe788HAAHHocnnt6dJppbpuyfW1mnve/nomkrnVTvJLTay29N73t577fMpzkswXkKMHkY5bOOOMdP58ZpsZG0LgF4yWcjqUyen+7jkeufwJz82d/UguB8xUDsB25xkg4H40qgBjtwVYYyR8+Cc89c/n3/AD3aumk1rb8Lat/r6bbvtgk12ta3fS3d/emuv3aNi5kkALKqkkhucIy42gD+9g+nWu406BpUVgpZiASwXc5K9WPJ2qM9f0FcNZ7UcDnCsQQBgHGOR9fbtg9MY9C0mUGGPD7AQFIUZO4kZQYxgkct1yMdia8jMuaEfcWu+qvbbR/r3sejhYxUveV29OXRNJ289du9tNup2ml6UJkUCNdhw6MBgAr/AAl+cDJ6bec9up7iHw/HJE/7vJyoZUxvb+9gEDbnjkE9DgZNc/pKECLa6qgB+dmxgDblcYGUfseOh6Dr6DZzyLGsYEYViquPvmPG7kdM7M57ZzjpxX55mGLxNOq1Gpaz5nZu6+HfXy76fce/QhQjFJwv8KSd73stL7NLa+2r1tY4a78HQln2RArE5B2kHBPJ4I+Ur0J5J56dDly+E4TE8YOUUbpBkFs9S2cfMidxxjJFeqy2yDymRJGLozeWAQJMn+Je5bHJ49PY4N1EYySu0NyHG3iMrn5GJJBB7gg54/Hno5niZ2iqz0te/dWXe62eq0NlSw3L71FSkneNtt49LvVWsrvqtr6eT3/hu3DuqIn7sDaMbFUn0zncSAOeK5q40eOOOT7nGRuJyc/wlRj+E5wfc554PqGqwqxc4aRyikRleADkEYz179cdsYriL+FlQqMsyLlmHJiU5wG/ugYPGSccHg5r6LB4ms4x56jsrXbulry92l92/S9jjrUaC0VOK0bSs77LVaXe60u1f73wc1nIjrhA8hU4JyMIMfMxxggdQe/HJ6VnSRSNP8zKEC8hx95uNwX1PTGSea6a53ZVZBzHGFL7sq7j+AccKOee1RWOi3mv39vbWMbTMXBUIh4JwH3bc8n5QpPX0GMj2IYlpKU5JJLWT2Sdteyutmtr2OaOFdSUaUKcpuTXLGKV2lolpr2a/LoY9jptxql9FbWsZnmkkEcUSR4LuTyB0OTkEHoeMDgiv1B/Y+/Y+1DxXqFtqN7p8kbSzhh50ZeJJXKFWk3D92qAHJwQAw5rpf2RP2MdQ8S3mlazrGnSI5uFeJ5gWCM5UxmUFQBGAGyTkkH3wP6VPgd+zvpXgyy09reytYblYY1ScwiGJlZV3uy4bBl2giPOTtJDL3+B4t4whgqNTBYWs+aUbSqQaST0drp3d1/LqumzP1ThPgpTlDHY+lFtShUhTktWk425otW0137fMwfgL+zzYeFtL0+2t9KjE1nFCrkQiMuHHO0djuX5Tk4B9+funRPDS4t40gihlg2xvFsDMskf8ZPGGUcFuvNdv4d8HNplgJVUSS3EUfyxLuWFcHaUIGS3HXA/lXY6V4SuFmNwqMrSsFY5/wBYD94MmOGzgg5x19DX8y55m9StXqc9SU+aTs23zSd07c0vvbSWvmj92wtGnQpQUIxioJRjFWSikor3kkt0tHt16lvQNNlnWKJVMbRggoqDO5eknUfeye354FerRaYZ7JYmgYSJGYwUAPAA+bdyRxy3HbvVjw94Zkh8qWS1IDqcy7vmKgfKAB93qcHJPJ65rvP7OuNNaRlVWtpYc4ddxQkZI3fwk9+Pcjjj5yNepTi6koy5Ho2k73drvbbVf59V0SkpzioSV1LRtJR3ikr
3bbvtfv8Af84694ShQzziAOYxkMzZVyc7sZA2suBg89gTivF9alt7GO6UKMAMFH3mDnJ47gDB5xznB5r6z1izlntL+MYHyO8bEEAvzhcckA85IPIBz14+QPFlpctJL8oDiRkK56ox7cYycdcnHXrgjhnj60p6t8v2bq0WvdVm0rXvdNdelz7vIMPGVlJp2tdLo1ytaN277dNbHkC6tAb17cOWPn5WMElwSclvm/XpivXPDELzm3kjlQJE4Z0wFkx/ex/6F68DGM1866vZ3ljrMrGFoJkcGBuqsTzljjGGGcE98Y9K9h8JazLBHF57FHkCB0cgBWXqFPq+cH6Z6DmsPimpPnbu7pcyTts21Za633eqbd0fcVsEnRj7NXvGL97XstHdLS1nf8bH6TfBKSKaEGdzE8SARSPyJvU5yAfxxjnb1FfOn/BTD4WJ4j+DMnj3TYXmvvB8tvfO0ZK/6IC3ngsAeFwmFIJJPXvXafCHxRNLPb23l+SolALqQyyKcYIOOh6nuO47V9U/Frw7Z+OvhB4p8K3KR3Sa1oF3brE6gCWQwsVjY4bblhydpxwcnPP2+DqUa+XyioxlOk4tOzTb0b8uVLR366O99ficPOpk3FGXY6TcYe3pwqpaxdObUZRbatazbetlZrY/i81T4lS2x+0oZPNSZElVfmdERipdhwMN/EM8EY6cV654G8a/bIZLiZ43EwM1mijcS7D5wr4GApxzjHP3Tjj49+MK6p8MPiV408Capbtp76Zr17aPCzGRYY/NZraRGZUPyjO1wCpyeBiszwd8QbxNUtrQthIJQ8YDhRIpySox99D1bO3GffjjcqrvKMVbVpq1942tpt/V1of1LGhQrUk4xS54wnB3tdShFq22nbda36n7MfBvxpCtykV3LtgkjlKiRdyjeRvXdu4k4UjPTGM9a+utNvNCutOmgLo8THaZoypcu3O4rycjnp69elfj/wCCfiBLpkcWoXM6NDMCIQoDqFwAzKAQUkGRxzt96+sPh18U9Ml1KNvPVzKyA28shMK7Ry5zgEjdkjaOxyO/fl+L+rWhNpynJJprZPlTer5dGulttLXZ8jm+R1q0p16ftISjyu8X8bi4u1ls/XZp3PsRtNtZHFvYyhIDMuyYjMjlDyV5Bxzhh0PFfRdj8CdW8ReC11/RbOa9hs7WS4v5XJBnSMAuYhgn5V+6ONwJHBAr4kf4haKk5jWfYhYskiZEW8kZiDZ5ZiOG5xx+H7Ufsm/Fn4d618C9Q0i71C3g1hNOmWaGXaZJYpFIAJJyzADKsB0yQK+owccNifaRlUjGoqcpKUmoxvFJpK9l0el3d7Xu2flvG2b5/wAOYDB4/LsHUxTji8PSq0lCTfsZziqk5uNvhTbbv2Pxw8WeGHuLk21gYx9ldzMki7JhJGcFD2djk4Bx3Ofmr5M+KFo+nX0dwWmhWPIndxtMsgyCuzPPAweQDjqDk1+nmu2vhk+K/E6CW2kto7u4MAVxlsuT5m4ZyVJzjpjvnmvAPjN8LNJ8QaWdQsLi3vfsiok8cbBpgSD8wHSRRyWIAI+UYPGfn/rkfaqM2pJScFK0Wm29d03bS/6H6dl2f0VUwX1mFSmqlGjPmnFtKVWFN8rdrKz7/Lon8ifC3xtYoJrKXzFuJSzwSOCVj2H5WweVPJx2719SeGPFS2d1G8d6rMIx520fdYkepOBj8scdePgKXTZPCev3lqZXijjbFpczjYZI5CRyDzgbSAc8Ade1e1+CdXE3nJPcygFSFl6Bs8ZU5Oe2DzjsO1elSrewknC/LOz39Ho7Wu3v57nq5thIVoSqU78leMX7yundJaN6db7/AC7fqRoPirRpNGi8zV081kAeNpA+d4ycqcfLxzjtzXNarYQkSXNm4bezSF0YEBTySDnGBx8wHGcciviW3l1/TmL213NJbNtlhYOWBU8scnsOOO34ZrrLD4vzWEUlteXTERQmPLDEgJByCMnOcHPPb8B6sMzjaMZ01eySk3e
+2j0sru+/Ts9vzxcPV6FSdXCVnWc5c0oRVmk3G8Wulm7bHdeKviDqtlBLYosaxpKcQy43lV4ZmB6g8ZPQ+2M14drPis3NnK7XEMcsjmQRA7W2vw0ajJIIx3y3PTvXLeLPHCazdCQOZI1cSb2+UgA52ZzwpOQeefxJrxHxhrxZ2RCsUxR5IxjCuTtzgg5EgxwemT0xkVyYjNZc3uJW5re6tldbb3STf9an2OV5XGMaXNT9nP3Zzer95KLs33bStto90U/G3iWOKWRImmXZuAjkcsjNwVUAAYI+Yn+nFef2/jG2ihht55mVWDPhWJfJx5bOediDnC87RkEkkVieKtajlS385Wd1XbKIny3uo9HXqTn9OT4PqPiCLTrl3jYhYVMUbS52sJOdpBJ3HI4HG30wcHlhVq1muVSbvqk9dLWT7afetz7jDUoexjGSakne6Ss37q11vpffft1Poi/8ZOyO32rdjGYs5kMaABnRScGQAgHkdiOpxqQeKLSXTUhnZZEkhIdZCGYmTPlsSR8hGDlR2I5wOfkuLxnaiWTzHdmkVtyBt0cJUZZiRztbI+U9MHn01P8AhK7WNkMF0EjcoYlZ9zow5UDoVUk/KpyBzkmvSptSSVmuWKVmtW3y6bJL11vuuz0rKmocsE3Ja2bjre10mr3/ADWhv+L/AA353nywkkEPOqqQBIrnPynPPl45HBG7B68fPt5cS6XfOQyyDeIy7gqykZwxck/Iem3A3Y6jGa9lvdZm1IBVuSXcE7w2WVTnzEUDg7sAjgcAmvIfE0UfneZOWwMrGkh8vKDrxjhhnjqTk47mu7CXu4u0oq10ldp6P0130dk7K17tfM5hV9nFzi/ejvHm917K10uivruY82oNNbXKPMr+YjsSRgKx5Ax2K45HPY96+aviBF5cM8TyE7kZFBX5XEnVxknavAzjOB+NeuavdGOKXyGCyJIzAxbWQK+PmYDAL8ck8H0GBXivi+Q3W6SRpSYo9ol3EKpIwQoI+YHpnIIPrk16dGhCNaEmtbxlJdrctunl0fkflfE+YqVGabaag/dV3pZNKy2fTWztZ3TPlHWbiO1vHUSBGg+ZWCkeYGyEdsHhUIOQM5yOnBGBPq86qFRi4YbpFyMuF43ZPOOT0B+ua0PGNu8dzNIY2CJKUE+M/JncFyCBtY/dJHJznBxXnbSMjKmWkcEr5jMMqpyRu5G0kAgrk4wMn0/T8Dh6dXDU5+7pFXvvtHdb2V9k/l0P5uzPNKtLE1oxTSvKzcndXcbJXem/R9VdnXw6g/kq+/CeYXDISkrA9UCHcfkI6Z5BJzwavx6xM+UUoEJ4DAZPHzbfXtjpyO/NcTBdkgiRs7yRHhOCQcjgkfKeQGz+uatpM/mctsCgkKeVVvTr8pOBwQc464rp+qwUlaKsrWurrZJJJPyWjvd6PseNPM6jipe85Oyu3r0d7+i+/vZnai8PlCZi2Qm9FJxuIOM4ySR0wMcjue+Jq2srawPMZA0koAkZ2yyeiLwMY7AevPArCmvxbJOzu4UyAbc5Cqe0fpkjoBwBxmvPda1Y3D4VsoHI2twQPqSMgd2A6Y9xXZg8sVeum4/u7ptu1t09lr10u2uvY+ez/iKrgsE4Uk5VZxtFSn1stb62Stv1+8ztYvHv7t5pJC4Bwik/dXkjgdhx7++euRg5IGOnbtkjPJO7jseTx9cRvKA3XcSw2jdjA/wwMnJzgd84pquAWJIGDv5/iC434f1AK7VI555HFfdUVGjShCKtFK0Ul0ajsvlf59z8Ex054rEVK1R+0q1XKUm3dpuzdvJbaq608ycDnvnIOeox2yepH5DPXoCAevQg53ccHjI46nPTrjPqMVGJUYhVY8DrnHHZQOnOeTz1p6uXjbAOd23GOSRgdM5Oc4HTNUpXtsru2rXdLy7/APAd0eNUcbtJu60+Ft3Vuiaur6u+t3t1E8wMCufl5GW6E7sZ46gdhjgnpSjBXOcegPBOPUEcgEYHYjp3pRGztld
x24CgnBL4+73BC+3A59K3tO0LUb8osdu5aRggYqSRnoV9AvcjHXOaxrVYQTc5xjGOrcmkkmlrq/S/332S7MvwOOzKXscLha9eV7KUKUrJ3XLdtJK91u1e2u2uNGDjGchjk4PT1HoO/Pf0wOb0dtPcSIkMTyOcBVjU4YN03HHXjk4H949q908NfCCa9eCW6uIt5ZWMG5SXB6E9MZOe/bHIFe+2fwl8N6Ykt1Nc2peOJI4Y0kVlW9kGGjLgHdHx+8ULgcYYZNfNY3iPAYaXJGp7WTaSUdVf3Um0k29bNvTy1ufuPCXgHxRn1GlisZUWXUZyj7leLU5wly3lFJu7s9lfV9dT470vwrf3sUkm1omjyPLZCWbBBZywPy7M8DBDEk8YroT4G1VQiJEXuGAcQhSpVc7lZRyCGGefY9e32zofg7RIBBDFpQUguJLpSNzWw2s25dpEaP3bOeOOenrNp4F0C402fVrzSpZLeQiOLyxuW3Cn5SjBQfK5OH5288c8fOYjieSqJxilT5tE3Hm2irW1dr3f47Wv/U3Bv0a8ljhofWarnXhCKlzQcebWLlUi5RvLlWlpWunfXU/M7/hEtThMRNoRJICswYk4Ze6jHQ568HI6VYPhS9iKTG2kIYN5ZKkKcYyoA6jnocZ/Kv0Rl+HllJqV5PJpSyWKIjra7fm8ts7GaUDg4BzlcHAPFUIvAxvU1FF0hVto5fMisAgDJ5ecL0JJOSS2PTqegs+lUWlNWtHmSkrq9tG+6T3WlvU/QYfR8yeEU6UJSal+7XIoqMly+83y6c1r3t+Fj4Hj8KXiwyyEGMsAY4yMlmOc+69e+f55y20+5tCDLC8e7gSBTsc88g55HI9OvSvujTvAdyt45XRo7m3mdvLZ12ypkkJGwIJTzNrZLcELjGRVKb4OT67Fqkkf2Oya0E7tZkhw4UZCK2AVcdioPU8c12U8ypyklVqRjCXLq2kle1k7O71d9dHfV66RV8F1h4U54TDulWp1JKHOm4VLWuqjS+1ayatbS/U+MNPeCKVTv2hQeeV3gdSeecdAOuOR7d1DqlusUKJIZXH33c9hn5E65ycgnI5571geJvCGr+HbmQTQOsKSyKrgEoqqflwQo3AjPzkDp04rnY7meTEBV95XBKnYERP4m78gnC+o7dR04jDUK0YzU4ySSad4v3dHdXd+nyXoeBgMRieGMRVwFfDVcNVU5XVSEknO8VFQbdnFtdFstLX0+hPDL2t4I9rpG7iSRlLYDMMYVgeijJwPfjk17roul2/2NIBIsckoCGRPmWBnxjdEpJKnrG24AbW3DHX4istRvLW4QW0pLIykO2QMg+mQCRzkjj2xivp74Xa9f6pdQ2quguJ5HRHkceSXYKFTaTgKedz54JHyg5z8ZmuDd3KEouGjaa16W337/loz9+4G4uwmMpxwWJh7GukoqbSdOd1GzS0s1tZuze259PaP4K0mZoJZ7otPbqElC4CXKEZMnBOyQEDaMknngY5818a2tjofj3wvemAvHbzCEXEA2SSS5TyjMBy5T5s9Op55Ofr/AOGHgq1t7FbzVYZIJnDFhKCRJvAwGU7vLBxlWGcDtzXzx8eJND0XxlbESRqlm8Ey+ZhYZJGY/KpJ+aYY+b+7kHGDXx2YUZyy/Gx5Of8AdVIcluVy92yUVtd3tputlYXiXSwmKymtGlTU60Ie7zxtzWS0SfV6909GkfeHw0jS4/szULe3eBZ7aOUyS8zNc7R5rKCOhBHl56c9c8/F/wC1jpdndfFLU77UGTyUs7ZbaVv3QmUq/mAnLEMhC465z+f1z8KPGugR+GLTUptXsraMafGsUN3cILgTbQGaFc/dBAxjtkAV8aftE6loviXx9cah/bmmzq8EKOq3CrbwFQc7wMgkDliMYOOK/n3gKjjqXFeLboVqUIqrBVHCTVnOLavy22ulZ2S6dvybwzwtehnNV4vBSVJxkk5pexc7xdk3tfur9LJ7HzMbDTLxltYZAIuEwAQ
Cp6DGcNu6OTgnGeOh6jQfhdpetXGdQ1Kz02wtvlQeYiF8ffdGJJbHGVI78NxzyPiq78P2VtGLDxHZ/a41KbLSLeDIcY81965LY+Y4AG0E9ePFdS1XWTOVh1K5UDO1kdkDbum0EnGAMgjrkZA4Nf07l9OfJ7Xmd76xqKcEknHVOyvo3fvbsfveMzrDYGSqLBfWHFRhBYWrzRpK3w1FU92Lb15oq6t01P008G+Dfgt4Q0/zdQ1/TZdVR9wuXdJhubb5SBSV+4A4HORk55r1Cfx18JIbG5jutf0p9NhTy4U3L5ssmMALEWOMkBjyNxHXjn8VJpdWuGzcahfTENkEzORuPoSeMY7DrjA5zSCx1GaKVnv51VDuCNNJmQknkDJ5wM5POTz617cffs+WMJJpe7KUm3om7aavTS1/VKx8PmGbV8y9r7DAYuSbbTlXbcb2vyyWsbJaONraq/Q/Wu++MHwg0pUlsNcisbuKCQrKm2RhIcfKyZGA/AzkY7AV4d4q/aMglzBY69JOkRH2GFY/kts5BBYNgq3XHUHqfT8/YtNuXYKxmcMwOC7MWYcknpnJ5BHXuK2IdElkGWyzZClQD8nIJyNwyOOcY5pzhCNnUcZpW0klJvZ2vJ7p6/cra2Pi55HmOYSdSnl9WUnOz+sVeaKS5XK6mrysm3Fp6trse2a78SNJ1XU3nv4b3U/MbzLlXmcQsRkqEQhvLQkkMAT655BGH/wmdhPK2NHKsxAiRV+RI1/jLYyynABGOcdeuOQt9JSB4htaUIAZNgO0dwATnIPIOM4x78dFbtbIwkktgwB2+UCFZSOh3BSeeuMc4H1riq46UGo04TaWiaail8PS6aj0Tb9ejPYwHhbiMdKM8XOjhE3H3WpThZqN1GKlZyV9+jdtFvvr8TNW05IpNL0wFhIpYyRhQqg9Dwc5HqRjgc1Hf/Fr4h6kRbxSppsO4rGiLsDJ3yQcEk457H0rb0ObT9Qc2t9bQqCccEbcLwFJ2gn88ce1elz+B9O1CyjubWG2lRggjbKpMzqecsOzZx0zgDkY482WZ06crV8NFSvd1Jy530ae9ns72+e6P1LKfALL6lGlioZnXnCpHWjJpxaXK2rK8op9HdrXVdT5tl0jxP4jeSfUdQnluHcumJSQzA8yZBBJAII4PHTFLF8OboCY3V7slQqMyfOHDk8kBwMLwOnOeoxx7/d+BNTs3gMVhJb4RmWWMF8E7cIAOkbjox3Bc8A9K56XQLm3nZbqOffEdyjBEe89AGHVOmTjvjpnOcc1nO6p1acFo42hrb3dNU7pWv3vf0PvcL4SZLgqdOEsHPESilGNR86tGXK76ON7PZ3T032PKk+HUkc4iPlSBwHlDjZ8pyUdBliVfJwcjGB2Ndtpfwnj1NlMwSNT+6QxqODkBnkI+8vI7DIJ5zXfaZp0LTWKXglkdg/nuzlQiqRtiLBScAE4I6ccHOR3dvBFp08c1ospijjb95GfMj5A+RhwFz2z7e9YTzKutfaLmSVmlbW62em7Wllsz6jLPDHhrDJTeXwkp2c1V5ptczTsk27Ser0bPPY/2a/tNvNcR3MSeSgaIs4YTKc5ZWJ4VeBtAyeOnWse6+A97psMskrRxJFGrIJSA0nXIAGQF6YOTxySMZPsEfi68t0ktBugjLhk67lGOMjOOf04FeoaNrem3VjaLfQvetIPJu/tA+eFX7RHkjbztOM9ccjnFZpj4uzqSadnZpNJJRuvJaK9306s9xcAcNUYyqQyegpLT3YqLkmlZ25Vfbq2+t31+Gx4GuYyUkt/LQEgNjvu4ABIJB7Ejnrk5FZ0/h5baTZtMu04YE4IOeg65A9T0+mDX1v4xtNCivDNp8csTD925uBiNyc/dBJGfTnGfauSstD0nUY7zDQxukZYFgBvZckbXzwf9nv6jt1/2jVlyRlJczcVo73va9n10bdrW0fe75qnBmUOFlhYws4cr1ly3UWouytptqrLbsfP1tpjNjyjtIzlcfMo9gf
XnAGM+3eG4gNpJtdcsR1IIJHc5znP8vX07DWUjs7xkjOGXdmUfKTjoMc+gzzXJ3jvMzs8hbsMjAVeegzxyORz3PvVxcZttJu6Su9U9r6J979G++rPCx+T4XCJwpU17Wk097wlZRfwt7200SV+vZ9i9yrBoZCCOgBwDznCgdM9u3YV1Nt4m1LSxvNwWwylEznryVJPO7I74PXtXBW8zwSMUk2lDuBxwSfYEZPp/kVfuLiO6ifzZh5jJjKrjGOOTkEd8+p7cmqeGUkk1e8r6J6Ws1r101+Xmr+VDFulBOKamrqK5lZtW0bfRW009H0PbLH4w2jWxtL61iRshZJ/KEjbMfNg5By/p/DjkmsvWtV0fW7GefTrj7JI4eWJZMqpK4JRjnhwD8p65PXivC3tZAjujEgMORjn06c5GPX+gDVvb9AITK3lscFSD36MwwSR79T7URwEeZSi3eLT6vT3W003pfur2OSrnNdU5wxFGclNShFwV1JydrSadla6tZXvre+xdajfiaZJLiQIS4ABJV2IAKkknIGBt74yARg1hyRwXZkaWFS75BZRlAf4dy9sevbntnHQfYEmRXCs7tgMgPI64J7r39T1A7mqU2nXFtukRCUI5UDgHtnAwCDkeo6dOD7VCSSSu4S0UWtG9mr2s9ba32snofB5nktXGU5RxVBV6fN8L96Xs5crjG1teXbbS2vQ4e+8MJJvdPLA5II6EE8kHPAHHHXrjPfhb7SprQyFlzk4yBnDDgE/3ScdR2z/AA17E7yhWDDbjgk9VxkfjnqOfrnBFZ81tazR4k2ofLJ+cDDHoCODz6DPGT9K+kwOOxFCzlJ1I+7orPRWVpPR9/z7n8+8b+EWVZpSrVsBS+q4iSk0lFQs2uqSXXS+mm+9jxExtEQzcE54PJ69SemCOvGOoJPZwYbCAASAQPlPUZzg5GMZ/Acg4OD2OpaNIWd41LfOfugYGCeB6gcc56dBXKSW0kLkOr4ByD0P9QAB059CBX1VDGU60U4zSbSvHqrqPkvS78z+KuKuDc14fxdWliMNV9nGo4qq4trlvo9kkmrbepW2kbSTnPXoRgZ4x0Yc9/0p+4FCq4x94HHG73A+oHtj6U7G0A8k5IIOMYPTHp79e3PNKVIzkcHBIX0HXGc98YxnPaupTvbbpum9tLq9nZXvddfU+PWFn1b7+6lK6dnrZtr807dL3j28ANy5OQwBwR3GfcdWH6fwyKBkksA2TwTgsOOmck/zx60zLfNgnB+XH9McenXHUY69FYKSD7cdjn1Pse/r2zVRbdk2m0nbTZe7b1a28n+OTTpN3vd6qztZdOZr11VtxC5wVwAPUdAc9Ovfnt+PqFz0JGccA9c9jwegGcEevGeaZgZUHdxgbmPGV656ZI/xz7OCgZ6jAGDnIx6k8e+CMn8aOXW93o9bq93pbRa/e9vVEXTf8ul+jTbt87/1o0SKAqqSxds4PGQwxxk55I5znt3PZx5BJG3JHzKcAHuCB2JPTOOtRjHUA7d2ATkY4OQRnGMEn+tKORgkbATu5GM/zxyMZpx1Wqbs11VtbPRra1rpL8SHdNpK9rPXRPqt/wBeuhIoHQgkALjHOe4yehJOc4Oemal28qcAbgMY67hknj0OPqe2MUyM/IVIHy4IwBzjkc+mD+p49JV5YMecNjbnAHXA7ZI9eM+vrkdlJardbLbbbqra+St8iSIA88A5II4AJHXjsDxx6cZ5yLCqWJ4OASQcDGeecdMH+XtUKsAPmwTuPsQCegyfwzz16cVODwM5wy85PJbPQnOOP1A64GawlfmfZWW976LX1Vn0628362GhF3TV5NbtdE13733v3FVWyV55HPAII/I8ZOfcduanCqAVIBK55HUgY4+vPI9uxFRbmBAB2jng46EcYGc4z6+v4iTdvJByODwpC5OMehwDjuPXBrJya1Tve91Zb6a3stl+Tv2PSpU+VpJ+Vrq6+F301dvXXTuOiYKVYnADMCTkkn0HT05zx3B
xWtCwOcL1OVbgZPqPXvnI79R3y1CED5T8p6A5+UkYHXB+g6ZPXpV1MbF69OApwVHrjv8ATP8A9fnqpNN2d3vfbpZ9NW9F01W/X08L7kk9Oi1+G6cWvNPR9LO+trm7bzBcb+AM/d5wyjqR36nr2PY10tveqIQgKs4UckfxHocj0A4B9a4hZPLKge+ATgFe5J7Hn6/TgVpROxQ7CSB94/xALz+Xt/k+PWw7kk38mkl23Td12630PuMsx0qcVFNOyV7K9kra9L3d16Xt2OqW6aRxlsOOSc4yMfKM+nfIGeTzV1LyVFfDHfuXBY7lK9cAdz6Anv7c8rDdISgZjkAtwSB9T16E5459BzmpnvEKsI32EDJJB5Yg9DkZJIBB6D6muN0bOMbPomnd7Ws01vbS626XPc/tOEU3KdrNuyk/i913tda9ru33HcDWkMZjkkMJUABUUjkcBgAfmLYPBAHv6WP7TMwVQcrt2bmGAT6o3b0x14PTFeYfbZHCAryOQxbkjgAD/aHzYz6t2rorCaSRolkOQuNuWzxkfdHPAI5PA+uTWdTCwjG/ldLTy17u9tVe++hFPOa1VxjzzcL6Pmu2rxupatppX37adz03RnkSSMEs6kgAtwAr8kdDntjpjivd/DEiEorsWIUDHTH9xV/3cHIx3z2rxDR1jZ4cbXUgBwD949snrxzz1/WvavDwjEUe2J0ff0DDcq8YJGM56nOcH04ryMVBPaKTTWt7LdbX7/n0Pr8mrVJOMnNyi7WTtony69dt2tbPfZn0D4atpzClwc7PuFTglCSDuA755wvGMGve/DdkzpG7RMVwoiP8RQnknpwMDtxzg5r548NXqQ7IfO5B6dUDcYwO55Jxn3r6O8KapCBbxtIHDRkhyOEYY4zzwCcDjP515TaUrXenW1rvTvre7s/87H6BhIzlBSScuWW+tmlrt+KXz8j1KxiuoIDISVt/kETHnYRk/MOMnOAOeffmvU9Juljt0kuCjqsZMjqdwVmxt2DByRyAnTJIz3rznTYJLlogxdraQgSRqeuOQVYEgEc9B07ivVrDRxIkSCN3txtZVXJJUEEh8AfMO3r3HFS6ztzJ3cXZt62Wm6s72t1626anpOjFqPPK19bre65bRu7Xlra+m/c3bOG0ntcyxSyKwLxEn94rHAUkHoQc4xnGeR0q/baEhLGRdgwf9Y2DMXHVeOpwPl45710mn+Hp59hEbBYowYAvRARy7ergjjPrXZweFbm9ikhEUktzGFeFgdmIwfvs2MfLnkdcnHfnnU5Tau3dtO11aztqtHrrrrrfRkSowSSU+XVKTlL4Oy0aXa/r5XXz34s8ExeIYby0ubYPOYHEccagsqopAaU4G3IPJ5PAGSM1+SP7QXwAsLG5u7lbOdprlZZYpVjOxnJJQPtHyuhzhSxznHHAP72at4cudNkUXFuytLEqGckDchHJU4OS2ePcEk18wfFv4f8A9qxXNsLRXUjz453TeqxqMyAOcAOc9MdeM4NfSZFWq4aspRqSintGN09FFdXa+6V736n5h4icPYLOMorQxNOnUnCMp0qyinJyS676eb69e38t3iXQp9I1e4sZ4DG0ExxngkgnIB6AdMnp6cDkr6o/a9+H0fhTWtQ1OGF0Ro/NtpMFEZHJBWbA+QMR8uC3T3or9ZpV3Wo0ZqSu6UOdSlZqVlzLddW9dvudv4SxuTTweLxGGnGcHSqyilFOzjzJpq7W6f36Hzle6oJl8tHRmwAQB8p9s9yO3Tvn27nwKzlUbaCzsPLBXGAvVyc4UjPAwTy3fFeHW9xL5gVwdhYMoydpA67WI/DvnrXp3hW9eEqRLsjG4gt2fjKqvUq2ODkY5J/iz8lXh+6air36vVaWtbp+B8JiIuVN2tp0krx6Kyt1u7LS999j7T8KzRLFGoWNpFTcAv3ZN2MxluMDg88nGQOK9njufJhgUvCqxwhhETnaCcrHDyPnXnJ4wO3p8YaB4wlgZSSkbROhViSVKDI5PcH
06+vfPt0PjKI2MfmTeZJJCAI0HmfeyAEbK8qCSWx0PQcV4dVTUldXbau972UVprr13Wn5fO1cPJSUmtNUlFPlfNy2Sbt01v8APU97i1exktxCJESXYFbynD5I5EZ+RQXPzAc9uvAxx/iJrSVLpoGSSfywPJCbiEXO+RgCoywI9cFcc8E+Y2eulUfZcpGAMBiR8285GeTnGOTxjpWsuoid5i9wAyx+Yzk+V55kAGIxluOOV7fjmufnakt9m133jpe23na/nbUunSu/ejJRUovl03XbbS9r2d+tu3l3inRbWS4EqRMxeMK/mODGWAJBxjCEDrgnP05PjOu+GBOJ5Cx8uchQzcbWO4L8uDtZsHLDoBgjnFfTFxp8csrEojKG8x4mILGJe+45+XnJIGenGBXOapoAZJXQJLCMScDzDEeQXJ4wF+XBwSR+Vd1DHSpKNpt8trPmvrord77etvkejTi7pqKs9b80n2slF26a3/DWz+NL7whNEzJ5DCQhmjdRiN0U5BdsYGASAx75OB0qtbaQbUhmcbjxg8IcEAMjgEtnkE8DOM54x9Qan4fXyVVYmhZgounkbcr5yRtUAYVsfKc5HIJzXB3miATvG1ugABMRUBI1QDgdOc9T647d/Yw+aSqJKprG2tn5JN2fnra9/Q3ldpxutEttXporvdad9ddjzVoWMWZAm75iFjHIT1VR1BJ4OV6kAeuDqNllAQnzNyq45CnksR0UjABPfOeuM+ly6d5IkGxpFUA71j2uQSdzgZYAjoBk9TkDqebubNjIwZyI2BJYfK65B69fu8ZXrjivVo4hPWL0Tvd6Nuy3+XXS/boZWu4p2vfVtW6Lbr19dXda3PH761Zgd0bKjEBVXlgoyMZA4H4nJzjgccxcwMcgJgRMUUtklR2z0wM9DxkZ6dR7RcaYGQygZG8IPN+4277sgBxhlwcAcjn0rj9U0ZYWmGGbKhj3yeTkgLgA9iDx6DivToY6CSi99E7X391Lf4U/XXvc7qM3qrbO9011t539dW22zzp40yVIJcBRuXGGPOcg9Pcc8dOxpyxKPugZIxhuSSf4uANpHt0OOvbTmtmDMQuAucM3UEdcn37H646VQ2OWJKryc/MMEDsc9znkfhgV3RrqSbTs+ivpbTZWt5X16+p2RSS5k0r2vd91stU/z7sSNAjFiXz8rbOpJA5xwCvXpz25rq9IuljZCyjHzMBkj88g4I6j3z0rnkViwBGwqAoIP3sZ5JAOA3fgnjjmrkJkQ7dwAAyMZJ3dQBz69cccds1hiXGrBxejdtW/NJtW0b8tfx06YTcJRlZ6JcttV0121T06t9Op7dpWqW0duodtx2KGC8kjJwT07nn+orsrfVbYPHskRIWXaxLAksQCAuOnIAzkgfTFfOlpqlzbN8xIHQYBIUMOSemcYHp37Vej1qVeRLhOcrzlierqCeG9v14r47FZD7acpRk5czavrL4uXouq7t2SR3LMG2rpXSW8bqy5dU3ffr1v80fRL+JIXjI80QyRoU3RkD5VyB83PB7nHbn25261pDLkcrIpO8nCM+BlSDnJOMZyCCDXjQ8STNGuJCpU4bDbiccDg4K8dufb3pXmuzTZxMSD1wc4GTkZB4P5H065rGjw+qEmmtG/evq3ZRdrLpd3GsylFfacnbbRKOmivp+r031PSbrXt/mghUZWJCr94Y4B6fd55BzxjvgVy93qcQLliNpBO0DlnfoV6FwMfKOgyT3riv7XZmGJZAWUks/8Z44I65GOBn1zUP2i+u541twZ5i6okWNoGTgs3XJPHPUdMAYx3LCRoWVkk0r30XZ3b0tbW97K2vY6KOIniOVrnldpW0bbajtqnvporNG2yy6ncwW1qvnPcTBRFEuZGckBtuOQDxkEdsAHGa/VX9jH9mCfW5rK/wBU0x2N27vHIV83LuUaGMxFQCqAPuG4DkZ6mvFf2SP2Y9S8a61YapqGllop7kFnZWaFWdlMbbCBsCkNnBIz161/Un+
zV+zhb+GbHS7WGxjSVI0kS6SIKUDBS4Qc4JAwoyd2TzxXxnEnEtLCU54alNOPK7yTiryVr6ppOyTty6bX1P0/hLhypOUcdiKM4pSXs4yVtLxkvdkkm3fZNX132Oq+BPwO03QNN0mGHSR50cSBJUQRBSmN7mLByjcdWySp96/SXwn4MgWxiP2VppVjjVokj+4BkKOoztydxODz0zzVLwT4CTSTAhgVoSE2RBt7RP0dThchj/dyfxFfR9lp5imWGCFIHfaJHCgeWg/hQD+ID5QeRg4xX8+5/mVXGTdptqUuZybk+2mrTWl/RrySP2nCUlRpwbik1HlUEo2jorNcu8na7vru3ZtmJpXhxre3WJLYyoABF8uANwyOfmwykEE8jpx6dTYeEpsqJDtkYlhjoTzhcf3+55569a7Kws2QQQw/vkVUTDKBIo53HOOQCPbr15rurTQwtvG4BLlg4boysevPGOR0JA9Md/k6OEliptyndwTupJu1nH3k3s9/Lqi62IlTsmneel9L9N72v2slpr0uc74c06N1WGVQjxqVB2kkkEYwAeFJzx3x17V2t5oVs8Ds43RzIM7TnDnP3eOB04POf1ppFFY30ShuJSA5U8ISeN3PByCQOnXtXbx2wIRGYNE4GwHDYB/ixwefxHAA9a+jwtOhVoVKMoRlUilC75d7RSbS3ei1tu97I4XOtGpCp70VZSS6Ozje1r6uyen5q58z6/pq2k0kA5Q7kLdNyHOR3wTn5QehHHevkfx/psWnalLNKr/ZW3FJQMojnJAwD0z0PY546V+ini/QrXb9q2YiIBbnOD/EenAU4wO2Sc8nHyp8XfDVrJo0t1bRs0XDZBIJ5JwWI4GRnGMdumK+TxuVyozq7JU58yXPbfleivtfVvXufo/DWY89SjfT2lqfwq3NeKTvbdp/M+EfENiNSvDcKf3ClBIwXbkrxgMCOfwPXHPJra0rw/5tujPIY2LrszxJtBOCrDsfbrxkgjFcxr1/PZSMAJ4bTewuADlvNU8FeOgOcn+XWtzwv4pDeTao6SSM0aB2HzoOfm2E/Pgd8461xYeMJTfMuZPVa+aS3Vn380+lmfsFOlWeHg4JtJJtKyfKravTVu673PqH4YILa7gieYoA4DOud0ez7yODjLZI46ivunw9qkM8KwXTOYIYTC7FT5jRbcKxyTnfzgc559K/PbwrrEGn3VnJIUXc4kba2Effyzkdd0mBnuNoxnivsbwt4hjvI/OtpBKwhXzYvlcKDxvCDG4L254/HFfaZTyRg4xcYv4eW/vSXuq9tL6dtbLex8Nn2EqOtGo4SSi1JSStaS5bWtF66aq+9t2fzf8A/BZv9nqDw/8AEmy+NmhaSIND1gR2mt3UKbBJfR5Vp228EOX4Y4IweSen4daPqtvp1758Nz50HnLkk73jB4CsAfvN2APGDknOK/uB/bV+COifHb4J+KvB15LbyXN3pkt5pQmAWM3yxu4jD8iPeQoKjHIFfwY+PtN1X4SePvEfgbXIrmw1Lw3rE0EKkkW1xZxyv5TidlXe20Hy22fPk4GV59ajgYVZTpU5NzaTlC6dr22tqlbXbTqfsPDHEMa+UUJVpuVXCRhQk5c0UoRUYxk7pNpfC+l1fY+7PAet3MiC1a5EnnKZ44Hl3BEGCkbNgYkHJxgdBnivcdE8U/2dPJdyqRMhCRYJ2Ko4LHGNzEdjycey1+cnhr4kvbrazWbhmDpHc+YdzAsfm3ED5mcD5ugGBzivoTQ/HyX3NzdqsLgyR7vlHnADEcfdl7E8Dke9ZSymdN3cbty03dtt9tduz87H2VHMaOJcXCcKkZWbXMm29LNJXuvwu+up+jWl+KbjU7CC5kwYiFMaquWDDALAbshc4IJJ7+le+eBvi14o8HpHp+nahOsNwkaLvfAWBgwIQBhlIwWAU4OT15zX5u+GfH89qtm7TKLcqQ6H5VjVcbnOScls5UYHTqe/pWnfEWV72QyXoe3LgWYchdp7KJAThM4
+UjCDPXPGXs5ppaq2k73je1ovZ7dbdLvoRiMthiYyVTD0qtK/MlNRlreKWjVlb8u5+gd14zu9g1iOWZjO++cyyEebJITuAi5IPBG3JA55r3DwHqen+JNDuLbUmlsZ7wOLWKKQvLsVTseRcjgknd3x64r8wbf4lahdypaS3ESKGUzosgLx20WcEdBubjJ+mcgce4eEPi7o+kQpdNdzx3Fg8MUbknyXgn3BwATh2TaoyO7YrKpg01H2aTbcbavpa7e9rLW/ndNHhZnltd4W1On+/i4uk4RctE48qtqtGrbaJbbFX4/6I73yi1aN7i0d7Y3CnDOkZyIwowDjPLZB/OvN/AV/dWDQpfXMjwKPMnLDEigcJtA6KOdy9DgH0Fdb8SPHel6tbTPbEvcM3mqXYbzJJk+Ynp06cgc18x6j4vvNNvRLb3aNLKEjuUDArBEpyRJ/C5x97pjA4Oa96jh6n1aFNtNpqzlq73ivdbS6L8jvwcq9TAU6FWLU4Pll7TR3Si2o2vyrV/P1Z+g/hnxtpN6t1p9z5m2FQ9jLjGQQQfm77iPunjqOOa8z8Ya9pQuZjBKC6ny5CcAM/IVQDnPftgdTXzRYfFuaNRZMYUSRR/pMagE7QT8pByB1z/8AX5zb7xL/AGij3AdZVDsSCTvYg8MhJBPc4OCfUAc5yw84LVa3Vtm0u1+6emu1+phRwfscRKbTh7RxsotSi/hvd9G7bNN+eh6vfamtzbt5d01vK+f3SkFpTz8wyQCg55/+vXL3k8E8a/aJJHlUAoQ2JWIB2kAgkDOckfSvIovEkouo5Jcsiy7IkMpV4+xB68LkcHr2PGK1b3xRHBJG09yhmH7yJw2AqHjblchXAzgc4weucCY4f3kmr31clokmle6eml1r22PQvyJe9ez5lZLmbTXu6brvdOyv0KGuRXG+VFUtGScu/wAjFT1VuCCx/i6YI/CvBPF1pNH5vlxvKkDARyKMl0wc7gMYPJycnAHcCvb9W1q2dGW3uY5POPmyGVt0ceRynRSxJPbGD7cV5VrN0TFcRxBJI2bc/kt90nOVU4yY17njqO1evhsHfWD95JK+rSSStqtNttb/AKOWNmk1JJJqyS0d0lr0srvTt33Pni4uLmzuZnikaON9yFRkhST84fJBXHHrgHvTDrspuHO4m3VQrErkliDnyRnnaec9j2x11dZtkW4ctEUBDkM7bN7v3A5yDjqeBx6iuCvWugJQjLyy7QhGQqZzsOBuABBc57j8euND37TUZJrSXLZppRV2+uttb9/U8nE5lOkpuMleL31dm1GzWqvZ7WTVvw9g8OeK4I0W2MjgLBlmuOfNk52hQRxIMHbyeSfQVj+J9YkvYbh/ORyAwARQchT91iOrJnBIAPP0NebR6iAEQFjsKszDhQB1PqwHrwD2A5q3NqUYfh0kMm9WZT8gVsZOw8bz/GueDg5HSu6lhuSSlG6V78t9G01rb89NrbPb53GZlKrSlHmfNKLTmkkrys2nd99bJWut0czcXzRq7KxaNMqUGcMXIAB9S2B7D0JNcZqKJfEbgJV3GQKh+Zl5wQezq2QVIGeM8V2V/CJzMiLuyock/IjEdMdgFzxjrk4PauUv4JLZlJVVl2fKwyMqeCfQE8Y/TPOe+Dir30krJvRvZdbX113XQ/OM2VWXMpu6d7vo78turbb0t6WeqPn3x/pkDEyQl1gYY3SYDO+Tt8xe5XsD27jAr5/mtGll2NGS6My+YehAz8+eOce3bvX094zgxy5Z0LFtrDaEbtGTzlST1/A18+aohhnkDEhH6AnJDkE/LheSeM4x7+h+0yTFKOHcXJtp6cz0StHpdLyadlr8j8P4hwXNi5cq3ab5dFZ8urWz12s7p76IyoraKON9+FaIDDquS65OT14HHTn6joKpdlUDKnzTwS33ByQWPX6/hjNdv4c8HeLvFlxFY+HvDuq6k9y4hV4LaTGw4G0NsIAGQSx6A5A9fqXwf+wl8cPFG6STw0+lW8a
DzJLpsusZwVaJdgLFs4OMEDk5ArfF8RZXl95Y7H4ek7/DKpByVrWXKnzJ/Ly0PBeWYqrCMaFHqoxle6btHZPy63sujtqfA981zMJo2xyX2bSSjBcchupYj+vpXHXNnIGUYYsCWYE9c549xjoBk9Rnjn9uPBv/AASc+L+vSWzajPYaRBNki4up1WNkbHKAqCyYyW6bT0zX1T4X/wCCSvwV8IONQ+JfitNaubHy5X0y2vvldgMyllVT5cecBEw28E8gDjzJeJ3DmCbtivavpGjFycnZKyt1l031OOr4dZnmbhOviaVGM3ytt87gpOL1hBP7L01vp2P5lbfQdZ1OZY9O02/vZm4jS2tZZXkbptUKMFmJ25z/ALOK9b8O/sz/AB18U+S2lfDnxDIs20h5LKSJDC+dsgZk+6RjLEZGOp4Ff1XeEPhF8CvhlaRw/Dj4KaDrFyrK0d3qNqLmVZDgecBJECw+XI5GM5wckV9beBvgR8X/AIrtGnh7wpB4btb6AWgktdISCCCIgCMxnaN5UE84GeOmK8rMfGavSpqeAy6jTptaV8ZUulortwh73Tro+yPVw3gZk9GMMVmmbyqwdpS5asMJBRvF688ZTv5NJu+lle/8g+hfsB/HvUXh+0aAmntO4hRLiZWkiPBw6FAQCDwTwMcE17v4f/4Jl/E27Vmup0QwRhn8jLgSNwocADAOCGIztwDz0r+yzwp/wTfj8Ox297451ie61JXjkkWM7YryaTkiQLnyGQD7vzZzj2Puugfsl+G7aZbDw14MvNZaQrK0suVG0AB1wYsFDkbvmzxkZwAfzvNPHHM5z9jSxuFoVXGLU6VOPI20rRvOTndaWvHS6PpMDwH4aYL31Qw+KcNZSxE/aQulHmk6knBJO1nun030/jA8P/8ABK3xM0iRy2l3d3MYWdEMjohz3DeWw8k8nB645PGD7Vp//BLvxxaWkt3GlnZQQpkBiHmdj95Q+wFhyMDaME8HI4/t98BfsU3t/bRTX+j2mhRTMytE8KT3BjJAjA4Q/u1LbVJAyx59fZ5/2Jfh5Zptu7dtQMCrvuDi1PmsCSFAWQE8ds8Hrnmvj8V4p8U4mUXPFTnSbUva8jcHH3W18SjZpJ+6ne++lz06XF3hvw7V9hhcBlkK9O3PPDU6NbZx3UotNpq7fNo/vX8Byf8ABNX4yo+dP0u68iRz9nYZPmNxvR0Az5S/KQuRkElT6U9R/wCCcvx30Tzmj8NX+pyw+XcoB5kYiDBiwjBVgy4HzKcEgLziv9AXSv2WPAVkLcw6Z5cqGTa0jBmypGxCGT5dvPzc9SCOKb4n+APh+S1uY00mC6ZYQrI0SAdwDEwXJcj73qMcCvHxXjHm+DlCUpYeoklzL2dnZOO7cr7K23fc9nDeN2S4dxp4ehQqUl1nyRcUuW1nFx5fLy1baR/n9Wn7P3xC8K6fBBq/hjUghdoNVZ95dmQjYQBFnyI/mCMuMBmz2rdl0bxVbaWsEfh2+0+LzTArtbORHbRn5hINpMsc2RiXCldudhJOf7R/Ef7IHhrWJRG/hW3MjyMCQFGUY5WNQYsKp/ibnII4wKyIP2DvAN4YlufCdkLsgRSssKsRHkkRoAmEyCdx5zgcCs4eM9avyyeBVWUrS91tJybjpZN27L0XmevD6R1LBvlpYOjyNJc6mm+V8t7Xel9G3zW80lr/ABRajrer2VrqVn/Ys9nNIog+3yW7MlvGTklsqu+J8HGMbPQ5qno2v2kV7ZxtZyPNNHF5l5MrB1JDAxTRlAHExxsbcCNpzX9qHin/AIJffCXX5pZ7jwhaNCkKO8KRrGrsoYkhwpKNnlgVbpg8Gvlnxj/wSD8KaquqX1jpwsLC1t7m6js7WPa880SfujFOAGBDYAGw9eD6e3gfGTCyi1i8uxWHk1703JKKacVdX1tfa7St8j7rIvpHZNjKlOOMhGkm4Rkm0oxvyJSjKLel3qrbn8t/i6/0HRUX+zrqM3t6IJdSjnkEosgzHcJ
LbYoiKgnZHvbvz0At6PrPgqxS8kvnsXGoLbRG4RhEYDKD8gTnDuQfMyegGa+4te/4JGftW+KviJqFlo/gG6h8N3WryW2k38sjD7Vp5ciGWaRozudFBO4heuMDpXpOqf8ABC39oWztLx7pog2mLDcGOacqzKAWuX80qPNEY27Dj5RkDIxj6Wt4hZI6NKnVzWPPVhGrKOGlKtKKaptQk6akoSXk9+h+25T4l8I46nRgsywD9pyz5XiIKUZyUXdQu+VXaVtH1t2/FD4tx+Gdd1G+jhskNsIfKhaJRHFL5YJj+cBg/wB4kkAEkcGvhbxRoC2F6xshKysv3mBUBx0Xd/dx36cZIr95fjh+wdqHwysoYv7QfVzpkTzav9mk3zqSo/czHGQFZWCgZ4znrX5z+I/g/cXdzFZpZTTvPE4sgB5UkKRE8z5ziPnhicnHQ4xX2fDvGNPGUozoVqlWioqK57xdkopNp2eyTtvs2jtzfgrC8YwlXwjpynJ81KvGN3Bu0lLnjFNwWydndHwXDHcbWEinngKCSwK9Tnqo9cHGO2OD1egeIdV0C8gu7MyxNbukoZQSNy/xAAgfN7+nbGa+idR+Cmo6dZG4FiZIlOLiVFJMfOEIULhAcYzu688nNcDF4RZLw2QhYyMxQwqN7IFxuLDjKMCDkdCOM9B9NXzelVpuV9Fq437crel2726+XyPHoeFGd5PKjVhiJc+qU4xbSkuVq97Wu+q1T89vaNG/bG+I9lo502G2tmlEJgWdrYM7qBgyGQkYk6FTjg54I5rwDxX4g8b/ABI1T+0dWuGbEpeFFzH5bA9WwTlsjO7gHHQcY7N/CsWnwLN5C7GJVXIAG4H51I6qBxxnB9citfR7CzETLtjWQDzFLMQsr/3RwSNuRjn25rghjVVUlTgpwbsuez5XZN2va2y3Ta1XV3+twvAGPzX2VPNcZUqRjJSmoprnjaL5eZ2un25b6dVY46ytPFsFmIW8Ran5NuwZoUnc7Cc4HGCir028jpmsTUfD2qSzzyy3dxK23ezSTOxkznnLHvnnrjOOB09iW0YNISMIwKzFMdDyo24wzDkdRnrk8g4OrwvGJlOXbauw84CkcHPRgcHIONvH1OdOMaFXmp0aEJSavKFGnCT1jdtqK63vd21dtrL3KnhvgcBh39UwtWmr6yTd27R1u7v3m7baO62R5Fb+HbmRsyMBlyCXfABHBJJ98YODjB9xXQJ4btQubi9t02nZhpAzEZ+8cAZDe3p0pt/HIrsysyiRVLjOBkcdOMAc4A4GOcVlBNxG4kknrknntjp7e/8AKvWg61RJupZWTso7NKOmuivrdd1sfOLJMPhJ+zqUfaS5k/3j03VlbVPtfsvQ15NE01Cyi6CbFO1VO7c/Ybifu+jfXPUYzWsIo32R7piB8oxjJJ6YHbvnk8ngY5GVkJAbJXseDjtxzgdMjJI4P00LaSRNzHBJwSQASCBwe/GOCO2Occ1103Knu3JabWT2i73STX3+Vzvp5dhKslH2EaUotXnBK9r63a6Jp/jqQQ20pVDHABIMghhuyR9QOev9SQK1LTw/qFw8YjiIMuck/KMHnLZPAx6An8elP7a8cuVypOc/j2wQeD39efarMWr3okG2R09CDjGRxtxjAxjpj/FVKyUWouKe95K9mrWW+mu+2ux62CwuWQnFV/aSScYpU1bX3W5NtX27230d7NdGPB+owKzSyRwBSu4Oy4CsD8wPXBxnIB6Z+sy+H7FDvutQhQD5W2tk545GOq9MfjWG2oX1xkTTTTMSAMsSoXvzk56g988nIqJ4Xzl3OQTwcknPvn8Se/4ivKqqtJtqpGLdtktbWel9VdLdX1vr2+qprLIq+HwFSpBO6eJmrq9k9lZX30vdaNJpHoOlWPh9CU80tOWwkgYEsOedvT5RyBkk+hxmugbUrLT4pFtL6cbCCqEFVTOcgAt14OT7fn5HE8tvKjRE7lYMrdOc5HGeg9On05q5dajd3Yk891CygA7
FC5IPfk5LdCTyenrny62Bq1p3lUlKPu3Urd0nbrte6ul6H0OGzinhsO6VPC04Ti7QUVyxjZJ350463Sjezvc9t0f4jwWT2wmvHeONTEUlferIfYjgrg7e/wAx5FdE3jPSdXmcJDBJjKrsVVyuOSByWOTzjnkcdK+VJIWG0rkbfc55HBOM8nHHv0PWprO+ubWdJopXRoiCrKTwwP3vQ/Tjt755/wCzuVxcZtyS5Vfbp2fk97r5LQpcXYmMlTxWHhyrlUXFtpJcqb966vZJ6bPXsz3q91JLTcI7d3TeXbzIypRieMEdAM8Hp9QKzbXxPJbTpJuMdujuAitlCD1MikHeSSMDjHXPY8bL4yv763jt7yVGO0I06oFkkXv5pGdzHOAwxkdRWZ5m+Pcm4oX5GeCuTjnsevHODx9LWG5Y++teuurtbXW3a21+mp7lLPFNf7K7xsnKOjktIu7ab0at/Vz1hPEemN5tzMitI6sQGGWDEjBVeME+mePxAEcvia4jtzLZ3TIzFWELDgYyAwIPJORjoBzn1rygzlcjccA8E9jnng9eOvHT2FWY792AiL4UDaCOSOc5PsMdD/8AqzlTkvONnbdO11fZJNW7d/I7YZ3Cfuytz2dk9W5PlSu79fnr8ztbjX7m8dHv5TJtJ2JkkJ/tOcjBx1I6c/hMdbkt7eRLeSICRQCEXO5R3BDfI3JHcZ7EDjjXPl7HLhg4yyjkbmPQ4HB7kDOBxmnwt5jKi5BOAoxuyeOQRxjjnjI78VEU01JuStez1tFq2t3rpqrpvTXoxwxsueSlZ1JfDF6qz5bN2tt0StbvsF5m8l3EYLnAHDkj+LqQeD/u88ZwK529SOKTYCSoJwzAAnnoffGM57k9eh7SfT7iKITCMMHG4AEkhT0IGDgHn1I+h45a9XcCGUgqcDcMkDvnAA4PB4we+K9DDWfLaSsmra3fS93ve19U+jaS6eHnGFnGjUnKKdabclJwfK0nFpNtPW19VtZXbOekjRyTEDj0Hp359vcep470zDKjEhWKkZz1yp9++O/4Zq3KpiOQeT82ANox1x+BHP8ALIqv9rcgK+MK3B6scdicZI6DBxwTXtQSSVndJq19G/w73u3Y/MsXyKpaX7uV27R21ae2yv0vpvcfFI0WMngE4UnjOeQRzxxyCB19DzdVY5yMooZiM4OCD6jOcDkZHH14FZ5VJAGjzknlW7HuQcDI5HvjJ6U0s6ybhkAcqc+n+PHHb61tzRS2vtsttvldaddPxSo1ORNThzU21p16NyvbTS3pffdmi6PauXBwccBucg44Pqfyx7ZFWotTRlZJEypIBIA6cZ7fxYOBxgrnPNVvtK3IVJSAAApZRhiePmP97pz2PUDjhkmnPtZ4WLqMNjgErwcn6frg9aj2i+JprXdXXZWdrNK63ez7rfpqUpStPDr2lNRTlezaeifdvfzdlp0LM2m2V9HJJDlGJLBfUg9/7oB7HriuVvtKkjLo0ZABPzH5jgYOQ3cNng/XgE4rXjknhOBvQZHIyMkdifQc5HOen06i1EN3b7cb5mAVg+DuJGFAA+6AeR1wD3yMdVPFVKOrd4tLTW3q23a/3u+vkeXUyrC5hGUXTjTqbN8tpSu0rN8qvdtWfbZHklxaeXklTtKkYXkKe5xj6cZJP51zGp6SjL5iktuAyG5HIO4ZHfpnOQCON3QetahpssMkgMOwEn1I9QBxgjHQ9/wrkp7Ub2RgCA2fZQucEenXt3AORXqYXHSUozjJp6StrqvdtdLqnp5o/HOO/DnB5hQr0qlBKUlJLmjZ83dNpPV+VvJ7njVzayQyFWT5VYgEAnIHQdhg8jnGDxz3hyQjcjkBgD1yMccHjJ6/yOK9J1TR0MbMnz5AIxy2exxjjODwM+navPby3lgkYMpwGypXjjPHYHgZGfUDGRX1uDxtPEwVnacUlJXs29L9eu211ZdrH8C8ecC4vhTGVJOEp4SdR8lSMW+R3TUZNRtpZ9tHq7plHDBeSd3
8RHpzwD7cfXOMZAwgyeoIGMHbwfrnJHPp3/M08liBkg54x3/p049McZFMIx2wT0HO0AZ47n6evHPp6kJXXzXrf3bpdl67/l+UVIu/Na7vaXN0i2rOzt1dla+3cA3HHHdQRwCPU9sdOeOv1pQWxkhcAEZAGRgdPQL6/wBOgaMjnoAAcdccc56gj07EHkUhB6sGB3HGSeRnqByemc+vPArZK+y7N6a380tns/06nNKo4X57JP4Y7aKy3taz8vOxYXkYzjI59OPcjA9lJGM9O4Nu3buzjcNxGNuO446g8Anr16dajUg5AztPGMYyR3A/h/MfgMgSkkAKM4IwMk8Y5OOOCf0wOOuZTeySu9XrtovVW2at00sTBuacummr8kk7/wDA37d5V2nKgnpkgjGM8jt1/QcY46ShcEcnkbiMA5PPJxyTjjOfr3NQxjjPTkjnqevQ9cn9QfarS5AAGQQMbjyc9euOAc9Dk8HPJ4ym1qtYqOl9dfPS2r267a9j0cPFyS0Ts1qn/h1vbXfz0+RKqggEg4OQMZ3A9zjg+vPP155ftBGwEgHG3jrn154PfA/Wm5JwOD0PGQffngD3+vFPVs4J4C5HXp0xj05Jwe3PBzXK5aSa1s7a/Lte+jvvvv1Pdw8VyL5WVtXskrO2l3rfV9yULt4J3EDKgjJAOO+Bt6Hjkjpz0pdqjLchjnPXG0nkZ68/3cZ4FAZcHn5tucng/iSOcen0ozlQMgnjG3HIHPzZ49e3oMcVk5N6NPp19O/Trvd762R6SjJWTitdPi3skuz9O10x2RgbMjBByemR255wT2AOTj3q0rnauB06jtnjgZ6gduBznkcVAQhUdVO0NuHOBnuMgcdh6+ozTsgcqxJ4IHOfcMMdfbpj1zUys009n5tdV2d97X19WbxaUddNrprXo9emmmvp5l1ZBuGcnAPJPXHpyOOv6/hZE0aL94AgcADqeMZ7Z/LPHHIrNViFwDgAZOB0OOTnsc49x700sxHAOWIKsO+TwO47en8q55U76N3S2s9rKNtXv083tstfRpY502ktJWWul9LadPXX/M0HupFIw3zHIYEAAA5+VTnjJz+vrUbXEhkBwwUE55+9x0AwPlHOTnv+VRizEMRtKqAQDkkgdT2yc88e23mpEQMN33enJOCAMZYdcntjg+ucVjKnGznbV6XaXRpeautNevbcf1yc5O8mtbpLurPXf1V0tXvpY0IHVyoIIAOTyQecAMOvQkYHORwP4sdZp+5ZECqQQQST0OR0Hr3+p6dOOUs0QSITnZ2YnByOAST1HB4OO3tXe6ZAkmCQWQEAlcZOCCDn3zxxnABOOK8zFuySu9bW0vo1/lp26+nv5Y3Wkr3utHbS/LbWy67/ANNp+leG0kDrI2MDBIIyGGByBnkDqMdc9a990WKJkWY5yU4I4wVAG5sA57Dp+ucfP2gSYeELuVt2Aq9fl6AjnIOc+mQK910C+3GMMyoBw+R1BwMKM8nP8hkCvm8ZN3k9LNLZbO2ndu9vTyP1XIqScYae8raX6PleuiV73s+2+jZ6Ho4drqOMqCpO4NuwXA5AzjAx365z09fojwpPdeQkTMojztjIGSQSNwQDkY9ehzyBXgmkRtPKqxq5J2hJGBCMp5+U87SABgnrnsQce1+GJpIJo1nUpHCwBA4bII+7nrnJyOuMYPevFqzaaVk29PRtK8tXr9+lr2P0rAUOWMeV3+G8XdN3tqveS0e9/npv9ReCbe6mKMHIiVPkGWw+OuRjlzxxkHHGRX0X4ZwDHCY8guCpY8g+g4xyRjkV88eEtbW2jt1cRxwSOCzsvr1IyQCD6Zz3r3zRrpLvVNOa2UywM0JfaDt2H7xyoOexAxjr2qbJxUYrmk0r22Tduq3adrrzSO6pHR86tGPvKVru8baJvS+m76ddD6A8HafHdQXck/miQMDbRqch1OdwUgfeOFB4544559b8KeHJpr0M6p9mlUo6EZlhZjgKFwTl+crwBtFafgTw4v8
AZ8F3LaN5ZKyBsY2rkAqy4ODyMHPB7V9RfDzwXpsniPTGlgdrK6mEjoTjMzY2Ddt+6D2wd3cjkj3sryarjZ01DRe7Fp6OKk462ffZvz6XsfGZrnWHwyxPNKVlGUlZxteMY3Se+6vZNPV7nzl4r+Hd59uQzW5mtBFGQrrtDKRkFXPKNnoMNn1zzXyp8YPDj6L9lWRcx3MUsbHdgwxsF8tWGOGU5Dc8g1+yPx98D3Hh/QYNVt7Mx4ERlmVd0RjXBDBQBhgpO7nnjmvy3+Pk1jeXOnPG2YJ7SJTxiQXSKw8tAM/Kpx06g19XVydZc7Jq8ZQTTja6aW2rurqzb/yPgK3ESzXL/aQipUVCdKSU7uLhKKfN9lytdvyu7n84H/BRbS7q28H6zqSQpugENkbreNisS24xjbg44BYH04or0P8A4KgW8em/CK7CTRRT3cmx0XhXO9SQQespB+n0B5K+kwsVOlHlqK6Uefmg3abjGTSs1ottVf8AI/nDPlRjmmI/cznzSUubmi1q4qy3dlbvfpurn4+Q6LNczIfLIiUggnsTnjPpxjA6Hjrmtq2tZrCfExZVVRjqqt2BHqAOi5Gcnp1PqemeHRvKeWuxWB3swBY5+VSgGCT3GeOOla2q+EmuIS8UPmNGyl3boRg5HHXOMf4ivDlNN8v2eWyS0d2lbR/da7st7O5+A1NW4LqtVpsuWz6at2aVne9+x53Y6rFGrgZLgjGwbgMZ+bbkZUc9/wAPXq7fXHigjPzlfvKobax3nLDcc5XIAI2j5RjIxXnmo2N1pUpLRTwAuzoNnyqo++CcjuRgHOcnp3wZtXKBiLkooBUHO11J+8EGTwT05xweah4dT0V3fXmvdLVW1269H0dlqec6am9Lvt2urdVbTt2110PaU8UzJ/qpWZ42Afy24izzsZCSOMcruGOx5rttK8WLdGJDcCJiSplbDKmOu1cruVeMnORk8818krrc6zM5JkhUEEqwGXPQPnklf42xxxx1NdboGsNOArq7DzHCMoJKRsRlgcgkkgcAcjPrWFfAOMXJxWyd7K9uvSzfp52buKND3kmmnfR6O9rd1Z7X0avfTW59o2urwBfM3+aSgj8zdv3ntNCONqtzgc9CSajlvzl4ZiyRkNIiWxzKjDBO89xyN49CAAa8z0K9M8FugnkaQBYVMLbfLlIOFBO7bGn8XBAJxzXSzrc8MFIcqY3dnyC4Hyuh7PJjDZ9BxXjezipOm3bT3W9NVaz38u2re1rHVGg207/DZW6dL30XW2lrdrlu9uI5SqGSJ0cIF3OTIo5JVDwAH6kfwjnJyMcXqMQuC627FnCsGLD5GPAJHTkgDvgdcevNanrV3bagEu0S2WJ5tsoBKPt28bjjoCArEYOTXR2bxarYxsGkgkMbj90MBsYwxORgHOT17V206UqKhbq1LpJa2WjVrXS6bPR3vpp7CW7trtZaXdrabbevo7M5W7ZxEw2IgVBlQAwwCcnPBIBP3OBzwTXN3cMaw5GApYbSeY3bnDLnjIPG3PJPXpn0ebRJGi8wQmZpNyHblmCrjcrMcYZMjAIzknnrWLeaYYUMYQNGxCOcZMW7oyjjBGO3oe+a9WlWV1baL110drdddLqyuvnfQapa8skt9W7JdE7WWnrf8jyu985onkdIkUscJGPuZPBMfGT9Tx9a5y7bIBBWVtpV2P3Q3TCc/iR+Y4rvdT02KNnMa4EZ2/NlRKOfuk5OTye5/PFcTc2bmTCxBcO/l4JHGRyCc59jjqcD1rvpVFUs1vfqlHW6Vt9Vrq1dfO7KjTas3trdOz0ummrPpa135p678de2RkLkoQhUkgcMSP4l7hR1x9SDkCsNrNlVUCq2QQCfvbvzOMZA9ufSu6uLO4SMNIM8Dft5Kg4IUE9T/e74HpwcySxkDRuIlZVxhcfMc9G3D0/vYI7Yrvp13BWbTSelnddNbX0XTdLfVnXFKLbtdNKyb0vo7+d7p210W5zUVpIirK6hm3YxjJIB49A
AAeOTnPAq41uNpYow34y+BgH0GB146nHf0reWyxGVKZfdlwhyuR/DnjG31HTPGetSnTkdAfKlKkqVVCSd4/iB6YOec9QfxGUsUuZt7ppJPVK7TurPfS3Z6X6FNN73b089dHv6v8u5zH2ZSpVdwUnKu3A7ZB4+mP65qF7dssrJgEAbhgDafX3PPocd66w2Y2CNgqqGAA3ZZSOi9Bye+eOe/NUns3JYjBiUbirHqRx0xk56EZ+lXDGR11tta90npq7/AH+t36nXCmnFWtzdVzWb21trbTf59dTl2gKb1RELLggqMkoRzk+mAB0GOMZrOkjCMW3BBuO4kE4JHC46HHrx3xxXYvaLsnwhV9hOAP4iMccZ4wcDoAfasK5s/MZY0jeaUuEECqSWz0GMnIPqevNP60pS1dtr6p6WT67NX0d7Le+xTo8yVvKytdv4dkvxa11el9Dmo1uriaOOBN0plCbVOGYkjovqcjnuc9SDX3l+zD+zZrHj/XtNvL6zE0RkWV0lUhFiQqVCjbh5iGPy8ex61jfs5/s46p4+1zTbi40+V7U3SmRVjLuqqRyRwAoGOh4yfYV/T3+yx+zDZ+FdI07fp8NuohD24dFEjSHbndPgEKMcJt4zjPPH53xXxXQoKphcLJcydqrjZS6NxTu7J31fTR7Kx+scDcF1sQ6eYY+lKNHR04TXLTaaTUrtO61TVlu+m76X9l39mu28KWOlZsoPJdVM8RCmWEuF3RDC5A+UbG5xzX7A/DXw3Z6dbm1W0WJIrcC2Bj3nefuseRnaAe64zwcc1wPw78CQ6Yluq2LSKRtLIu123bdu0c/ewTn2GR0r7J8IeGRJ5caQMkxJXzSARBjbjeuPlbGQTk554GRX4ZmmZTxU5OLclJWte/LG602euzdtLas/bo0KeEpQpxilCklZNaacq079Hro2W/CfhqUqZ1iV2yBIOgZsfKyoQShHO7g54HvXqtp4bEc63D25BlC5V8HHGMxrwSCO/rWzovhl7WJnEp835V+XiORCeXx1z+f+HdNZx29uqyAysAChOTggHIB7Ad+vI4PSvlK+Fq1JSm5Pldkrdny9Hs20tb2tpZmX1pc3JBXbvftdcumq1X4fe78F9kFnOpMYCHcQQOhBHyn1IOTyuOnTpXVWk8JiG0FSRkAggKSMEdTwM/gMAc0X1mJkgIGMkZwMgHnAABHy8/Xrj3sQ2oaM25T5wCBzjIXHb27e/wAwIFZ0KFbD1LLnalFNOySbaj0aaT3ve+2j7W3CqoylvflaT22Tbvr3va+y62OcurCWSZbhMsquvuGCnOB6gE/Nx/MV0Ntfyr5UZI3L8u09vcHkggcDGP5YnjiiZAp3EqCMknkg4KkY7e/T9KigtUE7ZR1Gd2TgnvzkZ4wT24ycGuulCpQqQnTfvVGlJNtOK0u29F01W2x0whGpHllG6g3ZpbrTo3e2v3Lc0Lu1jv8AT7mOdVkcRs0bEYAYDqD2yD24H4182+JbOOSG70uZQ6yiVYQ67xG3onIz/tfh0r6eK5jBhIIKkOoGRgjGCCRz2/PoOvlvifw0k7SyxRL5q5lCAfNjILGMdceoHORgZ4q86wznShVpwcpKNp30i7qPva3Ttrdp6+R62RVI0a7i24rn5oJX91pxW11b7rbs/J74t+C5dIvr2NpUBlYzwhGBVxIWPKYwB8oBXJ+vPHzRY6lcQau0U06WrQSFkC5LKEI2Z6fI3JY59MnvX6OfHDwe72suqm0aSSFvkmILIIefMXOOD0wDjHIHt8DeINCjWcXiQlogxjmaNcPEM4G3B6Z6rwe2RzX5+pVIVpxs1Z6JdI3T0T21vq9vI/pbhitRxOApqfLN8ri5O7lzKMdGk7a672d+p7b4U8RyXrpJdukqoECORhXPU7RkDKjGc+oPSvp7wR4mOnGWRY5fJnRRuVwygnqEUn5QM8j6Yya+IPDd1Fpv2WMss8M+C0soO9FOMY5IWUHnAJx3r37Qbi6b93aTlovleJg
+QwOCTnocYAPcYHWvo8HWaUJttSja19Gnda+nS7W3yM82y6E+f3GoS1s09dY2t2f6ep9e3+pabqOm3FpO5CzWxubZpD/CEO8CUkhTkj92V5B5IxX8rn/BXL9lyXUZrz4ueC9KkOqRgR619mU77pY92yYRKvzFgGIIPy4OSc1/RdZeJlgsZIr0NKygRtliNkZyDgnOCx6cYz36AeHfGPwxofxF0LV9E1S0jME9jM8EUsYJdVUg/vM4c/MCU2gnnkdB9HhcyeGr08RGz5LKotbv3otp663St697HNlOF+rxr4Rq1DERcL30jJ2fMmrNK7V90nq9dv4JNA8a3+i3gt75pFSG4YSxkmNlnU7WVic/Mp7Edq+kdF8ey3p06cTIsKYZjt+ZncjccbuHXaMt3z0rrf29f2Vb/wCD/jbVPEPh2yuZPDl/cmeV4wxS2nZnL4XaQEJK4IOAV5FfA/hvxTe2cq2ss8vlLMQBnkMCNwGehPGAO4PGTgfr2GweBzzAwxmE5VNRXtI6XT5YtprV6N6O3rY8SWeYvh3M3luNUvYPllRxEZe61Jpx952TSSu7Wae6ta/6oaB42814zfam+zZH8nVUVhiMgA9DyG5IPHOa9W0zxzaWwNtI0U8cjKqyyH5ohziVRn5Hb2Jxj1xX5r+H/HEsLwlWV3ysf79iXijPBUHGGOANpyAMd+/t+leKpZlM5ZJA+AVlHzEqMs0XPygD+Lqc8etfN5hkk6Um1FpWavFW007rrpe+1tHqfpeUcV4bEwUXOLbSjJtrVrl97ftezW72Z9vvrxlCy2Uj+ZNI4L7vMaZEAOAcrwAcAe56V1tt48W5itYmMgmtBsXZ8sRPADuv8R4Pyg+or440Pxat2tuIZXtpcu4COQhKY+QFuFJByQeuB7V2ln4jR4DEl1CzhciUHEu1CdxK9fMGQOvOc8Dr48MJKnKzT3ur2td9Glrqrp7a3Pp6WMo1YxfOpx5lyySvraNmvLZPbq/M+nbrxRE0UqXl6ySoqNGQTskJ3YJwT5WMYxyOhOTjHnl1r4hnmNw63UU5l8qKMnfvYZDA8nK4zjv/ALvTylfFhWVPOcNG2dvOXIAIAkP8QP8AEPoaJtViaNJC7JLcMGg3yDbvXJYLwdoIK4XHPPpz2JcjUeklso26La99fza6Mxr4inFN8yqJq692yi/dV9GrJaNav5s9ATV51AdZHbBO6KQZVVB5VlznK8YwfXPHTqrbWkkt90l61rMQFRVXCAP93jdgAbTzn5ep7V8+Sa7PLukZ0jlLOEUtyduAAynAy/p0Y+mKvx6zPNFbefcFZwc+XGMKqqeGfBHy9QBxjPXkV0ypKVttOj01vFO2myslZb7Hizxa50udL3tHH3v5Wrt3tbXR799EeuT6z5M8sf2jJYBsZ4Y5JDD+8pOM5wR7/erOn8WyicRzsi4KeWCMkRAMGwMHdjjjgnPJAzXk93rZglla4kbzFIxjOxx0BB9x7cHseKyrjXo4iGVyBcSBFEjguNo5wMZ2jdwDjPPSinhNrpu7SWu97XvZLTdfqJ42KSd4ySVr81r6x2793pr08/e4vEFuyebK25JIRiLcUaRm58xPbjJHHsOSRkanq8d24jt2jjLAkxoo+aRxyCAeHOB8445JIJ4rx221+aKHzhMGQMyqka/vFUk5IO4ghT0+uMHHyzx6xI80DfIqDJExH32fG1evyjru4PP447qdKVNWSsk72tZLVK9kvTSyTd3uclXFRk3K7b27WemittbXXXuaGrGaUYfLPHuWNJW3fKezEYHHAzxx6nrxtwhSBgyq5GWaL7wDZzlR2YHHHII6Gurknie3kMrSI0m5vMHLknG0knHyjHQEe3U1hSPESWjlUsEKvvOAQMg8EHJGPl+p+tZOcnNu+zWz8ktLWaWl9X6bnhYyt7ujtdJXcr6X66rRLq90tLHGyTTTXMiTbABuZGjG1NoChA+ScjBPHHrnnFWcw+UZgpAYKTk4DYzl1x9xRjj
kk5OcUXwjluHEalGVd/z5EbMcgqVwMbscLnjB61jz3L2sZiZWKbhEVUHCB84Ld/L+XnPGMe1dsOaKu3a60V730vf3rN/4ntr0sfM1cXCipOclKSdrc2j2vptqn569yw92BsdgDESEwH/eRsM8ldpJQcbunUddpFY2pTC4MkUI89mMcW3/AJaPI2SVjwOAABk/7WCMV1XhPw5qPi3V7XTtOtJriSe4EKrGhkSRxgHysDGUyckjAyByea/TP4P/ALDdpq0NnqvisILeMJd/Z3XY6TH5mDFj86MeCOMYxnPT5/POIsJkqUsTKo9LqCau5XSto1ZX6t2SbPhOIOJMHh0/aybm/cjTpq8tLWuk9lbVu3fbRflLoPwK+IXxVv7Sy0bQLq4tpZhbySKjR7YkIBcSFTn7w+Y+nTGM/oB8Hf8AglxYG8s9R+IljJPcw7J/sbPuiVSQRuIXGyToCQCNucHNfr98OPhX4R8FWsFtpWmWunyQ5SExRossoGMo74yvmYyo5PB6YOPdIPMa2vILZTEJApMjoC+9c4CHHO3I24wMnpwK/MMz8Tcyq89HC1lhKF0lGEpRnKOl3KS1tZ2skk7X9fxXOeLaUKjk5QSjJJOMou6urKans0na67X8j5K+Hn7NPww+GtoY9N8I6PpzyAxoPs0cs3mYC+bI7Abc9V4Pf8PVdK8J2MF+ItJ0yKOGJ5Ga++Ta7fLiNl2dDggZPHYg16zD4T1O/YGSF7gSHc85yoMY4ByQQFwCAw5/PNdhpXge50+0kMUUiW7fvGZkOfkJ5BPJGT8oGRgGvjKvEVXEzc3WlUnUWkasnJO7i721afvWu/v6Hy1TjKE5xp05pN1E001B9FraW199OV/I8e1nwTfR6XPqf2g2s2wzW4bKxxpghmTHK7hj5Ap4/DPhlj8H9X8eeKNM0fQbS71zW9RvVEMFq8rRMoYeYJpNrpFCoIY7skZPUELX6V/D74MeMvizqYtJ7pdN0K2Dwx3qQbUmjj2jCSEnMpBIPHHrggn9QPgp8Cvh98HdKt00fRrK71uUtNe61dQJNfzTPjeUkYZjjJ+6q9BnmsZ8Qxyun7SvJTxVV/ucOmkox9205Wu7L7K+0r3aPdfHlPKcHy1KvtMRUinGnTasm4q3NJP3fW19emh8pfs5f8E9fD+l6Rpeu/EvTtNu9TSCJ/7OXa4ik4aMTNgCSVQDuyAAc5JOTX6N2ng7w54fs4NO8J6NpenmKBLdJ4LWNfKAG15CQAPNAA3NgdBx3O211d3JEYIRB94JwSoPGAD+BGSOgArVsLaNTHuB3Z65wSO/JGG5HJ447V8li89zPOMQlUxOKo05SjBU6MpQpKm+W8YwWzbd3Javvpc/Kc244znMq8qlTGSjBSvCjCpUdNJNWTi5NXVvPffTXi7P4UWmo3cV1q4jvfnV3LYEAbk52cgHru+Y56jgc+xaP4U0XSo1jtLCzhHAykSbxwckMB/+qn2TBF2tkj7pU+g9PXPHI9Oo77UcgIG0ENjnqcn39Se/rwDz1+54eyDC8s514xqS92S57uTaSaabl0tdr/M+ax/EeaYqEaM8VW5ElFJVJRh9m3uq2nW3ddbjTHDCw8pFXbwGwM5bjAx0wBx16kjuRk32mx3QDvJiMASbNpA3rnLZBzx1OcdT65rXaNiSwJIJ+UDr6cDnPX+ntVWWZUXG05UEPnkZGOcEHOc8jJx6mvexlCjRouLjGm1ZR1bWiS0ilHS2u9tdUeDLE15Nvnk3om1dNrTez7p31aa8jz7VYIIVCRq4lDFkYr8pGQVJPbIHB7nOOnOV8uogQSBo5CoVmAwoK4GOnLN36Yx7kV6W9rFdqflDFhkgAHb3zxwByf8ADsVtdHto33rCoII3k4z746DI7enPHIFfnWYcM43GVlUpVL06s1LmSaitYt6br+vJGuFxmJTlq0pcqd9NEk72X5+b8r8VaeF7eWNJDEpwQWfbhiF4yODkg9unUZA4PVaT4dsYC4SxhBb
DCRkBIcZwxBAOffPAPfpW4xWIokYVck5A4wM42kKPmPHJz9R3rZhidhHhgAQNxCnJx2GOxyP8DmvUyjII4KdpR55wiopyildvl95X7PTm0s+vb1qeNrTduZ7JW1tZJaXWln8uxjDw1bKS5iQhsggDgk9eMH8M9Oeasr4W019u63izkDIQc9MrjGCMHpzg/lW/EszkKAQBjjGcehbHTPbPTHOa1o7cgAsB6g+g4znnke575HAr7XC8O4fEQvCi+ZpJ3alFt8re63bTutVZebPYoVsRFRSqO14tOLa5bWas3d9O9rJsxbDwzolqRKmm2glUcSCFC3I5b7uQxxyRz+JNea/F6DwzZ+D9fa9ghj+0WMiqUjBd2KnaABg55OFz6g46H1+8c28RZeCo55wf4hgD6YGP09PzT/bA+J+u6NdaNo9q4t7bU5Tb+YWARHfAV5Dn5CCcK3IyTjg1143KKWEpRo06OH9pU5IW9lGGiSb1UdfdUl11e99D9I4Hy3GZxnOCSxNWnCnUhVnJVJRbVOUJWWvVpN6Wtd9j8Jv2xvB2g6S08ojuUjv76aYOX2NPbO53Rv8AKxIJKbAAc8+vP4mePfBVzfeP7VJ9JS0hleKKK62G3T7Pn5JJ025Cyg5PGH2D04/pm+MPwJ8W+MNO8Ma/rDWVylkhuvsUzKRJBuVtzjA3nbhlORjB4Oa/K39srTtJ03WtButJtIZPEcVtbwX13DbhYbqKEFUjkjX5VlgBYbsnduJwMZPt8HxnTqcsXztynTai2owUXFJJa/Pt0P8AVfwlzmg6eCwEa8cTiJYecJS9opRpSgoRSk+ay2SvrfV9GfIvxC+F3h/wf8LZbmTT7a9s9f2xy3ZZWiZkA2yQJtzC8RYjbuOMg554/HTxLDaad49mtIYAB5wigxwoLH5DIgHzuRjepI3YHPBB/X/4q+ILxfhdPHf6mn2SGF5baxCkvZzKuZSqlj5ZLbdh5zg9hmvxqsp7zxH40juioNz9tZ0B/wCmbkgucgFuOpHbgDrX6TGE1CpKb2ptSabT1as77aWS3Wnrp/QuHwyrUFCs1Uruq5SkrWlG8NNVo4x0v56bHV+M/Dk1tpK3yRsd0SStEF/doefOAOcE/dIJHOSDyBXlNg5kaJVAUrk8Y34HckYxt54PUnkk9fpz4hs3/CLqm14pmVElduSSwwoAzjdxhiM84OK+VJEltbgRZMbbiODjAOCC5Azt9T6Zz3rTJpTcaimm4pu3M7v4Y3fXTW/qjpqYKGCarKneEpKzvdq7jyvys/s/g3t6JYpBPnAwV+UuhwuV5yRyCfU9znk8VR1W3SWOYvEAygrhOM8EBiR1z1I6DBHc5bod4YAyOwVWym8DcCGIO4gkEA87scjPWu1bS4bm180RgoeQynasmQeh6jB6Aj69OPdrVkldWTSSfLZ2imtk1fXX4btO7btt6dKnRxGHlJqOto8tr62ST5Ve11rrvd6HztqdkyIxYAYJJ4AwSTgZxjOMdMk1y0kRXBAPJPfqcjBHQDPXPHH0r1HxHZtD56lCuXY7kJIU54B4/izw35eteZyMVZlYjBY+mcA/T8zjH4ZrswNbnunZ2at0bVl3Xyf66H5DxTgqeGxUlGMoqzV0tmnG130dtO2qTb3Ku1ht3Kckjr369jzyeM/mOtXoZFwYwACVwxJ4UZ7HHB9B3/ICtgu2SCeBjPGOuAe3A+g9qsqgwNxwVz16/l7fj9B39hxhKK6a28tLapLe2n5ddfk8PGXM3Bys9+fRtaXV3s3vdO+/qo2jywcnJHGM5B29Memc/gfpU0aSTHcyhTtHIHbHHAXgnp789MVLEcuseAd7KoPXBLADGOxJz2x05r2n4l/DLUvhgfC9rqpiFz4l8OWPiOJYiCFtr9WaKNx1WVQuWXnG7rzx51dOLs5K10n0vJ7JaK7er9Enqj3csyqeMp18RCryUKHs5V+ZPmbqOMYKOyd5bXtfW2p5JE7RxqhYggcls8euOO+MDg8
44qPeXfJyWDcZPXt0/TPbj1xWq1ozQvKFBCqGP49emOfQHkemayYQpmXerbPmxnjPqxxkHHfHIGPxzfK21dtq2t7t36K3/D31PSxcKuFWHhUb5J2VNa3lZRSbu3Z666XutSxCWYENgFWwOPmHr1HIOevHI5HHOxBZrNDIqoDIo3Dn+EDkgf06ntjmq0a27RfKGZgMllGSCODkeg55zzk8dqt2sssatydjKUGR/Cfwz1UfjgH0ocdV0stba2Wvl1306pv19jA0oWiqrVVVKbfNF81k7aO9kne1mtdFeyMwRLGz7znI+UHqOvQ5wPrjt3HIzJtrOxUbcEgEYzkcAY9Bnn2J74reuISQTz3Jb2Az+fcgn8eec9IIyH3YzgkAngHrnnP4Dvnk4zWb0adk79lqtbXb9F29WcWNws0/ZRioxTck5fFq4vR7v5a9trFBGIADEc4Bz29+/Hrj8B66lvcCMBdwKkAY9j6Z6H/H61mOFVsKDhSMnJ6+n+c9KsQBHbkZHdemcYzzyM49j2wD0pcierWz/Vb+T0v06b3OXCVqlKoowlaStHmk/RPXqu/XXe1k9JijDJ79Mdu7cY7foPXNNVljbO76EAEDn3PoD29BUbkZGAQvQ9zznj1J/L24qNmVhyTz7dOvX1z6D6k9qxnGOqto1+G+nl+b7nse215ly+0TTTTtquW7/Hd+qL6zs7qoOeeAMnOc8dc+vrxnvxWlEzxPk5VgcgZ+6CecZB+uT0xwOMjmlKow2khx0IP1JBxjBHTPX09K0BfNtycE4IZuSfxHUnPBPP09MPYJt8qdrLRrultpe22+nkduCzOCbliJ2qRacZKV9Fy3W+jTSWitoz1HTte0uGye3vLbzndNqsOdrHBEmOgIx0znPXjmuP1Fbe7ed4VEKHBiGOTtzuUvxjjBA5BPBJ5rDW5cJx0IByOSOvqc89M8Yx2xTBcugOCQr5BwM5JzngdD2yMd+wzTp0JRvyySs011elr9Fvdr8NrHpYrNqNemoVKfuyVnPq1JLbfv5Ws7q5k3UeWIcnC56HBHXnvxx+nvVJNqZXaGByRuHAPqOfz6jnHHGNZtrHk855BHB78emT39B75qF7NWyyZbBDA49CePoffIHB+vdCbikn1VtU1r5JbavXRX6WR8NicHKrUlVw8Yys2rSknLk916JvW1ndb/AHq+YC+7AAwCd2Rz9OnUdP8A9WalAUggjJ7fX+v5Dv7ZvR2DOpOduDgccMe+Pf25/CmXFq8BClSeeoHJ6+vPJ/8A1dq0576W3Wul9X9915fkc6wNeFJ1J05OD3urxu7WSSs167P1sUwh4Kr3PfnJPp9ex6duOk8bTRsMsVBJAyeMHPHJ78ZwB+NCKQPvEDr0xz2A569eM9/zZIWYgHkdAc849v8AAYyT360r3fvW2trurW62suummuj6E8qpQU4qd9GoxSSavHRpdH5vR6et0J5nLICAc7geQTjoccDnOOM446CrEU7W0ySg4wyMScZO339+ucfTOKpWrlcozDacnDdM9AR/tA5AJJ96lmLuN5G5fukgAkAccDjJGcc4/OlKT+FvTz0fTXXvp9/od1OcVS9rGn7+jdmr6W3emi0t56+veNNZ69Z7DEBcKFV2QAM5GcnA5ZiMbuR2HWvOPEGhyWEpIUgOAVyMfKeRkE5XOMntkD140NM1GXTrgTRsQoIYqPunPG7ngMOcdxk4zWlrWpPq0Ac4ldcLuUDcBjAGOhx3I9cYJyKqhVqUq0Iq/K225XbSi7aO/kr2Wt13Mc1p4TMstrTrRisTGk+TZy5o2s0t3e+qd0tTyogxsxlGFbIXnozAYIPPGO2Pyziua1XSvOSV41+Yclfp0Of7o+vGfpXdS2xYNkZIbPI5AC8gn+8D05x35OKzp1AXaCADw/rx24/X2HTjFfSUa8oSjOk25K1lG8YvbR99El5tr0P5y4u4Mw2dYCtRxdCElOM4pygm02km9Vvdpprya8/Ep4JLeVlcbSH
bBOenbkdR2HHXHfNV2Z2ySRuxgkA89uehwc9Mjnr7+ha5payAyQgMzdQcEHHb3Gef1GOa8+ljkiJUqE+Ygg8AYP8ALGeCRzzwK+xy/FRxNNNtc6tzK9pK1ktHt1a8t+5/AHHnBWK4Wx9ak6FSWGqVH7GrZv3b6Rc9k03pfW190iPcQTkKxIG0EdCP4h2BGOP05oAZiCx+bDMSTknHp9e54PHvSYYtnORtIY988Y+n+falHyg5AIAY89Tk8AHrx1Pr6mvZVktU23a2j8v5rX80t++p+TVm3Npp3TStLTZdXaz9U9bjkBPTIGSGPHP8vTt2HTFTDJbgcgDHqQOnOf8ADpyc1ChIALcg9uuM9x2x2z7kZOamX5mwcYxxnnGeMjpwT9MjpkBjUTbfazWvTZpLbta70S8tjow8Y3S1UnZ8tuZO7XurW136efpbiUc5zxjAPX/ZJ9uuB6dgc1OVB54ww3A5+7wPbjnge4529KhQHYSeNpGTjkgEEHuTkfj19qcMk7cHk+hCjgdPqex4bvj5q437st72fS+y2V766dbaJ9T3KNNQtFXVmvVXUW1daaNPbRem80YAPJzz8wHAOepGTyCB0zxj16uIGSF6HOFzyT7jHH9OMc0wfKcDJwTgdcev1PoceuRnkvUEEMc/7RB5HtggYPpzj8K5G1d+8tXtaz0sk+id7PZdXrc9alFro9VfVdXYlTlTkhdwx0G5SOwHofbOffpUyhQMnbkAk9s4HJB47np+oqDnrz1JPsfp2/xH5PGSOemMfjxgn1Hqcj6d6x1ve7traKlputFbrtounY7Iu1lad3o237vTV6vz226gpOOoO4EHPrkEKfpjn1P6zI4GQ20MobBOQD0247EgcA9ueT1qMbQoXkkc8EBQM+uM/l+RwKibLfeBA7Y9s4Oc4PrTS+0r3as3zXavbRW699Pv2cOpyNat2t7y7O1k/us7p7dycuQygHPBJUjg+uexz2zg9eabnJKgcAZ6lQW5GRySBk4JI/nUeCpJKkkdMdSBjg4PTrx17A08KGyuCu4MST6nng+n+GaVuZ6LTTR9Nu7v0XZ9FsS6jd1Zq+q7pLdO/d/g/udauFYq7KCC3yn5jjPykZ64z/nmrisHYqxJAwTwAGzz265yc4PH8qyRHJbI+U4Y/wARXBOM/wBfwIFXIgpZAx2qflPTnHTBBJ3fnnt60VbW0W2r5d9Gl+H+eyR14a94PV3kkrJ91fXdWT127dLl+0j3OiqoO1jt6jg9c8c4OMjg9cdSa9J0C2ZYWYkEqcFAOGB6Z69B0HHuT24yyiQzIUYFQQAxXliec89+5GR16HGa9F0bcqeSu3LYDrjLcc4I469Sfbv2+fx1Xmi0rK3R7u3VrV6eWvpofouRYSMZxutWoyW7d3y3fS7bvv6NaHX6DBG1xE0Y+fdgg5BHA5JxwT1z1JxkA9PXdFtHS5QxqWk3BgSfl2j04HqBjHbFee6PbFY436SZCqQo2kZ4yRwO+T68Yya9i8PQF1j3A7i4ZgrZyOy5x8oX05yCK+WxVV3dndPR9umvpfT79tGfsGS4TlUU46ys76vqlZ2trp12s2tXc9k8PzJFDB52xN7JwRkq/qp7dSD256DAx6rpFpHc3ySIzFSVV125Rt38YzwSMYXt7evi9m06CBXRY9pLgY3FY8dQMjk4wemfbpXtfg69ikjh3lU2MqqpwHYj+Meqg8kHOcivGnJqSfZt2Tulbl17PS6318nY++wsVFRVtbRTSWjaSuvv276XfQ9zsreeSzjSKF8W7hSFyOmMsQBxuzxzxivqr4Pa1Y6dPD/alsJEhQgK0e9t/wDAD3x17fUd68K8HzWuoJ5G9A8ibyiYypXk57nPAI/DnNez6FKljcxxxooPmKPMGBuKZ5Gc4yM56Y461casqfLODtyNNSaurytdWt/XfYvF0faUqlGaai9VyStJLbo9rPZK/R20P0j+Hetf2uscFlGvkzSIGt8D5EJHHT73pkdfXFf
efhPwZqRvfCk9lD5RjaGTZtwQxIJLgnk9MHPGelfnV+znf2s9/HNdMp2sxHH7uTaQRtAJ3NnOTx3z6V+sOm+MtJ02w0S5gnijuxFEkjOQfJYY2E9MsTu4AHv0r9U4JnRq0pYjE1VHlqU0lZJLWOqTd929L6vS2zPwPjZYjD4lYXCU5zcqU7uV3dzSs21fVb3avfSx6T+0alqnwumhltlkuF05QPMQMHnWM/MRxgqTnHf6Cv55fiNZ3N9qa2haJPKuGYBxumRGY7j1IVhjJHYH0r9qP2iviFc6v4MFsjnM0DedKGCMFVPleM8gZ9T65Gea/DW91a5n8S3bXKmTZI8ePOHmZXcVKnB3MQDvOBjj1r2+LsXh518PSoWcbqU2ldtJRcX0el27q++l9z47hzAV8JkuKdVWTrTk4ayalKSt5rVNq/l2Pw4/4K63FtFp+jeF7eWJybRLqUxoS4EhHMoJPzZXIPQHP94AFeE/8FK/HVx4m+Oh0YFJbW1sI7UwxSbtiqGBQADlyceaf4flIzngrwsPiKyg5xk1GpJygkmvdVox0tL+W/Te1t7/AI3nWJdXM8V7NT5YVXT92OnutLy7/wBas8psbFbh0eDaVZhuLqOvG4jnk9MHj2AIr0Sx0dJ7fyHz8+9wikYBwNjMcE715xH78nmue8G29tdwwNmN2QHcmNqlRgb2IJ3Dj5TxnINeoSRQ2SiSPau9QrSuMsysCM7eMM38LdvQCuanVbdpO32npZJ9Fsr7dkuzR+D1ZcqW7+LZXelktteju3bZ2dj5+8f+EWe0kkhQTNFw/ADlVzuB4ALHgbvofevjXX4HsbuWN0dUDkopGGCn+Fk54BHrzX6V6zDbXFpJG5DSyW8jCMAbX3gfvmJPMgwMrxj15r4L+KGlLaXksykGTeFY5KPsySCMAgkHpwOOM8mvYwck01veSttrF8qvfdardLboYQfJVjyt2lunZq7cdnZaxT+XXTfySO5j3jPmEOSJN4IjLnG0CPI+ZfQMAM5wc4r0XwqVkkRyzt5g8oBVwmR03cnn+6w98juPJ5JZlmjBwCMMM9QGzktzktjBPTJx0r0jwDexjULcN5QZHYL5gAR95XLEZ4KkDsRz0yK1xcH7GTSul5ttXa3379/+B3KF3otG0nbovT876NKx9heDdB+0pGVt3VIQjTyyLtCMQSrbs8OxHOAd2O1e5af4SivYNskMivIiyb3JDqzcMY07RrhSoBP3j0NVfhZpNtewKAWdfs4WUA4QsuMTyJ0kQniPkdSSfT6p0fwpNcLZssduskbljhszoUxlIo9uGZgVEXzfvMMOCvP5/ja3LJ6tNPSV7W89dbqz+5b6W7Y0lFqTUW5RttFxvFpK6tdad3frfS58GfEP4bXIcGO1meVMtNJtLCVYusciYG4EMMHdx79K5/wjoWoJcRW8scj2m1QsbIRNGGPzIV6oWI+Yn0GPb9KfFHgCK401W2zC6aJXuZd+fNkXJbYm0YmJIwN2WweQRk+Fy+DLfS9aa4E5jhaKBlE6c3G9m8wzgAlZ8qNgHfPrSwmY1J01Sl7zirRe7TtGz3fZ206NLewTgopXs72293Xurt2Xa6X3oZ4V+FDazEwjsJd7R7opVT5ZFfmRGc/6uQ4XLbSR2Ho/Vv2eJR9r32k6skcrRbIS0G0bdyC5yNzgnAHlgkkjPevtv4SaNazW9qGJcSIsUskkgaeRv4zJHhdiycCOTJwFYAHpX1lpfgDTryFlvNPSNbmNpImtiDHHCuAm75eTljnoWI53EAjKrmlWg7c703V2mvhenlZbP7yXKnqocikrSfOrq14q22j06b+Z+BHiT4I3yIYH025UwKyoBCVaN15ZCc/MACNmcZ5PvXltz8JZ1C2y2tw91GHkZfLIO4HpKucR7BjaASfvZPQV/RT4o+A9pPblYrRRZSxsVd3XdNMQDyNhKsTwGJO49u5+UvEXwUnj1Bk/stIP3jFLlY8oVHOJztGA4AG48Drgkms
4cRVEn76U5bpyb35VfRvfW/XT0MpSg7XS0tsra6Xe7bTvouvTz/FLXfhdqNgZTDbzTqkaSOCpBjBDAgjHUY5yfl4PPGPNLvwxdWow1u5ckgpgmRCv8OMDPUkdjxmv2/1H4GNqdhdvLBNG1ypkWFCBH5gx/q/lJEa4DAc53HAAr5N+IfwF1GxuzcGwkjZCJDMmdijDbVkO0fM+3oRnjnOa9zLs5dX93VcW9OWTk9b20u/wtZee44VoKSTlFNRukrK+y028n08j84U0Vo4wogZmb5S2zaSBwd55wQOn16VMdOCJtZQAo2rhsgZ6hjgZ9sj1969s1zwlcWFzPFIp8wAGQyAkmUkkKyYADcZUgkDk47VxD6bMMo8aOrnIMZO5Quc5GOq9+g7969pVnUXNd201d2raW9W738lsjoS0TVpPWzd7XdteyW2nye+vnk+nAZVYQzJkqGJwCO2SMH13cfTrVGWzJiVm/duBuIByVY9umMDGQM9672eyWNHch5R8xwjE5UnDE8ZUDIxg5HPaseaxuGYRQrmNyFCqP3nmD+AepfOc/pxkJVWk4qSl/wBuvyv0t11V9OvVG0N2kndpN2jZdFp530Wv39eAvI7iR41hjEkjMqrGoxgHhmIAPPTJ5wfrX0h+z58A9Z8f69ZT3djcC0a5KhxGXBUFNmFwPlGSC2TjOSvOK3fg38DtX8d65YedaE2zzrmJwQ7Qoy7iuAQW+YenPrzX9F37Ln7M2neGtO0uYWNvbzJHlY2jSR1mO3zHZsDCcAocDdg8DgH5PiPiSll+GqUITftJRkpzptJxv0ve6e7bSe3ofrPAnCFXN8RSx2JoyeHozUoxqRfJPl5XZwnG0k1d7r7mJ+y1+y9beFLXS/K0hbeRYllkYIFHIBy6Ff3gk5JORnAHGOf138BfDdI4bdYIS8sQiZfOPlRxu3URoAQx4AxnkjIxgVX+H3geCBLVWhUSsYg+XCP5UZIbbhcEDIAUY6V9i+FNCs7MRCOKJFVlBjmAEqkfxn1YYznB79OlfgeZ5xUxNZ/vLxb3bvJp2+JPVvWzbeq1sj+h6eCo4WioUoRjGKsoKKSja1lFLSO21nb7rbvg/wAELBb27SLKbgKj4ZDsTaD8uP4Bg/KCSPY4Fe86PoyWrL5cSxqw/eBCTkj+LGBnOP73fpxVLQr1bhduyBH28yImROEwA5bPLY4IwMHp1Fd/o8asXaR1BVCXU8K5AwAo56ent19ZoezlGMuZSktFayXS91tfS729e3gYlVG5XTS0aTSdnp0V38r7Wvps+1dCVTbs24VQB94Z+8QD1B7ckZx3rqUtBdRRqew9MMMenfnoQP61m6cseWmSOIq2dpK4IxncFBOA2evHORyDW9brKqG4K/u2JUMByrA4K7cjH6jjt1rqfLq2k1vyqzdlbbz9G1ucEoSk0tmmuW7tzN2vZu22nXo1Yy7iyXe6vGqmJWCZHQEDbgdsHqeRnBFY0Nu8Nz+8cncSFOCeD055O3+uR3NdReM4eOUjcGHzFedoHr6DB+g9c1n6jbvCIZ4huViGLAggHsO3I6EevX1rz67pz96KklTcW3bXlvHTre3S7u7tXO3DQlG0HypT0vJ6Rl1s9bNvS2l+xUlkiidQQoU8blHPORnHqPQdalnihEazRnKBBuBADEkDJB5zgnkfmOeOa1qS8gCSxKWUFSxIzgDqfUgA8kHtVyx1NZoUEp2rgrk/MFbBHA6Zz39eoI6c0cZSnXnSqKUVFe5Le/w/Fb5q21mvVevDCzVOM4NTurSs9UtE0l0fz1+8RtSWyuVjU7lk+dcHJweCPYk9hknnB9Lt5HFfwpPbrJ9qhOWXG3chGQ+4k7++eODgc53Vg6np8011Fc2yHy49pIHKsQOMD0PTv0A9a6DSbl2ESyx5RSUY9x0yowTjnPfsex578DOpiZ1cNWlyUW1yScG1yuUbpN6LfR+R1KHsFTrRu5pfvLu0lpHRq+t+979deninj7wnNrmh6lZ
/ZF3TROs0TLy7YOxgf7wzncOCCOOlflF4o8Nar4d1nUdFvI2t4VuZsCVC4IyTEQxwMSc84I45Ir909S06LznP3beQlix45bpweRg9Bjvxivif9of4Pz6zHNr2nQn7VbIfMaIhVuVA3ZYAE5UfdJ9+zV4/EeQOkli8M7ypL97FKMeaMkmpK29ttenqfqPAfEtKlVeBrOMVV5XTm5XXO5QSWr36X1PzDht7qz1FIbkyvbRsGhXo8aZJ3ITwVU9TgYGBzXuHhbU4reGNXdFt0B8tn++u8cZOQcrgZXBxnd2rzTXbW4sfM+1KwkWYQurkl4ShIBU4Hyjv7c+pq1YG7jSBVVJiFXzAWww3EcgE84HQ8c5OMcD5nB1YqTvfnuntrb3b73XW2vyb6/suIp+1pU5JxalFPe9k7dVfv5XtfS572ILa8hkuJJXZXBKhOjsPuup/iAHJwBx154rznxNJfb44I1bZG6KtwTghDneGHcngYzg/rXW+HtQZ44oJo1khhjYqA2yUORyhGDlRngE857dKoeIRCJ/NHmyRYw1s2VSNY8+ayYzvIyuFO3b717UlzQ5o3TUU7Xv21b+74b9zzKUVTq8kl5wtaz2283o9n3XQ+Ff2ifgr4e+J/hbWNF1WwW9+2WU7QbYlZLeQL8qzN1RJScuuSRtHIr+SD9p39nrXvgl4x1BYbW5/sF7x3hmIJihZnYoqnHCdNh6AA544r+4TUrCwe2neEbUvlMbJLIGd0bIIYYypPYgHg9AQK/Mn9rX9m7QviR4f1y2mgtZJ/stxdWkKR4j+RQRunxmKUE5ZNrbsjJGOPr+DeJHlGLVGo5OhWko1ItvlSfLdqOm1uun3nmcTZPHPcudNRjHFUtaNRu04+7zRu7S0VuXbXRO3T+TLRvFEqvtuJ3BVsHAzI2CACvqT0P65Fe46D4wcQwxxFg5IikeRvnUHHJGedw5OOw6815f8bfhhqnwp8aahpUsNxFZxXW62kdG2hCzDbzwSMcHgdO1cNpOtSebGWuZXAZSoI3OGzyXXOBnAznkgAjgYP77UwNDMsLDFUOWVOrCMlbVPmUXZWutbvS77+T/n7L+I8fkmY1cBi3U9rh6rpXbsuSMkk7vVpbJK++61PujSvEtzDEiyzROkgUqQgx67433AhjxzgjgY9+w0/wARrFIhunaEbTIskWWEgbAALLyc4+YkA8Dr0r480jxRKksZMhEqlVAeT5jGmQAqd1Gc4ByfoK9JsfE8sSowkBW42lgw3LG4yCE4+UjPyn64HFfI4vI3CTkopvRWs07Lyu110ezu15n7DlPGUMRCHLXSjDWaUnLVcl003rrf/g6n0zDrKSowWZwG5jLjJjRsHcuT049eh79DJca9G0kMfmogiC+VJjAbb3CnpIM++c9q8KsvEFwE837Ti2Vf32OCUX+IZySy55GR2GfTTg8QRthcFow7eTMDj5+MD65/I/jXkyyqakpyW1mkltto+11f5+p9BPiam6cGpt3srOTV17vS99977Hs82qxTxnzXG+crIPJbncpIZ24+UYxxz/jBFq7hx+9QROpVMnMnoVJGN4PYYHr715Za6lKHkzcBkIDSLna646Ko5J/2h7CtKLVoFdo5T8y4liA+6BztOR1JCnqOfXFX9W5Gorls1rzdL2s02lZf4W3pp5cDztS5rSUU2rybTaS5LqyvZX679r3O3uddYyNC0jZiyAzjCsr/AMJPPoNoGcc9BWGNRR0LqUZ1k+YM/wA+ecOg9gPXnOM8YrndW1hC0UkOxg64lBIPIHKAjvluvTGOO9ck98pjdkkXymcq2w4Z3z/q+hIBJ5Izn8hXRSorV3eiSvGzs7ptbLey20s9NNVxVs9UeX9437yVubeK5U21ePptr2Z6jaa/FKEiB3RkO6/MSFKkZ8zgcDPK9snk4rTg8Q5Z9txuZwFHmoBHGFJ+aLB6Ag4OOD9a8a/tJy0mwYCH5nXCbEbrnk5Jxye5+nDRq7Qu7vvChBsdjmI
7icnHOeemMZzwetdEqCmrRj7zavttdJq9tvRd3vY458SqNotpNy0u3ulF3Vtb66NX332v72+vSCHf5yyYTGFIJROmM55J5ByOmOeapyazG0KzF8SA5GMAgEjbk9OCOOD35rx0+IIFj+ZnUyLgv5mP3mDyB2QdsZx0Heqf/CRuZfLikEoYKm7kKW6AAZ6nn5gcjHc9Of6i0+aUbpJOzWr+FaPytp1f3nm1+I4NW5tW7Wcr2dkm2tWur6K113PVLnXQVBRUkdiolQMCQMkFx0yo6E+vGOld78PPAviT4m61bWmnWrmykk8m5uNhKlUKgspI5C5+92LVifB74VeIviVrVokds/2BpR9pmZCEEYIyy5H3sHB559PX9yvgn8IvDXw70jT0j05Huo40WaQRqreY6jeofBySRzxx6fNXyPEnEVLK6c6VKcK2KUWowi03T2TvqndS172R8Rn3E9PD0qj9pCUnolGSco7WXKmrWWur0bfRGL+zt+y74a8CW1hqmoWiS6jEwlgnlXKLJJguCxHJGAcYGDjnPT7YVbezhmgspWSMY8tFGABnoqjlVHAHPTGfWuLhuZCph+ZY45GaKJT8ygFcZAx8o9+fQdRXTacsjuiJ+9llyORuAXIwmcEAjoR3I7Dkfzpn/ENbH4idSvPnldRcHLmi72vyRfRu1nbTt1P514o4zknUjTrOUpXfM7KV9Eoylrdpdulr6NM7TSLP7U0OxHMkjqfNZvuv2XHHygEd/bNfQvhHwJPfGCTymmkkkVyp5XyzjJ24wevIJ6HtjjC+H3gITC2kuVMruySk4IKknO0HogXpz1B9q+4fCHhqGyht/MVY3CIrbMDYgGcEgAg4PzED8ehr5HEVqjTnteKi07uytFre1rXs7t229fxXHZ1icbWlGVWai7O/Nd6yTSsrLV3tfayvq7mF4f8AhlBHaIJbRW3IoZUUHYucnkAEEdcdiO/OPUbT4d6LPJaxvZiSGFY0jg2AITg7jIOd3OCeRjH410VuzRjyoBsjGV3Y5dRjknHQ/hz9cHsNGglO1wNqluB645JycbRzwMY9zmvk8w4j+pyUKSm6sZLmk9LJWe61bt3aT300NsNjHh1GrKT9pypOMtddHza6p7arbudb4Y8P2mjWVvbadawWcMWWWG3QDaTjceMHnAyTk8Y68j0zS7WeaQEKAvGWbI3Hv7/THXJwM1yui5Xbk5XgYOPvd+Ox5468DHGSa9M0l1+UEA/3cAADB6Z44JxwTjjvXFluZvNcRBVarcpyu51G5SirpOPM07K2yZnXzGtXmpSctHdJJ21tsm3o/O+luxsW1jsKkj5Md/UddpwcZz79jjHA1rZVQjMSsMgA8/KemcY9RuAHcdqfABIFUYDcHH3QD6deB+RPH4advBjnAJIBPt7D0HQr+XFfqWWZJKpUo1MO+ZyabnL3r8vJpFO6t+HzZzrELqrS6WulZOKb0tut/S+vS5BD5hQ4PzAcZzx7jHPTv69c1pxQEbgTgKcgjj6jjqegwMZ/HiK3XywuQSMHoeQB0YcdsjP8x0qyrMSTggYO0gZ5wMZ9c5GT6/Tj9dy3L40KEHPn9pZN66KyUbaXt6aJ/cT7X2jtfRWSfnpdpP8AO+2+7GSSuqnaAMYxkcZ6Egkjr2HfHJ5NZMu6RsjaxIA5JGcn+LvgdvX0A5rYm3bclT09uQMnn0HTPH5c4zlGTuVR3wAMnA9e3r279a8bO6MnUi3KUoRTahFroo6yT89tr/Ox20lFxjG2ja1/ma5VZave6vrqrW10bbZHgBJwBg+w59MZJ7npjHUVpROfmYng5Cj3JGDjrgZJ6AAY681UIc4AwAeT0GBnGAOCPfrxz35nYPgBCGbG4gH7oHXjIA4IzycZ9jXhUs8+qJ0/Y1JQptK6Vuz2s7r5q21tj0YYROKerur3S6e7810012eujJhEXkSQp8oIwGGAxBySD6Dt6Z/Ea8blAqqcEgjk4I44OMH8+R0JHSsqEy7VyQQ
MDBAz8oP4A8+uM9ga1rSJ2dJMjj1A9sg88EdQc+vNejgsyWYV4KnBxU7JtpaaxulbW7V7J9r26mmHpuNSOml1fTa1t72u9eu689t2yhYRqzjLHOcnhgOgbIGWHOB+Y61edlHHAxgA89frx78ED1JzxSxMAvIXOMhcDHQdBgds9Me9VGlRmYAggN8xHUH/AGse/t6ZFfqGFpU8NhIpSV1FfFo7tLvrp6O/Toe3UfLyOMeyv1Sst0/8rvXXcz9TlYoybQfkbJxjBHTDc4JyewPX0zX40/8ABQGe4sNU8N35MkUFpOlxv5KSlJFzDMMHBJI8s8A/N2zX7Da7JJbWrz7sKEJwcc+hx0yAecHj15r8Gf8AgqH4k8VxR6ZN4ct5dSjs/Ik1WC1UyMmnbyZZnVVIBhAzuJBIbOOtebiMLPMJ06inKPs6kGopOKvyvR3tpK+68tOp+8+DGCli8/wcf3cadVeznKTslKTpq2uivfTVWtazd0euW/jLw34w+EFhLq6XNlqkWmpDHGDskcquNivwZITjIAA45JGRX4pfH/XPBWn+MNYt/GEIudNmZYNLnJBSEljidCASJMkAtkZ9PT7K+Fnj3xB8SvAOn6PpVmskOnWpDpM4S4t3UBXMkf3jFIRlCSMYPAya+IP2kPhJqGv6R4t1e+ktoZ9Html02IHa809sHYornIOc5Ixl8DkFa6MkhDCNU5vkak5Nw0nzSkmm7XdmrK7bsn5n+gXhpwpQyXNMVSxmMnRniKrjGjGratRU5xcZU9G4xbSfZrWx+bH7Wmn2w8M3174Gd/7MMG29cP5sZglU4LqBxkAjccDIHNfjr4baePXrVRvhmF2chuNshYli5OMKTjd64z2BH6S3fjySLwR4t0jVL2V4oTNbTQRLl7tFLrHEJCzYWMg4QA5yfm4r82re6V/EfnyDyo3u2YqCD5R8w7Qw46cFsbevXivuKdbmjNWT09XJKyu11e9/TSysz+uMswrwtPCwnOc0pyhGb96pKm3C0pX1vd79dOx9M+LrC4n8OG5uIXkjMUcyu2RG0aqdwR+Sh5G3IOcH618kag4M5lAGQ5HJycgjknnPBG7j8Oa+wfEeoSXHgu1t4Z/NeaBIiR1kKqQ3I6hQTggcZ6dx8hXOnyvf/ZRhlabajAkqRux2GcjnIBzng+terltGMISaalzqM2t2o2jvq7a2VvR+nbnVKpSw8YcqbnNctk7x2aUkr2ir6Po9N9CTT7maSXggHngcBj6BuwPGTj0HOcV6t4aluLq2ETlmUhiIwSCGUYZc84OD8ox82ScrWdb+E4rLSxN8jksJGYP8zKAMEcHB5ORnAwQSahsNXGlyGJWAB+V2ySwwTtAUfxDnPcZHHppGV6trKz3Xk+VO+yTu+67NHPl1CdN3lNzlJe9FO8VpF7O3yT3u9e+f4ttlZphHFhj94jkEjPDLgYZdx7nBPFeEXlvtldlU4VjgcEAdTnkng4HXOSPUV7f4gv1nabypGJdN0hA+VQc7mxgYPTBHT0ryi5QO77skNkjB446E5HQ9cce2c120YqnNyg9NNFpZ3Xn2aSd3/l8nxdg6eIlBRlzTS5nor83r3vZW8lq7HOBWGcOVyTkKOOegUn17joMY6dL0ar5eGAJwec8E5xz2Pbv3HPBFPMQYkYBx6Agg/r19Tz+VLsZVORgAYyACcHA69x+OR+delGtJKzfZ9Xfbbr09eqetz8/pYR0G5WclyuNmrq6teyWl77Wd/K1rLEi+dDyp+dGJHIIBBAU4HPHfp0617b8W/iLL8SdQ8My3FwZYvC/hjTPDlluQoVtrFHAVjk72BfliBxxjFeJx7iyqoA6BWxgj9emeeeDzjBzVsoYGCEqxKhgVJ2sCD0PUYx0PftUzfOlfZTi0trtXjs90rt22vqz08vxM6NCrSjSi6FedNVZyi04ulJSgtez1ejTtbXU0BdMI5IVkwkikMozhufXqD346Y+lZTB89eBuGOB+KnkjPf/Jq/Fs
YqdmD3545OCB2HqPf6Yps0Y80OuCoOBnOfpx19umBnrwKyaXNKVrOy2te6t1Xru9PK+h6WKhLE06MqklUUGoxteyjeDVlbRJ+XfRISzEgJBJUHgKDwcnPXByO+Djnoa2IYzgEjAzx7nHA7enPp3rNQ4ZOhJJz0xn8uvT/AANdhpljFc28kkroskahlUkAseTgDGDgDoMc8e9TOoopt3a2stXutuulle/nse7keEdVqjDV01dub0a0bT0bVleyXnpoUo7eGSKRCuZOQCeRnttGBgg+ucZxjIwOZvLeWJyhB3DkHsR656dOB07+5rtI4fnYMpBGexz7c56f5JxkVjahGxYqUbeM4Zuo9OOBgDGCecE5Hrh7T3kku71d1rtezWnbVbruelmuXQq4RVIRaqwly80U4uy5U4yXXbRv/gnKJA0m5lA3KCxBwOOB0PB64AIP5UJuQrxgZIx6Dvyfx9e/1rRCNGOgGc5Xofr+Oe/bjHSkeBnZWzzjOM4OQDnG7+uaI1Gm4uz66dFdbXva29v6fybwDiozgpe0S95WSXLeLWnnu1v3V7I1dF0p9XnMEW0yBSwVm2DaACfmwcnJwB147dq+oadLZSyRSIY3jYjB5BHIPbkkd+/Jxmlsb+5064WaEDzFyORkEkYJPI56kHP4VekuH1JpHuHLTSZYbvukgc88fgD65rF+1VS/uun21TSSWurtrfTba+h7lKnhK2EVPllHGJtOcrJfZaT93dWSTv5LocjhmcjHJPHPXP8A+v60MrIccnn6c4/HnPAPpz7VpPEkcjDnqck+3Yc464PQ8/WoJRuAA+92xx0HOMZ/+uOnpWsJe9vaKSs7b9Lbeit01tqfPTwc6dOak+arFvazWj00t+P3dLsjuAFIPIxjHv655OR2z09O1TqzfKMZyCQM5A9+DxkY9Md6rLAdpZhjtjoQeeDz+PT1p8atnJygHHUncMjjt+OWHXnHWuhKGnLpf7vv6denyVmTGddciqJ9OVLfRp+9666Nr7ywAqNkBWyQeTx9SANuOoPX6A1bUR4ygGDglSSRn06dOO/I7VRAy23B4Ix1JPr26gH3rWtoFYE7lzgEJj73rjPGR1xjnk8cUSsrX0fW/orejT1XZX7XXqYOEqs3FQjZXe2qbceZq66vTrotu0ts67tjoChOTjjaeBuH09sAcDpwZru2VmwrFkBXJHVBzkAnvnoc/wBKqlXRzhSCOMEYH0xwcE/TnjvThIyrknk9RgkD0z/LByOvrS5rW2avovX71ZteVtd2exT9nKnKhXhblb6JJJWS0aV76/fpvpRvLAoA6kMr8jGMke6g5z1z/PoaoJbCRSW+96dBj1BORx0Awe2ccituV1kChTkj7yntxnj2PP8AXrTYvKEgDKB6DtkdQeemffPvQ5xequrtfja/y3+56rQ8ytl1CeIi4WVOVlK7vGTaT0eln0tbW5z01uUPyjOCMdsYyTkY9xmpIASMHAI5wTjLcfqM/l6gGuimtd8bPGQ+WAIA5Uc8gAY7fh+tZEkYjl2ODk4AJyCMeoxxjGM5Ht2xDadtdVJLW+1/+G3107nBWy54ao5q/s52WqvG7skla9rL8NXtYzJCxZsKCPmyD2x07DAHr07HkU+2bLDc+wjLbQdqSAD7uSeuRwMc9M8c3ZbFwpYN8rAMNvIPsf8AA56gms7YEOSMHJ4bsPXpjpz1Poe9dMLSXS8fJ3eiXo3daf8AAdvDxOGrUp3qKXs56pXtFapq9npe/W+vkOvRG/7yIbScLICBywzlu+c9eDx2HGKwpbUOrsADgluSM7fUYIyfbHJxyD16NEjlVgWUAjnPBGO6nnjrkc8Y9KzpothYL8wJC4I6r3J68HHHf2NdtCooXtK7SS1vqtPvtovTpY8HH5ZGrB86XJLWNtbSez2ta3S35nLyW0UkZRlJJyY8AZA6EE8c9sZ9R9eC17SR/rogcHv/ABEgdx7dfXkcnNeqtEoIIU5APyjkBe5z6gDjp14
71lXljDciRfu8ZRfu8j7xGc9P7vfjoc17OBxiw9aM03Z/Frp0s7dbq9/LXTp+DeIfAuHz3K8VRq0YOryt05uKupL3oyTina+i8unS/g8qujFSSNpO7I4x/eyTj8s59RxmLJLcEFQCTgHJ9Rg+p/L3PTqNb0wwNI6fN8wByMnPvwcg9yMYwK5cbssDwTnA5zn29h0PXjGTX3uHrqvSjOGz030TsuiV/wANNr2P85uLOHamQ5nWwlaM1KE2oSab5ldbOS6O1mlfe3ZzoemPXbgDsTx78Hgj9M4qwuMAkdPvAkZHQemT+Occ8ZqGJdy89cg54HHcjjHB6dsZ61ZKg/MCDjAyRy2Mg5HTHocfTsKqcly3u7pr01sn8mvn5Hi4ai42l10a21ta173Tt5W/MlRtoAUk7skgnPB+vfA4H14qU5yCrYOOgXnIHJGO3059qgX73TAIC5A/LHYcjB4qcYyCWCpjqTg5HuR19uT+NcrbfR2vo3dNrTfTppZdfRnt0KalFztyvS99npG977X2WltPkPOQVIJyMk9Op64GOMYyDz3IqRCGwWBOcHAP3vQjJ6H16Z/OoWwGABDEj5T3AweD6eh/TvUkAGeSABxnPpjHbqMnI6fWspx632tvq3bp89/Xp1OyUrSjHbTzd7W0662WnztbrMw54Hyj7wPJzjjkHgYPr8v1xQrfIVxkAZHBDAjgYz/dJ79eo7UsgGSFO5doIwcnJHOc4PA6Hseh6UihcE555wrcH6dB06Z4yPfNZtXt5O/5f5f530s+Z3S1Wvl5dL9X1V7W1EAbg9fmAY4yOc4UnuB+Gcd+KQgjPBCDJPOAD1PbA46HOT1xUg2AAfNu44z8pz/EMckj69cY6U5gvqdpA3BjnoT05HPt6Hvipimvv+XT53/y9GDael7J3urdrPe+nd3T87NDAqgqWDLtHJycE44785HOM+tWBjOSVGRuGOvHTpyPpnGTnBqLIGQATuUBSei+/fB9O/TnrlEBU88jG088HPpnOBjGcYGe1Vdd196HBNddLtbXsk+zvtZ+q3LEZJX5XwuCCD1OPQnnk4HsPfBq7bdVBBKE9OoOOmOw9B71XhVMD6/dJyD67e2eAO3tgEZ07eIlo2CkHjaMZHUcNxxg9Dn6VzVppJ6atLdbJ2+99tbem59BleFdRwbjq5KUF71lfl21d5abab67WOn0+AOsYTK9lGOdx/hyTnB9OBzwDjJ9B0SzKzZkUq2ASe5GOT165/nknIrj7KGXEeV2OCO2Nv078889vwNei6KJNyFQGUH5znnPtjJPI56YAHIFfMY2bcZWa1vfXVWttZ7P0+ff9dyTCWdKXs9bQs7a/Z0/G7t5Ws2en6JZRlERWMgZM/LzzwenXjPGPzzxXsfh3S/KMEqqGUMGcMMEN1O4d2Ppxj36V5h4WaVZ13bVBwyc5JB4yDgEt7dunTivcNK2bUMcmGOGZF5O7168579MAe/Py1eUk2uayas3o97Psku11a728/1bK6a5YJJrZaa2+G6/PdvU6WTTopWS6G8pIAjJggD2Ht378EfWu18M25gmhhwCrMACrDevOeeB8oHOeeM8HmsqxMV1bxpuwwCptByCBkHAGM54yeO3HGa9C0azt1kTEZidI1JDDGSvQ8jhjn3z2GDXB11s16Nvpp5r8n6H1FGC91q90ltulZb2tovN/PTX3zwYkFq8czEGQL5YZA2XDdWIzyxI49OeBXrunus16sqyF7dQDIvZSoJY7s+p5X0r500K6uo7lVSYhCuQOdowOwyMseDxX0F4K02e5WOSYkRyyMWLN8rplSeOMHHQZI/2uOdacVUcYwXupx020Vr3aerXZvTS3leI5YwdSUlzONrO11on/wAN0sfbHwK8SWWmfZkLGNjKUDE525IALDjOO5Jx3xxX2lr3i2Bo4fs92CI7eFmfJCiU87u4HbsQenNfmP4buUsdQ+z2hKSQSAhg2QUJHKnoCCOSefzr6Lh166ns/tk12Uh
EaxhCeHVOG47BT1GSTkdK9rLsfWw1OeHjGLUZKd03aPK4txdnu+nqfA5xk1PE4mOL15rX5JK7blypJLs1o9NvJq3094v8UXuu+EJbaS5NwqxBUBYkqAGyqH+FcEZGT6d6/OHxrb2+mXurXDosTWyS3EblirFCjk72AOFBA2nryeea+jm8aRjRLmziuWeO4RwpByySAHGO6j8ckDPHOfjL4i+I5p7HWgreb5kMkLsjYkcEP904OcH73I6ZHSvYqZhLF+yum5uk4t6vblurJp3u+nTTTp8XmWWvC4PGKMeVe0b5YxSbkkpK0UtXdrZJa63P5sP2sHa9+N3jDXHRWmsnePJOWSJS29JM5HmKCOR1yMEHkFYH7TF9dTfEXxrOHVmkle2gtkwXcZbc7/3pGP3iSBwAOMUV9RSk6OGwlKMab5KEE3LR8zak0t3ZX7+iVlf+VakXLF4+XJJqWNrNOybsnGOuq1bTbt1dvSfwJ4qfTbpLe5kULGRHGACXjDEeh5TAwSR+Hp9URXI1W0VwsUrHBQKwCsgAOEbpswT1wQeee/xXrGjTaVrQmt0aOIjI4IBbIzHJyflbGV5Gefx+tfhzcR3eiW8ckZMj224JIp2gjkFznK7f4ev3uvSvFxE3KUZRVnZuUbNNPRWsn5/rofh04pRUrJ3V21olayfVN76pO3TQx9XVrV5WU+QmPJUyAMVD5yE553cZOccZGOa+UfibawXK3TeW5Ygv8n+rbaTtdD2KenOSc5Ir7W8Q6PFdCRvLaTy12Y+6icHjnO4rkgZwfpmvmH4geH5DbvAInXywwAII2q2Ttf8A3seueD6104TE2nFtu/uqS0X8rve+qfZ2fbzyjGMnG1tE7NLSN2rve/q79tbI+IrtcSzO3392wHOZMrwc8AEL2xjk8c1f0S6e21O0dWBRp0cEDkMeoC5GVOOTnsOMCt/UdFkj1CeCK3OEfezlMJ1JKk5+UYAwvIJ+oqg+krDI8sZYLlVIk+UZbOCjdFHBHQn64FfQyr06kXFNPmiu+l0rd99+/pZX64aJO22uu7tb5u1152P08/Z18RW1xHBG84OA1mIGwPNJ2+UsDZz85DeaMcFVwfT9SvA2lW13pUDlY9pLFkGGQou0/KRz5kP/ACz7Hc3TBr8KPgP4j+xXGnwM7xusjJHMctJbISuHU8BT8pUk5yDwRjFfsj8KPH1q0FpFdSwxCO1eJ1jIWKZ2VBGip83myTHJYAqEK5ycjH5tnuHqRqTlBPZydl6WWltdHpr00OlTvTje2jivd1cfhvzafenZb37nsfij7MtncxmCFFVJJLd7ZA3lRqBtSV+A0452sdu056g8fJviCW3a6Z0mRFVEeVZ1DktuwXViwAEZzmPtkHI5r6a8RazHewjyDAySSkS+UokjCKMbTECMsSeRuAGec18X/EYSWZvVt5BHGpaUGI5YK5JHzZGV4OUP3eOT0rz8qppuUZNwlJRevS9u6au9V/w5yVZc02m9tVba22tnv1Vr9X5n2T8ILmxiuVK39tHDFbRhHkAFwe6ts3ElSSRjnAAIbivvHw34ssbKwg81ImkV0jkQEXDvjOycLhRHsJJV1YgbjlTzX4M+Bviy+k6nFbTXk8MhCWrShi5SDnzZHzjfEuBwcEZ5yDX3R4N+N1jd2kaC/jjjSBlhkD+WJyB8rH72HPOV5IB6nNa4zLqk6i1ctFeySd/dte26dtejd9rGFVSTjeagnHazTWqa1aXVvXpfufqFp2o2erXEcLPaqtw7NsEwSWGTAKQYKkSK/OWBXGOho1TwFa6zZJcyWqJHfs8Msfmq0RdOFfAX5YweQMkZJ54xX502HxwawkM5ljuF3uwCylIxGSuFbJyZPl+Yj255r6t+Ff7RGl6pGtldajFGXjZJEcjyVYgbVRSRkjGGIPHFeLjMpxFJe0jaVlsk7r7u/wCf3vlalBWc+ZXUmruWl0rPovXbVeppX/hKztJSlzaRW7QTfupEA2qifdZuMCR
sYXIPfvzXmPxJ8GeHtZ0e5AtI7SaW3H2kTASIZ1zmZWAG0txsAzjqT6/QOs+JdE1e0nmgkjlW4QxiQEeX5gP7sgdWxzgjG08EnNfPvinVLlVMMywGJnMuGkzN9nAOXB2kSY6bTt3dOMc65c5x5VO8JxbVm7Xs07K/n1+V+gldyTlHmcbOLur9HZq92tHpfbRrVNfkN8ZvAJt9Vk+zQQiFHYrOgyWCn5d7YGXbtwCMfl8j6toC2t3Iro+FQMqsvEiPkk4zhNpXA6nk9OM/qV8a7SxlTfCpJuIi7Syfx7j8peIcqTyEG715HWvgTX7QyXzQJGDIHEapGwc7FyCzAj94n98/LgEdTX2mCqycUueSvHZ3vpyq2+z6N/nc93AwlOkm5XeiVtujWmln723Zep4j/Z4bdGsYAPyoFUN5hP3c9M7ckkDGc4Pv6b8Nvg7d+KtVtTLE4hEqmU7MgkHghR90KCcEcYIBFeheBvhrc+INUtFS1keJj+6ZVLxCVTh9q8eUBkAn5xnoMEk/q/8AAT9ny10w2t49qkX3GmyNxct/rGA2njIAVehyTng54M3zulgaM+WaVblUbrdNpNv5vd3+Z+p8IcFYnOK1LE16DjQpTi5JprnjdapyTT0tddDU/Zl/Z2h0FLFm09ImZRNBMyhyWO3y/OkKj5QN2fl6cEkCv118BeCodKhhCWsYTy41MigfPkclFHVmxkEYwR0weOC8CeEEsreCIWUUCr5e1xIAUAzlcbeBgDAyerZPWvpbQoFaBIrceQRFHEWVPl3DcFYAnkr3IxnPrmvwvP8ANa+MrSk5OXM2tXdq9rNpN2Wz1e3lof0/leW4XK8LCjRhGHLFctuVaaaqy0d1aWnZXeiPUfDJtLKApLFAHUr8hwzIx+5uBxyCDgDqT2Ir2bSJfMgjncEySDKDbk7nxgjBA4A5+o6ZrxHRtIlbZGiPLIrIZrkEhHRsg4XsePl+Y4OQM5wPcfDGmy20aJIrGEJsjLKfvNkq2M8t1+btj3r46S1cnrzW0vrZWvp/wNFZ30KrpNNppNPmave+2l1b5L7z1nw3Jgx+WPlYYcjllxglDnsOMn+uTXq9iFcp5bc7NzL3dj7jJwfTr9K8n0zTnt4YjFK4cHLnkA7scAZIOff0zjGa9Y0G0IMTSTOrELlDztx0znkk59gMY5zXXgas4OyjdXi05SXVpJ3V/Te/poePjKUWnLmWid0trO29tU+q89NWdpYWiOilgEcYYpwpBOCR9GHB4GcceldAsKOBAG8tZFwQoBVWHOc579vf0FQwWzRKJJ2iaIoCjqQSR6NxwfxwP5wCYR3Y8uRgpGdmck5PPGMBT7H0OK932ns0oyilKTSdnbR21XfTor366I8KnB1HdX0vJXblqrWWl+XuvTXuTJZtDK0UwRwQwG7+NSeCo9Bz3xyeMdUeCIZhYfIc7QRuAbqAOwA+uCOxwakF4rzeUUZZFxhmBwfx79z+pFa8awSxKGCqe7MMEkccHBODnv8AXgVtQpwrKoouN72TbWlrPl1Vt387JGkuaDTkny6WUUrXTTv+XZ3dvJcDq1jE8bqQu3YU8s85PQEHoAFzx09K5ezsRbiWEEbGLYDdFLH1449Cen05Pp11axnzU2BlYHcejdsEjngHr2/M1zEtkNsqc7lBKt6jnpwc/nzxx6+XisPKFVVeSL5U4qzeqstNreTvfXyPUwtdqDpxnKKkoppq66NO72t117voVLezcIFV9yjcpCkEKDjJPoBwcZ+uc5rW07TUMg2NhCSznb80eSOoz0BHPHp0IrK0+ZUnaNmK7MnPQMDjk/3ieO4IIPauoiuLZGViWTAUM6jgk+g6HkA57dSMGvoMnjh66o1JzhBxklKnKVm7ct0lvrprsvJ3sYqVdRnCKbVr88U7bR3Vt7+ut3bYXUNO2KMtvGB83Xef4TjjnqME1yeradZX1nJazQ5WSHyXRhncOQN0fqckk5GfpxXavcLKFVplEYO9ZWPTHIJ
J6k8jB46c1k3ccclyk3ynADMnGA398NwCCD2HPHHSvWzCkq/NCjSjKLioSjKSenupuzeyXRq9u6IyytPDOnUlKcakJKpFx01jZq6Xdvp5LV7fmx8bvglbWb3mo6ZayC0mWSdUiBxvyCWA5KlSQQPXPPSvjKWyk0iYxuzFTiITT58yIMSB5nQhgfuqMkY9a/dHxJ4csdWsWhuo1YMmwMQN7A5B29gFH5jivzc+O3wlttKv5dU09N1uhR8IDglSd3AxyMjBJHJ61+W5xlc8uqurTjy05NPT7PM1dadNt7rsfv8AwbxdDMI08Fi6klUSUYTbu5JJKzvfV2tfR6dU2jwXwyrndDcXC53YW4AO0Aj5Dx6g8nvgV1t9pUiWUyxhrgh3kJTkEyD5n/2cgYJyfX1rgtKlTTr50mkdmYjYh6Ih44OTndjgdtvua9vsmtrrTkUS28UxiETDcH3A8ktwPnGBt9M4PFLA1YYhJOy5VG62vqr26Nv8nu9T7LGVZYaopauDfxJbbdUr7J3ts9tN/mPVFitb+GR4FUWspZFIyIyM7vkyBnkfNkjqcdK4vxXo1j4g0q7baEDIBG4QCaRXDeYyHcflyFyuDu5GRivcvFXhf93dskbGcM0yELuLxk/MHOQCPQfxc9MYrxfU4EFs0Em+JYHjUDJDq4LBVA5y2Rwo4JFXU/dyc46Weivro73eq8tOvqd1KdOrTjUpybcUr26r3b+7fvre1t73ufg5+3l+zfZ+J9L1C8sdMD6rZREvPAn7xVjywdcAZIU88kk88V/PHr2iXXhfWbrTblZYnt5CEaVSDJDkhXZei8qeMnvg81/ab8d/DI1fTZwylg8LRSyNglnIKsxG07mfjcuRjse9fzE/tkfCO78NeIb/AFq3tDj7XiUpyYoiXJx8vCkck9sdMdf3Hw14knUgssxMk6clakpNNp3gkrtqyfZK26R+CeLHDidKWc4OhNV6fNOq6MbuSunLnSjrorrqtux8Z6dqwTy0RmZsMwkc/vGZSNilscLyQRg/zrtbPW5+GY7G3KCxOOBnoMkDnhj16HArxyO5dJgvTysjIHzA5wcjGMsep+nHat+3u2Qh2Mu0lSAzEkqeTvXrtHoO3fJFfr+IwMKu8Ip99Lt3VlpZdtPXzPwrA8U1sPThBJwTlafJJx2aWqeraejVlrY91svEO7908wJ2klAQUY4yCRkAg4PGecA+9btr4hkVVQPGwc7R8gzEf4XTDcMe3B689BXhMGpIquyyAsRleMcZzweTkf8A1sVpWmrsjK3mkZJIKnO8jsfTrx6Hk9q8WvlEZtpRXlt5PWyul5b73PqqHGU5yhH2ztK13KekV7um+9vz37++WusOskbSSgPtbOBgMTjBY5Jb39OcelbdvrTNuG4bkU7gPmBHd+TnIzxyea8Fi1txsJkZ3K/vGL/LzxtI7nqC3bHQ1s2utyxeYQVC4COd2doPQjGSOnb8MZryq2UO3wq9t2r3s4699L9/npr71Lip3UXVbTSv73R8ju2nZvdfqj11rsuHfeWAIbaOCy5OJMhuh6HGOlZdzqCKp2BtxckRxthVPUY+uCTx6c1xi61EYySxD7TuyWIBOM8cA5HT6E+9ZkmpxFjtdzIF2Ek7QeDgL14Y557YGAc1ywwNSL5XB6tdHtoui6X89PPRutxFBp3qKN3Gycn7zajbXstjs4tZR5HJmT92hkaIN0x05ONzE53L19T0NZ76+8jKCyeWQ2OM8jAyRnPvt78+nHnc+qdCQGOGBKsckg/Lu4BOBnjrzx3qi2pt5jKkoKsUDLnHzjrheSCOp56kcendSyuHK5NNXS3s0vh0XW7s7PW/3ni1s9m3yqbte8nfSK93r/Wr9T0ltRZzHHvErHDQqgLPIDnAGCSCehXv619LfBP4O6p8RtVs3nhMWmCYSSuFOTsYZz0wMkAdeSeRivBPhJ4O1DxfrtmvkySWpmBcgEDYpGQnXaTyBycgE9uf3a/Z7+Hdj4e0rTG
g08LMpEVyGAKpkKQqrgbieSWGMkZAr5XiPGrLcNUVP3qvK+Wz1jayWl+jsul3dXRw1c+VOnOcKilJK+rk2rcrbTvutNE7v0d3778DfhTpvgnSrC3t7BInVVHmqFLycLlHbaCyN128beAc5r6higjCNEzYEXzIgHQj7xYg5CAH5evfjpnmtMgjsreMyyDCRq0S8D5sDIYfwqODgbsDBrsNJtvt8+ImbEx5VV4OMfIM5wDnB4xtGeOlfzHn2NqYitWleUqlRuUndWUtHa127eV732Wh+SZ/xPzOpatZtyas1dKWjbb0teySs3daI0LCzad1SNAhmIO9cmQjoEyQSoY5xgfMN2MfLX0N8N/BRnmje4hCAyHLFckDj5Acnls9MDpmsvwX4QWWZHMO2WPgh1ygBHHPbjODjg5I55r6w8J6JbWMcRjhUStt3H+EcD5xgYzk9D1IGD6/mWOounU9pW5k7KS6pX5fmtNFbbvofimb5u61SSU03zayeqslHmt5WfbXqej+CvD9vZQxJ5a7UChiRggc5BHI7dAeAexOa9tsI9igRxjLDAVucA4GcjrjqDjoD9a43RLIJDEmCQ+18jqCSTnGPbpnjgjivYfD2mpK6Py21QoBwOBjBPHJ9eOMA+1eJWxntLwj0Vkl72uln2T9fw0PEw9a7te92m77Nadevon2Vupf0HSGkkEs2WQfKo6AYI4I7gE464x6c16hZadGqqTEOFAHAwcfdPTAzg549McmjStMQGNERSFUdAO5/Dk9TngH1Ndpb6aAFwuQAOcc8Hp0HTPT6V8nicuni5VXCnKpNtKUrXs3bd9dvm++qPSjUafM2ndLZ30Vkuy17K7M+1thGYyEG0kEEccrjpwP89Qa7DT1ZmQRjG3IJ9wRnrzn09h2quluDtATleB+HHXHpj3GfxrZsYnjZfbd+JOOp/8Ar49OMGtMuyvE0cVSpxg6avBSkotOSfLq30Vra9X0ZtGre2uq3ulZ7bpXdt1p5HQ2SuCvOT6+wwMA9Cc/17DjpbdVUqST0xjGM5zjHr154wPY8VhW2AM+o+b9Dzk89eo7+mONy3lQBdxIHPLcntxz7+o7nOa/feHHChCnCcm17qbk1o/dfupJ+iV/L1ak207XfW27Wltde3a3buacZzyRyMqMDsBhQc4znPOPr6ZsZGD8qjjIbAHI789AewP4elNhMZwQMqVBGQFJPUHJ784AJ59R1LpMjcOAu7jHfqcn1Hp69fr+r4eMZ0YtyUnKNlZX2atZOz0uknqreprDVqytqm122+7p8tSpJv3EcEfxdCcYHoc88Z4wOmMCnRRqQQByAQB1z0P14z65GevapI03kcbs+pzgnsc5zjHp7cnIN6OMJxtAA5PX24GOeecA4PtnFeU8qqSdWo2pKSu3Jc11po+qve7d/M9CnUta+nbor6ettmlo72+SrLAFGW2nGSW6emQPp06fhUUSq5IQ4K5JOcEnAwv059eQM1clV+injPHcd+v1x+mOKgjQg7mwDkhjkBSCOOeQCMdjx7cGvz3PKDweKhGNJcqVnvpLRNapptX6rT1PewdaMko77Ly1su2q6WsraaE0EUjDOQeOTwMcgNk88jPBA5PX32bY+SvzKCAMhupKjHJOeP4iOBxz06UbPpncpUAgAnnrzk45PP0/pZuZliTdxgDLH16/dBx06dvXPavUyeEcLQWLjF3UXUe1oxTi7pWveyvs7tLqj1KdOHOnyrdbaapry8r2Xz87814FiO2QDehEbH7ok4Chl4z7g5J/CqthF9nhDTN8zM8szNnL5Ocg5PyjPA7c5z28s8Uays9/Zafb3Zg2t5oER5aXKnDkHO3+9jg9scE9V/ac8mllEkZpRCEfK/MewIwTwck+oHNdmW8QLHYuvGLnOnhqajSqSldTm1zTapr+VJRi+2j2Pco4CVeNOys5ytroowbSvd2V+t0umm5z/jHxGM3NopYwRx+X+7+87kNkL3/h+96kcY6fkr8dfjN
4I+H2neO28X6bBr9r4htLrTLe3kgW8uLUSK6IU3YaKVCQQV2twuG4r9NfEZhgt7q+vSqAwu48xsRszKQXJ4AwBwOoGRz1H4Pftr6Bdq93qXhPTY/Ethc3b3k8UM4lLzqWPkqFVtioT8wx1xxnGfpsLmFWCpupzKUpOydlzOSi42Sas+XRX1Suf1P4McN4DH46nhaynCjajzVFL2alUpzhUbVW6s+aKe6utNVdHwD8Ef2htE8EfGDX7WS7vbXw3eyTQWywREyW3mufLs57YsPNVwQDhhsxwT0r6L+P9/4f8deCtXh0W6ksL3UbMTxJAQqJFMG3SkHAEiqSWXJODjOa/D/xJqnjWw+P1nfa7pd3o2++MllZxI0VvC4ZQGuAQBIgXILsoycdDxX6F+PviH4QsfAmmzXF5c2009i1u7AE5uXRVYxjcpaBTjbjgAk9K9B02q9CdOX8aEG4pqTSTjeLel5aa6a2dr3P9G6vB+G+t5FmWE9pLESw+GvUoP2kX7JQSlUS0lbaTd9W77Jn4wfGrTLTwHP4osV1N5YvtskVrLHh1vYSW3+Yu7IdST8gJIz97HNfENtpyf2ijhgBM6TohG6LbKx5Y7gQR3GT+dfZnxo0t/Eza1qNg4uodL1MmB3mwLiKVi07oTnzJF2pn7vOAMc5+Obx2t9UaIOQY3BK54443Ac5IPGO3fivq6amoxSum4K7a1tyq720bvq/O17o/bcLRcIYP23NKUHTjKUny8rfK3qtdEmt/K59KaPoj3tlBDIGuAUAjhfgGMr1UjohAyOCcD06+Xa/4fi0XXJpJYBGxTMBkH7opwWIXAyyDkMcnJ6DFd/4Q1+ZrKxjikSWZYVR5HPzx45KRL3DcYJI6dO9cN8UdRlkuYrrexuFYJIzH5dpzwg5ABGc9eDXv4SUoJyUYrlhBR31fu7WbW1nrdW3SR6OdxU5Qno1BR32t7t2vWO6tfz0DUtatbXQZId6nchUADglvuhG6bgQcLtx1PWvDJtQdpndHw28sCTlwc8lm/iP1HfoCDWxqF+LmyVCXCgDZEnYnqxXOADgEdQCCB1riQF87amSM4PJODnnPX8c9yfau+Lg3GbVpcqT92zbund9Ltu+lvQ+OzLHSo1KFKgnFyu5STcW9I6Lldt7a6P72lqyXMjtmR22vkvg8H8Oh+n05rGnlAD5QNkY55CngZ7YI6eoPT23FsjIisp34YEKOCAe57AHkgn/APVWu9Pk8kt5WFUnJXkj1UnnHHI6nGe2AeinODa5m90rvS21nZf0t3c8PMKGKrxdRXenMm3duVo+7d93fvfsmc1gkls7QTkc49cjPUj6c8/SrCIdoZuVznJz2Hb+fY+3qySMqfm+XnGAo9/cd+e3H4AzJJhdqgHAwc8YI7jPIOc/h+nanGyd1a1rvTWyt5dPPyZ8zSio1HGpdNK+uvvafZ7etn56DVUbiFGADn3B6cfr9R15p4DMcMPQZUYwPrzwc4PXkjjJpI8d+G54yMcdDnoe/wCHTqanCN1yeueTyM/QdT+dTfo+rXmkr3SSV9/yt0OmnSU0rLS/NJK1mrp6rRX+/d302dHG2wsc/Kefrzj3xzgnGB+laIWMw4Awe+cH3PGP55PPPWo41yqjJzgAn0HuOpz6fn73VVVwNoxgEkgEH3HOcH0/niobem2u9k/LTporadLvTse9g8MlG3uuM6aj7yvbVfyq1l1enqncyjFtI/vA9Rnv3JP1+o69K3bC4ZHj3ZChl3spyAo64A6naR3444FMnWJnTYqgEDJX15ySO2eBgeh5qS1CpMepVc5J5VjkdexwfpzyfSuSc21bVrW/R2vbfdX8+6PRwGGnhMRCVOolHnXM9WmrppSezS2vul369PK0Ejl4A5jyNvmDbkDqNvJBPYZPPJ70+80tpIWmWCbY2GLSdU9cHGBnqefY8AEQrKsjRtGCoRAWB5JYccDI5/HPfmujbWA1klsCjRSL+9Y4DqQCAXJ/i69ABx64Ncb
k4tKKfk2m+W3fW+vW1++lj72FOnXpTU/ZuFotqNo82kU7b3eq1u9d+p51eaa0BbJVsgMNh3EjrzxwR3B+6P1yWQuRu7exzj169xyfoPeu1eB5SdhUBiQuejA+p5xnHGfQZ9ax5rIqN5BG1irDGApI+vX6Z4GegxW6rPrZ+iflto93pq1rZ+R87j8mjfnw9OUY3vJPZ35Xe9k21roktLa9ubkDEjI6ZGccjaOh498dup9KF8zjDEc8euf8+9bq6VcTqXjiZhuP3eSB3J44AHAz6cZxWbLCbdirg7gTjjGM56jPX1+taqcZJ66Kya82ldbKz+7Trpp89UwNahL2k1KMJfaTduj169HbrtrdlWWJ9pOC3Oc9cA9Qff8AAde1QKrRnPGQcqeM5H547VpxyFQRnAYfMMdyATjtnOM9f1qjMrlsgE85wOwGOfqOoz7Z9jS1n1sku2nrZrqvPzOXEUoQSrRcpNqzSSdm2ld6XdtbX7FdlZyCSSxOWbIwew4xkn8e/bFOjQr8hI2k85HI/wAMc57cdsnFq3VWdtwHAJwTgA8+ucn0Bxyeoxy+VE8wGMYGN20+uOfYcjoM1cJOOltN1dabp626q2q/PW/MsKnasru9tHfyvpqlfto9/UUQAAsVwc8cDpjGQTz7HAGe/SrVvtSQOONpBHBJJzwfoe3t9clI2MiqrAZOFyOBkZ6njn1J69e2atQwkEkAFAcFRg4Gefrn/OOlOdRcrTSd7aPq7q1lrto/x9fYw+HTlTnQippWc7W5k3a91bXl0+7ojcmSzu41ZAFd413ED+PByPQAkc8feAAOCKh/4R26ktnmiGSBuEfUsoyc98EZ6ADvTYEVWLbWCEkAjj7uOhwRnrnA7exrq9G1T7NcQpMBPbSOqylgCwY8EsCMFeg7dBjFefUxMqbVrKPMua6V0tE1q7K26WjfR9vo6GDoYqopYiCU3HkTgrP7KW11Le9mr6t+Z5fJE8bMjpsYE5zkY56c47jA/wD1mk2JsJJ5XkHgZ9P169MDpg16r4w8NRx5v7VUEFwEf5SAFYg8BeucdAP1Jry5oHRijD5fUckc9vUZHOOe/vWirXtb3uu6X430uvmtr6s8vHZXUwVRxcfaU5r93NJu12uXmTWjTtq1du+nQgt7020wYfhkZBHTJHA+ucH09asahLFelZkUIzAeZwB8wyCe+evsfWqM1sN5Kk4GTwD1xyPp0Hf2IqvllJ4O0Ejnvz3PGT0+ldMJtxWq1s7PWz076u+u91221+fqVasIyoVqadPmvBxXTR3TWtl20u7lgCQgK0mVB4AzxjjH09QPpyBTLm3ypbaCcc+3vx0I4z+PcU5ZPLI3AEEAkYBGCM88ZycfUY7U9pPkYI3DYBAOeOfqfTP8jxXTBuy16b79tHp699u7MZRw06U4yu21Zp6y0S5Wr6NPbTa5ibXRjgHaD1wM+p6DkZI5yOe/IqzsjMakLuZgcA54YggEdcY9uN3sDT5EJGd3JznocHr07n1H/wBbMcTkMAexHQ88cjHoOuAenWtnJpKS1VtXe+9ltb8Va2t0eD7BQkqctYyb5eblkldJpRWvXTXrbs0ZUtsy7twYMMjJJIwedoPcfp2yTyKEkW9Ao2jHPXjbk5OOuCSPUew4rrbq2DxtOG3LwQB1GeqsPUcdPbHOK56WIxsSAQzevZT6jtnAwMD3FdeGq8ySbV07a20enRdr3u3v8j5PPMo9ldOHNCavdJSXK0ra66aXadr6fLidYsI3B+TAkA4PTfzj2GSD24Hc548p1C0aCbowUvxx6dRnp+OMfWveLyLzgQyAhAfmxkjj7wJ4/M9O9cBrGmGRZMAjBypwM8ZyTjJzkjjPPHOM19llWOjRUYyk7NKLu720VrXsrfc300P4y8ZvDeWY06uNw9FKpDmqQqQjy6rXlei6p6662T7nn8ShM/xZJBU4PyngdCOM9OMDOM8AVbICjBGCcYGOi9uffqSMdvc0wxtEzqAVIcgNjB9
/069Tn05qQHfgtnOACQMkAA9T3zk/Xp7n6CUlKz6PVPo7/LRvRI/j2eCnhJujUi41Kc3GTf8AMmrp9bPfZ69V0FCk7hnI446ccf1789fensvIXjacH2IPUgD8Oc84HagbexwvrjBz+P8Ajz+FPbACgEEjODjkntlgfTP+TxDu+vbfytZadNNeppTcY6Ju6Vmkmu2j62Wmr8/kLw2cDgdR2A7dfTjjPbvkiXB6gbVILEEnqcDLDqPbqQPeolViRkLnBO4nKk9gD6jGfqOfQTK2CAW5AJye3HY46deOfwxUSWiaWurt16N3ej07t7uy8qi7JXV3ZavW1ut/Pvv2e4oyFAXqABlQSTk45yew784wKUgsQCPlIBPI5Yjr046H/DNM35JB52/MADjOeeQOowfXqQanBUKFIxnaeDn5TnI74Pt29emM3r1a+ev32KvFpJQ5UrdW29l1+emjQgIBO4AbQVXjtxwD3I7Zxx60MpcAKxII5yeM98+vA7YA54HZsmVxwMAnGTkEnjAx0HHJHvjoDTl+9g4yAPlB4yRwAPboffp7S46Lfd2fV2s3fpf/ABb76lRV3FWb7N2127Ky89d7pvUfHjO1gdiocZBPTAyOnv069+uKmCgt1ZUz1IOVOeMAfoR2z0qI7tqkggbsEHoRxkDjv+uM1PGVLbWDYAXgcnjpx69s5b15rNyk9VfpfqtHuu3T576anbRppyhT5V7zT5rXd7q6+S7u3Ut2wCnDEOMkZA+9nGD1PPTj3HPJz0OnxrvR8t8rA4xkAdsZ6sOfr+eMCPYNoAJUn5QfcZ6dARg4z74Pp12j7QIzgOcMQcdP97g/N6ZPr65rz8XP3G16K2l9t7/Pp1evU+3yWhF1qcGrqKi+Z9HdLbz7a2Xfd9ZDGzojIPvYMgJwWHA2gY69D7Y9eva6AqxYDqCrEgYHzbv7uc5xyODj8jXIaaJJXjXAIaQhwM7gMj5cYyffp+or0e2sTGIzCjYKpyRng43E46Z7HOeOMV8xi5uKcW7Xfe1kkr331ve1lte7sj9dyiimlUSfu2jaySsuV3W+6et7eqe3pPheGMI5KhpAchmJxwSRtHYgHIPtXomi3BE4dJdrMxBHVSe4wBz24Gc96840KHbFjLLMF2LjJPzDofXjAx+deg6VbBI0kUAyRtljjII64Iz26jk846V4VZ3lK/XS+mu19NVZ6ba30PvcJeEacYpa6ys7bqN3deV1rpfY9e02Le1vNHIUZWyRtyCThicZHHGAOP0r2GzyyWkjHBGzccfeY5BJ56gdvXpyRjx3Sn3QwMq+WzlRk4POOSOcY47459e3c2+pXAEULuqJkKoBGWxxnnAwT3PX8cDlUbuy1vK6tbb3bp67LXT9N/fpuFrN6TildJ62t2vs9nu+jR7Nb2yRz2tzBKVBVSVHG4nHJ5z2x7Y4wMV9DaP4hKadDZho0JgXDgDIZejDGMnqc5z15JxXy3oWptcPFbS7g0ZwMg5ZccN3GMZ+n1GB7BoNwTcrFjzYcjYr856cA45B54Hbvya3pJxjJxTSlfVK2v4O/nfy0W27hGolz2bglZ2VtLbebtq3vr3R7T4Q1aVNVDSzmVN2CC2VPoSO5HGV4APBPFfT0VxZX+hbhM3yKzzFDtJbI3KPm4XgdD2xgc5+NbeZrG5MghMeHyMfKNnHA4PX/DnIBr1vwprOoTwmJJG+y3BZWUE4RV+8Mdicjg9cetXQfI5Ri+Zzum5O7bvfa19dH16Ws9vNzDDqrH2qfJGDi1pZyScbpdb6Oz/zOhsdQLSa1Ym4ZI08z7KCeTgEAjP97Pbn09vC/EF00ema7A/zzw2lw4dm2qGCOUYsASoHzDgE5IBNerXFzb2OoTPKpMTI2McYwOfqWzxxzjp2rwrxZqsC2viOWFZJM6fchlHLFmRgUXJGduMtzx79vVw8nGpTa933JW5dGrpee+9uuu70Z8XxBhk8DWfLK3K6nMl7ybjbf5au2lk92fzh/HHUWHx
N8Y3bgSz/ANoXKLbZ3BRvwuGI++OSePT2FFY/xXkg1L4keMnikAVNUuWhOM7juO5HfPzFeOccAfSiv0GNZQpYdOMHahSs3G7fuRd3o+tv+GP4uqR/2nG+9f8A2yvq5crbUktVda330Wuh9Ta74Wtr5TKiIWTajLxzjnGcYz2BHPOB3rufCVpdaakMS5eMRKoLAIy7B+9jxgliQV2txjnA444XTdfMzC2YDAbzYxE3GFGQC7AYHUkEE9BjpXp2l3KzpHNE6J5ixxyvI2drR5V0UYGQ2RyCBwOTXgyrKrKzik7W73vZqzt+P9P8A5nd+7Jq23S9ou+uvRXfZ67HYRWsFzAxmTe7M7rDuIUYIzKWweRxjIweQOleS/EHQ0kjlEKwsQAsjEZYuAdwGBhnY4APcV7LaA7i6jDeVujbnY6ngZcjjzeAuV+UA8HpXM+ILFLiOYEQpndxkZDnqyt/FICQSMegBPOc1TfMmk+j9NVd211t+PfrMXZparTW+2ttL9+7Xruj87vEuhSwXkjch2JYeZwd2TuRhxyRgKc469qzP7DS5tkV7fdLICAE+9GB1Jb0HUjGRnH0918c6NEk7yHeXUZMj/ekCcgYA468HOetcUYo/IHmGNWMYKgHLFmBy7YAAJx83XJwOxrenVnqk7WaTktLJcrTv5ee2u3XohUjFS3bt7t37t9E1Z319W7LqjK8I2sVheJtaSDyyEYqCDISRhpP7u3B2nPJPI6Z+2PAXjxrNrSzON6GPZKRuMcS/feQbhuHIDHIPTjgV8Z200MdwoO0x7dxdMLuI4UE4yw65GBn2rrLHxFc2zu8F2FUfKPNcO2F+9gfLhHyONxwB6ZrHE4d101PW60k3pdvVqS+WnnZgpqXuNO7d01Ldqz1drWezXbfXf8AR2Dx3aoBcmfyQYgmN2I5WcEMwiJOzdgAqCc+vFeLeO/FlvJHdthzO0axiDB8pFG7LopP7xlyM4Iz9Qa8Cj8fz+SsRmkilQhcKOB6GIbsEryA2cgk5rL1TxfJcpGnzBgFVnm5aJkzkK+fvOCCfTHNedRwTpSulezSS1d0rWs7eut29u1hyblZ3Surv3UmmraaW0d/0ZXvL8w3Xmx7yYXZjtbEjRtyvmeqjnngnrgV22hePNTtWtbZb51jSSIxSg/vYnGS0fl5HyrwA2fn7AYOfH7jW7aeacPGqOzK8JX5vMK7uScY2EkDHbng1VhvGinaRcA5Mj7vmAcfcG8YCqnPQdwOmK6/Zyd29euq0a019OlrNffc0cVUUea0mrJK9rbba9NOvm+h9Y/8LCmv0kSS8upFDCNZEJVg6Y+aNR91TxgDJf1GBXoXhb4iXthe23lXUslzFLbmMpIV2oN3mZXkHAK4GecHng5+OLHVS8ZjV/KYfvmKHLkDO8Rr1DnjbySwBxjv1Gia5PGY5nn2xF3ZGBJyCQI0IxkMcHeDnBA781jVh7kk4q1naLtaS0f2Uut97GM48jSajytKEr6u75bei0e726dT9Y/CfxjvlRJL3UZXIjWUAPuWOMAg4iz8hQH548ndnOeOes1T4k2LwxFbiSRpoYfNachypYn9yqlgQgzkKOBzknivzY8P+Mr1JCn2qS3BHmHaN7FT/wA9XOMhf4uPkyOTmu9HiW5uYrl3cGARlYUkU+YQeBLG2cqG5A4+focYyfn40eWulJL4tOXVO9rdP+BuyaOGVZx02lZKMd3pdvVvzt93n7D8QdatbqO9mmmLRvA0NtDncZGHVtwP3ASNpXpyc81882HhOXXNbgjitJJC+FEkZIZC2NkbPjCqRkFjkHBGMcV2+hWWqeJmis0t7iC3DtDHIx8wzQ/KfkbAK+oBzgnk84r7o+DnwZiEUbhNpeNGDTpjzJBw5EhGSFJHGME9/lrtxuPhgMPa6jJJXUXHmW3u2vo7bvfr3P1rg3hOrjqtNypL2d4uUpt2t7tlHRr4bL5W7ozfgP8ABd7GKza5hEJjkJnUP5jIjkERoNoOGxl
gCegFfqN8PfDNppltG8en+ZGoSJnC4Ur0U7CCSgJJZSRu45Fc94D8B6bp8VvDDZRveblIYg7Z4xjJbjG7PPUEAdxwPsLwp4VLpGTFFGiovmWyKFDbQN4B7MM8jHOfYGvyXPc3niG+Zuzk+Xo91pdNvW/ktHtuf0/k2X4fLcLGjSilFR5dFvJKK01Wu13te7WhZ8OeGiwt5JbcurIrDJ2/MxG1FxnAXBwcY5+hr2nT/D9uAEkiIRnCrLkhYySMhhjIc44J7jgc1e0LQoYERxFtRCIotwChgQDtwNwJPdiR7DpXqukaVbyMq/uyCMAhcoOBljgfM2ccjp0HNfEVaknJtNuz237O7t+Gvqru56FSTXvR+GKSSbvppqrJK/zt+N6/hywtB5dtKsiogBEyHK5X7qPwM9+ByM17HZadAsEcgjYROuBkbSOmCF4IOeQAeD+OMvStIigdMGMq7YJIxtHGd3JJ7egGe/JHdtFbI6AlCEUBVjG4Mx/icA4x0yfbnORjCMOZ3tZvVuz127X03Wlt07K5x1JqTTs7OOu+rfwpL0bXfre2+jpVtYsyqGk81sZXOVBA4Le47dfp1rvNOW3hcsMyno5x0bruHt04GMeoOa4ewLLITEI34AZidpUjP3ep47n+RrXN24YRWrlpXGTjGB65PABx7c+/belal7yprWyV0m03bVaav5bdVZ34KtGVVuKnKKa3d0krR0klZq1r7O7segBnYECVsE5VCSAo9hnHHU89+2TiNYphMJXcGQZAOflcZyBnHPfv6cHti2txMsUbsu5Yzhm3YyT26cjjj05HpVoXyiRQzKqMRzncVB5wBx09T65xnkb1JUZKnKo3zOScVJy5fs21SdtdO2y3Oekp01JKEZRSacklrZJWdk3dpXd9b7s6VTCzI2794MZAIIBz0Pb155IH1BrVaQyRKiBEKBcFcDJxxx1Y/iAenJrDiktY1yJomLYYIjZ3E46H5sE9QMjp16VbtL2L7VGqsoBbJOckLxwDwA3rxjPYYFe5R5b04z5Iqs4axl72ttWrO3rvezZxyU5c0oxknS973k1HR6K2i2/W5fktZxEz8lip+YA55wCCOhB/p3rIkiPIZfmA+TB+U+xPcE54zzjg9q76ARvDwQ6uCcE/MG4xnIPQY55rEurBXZg6ZjJx8g5Unp6YAJH49q9XEZHN0qc6VR3smk025J8u+60vZedrmVDH80pRnHZpOzStflWlvPXrfa7secX9ojShkzGekoj+4CO/B5+vr6jiqEN00MwieVmjBHJ4OB3z6Dpjt+Ndhc2H2ZuWDo/VCcEk427SQep9hnHqa4nWISs6iBcFcMSMgsOvzD1XByehHGK+axOCngpRxL5o1FUScLvVXi9FGzV3rsrX3eh7+DrxrSeHbi4OndS0vfRdW216X2vc3bmX7THEIXJUDDYyMnuWA6sCOSOBntmqlsLyC9ijl3GB2wrHByvUnJ7H/wCsR3FbSbiVJlSQoytwoY4YYGNmDnr65HQV2kCRyjaAN+3CngkMCMKpx0PTj86+gwND6+o4iFWUZKUYypt2s0o31W61f3Xduk16iwsXTlTg4uMmpaNq/LLr230vp26QagsMPll2DRSAGMHA2tzkdOM9xjAwPpXhvjnRtK1201C0aPO6ORRGUyA3VQSf4cnJPTn8a9h1CKU3JtZUcSRkScjGUH8QycY5GTnAwc+o4fXtPxuf5WWVtu7IBXtj5c5B6EdxketLOMDHEwnThTbpxSjJtfatG+76PW773ttfsyavLD1KdaNWUKimpxcZWi4pxcWkmu2t99fQ/K/xv4PXQb65MY/fxTO0EnJiMYJJj39scYQrgn0Nc34b1i3huHDF3+dR5eSJGYdJEJzgKSc/eJB9q+x/il4PS5t5jb2vLZLMo+XaeXGccDAXn0+tfGesaTDp9zdAFbd44y6KDtdnOQCFxjIH3hkE4GK/LsRh6uAxLi9Iwb5ea+uqS89u29vRP+ismzClmmA
h7RynUcIxlqrx92N5NapJvq3956Xd3lpd6cE8mWKU5SQ/eKKQME4GNqjPORivnjxpo1obieS3dhCsaNtkOZHl+cmVTxwvRQB/Wuu03xDeWsE1rvxGImZGmGQZF5Co56GT05HAOa888RazHdM2MAswR4kJLvFkg4xjheuffv0reeLVSEdVGWnRWveK73bs9lv8z0cFgauGrSa5vZyVkk204uz010elt9OZ9j588fvHLbLalDuxy5f7qrkHYNp2sM9efQ8ZI/Gf9svwVY6vpGt3EVvO6zQSRyB1DNNcoG8qXoOVJbcOMdcEnj9qfF8CTXccFrbqwkdQGkGNsJ3F+DnLHGQC3r9a/PX9pnwlG1pqUvk7rILLMuEK+Wuw7oZF+bcZD0U4PHFfZ8J4tUcfhpXaacZLV62lC1kmm/K/3HlcW5dTxOWYinyaypSbUtW7pWVkttXotddD+T3XbWbSdZvrR1AaK5liwDkqdxAXfgdMnnGPooogmdQDlmPRwSWCnqD2zwcgj1HSvU/jnolvpfjPWWtYZIbaS7MsAdAhGS29W91IJU5784xXjsDlRhWzwSSwzyQOnIwSOhGew+n9eZfNYzB0aul5U4yWn9yOj13vfXpv1Z/njnNOrlubY/D8/MqeIqezSbSScv5W1ddF59L3OgilIGe2c9ckc89QMj145PA65FxLnBcAkbcHuAQe5568emelYaOxC7sZHJw3ylR0wec4Pryam+0EvyQoBByOp45BOO/fnsMdxW8sMm9tErJWWtrWs0r33+Svcwwmdqkl7Sm5arW/vSd49Uku+/a/dHRQ3qCQqN+0jK/NwSByWJ528+vFaCaigyEkYE475BYdsdQM9Mg9TnuK483BDBwcqw55456jb15HuPWpIpipOM5Odue7H17bVGePpyKxngYyak09FfR6a23v0dtLJK/pp6az2bdqU2r8u7St8Nl+Hns9Xsdw2rTNgo6bSfnGMEDHzADPBJxzxxnjmo5NRyf3uT5ikDnbwSDngE4A6D3HauW+0fJyTkEMSDzz1JPfntx161We7IcgyAMFAUkHGCDtCnkHPTJ/D35Z5dTabjGzT1jyrm0eq1vdvdW0a6994Z5UckpSu3yrV3Sfu30v11vvuvntzamUIjyr4J524O7sep5OSOP0rofBmh3nibWbWBEaSB5wZdgILAEbtpAOD09efwzwtlb3GrX8Nsq58x1RhGDvc55YY5B55I9K/Qz4AeAYLH7NPPZ7J3fbFNIoKyE7cKwI+YAHB6ZO2vBzbF08voOyTqN3UOttFq01r6XWlz0P7Tc6ba5nFJPXaSsr2WzUWk/R31Prj9mr4XRWUNkTabJ1cqXYA4Y7flkbunHJxxx61+sHgvw7b6TbQqUVUWGKVSfkbcFO8k87QcDYeQR1x1Py78IPDCWFtY/umikKCRi3yHkqcqpHCy4G1s9Bx1r7Agu3aGRQwVUiWAxLhwwAAGCMY7jHIw3fIx+D8YY5SjOom1zX5lzWs2l7qavpe608tbnxecZ7Uw1OajVkrrWUdknZ2WyW0bvfQ34rtru6SyRSRJOFREclgvAzuAPDe4O76ivqX4ceFW+zxSGILIpKbCMgL8vGT/EO5wc8mvDfh14be9u/tJRYvLkZoiV3AhcEkk9yCccdz0xgfbXhqxis7WHbFklVJKjHY5OD/e9c87R0r8LrQhVqznU3u5Kzd7u2sr38nfT77H4/mWcVcVKdpWje/NpfdXW+l9XZrW9nY7nQNMhgMMUSgO4XcV+8TxuAAGQPQ56g/h734a0kuEL4wEG4HHygAcnrjr79Oe+POvCWmCeWO6xsY5RCV+QgY3A5PXgDr06V75o1kI9kcIB3kbsA5AU4LewOT19h1yK+Kzd+1lKPK+0XfZLldt13+bv0Pm23JuU3eLejve9rLVbO9013SXRHZaXbokMbRqG3DAUnHyggZ6HJ6fN355zzXr3hm23mIkbCuTjGck4B5Hf1GD1BHvw+lWJleLqAibW
z1c8DHsQcYwBjpn09m8NWawiPIBAJxjqCcE/N1JAA7euOOT83LKqkpxUJPkkvfaV72cb9rLXa5rCclKLuk01po10dnbvomd1pVqV2nuRnoOOOMA8d8fUfjXWRRLsXj5yADxjj1wByT6nJ6dazbJAShwoCqMnHBAA5A9Rjr3/l0sKjAzjoQDxgj6YOST6n6Yr6bKsqpxTpwUbXinKUbty926e+i2u1326dqrXs1KzWqTS2un+ll11YlvbcA4xkkHPHXJx05HXPI9CRkGtJLUAqQPqRwSAemOoPTPfbz33VJDHjYCOwJxnk8Zwegx1xgk/jWjGm44AOAO3GD14wRn69vbv9pR4aw8qcY8iVSTjaWl0m11Xltf1fZ3TrtSve7/Hv238tfW1yKGNge4Byp55+nbjgd+c9eauoCnUZUZwTnnnj5ewxnt/PAcIyMFeQQM569unfIx9R39pUQ8rjOVxgnPTpknjuT1z7dK9KOQywsIqHN7rg+Z2dnaPVadeuvod1KqpPdJ2V3dJ20vf9Ffpr3V6KQkKC2AANoz37A8cZOeh6H89aNfM2j1HQ9AccAY6kjO09z1PSsCNSjqMjJBOM4+Y9OOSMngZ6fjW9aNlR0DAhhz0HHPA6jPI5P86+pySrNVPYVUuZS6yT0tHRbb6bauXXv3xklypbtLS/mtbb26PystXveSJVwVABB5J4Bx1AOPwH4nNTfKcgbc4GR1PGM59sc8ckc+1QlwqAnPUknt1xnqOvJI9McdqWMggNjHI6dcfmc9+uce1fU1KsIpWjfa6l5WutrJbv/hjqg1pfdtdNmtdd7X2t9/QZKo+UbhuLEjofb8Bxxnr+WY0j3uFyDuyrcenJB9fqcZ5watgK55xnbg5wTjjIPtgd+AfXnKkIjBi4IBGCR0645z2z74OOwGPCxWSYbNqjqTUeSNnZK97crtdOyu7rXU9jL6cpTT3Teqey1i7aX2fW2jvr3spbKijgADnOcZx04HTOf061zHiO5eKymMe7EalztB+YAHPPPT6ZwfU1Y1vW1s4SqSbm8slyBnZgDBJ7ZHQ5z04rzG/8Vw3dqViuDhjm7QjJjUE5UHtuzz14zXzmdvC4WnUwNCKiuSUGoSXu2it2t2rXev3H2mBy+tX5ZRhJq8Uk07LVLr5aI5/QtEudf1NtUmn2JbSlII2HzEgn55OR8o/8ePJ6CvR57qOyxblmBVdrSLnBI6AZ49R9Oc+vN+HNZtZEuDHH5RXOxEQgsQeuQcHdnr359MnSuZXuY7i4kjDbUJjTgFiBwcZz/k814vCmXYenCpUppKpOV6k6l3dcy91QdrLtvfX5/dYTAShOMZx5acFCKurO8uV2T2eq7W9GfL37RvxEfw34U127tipeOwnjSB+C8jLsQRZIAZSSeo9gCefyy+DF9d6g+ua5qETa2k11eNHY6kPPtdOlkO4O0bkApLghTwGKAAACv0d+OdtpOq6JrsfiCBQj2zw2omOzZcsGETr/AHwCCWBwRkGvzQt9Z8L/AAu0bWtP8SaiNKt72/ilW8MmxPsbSNskMgzmDByAACOeew+lqUeeVozam3GUWk7JRcEmlrZ+mu6tdH9e+FuUqWUSw2GoTdatKkuaEG5cvLFycJRs20r7d99j8/f2mtA8P+Mfjtp32fwnbwzv5SarPbx/ZbbySSCqOqsqSHH3cEjgZINfBf7dOoWfhmz0Wz0ZJtPjs9lmgkJWJuFDRiPI8xowTuckbgy4Awa/Xf44618LtE0LxH4z0HWbLWdUjhF/bRxXK3bXLLGzRyowAKBDnKc9cZziv5//AI2/Fmy+OugeNptSmQ+I9Du/tVhCgA8mGEuGgCA/JIoVd55LDHA5z9plWChKEaqSdoxjGTV7SaTd9Uklfv0P7t4BqYhUcsl7DELCYHDU8FJYhT51WfJ7/LPVRvo272W2x80eItb0i20mO3S+aSW5VGWBHOZDIGLSvydjMR0YnbXzHqk4t9TuNsfLPgeaD8oOdnP
X8cHP51VvdZun1BHlckW2+NASSY8EKx68txk+uKW4imvJhdfPM0rbtzDnYMclf4V9OeT0HHP0dKhKLTqap07NJ3vJW6vRJa6W7WVj9MrY14mnKNLmVWnWUm1vaLSukna21m99tOnqfw01GCS7NvcORPISy7csqsGAjGTgBQCeD05z3xu/Emxt3tbp2H7xArxFSDu6hjx0x8u3PXPQmvPvCgXT9VhkdhtfG1ux3fez7EDoc4wBXf8AjGRbixZoW84jahQAlWRgcMq4OFP5HqB0renN05pcycXNJpuzSbT229dnp3PTo4WtiKKqVZNp0uVXT5tOW0rW00afndHzm8rqkkZcqOeOScZ5wcDk8cdOPaorOAM3mEcAk5bvjqee/oCck5IPBzeu7fZKwKnIY8HjHHQjGOwOOpzTIxJGoCgYJ5U8jnkHqfz6eo459KU7pJNcz3v233Suvn20aPk5YWSxMXVi2qV0rRbb1UVdvSy3sl69bdBYCKYPG6/MwGAmVJwTgMed2BggEc5HNX7vTwlsqxBhGxxI57kDJHOeegOMVT0hFedA6H5sgbRwTwcDHUdz0z29a7u4sYU04KFLScuNxIVwQCxA7YwMdumD1FbYd8zqK9ny2UXLazjrrrbp8tup7UMNTeHu47tuK0una6l5WXRJpK23XwnUYVSR1YfdY4weOGPOffIJGfr6nFJ2/dJBJyT25Pf0H59K7LxCkK3MqxD5QAcnqWwck/57AjiuNcL3yTu+uQeozwcdwefT3r0qMnyxvurKS11dlqu72WvX0Py7O6So4uooJauTTWklZrRtWd99PIsWzbmw5AA6YzzgdOAfzzW1GBsG7pjg9iV7+5544/M1jW6BSD1zkkZ6D0/p1znnp11I2ccHjDcKfQjk9fQcnnnFHNdu+m1lt0XTp0XyN8tbjCLqK/M+V+7d68tk7uzXS26enmXo0yMLgDqSTz6H0znrx161KGwcbRtUgZ4BPPU59eM4OBn6ZfDtOBjII4APG7tnnpye/wClPeNAwYDjcAec4YdRgdj29cHjtUOWr22a0f3a237dr7aM+phQcKSlBQTv0XL7tk0ttEur39bIktYg8gB4DEjGOmDyVJPcA/hwB3OpLbxoUliDCI4CFx8zEDkkHgDPU+n0JrOtkcyKiHDFiRtycdCf8k+nNd//AGQzafDduM7CqAKoJ5x/COh7Z+vpmuGrLld43emtrabK9t3fvr19T3ssw6xGHnyxjGUEtWr3bab5UtVa3b5K9zD0uzmu7uC0iUZnYKm3gMc5JL4GBjJIOfTrium1nQLnTzGZofLjKK0MjJiScHgkgE/dOccnIycDvo+HtFuDqtoxRtnRVY7AWbopbBKk544PTocV654l8OyzWthbPAzG2QgzsTI5jbBWMcDf5fOT1O4/jxuTk29Uo2s4q65la6fqna61UvLf6rA4SMKWHo8sZTqO83p7tuW909Fq+3R6HiulWsMpggljMgZySqNtJOAVV8DIIGeeMA89ap6tpimSSRA6FJNtwhyVD4O0D+8cEgnHPUDivUYvDDadLDcSRlBJHu2KN25Dx1/gZhxt5Iwee1WLjTLO/kFvEgjlVdxhAGERejMSf3jnJzwMGm5uLU5XSUdFotXa7kk2tNNr7dT1q+EhKn7NQ5tFaXRSVrtpaO3z9Oh4vp5+xXJmaESW6oUkhc5OXHDhcEhlxkEdM+4rL1PSbm8WW9WMCMFnGOQiDOA/yjGB0OM9uhyPUNT8KPC90VjeIoCxDHCMVySY+M49R/8ArrlpRPa2dymC0Ew2SIRl/m6HPUE4zkHGOeexGtdq197yT0V1s7ve/r0301+fxmWOdOpTnFO+1lpZ2tZu2t+zt53bPKXhZSx5HPBwcA88H6/T/CmFSQAe3U+np+fv9eeK6K8tduPlxkHAznBxkDjqOe4HX0NZ6w4A4HOA2cZAzzjpj69s8iuxVlypq10tm+l1fXbTS+/yPip5bKnUlTV+V6tK+11a62u
rb3872SvksChGByTznjb09eD15z7U9MkhSMkE5Ppnrz7kgdOMcZyautbFmON25TkDHJ9h35HOD9BkU1bfY+ApyDkEjGRjvnPI9eenHpTdS9tbWs+vlv03+7qcn1SuqqtFum2rKz7xv71nZvS3W/3KSCNUZSAWXPzDJ49SMdj6e4rQMaIAyDhzkD1APBI55Pfr19xTDZvCqu5UBwGAUjIBzjjt9DVkyKEVFwxBAzxkjt/3z74Bz3rKU02tbu2ur1Wm3k9v6R7tDDqjT5JRVG9mnomnZJ2e+undO1+9r+nxSTska4wzEhmGFwB82frnof05rWazRHI4RODkcHcvXA68/wB3/Ii0W4HmxxSKChJ3EDnnOGPTpyOPXnkitXULOMET2srOhPzqemfQLz9fYDmvMxLlJtJWimrSu2r6X8tdLLbT7vo8JhoQwkakE6nLJNtu0tHGPu6O9rqy8r23ZqbvttgYZw0gRR5MvG4herDqAwJyc8HOT7+baxDHFcOIm3JkDccA+4IGehAOPYke3oemzQgxxsSp+5jqqg8DjoB6jH4Zrnte01MyTxxkAsSyjkcdWHbAPb6/g8PV1Slq7JPRp6JJXvo/S3m3saZlg51cJOdJKTSjJJvmajZN2b2afS3S1nsuBKux/wBgdcDt2GfXv3/IYqq5jVXVgrbzjJzwV7jB69PX09xpXA8rIjOUcDOTk+5OehycY6Y9e2Q0TMARjG4k9effJ4+nsT68e1QeielrX6Xs7XVturW+r8z84x8XGXLCHPNfGpapfCtO6s3e2z81Yoy7xk84KjrnkdQR6Y5x7e2MU0eQEEEgc9+Md/X8P171vGJnQZAJHXpnt+fTgf5FKWFclSNuTwDxk+3HYnnsfc16FOzXm1s9V/wWn/W585isBVjONWM5RVlZNuN2+V2uttFbuRoA6jAO70Hf359Ow7ioGXBOOCOcdOfw5wOTyOMfSkYmJspzgYJ579wePTr+XtWaQljuHfI4znPJ+oHf1/KqSsviT7+SVn316WerVlpY86vXjGKi4+/F+mqSafNZrVtqy/I0YbnKmKTHl9TgjnB4xyOfTjPp3qpeKHcso+XA6DnvnI9/w6Z69YnBjw20/NhwB2RhwSOq5/me3Wl3ksVJO3aScHBPrg45zjngDnqeBV07c0ZRet7dNLW0t/XlfpzVsV9ZovDVleTS5ZWu+XTe+6Sd12fW5mvtDOmQUIJCtwC2flYr3YdsHgZzXO6hbk52lWycOM4OCPyxjqcg8fhXRXBTzMqoyD0YZ4xgD9MY4P8AWi0ImMhUKDhmOTtAAHJB5II7DAzjsBmvZw9Sy5m302u10TVr79ev52/LuJcvjjqNXCezi3rZ8t1yJLR6N6tW0/4B5Jqtl5buwGF54xwSfTnkZ+uOOM9MNSQ21gACOBjoPbB6Y6A+nHFek6xY7oWABJDZzg8gkkckeo65rzhg6yMpJTBIAIweCMenXGc/QDrmvrcuxHt6XK3rHRLv8NnrfW7u+10+5/n/AOKXDH9jZs61KnyUcRNzvGKtz3Tcdmu7XbUUBFH3SFyDzjBHUkDsc/Q59wCZQFYn5RnGQSSCMce+AQeAP05xCCpYZORwCMHDcjJHPTr74Bp4IBb5iAMnAPpjt1zjjHr29fSs9tL3t8/S+39XPyWSSdu/k9enT83a9ridweQcEZ6AEEZ7entknn2LySMEYPO1geuM88kA45HXp3poZcjk7Tn5mHO7gce/PBHTk45NL8h9885ByT7nOMEcZ7j2puL3a+f/AAfO+9/QjmUev9Wv13v+P3jwecKNvGB67uoJ7/T19+alj4HznLHcSp747jPcc9vw6iq+FJBUsrHkH0APP0PHHf8AKpcNkdG6huhB9Tn09+/UYxWco3d4p6JX1vrok+6v59gjK/r1X3eequWGVDgAkqwHB5yccgnB49vY96VFQNuLAYB+91ODjC5649u/FQxk5/2VPAzg5x1A6nHI6j0+U5qQgn5VHB+Y54L
HjPqR0Gf6dKyemjfrrb5X7+nVHVRSdtbq93b5Wt36eXmTFS7AqcpwcY5J78ZxxjrnjtkVOgALMMAjoucEqPvDHcj078DNQp1AyQFA3d8HvzjjAxjnmpTjjIGVJwTyW6egGOO/cBvUYwlZ7bPo+2nX7/8AgHtYam7p6W01fbS/o9dd9y3EcjcvJ7E8kHgDaPp6en0rpNOlljjVVADcBwMZ65zj3GDj6dK5+HaEABAfhzjtjt/s9eT+GO9a1i0imMbVBLBg2OuPfuF9+fr24sRFOLTato77LWz7qyXW132PrcriqdWMvevKycle8UnFfrr6eqXpGipIzo8bjcpwDjqWxkkZ4Yjv+PHFe16epktkAwLhFXcyHOfXPHb1/SvINBKKsZkI3OAx6EjHOQOMHI5wenQevo+kz3CSNktsKnYy46nGC2Tg9Mk/mAcmvkMdHmk3o3C1r9W7Oyb32v6Pufs+S0/Z0YRejkknfdXUbN/1Za/L0jS4wCXcABcEkHBPr65OfoT8vauuF4YUWWAgI5BxkYLDqABx14H4j0B4aGQCK1HmkNJ/recZLHrweMdvTHr06URRs9usUoeP5QVxxuOc45wAPl5HOe3evHdpW5rqzbVtb6pPs9enmfW03yRS6RUU3fXZJvq9bdnq/JnrXh+5S6tY8khgRtUHADcYBHp69fUYr0PR4UuBvkOTAcuzcAAD+HjHQDtzjjsRwHhiGy8j9/MFkVeFB5OfmGPce3ufYd1pEhV2QZEEhwZD0Zc8c559+mT9M1ajZpJWk2nbs3btr02/TQ9KlJ3Vm2tNXe7Xu9bK7XXu+nU9I8L3Mb3Och4m3KHYYZSpAAGf4s+5zjOK928O2cl3cQ7JREqLlnI+YjqCD6j6dfXpXgelosBeSFFAypwpzsxnkHGeRn/65NejaL4gvLRWljbMa/K3fbjHJAxjrgkZPGCOapWjzX3ST1u+Vu3Tu9Ommm53uT5bq6ns1LXVuNtF0vs9l21svoySwdYgCzSiMAGUjd5i49fXngD0x2r0Xwe1rbRiGWVUaYbljYcDIwDnjk88dj06mvD9L8VGWxMfml3lQNgHcFYk5GCOAOP1HFbX9tXkRiaMq8yqgQoPur1dtoBJYYHHAznBwRSjUipKVnJ2WiWq2s7q1mle3RX63Mp0J1IShUbSej2+J2at3d3ta9/Q7zxlLb2d1M0SESKnmIXb5HODn/gPTHHtg85+avFrMPDGv3ThPOuIblwckBVVHDKwBBCkEbh37kYr0rxh4juL5YPLYPIIUW4k6GJQPmwpz8wx1z1OPp434+1F08H68bd0Aj0u53Z+V2cxHJHB69z9PfHVRqN1VZJ6tpO9tbPVp6WZ4Gf4acMsqqV9KE1JX95csFyvV6O+vpdvc/nf8UH7V448XSpEiFdbvXUZ6qjncuSTjJwenJxzRXP61ez/APCR+IrrBVJtWvIwgbLKBKcs3XOeuc9AMg0V+hwtGnRi021RpbXa+CL35Xffe+u5/BWIlTjisWnTcv8Aaq7vZy/5eO2uvl9+2p9IR6xI11GsQRVVtxCAdcfeHIwT+PTp1r0rQPEUsflF2WSMfOoztYbSMiPqGkGRnjJH4mvm6S+WKRWSQqCOcHIZl64IwQp6Ad+e3Fbui+I2F0FlZo1DcqjckA5I6cK3POO1eNSoxitZK8ZLmVrdtX+Tdl0tsz8JlSatZS+G2jvq2km73+a1a166H3Db+IvOggS1eISMkbTOwwGbGCAM/f8AYd/zFHUbxpEViYxtJkGUDEMfvOhz8zD0OMgnpjnyDQtbjuLYSAOBJgqhJ3RlMYZXH8IzwAuG68cmumn1nZAiKI2K4Em/DPlRw2c8HnJx7Y9K7lGLas73s99NXvdqz6K2mhh7FxmnG7aaunonaytZfind63ZxfjdoZYflLSyhdjlRuDOCcEZxwueeQO3cV4Fcu0cqrHjaHLNGfmcMOu7p8gH3lxgA9e1e1+I90lu6x/dkLOnUkHjJVu2Txkf
TAGBXg+rh4rmYsPKYrkuzYGWJyCO+QDyOM9SKhU0pJLl1t2bvpeXkl5LRb6WD3ne6cWmk7Wd79l0Xn01Ks9yEYsQwVAwBCZjLcZZee/GR6jtVP+1oo4GjeQBwNx2ncC3YjpkAZyvcnk8Vh3MsrCZWbu20qegwMEEE53Hkjj6jrXO3N3KGCMpBXaqADGIz13Nk9Bg+vtxiuuFBTSXMny6uKu7vR3V9Ol+i7+esYtS0a2Uk7a+a6b6a6fI7b/hIXIAa6RgqsWJGGZjjA27s7uME54OMmqs2vTTTIGkbDEs3mdGPdhk5XPGODwB06HzuWaQyEo7AqDjjIYnGM9ffJ+nbFPaWYAnLqQQWZm3KV7r1zj0HXrmuhYSnGzT0UbapK221rtfC9tHfTqbOMlvor7JrW/K357qz/pnpkOuB0EGQHRiQ5A7DP3u/UcDkYGTV201XIRzL5mC5xjC7iQQoA4JXuO2RzXmCXMrRnDjbkjdggZxnOc8Bs8kdcCtSwnmKwgEMyszlgcK7cZUjkZ6dwT9eaynhopN+npr+Gv4rVM1hNtKKSuvudmr6J9drPtZHs+l3slyytMvynLq6HLbF6YHHJ53DOK7fS7yBgqgMx6pJgjy1z1x7Yyfp3ryLTJXRIncbpCcqCdwjQn5gFGMkcZ5GB168+oaTcsJVRY1ZCI0CqAZt4BJKr/GhzlsEdsZryMXTtFqLT76K260WjaenlfXyZM4uTaWvwuSeqWqs/X77ap+Xpun3csMtsyBbmUx7ZIydiIjDhsc+aj9XGQRtH4+0eCNH1bxTexwR5itY5GSWSJcxSxfKQidwqZIxzjcfY1574C8K6p4o1WyUQvNCGALFSqoikEqxHDFfp6c81+l/wm+Gdppq28X2WMMQYmiKBllL7TIvHTJVcHqDyRzXz2KksNTjVq2uublaS0tay0aejd30Xk9vu+DuGa+Z4mjUacaCqJczg05pcra2d73Wz26PY3/hP8NI7RbMPaiIB0AJJM8sfYhio2uT90YOQD05x+iHgPwfbrDDDIiQvGRsjXhkkXGyRsDndkluB0OK5XwN4SghSB1tl+0IAVZ8ABkHByRzt6ID1JI47fQmgWsVm5litUuJ5F8tzjHlEdH469TnpnHavznNc0dac4qa1dk29LaK92119fu2/qTJ8kp4ChTpxpwjyy6KzatFa6LVWul1PW/CWhpAi/8AHsxhTcpCgEsMZKk9/UdMHk9q9w8NRPbBGZVkJG4gcbcnAAPzE9s+n15rxzQJTiMNbybMjMoJGA3UAAZwMAeo+te+eF7eN/Kn3hgCpVZF+UDjIx7cEEfXHTPwGMq2lNzfPqla6s27Lp0/PbqfUqiuWNlok59V002XTbW+67no2mb5fLMgEcYckAdMYGMcj1I9/wCfpXh+W3to237AHYgKwH35OrAZBAGM8j3OOBXIRwxRwqGCFmxIqodw6DORwefQ9Pwq7bzSJKESBm3BdjL90MM5yO2N3Yg/livK9opSvokrX+elt1ZeW91dowcJPV6XVkvueu1/x8r7Hpn2pWbELNgnnrtBwc7R7dsY4PfIxdtr2e3d8yK8eNp3jkBsfMD1OMemf6c5a36QtDHLCdzEF5McJ36YyeAcn1HSujEcFxJEw3eS6gNtX3BBHrjtxxznitXBXThUXPonFJ63tomvLZ6duhk4cusneMmrt2t0d07aNdLdvQ6ODKwqYrsnf82exyeVxkHjt3H5Zu2lxcWlwTuZgc4wSSWYdwQeO/6Y7VWhtLe3li+WSSAkElT82R6rg4xnkZ+hq3d3On7ykDMspA2nJCgjopPQEc88j9Ma+xbXM5OHKk2m9et2ltbrrbdanMqiaVNRc731a00tu9bu/TdNK3c1hrMqNsG4JJy5PBA6YC849uTz365ZDrJSdohMkqgHKOvzxkjjBJ57/wA884rAecySR7lKgAqXU8A9ww5549cdOcEkV7iN0ZbhHOcAE44YdtxyOoyc+oxXlY11laVOTbU
krrS0bx3vtfXW99emx24ehRVlNKHMl0v72nX7kr6b3VtD1HS76FocNKoDdd3XJzgqOuc/w8VopdxwSo8bhlPzMAcDPU4x0HAOBn8a8hGryWpjdEV9pG75zyO7kHnjnPt78V1mlavBevtkcAMON2AEPQbT3yc54GQPXr6WAzOjKNPDS5FiouPJOTdlZxdkneLs++/3nPissqU+atHnqUZfFGLT7aO2q93zuvU920LX7a/VYAFjKkIW6uTnltpx1AJznufXA7KS3h27w24MCWDH5Sx9u34fmcZrwOzIt7uF7VtxkZS5R+Oo75GAM5B6du+a9osLpLq3KCba8SZHOAdo5UjP3h68bjxX65w5mEcZhp0sVCDr0FGMaicUpRtG1uib3W6+ep8LmuD+q1oVaDlGnWabi1L3FeKu767vo9tjE1OL58Ffl6ZxnAPuPT26Z55JzwviC0kgKXI+5KuUHBGegBx904PA5z17CuzvNQIEsLMGILbFbHP8XHoef8RXmeqa6k7y216TE8ZJhIyFGOmT02nufavJzr6pONSM5cs2uaDadozTWjl8+unT09fKaWJlOlU5eZK3Na7k0+XWzveOz8vMwRNMfNni+SSEkuM4+77c5/qMk5NX9O8eW7uLedo4JIgFyUAywwAx9yeRx26da5hrjzFYpKUBLBihypIyS3PHIIzx0yAa821+JzJJLbEeY0mHVH2qcZO4DBwOmQOnr3r4R5ljcunCeHqtwWtSKXxJuNnbZaNa7tbX0Pv8JlWDx/PSxMVGejhP+X4b3fS97+R9J3OuRS7JTKJ3KECRWDKgPRWf04zn+HHAPbgda1Rm8yQ7lMJMi9CoI43gZ/Mgcfia+f7Hx9caNLcadf3E0ZjyIGb7rA9mJPIyBtORkZI9RZfxgmpW9xJHess5jZdhX5SOw256nnnI7fj21eKKdejrLlqON5R3cppRVnte9l162fl10OEa+HqpxUp0G48lSz5VFtbNaNLd362R1HijW7e609lXY4cDzkbDEN8wDk4JyPb155BFfC3xKsnt76a5RFjZ5C8W4ZUgAnbngEEEjJz05GMivcdR1u6SB3MqiPdtEgJDbgTjBOeScY65HTqTXz/8TdbjurIxSN5johMjxMFk3AfKAwzg9N2OgxniviM2zCWOqQlKHK7RSSXKnqlzLZ3+/uj9X4WymWBnFQTlGcnGSs3ZSULLRtWTsns/I8X1e6WWydZQYGkjZd/I8uQcqWGc4bPXPJGBjvwNt50O+Sb96gk3OwPzk9hknB9h9cdaybnxQ01/DYXs27zmKsQP3iBDyrAYBbBHJ6/hVyXULJIvK3nyBIxMjybfMc5KjnOcnOVzxjGTXDazjeLTTWutktLNap2+fdrbX9CnRnRvCUN7Tu1tHS6u1drXTa+7vYw/E97EtvPcFvLkQMfNkG4QJnAKLxkjGBzzk9MV8RfH28jl0K8WOcTSzWi+dhg5dlV/mjBxvkwfuAjJzz3P1P4q1J7u3uyoR4Y1ZJNpCKFjB+c9eec7h15xgYNfCfxVuJptLvEMgtLXy5jHICDIFw2DE5POCAEGADlvSvuuGaV69KXMm4yi1dtu14W1a0TV9Hot+x4mfUlHLa8ny39nJR1u/eUb2v0s9Emfz0ftS6N5PiO6vCqossnmRRjjckhbeZk/gZto3Lk4IGCcmvkQBY5Ag4H3lA7r6HsNuOF6jP1x9j/tPXMk2sXavgFLtoUYN90hjgKcY3N3XJwMfNzXxiruspSRCCuQWPOFyOAcZ647ZOOehz/XfDCn/ZtDnT5lBWum1ytQs9P872u1of5veI1SlT4ixfs4JOUrTTV/ectXG3dWem1vN30Mg7W3bUwSBzlm+mev6c9aeCoA5J4HJAIAzk/X6jB+uc1ACDhmDBMDAxgnGcnpxnjnJz6ZBy8tgnbkgqANw5B9R1yenHPHevp3FpXdj4RVEndyXLFRklZqKvq173Tp17+SkyodXVgQcDbwQcds8bRjgDHHfJp
5lIyAAfm3EfwgZA4OOBj/ADzxVJKDJXBORntjtjqM5xnoB+NPZ1C7e7LjJGcntk4GR7DGTnvxU2cu1km76LS8evprfyvdBTrurJvltZJJ297dJW12t10f5lpJGUEkqc9V6ljkgcdCBzzx+tRyOrlV3AKBySAAWGeMnoRwRwcenpEpZs7N5IUAErn5xncM5PHTjB4z+F+y0q5uriE+VvSR0JTkDAPJIA+mc9xxXNUqUqcZtzipaNKUktdF3W61+7Q6YVKifxXfRtrZWun528k+mp6z8I/D7ajqMV04QIkjbpCCykAqPlXHDEdTnv07D9Vfg34SuLhtPSWELBbzI0Mh6RjIIYEBcsQOW49SuMCvjP4D+Di0ts/2SRQ0oSTHz+YDjaix4GEB6888nOMZ/Wj4feGI7C3sVVVaQRxsYmGCzHqFGO/Gz0IPBAr8S4tzOccS4R5ndpR663WjfVPZ2fbokexTxUqVFc8ndR91pqzi7Xjbrez7L13f0z4JtVWIM+EgjjSMgEgMkYxkPgcgnoRjnr1r27w+8U0iqqbodpLNjKsMjZvU4yy4JPzfXqa8MsdXWCOKzt2zLJJ5UkKnew3Y3AAY5UclhzjqOK9/8GWsVxPbYEuAIwRgkySsDkMBj5DjLY6juMV+JcUYiboTcrx5r3T3U5WSSfbr0abPy/ibGTqTlytqPNda63b2dtLJP1Ppz4eWUdvb28xRmdiqmZsBSeeIlH+sXpkAgjjOc19SeHLFr+WEE/uwBuUIc/KBnJB4Xn8+OnX568J2caxW0IRS8e7YnIK7sEjGOAuPUgdjg19deB7RI7eFiSfMjYSK4yoY7dojHGc4Of59BX5I8W4e0jrppfrdJLW626a2266nxVNvVT2au27N8t1zKzv1ez6dD2HwrpGIIwqKqxAlByC+APmznoTjHTk8Ec17Jomn/OrKI41IGTjlv9kDnnPU8E8VyHhu3EdumQzAqqv8vB74Vc9B279eo6+s6bDHDFHnb84B3E8ANjB7YOcg+/Havlq0pVMS5Sd0rPW7tqm1ZdHfpey0RU/Z2UU+WOjSSvppq7bX3vsvuOr0m1SMrj7rAZz/AHuMgAZz79cjHHHPqOj2+AjFSpwMDrn368dj3znPFefaYsZeMkbhtG0EEZYDLEnPI6HPHb6V6po8W3YScAAkKfTjGOeewHPTkbe/v0aMa8IpRjFNK7s1tb82n20OeLbns+XfVvXpfR67dX5eZ11mpULnkYGcjkDPGM9jj9Onet+3UsyhcAcZweR0zkfyPbisi2wSBjJC46dSMDGM8n8QeO1b9qmMA4JGT7gnGB9c9vb3r2Mqw8pTUKbslNczs0mvdva71809dnfS5vGbS77r5XVktX2+W+iujRiUAL1bnr69/qDnqe/GODzpQgE5zggdDweO49MDOcenp0oxg7h2AAJB5P1A+uff17YvKQAD1JIGP9k8KePxPB+tfomAowio6XhFxSb/AO3dW9t117LcIy95LXXf5pLXV9dvR27FsHGAuCOo9Mnrj3Hb6ccZp4Gcc4JByMZ745OB6Z9c4GeSaiQ5IPAIA4I4ByeRjoD0wPTrUoJPXqTjg4IHGTg5GMDOR17dq9mcIThy8rdN6O8Vq7JN3t0k9Hdtdn09CjPqrPTV6dLX1b211tqIu3cGAJ7HPGePXPfHH6GtS1cjAOB75GTjjgHqTyOB78VTSIkjGGI9DxwMAY6d+3Xk55ybUIHAwAeTx7YI6579/bvXi0sLPDV1GN1CUlKz+K6s7qW1tLNXXmerSqXSfa0Xd3fTz23t1fbXTQIbbjduA5IORke/HU/px9KIicY3AYJ2jtx0z7nPqOOp60jMFj4ODkg5Ofxwe3HTHAJx1xVbzCAW3DgEcdj65HUfl17A17k6kVF87a767Xtv97232V9j0adny7aWvfra2nnvsXBKY3xKASeu0cYGQvyjtz+P6GWWQMm7gjbld3AJHOTzwOSSMfQ96zRJ5jDDBmI9h0JJ25z
0+nt1w1ea/FX4j2ngLSVubi5gjaWPaquQG3k4XA6lzn5B3I7Dmvlsy4ho5Lh69arOUaDlGnTUXyylKdkmtU9ZbO19NtD6jJKM6+Ip0YRbc5WWmrbcb93d9Px2sdD4hu4re3uZQPNUo4YADccg447AE9ePT0x8sPrN2Li8EIJtzKzSAKc5LHKEZ6ADJ9OtdhpPibUfGOlW01rcuGvHJnkbKbYMj5gDnJIPHGfTFX/+EV8kSM0Ucm+RG3IAN/OSduSeuMjr3718LLHTzJe3jGUYzm5Ri3d2k4ybnK7d9dbn7pk2Bp4Kn7OvGPtJuPuPdK8dddmt3+Ju+CtQN/pwb7P5E0akHK/MSueMnA2n+EZzg5yTwDXdbvYN8ZcW2ehGA21fvDr0bjk+nqa044jounlkiVRgblXGcH+LORggjGPXryePMvFjbwdTmk8uAJvlG7jaoPzEZ47jn8TxXr5RiZUlNXjN3V10vFx919dFa9krderfuYChHEY68acZQdVQgpLmcpJw030urrrsl2PkL9prUta1K3Nxp95GwKKv2Rn2AsgOXVMtlhnIzgE59q/Dz46az418YanN4FWdphK4sbS4uF2RxW8jDmV8/wDLHGUz6noTx+r3x4+I+hSXCmCYTwWt4IZkBwZUyVkUc5LLjCkcYJwM1+Zvxn8d/D/TvEn9uac8cyCeP/RN2ZopiRuVRjJLN0XjbjqetfbZFTniqlSpOm5R5+aEmm2rqL5b7csWm110v1P9A/BTKJ4TCYZSwD5lQlOjJ0bxjUcY8id9Ulf0stbWPlX41+B7T4B/CDULzWvEFtqF9e2kqNAkpuJLwToCyDc48lkz8qjdycDAOK/npk8Y+RrfiO6tru5htdWkuH8tWKl/Mdgd4xkkdM44BwOMV+mf7eHxT1jxdqken2UWowWlsYVmt5wwghSVThQN21y4Gex4446/kNrdu9jeyQyhUYOWwg4AbkcduMfLng9+9fouBoqMFTatzWfuuydndaLRbptaefn/AFPg4YrLMuhWxDhKvVtVmoJQgudwtBRi0k4au6d738iN5DJOXUIquzkMOACTxjPOSSMke2Rxiuz06SQaa8TISWU5YqCQwwFbP90Ek/TtiuAgkDyKFz8rAjJG3ORkbT2Pf+XGa9M08mSwWPcu6QcDHKnqqnOBkDjP58g13zpuKWiabT1WqSSd9n809O3d+vkDVerial7r2cVy6NNtq6XXdWV3dXd27lCxkm38MqlJMAjOVbqMEY6juRj0xXqOmwyXOkzNLGdxZ9wZiASo+Qrwdoxkkcg/ia4zTNLaKYtOrCTG9flyGXGAeD0XuSOM8ACvUxLZ2nhqUFo0leJmUAqSxHReo5PPHf8ACvMxFT3oqFnOUo6p7O61VnrZJdOmytY+uy6nUjhpOfNJ80Uqck3KLuk5WS0TTVmt++x4FrFqWvZpSwXEmHB4w3HA/vE4z24qgQuFAxkAA7RjPqfTPv8An047eTTY57Ka5wWlkZiqjnPoGPGDjj8ga5CBFE3lsQp3kHPTIY9e2c8YwOeOMV6lCftLtJtr3bpLV2T3d9L/AJrc8irh1Cty2jH2krSk27K3L1tbslpp8zb0WxeeaB2CiLcS5PDA/wARHPTp9R7VueI9a+xWxtoyhkA8skgYGB8jIcYJwW9MflUcDQ2Nmsz4ZsnOTjC8ZCLzkenpg+vHAeIL37bcBsu6DhQRkE9MYJ7A8nHXiuilzKb3s2lzW1jqr+8m9fLe/YyzOpHBYOUofxU7U1e13LlV21ut3pte5zV3PJcvI7sWdnJJxkYPOeMfiD+GMVnCNQGBwzk8Yx+Oc55wBgHPfntVyUrGMKTk9R2OOuPf+XA4qSw0u/1J5FsreSd4kaRwiknAznAAJLHnAAya9OHSEVLWySV3Ju6vp+tuurtqflmKi61a8k6tW75tOZe8rq1uve+yXmyjGNu09zu47cHjp1/T6DFX4XO0gAEkkFse3Q+g9MDOf0qyJLE7RuhjeMl
XQjDKwPKkEAggjBH/AOqp7c5VlxjIyCeMHj6kZx649aJySbe7tZ9+idr2+b032DCqUJqCuklqrO8tk1+l3t6I1LdnC4BwT6fr15I9MDk8dav26o25WJ3E55GAMnnn1/AfUniqMKAoSWCyAjCkjoeCRxgj1HXnPWtC0jBl2k9cZHY9Oc8dODjj345rJzb+FO943e3Zf5delj6vBJ3pKUOaLaiuZ6pNLddNNm09N9rE0B8iVjkBl4UsOBnABB6DPbkkHgZr0TS7yWe3t7Xc7yBgzH1jBBJAJ5ZcjPPcdRXBTiIOojOQCA+ORnHBwPvE5J6/pjPW+Hb2GyurfzwsscgCknOY2bpIR3C8ZHb1458/FSadlLlT0bjsldavuvV7aLXb6XK26NerRjJODnFryfu+69U9OmjXXpp9ReANB03UESe5icNGodIy2FeXIyX4BVQcZU5J6ZFfVVl4P0TVND8iZYnmR9zyxuFeZhgoFYjmJegUcDnk8V8Y+H9QvJbuztLG6ZXuZQjOr+Xg5+ZnByHjxjP3ccepr7c+H9nDqdtZ6ZLCRcNG6xXCuUDIcBXZyPli4PJGeuK2wCliOaEVFxT96aTi200tdLJbXvpv3O3NoV6NH6xDEuhy8tRxi3zQiuW8lbdeSZ4Z4j8Gm01CaG3WKcQBkItjuDbeSXbADq2cE4/dkdCCMeWXWkSW9w0yRtA9ureW5XATtuLfLvUdCDtzxyMZr7Kv/BusaRqVw9xbxXdsTL5axPmGYYG6SOQqS0co25yCPl9ufFvEapdancQQ6aLEBXVbeFfkBOC0rORyOMlAOTxnGRW2Y0fZqDUdVZe6tJP3dHbXVu+q+Z7WT5nOv7GEeXER9hCU6qa1TUFJuN+jev8AwEeTSaNDLpzy3FyfPnXiZyMNIw5Jj5+ToDz6fj5ldeHhb+aZSsvm5RVRSMF87WxnjHY+5J64r6HTQftFnGpkVYSVCxzttljY5I8rofL4JKnpk8kZrmbr+zNKvo/7QslvLYSBVfpGy9HJODgk4x7gYrxEprXmaTtdcr+JO6vZXS0tpq7PuelU1lU9mpVppOfI0rK1ko3klZWffrfs38uat4ekgJJU7EwecBucgcfxDrjoee+eORudNeCTcCSnDkEZYDOSSMnGPTp6nHT6b8SWVm0t1c2sDCzvW+RZG8wwZ+6qnCnJ52gdOuccnxrVrJoLnALLPtyYWXcqrz8oXPDc9M9/xq1XaaTfdWd3orN/Z39d+vn4dbAYerer7NQqJXmr7P3eaK69ddbW6668RN5RaMRxgMig78cyEYHIx8xPAxkD6k1UmbJfKgZIwMYIY5zjk5/M5/PG09pltwBQhvuMp3dckAZ6HORnn1qaTRZJbSadAodFLxoerqoyxBxwy8EDBzk/MNtaxxFrxS3+bVmuu3fbR+tmeZUw02pOMEoxSaurKSSXS177bt/ec0zEInJY4+bJGRz0Kt0PGehx0NLH8w+Vcgnqeqn09Ocdc/yzVZpBu2sDgFhgjBB5HPY8/rWzYQxmAuFLyb1CjPDKTzwQMEcAEA4J+tap6Jq9m1HRrq1azvrpezfRpHFRX1iryXTjFXk5PWyjFNrm6a2s/mu6WcssMm5V44GSCdpz2Iz+gxnuecdNA0reWrn5G+ZjjduYnOOvcj+R9hXt9Ku2gNwkRVDkgnqMcfgTn8emewmkEkcEYHDqMv3PGOvpxzjtz35rnqu6aT5Um1trJvbbe1le1+nmexhqcqUXTc7wjK2jvZNxvdpu3puvvN2O0SFFulAMcxKMBwVycE47YyCAOn51n3Ija6NmGEsUuFjdsg7n7f0H4/jraQyXMAill2XL7iisCItgAO72LdsDn8M1jX0YS6J5VoZA545JUjvz07Dn73J9OROUU5dklp0kkrXsr2Vlbv3sdc6ySdN6xkkk/L3V5XS21u7bdTi9f0S602dvMjOxmOwr0x1wR2AzgH+vFc+FIUKQQBwc9iCcfjxx16Gvp02Nh4t0OFX
VW1G2VUyMB2jTHAx95sH5m69OMdPHPEvheXR7qWMAmPb5wx1Ef/TQDhSvJAyeDj0r1cNiEqcdXJuMW3p7t7aPpp366fL57MMoUqs6lC93HmnSkle103KLjpZ9m72XocEyvGQSOMY5wQcjOR36cf5waUwaRssvK9eOe+B+n456VsupcjPA6cAjnv7+nr9MVq6bpcVwxabYFRdwD/KHUfe+Y85HULjnJ55OfQWI5VzbNJNrv00svu7K97at/N1MsniH7KE37O95Skk+XRa23SvtZrd90zz50IbJz1PHTk9iP5VA7DaSvDDj0zkfeyT7e3HtxXa6vp0JaV4kZFU4yMHcw4JxxxnB74GOcYrkGiHKtkYyehyDnrkH8e4GTwK6KWIjUV2lrZu9tny7a262fmrt2R8jmmV1sHN03aUJp8s1pfVfzWcb+mt1o+laMuD88gJA4XsSfU5xjAOeOemBimSFuo4OSenTPXj04H0xUgQq4DdDyD9OADxz+Z/rSzDK8E9uOOfr644Gew9BxXXTaT11V91p0XlvZa2v1v2fgewcaU03JSi7Xv7ySS8k77u9vPppTbaQdx+bOQwwCMY4/D6Y/ma0yhdzqTgj5h0PPB9OuG49sZPWrDjYM5AJB9eh56/UH1wfQVAxTkg5YjPTvnjGPQ/Xtk967qUmtL+69b/crX3s9fne19j57H0FOEouydld6c1+jUt3vrvdaaGTcLFIuxgVGOiqAMck9hg89ADnn3rzDXrHyrlmUHy2OBwApPOfqSD3wPQV63JAWjLY7/eGc9TgEDHX/gOc5zXKa/ZLPbs4BGzDKCMcDrg8ZOf5/QV6mBxTw+JpWbcZe5JK7Su4+90e/VPz3P5w8W+DoZtk2InShath1OtCSV23FXbvZ8t9b+m3Q8yQ46gEgn3PbA+nUkD39Kc5G7IIAPzE8ck5ySO2BnJ6c5Hs51Cv0IOcgscknnJPQEfjj8aiYZLE5Y9uPpkDpxyPQivtotNRlprbo3o9X8rd/Lc/hfF4edFzoytzQcoytq0+Za9L3tbbb5DuR90BlHzdvXkA8YA75zxkZwaWMPnJxk7sg5AJOBjsDx1Oec9u6KduSRwBg88AnG0HAycDvnnjnik5z1+Xr945JIyAAPQdB168Vryvbo7W8tr9Ffa9uur3seVKyfXp1/q99LW72t2lB2sTwDztBGQAeuODke/v3qQOxY4I6HI7Z74znrjOeeO1RkDAJwTjAPcHtnnkAHnjpxgjGRAvHzYZiRwcdPUnt+PHTtms2l1XRdfRrb7/AC+8I1OVp9G1/muv3euvlKmQM5IIJBPBGSeOc5B68jk9s1dTDBVABYYIJ6kA84P+eCeema6IvDMTjkYGSccgH69c8559OTYA4DKCFAzkcEe5/Dp+QxXK7Xct9NU+rTW973t53umvn7OApXUW27vV383Hb71vvZfNxIJwWOOSRj+PsD6g89+3AqwqjaCQFJG4Hrg5/LA6+v8AWsFbJAB28YJ4yQc/nxjI6+5HNlCuAG+UDoOnIxnOOxGOKxct3boravRWX4P56an0eGo8yce6dnunt5Lra777O+hYt1wFBbLdQxzzk8DcT+HP14HXdtEkdwzEgAgAAAgZ4zjIwB1PqDWMmwsgHzKMcLzvbjGemeT7D866LTtjFVOSrYIUdQR2Jz0GeRzjgk4rzsTOye7enpe6tvbV/wCR9dlFC8ownbmTV3tp7u+uvqtfmrndaIBFJGZXXlxuy3UHpjjOD/nnivWNIZZGZVlVQBxHngjsRjkZPqDjgV43aN+9j4wYyCA2ME9hn14wPTv1Neh6NI4eSWTcpUBFHRSf7uOcYxz19R7fL42PMnJrl200ercVon0W+nnsfq+TyhHlpSV4xaak7tWSjpf16barV6HqdtGiBlcMjnBjJO7cT3B7f5PWu20yKP7OXmiBfGUdSMg9MnA57H2J6V5ot04WAoDnb8xPOM9+nB/un/Cu38PyzFlJlDQJy244K+nPU8+
mPfjFeNZxv89LOz1SVu21tdnfoj6enNWsru8rq+yWmi6W8r/Kx6pocTL5bPuCsmARknBx1GcnHHX8u1euafJBFYCNlDvIoAJABDjv16j/AArzLwwnmzSEklQpEa9gWHUercZxk+tenWmmXRs1McJVo85ct8zY6E5GAT7YOMValZaK+vR3eu9vXT0Wx6NBu6u/KKe2jT30stNXfob+mmW3zvcFXTOM5KgHgEZ5OSMDjHH4dl4bvVeK6t3XCtvdHwTg9VDMffODxxjoK81iuWjaPzC21Ad477xxg+vIwP19u68P6jCLG4dkDGQEAKB+7A6DjnjA4Hp61mqlruykla6elr2+/fqtPPQ9nmjaC5bttOLWyd1zO+raimrO9r+R33he+lhS5WaVRsmLKM4Y4JwF65HHOPTGAcV1Fr4hkt1uHkwH2v5RAyASPlGP7x/HJIxjkV5ZpMs4hnKlnbLFZeSpBOAcc/dx+vbFdlpiNNZyvKqtOT94jBA65Uf0z16Ec1nGespXS91rl0utVo03bfXZa6rqbwUFBczk25WstbSVnq7/APB7a3OlXV7IaVMswLX0ymRy3IDNkrgjuB6dePUV49441o23g3xXcGRFa20K/LlwCDK6ZVFBIyR3bOcY7VvyX0wuJreNGklWGRkY/ciUDJLnkcZGPT9a8L+L2qQ6V8MfGVxO5Zp9MuCjHJDMVIaMDIyGY4HPbPI4r0sJFyq0otK7XKrbty5U9Ot/nte6TPkuMsRHC5Pjarl8GGqzd221aF97bNXstH6H4gNJu1TWZ22usuoXjMGO4MGlOGzxwM8nHQEY5orM8yOHzGh5zJOx3HJkLyHKgAHAGMZyfSiv06GHlyxStZRjFLTS0Urapv5XP87quPjKtWlzyXPWqT0sk+ad72d3r5+lkmzvLaV7qEKZfLkdgArDhBkc5B468nBPGK6vStIdp3nCmQxtt3HhJEGMuRngL/D9cDpXnuk3UgfYy5QYO4ZKjrjAOOeDkg9O1et6LeebESv3ZCC7YKk4xkIcZIJ7fqTXhYijKGkWrWWiSWnutX1+dt+l9mfmrpcrbdtWnq722s07aX9LLfzPTvDFu5h4eRCq+WgI+V1BwQCTkY7HnOSK77+yJHjduGRWERP8b5HLhufu/wAIx82egrm/DM8DiNCoYBfMjkjJThf4JBjgn1PJIbgd/R7XULfyimYBHK6q7Kd7xhcjdjgkqWO8cdsnivKeJqUnaLaUVq973cXp92zT2RjOipO61babdnzctknZ2Stpbbtpoea61ZKIWiJZFSNl3cl93ZGHOWOeMYxz+HgfiOCZpHwobYzAoTk7Qecj36A5HTpgc/Vmu2NtJE8is0avGo3A7wynPz54wrkcryRxk+vgXiDTAZZotrhnBkVkbcgzyCT3IGew4z713YfHQa5mlzJxfLJPTa+vrroY1cPbmS+C6s1a7cklr21e687djw+6SRVMbKAOrquT8/YDqQB2I5PtWBcQSPIMAkbD5gbHI64B7ZA4PJGD15rv72ySPzdrglQFfIAJY5+YE9Mdj0BznrXPmyYTOytI67eRkYIJwcHHbPH1x6V61GveXMmulrJ9baXa6/NPe9tDB02pXSa5bJtu2yWie7809durOYhsw4EvlMCSBt3YbcM4AUjlfbPPfgCrzabOzMsaKWADzAkYCnqGzwWPHA6Y6c8dNbWCNncFGHCIpBEiA54DZ4I+nGc1tLpaTSxhFyzOqTqr5wF6qQFzkZGemADx3qqmMu7aRsnduzX2b7Wtv/w2xlVm4JXvdat+btpo+vXrdLR2OIj0mSYEmCTY527UXapA6hD2IPT1yeRV+0sfKdYTiMA7pw3DrjoSOfvdiOuD6c+22PhqGOxkllVQmcKpyFAboA3JWQcljgnBxjiuO1i0SCVPKhUhMoZioEuMjYknd0OTg8fhXPDGOcuVapvlUr3vfTRfpfd22Z1YJOo3qrqN4rq/nppb5fcjM0+Y2uE+8qhlBAHDEnY
WJ++W5z0z27V9F/CP4e6v42vLO4VGNusgaSQgqQFIBVOCAx4HQ8Yrifhx8L7vxxrOnwlWGliSKW4YIRlkyWQsMgBcjAOcAk4zX7BfCH4V2PhfTLeC2iijVUEi4QFncAYV2x91/wAOeveuTHYyhhoNJxdbblktlZatXtfffZXvokfb8P8ADNfNMRTlKlOOGjJObdrSvb4W7J6eul77mv8AC/4WWemWdoi2yRXEcSonyA4lwu8yy8ZHTAwQRkHkYr7N8DeEVtUgWC0iW5kcOJmGDubB3sSCFyFOeMe3eub8K6E1t9naWFVG4MeQSgYYHIBJY4BOcDA719DaBaxOLdoQRPGVRwQdrL0GOmOhIPODX5VxDnFRKVKnKMoyb6qSS0vbWyj8u+ySP6Y4UyChhKMX7KMIwgowTXxNRjq13stNm7J9Wdloml3FnEJABc3KjafLPyDGMP1wuMEnr7etemaHHMzRLkH5y0rhg21V25QLgYYZwSD+WeMLTIlYxRRh4VddtxIcESMOwGerEEZyOOcdq7nTdOEckIUrmQjBUEKEJ+6PUkd+vtxmvzDE4yUruTs23a3pra2nb9bM/RoUKcVFW+ytejdo72sru1rW8lseq6FpquILhJkMDbf3LLjdn+JhntjJ9sYzXs+hwSKREqLHFtCxnOByAOPc4+Y+wODXmGiLBHt3hnkMYART8uUAHIxgHPJ9D+VesaGQdj5YNHgrEVJIXv8ALkZJ/Svn69SdnzXlu0ls9tUlq+u++9zodNQSSW0bbe83dW1d7XtdK/T7++s7O6iCyFhKDgYHIVSQDk57Y44/MV2GmpKreVGu5jht+MlAO3bnryT+XOedi1WOaExoqxlFAfCbTnrnrnPGSM/qK1NOM+xZEmZFDbWYnBZecLgjJYjPOeeO3TlhNy+196d3s99NdF389DklS5l71lLvJPVaaq1tbbaO+lnod0mnx3E8Tl1IG0Sr0JxwTjufx6c9q3oprOzcxCZGVRtVSeSw4Axzkep9u/fhIrx8hXfywqkKM5Zs4+f17ehz0zxxZiW3mkLNc/OjgurZznnBJPBzjp9fc110ptWkuVu+t2l12drq+it326a8/wBWulGcna2iinrsrvffpaz130O7XU5Q7Mm3YBjDYG4HqFHPXsO/GM8YtQXVu0MvnRiIkMUDDLE4yGU5OQcnuP8AZ4BA4iSYyKyRL5jKMLIWyCSDjBwcNgDgcevYVV3akAqXDOEB2rKM5KjoHIx06Dj257bPE1NYuDqNxSWiTVmtXbo773Xd20KhgabcVzRhbdJ6t3jpbW7dtnrr6W6pr/yvMVJV6MQTxuI/hUH+90GDyck9KwrnxWyRvG6qqg/MuAWOTg+uPfsD9aydShuvL+SQ8gMkignnPOD79CfwxXM/Zr04WQboyTvZmCsPUA4PGOeeD156j5rHVMc6jjFSp2SWl/eTtZW9d3otttz6PBZdg3Hnny1JJptSWt0k+l/Sz1v6Hdwauklv5kboQ2S/zAuCewB5INW7TUzCA0xdVY7hs5BHvyMHv3715Te2l9anFkXdcBs5OdvU4IzwpBB6k5xxWrpWuujLBecSRqQoYA46dfUH6AnHJrhpTmmlVTpzS5VN3V5PltJbaWbd/wAO/oTwNOVNyoOE7yvKkkuZRXKtdWr7rb8L3+g9F8RKzKRJJhSdhJI/XnhhkZx3Hrz6fo3irZIyyCSMMoAdlzwfvEDI4IxyCee4I5+XLLxDCk6INoCnJ4O0ZGQBzwM9OW29wc16TY69EwiAmjlBHzFSBtAx1Pbb7D8a+2yHPK9CMYxxMV7NpWtrP4d7u703dkvyPk81ySnWV5YdpNJ3f2W2muV2tfq0+x7tqF5bzBbpGB3gEgHBDDOW4PQ5GQ3sMjPHmOuBZ5i2zexADhQMKoyc8E5Vgck9gBk9cRQXyyMn+klonGJArE4B+9/THJwPc1V1CUWc3miZntnTKFvlZVxgoepwfQ+nfnH1OJx0sbC7pcukW7S
i7vRt7O68rO1+x5mDy94SooxcpNRUYqza5fdfKtPi6b2t6tHPLcxwNcIGSSBkcbWHG7gHbzww5MZ5x37mvJ9Z1h7CWVJZSkKyMYpDkhec4LHHXp07emMdt4mkRIJLyy4Vcs+1sqCepYfeJz1J+96da+f/ABTrUr2rs4JCnMgHzhhnnA4wo6Z7cYzXy2ZXSt7seXSNndtO2+rVktl0sfoGQ5e6k3JaxnyxkppJx+FtvSyetuvbQXxfPBqkSXKujyIFbdGNrFcHgKpO4DvyD3wO3nsevzaVJMr3DNGyfKzEhSDnjP8ACRjp79arPr1qwESTBVK5jiZ+QeSYyffoB29eprzLxFrtvLdLC2YgpcE8qC4wQxbPOfoRXylStepZJuXyS6ejb1vp16s/TMuyyaSo1IylRSWlvet7tmk09La2stdfI9MbxAt5a3KSsTHJGQCq5Ecn8LFcjBBPB554rwPxTJPvmhDZVm4dxkOrEnBOeM+gyR+II2I9UuLZDc+cJ4nYqfLI+WNMZZhz8ozyeccHkk1yXifU4zZTTqN0hYMjnlI95+YEDgAcYPbPQ9uuNB1YKo1dx23v9nq2vT77I+iwWGWGr2gvcm0o907Rto7bW1ja6bsfLPjV/wCytUub0ARBXMnm4wFUnDhDnpkDjA9vflb3xpZXVqsKOzvBH5SIh2FmHRuWO4nnn8ccitn4xXNubaOe0l/eSRoLlWbKbmB3LtPQAj16kAYxz8hx+IpZNRitJ4mjgXf5EsRy8kY+9IoyCUPGSclccA10YTCvEpt3bhpK+mllaye++iv5n1boxnRjOpdyilZW2Xur3k3drd63Vt9ND3a617dZXMKXb7DE/wBpXJXMijPlkcnkHA5PUjIr5B+KupFdN1IzSPLD5Mim0TDNHboCcMM8EE5J6n8iPao9btHtLwtPtRFIDSDYssh+5yfvsMcjv2xg4+VvjLryx2d/5Uw82SzaPzTgxPuVsh2JGNwGM4O3GAea/Q+GMH/tMI2fRXtduzjZ3tbR7PfZb3v8DxfiY4fAYi0rRUJzvo7Winp5Jqyt3SfRn4mftEXsF7r9wFJ+zRzvIYcfOsxJIcvkk7SD1xkZ59PkeYus7qFwN2Mkk5IPBDDg4Bx24I4xX0T8V5Fu/Ed+WaE4vJN0QJJCAnkn+IEn2DEdT1rwq8tStwxKkoxyidwPQpn5euQMnPr3H9U8Pv2OCpU3eXLCN7tr7ELaaN76rq9rn+ZPHWK+s55jK17t4idnZppKS7WXV6X6dighb5e+flOMfMD1AXHTjrk446ngTFCBuIBxggZ3HI4HH8v8mnLCFJBADbvujBwufTjqe3PbJqwsakYKndtC59T09/TjHtjsR7/tbNt7X9X81/XfbQ+F9s+bWTkrbX2ta+71b3vp03uVC285CHIGOB1xjBHXO7+g55qaGzkuJEUITuI6nhcHofT05zx681s2mlq5UvhRuB4POPpjp04HbPTqekgtIICABul9sA4HcY7+/wD9auaviYxg3GymloneyWm/TdO/nZLYcsQ4RUla8mkot63Vuz89N9/Qi0nw8OZJN2GbDgHO0ccDj/PXFes+EvCkl3ebEhOJJEEbMuQRnHzHHCjpuAGM9CSBXN6XIWVUiQ7iwUIvLB+f4f4snHp0Of4RX2j8Cfh5Pq97aX08aj5kJQknCKRv8lcfO5+XA3AEg4xivhMfjpRlUqVZqKduRPS8raKy87arzVu30eUYSri1GXJJtRTUpXaS0TTWmq6+drn0L8E/h2mmW1jcSWq+dlDG7ABBu7vkZO7nL9cKMCvtOykTSreR2dEnClbZSoWIbAACr5O0nJHRsc+prlNM0220+3so1WJI0hSNkKBpEZV+ZyQVORx9SegwcVb/AFR7kfZ4n8tY2KDb82dhGOcZ3cknIPbk84/Kc7xcsVVutW5Watdacuq0uvlqnbYxzytDDU1S1v1TVmtIrba0mmtG2emeFZrifVbRniCvJcNLFMh5w3Adh2B
weTkn8M190fDfT0iKSuxkLurorrsUhRztbJxjJAx1PtjPxR8M4G1G9hKy4dIwHdoyxG0jleR8/oO59xX3r4LBU2SnYghCq+9eQigbmwOpckYJwV2/xZr8w4lp+1STsra21ak3yvm1u7p9PPfv+XZjV9rNtv3Y2V330666du59Y+ANMiuDGyxlsJ1deRz1JOMbQML6g4r6l8LWCmNFVY1jjbYVDENnrkkjruHPtXiXw9hSCwt55GzHJAViRxndIcE55yCuR82ehGQOlfSHheONY0DhNgxvbbj7wOC+eSRjCsBwc49/y7MMMoKSi0m7yk1dXfu9Gl6d1v0PBq1HFK1lJttO2tnZ9OrVttNbaHsnhu1BiibACAkJ8204GNrZ55PPbnH0x3sTxuVjVAMYBzkEMAMkAcZB/P36Vw+myCG2tt33soQQuDs5A6HnPY9Tntiu00n9/cRgPubcCQVwdvueQcc5yepBweDXzU6OsYyjeUmrqKd91bbrfa2/58yldJXvJvTyTeqXqrdb38z0XQIC2xnGVBwAW/hB6r04I/l3r17To0ATIfCjCsACXbHXGTxnjPavPNKjxHGCVVfvAEgHPdSo5BPpxjjnmvSNKDfKSeCMAZ+ZR6gDrn1JxjAHQ19XleBfJGnPWT1d1fTRq3Zb8z89ex2RjZe9LbyV3e1tl29ba6HYWERADBcccE8gDPccYHb7xOfXkVtBVQfkcjkHrkAdRkjg5PTrnNULZ+FI54AGMfeHJJOSAOxJFX0OcsxzkkdOCOmM546Z7dfTkffYPLMPTpw5Ypzdm5J2tflb06W2u99Cajs7b6adtLaaa6a9fW+rU8cgQBskknAz6dPoevXPsMcZvo+4hiMAjH93J5HA6Adu3b0NZ8Z3YI4IGcDHTPA446fnzyN2anaTaAobLFiSPTByCfY5HX6nHNetCjGhBWmnFtKytvZa7tvf7rW7BBaq700bffVW18vVO3TTXSjcbiGYDI4ByMDjA5z3zj61cTBxznCkHI4OOdvHfH0z/PIh5fJIIIxg8g4HXvxzz6/lm6si4BPJz0Bxx+A4zx+PrxV4bGyjJqdvZKaS5pJ9rabvdX3XXodNOXLZdL2bT22bfnbtf0NVAOGO4D+Fefm6/r3/ACHvT2YrjbgnGMg8g57nvxjnr6Y6CssqmMHDfLjaQcsMZzk9h0GD06ZPdu5WcFSQp6nJxu4yPbB6Z7104yrCpTUaco82jcr3cUuV2tvr12tezPVoNtLZ3s9vxvrbfT8uisSMWQ/3gQOeoJyMnPQH+XXio0ZlUFgwIBAAPJ4BBPUEdePy9akAUqSQCzHJxznA4yfbrg/jwc1GUYH5dpJ7YPHXIAI7ev5Z6H5/GTnSTc09G+XW3NayWutrXVraLp2PdwtNvlT3aivVtxSWnd322IpJ9qlgwQqOOON3rjPqOSCeRnvXxf8AtLeFda8SQLOFd4reSCeMhm2ARMSw2jpkH3yR2I3D7BviYR5jrtyQWxj5FBzk4646e33gSa8Y+LeuwP4b1KG2COz2xBkKDhlzgA87T/ePTvivy3iunWzJ0lKMlKnVhy0km47K0pOLSSUWm13T7n6zwhhZ0MVhK0aUJ881TlKcU0ublWt07NXTT72d1ZX4v4Vy21joGl2xY7hEQckM77QA2PbkjHbPOcivaIHhvCULBAAoUAhSMDII6/X64xjv4H8CbC81fQDf3ca77aaeG3O4lkiDDLcgcEY2jqfYjB73xDrUnhnU7RJIz5FwSglBJVGfABYDjA54OQaeV4qrQwsaNenGMYWhzQi1ZRSikubVt7t9W1ppZ/rdHBSr13h4STxqTkoxfNazVkua1nbVrez0u7HUaxc2WnedHcXIVFh3u0zAK+RklSSBg9RnqM818B/Fj9qDwX4P1DW/Cuo38NxPdWE0NtHHhnBlUiPA3ZBY9GA4I7g5ruP2nviXJoGgWyWd8sEmobIzLz+73dSTnK9eDg4GeD2/CX9oTSvFPij4i+H
vGMJaS0soobZ5iWEM+05YyopKuVGcSE/LnGDk4+g4cjGtjJe2koUZ86pyaXvOPLpfZSk1bRNvtsf0R4TeGWFzp0sZnFbkpyvOlFvkk61JxklzNXXM01bdq1nrY9p8ZeNvDF1perajqNyEaWeRrSKRs4Mm8qFZiBxnlgufQev4v/GLxZqS6nrt5BdrYR6VdPd27yZMV8I2JZ45ScNJGCoCBQGLfhX0R+0X401fShZ2ejiMqiqZCnJWUAEqYtwwSScOSfpzX57fEvxNqd1ob3N8VuF1AP58Jwv2TGA3y/Nhmzk89sHvX65laWFpwdKCUajScZO9rW6Wtta1/wDK/wDolwJwpSyzB0KsGp06sYJRk7uNlBe7tFOyvZ+aPJ/i18X9H8a+Fb9JjOus+Yk8l0q73mliLKAx/hQYwVzzkdOa+ANYvbm+vpLm4dXkdsAdTwCPmHPJz1z1P5e369pt0lvcpA8ItZ5SCFYM/lZJDgEHOO/twR6eI6nbfZrlkJ3bSCcMCckkE89GwBkDOO5PQfWYWSk73i0tVZOyjo2vK6drdHY97iShChThTotxg5qU9Ve7kuWMm2+ZLXSyS3smitGHhdP7xYHjBx6AjueuRntxya77TL3zZIBMrFVBZ9oxuI529McY59jn2rh4Y/MdGzksVwx4K88/X35OeM9a7bS7TE0Y3L/rFBcYKlP4fwb0HPHOSK7K0o8ivZSUH0trpu1rZNWtv1fU4ckhi4YqLppvD88dOdPm96m1orXbvdXv23PTDtkgtbiMOrsoHmMeHIA4HqD7HnGPTGVPLPNNHbsT5e4DywSFTJOeOfpgenPv6LZ6dA+gm4mA3xRFY8gZcgDIDcYHpwcnnI4zyFisFxdzKRl4ySSSCFxwCW46dD2HHcGvBVTkveKTezas1drVPV2vf08ldH69hqM4zUXa1RRbVrNO0dfTR6vTst0dJZ2tp/ZCRpCC75QqSAXLYyTz+ueOw9fG76xMevm0iUIHmBA6gZy2COBj6senJBOa9ldVS0cxsARnafcYwFHGc55b049CfJb2Z7bW/PddzI/LsACF5+YjtjHUE9enIz24GcpOUfhcYvW61uldvTdadXbTXqeTmlCNLS1ourGbkrOzXLdX1a36rp9+r4gtxZ2CjcCQqYYD2+YZ6ew4OPbNeT3bqWLLyCTgHBYZx34GfU4H8ie513VDdMyK2+PaCD2z7D69R698dOImiVmJ6ADp09ye/X06cnpXsUU0mnp5K++mr3WrT66r5nyee1Pa8kYPSMIppu1nZK+ja6dr7fLGuI33En25JJIGOBnr+Fe6fA34jeFPhvq+oap4j8NW/iRprZobWG4XzFidlYb0UjAkQnMbnIXLYBrxWZgV2gZJGDk/kT75APf8gDVEq5xg4I5IznAznoeDwOePTNejRqTouNSnJRmkrPfVJa22v2enXyPgqsYUa05Kl7dSvzRlfl1sndxlF3SetpJaXudf401zT/EPiLWNY03TIdLs9RuJZbayTn7OrNxzwQ2CecYPGQOK5iDOABwV5IHH45zzz178Dg4qqrICcguegI4wecjjORz7E9OmKtRSLGflDc8cnIycDj06fj3J75TTd5Sk5Su2213a3fXV6fPqZ0JRlOMnKMElyyUW4tK6ailayasle7Vlq76m1bRu4Yg7unGByD1wTkt0GOhPPNaMGxXAAJzkMQMEE5Jxz3I/Pn60bBt67Q2wgEjnGTjgE8ZIx09fStCCEZ3iTLA8/L1469+Aee3UccmsW0o2d03dPXVWa7frvo7vU+zwlOPsqM4J1FJLVNaaR0d9dPTvbYARHKwZHBORGSDsYdQc+x789eOwre0tEW4t2ZQ2JBn68e2ev4A49qxXMjzxqdqhRt3KNoYnHJ7ZIByRg8jNdVpFiDI7AKzE7o2POVJ+Zj028AYA5A7Z4rz8TZppptNXbvtql/na1/Rnr5bCcqzcVf2c4ydS9vdvBqMrX5lfrbpprofRvhP+zpNR8PM
Y1fZNDv4+aaTcvyMcDqeCe/Ffp5YeGLzX10fU7HRVtJbfTYDqU8C+T55iUAMVGQzgHlgRuJ5UV+eHwY8E3HiXULeCF7WO6tl+028csgiaZVwdyTkfI8QztTaS+4jK4r9Zvgl4st0MHgzUdK/tQJNHFf7RuvZAflVdw5EcRGWAyGDDJByD7mQUFUupe5CrpTUWnJuKi2rbpS721tujHizFypUFPDXrVsNTc6tLnbiqVk5NxekopK1m1aXTY4z7NpSXkR1qS5lEURit4nBDQO+AIZFA/wBW+Ml2Pygc8HA818Z+FNCvdRxYwhHaMCQowBMo5jC4UfKBksMnPy8nHP3L8UPgB4g0V9M8QWukTyaFqUKXFwJAQYLWTtFtB3sgPByv0rx7/hWSHVY4ZbF7fTgq+UZ2KyBm48wudzAEnoRg7hmvWxuXYhwXuXilo0tLq2jvs7WduvVnzeRZ/llSjSzHDY7lSozjKnCrFqnUpySnTcItJSjJbPRb3u9fgDVtBgh1JrPzw8AkCtL0KIxOQRwWxjg8A8nI6VzniPw3pK6fclJIJYV2xQKXzM0iA7ZEXGQTzkc9Op7fWvxZ+H9lo2uR7bV4Fgi8lbiUGK1u1cA9cEFo/wC/yG3ZI6186eNLKy061D201qVRgCYW82PGehOBmZckNwevavmZYX3pqaqRlG2yS5tE9Lad9ddXddL/AKbgcwp42lha1GrVftIRmrWSndRbUmnyq77Wtr2PjjX57tLhrRZHSOBi8frsOcMB3Ix1Oe3civN769iF1mV5Jpo8uXkACqOB15LZ7HHBzk9h9AeNdItC8FxZQFVUMLgxuZMu4X92gwOTgnJ6ZGBjNfO2vWr29zIWiMeCAkUw2MASdwc9T0HB9OeenDVpJSUlB8r5eW72as0ndNb2/HuelWS5FNR5U73SaT5rrfvo9W2tb6FC6uba4mNwgKEqA2SMCQfxDgZz16DPP4kEs81xbWY4M0mCXPDK33SfYDJOT0IHFc7cy4b92QOc/K25QRxlTgcDPXscE5zmltbyRLiB1JDoy4JbJY5ODgdB2xzgcHmuSrD3lO7STXMl1jp2W2/ZWa10ueZHEwdX2LhFJppap3uktb6Xu01bXrfQh8T6c1hfsqqVRlVgRwGJBJyueMnofb6VHpExWQ/MTsQlkIAVgODnkYIzxxnvxXaeILK41PTjeyRYeJRuk6nEeBgHAwQDyOvTBwTXnlmzxsyhgoIIJYjjGOQecc8kcgjrkjI3UotNRTUGk1zLXZbd1fTTb0PEqUXg8fz6+yqpfD30UrJbb37dWuh67p11DJZiEM67xyu4kE9lUkZwfXHbsKqyWW4MpJJ+YFVB3AgfxE9T1z/s9uDWHoV6Wb52UyJwjfeG0DpwcfQHkcn69vKsK28U6ECS4JD5ORnHzH0XPGPxHUVyzk+fl3UVd3dne6aettX5JLa7e57lCNOtRU6d+SorybSteKSa67dNNPy46BhaXZQyEA5Udfkz2JGPlOOv0xXR/YRdxl8B3cjOOGJHcDA+Ug8e4rIns/Nd/LB81DuYcEsoJySOBjJOTk/XjjUs52QDBIlVQpRjgMR06A/n+npk2+Zt81mlq2k2/dta2jsns/vNVRdSKjZpQsoXWrslfXrqrb6b3Et7x9D1CPDMI225wTtIb727k4xjleckDmui1W1tNbgM6u0pb5UPTapABXGCRk5+U5xjqe3D6uZ5XeRkSORflQEche2BzwcZJBHTNavh7URG8XmKRtVsMxOXYfddf7oHPGDkDqMVjB1IN8t7SknZv3m4tSW+iT16OzRFGTVapSnFylCKSlNK8o283f5+mmhyWq+F7i1PmQLmIgNsx84Bz0GBnv6Hpuz1rGlV4GAUsFGMHJHB4IPTgcZB6nNe6Xd1bXdmkrRoJEdoy/B3quAxYAcPgr83OeOODXBeINJgjjE8P3XVC8YILAnJwp4OMH26/hXoUcVPl/eKMrpLmSV9LJXXdafPc56uEhaUqUP
ZyckpJq65k09LaJdrJdG1uc5b6dJqFo0iR+cEB37SQAhA3Zz/ABHpkf3ePSvPtUsDbXDqEKgcrkcgHOcn/D+hr1HT9QWwgkgQ7VmRhKhOMYGM4bv1wcdzknNcNq5klM0gG5JG+XPJUc4PqMDj8fwruw1RqTV3bmdotLy1SWtvP7j5nO8BCph5Tmk5Rl7llrok5aXT3SWr+dzjiE5BHOMj2HU5PYfj/hUMigqTzjqMcYIzx26n9M8cU9w28kd8jH17fTr/ADpoVuoAIIIwcH05A/PH/wCqvcoyemt1pu9bO2y6bJbdelj8wrpPmjyPV8t1G3azdrX31V9PxM6THIIJGcA85HBPJGeP1wevY1Cp2MwXHzHDAgnkZIA54x0PX88VemXJYAYXuD/e/LkNx0x/WqyqQW4bYOSp4wD6jpnOefbj0PoqVuVrfbvppfbR2t2Vt/I+UrxlOrOLi+WLaUndWSSWvu67O1n6W3IFO1WBLbSAGUD0P8J7Hn36cCs++haWBlJ+ReCo6hT0LHkcjIOAeh7VokgAsegOQDgj0C/h9D6c8mnNHHIDztLqOA2eSMc8dRg47jmujmaUJ68yale3o3+t/kfIZzgIYvDYrDzhzKpTnFL4U+ZaPVPskrK68zxDU7d4bhwUCojHBJ6jscfnz147gcZwIZGGSGIO09Bxj5v16fUc9D13iiyZJG+Uk7uW6cZ4ycAnqevrjPGK47J+6CFAwB6591GM54JOcdB9PvstqrE4SlPTmt73a6tZd3tv0fVbL/OTj7JamT55jqMqc4R9rNwutFC/S6u15vXzVx7AYOCSMBiSepHUnPXgjjp055NMBO0sCB9ABgj3+bBPoc849KXcFxtYMCuMjkZP157c4HHbrSLwOVwORtx1PXOPw9vbgnHek9Lpa6a36OO6V7bau3a/n+YVLNrl1V1e66X9N7avt37SL8xByCPmG4+q9M/h69PTkipQF7EZA5PfHfBPTPGcdPXoREo56AKVIx1245OBk4z/AJFSJjOCeWBUZ4z0xjtzgjrx+FQ0ne62W192mreXyWmnRDpJNxum9Vby112tovLo+xNGzbc9AuQMdQcjHp0/z6C6jMflIyCvPH3jjqfbnoOn51AigoMYO3b8q8Dgc56/UY7+mKlHUYBxjI7cnjnH6Hk4OT145Jttu6d7WaWm6T663sr2s9z6fCQfLFzi462VrJW0t0bVra27XW1nKpBIALfL2HOCOmP/ANWAePpZj2qN527yOVbr9B6HoPxxz1qmi7ZPQEYGBxn3Pbd68+p682o/v5fHyDPGQCeO/PHOMDr3rlnLS3e19HfS1v8Ag/jsfSYWnez7NJN6q3u6LXv0v01W5bt48bSccDsSS3XaA2PvH8OnNdNYRy5ilQYUMVycEjdjkgc5OfUg965+MNkeWyqpy+0dOQcgDOM46EYx0x0NdPpUPnHYGOwqzEBsZIx198ntyPpmvNxckou8k3a6tdq2js7Ws7Pp1+bf12VUuWpq7y37aNxd9m12VrdejsdTp8mZVVyqIjAvvOGwpBPI5xz0zyfwr0C0lgOTGyiAgGTHQN/eB4J9c8Hj8a8uKyRDcoJZG2kH+6emT36nk9Diui0i4k2Ku4vvPzKTjPsOuePxxxjGa+fxMVJc0e60et27Xdu66b39Fp97gMSoTjRablb4nt01vpt2frY9Tjuo4I4YxJvVgMSDO7Gc7enYcY9z747TR7iCERzLkqSqiNjzIcjkbeM+nXjsc4ryuGVGSPY3mOBgBfmVQcA9Dnd0JP8A9au/0GRRHEPlGWCgv1XIzkZGAeffn9fImrO73vZ3010VmtNW9tnrp3Po6NZtpbppNLmty/Da7tu30vo9NLnv3hm/hNxGd5jKgEKucAgZ5I45/iHbAGcV9GaPq9leW0du8iAkBQ4UDORg892/3gOxPXn5V8PK0LIAnzEgbuQHRsZKnnO4dDnjn1Ir1rTbkRzpG28MgAUYIG7g5A5wB/eGfcDFYxi
7L3mrvR/a5U1olpZ2VrrT16e3RqaRUo6qzWulmly3XRWum9UvO56vrPh+H91NZs8iSRrv3LgAsOM9c4IHcYzzxzVTR9KvtPM6TH90+QOcgAjJ5A447EfoBV/TdRuykdrgT5CsEfhXC4z+PIx9PrXXXF1YyR+XPEYJCieWiEfM65GGGOQfQ9e/SrdOMfeTt3lf4npZP1+d99Ez0IzlCcGpJp7xVrJJxu929b23s9drJGdpsotLeSBZS2SeSM7B6emASMdO3PprtNcQ2jtA7OxTeSoI2gn5uQTzyMZ6f+OnBVN88qQ5iU580OME4+7sJztGc4IB3ZPHPOlLcy29kVZdkYwJZCTtIHB56n8B9e4rmjG8m5WfM1tfVe7q/PdP037+lSnFKSadkr7vR3vdW36duiuc7f3V0vlGNyN6mOXnDYc4cOefl4HP8uK+eP2ntQmsvg5rJURQz3bfYoZJDmMBiwKoODubOCeeRXvWrOjXNobRy6SIPOjzyzN0wB0x19sk8cgfJv7bGpRWvw50fS2l8h7y7jeReplZCCAoBBBGSST94nvXt5WlUxuFp2991oK2+l7NvTbd7N3+4/JfFXMVg+Fc4rOSh/slWMW21eU4pJbPXok9r9z8mpo7i3IUorShyCQMgDOSe/AHf24NFWriWQ5aNlIBAcjmRwRwSPUfKCQTjp70V+t0+VRs6alZ2TajdpWt67fh6n+d88Wm03Fu6T0ctL9NPm7+a3tr2FlYyDa+07AyjnIIA65OMjr6c9OnNeg6XdC2MO9ECxSKxYjKkDsyngg9zng4Nc7Z3FuF2sVIB5BHOM5VgM4I/D2JwKlvL1EBYSAAMAU4AIOec9D0GOuOvPNfItSlJ3Tcdbdnor6bPpq01o1vt47jJvVaJaWS7pr71pr1v0PcdJ1aOSRYonVCSWVlUeXkgblznnPGF4A7A556uDUoYnWQkIXXy0L/AHXZ/vBeMJu7k9Mc5PT5hs/EwgkCJKEC4KEkhgffB55J5zkj9OhXxm5kw0/mMoXbD0UqAd5jGeWyRzkZPTpmuGrgp1J3imk/st2/lvsur00267oiTV1uk0nZNO1rbKyt6+fqn9ES6tBNE4eRuCFESEgjaDgRnJyQT8wAPbJHQ+eeIZVkUm3Xc6HDE9Scck+hI6nocZxzXHW/iyIRxykEZVhGpBbYXxuwCy/dxx9Tx0qG/wBbinRwLkIJE+ZWHzZOSCoBG0H05I655NYQwc6c7SjJJSttp06X8+qTIdrayWj6qze1ldtLTra+zt5cjqRMVxvaIYfcCSxKlsjK8kZxnB4z6dwcyPBDFGJY7hg/MR0x1HK+vXt+Lb+czyksispBK4YEspwd3ucjJ79+hqhHOAPLRxlCFyCckr0BOMgnuMfj3r2ox5YJWSkopN3dvLfdq9rWTXUxbvfTRu976q3L6pXWnls9kdbYwLuCkRlz/GF2qD1JY89e3BOf19Q8J6LBJKZJEjberuMLuQMxX53yQSQB6jqO/XxmyuZPMiRwEfepiYMMhlP3c4wQcndkDI9ORXsejatNZxwBVXzSShP9wj7xBI+UNxuPOMcd8c01zONNy+JrbS/ro1brb/gHLXw6qXSV9Y2Ss07WstdPi00Xn6egajY23keWgTYqq4+Xq+D1APGP4Rn9BXLweCZPEmr6bbi1eWGXym3jKZZScxuRuwYwRtyPmyeRtNdV4fFxrcpjA2wyM+XVdykLtDFZBnOcrtJAB5xX178LPh4I0tZpoFOWLKpw0iRkj5skZGex+nBGcOrBUIe0je0UlG6krt21Vkr99mvkfScO5FXxuIjFRfLdXbVuVe6raXSSvf5P1O6+DHwvsPDmm2ii0WScsrugTa4Q7cMOpYA4DAnpjnqB9veFtMwdqwOWLIrEfvNqdoxgDlPXA6/lw3hTTEB8ry/L8qMDIAJIGNmWx2OcAjIPr1r6H8I6BLGd8W1FGGyyY3f3nPJO4/KOOgFfBZ1mDhGopyTd93u3Ll3el+l
novPdn9LcN5RToQoUacUlS5VJ8rd5WT5u6u736Wdt0ddoulxr5VwrBtqJ50EmSWxkFYo8AAjPXOc5xxXr2jRIqREQlGdgEG0Y5P8AF/dBwOfp+ODplqtikUflodrl3lODuAxgbsnHJ4HTryT09H0aBZpI7jYXG7IYrkR5wC45xkZHzDgZ6ZzX5nj8Q53erW6d79vJWu3vrZLvY/UMHSjRiopSutGk17r91N6X000avrqdJo1hK0ijG1cc7hwSeeDnIORgZ/MV6/oOmjakbKDOv+rZj8wPOMccA5PPYke9ZWiaPGdsrlhEyIQQTyxHJPXjgc5P0r0bR4rZJ8+UCSMiQHnAx8pPbPGccdDgV8pWl7Rt6WT0tpq7Xu3ZbrZb+jPYjL3dk5LVpPbbz112XW3c39C0ea1m3XNuylyWGfnzuxk546gZ6DuBXp+ni0SbasQZkX52Tohx+XzcfQDvzWHp0LFIJV3nehVY3yCowMg9senI9sduos7BlZXVeO6jlW55JPfGecY9ADXNOg53s1zK1pSa2XLdWfolt80iHUVm9HdJWba6xtfu/ne+npqI8bAyKBHkjDepHCocHqecjnHpW/aSuBEZXCxTKT5YIXaw6ID/ALWTge3XNYZjzwzG3wcqEUYHodo6MT1HSrdrazMfMlkN4i52x52sCSOmM5Y88cDpjpXF9VcW2+aMr9ErN6Xd77JbNvzepaUZK83yJWezu27WSbd79vXzNWCRHuEDOnOVMg+YIvHyjkZYYGOOfau1t7SCaOJmO6IjdKAcS7lxhgeflOenGOOvOOLt7MPKBDFIqoQNzDhTkZDA9MH+LkVvNDLC+Re7FOxii/MylQcccYUknIPHA44p0qMouTcVJys7t97XTdnor7LX9M6sITahGq4OK0au3o0t03qu2x3lpb6aY0jhjeNiMK7DO8jB+fPbHQk8gHjip7iGIzFHCxRkHdHF9x2z95cdC2M7cce+cjjItdigAjk+cKAhdztKnnawA6459evHNSx+MIIFkV0imC/LEXGSD6AkjJHrz1464r2aTo06S9o4xls9b320cUvN+Vu3TBYPFJ6Xm7q3M05Ne7q9r3/K+p0ZSzVGgGWRckljhgG7AkcAEdBn0z0rnruO2QMVACdwxwW55A7Ejsc9c9ME1yt/42jnnMZiEXmN+7KDbgDgqxAwASRjqPTgCsubWy7RvO+VzuCJyR064GOe/wBPfFceLkqnLyQ5rWXNyW0Sj33tZX2t2se1hcDiIJSqVKkXJ/Am2n8L3WifTXZ280dgl0ghljSBHjDExJMvzg8ZGTz83Ax34xXJahDuZ5FgCFhklQAE/wBknsPpnGc59NSTVtPW0LtKvKAp82HychfxXjrzn1rl59dBOwkMrgqysQ2D1z0GAOcH1PArycbhZTo35E5LZxSbSSW6STW+t33VrWPYwNKrGTnGk07pNt2v8Ou7TXV+tzBuNRvbGRnLkxnGwk8IBnIHOTnj3zjkCtTR/GzwMFLboWJTB5OTjc2DzkEAZ9eRmsPUFe/EiQuAwYB4nGzCAcMo53g8knIJ44xXB3qXGnzx3MYdHQ71ZeY8nkhs8YGOncHn0r5/Dwr0qntHGUV2irtfCurWy8/uPqaWGpYmmlVhTUuVOzSScvdsnprZ6J9ruyPpey8YvCyTCdmiYlFIBCk8Flk5ODkrjPT8hXUN4/tbuAJOyh1CquzBGEyQpHU4z1z09SK+UoPGyCCVJXQFVUSIEDb3OfvJnhyTnOfl5P0p23iPzJECtgtIXLo+SQDkLnPA5544xk55NfQYbMKlGMXCq5x1vF6tfCnu/LW3Z6tM4nw/TqvmlS5JRfuODto3HW9nbZX27N7o+m9Q1yFkd4XAjmGx4c4CO+QcAc4xz/snnmvAfFM9xb3VwHQCKRGLNzyADtceq8+2MdDnipP4hKSSkTsoDAo3O0uuPnU5O05P3ecnHpVLWNaF5ZxgyeeCirM+N+COFJHGNpIHU9enrOKzB14
8skr2Tjbdq0febb1ei9eyPWyrJ5YWrFQg5KclzXv1cdb7uSWmu3c8E1bW4tKuZPtEiqkpJhkkONwckOC2euAAOAT7/Nny7xRrxubGW8t5SrQOoUL8xlGSBnnOE79d27nBWtn4uafLbWz3kZMzJtnUqSVVMkuknThBjB43EnGMc/ME3iuD7PcQSSSoAvlwR5Ks2c7UcduQcnnIAzjivPp0lNKrFtu8VOKV/ddr6efpbdn61lOBp1acKrbjOHKpQsrNJQ+ez1v22ser6d46ZY0s2uE8tSu5XbDBjkNGFyTuOANuO2PQ1pT+JwUu7OaSKSL+FHAbymIG2XzMjoevA5xwRXxRP41+w6kqvIzCCcsTG7CQKrAx7iB6k7hjGBjoa7qTx/FMoMUwPnLGtzIcAeYvdhk/PzjOcN14wK9yjBQjHSXI0raK2u97X6dbL5Ho4vK4QlCVKF+ZqXNtZuyTsk31vfXS66Mu/ENXuLHUHUeascpYJ0Z9+7Bds8BdvGB39K+ItcvZYL4Trti8l2Xc5LnccB0VMjZggAYJByTivpjxBr108d3+/acPFl0ibEW1Ryobu2ME/Lkfy+TfFN/Ch1KSZRtnV5LdlOU3/wDLJmyOGU7snqTgdMCvTyvCydZcsE1Kz0XS61t3trp06o4sZVeGoT5tNFFO9korl2fRt6PbvYp33iuaSA28jKWjmdVhiO50YcmZFBTqMHnPQ84HHyx8bfGMM2mXSPchPs0EkW/7zBlGHAwy8LleecknkYrU8ReK4LMTMLhlYxGJm+4puTnEjENwnYtjgc96+Kfij4xknt7tI5RKZi67YyTGFfO5iSer4G9xySBha/Z+HMojGcJxitUna1r3UU7p6Kza09XpZn80+JvF1PDYHE041UpKnUgryi05NJapa6Pe6XbofLvia5hvNSvZf9ZmR2BPORnKg+6ZbGMEZI+nmF+AZZJA5z3JJ4ODyO5I4x6c455ruL+4JRpPLxufAUcgK391TyG45bnOR0FcJfnY5YjIPJGcg9evXkZ+7+We37Xl1JQppfDaME+qlolvbS3zWybP8/s/zJ4rFznePPKTm3FW5m3fq7LsrWv5mesgV/mGd2GDHHIxn5e+OOh59DyKkSdCSQwyecEAAHGM8dR369gB7Z8rlS2AOeM9cHvgduOD9Oarh2DA4XHQ9eT1IP6c8ZJ/CvU9lzO7bTslom29n2Wjs+vc8FVL/N3u9d7X83rfe1vwOws9QjhjCu68ZJKjGWbHQEnpjj8T2qcapE7jbIdxIAPQ7s8KM9SODj8M568RLIygtksMj5QSAF7Y/unqTyMjHuadpxubm7jijG5mmCrztYZP3VXnJ5GD6Z45GOLE4a0Jyc+WyvJXWytfZX697fcOF6tSnHlv7yVlo7tpP17re+vU+ofhZ4cuvEmrWkaq7RNKGaQKSisSuT0ztB6nnGR3r9jPhB4OTwr4ft3a3iZpPnjm4BUkDaHkIBH8WOOD68Y+Q/2Yfho8OkaVqtxAjvJIpkjk4wuRlQpT94pHUEryB17foxewrZ21raxmOONIkkWPGAw2gFQgyFdTjaMkHOR04/F8/wAfKviqlGLk6cJ8sJJaKSaSd9PX5rsftmT4GOCyqMqkffnTU4yb1gpKMklderenTd7GFeTPDGxEasskhjOwEJIzdVUf3sAHI4BGTwa5wyzXN7bmHayRKGdSNoYsfnIUE/MMDDEncDzgZqTWrmZHRUkXzQV3c4CAZLl+oDHgB8HGeh61m6YWN1G0nzNLOqgCTAWPPysP9luTyOg5HavnVTU4TndcyVvev7z78yaunt3013PyLiOs6mKrRbvGL5Um0r2tr5dvK59ffCqCUNbyMqlHiVypA85twHzKAAcnAzjjABPevtbwEwWXy5o1YKV8qNTuIbqMt3VRy3HP4Zr4x+F8ql0jct5luSsZjbJZeMIWwAIzjr1/Kvs/wOxjWA+UBK84DHORuX/VhT/CAC3zdCc8Gvg87oudOrJLWKv
HTrpa3dbbbJs/Oszcot7RXkuvutXun0ers9j7e8GvG8VmpTBwrhUOWVyPm8te+Tg9vpjp9M+GjG0VqpjIJjDDIG53B4ZjxlVGMZ4GTXy54DA8iFiQpVstkEKQMF5B67dwCj0z0NfRGgMykOrkRsFWIE4AU/fYHkjPGD+gFfkeLqOEpqburXSabe2iWl7PTRnz0pzc1e73j7zWi0Wl9b3W2/oe02c0btGgLArw+ercfdAweB2OR7ZJxXpPhwgMoIDbRuTIyx3Y6sMHAHQD8DXj2iXO6RRuIRhtRCNyu3fBz+vfjjIzXrmgvGUCgKnOSe5bqcHnAGegzkenFeTQoVKtdTpxf7u8tI6NK3Xvbvp5GtK6aailaXmrJWtZt9d793rsetaW3mOPmxt/hXnAwM8/Lkj6cdycmvQ7CYRoAuAwA75PYc5PXr6/hnJ8z0xjCqsACWUqQRjcDjAPpx34rsbKUMY1XhmIwR6jGQeeO3sO+TjP1+AhUoqM+X3pq9nq1drS+ny00vY6vaaN62vazvq9Nd9tbeuup6TZT5VGLEE52j1OBgA/yXHPfGc1sLM2CpK8A5AAOenJPIyfTjnPpXIW84AXe2WQBgPfnHf684z9DWnDM2A+SA5+bBHPXqM4HIPOfxPFfWUZVadOMW2+ZXtdabdV9zulp6ibi27tpWT33b5bJLa9vXX5HQQyOqblIVWIBwDn1x3HQcfjVqN23/NnDAHJHAA6g54weM/4ViiYhNoffjkYbAA4I/Lr6EYq1HNwu7OQSSd33cYwAVxn8R0780e3UbRlJpaS1Svf3X220W19PUuMuZP1031+Frb/AIK6eZuK6hAAQO4IwP0OOD079+1NSdQW3AlRyR79jnJz7dDnqTwKzlnLKM5AcHaxAwCMdOfQD2HUdDUqBjjDbscsTgZHp+GQcjjntnnnqVHVq2i0rK/NFXSu42SWz0e1tNG/LenurXs9L6LW61e99dlo/wAzYt5XfIBARgcE9zyRkdc+4PataIsIwBtDA/exg59MsQOAcHjI4xjPGPbsUHbHYnHUEdOmMf8A6u9WJptihQR85XaSenv7Dp+p/vY76Tjgoe1qydV2V+a0VFtLvd32t6O7uj1cPdtRvpe1100Xp0et+z7M0nBKkBxu6kjjjrkgHP4g5PXHApiylWUMc8EADAyeMMfUDnB788cVQEki4DEuzDGB6cckE5xhvz9Dk1LJIoVSmCQOp5OckEYwBjJwf0x0HmYnHUcbfllGKhHl5WuZ8zcVbTZ26X0vfY+mwKlz01GzacdLecdXdq3r312Oe8WajNa2jvHtyBhycBVVum4nPGOoxjnHSvi34g+NJfsWordSrFCXkRIj911XuGyMF+nPTBGe9fSXxP12y0mycTXCO1xFl4wRnAzwRnjHfryTjoTX5y/FzxfpUuj6zcPLiCJdvmZ4EwDBgCCNynI4GOcDPTPgYuMXKN4Jt1YxajFS5NIJNyTV3Zt9HfQ/pfgDJXisPhnUoNxlUi1JJpqTcUn5q2r+TXl9jfs4eOLPVdBezhgWJInnEsygf6wFdyk+jDGDxkZ7V614itLG+u5Jb5Ua3C/Iz9Acfw9Pu9cjoSa/Nj9kr4o6dothc2907rHdXM8glOSZdx/doe4XBP8APNfS/jX4g3N5FeXFhqP2e1toXmIUgL8oLbQCRywGCAcEjHrXlYr2VKTox96KlFXataT5W0+Xmd72uvTU+/lwnjaWctUIzoU6rUfrM+ZXbaTcXG78keE/tY6Xp11p73VrG979kkiglh3kxyRgtvZBzjaB97qM4718Zz2/hybwvLb32nGdJNotRIpM0DENyrED5QQMEnI+ma674j/G2z1DR9ebVtTitBamQBJDtdnjLbQwJORJjHB5weRg189+CvifoHjHQ7uCbV44nDypbwIAz26AkJPtyMqcEk547cjNd+XaS9lCk1GDi1JX5YTly2SailvZp7u61Z/XPAXDmZ4bIaEa6xFV4TE039Yg6l/
ZyjCztFau6162dn1Z+eXx30zRrbxVqqSxNOkbhPL8zaqxuGPnB8Nho9vPHUgHrivx8+NviC4t7rVbfT7p1sZL94VRztEYUkBYxzkjnPI4xknt+uv7Uur6JoU+szQ3McsxkMZlkfdHcLMGy0XT5vlOFyduD161+NXjjTIPEdtqWsRylRHcvIsBbMjPuIeRTnhcBcjBDD6c/r+VUqlTD03yOo1CLa3+CMW5PXotXfy0ex/dPBtKSyahUlUq60qVJOrFr3uWLutHq7PWzdt3qeZaFqmn/ZZbLUYWnkmiYROTu8uRurg+56ADqMd68a8TW0cOp3CoWIVtyBxzgk/KOeBz/wDWPOek1S6NnM8EbmOWMhlccHgHBBHc8gj3A69OK1CWe5n82cs8rHJdjwQPbGOeO+M8jngfR4akko1YNK9rx0Wrs29kraa6+pnxDUpTozpxhzTlKDbtF2eiSutU7dH116FWME7Tuw2eBk4OMYwMDJ/Pn9fQ9FlidYUb/WKPnJHUex6jHTPYkDPNcQscZEbbWCAglgfmDDg5P93654xznNd94dtxuRwgYbhgc7mXvkexx0Pfsa3rtyp6X5uitte3RadNum76o4uHsPVp4pQUk4SVOWt3azi00npZOydnf1ues2E73Ok+UpKrGCqc4L/Q9i3Jxjsetccjy217cbAygEquMDcScnOMA4HHTpjHBrrLW7Sz02WMhXt3bKjkOrdgG7N6dePxrirW7MupyxEZVySoHJTBGAxIIz689O9eNKnOb5W03zJO+yS5XHW+rvo9VbW5+lVql1QjKry1uVOLp2V5JRvdW1SS29dTu9Onivo2tnTCxoSGBwXYjpwPujjoeMfhXn3imzZJXljQhuVfv06MD1IPc9gB2NekabZfNE0YIDIWZhkKCOvTjGfqOOeKZrmlR3VtJLt3EAA8jduGc4HPA74OCO/GDvhPaUqjck3yvdXaS91vW6va2tr3202ODEL29N0JSXPNJKSbdnpq/N2Sd/NdmeBLBNJbOcA7cnc3XrjAODk9wDjuOD1550be2eF3Ec8cjn+uff8AKvV7m3igSS3MaorcZx+BbPHX04x64JNcHrOmNaSlwf3cgEkZUnBU54PHByfoB2zXt0MQpyaatdXT3TSS6pba7WduvY+RzfLalKEZpzqKmveknulyLTSztZ/kjlJHYSYCjIP0JI6dO3HX/wDXUNyjKQxPDDOBnPvuA/Dg56jGatsCTvAJbJGCDk9gcdR0HbjjHvGY3nbD8YwCO/HUcYBP5+/fPowqLRpXVn8mrX0veyT39Pn8LXpOpGdNc0pzkuTlVrJ2fvXXd2aXZN2MwAqQV9eQMe/TuB+eM9+9lCxIyMHsCO47nH19cd8cV2ekeHluYkfynklZsKucBV7lgffIB79gDVmbSra3iuUuEVJY/lT+IEk9FII+YYwW5GCcDNVKLcedSXLp1tZ20Wrv6dNEdeGyLEqCqSnGCbT5NHZWT1StaT66dL3aTvyluWAAJ6njBPOOnAP14Iwcniup09kcRuUcnOCVJCHHUZGcD+6c8n1HFc8YShJXOFJyM9PTOOhHXvWtosjLc4JypJJVuV2n7x69yB25/HNcdSSavdXurt2bsrK71SfTW/o7ntZZGdCtTpSdov3dU3FWSaavZWTTWltXZGuVXI2gqqMQC3LAZ6ZIxjrg4+ldL4bljFyivvXsqZOQ5IOVPODnB4znpxzileWaeQZISWSRwAifMVHU544AJ4PYcdMVY0AfY9Stk2JM7OsSO4wg3nJZxn5egwxJPB49OWsorljPlacr3TTvs1qururr8j6mLlRqxcEoqST9za75Um721XRdm9dz7V+GmoDVdU0PTdMP2bVIzHFNcxnyiGUAGeVhxI2M/KOmcZBzX39FpXiD4TeIfCPidWlurbVljkguLT94JhMUXbNb4ylxknKmRsdQxzgfmD4WvE8P61aX8cj26fK0bID5cs3BJBz9zIG3OcknuK/oE/Y28HW
37Tdx4F0nVpobmDw/bW6zvKigRJbMrbFLE7mzje2ARxXt8NxnPEOOrxPtoum7bKDhZK1kk9N18ro+J44zlZPl+IzKpyvB4TCV545OLblD2blq9EldL+ZKTXbT9W/hD8N/H/xB+Fmg6dd+Ep/Ea63p1qbY3KBEsLaVS3nfaGjYgR5/1e0ZOAWHf4y/at/ZZ+O/gjUJ9Z8LeBdZl0vTbeB7x7WA3MAt1DEkMoHmNHyGOARkdc4H9RXwZ0fQvDPgzw94dtbK1s57HT4LaSJERSTEiKGQkAlG6j154yK9Y1Xw9pWt2NzYX9ja3dtcxPHJFPDHIjqy4O5WU5OTkEg+mCMiv6Dhk+XPDwjiaUqk6lOMqkudJKbjHZLZJ3830tc/yjf0pM+4a4qxlTB5Hg3lf12vahiXU/fUZV3H2ySl7JOdNe6knq1Jyu2fxSJ8E/Hfxt+FV/qGpeELyz1PQre4a2821dFuEtVyxdyFMMbdgQ/I65wtfmT/AMIU+l6re6Dq2nSyNDfN5UE8Zfy33OLmJi2NqEhME+nSv7Lv2mYvij8HbfxTpXw4+Etrq3hS8sWj/tS2slkjgiuhILiSWFVBDRDGF3ZOeTiv53fG/wAKdW8Sa/qGqXumLpuo6vPLOJHUWotLliWIEG0lkLHBO4Bcd+tfm+f8N18vryiqSq0MSvaUatNupypuLSai3KCSsrz1bezP7s8DPGupxfhcxni6WAwOXTlSxGXexx0K9WnKsoSqUpU21OmqblezvZN9bn5UeMPh9psd3dxR2rafbmMyRwNlT5y5KmBucEkkhCOg69BXwl8VNPvdO1GVXHnPuKGR+JCUyPnUDPQgBs8kYx2H6ffGDQPF3hjUb6y1WCZb/T7pBbymIyRPChbY+8BUKMp++PoVr5G+JHhmPxXYW+pyW8UF8Ew8JYJcNt4Xy1CkBDklupPAOcc/C4vBVVRqUpwalFq0rdY202u1bTq/yX9Z4TETxOHTeIjWpV6ScKkJJpNpSUovW6aST87a6I+GI45ZUPALBmLHAGAR8wHXrxxjGCeM8UWm6G5jdk3FWDCNugwehJHXODjGM+5rd1rTbjSNRnspSWdHI8zOAVOflkGPmcgYHb8xWYS0RUt1bIOB8qE42jsRnuO+RnuK+YqxclKNrNaNLo1ZbWuvwt17mV1SqxbUpShKKk29Wmkr3fzd7W3vrZnsVxENQ8NROrBXuIMS7MBzKepbHU4GB0JXoM4NeBXFs9tNPG4O6NyhH97k8k9TjAzz16dOPSfD+p3AMVs7b4UkOIi2F+b7uOMHPfv09eeT8UwyQapI7oEM43hW4ByD82ONw4HPAx61spqqqPKuV04RhKL1SenvJ66NJt9b/IzzCMXTlVavTi1KEkk/ispRe/W23XyHaBcRKroSC5B8tWHBbPVc8gL7c8kH37aFlliEUhZ8jKlSRtI5AU+hOOnX2xXkVrPIkkaDCsHCjB4yeQT9e/OOM9eB6Bp1xJvCv94YAA5BLAYAHoPXvmsKlG3M01eUut7pXirppd316/jWR5i6lNUOT4Zunv0fLvrprd6/oae1lmytwwKqcgnBY91APUYBPrwO+amiuYYLtJXRjtUlwc7S3qOx9xx157Vk3byJIFIKykhlY9Np6gg4xj1yfpiopGlEYYOHJBJxyVyQQTkA845xye/rXPKmm1JOL5XbR6X0d7Ja7Pd6+h60qrpubjFtUpJONr3i3G7j8n89Tor2KG/2TQjeCNjhiTvJ7gZHIznrgcmoltDZRF8sz7WByAUjTjCqOueuc+la/h7yLpEQhRLDH5wQgL5mz7wzzk9gO/XtW7qUmmtpdwVVI5QCwjB+dmU/6s9Ao/PjPTFJYadSEqqnCMVeKi5Wd9PhV00+22m+l2tJ1IUqvNGKm6ig3eytHR6rRK2t9d9rI5nTdQs7eQ2sw86OY7iG7K2eQeiEHlTz0INNuooXSSNXdo8sYfmzw2COT37dOO3INecXWoSpMzbio3kDaD8oHqR
nkfh1wO9bNrrCubaOTJYJuU5/ix93b0wM8ZJ5J5xinRg1HlaTUrK93fRJu99Frd7J3+9cc8XTqy5IVOWUmoSitbtW1V+j7202Tet6F1DLBNJvGU9epA/hzk9Bz+tY908DW8kZQByPlxz82Tz/ALLc4/PsTW5qkpDjyzvNx0b+6RnggZGeRjJyR9BXITs4Zg2ckkEnock9DxyeP88V6lCjdKXOnZ3SX8ulr2W/r27qx4eYVY04Sp2d533s0rqK2vfq110dzl502uTuyVIyOvHbnjpxxx+nNdjuIIyCMH37j+var93GFbcoJO47geACQffH4HPsfSmqEnpx1AOMn+Wfp/8AXr2qTXLGS6JXeutrX0XTfV3a80j8oxdKSxNSm07ubsunSzv5PT71uyLZuJBOfqeD27Dk/rUrQo0UiA/PIBjjgMPusTkYOfTOO/PAdIoUdMZII9cHr36+o9u2KiEgjYEHPXDD29OvQ98H2rpVTyX3ejdn59NGtfVnJKlSi3GrBPmTTlrdRdk1126PZ+TMqWEpuUgvIpyDt4KnHHBxgcBcgD8+KZQhgTtAU5OcZBHGeuSB29dxx2zv3KrKiMjfO33ySck9sn0B4+vXrmsWSJgx3ZBBOck4Bzkc8fhx1zx6dFGs6l4t2eyvquitbTrpqloz4/NMIqdSCim6fMlG9muV2sm077b3vbZp3ZgeIoFntnYIS5HJYcORnGB2Pf8AL8PIHi2O4JAZT35PJ6e/t6+55HvV1G13bsrqMIMO3fnjI46Z4Jz/ACrxfV7V7e7lQrgBzg8gEEkcj3OfTOfz+x4exCSnh1JKae3Vv3U2lfXfTr30Z/HXj9w9FTo5pTpyUV7lSyVtbNNu2z1SvdbIyE6HjLA5Gfrxz047jvUhySDgqSVPAAyecnvjPc4/A0KSwKkkADtkDgZ44/i4wOaVjuIDDaQAuVwu7jjBzzyeTx2xX013dp/Nrp1Vr26v9E07H8d1oNTlZPd/g9tvl9/YaQQSMkE8r3BPPXt0x06DAFW41AAO3dtHBA5z+nY//WGKrLngHkqcgnqeB+gyc9AP5XI8sAoI+b7ygjhj1I4xgDrzkZz9MqkrLRaKzel3fTrpo3e1uvZrTbCxV73u72UbbNKPdK9vPRaLzHoVALEELwGHcH07E9jjt61aQh15UnA+8ODxjndnkew6HHUVEFwAAmRjJTqD6luDgd+c5OeeuJSpKlVU4bpsPI9sY4/rjP15Ztu97ae89dtbddb69b3Wuh9NhYytFuSbdnZK29n08ur87uxLEEcn5SynOD0wRjjoSSPTPvirSkMGODlepPTgjtzkjuSfw4qqiMMBMALgMOhyP68YJ5zVuMiNt0memQF5B3cbvcZHPQc46ZA5KjteSemuj6PRK/XX5f5fS4SCsk1ZqWkr3XR6pKza6a3vtoy7a43AqxO0kkL3yBjP5fT27112mKXlUAlGbGeOWY/xAcHJ69sdOtc3b4OCC3OSQpAHrnd2xn069OmK6jSVj3rIcKAMYyScD7xDcfMMjsM5xyea8rGS/dNJXaW++7i1bz/yWh9jlVNe0inbVp3ast46ej1XZaWXbpxbQmMKobKvhw5+bd1CjsA3pz07mprW2mSURCLaM7tzYygXruOcEEHpx9ODWzbQ29xArBVDFchwfmJXGXZjnIPGe/GMDkVMlqC7SLl9vLFuBjowAx34GB1P5V8/Oo2mrtWvbpbZXtdbdraX6an3NGgtJJPprfVXs76a2v6abdb6umWYhQzAqTI5B2ZKPkDJ5PyqOw/lzXoOh2dvNDGZWKF3xlclgwIwQox8w74685OAa5bw/GkrGMsFGChQg4APIGc8EYySAf8A0KvUtE03ylVIdrEEhTtyCG+8ck8SdNr8fRs15lScXJqTbV1K/V2aa9XqumqtrY9vD04+61pF9Xq2k02uru332W1913WlOvk26gN5kOPnPK5OM/8AAgB75PfrXtNhAl5b28qxMrwRAyHZjhe4z94
45PPPHXpXlvhlYbebyLhE2gE7nzlOR8/J+8fx7H0r1a0v28s29m6sXYpIOMCPjjPr+HHXPpSno5S20aV+unnot7pve1tz2KKtD3ldO131aUlZXulu9kvzR1unTrDNb7y3khlEpxyFDD5dw5A7HA5478V0+qGGe6tri2JVMoAuDkH3+uOD69RxzydukvkxzoFJhILRnktg9zjkDOcg46ds118GyQx3EiGMsFURKePw9c4POeOmT2io23ZaRbV1tpZK66276rRnZGUUlZXut4t9GnorNLXrp0uaj2Uk7pMZsbghEYGOQOjEH731/HoRU+rW1x9hmiKsNsWAMZBK4+cA9OvHrjtgAS+akMabcSYI3DdwrHGNuTnPHOM5wfWm3t810JLYOyyPEAij5gB/tHGT7DjuCeRWUUnNpXSj16XutH17vS+nmONVx157OyjK13vq3pdX0OP0uwdLu0QIXkKCTMhBYtnK5BH14FfA37fl1DJfaBYQyBhFHFcBFXCq3Ik3kNzjtxgcjvX6Ki2lt5IhMT5ojAR+VZVAO3J54OMHHIr8l/20dbudT+I0NiZMpp9rHHJFE5KRsqnLEFesueT/ABYHAI4+i4bpSqZvRm7qME52srNpKz6re138lofhPjzjlQ4OxFKN1KviKVJO211GXvP029NrXPjG3jVFCqdzYD/P1O7qB7DHHOPTI5BU1vNhpWMZJ+6COqKMgf8A1jjkHtRX6tCEeVc0VorK8bO3mtdb7/5M/gadWfM9YXWjvda/h6ff5W6JUnjYFGyVKqDkgNu6Bz/Cg555PUnrioLk3TLKjbwrlmAzhCQACASpIGOg+vGOmhpN4t4u1ghDZIwc8jopIHfOSM+ldOdLW5jwgL5VSz9Of4to56j+L24GDXyEqqhK00tGvuve70Svfp06dD06lPkeknNSjtp/db0vq9d9evTVeQTyXIkCIhQqSqkkr6EAnBOACc7cknJHTm3Bd3PkgeYQwbce53Dqy8D6eh98mvQ5/CzyxblhffuZkVcMHVehZsZB54XGTzz1rEudDntg5CEnOGVl+ZSueOnU55PfB69t6eJoyait/lZt2ve+/X+tTzZt3s9fuuttO6t+X44J1ecA4mZSgzgKdqk4ycbhntz/ALOR6Uqa2XdRIXCjPIJwxx8vr8oOeBgds+ksmku4EhViJFwyDgJzgjgcZyOvGfrVCbT5ISESIqEG7kffx3PHXjjHP867oRpzje0W9dt1t26prR2OSTT3bt5v0118+nnZ3NRb2N9jbnB2scsSSN3tnkD2GBkcjip47hN8e5wdhYgqP4hgABu5HPGB+ArmWkmQZIAAzkAAFQOvGevIyR2HJ7Fi3TiIMAD8zYYjduz1AxyMY6duMkDg41cO2nytWvona/fR2fW/+VtVlJPVp2Wj0e/Zdf62t09Hs7hMoCy7eHPynduH8R56+3oMg4rro724uLi2ihk88yYRkQlWkBwGViByFBA3YBAPT18XtL6VAkaSlPM4cDnHOTt9OOw9MZGRX0N8I9B/tm/tZnV2RWKKz5MUjMVxuBBKhcEk85JGa8+GGmq3NK7Sd2mm7RTTtfa/Rb39T2MpwksXWhTSs5NXaTu1eKT8+lrLXtsfXHwV8IlLW3muYWRXLCVC+8bXKlQAQMYIO7uBg+1foj4O0VIbaJUiARkRTcOuA2BiPYOp2jIABAwT614D8N/DUUMNvAIlJXyypI2bj3DLggBuNrZ+bB7jn7J8O6Y8cNsjIm5I0SONukZOcqQDwfU89jnk15+a4uKjyq1trOySs1ta/X5t7JH73w5k8MLTpxhFwlOEXKXLq5LkUtfNdV2bPRPDWmQ26xCWIbXQNI6jCkYGf+BddvOQMjnpXtmgIEUxkt5bKpULy0Wf4GI69s+g9uvnugWjeXsLCNQjqv8AEDtIxxwT1P5d+K9g0a38tbeRFRhtBYEYJboX64GOoGTjnrX5BneJXNOLkrOV1a+y5fJrXv5b7n63leFUKcH
FXfuvs9FFXu79X0XS/c6e3tlfy18vzUAVZHP6ooycDpnB5xjHBr1LQLCXbmIDyVwu3aNqqSMrjOdxwDjPH51x+lxxCRIghdmUSEhs+gC5xjCjjgZBP4V7d4Y00hURkIMm1l2jB+Y5LPnO4Dv93Ar4rE1ItSjza7pat2b10vba333ufR006dnotHo99bO17pXT8vmdZods0sS24LiMJgx5OT0wBwME+oyep7YPp+laTF5cEcS4fgSuc4bnpnpk9vTPc4ziaZYWsJAZkDsABySNvrxjBI6c8Y5yK9V0+3t4IoIyu6aRR5ZVsKNvJLsOSVB4+vtXm04N3b2k1bmez6q3fpe1/XcJ12pNRTafXrpta+9lv66tmxp1rHEY4/kREXGCwLbQOQvY54PqRx2rqLRlVpI4/LCMhQMw+VZMDAJ4G73z+g45mGHzJyYQ7MVOSc5yRgjOAMHtwScdRmtq30+bKhUKHGEVgfkZuckHHXHXPp3roVJOF1Tc3Jxiuj1tazWq3V77JaXMnOL0nOUdL6tdLO+97+SV1fXobkcdtb25kmMRkxtkHUvn+JB3xzg549+zBq1rbIxhRM4DKSvzKwOCD1z6gnngc8AUxtPNtCXnuBLgfMM5yT90L649OB1471TgvLeJyrQLIMMynZvwCcAMOd2Ofm6f3QOlY4ilJKyh7OyV1LV7J+uy6+mh0UEpJfHV5bNXaULvl6W00/JX3uoJvEGpGZgi+Wm44YKTGwYAHLDbgDBzwfu88HNNGsvGY2kkZVlI3O7ZKseGQAc46YPHrVLWNSkuMwwW6IvIVFG1cgZIJ6g45xg98cjnjTaXZO+8kjKFmJAOVjC9ASOjAeuMk4z1rk5YySTag7tJu65r22Vk+9trX8j2sPCnOMXWpwg7LljHWW0bbemv5dTt/tUU14DFNvBwNrnKk84LZOBjnH5eprozZNLakyqQrAYdSAFzk7h1x7+mcdzXCaTPYTxgKeANiMW2ldp+9IOckn3P1Pfp4WSdlgS8eKJgAWZiVLA9c5AAPTryPbFVSpwlpO/Rau9/h01atp5afejepOSa91U1BJptNtq6fTy27dLXLZsdOSPe5BIzvPVgGIyfcn0B71RkWJlkW0iwrggO55zjGDk8exPU8Y+XnpUsYoYgxkF0uMMWABz3AOScDqBgUlxZ2qQhzGVjYZwCeo6FT6r1HrkYx37lQvZKK1cd3zKPwvpa2ju99reSmGIaa5nOV2rauz1TSto77eTXXXTzXUbDWIonSM9DuBJJIB5z6bu4HYZIPTHmV9qV3YX4eeclN6iRgeFU4yyrjDjGCRwOOcY4+iFaxvkFrbsyyMjbjKTu3L1znuM8d+cGvHfFOh2sN3Mp3GVcG4TkxKvZUIAGSScDv1681vPB1IQlOEIzklslu7rW+uq3V7X/AC+hyzMbzlSxFNQTTcbRV+VcurTe9r7aXKdj4rt/NjElw00aAqJpgFf/AGdxHOF9OeOhq1f6tpV/I8Xmx70Cb8YCrwee3mM2euQOO56+Qa7ZPbGWSCeSKHaPIib5HwN3yuO2M+pzycg1wU3iC8tpI9hkcJhXZmO+RjwSydSqkdc8ZA5yK8SclaanRTbT5lyp77+dmtL/APDH0dDDU6/LUhOcUkkru1rcru7PVpd72d0uh6/rmkrE63lpMRHIqkgnBkbBwPl3fLyT6Z69ePOrq91CyuFWJpN0bEGQg7Tv4LOvIwMYznngjPWrmj+JpL+FRLJsMeQgcZ2oD8zYZs4OSB19SK6IJp97L++BjVhtXHDN6FevysepPtXzdbD+83CMkpPmilPTpduLu7dN9tn1Pewnu0/3sm0rKLStzKySd0r6at6WWqXQz9P1cTWLQzSOZXIcHk7SCec4zt9OOh4rLn8RvbXRhjk8tMhSpOELngkE5wGAHOOMd69R07whZXUCiBQ0wx5kmeqHqrL684DdiTx1Ncr4v+GNzFBcXUDHzMpNA2073cZ2qpBwqc/MOc5z8tc/sOV
xnNScm0pa6RXu3tZvpbdarY9TD4nAqoqc5KDeijyqWzWvRL3tNXt5Hlviu8fXND1GGURi4MUitGx3LKAMxPEQADuGd7DG0gHBr8r/AIh+I7/R/EdxYSRGDZO5WT+4YjlpVU4D5DDac84JHFfqDd2GoRgWskE0GyMpIzgh3B55jxzu/hO7BAJ4xivy+/a/8Paro8s2v2kLOyzLFdNG5/1RLAOBt+4o6v2yPlPUe3lVGnKsqUuT95pHVXSvG13s9tr67bn2WVYqlRjNRcGklyK+693Vb6vpr1SPK73xGst1cX/7xopBtyygKzsCrmNQTlCQMrxn1HWr9j4hie3YrMqvvWF4Gb98ApJEytnO9QTkbTuzjIxmvlO08WXDWv2YXQUxFXIdwzJySwkyR8zccDbkAdc10Vj4qQLPdPJE0uwSRNF0kyGwqZ+67YID89Dkc19ZSypyi018LXLZva6SumtndLfs3pY2xebUlTk7pcsbtuSWmiejb+e1+y6/Rms+LbUWTxxTKssWFkZjncwwCHTI+bnpnnrXzN8SNfUANDdxSO0QlRFPygYPmb+g3MAPlPTHB54yNS8Yx/ZbiWRzGi4LAZ3uDkM5Oc+YOMgg7vXgmvmzx58RLT7Fexi7SJoIGKyFgXZlB+QycEsM42AAc9Rivqsmydwqwm6bd2lay2dtU1pumtnv2R+TcY8XYfD4OrKFWMEqcndzUGpRiujaWyte35nmPxM8ZraMrNKN5mY7Gba20HBCr/dz0JPP16/JXivxVLf3LokjNE6rG5zgZ5AVR02jJwMd+D1qPxp4zk1e+kj8zeQ7KJQCzGJScICDjByefcjGK83uHlkZ5PM+X7/zqVYLg846gnPGc5HbrX7dkuWKlSpynCzSjyqyScWo2vdJO1rrq30sf58+I3HFXMcXiKGHqc0Of3nHS0lo4uSdmr6tr5Fy6nLRnJIGeDjJyepz2746cAjk8VyV2fmbOTuPG7pg4IA74PrjJJx0rTknLhlLNjnHOeOMk9cgYzj3/LIuWRhgg/dO1jwGIyAT3HXjrznn1+xw0HBtNPW2yb7K+ttu33H4vzyq1PaSbcmldtt2VrW9U9/R7mTJyzAZGeoHqe5/Hp059+sTALuODtHPXLZOcnB7jH0HrzUjhQxK5BHLE5zyRnj2I4xnAI/GsSSWycqeOT+uOPToAc+nXPrwgrL/ACXTdvR9t+3rYUm0rXXk0mrpJbfP5eiaJQ4KZDAYX+LOD9cfntz68g16f8F/DA8TeNdNsnBkRrlCwA9CCAv99gDyMDtzXlUgAUDIAbBULjkdiRnJJ547c8nBz91fsceEUvNcj1mWFWNvIxjZkBIJA6N2AwOo+XPvmvnOJsV9SyzEzjJxc4ypRaVneaSvda91fTTW1j6DhnBSx+aYSjLSCqRnOz15IuLu+l5dduvz/VX4f6Ja6Dpem6dCyK0cEaBVULK8m0fKxHIA6A9RknkHn0K5vFeS4afeskQEPlcPlVz91u7L16Dr04FYHh2FX87aiKYFZ1kPCjYByzc5Azwo6565Ap+oSxWwu5EVMSgvISGCqCDvyOcGQ4Aw2cryRX4hGKxEkn7ziud9bS93Vt7dej00ex+0Zjehg6iUXaEGo31tGKSjfXp0WnXU53VL1Zrh1VschJWyCwcEgKxzghQw4wSM4zkg1oaVCjX8RSQtHEgcl4/mZgAWYknp0AzwOeted308LXiOm9XnkJVI2+bc5GCAT8i4ByeTj8a9F0CUSXLqszh1VPMJ+6ycnanUEjH730+Xknmlj6EaNJRitVH3tVvJR/K6stemh/O+bVZTxFWpu5Tbduqun0b1vru+9rI+vvhaEeJ9r7HdN6oRu4yMGV8gnZgkHGeTnIxj7M8LIga2hjDAhkYSscshI5LHt0wvcetfFvwwlRZULqkURTCMrAtsYgAIBgnJB3E8jjI5r7j8IyRusUxTzQiBFLL8jvwTKTxwOAFxjnrX59md3CpdXVlZba6eSf5Wfof
I5hK663vu77adNFb8NND6s8CXQjCCRw6W6eWYydhLH7o6HJY8EAc45NfQWiaiTHtaQjC4CFRgeiE55x17cEd+R8zeFJYGjjSOSNQilZG5Z9yAZDDK888HPTpzXtugXT5jw6lHKkMCCh28LlcDgepyc46da/I8wwilWmk7c0k9L6JuL381ol662Z4GjcVLmTbdrSjq+z1bXXs3c960WZ/kYFWO5V3MOEBznnPHTGcHtXs/h+RkWNnIySDu+mQSf9nnt6DNeC6LcBfJ82RTmMEleC7dsjH7sDjCnORn8fYNIvkSOAghWbBYno+M8Zzg5zleB9M9aw1CNLl30snvror3t6bO99tTZe4tEvLW99r6tdbX89fQ9utLkFIFVwW2gsRjk4GRjPHt/Psew06UqqNuyfm4I6EYOePU5z2ryzSLtJNhb7zZKqRycY5z2AzjjqOMV2cN4URSpG4LkDOODjr7k5weOntivpcGqdSSdRKKUY2irtWSSTS0vqm9Lq4+a26e27337L06+dkeiWlwFcFjnJOOO5/PPbvkcHJJrdW5G0Dd6cDoOSc4GMDBJ+vPNcBa36lEKswfAwT1J/iPbA6c9+/Wpm1CRZAAQXY4B9jjk8ngDGBjPIxxXo4jF0cLRThaT0u3ZvXlu792u70sibt63atZaLdadOjej8131O/+0pgt5h+YMAecEj+hPGR3q3HcsUBJwTywHRR6c8DA7c9cDNcVbakolRXZSqjBKg9vT37dM9+lbjXse0ENhWGSxB79jjpgDBxXkSxCq+0nzq1k7JrRWVtO2173bv0e/ZSpvR27NN6PorX0aa1+e+5ui8LMFyu0ZC9jjAxwTwOB7dCcVr21zGyKGcsQTnBOTjHf25BHSvOzdYmU7iynIU89O31I9eD7kV0Vlcbiqg9sqx4OT1zyenTJ5zxweD5eHzNQxEo395VI2bvZrS+1lpa+2t9bnfRp3lrd2jHta7a0ab3/AB76M7xpwUUISCRnGMnOc569+PTHvioJrgs8YY7Cu3nOCFySMdQOvXP+IpxSYjUlx1+Y9M+pwOhwOnQVTnulTLfxBjncc4H0I/PHXj8fVzKc8RRThOUI2u3d22W/k3fySvY9jCx5XtbW6b6uySbtd2W/dm/Fcr5gALbgMBucv05PQkHuemcdsUt7eqLVwGVHRCQ2Ocjk9SDycd+mPpWJaXYdm2NkKp2nBPXsCccZ74/D057Xrya2sruZyFCo8hIPIUA8r0BHGQO/oc14mW1ZJYiVRx0u4N+7FtKPva90r2v0ut0fZ5HhFiMbRh7qbdO1+rcopprquu3WzVmj5W+NfieSO/eO5Tfj/UiRysbrhsh+MKhH1zgjrX5dfGLxhbmW70KEiS3u5A8UNqxPmxuSZAzDPlyKduEwc9d1fT37SnxPW4ne1W7jhZCbcFcec0oD4BA5yedpJHGecCvzH8Oa5c618RohfXhmsFvJEdGYgRhSpw+SSz8nn6da2wkXiJ1ZyvKFKLqXimudqzTWtrb2dtT/AEG8LuE75Zhq86bUYUozty6TtGLSi7aaWS66363P0W/Z+8FW48MWurSyCOfehtLaRv3gAY5Eo428feU5HAGfX3Px01jF4e1BLVZLh3Vo5Gjy6KcdUAJBVSeSenevmyX4r6F4T8J3sVlcwWK6Zal0uWIBmkQfMIWyMyOf4VGTzzxUvwW+Oej+ONLvbSZ3upZ4ZpJxN8/kgnapXnaFcZPB7cmsMPl1avCriakJqPM5JWa1bVkr6fC1bd3v2PrsZw7m7qLMVh6jw9HEwcaKi1aCkm22k9Hs+t3fXY+R/i/8JrnxRpPiGK1nl+03W5hJDKUYKd+0OAPlA5z0PAOMdfibwRpF58KotWgv7yeZrFZlhkmfzJ5N+SyN0zCoAAOD+hNfrZ8TPFngbw14c1sW1zAt35hlaEOGkcHfnaDySD24wOhya/Hr4s+Npry012+0mCCOO8kcCZ0/eLGwdfKiOf3Z9fvEnOQBX0GU4LF
OSpqLdNyjO76NOO9rWsm9O+/RH9TeFcswxOBr4TEYWpDBOdJKVWKg5WVO9nLRxSbtaydnp3+C/wBp/wCNGka+mpWkO6e+hn8n92STAcsHVUBAU9MMSf4j1r418PN/afhPW7x55I5raUmOFsnzhljIC2QAAMEHGCfoaxvipLfRa/r0d0y28pupJlOTgqxLBg/8RGTxgZGOlcB4c8RXNnp97a+YwtpopBIgJBlBGCoOOCc9e2K/UMDQqUqdOUuVpWg4xuuaMopa9LJvWzu9Vbc/pnLKtPB0FhKbSj7k4Rk+dRsoXaTdtu2yta2pweqzrNfzuARtkcLu/ugkLuI78ZPr+FY5Dytk8jgEe2Pp246Z4/XTmQySMwThyTz1AzjBJ9yAT6np6aOnaY8yFyrBVyxHTcB1VRjO7uOuemBXtrlpwV7KN0lpttZfLTVWPFr4eeLxMrt2lJt8ul3ePK7NpbWSf4XMm2tpWAYqfJJw6gDdgYG4ZPKnqMf4V6B4bbyZY1CB4ypUEcspIyS3QBR0J6ZPPvUs9EkHLQShCSVGMMAxGFkHPyjHU+v411VlpTW7xfuwiDhXHAJP8PfhRjtycZpVKkJRte9n0um7Wd79X0e3fpp7GUYCeFnBtSmm0ve+K/uWSemi6aLX5lvVZIra0BLfIwJDD7uSMjBz1yDnuO+elcFY3Ie48xXdJA/KZOWA5JB9s84x+QrrfEUewFd4mRMFW6AZHCBc4z6nPAweRkjzcP5M27B3gkFAcHbgYOQDz6jHOBzzms40abjJJN63et9dN3tdNK+zV1rc6MxxEaeMpKcJKKXxKUrKT5Yte67L5tq11bc9mtdbFtZRvHKVkHylCuVcHjAPHGSOCeDzxji3Defb4CPNKycsie3+1xnngemBxjFcdpTR3VoAzICUO0D7wXsD1JI5Ge5PoK6Lw3PFaXDedh/kkVdyZCxkjJbnPH8J4z0H3jXFVc6bTekF7sY372vfu72t3Wum530pvngnFO8VJNX1soOMXe9/PXW4658N3uoqZo4z+7DK6lShbBG7jPLHjAzyOc55rmNS8MSzwuQzSNESGUL+8RVwXODgYGegBA9cjj6e06fRTprBESUsMsFYA+a/8W3rsHfnr6554fVbC0s55mcCITDcoBBV0kBIYnkAkgAgjgg8VUK04qL50lJ6ctnZK10v8temtzjrYr6xOth6tGUYpPlSVm4tRbva+itovJ9GfJd1bNazNAUJaNiql1AODnBYHJyv+QBiiOxkETTOQMgN0HzDOTg55IyDyecZHOcdP4wRk1SSXaVy3zPwEkI+6/YHI46ndisVpZJbYYwxXKsRjcB2AGRkdPTAHTJr16NRygndJNJbayv1625ne63X4r5TEYKjCvUTjJumoyppJc32ZXdraX02PRfCLWOoJDZjP2lIycj/AJaN069gvQk9Ce4AxQ8RaDfRTsklu6SnLBDghVOdpJI5yOeQPbNcv4S1BbHxBauxIQsFG04wx7HnoTjcOpPPHJr6A1y5hubNLm9gUT8Hzy25eQAGICjPHr059ePRhNVqSVSVnTaXLyvWDsk79H08vVJHdhsQq9KElBSTUoVFF7SVopvS+tl00vv0PmCZHikZG4IY7s5HQnnt3H/6qgidxIGHGDgk8dMYIx1x9PSuo8WRWq3/AJlmxkR0V3ZvumQ53bcdQMcnnPpxzzduA2Dg8EjHc889+OPzP0rhmnFuCasna9kr3aa8np062ut7HnVabjifZJuPK+a12+kXZX7O91e/lsei6XfQm2hWXdtORngAuBgc+nOMH0x6mrq2Iur6C7tmSCOEhp2lPDFCDuwDgd8YYfhzXHQTxlI4gQFQ5UADILE5yedw4HXj05zXrnh6Oy/sWaYp58jwyRzRv8/msAPLdUwNjRndhsnOT0zzDirRhJJQTlUlLdrl6JNWu720b9LH1dFwq4aO0naKctNG0lZt7W66726nei+kmttPlzGZoXiV9zcCIdcJj5nbAAw
RuHTpX6nfsX/tJQ/s/alH4m1K6ltbGS33R2EQ3wz+YV3qIsgrISoLRgkpxyc4r8y/hnoUXi2x1KMtZxTWMbv5cz+Q4KZBCMQ2XHWMYw+T0rp5ZbqGOKKSRnNg/k28KrmXaTgrKcgMGAOTgZIGRivZyXEOlWhiYJWjyKF7OTUXFt207LfTpqfC8V5ZQzPLMXga6VWlWpzo16coq1SnKKTi1rfmejXa6Xn/AHufsC/tH+Iv2q9YXxNo1+B4W020EM8RTYRdQqA5R8/PvLAEEDaFxk5r9lYVljjXfglcBzwSxAAO0dP19hX85X/Bvb45+G0vwO1nw9pt3aR+LBfXFxqEEsqrdhN64Qoz7mCkYJVSDnGe1f0epIpGQVYMCQSRgAkgd++PQ1+35HmUMbgPawnKo3VcZupbmjNJXja6aitk2ryWrvZH+F/jpTlgfELOsrp5asuwmXVVQwlL2Kpc9K8pe0jZK6nfTV6L5PL1bSrLVraWG7toJ4pIykkc0SyB1fOQ4YHOQTjpzznFfjp+17+yBpA1O8+JHhSyisW0y2e5nsFULZzKDum2RrhVfC5wOCeAe1fszK23JBzkAEduRgcHAJyODjnPGeK81+JXhqLxT4P13SJrdZVvLGaIqygHLoegPOTwT1xx9R7Kp+1ShJc100rq9k0rO7ukuiS2bdrO7Xh+GnGmZ8H59gsThsVVp4WtXo0MXR9pJU5UJ1KcZycOZR5oRbcXv0dtT+TX9or4Y/Cjxf8ADvUdVdJIPE0Onul+ifNL5+GESSqPmWMFG24JxkgjJBr+f/xDaadp81/aXzhpbK7khtzkt58EZIId+zBdvy+49q/oL/bV8KS/BDW9bgg1YG1vbS6ZLMYnAju2bdFcRl18oRbPkds4DEjtX4oa94AutX8F6t4o0vRWnh86a4ubpSZI5GkZiRG2MwuQDs4bGD3Ir8yz/LJ0qj5ZaqNSTT0bd+azevR2Vt166/7WeDmeSrZRRq1MwrYjCY1UamFlWqJxh7SEGoQc+2vuxdrn5xfFew0179NR0+N1jZlSXcMtI5+7GnHOw5zzxuHfAryG9iidVIQRLCgLKo3Nu77j6nPPfAzxzn2TxRYz332+3lRkEDSyW8bD54mQned/bBIypX5zgcYzXjlzbm2QxMzhcHYzfeKH+Ju5Of5546V+W11CpVlOyjzNJpOyvom32d9bXd+yP3vEe6ueSTjKK95Pa1rN9dU+r1e1rDNMnSGZZGBYL1UfeznjjH8OSc9uoPaq/im4ju5badXZ2SJFkLHglf4UPcjkH8D9GQNEjp5TE5IX5jkcglieOMcHjGD2POc3V94U/KQqyMAQP72MHrjb6Hp16c5wjRhF+6480m9E291pdbWe3f7zy8TVccJyOUbRi9EpaPmTi2/J33/MzQ8ZmWUjYo2tgdj659+nf9a6u1uk3wTRsFB5Oe75HYHn1/I1xIUHZz8uRlc5O09Rjn/62a3bEEqqEhdrgopPbP8AewB+nr06HSVKNtXqtn03V+9+l990c2W4ipGpyckLTknzJP42o2bd9Nt312T6buq3skvlyZ+cgKWzwW7DGOp5yfXvzxWjut0QiB/fcBWDcew6ZDZ689hwahvFQxndhGXoBySR3HQjAxz9MAiseKbbMpbJXdnOc5Gfy49c5OMHHNYyhGUW1FXd7pRST77Xun8Xl00PWqYp067hOU17blipOWmnLdO62W222251Sapc2gUxzBJAu3cOq9jzkHPtnlsH6uGrTXLLFM7O0pP3ejA/eJAPsMHk5PHNczeSDerxklWAJJGBnH3f57fXHTtUEM8gmidT86H5Occ9MkE8AA81zTw+qkpNctmoN6N2V7WS8u+2pMsxcazpJxqaRipb2uorS19Unt3v5su38Do52qUjf5hkHGO5GT0PbPPsaz45QkgZWICkBWxye5U88bR+IHTvWxcSM9vmQlmYZPOQzAdO2NvJGOMd8cDnsc/dBwTnjJ7k9cdD1HPf0ropwio
2str2VtNter301fquh51dOhVhUjpKT5pKT0iny7Nd7/jtc6F5TcKuGzsIDSc5fqVHbpzWRdxk/vGBBBwM9eAck49eMf4YqxaTYHljjgscnGSBxj346Y/Wq90zFWdWJBJDH+LtjHXPtyf1rekrSa0SaWum/X02vZf8BLFONSiqjV7K1ld20WuuvXfS26Mi4jjkDcbSMHA6t/ieRk47dOTnLaIq2Dxjoe/b34zjIx/hWoM5ZiDszgHuPp2z35P16iop41ILtySCQc9eTzj3Genvg16NJuNlq02r7va23le3p5JnyOMowrKVVQSmtbfDddG9Hrfa337mXIC3B6g+mM4z+Wc+hqs6gpuY4KnKjPbnP68cnn6YzO4PUZJJxknrgYwDjGO/qOvOMjPYkNtYfxYBPIIzyR7HqPr6iu1K6VmunZXv0Sd9PxPk8TUSclKLbldO7dm/d5dbWbV07d0tWTQshcKSAucn8Ow74/HB4GaS6hV90qMQvfgZYjgnnv04z16nimYwwCgYzkEdu/Tkk9c+nfPNX1iSeFlLYJXI5xnnnk9wT6nPPFXTbjNSV94ttP5bPTVX6ab3014KlFYmjUpSUOeKTSaavZp7pbaaWvb5XMZAryGJiBHtLbM4LYHBwM5yDx/OvMvGFowkWdfmQsFZmHJZewGOuGxn0zgc8enzoys6leVwA3OcdjnGCD3PqPYCuU8RWhnsZmCBiMSEbcAZ54Iz6ZH0xX0OW1PYYmlNNatcz1vZ8t9fPZ27fN/z14sZI804fx9G37ynSnONr6yh7yWzu7X6X13sePnYQ/UAY2gEjcfU46g46fqKQ8gLtww5A6A9ef1yD7dyaSTIY4BVQSR/eHcnGOfXPv1phzz8zZyME8j2wOMZz1/TvX3iblyrmT0jb5pave/m++l2f5yYyDp1qlOSfNCUk097wdmtk9Hb1v1Jo8gEldzBsEdeDxgnj269cEdatxqwGVyrElTjnGegB7Y9evt6VoyMZLEE4zwD04zjHXjg9ParqZIAD4ycEnpkfhk/5OBjFZ1NHZ6aa+nn5aXDD2lay/F+XbbR2foTZYbMZJACsT1PHI7ccc+g78nE8ZzwTxnoGOSDnnIyQM9Tjg44pqKduAyseuV9ecknI9Bx1B471YjQZVs/Nk7gQcHHXOepHcY7c5GK45OPv3T06PTV2tfffp28rn1mXQnU5XGP8q3TstNtd2t+qtbXUWNSDk4AAwQOhJIH49ck9ePeraEA/KPm4J4Bxjk4JOR9c5A9BTMId7fMGwCdowu7qT09OoOfb3EIVlYnjcF3Dnge30z2/LpXJKStbbRPe9trapdL307an01GmoP3Xrvvd300eluzsutvI07KMsihcbS/JA4J7LzjO7HPToMV1mnxkjCKRtcjaBkfN/e54Hrjv+R5y12AqrMSXIKKvQsc7SxH48nt36V2WlImY1aMFN2WAySNvUn1Ubs/5NeXi5OKl5JP+tui9V5n2WUUHKVLmd1dap3a+Cztfb89Oh2lmrCBC3yCMAbQOhGM7QTyx4yevY9cV09kkUkKhcszqSWwOB0JIPrn+fcc5EECMqzp8yyDc4JycjggDB5PX06dOa2LQjO5I3RQAUOCMA9VIwRzkYPfAwOOPmK8k03azW99EldWVmlt3PvcNDkkoylZWjyuzaeiu07WWi2fXstTZsoooLiJF3gNgk4IBAPB4HJ5IPUj0717Jpl5bwx22wDZlULMMHcenUZyfxJ44PIryTSZ0M4SUAEHKtgH3x7ex6ZIPSvV7OxhurSIr8sg+ZQozlgcg4HXvnvjGME158pL7W6s9XfTZaWs1tt5M9WiuWEW7SitEo/ZWnWW7eny2ud4unPc+Tc2chycFgo5OeoIBOQCD7dOO1ddpcE1ncIHYtkgAEkMd2SQRj9D1Az7HjfDOsyQObaeJg0G4KGUkkj+Ic9eM/17V6lor22tTs1wRBLEpKcYDBfujnjJxwB6HPQURdlGSezcntZJWuut21Z
XT3O2nZ2ad1eLiou7VuVap36eS9LXOv09d1uu9mDPlDx8y5IB4749e/4GuzsYbZQludzSx4JyT82O6HBwR1B+nFYOmQW5JgwTIPm3dTxnjHTJGOc5z+Oen0+QJPIzKrGOMqd552nAITPJfjg9c9BWFSuuaTTstdrLR2tvdNavTTY2ldJJ2UlZuLdrLq5K/Z9WulvJq25uJyI5CkAJILk5Mi7sDbkfN+f6kUiRzDVUExKpAvyYJ+dSOCf7wIAwTx27VuabCm+WZ0ZoQxMS4weMnBGc5+mfXOeKq3M8EOsO7q3lNCvkKRwhGeGHUjOCPXua6cLKVRuOjja99N/K99Xr0sjjq1lTTadrys3Fy5Xs7ap2tstN/wAdHUjbMrTHDiCxd2IHzNhCSR6kEdOi89M4r8I/j5rv9tfEfxLN5wnkt7poA5IZWEbOAHbA5GcHuCcdDX7eeIriGx8P61qFwyhF0+eVt2ChYxttRFGDtb+LPI44zxX8/HjS6tb7xT4gvVOUl1O7cohOMtJwOpPGOxGea+14WgniqsuXWFNJS6czcdNtLWu0vlpt/Ln0hc0UcDl2D9rG1aq604tyU/dSSu7JWV2ld+aWqRxGxlcqckMQ0j5GMkk4zxkLjjpxjrjNFSuhEnDEptCsCc7cc5KY6j1J79PUr9LgtNdWrK710sj+Oqk7yb97W20kr+el9/XaxBoWsAsgAO8txtHAIx3yM/l9Pf2jRb6OVY1MilSVQnOApJ4VSQSGz97jgd8V8u2UrQzJjKng7gcBUxkg+2OOhOcV6XoWuxwMfmcx9hjGG9V5+pPfGM18XjsLJpyhrqtEk7SbSbX4W9H3PZeJ54paaLl6391pb92uvdbXPqjT7WCdWUqgkPQryRjG0FcAbX5ywPGOlM1Hw4jxzTeTubYSNn3OepBAGOowME59K5Xw54kjaOAeaFVhGsskhyQ2DknnqcjJP1OM8+oW+oRXAZZGjIPTy2+XOPlQAY3HH3zwOmT6/N1pVqM2/e92Wr1vpJW6X2Tut1b1MZKM7tJpNq9rSs7K+q0v927ueHT6M8DPmILGWLDj+L+6x7sP4QeOKwNVsS8blQDKOH2jk+uPf+97+hAr2vU7KOYu6A/MAC7qBGW5OBzyvT5uO+Qc5ri73SQTIGViSsjAxfKpwAeX5+fpgEc464OD6+CxjlyuTtL3Xqt1pezfddeny15qtFu72s1azs7Ozu31tpbfbz08FvLRoJQ5Djgna3IYADPGeTz+P44NMIro68AsCwO07AT0Jx/9bvwB07bW7BoiCFd5FZlO4/KMnoDjsDjPfnrXKGOaMuH4L8Mi/wDjuMgfex6gfyr6KLU1F33SfR6O2rVn5O3+Rkqfvct77X0WzSf5fJaLa1naNo5u9QhtUZpJJpFYKq8yAHIA9AMjA9T61+jPwS8ICO0ti0QjMPKqFB2SZGWZv4QoxxyGHoOT8gfC/wAPSXt9DemLDRyZiZjlAuRkk4+XA5IHXgcHFfpv8MtES1tbURxqZZAC7LlQiHB5U5JeTr17Vw4itCEZpNqV3Zu3921tFa9trfO9kfofBuXRdRV3G9muVS6tOHezXrdJrTzf1L4C0q2tvJEkaM7hcyP8oxxtHfh+cKSemQTivpzQ7QMDPIrKHQqm1Mjt+8DDHAGMHH8WAM4NeKeE7JJI45HiIaMKf3i4dSo+d0zwyjj06+vT6D8P2rtChH+oYKqgf6sKcls9cZxgDODzwa/Ps3ruUpO7utL7NL3ba999V5LqfveWUlFQVld2s0l7t7aN3totL32d+p2VrCmyFM4jJAaZcgo/Gfl7buPXoRyK9B0i6WKMW7ylY4UA8xzgMxzjHTPPbPfrXC26AlLfZIiuy7ZFb5YwRyWI7jj1681t2oEiiDcsoLhFYN8ykZwzryfm6AHnAwcYNfmOZxc976vvq/h130/4B91gpRtGF7SjFbL4dIq+miXfW103vv7N4XvFE8CO6ldx3EHAxwcEYJwTyOnTNfS
nhqQ3Bglid0SZdiKRtXoAQPZuB2GeRXyv4XtXa8hAzPM7qpwSqL5fDAAhsLz0z3xz0r678Jaa88UEzLjoIkHDQFcA4HGCexB5x2r5WtG0mrNu3KldK7VtNd1rd6aW6I7JuEW+dq7Tk3v0Wi0W+/loeoaNaoWJZFBClgevA6jPbGRk479q9B0uEqVIiLjHy9gZG7qCcLkj1I6fWsLSIEt1XepcSAqSuMMxwCWBHTp3H0xxXd2lqAI3XcflG5R90Ed8Y6jg/St6GElyKbu3f4E07JaO6t69Lu+1zzqmJUXyqTjFv3b312tfsn0tq21e1zX094bdYyV8yb7xjdPl+XnB6gAfxAdfboOlhuFvJd7RRwjGQoQoCox932OOfyBwa5lLpYpo4VQBcqGbGWcfUnp15JOO/vu5Yx+ccHco2Mf4MjIzjvjOB36jFdalKEeWMoyjFp8q1alZap2toktm0r2Vjmc22m0+bS3M77pWsm2t+/b7pBapqDvF5iIqtg9AMDgFvr6cYxxnvlX0NhbGWNJo5ZADGzryAygcM3o2fTHGOetKxlti3kk5dCFkHQ7upLHgD1Bzjk9OBmSKkIJmZWO0lyT3Izuzzz79uw9ODE4qNnH2cZTas3N7bPZ297u9t++nrYSLbjepJppXUeVb8rl+KXTT8TE1CIFdyEkgZ2DkA+u7g5PHOBjnqATXLT2l9ezPGX8mBmz6M5wMDjnPPT9a6efXbZYCIoo3Zd2Djdkg4JB4IHbpye5rlLfUr6S5MjrkBvlRRx83RWI64OeMd++RjyXFxesklbZe9u1qvu3vddOp9Nh5OK5W4w0spNvm3i1Z6f5PS4y4SWx3RlX3pt8ohjjp04AyR05GAT7Vrade3kpRCzKF2lcDKkjJAzwDn+I9Dx65qWa1knDS3SASSAMi4ywC8nr6Z7g89utXrTypiI1hkRxjyzCp2grx8+B8p5AJOTn3zVxjaScdbWvdLdcrd1a9uq06b3udnO5pXldRVm5rVttWs+qdrL5W8uysrPVbtImeXyIdoYx/3tvr79+OPx6bscE1wBbyDK42b8AsADztUkZbHIGR9eOJdNjv0tLeNYmupFwCM5Pl45PfLKDypx27GuuktrS1tYXChbhkzjcCwbqSewA6ZP8AhX0+AwntnD32ouzlzKyVkvdu+9n0et3bv5dWu6Xu2UtP3fLq46p3k/l8upkDQNFsrVLu6nitwGUBpWCEls5kJ+YjaQOuOp6V5L4x1rSLd5LbSxDqczECVoxvBJJH+s5JK9hzkk9M1c8bQavqYmjgvAsKbQLdn2DJzuIAzz6fMef9o4qb4S+GvD0kt3L4ou7eGS1kZ4VuJFzJkjMgLH5iMDAI4z6V9FSwdKpNYam4Q6zqTn8UElf+6lu/16lwqrD03jK9WdeUEpKhSu93FxTS1ettH+Fj5n8Yxajfzl5LOSFmA8tFQqduDhmHG5jkHdxkc+9eN6np18jxb4GaRm3AhiJI8E7hwM7SMdfoCTzX6SePdO8Ba9HK2lajpkMtjGwd/NVJJtnG1VAGWY45zz2618ga1b2a3025GcRqFinKkB8EjAPccdhzznHSvncyyaGHc5QnGs5yv7rUrOy6dF0W19tnp9jk2drEUqcVh6lKcVGTpzUoyalyJbpbppPR9EfO9xBfiYfZJ5o5Y/nkDZUl+SYwP4gAOD6dQK1LLxhd28sdnNuWeMBWZ/vhT1dc8Hb7kZyOfT0vUNEivYCyoILjduAUcvj7uAB95+ep6gV5brvhu6tgpQNuXGXbh9391xg4x65PJFfF4nCuM5Jx1aautkrJtvqk76fpqfoGXYzDYiEac3Dnj9hpJq9rN99Xs7W02PafC/jmfT0SYf6RC+xim7/UsM5YjGScnkdAMjHNfQ+ga1Z+KbH7NIkBkkZnjZeWHAwFc9Vz2wOhGTnNfnXpup6hYuyiRgVYFEclY1Vc7lZiP4hgjKgnBPQV7/8AD7xtLbXdszyoCZB5ipyMfxbeCMp
xgjGM98jHFWinScXdxWltFe1tb+j3TfW2oY7L4ypyq0opVErxlG8dVytWsrPu9L6He+PtAn0m/Ie2HlXP7v7Uygvkcgg4Gw9gRuBGffd8HftFeA4PEvhbXLUwCVmtnR1Rvk8zaxiIkKneYiG3cL82PQGv1G8Tq/ifR7ViUuJGhjdnQgNHj+Dj+NAfbhuQBivlb4l6V/Zsa2WqWwjtryJoxIuBvODseUAHMsZOc553cj187AV5U8QuWKTjKPLd3dk4tb3b6Kyt99zTJ8ZL3YTSc7cjtJxatyuTUX7zt1+7XU/kj8eR6r4K8Xarpl6ktuILx0jMjsqGFmwkxbavmBQCEGFySemOaknjmWYxrE6W/lOokYuBE+OAdvQ788kn5fQ5r6u/b++Hz6JrNz4ogj3lp1t7y4U4jmwWImibAB+U4YDbswOvSvyq1LxjZ2knkJcPJ5JU7ZHzKxx8pIIAZmOf3fXgc1+9ZDQjmeEp1VFOpGKUuSKfvJR312t6pednf5DizP55Jia8K9dUqVZ+0oxm+WWtuZJ6Jr3b73W/U9g8X+OrrTknlZyTIgQLnCZ5ILE4+V+SXwM49xj4j8e+O7nULqSCGZ9iyMzFX2xpk5bavOR03Nk54461p/EP4hXF/LNDFetcNJFGkmzgbQGBicA8Ov8AFg5XqM5FeByXTTSeYWcMxO9XO7AJ5VcjJx6jBx25Ar9WyDIlanXr0knGKcY8r952i1dduuluutt/4q8SvESri69fBYOvU92rNOXtU4OKaTtyytq1s/XojUivmLF5QWLYUNjIUnoRxk4xntnrRLcvK7En7wAC8AHg4O3jg9+R71mGRmPGNmV+YAAlieuPTs2OmQR0p0cxLMG6KM5zgZ4wVOD6njjgnkda+7p0IpXSUWtOW2nRWsr7fofz/VxFTEzcqk7pyvZp2u9bvd36afPQlkdgAVG0jOcYzk5wMDPb1z65PSs+WVmBU9DkcnOcc5PPAGDz1PYc1NLIXLYPGOcDAzjBJ5AIPQH+dU2Vhzjdkjq2c9STg4HPHrjHTnFddKKWvXe19Ve3pfbXTy1BNK8U3srq9k0nutV026WRXbBPY8bfbtwc9s9u+TxzUDEqSBwABt7YPbGTwOSR9atMBt2kYYHIOOvofw/xBz2pSE5bkjoOR3P3h9ee+cY9+fQpNaarRWd91tbbT08r3IbT0vZpKLS1ejW/k77/AOVxkUZuLiOL+/IoBHB5PT6E8HrjueK/VL9lvQW0jwvb3ghMck87ANu5UZGQowMbhjJ56exNfmv4JsBe67aRbGk/ed135YkYwOM45Pse/Y/rz8KbGPTtL0mzCbFECP5cZAALKMs/HCjGB6YxnANfmvHuKfsFQTdnKEnrdXdle2u/fz6XR+mcDYOMZvGSXNL2cVDrpzRTtfS6W97X+Z9f6HLNbaVJdSKNs6BFdT8gY5xhQMgk/exxnt0zQ1K6nFvKqsPMaN2WT+EE9F3+vHQrkHPrw1hNHpdrGE5lAdAp+8UxuAHc4weozyMZzWBrk7PbLFLcGMSLlljG5llPAGAR8xHUAnucgYr8kwFZ+1av8c1SW7vbld36LTS72t5/b8RV+TA1Erp1Kenm2o6P7977ehyG8fbGeSMMwbep24wnTceeXJPA747DOPUvD8xEsJjAG4LuJXBVD1U89W9fTHSvI4pJ/tSnduaMqPlYFpEjOHxwMn5gcZ+mcV614a3NJayHCxuNyo4+c5xliR90KccYOeBnjNenmHPKKVk07La91HltZPrZWe1t2tbn89Y9Nzm2knzN3V7aWatvft1/V/UHgiRDd2ZVcxwygs+NpEjAcoo6qxBJGMHHHXNfc3gW5kbyhv3qiqGUYVTtBA3dc5BO7jn5QBXwl4OeKN1DMw8vaFkwd0iDODIB3P8AB6gEmvs7wPeJ5MEcbAGQRxySAZLkD/WE55Cg4YDA5+8cmvgcxoSaqPpdrVaLv+O9110TZ8njUpNpXStd387ddn6u/lofWPh
O58toiwUQYDMEG1lfJz0LZ/2hzu45zXuOjzZkRw/7sYZsrhi74wRz8vA5+U8j0GK+dvCtxGGiYEssa/vlA2xkjo3IJPoeOMduBXtGnXaK6OX2Rsikhh8renOeG+o4PTvX5vmGEftJNRcU2vvst/La99X13ueNKn1fNo9ovVrrZrW79V5WPfNGviGbdnedm0MOGVOu0d8ZGTwR6HOa9Z0q/jMabsJtHyBm5yPu4GCACegJ5Hoa+fdKvlCRM0pVCAWkPIcMfurzzjHXjBr0/TdQjkgTcFAZQE5PJ5Ckr6N9eSMYrgjTtdOLb11b7W0+75aeTElZO3M043tKzfu2Vm31trvpp5X940bVMKqs+UBbtjlsc4z044HQ464rvLW+81Vw+TuHygkFgfr2XjuevSvCtJ1DEcTMyb9xY5OMgYwpx6Yz1yR7Yx6JpepiQRsJACAS2WwSRxgemOOOcVVOpKmneTW6h1s/NNq99b3fcqKvy220bv1TtvprbV669r7npkV6yRFdwDKQcjIwActz1Pb1wcd6cL/cVcOSnylsk5PHTHYHsO/fpmuRTUB8zrIOSSxOCu09QfU+gNSfa9yEKzfMV+8PlJDcHOcge3fvmsK1adTSMmuVXkt07qNtLNaarS9zaNN2UYy1ildNX/lsne+iXRd1fc7C31SWO6CfN8wO0KR16nexxx07HPBFdNFqG6ItuLgg4GCBnrkZYDAzwcjP548qimZJwzyFnKjaFIJ4PIPTK9hnn3J69NbTg7FVs7l6HqPQkZGMH3wfcZrgjVk3KLbu7LTWybV01t5WevXfQ7sOnZ8zd7aatWad+lvSyVl8kd3HdZJYgEg9zg9uQMNjH1xjp153bG7ZXVy2Mk8cjg9vr17EYxjrXCW8nKAsCQRnn0I6npg11lk5LJyC3BIOMkDpntgDpn3+hVPBSc1NTaSfuprVax1u7Xlo0n00sehTUotaq7kr3S1WndX37v8AQ7/7SzRKAxAKhjwc578ccDjPv+dVy8gf58Ng8EkAMvbnv78g896zUuAnLEBMcgnjOMYBPUkjkZyMD0FRS6hFnqB8pBPXK+/Odx6dBjI5r3JV1TpKM7bJPm+T01d76Xa2b0PYwcFzxctG5a7W+zqkm03ZaLpsbaTsEYo21QDubBOOR2BGB6ep5yABXC+NtZktdG1Rwu8paOEi43S/K2AoOTgfeJ7E1vrqNtHAXLBQiHC53bz6lePTIznvXjPxK1jUJfD+qi12GZrKVkXbtCjaRuzyQpBBzjnrjOBXj4iaTcKdoxqW1S95XS0S72duib66n61wdlsa2Y4K6upVqV7b35o3fW/zer6H4+/GbW7LUNW12a6i/wBGjlma5LOfNiYFg3luDlfJOMAD+Lvk18I+G/EFhp/jO0vo5Xkjhvnit55GPmTuzfdmQfezggNnoRu6DPpn7Qfiy68KX3iS7kullW4mmWe0J3MuWYOfVpGzxwMEAivl74e6gvii8jNoxjfL3lsXAM9uzZyGBI8oE9c7icKBjv8AZZNl7WHqTqJunKlGLab5UrLR9m9b6LTS2tz/AFS8OcmjRyShKcf3KoU03pZp043vro0+qWvnqfQ/jzxBdeNY9Q0uKRoSbgQ21pakxsZyMGXCk5i5y+e+MkcV7H8HtPHwv8JXssl6jX7K7sspy8hl5BMmflVcHCgHGSOOteA+G9CvNL16x1HWZJI7WScRxuzYRlfPmSEYy5yFyTjH4kV2fjDxUn9jX+naY9xcXSkolwvzRxA4G0DoCAAEJJxzXo1KUaNKnRg06UnFtpJxVlH8X6323aP0d4KGI+r4Ggk8LUnCdWcUtlKPMnJ6Wa6b679Ty74g+MNb1nxHO2q3LpYyzMmxciJo8ncRyQWAb5fXnpXyN8bPHGnWVvpXhPTLmB7m51Atdsg5aElTGBg53Alt3PPHTFfW954currw6NSvCHCRhnEiElJD1385Rj3yTivif4k/D37X4ps9ZvbWS0NuEu4JGOLa5t4
8lyeRhyCPKyCW9ODXpZXyVaiVnFr3IxhG8ZNuCvJdF3a21R+v8P1sspJQbVOGGoygoUl7tScUop8t03Jbu3nrofml+1BYT2HiizuAN0d7axtLJGCkZYjIiIAOWHOT1zxjjFfONjeXCI0CtiGZcNn+HjGc5OB+Xfpk5+vv2tPGPhnWp9P0bS4mOo6XOFmuWGA8CjCRpjptOdwzx8or5AslLAFcZAOeOfrnvg88H+VfoFGDo0/ZzSm1CMr6JXajbRbNet9r2Wh9Ng516sqUv3kJSVuXRTdN6xbs9E4pXTd7dtDXsbNLmeKIozB/vOp4yCMYHUKD69e/WvTdN0oJFEQF2R4YhuqqOpcHJJPf+o5rz7TsrcqQSrqwAweWI78dQDjkj1GATXoTahLHBEUXbKRtcsMgkdSRnkgHgn1/hrhxE9ZJyeivy6b3Wj87dlfZO1j63L4UnCpZWtFKXPq1JSjv1unrr39Dqbe1R5JNkBKuAzsn3cKG/h6DgnnP4HFVo47aK88v5ngLEMWGAmMZJJJOV54P59KqaNqE9wwgVzGQrNsUZDsO2cjah9O3UVbubdhGZcCLnIKkcn2Bxluw/MmsoupOMZRW3KpJLqrXWn4vbXZXTPXpxUHGW6kklKLSXrru7p9vJu5keILe2EUwiz5DiTYTwZM42Mecggk5x6jtXj2oQPbSngq/XgggE8gZ5+XIHTJHTgZr0LV9RllmgTaqtDhcd3K92Q/x9N31HSuP1rc4aVhgv8yHuc4yv+8O+Oufy7adZqXJolGLctNZSaStK99ktLtWu3qjxsyjCunFTm50rTUk9Lp63tr21vf7xuiXEjXEcBDMWONq5y2ccdOhxyO+e9dSr3UF2NyFG5Uddo4O4FeBj3J5HauB0u4e3vrdgSDuBAPIzkc/zP4njnn2GLRNQ1S6sAieeL0jzGTllRsZ3Ac5YA49gfXjhxc2m7taR5rWWsrJO19b67L5HVllX22B56lSKlSb5uay+GzVmmndq6s9XtdXZX0/UbkSFUungWMHaxDeSzcck7jjJ4C4IOT0ANbhe9mjjnnBlBIAV2ywA9vbjaecdCOc1peMtCtPDOlLJGwXYFjVQNrSscbzKuSQ6nHOSTn168do/iSzEC28z7QxO5X+clj064wF7HOBu6YrLDQcnFtXTaaVr7pS1bSSbvqnbfW1tOuWIp1qTnSUJN2h8K5rJK7TvfVX3bt81fnfGsCz2iFIh5sbHzHBBKqOiqoHA9Tkgj0xXlcV00QMJY7SW7HcD6845I9QDjHPp7jfQx6gsiqch95AQj7p5Xd/vDOccg44ya8W1uwNhdyhAfvZVjwuO+OgwuRgk8D6ivoKVNKMV7zVlfZqO1rpbaaX0Xrex8RnkZUakcRSd2klU5dXy+6rJbPl/L5FKOeK3uIpEclo3Ric4JYHOQcHoP6e9fSdlrEXifw9BAEiaeGNRIy/I5KjGXGTvLHO08dDgE18rM/Usp5OFxznsPw9sdfQV3/hHXn0x3jebbCU3eU3CO2DyDyQwIXAwQwz0xg9dOapuUG3y1FySfVWs0+itsnp5bHkZNj4yrVMPNxiptTg78tpK3xXto02272TSex2mp+EZ2sZpli3OsjHIOW2nkszdkXnAwc5AGMCvOWgksTJbEDLtgEjJXHrydue/BzyPug59jstabUbSYRNtc8bRncWGcLxkDdkfUDrxxxNxp73168jJmRndXX+6F7H+72Oe5wPasJQ5Y3tom203dvlta7TaV9LWts++n0tfDe1hGrTmo1Y3XNK3vwaXM763ave/a1nscXEwikGW46g+vock545P0AHevpj4RnTLmF7a7dJHuYblCrruQoVGM5OAQf4hkgdj2+edZ0q4sXZmQ8YK8fwnocdCOOSTz6V0vgjxBdaLfQLCdpcGP2G/G4rzwTj0JGec5xWddNwpyjZ6x5k7tWdlJ2Vm2vzS7HLluInQr1sFUUoqpFuLknpL3WrN6Wb82e/6NLceHNc1NL
a5NvFhmZlA2C3z84wCAxAI2g5PJ/H1Sx8QaDeafcOYo3lnKxLcN8yKTkGYx4GXJzj5hzXzV4i1i4M8Eo8yO6l+aVs4UA4IPl56kE5bcM/hXf+C9QhuYbS08uItKp3lsLlgcrtB+73IbBBB7gjDwc54aU6atZ3lGUrpRXu6RT66/8ABtY9Kll6xlOo63MnBLls0nsvefV2XVvS99mfot+x58afiF+yj8WNE+JPhC+vEsrhUmntLSZmspreQjdbXVsDtjimUEFmzj8q/vM/Ys/a58K/tXfC3RvFWklLDxJBaxw+JdBLgvZXyALI0RJBlic/MjBQBkjqQx/zmvCPjvV/Bd7/AGrq+nS3+hPHJZtZzxl4DDJtUy5CnDw7cxEd2YADJr9mf+Cf/wC2/efBnxnoF14ON3c6BrMq2d3p8+5JpZrh0VomthuMzqxwjZXPB4xz93wjj4YXHWdecFNKM6Mr+zqpuF5SjqnNe9ySj7z2vrc/h36UngPQ4wy3E5/lmFw9POcuoOpTx1N8kK1KjTlJ4fFRjG8m0l78m3F662Z/cySZRnG0qARkYB25GDyTkfpn3qhqsfm6VePuCBIJTuI28hWyD0HT3xg8nBxXDfDfxjc+MPBnhzxNeWslhca7pVtqL2cmQbfz0EnltkDDc5247cdKd8Qtdaw8LavHC+3zbOZQwUsFd1KgqQwJYMVyM+uDX7lRpKp7OdNpxm6cozaslCXLdu6vs1dd027M/wAm6OWYpZtSwK5VWpYyFGaXvRi4VYxqNO7TcbSvpra91c/jg/4Ko/2he/HbVrbS9elv7a+n+x6rZWVwwmtLSXd9oQoocIybFyOCM4GMivyrg/aBuPAGk6z8PZoLeTQbiNo2+1W4+2ofmAQKWznJY571+3f7dfwltvhV4kufiPqDRa9eeM5bi51OwZjcIUlJ8lgzfNFKSx2IquIyCATnNfzi/FWxutZ8c3lzq0MtpNJIfIsmjNuwiBJgyT94qucIQC+eSK/M+PsVLC1ZU1Lkqxai+VaqLikpcySTbu7q9urP9t/Ap4bGcLZHQUaeIwtHB4eFNtNTlXoxpxnJR0kmpLSdmn06W8o8cabHf3V3qtgwt7a4czRLEQihJCxOFHXnHyE8Z5J6V8064ZIp5WGX48v1VAM/MPc5+nHFfRuryWkLSWpcyR248sTHjcTyNw6MGIPTI+XPBznyXxNoTlBd20ayw3Hzr5JzsUYGWGMhcE5z78Hv+NKqqjmoPmtrLn33WiT331a1e5/UOKgquHjCkuR8qirt6r3dG0mubzev4Hk9szq6yNhSG7c56d+vpwBwc8dCNHVwstpE8fymTBkbORuA6DgcjBx09/SmSWz28rxurAo3A9z1ORwcdOoOc9OtKzNPEIiNxXlRzwo79vXkeoH1ExbUnKLi07N3d3pZrsuZdV5dzxlTnGlUw8uaMuTl95Nyu1F672ur7WXoziT+7lVCxyrHdzkkdQf0JFdFass00bJn0AUcs5GOPr39AOvU1k6hbrHc7o8gEbv9raQfvAfdz3B6YOeoFb3hqCO4u0ErFIh8wB43gDJIPJ4PUHPHQ9a0qytBTW6j21bbtZfJ6Wey8tPLyulVjjZYNuzlUTU3Zq0bWak302drtvu9B11BMAd4bh8NglivXgjpyDzz16+2Q42MBghQTgEEHrjJ75x/h2OfZZNIiFkskMXBkYSyu371jwBtGMLk8Agn14FeYeI7KW0uGyDlznrnBbnnpknPf8qyhNOai0rtc2tklfle97d9L6JNdWfQZjg3h6UqqlKo6STd027+7ezTd0ne3XrpoZErIFz98qCRjt9Pp3+vvSWchBLkNkHAz1B5weh4/HnnHvQjLFSGJJ3FQD0HqM85B6fXPXvrWQyAgTcxJIHAGMdc5GBjueBjFaySikrXTev+V2vvt2dr3PDw9V169OqrQ5UmrRu27xV29W2uiu+7RauHdlRuCxB8wk8jAwAFxxxjOc8HPWqiQtIdw4PLdAM
AYxkdD6D8j0qV1J3bQdu4Ac5yT2+nf8PyliKg/NkcEYHUjjGDyD27ckHA4qVFNX0VtNdFbSyuvnf5d7npJKtiGqt7aWTa1SSV9VfdvS/e2lkUpFMYI2/MDx0HXqcjHUc5BHt1qqJyA0eB82Qfy7de316fnZu5QMnkZwvPPHOPrnrgcmsoZ8zb05yT164wTgHp+GfyFawUUk9G073Wq8r7NdP8u/j42q6OIUIOXK/daWyWn3N2e/yJAGXgc7sD6HoOo+nPtg1XnBwcqSO46jnucdP8TxjjOns2YOclhnqc4PXPoM8d+vpTHRWDAfXaeT+ePr7HI7deum9FtfRXdrrRdL63+Vl3vY4a+FdSLjzSjdN8t9Fton1dndqze+mpzrfe56L26Y9c8+/6ZrMmIaRuQADnnOeR6DtwMkDgcj0roLqNQSQM/wAicYOenpn2yeOax7hUILAkE8KB37AE8ccY/Wu2m729bJ21S0fW9tO21vI+KzOhKm3HmXuu7b1b2117K1l0+RTVmweSTnj2yeOfpkfXH4245HXazYAUh2GASQDycd+Mj6HjNUVDHjvkcgj8jxjk4I/Dv1cjuhIYZxxgkA7R1Jz29SOe/AGa6YxWmm1tknr66WfmeBGvyKLTbsrPXba19Lv5uz6NGhcshVigxvUAZ6F+Rkdfw59+c1zF0sggljPz7oyJFHKndnBz6D5ien61vSkshH38NgHOCgwMEHBAx/ePXA4rGuY2TufLwA5UngHqD7DA5PIPGK78LJJq9rpp+9q+no7rpbyZ8HxbQdfDVoOEpKcJJ2smnKCV9Hvdv722eH6lA8NzMCACJDgDJUA85yeoPQZGemOM1QLoc8chee2WHfJ/HJ6e3r1PiSNhdSkrkseGbOD7r079D3B9K5ADkg7ge56fUDrnPf2r7/CTVShCb35VrfayirXtqrX3avZ7o/zB42wMst4izHD8slF4mq4ppXtKV03p+NlbddyzCDxkjJbBYc4B646cg+3p6caa5G1QBkAZY9G9vbPf17g8VnRYB24yQMnb0z26Z7kZ/njFXxk4wScsMjOfwAPORz3z6Z61vO1rrsl569769/nr0Pn8JzaPq7JXvZba27bK+j/N2IAwIO7HJzjuSc9cd888Zzx7jQw6gNt3HGW9FB6nHYnr7VVhyBjbyMHdkZH1GBxgDnpxxx0tggqAwPORnOcN2+oyD+FebWl78rLRWT9dHfT1d93r62+3yWKVH3naTeutkvh6t3b1e1lr80sTHc+//V4JIGMntj3+vsPwuRtmPAQFS25JNvIPPQ84xyD1HbvVILt4UEyAA7cE5Hfjv7Z7DPWrS7xGGLEEAHAOADkZHtkdufqK5Zb6bPVbvXZ6PzXfU+lwyS03d07t3uklrfbZJ+b19NK2TBiBwWY4JByFyevsOfm/PI7d3o8ciSbgxkAwCDjO0nk49Onr6+1cFaMSy5UFByGDHgcYUHHX3x+Hau40WYFkVQyuqsTzkFcjjvyfx65PavMxikqd73TWvXt1vbR/po9D6/JXBzTtLmVlG7bacrapbPR9f019P0mWNY26ErnbH6E9RjPQjt/U12losU0bO8KglMqvTcVA+bHcHP0Pp1Neb2OVVdg2ynJCsMYzj+LJOT/9bHUV31gXKxq5JYcYHpxwTnBHrgc8+tfJ4pWbaad2kl10Su1ra2vddT9GwkpOKTSdldKy2bVn0SStfp0a2L9g7ebgxrtLfK2ACrA/dyMDPPT0yO3HpWkXE7hVVCGTnKgdDgY/+t9Ca8/t7dnuUiQgLKQwUnG1uzLgnHXp7c967rSbd7e4DmcyYBGFwMkDkHHsfu4579K8+ST99S5dEpJK6but9dL2svkdakkrS2W6Tiu1raXtfRux3llG7Zm8sGQ438YyBn9fUdTmu90xLlYRJCCJAQx7dxzjrj1Hp2zXD6dcLJLGkchDliSo79M5Hf3B4HP0HommyC4ZIgoSQnYwX5Q4GBnA6McZz39u43dNqLV
1qlvp07PTp63WzKjWSa0cd3ZN+9rHVWStbu9ba6I9E0Gdn8oyKfN25LA42885OMdjx7fWuoQwvcxyK5XYckDOHcZyD9eOOfbmuO0+CayV1bkEYJYc89APU444+vOa6DRrxnvpLcqGypCl1yBnOMc+pHOPoDXm1JPmat2tZ26LV30V9vOxpUqyUeZNtStzbXSfKrp2b7Lur69z0ezvEubaKGJPLdCXmx/ED6jGAODjPTnNVbq2F1LLJtUyW0Xyqw4G3gluuduTtAHfJx1q3pNqqWs7tguyvjLckr3Bxx17ZHFYWoXkljIMvkzRlFDHadz8DecHOedvbg8816uX63s1dWTV3reyfnaL0W1ur0R4OOxduZKXLGyklfR2sl239UtPQ8t+MusRaB8KPGlxNLGtzPYSrZtIu8eYwKmFBuGMj+XpX4Lz7nkubjaA8tzNNlu5djnJ7j0HAI5zxiv1o/bO8UNpPw6g0RJY49Q1aWNWHm8GElidqYG1yCfmz7YwBn8j5XnChS4ZlYF1BGFUHnqMYPv19BX6lwrhnGjVqW1nOKinqrLlfM1rZO+r6baaH8b+OOaRxmZ4fDpxl9Vw15Rdl78p33vdq1m3otvUVdoYFgS5yzDnbtboTjvwTgDjI6HqVKzbVRmX94wXBxyuB09BjpyeeCOlFfcRWisrpaXSettLt66vr/Tf85TqJv3km/OyaXRddtvw6HlMrGNtyZAOBluADx8vynk++eOT82au2t7NFyNpJYAjttA5wCQATwPfoB1NZjsScMflAHy8nrnkdfbnPU9fRqO6/ISFUZOQec5BUHtzz/8AqrmrYaEtYqLkmrK6SautE9NtNjelUlKSd7qNlvZWVtG110u0tr+p6zoOuvC6JlfLYrJtIwVVfvBSSSBzxnPr7H27RfEMUqxkyEKEyrsRvIf72TkbTkc9e/Tt8jQ30kMgKzbgQAQBt5P1AyMA88cjkdq7vR/EsibY/N+dTnaTlgq/e2pwGAGMknPXAGOfmcwyxtSqRjZ3d2mr6WdmrXte7bXWzu+nq0JxUeVa3d5WXV2t6PXXTqt7n1tZXiXhQFg8aOoKggLlQcgDttz09ccmtKexheJnUlEfhWf5pGQ5+cr2PqAew6V4voOvQzCNywQspyu4gNvxl2Xkgrj5ck9etep2epmaKJEkjdkCxFGwMRqOgzkEkNkP15GBXzEqc6dRq8oxTVua8dlF6a21a3ffyTOx04OMVFp31s9L3s+rSstb+XQ5LX9FSYyqU3jZlGQBVA5yCwzkgHIGBnGM9680n0kxzLGoZuQivIuHYngiQ9m4+Xr05I4x9BXix3EZQx7Ff512khQRjhlwMH1PI4AINebX1tG2q20QIaRpVwF+XEakbjwCGYZHB6c49K9jBYmXLyNvpo3daNaprbrfXS3o1lHDKU4pbNx11bs3FvW17X9Xpvsj2n4VaGkNvZMUWaVnRpWVdg2n7oI5BLD73Izj61+gHgOzIKOwbMCqoL4AfIG8p2GzjnkHPGMCvk34fwwxJbllG1Y40AIGXcgfewRhe3Tqfy+1/A0dutvbN5SlOYiJB8xZwNhz33Yx7bfWufHVGoSberlfRatLl9299b9fv1P1zhrDqnGEWkrQg7W7cqbdu7Tvvrc+ivC0bQpGJPmIwrKRtKx5ATaASSX5JwRyvPNfReiKslusOMkxsR5nQsoGwqMZ4Gccjrxnt4b4OgLiOTkHJSOWUZWNU2/KOmWGQE/HFe4aW00TbgF25DEDAcqmQFAGdzPu57YHevgswbqNt6KLv83ZbrVfK9307frWXRVm9FZKLau76J/Lpt6bHRW6tvSPcRMkbSIxHzMB2K5O/HAPTAx6c6dk032mMMsfnOdyoowD2BK46qMnLY6dqgs3Yyl2Z9qrtX5fus/8IHByO/TIPsQOn0uwaS+iZiPMIUJIy5Rx/cPGFx3x689OPkcdFye3zV7K1mr6bfc1c+jw85wTatfSyWkna2jd1du3RvXrues
eAdNlFxbhz50xYSMwGxcZJK7ju2nA5x14719keE7ZYIYpZEzIOWhY4jCdmYgctz1689MnI+fvBNokCW7yAAuoDFR96QkYXI6DjIB4656Yr6R0OeGKNcRjzv7jcoif3m9cDnP/ANYV8zKknO8tLN+89eqtfddNdjPE15zd23TvypKTvreKu9W979dHvskei2Bdm3ERiHoVXG0qepGO4wME8+/Wt6HUcsIbbbh3K55wdwwSTjAxjpkd+eDXI2V486PBCQq4bleSD1HHHHtnAOc8HFdbo9vHFGksmMod8ozgsT95gCTkjHPYdgac6nLaFP4m1eaaUkny6bJb73e27TZyuTScp2lLTlbu10torvTS17abJNGzFZ3MkiTMy4BGccKcAkhscE885Ix1561tvdxxqsbMzlRggfdK8EKpHducnt27VnR3yyoY02xqCQXwDgHGD29B+WfariERLvYq/wAu1WcAMzNnDfj2J7Z9qVSEKUXKDvdRTcna17aaO19brr28qhOUppSTutVGKcW1pZt67eb0My4v57kiKF1t4RkOpAw4IAGGJ/iC9R9PTONNbXc8UsbOTGy+WGPI+bjAGcknjHOMZzmrl7MkbLuRZHcgbB0CjOCcZwQDgDGff0uWyvNHvlYpGMBSQVC7QcAHnPBH445Oa8X2ca05ylNvlty7Wt7u/fTdfOx9Dh6jpxVktUrNq8o/C3Z2bXRfnqkcx/ZttY2zNIDIyg+Y2NzYJzmPng+4z09xXFf2tcxXrR2enPIyygpIE4GBgM/UBVPXrkk+gNeurbQ3UjRcSK+CSeikdR04Pr6Z9uZZIdN0+M4toZrgD91Eqhg7L0ZyBnv6c49sAjh3N+7aKjZNu+u3bfRem990etRrWlCMkm3aV3a+vLvfVetla3mksHTIb6ZEkukd5GXBZgDtDcEAdtoz3GPTvXp+h6bY6ba+bIsa78Z3AYOeSSCTkjoSPx9Bx4upIbUvPLFCuN4jyqkHqqAcEjAyenGMViXWrz31uoivTEqHART0GcZxn7ox047fj10MPThJTSdR8ydqjVtEleyTve17Nd7Gk67qWs+WMZcqim9bNa3S0dra9T1u51qxso5pLe6hjkVOAGwwYDoq+rcY55xjtx4br3xDlFyQGuFSGQgyJkgnPRlzgdvXPv3gMcF5JLHNfknI6MckgfxL6DAIweecdazLu20yyR5QgnAxvRhkSKOpTqc9wTt68ivV/eR5Up+zTtdR930203Wumvn1rD+z50prmcmn015rNbqySdrW82znNX8c6im6RfM/ePuLtk4j5znOCAQecnFc2b8a9JFi8kh3kZ2SMm1geSSD2JORz07iuguJNO1G3nDQPAkykICpEjFc4BPHDE4PsPoK5TSNMS2mZVjX7OZnbfvIdZONyrxwPu9ODzwBQqlWTScnJWUW920+Xe29ldW+/qj3cPGlGlJ8sYTi9HZe9rHvu1ZWu9/Sz6+28GSxXXmW9210rBWIMrHcDklipJyOmMY6dCK7mfwlDJZQJLbxGVlypVwGCgD5jxwMHnOee/eq3hldPjMj3E1wrbOCCSWA6jPAJbofQe9bb30cVwLmB3lh3bVSXJVR0KnP4Y4/lXrwoUnQa968orfVuXu21/SzS07HBWxmMdeMYOKdJpqolycyVmldNJ621tuutjz5NOt9H1CZbmAXcBV9kbJn5zjEinPDj+Hrjj1rkNd0oatO0sduIwCdgcYIGO5xnK4xkDjv149zmjg1Zo5DAgZAQqKu0cH09ye/A5z1NU7nR1giAmtlfzG3F152dc/N1Hvx79RmvnqmBc3NSVk3qna6StZNa2ulvptqtD18HmvJOE5Jqs7Qk27Xs1sut7P/AICevyF4n8KkwTC2gaKVgDNJ5e+KSQfdAPGD15A4zyMAV5Xo+r6x4U1uGG5i3xGRpYnRf3jLgAAE/wABySwAzwCOhr74vPDyXELfY4C9swyFlAIMgzkknk8fdz6mvD/FngVNTlnVoFt
5IxgSxJtKbMEBWwNoYHJ9cYHSvm8Xl86fO4wcoSbd9W1e1tFbbZrp3P0DK+IaFSm8PXikrLWduZaR1ad29drPTdK1r9r4N8ax/wBmwNdXRhjMRkxMSNpc8I64yzDGM8cnuM15f8ZvENrf28bxTpKYWDs5wyop7qD96QAHAJA5rgfEMWu+GLUgXbzKkcaLKqGYnG7KzAEfdGASf8cePax4nt3E0M920WGy8U7ZHmuPmmViBiNsfdwdnTPNeZRwUYVHOSlzbJdk+W7Wl2/l1+7uoUaftY4qlNOHMpe4uZWk4325WtNfW6te58U/t5+E9L8Q/BXxDqUZyRYtdIzJ/rZI1YsZMHMbrxsGfX2r+S/XfEcn2q6heco9tPNCm353fa5XBcYJKdB6e+OP7BPjPFF4q+GPjPRFmhuIJbO9WFX+YRyCNi0YUElo5OCzccgEY5r+OXx1pVvpfjTxFpsIYR22q3axryCqCYjDA8rt6AZPBHrX9BeFcaU6VWjUu5RSlrZpXce99b9GrPutj+ZvpL43GYTD5RiKFS1Kp7WlKSbjNK3Mtt9HbXbX5YBmacu5ZmdmYtkkLzjJ9gQBxnPuKYwAIxgkAEAH1zyRxt78Eng98igKF+6eoDYBxtJz1A+72459Tmn4wTuAySAedw2j+6fT+f4V++0oQhFcqSSsrJ6WXLp0tvbbzWj0/hzEzlVm5Td5S1bbbvs356tvXRNb32GqufmAxyMknPI+U8Z9OmOORgdqcQQCP+BAgjGR3PsfyJ7HJpwXqVB2jp2APvnHTPf8e9ABBJPJOcjk8nuR/d9cd/fNadfXW33beX+Zz2S0u1fbVPolby2e92QqeAScqc4wST34XPcc5I647cmkYHawGNuM9fTnqegPtnpxzipiq9O/X5Og57f1HvjIB5Y4OOhz07gc9z7fh+HWrjJa3Vtdr6W0s7dWra7PrqJ3vryqy0t8rtvS9ra9NdGUJQV4HGec56A+p64wM89MnJqofmcjPJONuOP54/kc+9aMqgjPPGAe3Tt2wCOn8R5qmUye4G7PuSP1PTj2Gfrqprld3ZpJ3a6KzsmrfLTT5oujT560Xe15KLVnZ6rV30XVK3Td2TPVPhJYmfxTp2GwBKWfou3GDkEgkE5yew9O4/Vf4YRTXD2cTIyBmEZYEj92mMoWOdsfPJ5J6ivzN+BNmZ9cExBWJZmiJLlShAyuwYP3u+Dx1xjgfq18FrVri5ZdwJWNmG4bsbOy+uQe4GT+VfkvGuJTlV5tXCEWnvrzaW9NLL0+f7RwvTUcFRajG858qsrrRxcr3XVr536s9w1FZkRI4mKJCVUgtny8g/InTC8ZDdW/u1w17K8gmW4T915nyFucIMknI6EkjHTPGBxz2+qyQ7rnyZQD5bMwb513Y/iBIA2eme/fgV5oLgyWtw/nM5dCy7huUspOAw7sOcAHjvnivzzLKU7pqN7TUk2ndNuN93bp+Wh08RtOg4+fMndW00elm/V+VrFGOaKJ4o0VlaNt0r7ifvHCKVwdu/B45+71659T8NXGZ7eRz+5LBJQq5b2EfIH8j3POa8djuIFuN4xI25TlxhgRneCefkXjYccEkYHQ+keHJFFxEIo3VGcOFL8ySOQQqnkqoI4655zX02Mwz9im/i5Vurtt2bs7aO99dN/I/GcfST55Lp5XS7tJ/fv39T6+8IOGNpgF2jVVGV3LKBncWOQDJyCW6cDtmvqbwjMYZI43PkEqJY2GdwD4IIPYPt9CQAfWvkPwjdSp5au3lsELJu6gkLhF5xuYAgc4ODzjFfT/AIV1DAts4DsyAOTgumMYAxkEAAKDgHJGOK+EzCk1Gb0um73dm07aqyfVvS/TofCY695JtbJcyXXTTR26efT5fYvhe6zIkzSu6PHEhGdpMmCGbGDxnHUDOCT0FewWt6nkRK2VkWTy13MCrMMYVW6blydx4x6Y5r5t8KXKrDanlZJGLPGzZ88kjZHEf4OMkHByT7CvaLK9yB5qCJF
k8woG3sjDBCBscMcnccc8V+f42kpTkrPRu3z6a2Ttrfd7XR5N5e6uidn5rRaprRartd/JHten37CBGYFPLZUZc/KSegX1wcjPGOenFel6fqeyGNnYh1USBg2FQcHHPYjvz16YrwuyvleKFgpAx++PXLEgBhkgkjby2BknOK9Dsbs+Spfc0YAKjJ3bW6FjggKcZA5x15PI+crR5G7dN/u6aWXfrfXRFpbb2s27rXWy1726X7tbHudnqT+TGI2GZAjE8cE8llGeFIxjr0JHXnttN1VgDySu1QMndg8gkL3yccj34rxLTtRAi2K2QAuE6uGIzjno3Hy9QMHjpXZ6fduiIQQZM54BUEDPynqC/PH459K55RlKzs1/l53ejs1trre+rRtGMtNN1ppskrPdaO8k+176o9utNTRl27lCEEkHGCTjacZGAvcHqT7GtVLlNsaM+wFhggck98HIJB5x+OD0I8o069DhOindwuMkYxljzyR1I7juQa6yG/DurO2/bjnGQxXPbIIOOh6jB9gOVxXNaS8k9k0tdXrbV9NbLa+28IJapK91G9r8z0966V/V62bR2AvQZAxC5TaAV6EA4AI3Y3Hscdfvd8dPaXe5yR95VXknhh6Y49cHPXg4HFeeR3Slhlsg/dweh6498Hr2BPHUY6OxuQq7cjcSTlickgDkHrjGCMcHqaqFLkbk7b31S02tZvV+e+nkzspR1s4813ttddn0+9237o9It5h5gbOMgFu5J6k5JwVPHHGeMmulsr7MgQNggdU645A4yeO3U56k84HlcF8QCyyHcpPmoScBQeWAycHj1PHT26Gwu23+ZG2FOBG49W6lcnrxyeOMfjXtZQlHZJ2umk9dNHbVXXlrqzsglJrTlXNZaN2tbW/be1++h6JLeYQjfknkZJOWOcn26ZPpx6iqNxeiKLfICCcHcT97dn5c9hn9P+A1z8V4VZy7sUfOSPmIJ6gDA9OR7j8aV7eSSwtGrMpX7pzu+Ude4I4I/X0rKu41oyulGT0jJtqN1bS1uj1X5nq4OmnVS3V1qnq07Xun+iVjN1TxBeRFoULwxBiWdHwVUcjdxwrHqcY9O1eceKfGk40XWFjKtcNZvEBLEWQBgVUoSV9znHQiuk1Z7dg5mAK+Wu4K3LOM8ZAxjjO3GfyFfOvxD8SrY2l7DD5fnouWG4BigJKqhwRn1XBzjvg189QWJhiVG9SfLPSSu0k+WO13ZW+d9den794e4F4nH4RU4ylyVKcpWV2rOD2Vtr77dnofkF+054RvrjVtRuZbCe4nvpXkUoQUeOZizE9AkigAxcEj5vYj5N+G+s6ToHiGOya5jgkhkaOZpD+9GeiyJ1YEqQeQOvHr+gP7QviO11nR9QUwwwX0BiltL6IeVLdzRh9ylskKFDADgjn8T+SWlahP/wALKnlkjWQFp2jEhCjzFIwZHHAAPA+Ukg5r9byiq5YeEJytFwjHXTmfurbbXpora67o/wBSPDWnWxGTQoVr04UqEVa7TnGMUk2m3Fu3pbrsfd3iPxHqEthJc24E4RhdW6tzFa24BDcDHJGCF65HXisDwd4ptdSuLWxu5JGkMjyEEjBlJBjaVSP9WuWGzOMHrxmuE1fxdc2fhoRalYSWpvE85ZV4aaIcLDEwxiKTJAGAG46YrwXVPFV/bzw3uku1jdLlbQSDYCGPEs5JAIbBwSR04wMV69XDe0ShTgrX93l3SXKrvySb6aed9f1/JsnWKpujDkpp35Kqabi1bV29dbX1R9ffEvx2NKgutJs54CqRiS9jiKqsKgFjJGMkHPAA9j618AfHH4tX9x4avJrAxHESgzbv3sSxh87YyPlU5GTv5Pp1rB+InjrV4N17cXk9zcTFIZtzEIwO4EiLOc9lOSCC3pXketW2pa54WfzwJ4r9JEuWVcNFG5BRAp+9gZ3MG6nGMnNd2EpLAulUq8iTnFpaKXKmr9E/w1/E/Rsp4bwWDwlKpVlTnVTUoVJac87KUrtXu3J
LX89D86/Eeual4k1u71HU3/eyythMEBArMBkZ5Y4BZgMMwz2AEFnIYVG7OCdu4cfN2OT949c9O9dd498Jy+F9YWBnSSG5iS4t2jGNsTlsqw6qVPABzg59eeODCAoQQejDqR1zgjOCR9enc8EfXwqwq006ck4ySatZaaW8+19PK1ro7sPCrh5uq3+8TdpO9rTtZJtJJdIpbJ20bubtg7C5V2O11PyE/dPIwD6Z4znJNejx20txaW8roZHKgMqfeQ9yQOWU5Bzx6dBXnOnmNgRIeXw+SckHg8Z+g4/Qk16foZZo413ZA5Cg84x93dj5s+nB9Oea86ooqq3V5nzwsmuusbP7lt267X+qwEXKnP2js6ijK0bpy+C6dt7rfp9xr6TbG3cHbyVJYjG5R1Kgg8sc9AMjBOaTWp5II2IDIw5RXOfl5BOM5ye/65zT5rt7MmVU8t1dgpblM45bB459exxyeo5HWdV82IruZpGHzv1GD/CD39hjiqpw0nGMpKW6cpK2jWumvTs720629W/JFSg1y04Wcbq62svk2tU+mpzs8kjXLSPuUZJHr25Izk7vXqRng4G7F1G5llODy68bs4XYM8AY5PYnIxVwyBFBJdycFsgfKnsfzweh5x05x7onJZPmjLnGRnAbuSOTyOo/pzvGHvaNKWzut2rej17vv0Z89iK/NCUl8c3JSb912bTd9r6K63tfoZyzukyuuQVPJHylTwRweTz17c4PpX2P8F721vzYxMgmuYoD88oBD3HGwAnHyRcgAj+Lv0Pxmcs52jJLZ5+v9M4GB2GPWvr34ZRW+neGdP1NwkVxJIzRyA7ZMH+Jx2Q9FIOWwcgAc5VqLqVIqPKnGOql7qa0vult0v28ziw9S2CxlGUpXrOKpOKd/aT0SS03VnK3r2RhfHuS5h1BI4siN8eYoAVEcg8BezE/Qjr7n5mjvJY25JGCRwPw5xwQehzjvycV9HfEuYazZz3CzPdSiUs8mdxIQ4QgbeNuWBH06d/l66d0ZxkqQzDB6gjggZ79cZ5HtzW+CjGdRx9zs2l1XLs+r+fTRJMwxuIr4PDYed505UlySUXZNxjHV3ejet7u6R3kGvRRxxb3OQu7KnjnjAXqGwORnAz1rmvE1/FO6GFjKWVSzPwQAc7cHPQ8kZwB65xXPJdsQVYkYyQynJG30/L/AB71VmbzzksScYBbngDg+mT9OePQV7caLi3e7Ttrq76Ky6aX8l6M+ZzHO3i6EoQgnOSj7z+KLXLdpXs166O+txqHzmAIzzgg9j0PHXr34/LpqCPy124BbGVxyR09Pc4PGOfqKrWUCly+7hTzngn3B78/z/LUdULjbjjjcSAQRwcZyO34cnrmlKO6srWV1rdPSy2utfTt6YYPDzeG9vOyqylFp31aTjdLl10tf73seieCb+ygtNSa7kRJVgd9rHG+MDG2IH+MHBU57/UjlI9cmbUZZo5cO0jOgJJBGcAHHGfX6DjHBxnuJLdHVRksNrDqMHI7evfk5xzVTTx506qcKWb7xHAIyME9gOPc57Vzz5oxd/hW1tXq1LXvrZbK+13serXzirSjg6FNR9rC7q8ybcrqEeWTei0vfa762PR9Q1NLyxUyoTMVAJDZ2beQBkfcOeOe2Oa41LhkuFkiYIykEY/hI59eB689OMjtvJaYt3iEgBOCEP8AGRu5DZORjlcD865eVGilYfd+ZiecrjPGM9QemadCClCV3qnaPN/LZO709Ve7e2p11cZOrKlXlT5ZR5W5LRtNR0Svpa93r1+a797+6vzZSzOJJTGMsxxkJjj3JyB275rd03VbvTpxdmSSI27Axs33Acjd5YHA4Az1HfHFedWF624L97bjPODgHlVJGADwWx14rtGmU2Bfy4xFIuZNp3MrjgqoPc54weuc9sOtecXHZpK0n1as7X7J2ta+2p7uHzCMYc9PmleKSV7ppJJ6O93unfr6n2JpHx20lfAV14Vu9Bsb/UdQjVI7ryhJPE/8EwY
4IJySEzjPLN0r0X9kz4+aT8Lfjd4Q8QeMbSObw/od+s7WlzAJIJArxsgddwDEYzggbTxls8fnRZavJo94HQw3VuQrqjtlo2GcSK2TtYZ+7yOR2rXsdQ1PUb+5ujepHIxMojjAVpwpyEViRkrwAgAzySfS1j8VSqUK/PKNXD8jglBL4HFRs1ZyutXdNrW70PBx2T4HOMDmGXV4NUc0p1qGIjzycoxr01CclG9otJvVJW6a6H+kZ8Gv+CpfwM+IHw5h1m31Sx0h9D0+C1l06VVije6WMJBDaLuBMUmMDC/KRya9zt/j9pPxP8KtrMLiws5bZ5lt5blGVbgKfs8kU3yh4ZNpJJUeXwCDwR/n5fAX4kT3fhqXTn1mbSdS0yK6lsoxIzRahOgQrFKN6hTxlBh8HOPf9KP2f/2uPGVh4M1Hwle+Mr21sVjka4t7yctd7AG3w28rMCS/yKsIADA4Egwa/VeG/ESvWnTp4/2Tg6bp8sYxheScWtLW0097du7ufw9xN9DrI8uxOIxfD+Jq0q1PHqrJYhSqctOrUjUUqUre+m21eTZ9u/tA+Nde+J3xi1fR7yWLVrLw/dm5s7aNfOhjsbRmaWTcMoZsFdqY5OT7V+N37c2peC7vxBb63oMT2uvGeOPUhJw1zOflkAiAUARheo4AP3cV+knwq/an+Evg3wx4tv8AXrG0u/FmoPdRTTXgE000LArbhFbBSXlzvDMF7g5r8L/2j/iBF45+Kmr6pbEf2b508iICPs1tLIxZxGAcPNjbtbHGCMHNfK+IuZ0atCjKjiVXqYqspThG3NRpxadtHpbTdu7S76f0z4Y8N4jL8csI8txOAweR4Khh6OI/h0cZVUaaqTUFupcvNdq0npueZ6xPbTQS3tuhIRFW5hY48vspU45k5Ylu3GB2rC0+5t0+0RXUcjWM8ZEUUZ+5K2SBz0BPVRkdeMc0q3tlb2ZicKFlDNdTB95cNyoAAGH4YEAnb3ODmoraKO72R2q7EkZJcFyzMwzkt0IifPTPGOp7fmGDm1OXvJSUbpSur2s+VdXdJu1r2d3oz+gVBWSabV7LayVle9tnZaPVWVr2OK13RZHW5miQIjFnTcucgZwo9CR15rzBna2lZd20jI2+jZxk9hnj+Xbn6hl08PYXAuceexJQL/q2QAjaWx8pbsuOcdR/F82eJI4rfU7xEXADDYDjB4PzHnH0/Ptz6NPmWrUrVHqmo9bapb2st2lsefjnT5ZTp8qnT3k76q66rstLfjcwLx0l3M5zIuT1+9gYHboR2yfw6Hc8LbzexIYw4YMFQeoxyexU8ZGeR9K57Hm5ypyORxleOcEnqMdPoMkDBrv/AAhp08ga5jjyI2JZs9FXHy+mTnjJBzn6i8ROMIX2acbc1kk32u7WVvnqkedllJ1MfSrJK0Wm5RjdyaabUvN/K35eitcW9tp0KFeB84XopOcfMucgsenPPP4+W+LZoriZZE3kuPnY9C2MAA5OSB1Ptjoc13t9P5gSFxtEQKhQMKV9ccZPfPQYPrx59rcMbM21SoRmCHAxnuOpPTv1P4VjTqxqOCUbS5o3aSvvq3bRLfWzfkz6LMuSrQqyXvTcbeV7q6SvfTu779NzjQFIKp9T0Ge/Jx16ccZzWlp8LqhYsA3J9cjpjPp2+v1qrsRDgEjJ568joM8jGPQ/ia1LSRkRk27lZSMe44B9Rjrn3PSuycG7JNpO0rt3+SS6b2/E+XwdBe2Up7wVnGEnZareN7rS13dX13VmybywgGSrLw2OmR09ORnHTv15wa0bRg5Lcj8Cfqceuc8c45NMuH2hlLE55654I3c+vHbHHXiqZbIG0EE4yScZHfrngnuD/UUJXjby1tpqrb72fTXzNa+LUaycaUOaNrv7tv5r+nfoPufKkZ2bcVH3euDj8uOT6HpWeAwxyBk8DPXnHPTHfP61cxu+XPPXA/H+X09+OKXywSuQowc8jk+3484459eKcXbZp27WuvTS731fdX9PJxM
alWaqKOrdpcujd+XX0S0e7V3Z6j4IS4XbhnJOR257A9h1zjpnjpws8bRsQqkHkYJHyg9Of054574q5bRozgsMFQQF5JPQFh0Gc57+nQ1JcxqMAEtnIMnG4tnpIMHkHA9D3FVGo4tO6s2vVWS6bXez7Wuzqlh39XUo6vWN03dNJO73eva7176HNTRnaWIHfLAcDPTJ9yMDntkdDWBdDGeOnGPxOemOOhx6e2TXVTrwykkgj7p6Djg9MZx9eR75rnL2NQ7EElcfMckE43E9ug4wO1ehh6nvWvo+unle/Z+e70vY+Jz3DTVF1FG/R6e9ZtXa0201126bIxyeckkd8AYX059/THX8stDgcng59CTgduw9evfvg4qVgrFsdMdCep559+vAwOaqk84JyM/XJ9cf15x17V6Ub2S1SW3fdLR+jf36nwFe9OzvZN6206Ja2Wiu1tp3LodAp+YliMbtpAxxyT6jkHjpVSYKnKkEEcgg4JOM9eOBn6DpT1kAGQFYhcdcdeMg+oxnODg+tQjcD82OhIJ55znOR1xkenPfHFdFNu7XXza7adPW/Te2x4mcRjOgk0pPdRXnZLXXvtt+J534ttmclmyCrDHoo78YGTyBng4OOK88ddpIKkMpABJ7ep5PHX3wTk+nr/iuBpLZZEwRjkqASzHIJIx145+g/HyGXeTswcbiGyTkjJ6nsPU5Nfa5PUbwig/sS1W705eqaXn3Wt1uf58+OWTQwXErqwhpXgpt20u3F2VrJ2unr52u9BYgwYY+6e/ufTvt/mfWtZAAvOVceoIGR6dAemM/h1zmlAD93jgkkDJCkYAOemB1APOelXDuzglecHkZ6ehHXGOo59a9GpJej8tH032dlZrpufkmEwyjrJNJtbLd6X013sumui30Jo93HzEnPJPTnoB9O/rnHYZuZY4HACn5T79jjnJP59eAC1V49uVOQTjp349R29evp7VNg7iyEhcsSNuDg4zx69+OmTyetefVbbtona7uuum7W9t/vv3PqMDTdKm29tG09nor6dHfS3Vd9y0m4Nvf5mAG09MZ5HsADgfUVOjFnBZgFLDK9c5OTnnB7fh+NVUdSCpDE9dxyCOvHJ4Hf2ODjnkBYNgYGSR25zgfe557gcexxkjCzd772T0ta2l293trazd2e5h5aJrRJpXWt1ZXVvTdd7robKBAxAZsM2VwflPDdhjBOOeTkemK6bTJiqqwHlspO0g5JUY+96fjxn3Brk0csYw/30TChB04xkevByMnJyPWt/T5UXAJUMMAE4yc9c9MY9enYjBrhxScqbvrrolf70u2zT8r9z6zK61q8XdJJRbd2o3XJa99b+S6a9D1iwulaOD96hlCqCeu1vRicfy655HSu0tr8sEbakYCgMdozuGMsB3zjOeP6V5FYufJygG5SCMElmAOScjsc8j0xjoCe800yExNlpE43qR8x9R6Y6H37HtXyuKw/LfVNX0vZ20TaVul+67NdT9CwlbmjGNtWlrFv3krLd30aXXTXTqeg6XLvnBkdHXdw2eOTwFx909vTj8vTNN8pPLdSW3RkFfvDecYweBznPGRx+fkFjKqSjaRhTwc/dz3PA4HIP04616dpFzLMBuAYKQwZQNuxQCcEYw3r/e544rxZ6OTb3cdHo7Ra6aeu3n1O2VS6upJ2urPR62auldaeq/y63T0livY3QFNxPOegI9fcYA7dPrXpujpIJ452lYMmAGxxn3IyO/oR19jXD2BgeNJHYoWxuHAKgcAjufbn1OSK7/TZEjjQwLv4wxYfLzzk9eP557Vrze41dPRKzSb0tr3Tv6dPnzuu3JLX3rLWyelrtPXz31PSJ7gSw2jRliw2+ZgfeYngY9R79PQ81rLFLG8V2m1X+VcjAzkD5SOpxt4yw579DXHwPdOyJGFaUlSkS4II9VwThgPU9unAz2NgkcsLw3DMkqMGCliSpHU4xknPU9xxXnOF5qTkn76233jfTdvpr07sVTEpQfLK/L
FRcbO+rVntorW362XRHWnWDb2yg7UeNAJCrbTzwGOM/M2Pbpj2rm/EN7vexmbLIkfmsjn94oHJYDI4xyOcAE/UZ9yYE+2id3iRI/NLMcq4UHkHOOM8Lz1+mfEvGPjFrLTdQ1MO8trBBJAoLlWKY4WNQD8oxyRjPI45Fe/llBSqR5Vo3Zpqzd+XT53fZavzPic5zH2NCrUcuVQg3HZOSja7Vt9Frfa58V/tjePYvE3jGx0qK4EltpVtEhiixhJVBG1+cE8YbpjtXxp5kbNtjyCnBKgl2bPGc4zj6g8n1xXY+PtVbxF4k1PVyUImuHaI4AUR7iNgOTkL3PHJJGAa4aCNVZmUgjOS2cfKemOuc46cE475BP7HkmHjh8NTjB+8kqk7tX1UbLp6P10P4W41zWeZ5zjq8pKcZVpQheTfuRaS6rTfVO3XoW7gtlCCgAUbzkFmY8lgMfeORk8dsc80VCSpJA3YGcsw46HHPqQeeOn40V9LTprlTe710st9T83nKXM7py89dvx/r7zyUSYbLHgAgp0VuMDHzfjjGc8Zx0TeXJKgEZ+YgYUu3JJIwMe+PYYzRtUhsgqw7t3J6DoenVfzzSIGBJYDacfMOABg4Jxkdc9BkjjtXnQoTbTk7crTfZqyV/VJ3X39NeuMrJ67LW2iteNnbbvZ67a7CjKlpH4OcFQcAng8cEBRgZA+vc1bt7l4/3ikBgAylucnsynOBgd/wBKjEYZWJClgAQASF/3iQOeCOAoyMc4prxsuAVKNldg+4CgHBHXg5zzyT6VValGpGS0cnFrXbp972V+nQ2hXknpdXaWis+ml9NNNGunodlpOsT27BfNI34xs5x1zk+/HoeCea9t8M+KLf8AcJLLuVQyygHC7uNhBOS3Q5GMDHXmvmNJJISZN+WATgHO0nPPqCO+OxH1rd0vVpLOdTuIOCVkVvkKnuwHBJ6DjIwc4GSPncXlkarlJRTkoLRWSctL9m3vt/w/q4av7ySblFqPNJu9r2Ttd9LLt5n2VLrUdwimJwONxBTnLDDY5O5jhSp4xg9c4rndPlNzrdu4xK27bncAilOQGBJG5ecgduPc+PW3ii4eIFZ2dGITYT91B95Y+SeScE8AAV3XhW4aW9tbkyJu34Ikb5gWIJ2D+GQcDJJyDwOmPNp4J0ITk3blso20ad10v9/l5n0mElTnKFl8Vktrbx1aXmlu+nyX3F4Hi3C2KvGqSrFIFCnerjrnrtH9ztx2r7W8KRBzYElPKitw5Yr95iAMkZ++D91iTnJ+UcCvhrwFdZltl+ZmKrGyuwxK+BhYsjgjnA7+wr7W8C30kgEDRgKiBtoIdti4yAvHAxyDzknp28rHqVnd3er2bum10Wqtr+L9f0jIpR5oK6v7qdm27+43ptZN2Wq+WtvqzwfKIhbo4Bt442HllSGMpwFzz1GOepJ617Roqu0kUyy5njUgxuhKlWHQ84Zh6gcfmK8N0GaKBbFwQqsm7JYsNxwM7QOpPGB0wcjkmvYdD1JLeRZXAZJCAksgyu4kfKuT3GB3Ge/WviMdTm7pK8ney2fS/wCmy16b2P0/A1JRpwjFbWbl53Wml3d21320PWdNsheJG6MxkZgsqMnHykYYeg64OPQnkcenaPYzMYrdYkdlKkdNxOckA9R06fqa4fTbyIlIYSFDwrIzIMncw5C4+6f9nPTOD3r1LRLQFLQpI++U5WYk7lxjBOR1/HkdOK+UxUUk+dSU7uya00tdu2qXmvPSx7MKvuqU37y1S21stOmt329D2DwfZHz41k/dxxrueJ+MMDwRk9Rz6cds5Nex2l4sc6QxLnzAFaTGSFxxz+fOMHg9ea8y0yzktYLaQymSWVVcydWKnqrd+MbffvXomm2+XScuBjaD75HcE449TjHdiMV4NWLbbiruS1sl5J3trp/wfTOdWF1KScpSdkm21Ha1nq0lvfTX009I0u3wyypIytkrkDhj6knI2nrz9Pp1+5UYLktEoUkqf4l6s2O
q4PPTHGK4OwuiwaNQy28DFncH5pN2OFGenHOCeB+enFfS72AO22BG0McbycgqG/L09s4rza0WkkouTTv81b3WmrpLXqxU273u9LLztto76W18raWO0EsMYYeYwL8RqvQY+6c45D5+76jg9q04WN0yoZG342r5ZzvK4A3EdDzx6H3wK4m3uXlZWbBhUZRM7vl4JYEYy3GScdMckV2/h+KSTDRIfmZtjHOc8ZIGAR05+vp15KcauKnGlCN1KS5uXmdtV6Lbvta3Q7UlTg5yn6apOysvedtd106a2Oit9EhZYpZYxLIFIGMsSR2Y8fNzzkdOvPTd/sM3Uca+WscKD5UC4wR1dh/EfT7uePpWpptiEg86aTknO0c4HJLY9Cf/AK3TJq6z4ls7KE21sSZ2IJI5JIwGI6YXnODn656fUUMnpQpe0qpU04q6bTbfut3XV6Pbq/KxxrF1nUiqUuZ82rTdkm4p6X0SVlfVbfPmtagtdNAQGLzGU42HJPpkj7rZ6jn065rzTVfEQtgRAM3YJCDbu+UYyQe+B3xnJ9CK6y8vku0ZriQB3OMn7wHoASevcdAMjr04W9S1ErqB5pc/LJg7w5BJGecDjAPT69vLxlCNFpUrJaRitLvWKeibV3u9NPI+iwOKukqnvtS0aWr+FOz+999L3ttzlzqt7dptkupCGHJydyck42joeeOPb1plvDPsHlXMrEsAzsSrKBn5ee/tWvb6XC8ikQM2SCWflieRgZ6YPHfrz3I1xaWsZKyoyoByR8vz8H5Tjr9OQSOazw+Glq5y10ttZXs72T7LrZdz0Fi4x/dwUde+3SyV9Ol9vzu+Xj06688NHOEwBvYg5YnPHJyRjr0xxycVtrYJKqxSv5synAVM7WHYEZxnggnOR65Jx0EdvYtCszQurkFRhjw38ORjkkZ6epyT0rVsY7NztWBVmByXJHy+5yOpA546ZHpnrVOMWt59bS05nZPo72u9ra3t0Q44qT1UWraLRL4Wr63vZWd7dNeyOebw7FfW0cUMCCaMAAKOUx/E+O+PfqOmBVVPBRh3EwLHghjNuwrE/ewDkH0b19BXolqsUMpePaA4w4zldwHJHcHrgg1KLUzzkeeArEt5ecjGeeB1BOPQ/nz10acZTspXfu7NLWUo6ddF2+Q1ja9OTXtLQaTfNdvW22+2vbTRKxysGmWMUCxZkyqhQT8qoScZwOvHftx1GcdXYaHYvCqBE2bcSM4y2SMggEcnP6H8a6Wz0e3S3DPbIdxA3sMy5Y8MvHAxnH19emoLC2jiZordnOATk8DGc/Lg8g4G7P5Yr1qWHdOEm4tuTvq9Vorpav1skvTUwqY5Si4U6lSMubWTaabdlvdWW9vNa9DiP7DisJHaOF2zlk4O0kjOAcEZGfTuOo4rmr2a+hkEMm3Y2VRWToDkZJ4P04GSOvBFeteddXCLFJaqkScQhVJJ7ZY8HnHp2JHrWDrOmeeI2aJSVIIKj+71Bxknr7V5+Nw8qvvQvB7y91xvFWununazevy8u3B4905RhWUKmkdXq4vSzV3u0rX6+jOY0zSvNRCkoyTudG53M2eQhznA9xxz14o1bwxZi1nNxBiOTaS8ZAfeR1BwSvXIOT+Oa0YgY2Rk3RLGSu4DIz1GVHJwc/Nnr27DoJIvOtSl3KrkjepUc5PRgc8gdDnONw4xRRwtGVNRnFTla0m7NNt3so28tdr9NzStjsRCtCpTqyjHmTlFfFZST1stbLZNtW6M+PvGXhuK1FyqxNNZuGVJWTcyg53h+T8pIGcHGMd6+G/iv4AhNvNLp5lEk5LLbcjfKS37tDk5ixkkDpwO4r9bdc8P2l3ZTBRGrxozPG4BEhHVjxjAwMEH1Ppn5D8eeCri4S5Q2yRl5Ha2kEYO2NeQdwxgkdFIHXGa+dzDCRp1uaKWibXKls+W0ei87bq7Wtrn6Jw3nvMoqpOVotc0ZSs5K0dlfXrr0W2uh+InjvxFd/DfSfFv9rNK2nHT7xZGfJA
aaMqgjU8LtwcfN0ySa/ll+Is0GoePfF+qWbQtbXmtX1xbhWyFilk5Oc4BPOfQjHXNf2YftV/Biy8T/Djxfo8MLprd7pM5tDEvzSTJG58tj/APm6nI6H6fxc+KNB1Dwn4x8S+GdYhe1v8AS9VvYJYZ92fkkO0hmC7gw5DY5OcZHNfs/hjRoeyr1YySxDSUqemsU4tytsr23sum60X4B9JTMKmOoZRTVKfsPaVFKai+SFSycU5K9+az97a6tdmNEi5zgsfmY553DHJXOcg569OO3SpXXOAozx1xjI9zjg+vr74BDIGCts27ZBkYOQFXIxweBwc49hmryAljtPyrkHOM9Tk9uDkcZHTuOT+yRly/D5b36L12vrp1P49nDlslyybsk2m1ra1rvZWtr89imI8KeCeSCByO3Bzjg5H1HvnDhHyflGccH+6ewOf4RyB19z1q/hcMPkO75gSM5PYscjqD2GeM5qVIdy5Lc/oVXkgdye4Aque3Tzduzas/x1+85qsJQ+Jxbb0SaWvupq73d9W79Puz4rdl5IB68YyvPYD0GOv6c0jQOc7U5A5GPxGfbB6/5GxHCGwSG6EDPAB4GAO3ueOvrUwtByyq4yMkZ9Tj0555z3yfeuf26jUvfXS6bVtHHb112dtvQ54u1rrmle2u6St9619NdXdnLPZswAZMBjk55zjPAzjr9B9DUD2hVWBBxyAQMkAHocDrz0Hviu2WxG1wykHAKnjjHpnpx+fPtjNntipUODuDYHOHU+owuCc4x8v3Sene3inJNJPWLeq120svuXrfQ7qDaqQdt5xsnZ21Xz63+VlY+gP2ftKjjt7i427pWEjxllwoYYALMSRwDxgdSeAOa/Tr4IQLbQXkq4EwhfaXUBEdhhSz9QAM4JBB6nvX52fBNJbezEYIZCrjyvunkjcQccEgDceM4GPUfol8LN8ek3DsBGJgzJIGy6xqcFM4yIx0U855xX41xNVlXxGITb1fLrokouOlk1po1ZbdHe6P23h+jbCYWKVlFSmn01inqlvq/VJb6nT6m2Lm6LnLSFlYxggvuzsC4yW6H0yQM8CuVuneOE2oTBfcRIGG0SqcuAOAMAgEZPT8K7i6hlYy3BdRKpLpLjeqouQFydoVxk9j1+orzbXhOP3IkFuCWcEnGA5BYqMDLSY6lucDoM1xZTTjUmlaziovTa/u62Ttpq2rX87nn8SSkqDbUt2kr+celt273slolYy7VlAlIcFwpHC5Vgx+aXBJ2kcYHPU4wBXf+F5ZFe3k8xg64fPO1Ig3y7hjCnnAGCeeuOa8vtJmMrBw8cQcKWjYkyIc7mTjpkfMB6nk5r0Xw9dBpfs8e8x4yMnCFwcoCO5HOeRycY617mOi3BxiuZ8urtezXLdpW18j8sxb92StpLq7uy/Hq9bfPW59eeE7j93bPIJEaVBiaU4LNgbSAeoPY9CM4FfR/hG5VbbaGiaaN2YuV3MQoG4gkn92gx0A5PJzwfk/wxdTTJbFgJFjCgkjG1VxuAXIPy8Z5yAepr6E8MXcsLl1/wBXImyQKcbS2AM4zywHI4yBkcCvkcVh1KDsk7pu/ndWW3d6pu73eh8HmNNc0rbXdkknZaXb8rt/jqtD668IXBW2trlpEUqS6ODuZQD8qjOME5O0Y9fx9mtLvzFt2WVTsBeXA3DzWI3FCSAcYHJHOcAAGvnjwpfJPGsADHYFeOQHCs0eS2H6qnOFBAyc4zXr2jziSKOOO42EAbkUYYOvUAgneSfvDAPSvzjMcO4TqOzl99tbLWyur3elt9dTwpJ35Wua7SvC990k2322dr/JNnsNnqLYRRIyP0+Q7xxj5tnGGOOGPA54FegadqxSCGMthhsUbhuyWz+8B4yq46AA+/FeGWV+FlUFywb5XVPvpgdCfRu57AYrsbLVJUKKpQNtWNRjBKjPKk55HXPU4GD0r5OvRkm3s+qaupa7L5fevS61p+6kldO9rNfy2Sv+T8td9/etOvihikDNvCk
EqMAyfwZbJ5zn+HnOeg57qy1EukHz4lAXzyrFgWbqo6AbduCfc9Qc14dp1+UjjYyNJtAZju7nkr3Py5HGeDnmut03UssGLgRscgZKtxxjIzjdnGeQe3UCuOS5IOWruna/Raab6dem7OyCSTco67NrXe1k+6bdtLbdz26x1B12ncGTByN2dnbbnHJ9cdP1rrLDUC0hUEKoBYFudp9R3x/nOa8btdSRRCSwVC3PzbvNxjCuOvzfXnFdhpt+rENt2lmKD+EBeNp55JHr0/DmuFyfNazStddbvS+it57afcdEYPlStZ+67dVe23Z679fmkesWt6oBBKckgEnIJOMsOeh7ADj2OK6KyugQCAAzA4Jbgn1xjgj0zz7V5ta3CsQxwQvDBhklxjkDdgKOec8nBNb9pelGCsQVDHA6ZYY9uT6cD27irhTcnZXS1d7+SXrZdNb36aabUor2nK09E9U9Ha11p12vs/TQ722vCJGdztXB+YDGcHuuTuHcHjriumt9QCwYUFQAOCMDOeqdwTnpj059fM4LosUlJdhuIOWCn2yPf16cZ7Yq+2qSgMqyeWwbcFPQIcc4zwcAc9Octk1lVhKN7637paJtbPe1td1+KO+hGyu27Pa9tr9db9FfXbz1O/W/eNmLvuVQeR905Hy5OcEE8Ej6/SIarkSguEcqWQBty887STgEdAB6/wB2uLj1HeuMlm2YOACSRxkg4+U8gDjjJJzjORe6utvG4E6JjjcSFAbn8cZ6da8ycqsG4L3lJ2V03q7L1T7etj6XLsHUnKCjefOotOPvWTto2tbb33fQ19X1TyoZQsw3YJYlcqrHHUZ5/Pj8a+XPiLsntdRuyUAt7aW6unJyFSMElv8AYweijPPHWvRNb11Ak0YmO11ZpnztGM5Y55+X0we4HSvjD48fETVtB8P6ydNjSQXFq0MxdtsQtmDZd3PQOo5OOCBwAa9TAYV81Oc4y5nOMdNXdKEldLVpX1b0frY/qrwhyHEVcdhVCLjOpOmpOVtPehfe3S7/AAeh+afx1+JWsajdahpUTR21tb6jcHAcCWW3U4SVW6IGGSDk5A7dR+f1rrt4fH8FzcM0lpDcZAgbK3IJUhpsdY+P3jE+hxya7Xx/45m1O+1hxNMhub5jMkwL7LdWbd5X3dsI6g9xxim/BjwhF478XuJTDFawL58ZhUmO9VTlmRif4sZ24OOeT3+/wuG9m6bsryjGKct02k1fVJLfW11qz/ULhzA4fKMrpyqQUF9VjzytrZxineyab7X97qfQ2u63JqWkJdahMZh5ccVvGy4gjSMfurWBf4IzkndzjBA68fMfxC+ItvHBFpEwS3vY5V8ucAAqpOI4pHyMg87flz1+p+4PiN4f8Pw+FNUkSS2tJdH04xG3ACC5khU7XQ5wHUkEjqxIwQRX4t+MfE9x4g8RyLLJ9qjtLma38tjl90ZwgYgjLJ1X0zzya+zy6lClCdRpuU1KMVLVJtJSt2V7K77J2PvODK2GxLlOEZQVKo1aUXG7k42a6NW1fxXvozpvGHiObU9St1+2tJcyKmHzmBMcbUGcNJ2LY4zjHQV7jo1ney+EtOjOJStuZQV5mVMAkztx5jDsxA44218im6tpLuIPvD2pKyyEkEuMbFjc9k78HPXrkn6E0XxrB/wi4sIpZUukKqs28k+UudqKcDPB5x1x161z4yg6jhFxacIylqk25Ll9297q6b87u3kfqFaE50aNLDRU3GorJxThFPlV3Z6Jtbb203SR8vfGtHj1YPKrcM8cQ+6qoCMhRz94kdhjnrnjwJ7hW524I4BIzyM9c9frzj8RXtHxj1IXeoq4na4mIIuA/SE4wqgdCWGSxAHTvxjwoZK72wASSF4HGeg6YHYfic17GXUm8PGT01sl1W1opb9+lr6u2l/NzbE1KNanQvG6px54qL5U42vZvez1um/Q07W98tkO7DI4IHXj168cf5xmvS9N16KOSAxukZSNGY9RIectjI5555JGB0r
xhXO47ARhiVBORgkcE+vHpzx0xzdhu5VkUkjarBTzyp6kL0znv9ffnevg4VZRkpOMoK+9tXZ6Lq9H97678eB4grYbl5qftfeUV/hTSeuzv21013Poq4aS8tPOXLi4QuWBGFZOh6cKM8defrmuHuLZzExY5YEl8nk9cjpycgdv166nhmZ9Q05VRpQ9swQ4bmYHqoBGAvAGPUe9U9RCxtKJVkCqfKYE9u4Yj+Pp04+vWvPg/ftzLmi+Vp2d7NO9r6rbddGnqfZ1pxxVCnWjP2PPBTcFommldPWz/Bt9LHLtN5AdSNyklRkYK56Y69OQPx/Cg0o2MnXdyp6lW7ck8fpycnrViRDJId2OT8uDztGcEnBAPoeTwetUJBsJXbls/KQcgDPOef8AJzmu6PvL4dE9X56bW06Waf56nj88oN03G9NQaVR2aequvvX3dVoUGISTaTkjlj2564OcevHb1ORX0B4K1e41PRYrAKUFlHJiQcb0UruGRyduR15OTnOc18/SojElcjDDJ6Ek9j+R9RyfSvafhWk4+1RIzsJVxIgP3VI469Aw3dOpHbBIis+RpxdvaRdOTsm1zW7+aV7Wdr2b3c5a5KrWg4qcI/vIu94xknFxu162Xdta2PUrN9OTRbm1ubZpo5VmLSMCGZuNg3c/dPPHfHvXyL4lRI9Vu1Q4j89xk85IPcgLk9AcAck19maqLSw0ImNASX8p4mGCgfIPHUt1z06Z4HB+Q/FtrFFqtwYFYxtIXG7PG/JOex5PHfp1PSMHF0pxitXzOUrPZvl0fZvo+n58+cUlVy6tVjGXtJVua823Ze7st9b6pXulucYzjcoHQ4z0A/Drj1/xzTwoLjHTPGfTvkDJ/Dv79abJEgI2n3PXjr0/p6d81JCvGcZAJUA9c/h046Hpj9fpYWcVvqvhlpvZ/er+bZ+dxU3WcJqLtJPmi7pJW08/RadFqa9sY/lCdgR0xzkj3z0/D6dbjIQM7QCMHt+h46/59apWzIuQflx/TuDnBBGOh6+2RVh5iFyME9D2GQOhPt1P4c96xnpZK+qs9NeifXV9NdfTr9bhalNYZOfKmk7RhZdk3Zpu/TTr6EN0+FYY25HXIySoOCeeOvY8Zxz0qjahWkDhyrZI2nPOepOOgPvkEj6U2d2OTkEE7cZxzjPX64yR7Z61BC4TLHg4PH4cDPTIPt+fUJU3Z3V7vorvpo1dr56aeV0vCxGIhWxlOUlaELSd2ld6W1Tvvpa9vLc9BgczwRbWUyRsFLBjxjpz3K4+v4msjULZ1mdpQQRjk/dPUcY6gnkHjOD1qlpuoeSVVnCrySOu3cQT754GDnPv2GpqF2tzFlCHCcMwxyBkjjk7cdyeO/Wq5IxTbSWiWujd7J8tlvby2XfU+goVsLVwkZJ+/FXtztv3eXSSe/ov0MJZHUsqgDnPXnjuMZOPU/1BrobfVXjtXhMqlXUquVBYM2NpzngqM8gcA4965IytvPzd+575IwRjn+929u9SrIzDnnnGVx07Yxxzz2/LNZyhs3bR6eT9bLpfey/I48PjuSUuVSad1Z7RSa2Ssk1vt6NWNdHHm5di3J2A8h/cnOfrx1z0FXmv5kdSrFWUOQF45OMnHduBk8Dnp1rDjZjtDAjZwCeTxn1698D9cA1qpGJIwe+47WH3s55yO3vk8AHPbPNVjvZ2bVk+l10Xla17b/I9bCznVpSVKbjJtTu3Lmmlyy5XrdLzt0t1Pqj4BXPhzUtQhstY1qPRL1Feb7TMxEUsXV1YgqNx+XbHnJ56YAr6Hn8S6PpWpanpVrqa30DyiO1v1wvl91leMMSwyMqC2eAc1+bMJnt5VeOSRGTBVlYhlYbsEEYz34IHGM4IFdn4d8U3ml6lBeztcXSwOGEDSECTPVjkNkpjIHPsKxVdQhGCoxjKLbdeEnGTulumrN/4bbap2uetCH1ypCVb2t/ZwjKjJxlTlUhblmm1zJtW01V1c+9PEdrbxWFpqMGp6rc3c8MP2lb
pGizLLuy8GXIe3QDKqCAM4JzzXzV4shtLG/eGDzpoXU3Pnz5WWRX6MwDMRzkAgnjqOc1c8Q/FOfVtJtLG21Ke5AhRYVkQRtapzvi3ZJZgeFPYDPJryu+1O5uVieeZp5lLJGZCXaOLjC4wMZ759q4cR++aXK56WU5Su7q2zvfVLTT5WuepgJTo0nGu+W0pLl923KnFxs1Zaba3s7adtXcZC8OWkiG5YNw3PGHxuBOec4G3PQDvkGu48JWhkuUgVlchGYhgE+TGcAFj07AcjnB5xXnmj3R84AsXZ0k3KTuDO20KcH7pHOOe5OPTr7Ce4tJ43jBWUOAHLY+RycZ7MB35HbsKyhR5Jxm0naUbp63ta6vuu19N+p6VPkqc8Iy5eeN4y+KzajZX9X07uyR6L4gukOnSiGFIDEjl7heBcSAfKVB6E45Ht0r431t5H1CbzVIYTMpzz0OOSepx16kk9ulfUeqyPLpZaZ2SF5DvIOFYnOUQD7gJxhjkHPIx0+YtaRmv5lZQoSVlIB4yM88jLZ7k9fUDFevK8nGTjGKXw+jsrJLTTpfX77PxMwpKOGcIXu5q7Wqk7x5pXd7K9nb5XdtM+KMsM5IVecZAAHGcA8Zx6du4Feq+ELqGCDyZGJjCM3ycK7N/eHp93AJ/AZxXmkS4jOFLBiAVHP0PTjOfc8dea2tM1Ca0JiGAkhwwwDtYE88AHAz7459DXJiqblF2t9l9lq07tryT76PUMtnTw1SPPdXirW0TknF7drq1rdL20O3vpBLeeau4QLySOTkZye2MYyR0HtzXOa5Gk6blyAOhXqz98gLgnpnpkZ6VpQ5dBKJfMLt87KfkPHIA9Dnp0B7UyexNxA7KNoG4oCcqpX7wLenAGPxrjoRjCacm7JaO6S89LXvfrqvvPVqRVR1OdWUn7qSj717W2V29b2308jzWaMhxxk7iCOnA4yARyOPX61dhbEDKGK8HlfvHHZT6dc9uORir1zYn5to4yeWPIOfmycDjpj1PQ5Bqn5UaROWyGU7QMdcdeeuBweBXqKcZcrWuu129dLdlt803fufPLDzo1Kt21Gan70tGrct0mtVbtfySe7x3ZlOcjGcEZBI+vTPPU/gRT3YBBgFs5zgY78ceo9PqM8VXnbL7U6bs88D3z06c5xzg59KVC5U7R7HnPbI7c9SPUDj2quVNNar5+fa+vR+l/O3jRqNTq0XJ1HspbvdPTXuld37Mlt22srMc9Dz39j1ye2evPQ1aJDMjDpnGepOOAD0yTgg8D0qnGhAywOQThTyNv446Y/A557VciKM2WYKiMSUAJJGTkj3/ABHU+lZ7J212d7axba0vpt5f5nThpuUFSmuV62vu9I6q/Zavz9C9ExU/w4HDE9s+noTxxz7ZOKurCskfGQWy+wYHTqc565IwAOeelUQYiSylkQk7SRyQO556k47Yq6HJi3rtKhTkqe/TJ5O0+xyOhJ71nvFq+kbNPp0TtbV7X30/E9KFOEKfI5pqyabtbVJO9+u/V3dum2LfRhXIwCBjByM55HzcewPGfyxXM3gChjjqxBHXO4ZGOuRjnI6jr7dXcONrMyrkPtAPzMR3z29euM4rCuVBWTcB935B/D7cD+IDgAkgd+1deHlZxSbeqevfR3S309LN99j5TN6Cq+0jTmnGMXdyWl010+XXyfkci6k5PRQTnHYdQB7c1C6kAlVJ9SRgr2yc/TnIq3MQmQeTk8Y4GenPr16Zx71A5yu1R94DcR0JznBI6j5TjPTk46171ObcU7O17rp2+equuq3Py3F04Jzi3dpqO2z09Fa/3avQrKdpYgdM88E9uAQT7nPQA9c5p4J8vA5PBAJzlgeuc9O2OfYcEU1gFXIzuB5U5285Hucjv6Z568CghScAAkcYywwfmOOMZBGOTk8c1102m72Wseq/rW1/TU8Curt07XUld3totNrdG7aaeuumPrsDtYsSQdoXcoBAXqeufxGB0+teMyqPMLFSNrsAADkc9we
cfhznivd9RVDb4RmLMjK+f42I42nsR0x1xjnpXiVyp+1zBx1dvoCCOTjueSe+e1fUZPUShKGzVrNLZWje9uuzTeu25/Hfj1gE8dgKqV24tNtt3V46X3Wjvdq26WmzkLHBVRtJ5bHJLcHg9f0Hf1qwrAgqNpK8qQDkMRxk9gOvTJ/GlCfJ8pJVQDkY6Y4IHHHOfXt6UiIVYHqMjDHv14znnJOB6e9etKV22tbPol5dWtXbp520ufgNLD8ijdXukm7WS2s+r16dr7CRo/UthyTk9iTg8Htjj354HUVeTI27skgEHB65GAcY5yB838zmocEYG05P3ucgD0HcZPOcH25yak68Y28bcsTkHPU8YPbn3IHTnGb2tbZJ2srWtf5Jq63vr0Z6FGPIm7vZLe2jtuun5b+ZKpLFhgsMYOCMKV68AYA9cY4GD05lRk8sgEFw2Q2OVOQRg8kHPQ+tUxujbcEw+duOcEk/KfUd8jqMdMVYWU7cAgswBfGDn6gnkj04qbW07aHXTb0vvvts1bRb7de7f3zxFg+Q2GJwQeCCccBcHqcZ54PHeta1bYwLZ3g44yVyemTwM5HOBz0OM1kRbiUZ9pGSCSOSBjjHXI9fbGfTXt1VgHKOQxO3C8nOOT7jJ6/TrmuKq0ld3d1ays+19XZ6a7dPnf38BGTcUm1K6f8AdbTTs/J+Vu53uj3IRApYFwBkEFgGB4x0xtBOfXjjpXollcZMS7lETf61iuMZHOeep/oOcHjyHT5JIgg8t1IIJJPYkdyeSR6c9enFd/p0skqh2feBtGwfwj6Z5J6c+mcnmvmMdS1k1rdrrvoum2vn0sz7/L8XLkUZRtKKSaj0sleSffR66X3uju44lEzpGSchQhbjduyDgg8g+n06g16P4dM0aGFWZwiAM3HDY5Unj8Og615rp7eYykhvkYBDnAxj5ex3EngHGfpXrOirb+SrbWjkYglA3+sbgZJ5GOcelfOYhPTWzVnZpO7dtd9eunlqeiqqack27uy6uL03V9PK9/S+/a2zYjiSJgG8vLDOASAM4GfX0J616BodxJLbQxuv3jjIPI/ujdjgHv0PWvOraMl0YEqAwUIckkHpyfcduo7DrXqnh3ykKKUBPBI9dvT8snJzxmuWKWslZu3M1ZJdFpdu7vZtPtqZyqpNJXk+XR9E9Lt67q/4LW50NnLcWE0TOSxViUY/eXd2JPUD/D2rslSee189Z1S5bDFsYyvH93nOOBjH3j61lGJLyWKV9qJGFR0K8sQR82c44AGOOeemKvT38MCOjELGNy7+gVcYRcjr07/nW8Iucl7ujSu9E733Xa93r5I8vE4tNJpKCtaTenM7rRvys3a+rs2ZHiDWIzbvYxhnZYD5sh+Xcx6liTz7jnJAHoK+Mvjr4hXw94Qe1W8hju9RJECgAs6sSGDANgHDArxnrnjr9B+LdQkhEckMwYSsUKg/MRyTwM/JjnOfbHzV+d/7QGvvq/iC2098LBZsN4ViSjLnGBjHOMk+oHNfW5HhnKtTjJpwcovzTXLfdOyb0XqrdT8e8Sc6+oZLipU5WnUpuhCzb5facqutdLX6fJ9vn+S2ZQzvIG3OX2oDtw53HucjP0/nUWI8FQCAB8uOAPcjjvn35PTmrUkrtvK9B8oGeSRgD1646/TI7VWdT0KgEkAjgY9DyDge/ODk5r9Twb5YpcqWiTfXl07bvs9X96P49xMuaUpXd5W5pNc2mjvdtvXfZPz7sdM42jCkd+fxxnOD25I6mik5U7TlsDG0dPp06D26nvyaK9yM1FJN9Fa9trLz/r1PKlTTk2uV31v3uvn/AMPfQ8pYJhmJ6AcjHHU5PXPHQYPXr6xoxyARlNxYDqwA5yeBwOvGMdxk0qsquSWUAjDIScMx7jIwVGOR7ZyOlRtgfcy2eeDx15B7jnGTjAzxyQa55y3sr2s1utV56pq19LddtdcIR9dVa+um3z+69/kycOTnJKrIcYxk7sjhW9x1Hcgds092Bk5
yyBQGb+EnnPJI59vXjtVMlichBtAGB6EHjp1HPJ78elTKeXXAXIxk9Ce20dVznHfJ59K5nValFvRWs7dHonru2mtUjRQtqrNWd1po1t1d7N3v562siUHcWB2hMsVA6kAgjr1GT+NRguZVGV2jBK4+8SOhX0wOTkdOtSYUKCCpZtuQpzgc7hnA2k8Y+nNV0PJdflwfkO4sSD057Yxx7+lXOClHmVvds+27SX59nodGHvzNX0Vna+mrSvdfLXte25vadOyyPukYngeUuVAA5wmM846D8M84r3DwZKoZGYnzWIZQ3TPUMxzwUBOCem48V4ZY4yjSN1UFSOu71OO/r3P0r1fwlceW1qrKCrkLtUgMFU8HHU7skvzzjjmvGxi5otNNS10sr/fq5d189WfU5c3zQd9PdaXWNpRaXmtNtfS1z7f8C3MZWAJsmYypIArkGFlwcM2OoP3ugBx+P2P4LvTBcBIDIJZolO7smR8p3dF34I2jOMZz0J+B/B2oxxXNqiuATtlUj5C2MbnA53Z7rnsOcA19o+FNUhWOH97td4UKNnaxduqgc8jgjnuwwcg18ziqXOpJK7SSv1t83rsl66ddf03JqkfcafvWi01rvy6NbLV9Vddkj7H8Namsts6SyE3EJWFApB2xdmHPLYBJ6Y7ivZ9HfzooYvN85rfYyoxI2gjJwMHJ4zxjjjmvlvwbfwARq6sHzyysdskY5+U/xMueR8vXrzXvPhzWCLqSS4QiLCLBKh5JJOA2TjHt68etfIY2iuZ62trfzstLLX/geiP0jA1pOnG80tdrauSet/O2vbolqfVng5tjfamAUBNrbySpOMAKT3AJPXjPPrX0D4anF1JYnKBYifNI+6AuM49gDnOemSBmvmfRrnzLaCG3JZGjSRzuxy3LAjHG3Bx6ZPevafCuoNbQSkZ8uNGjVP4gcj5uR7Z7/j0PyOPoXUnHWTuvee1mla3yaV19x6satlGCfNPmT1vptf5aW797PU+j9Pv2nmEeNkSN8pBLDyxnpxxn6fw9QM13tlcNcEmAkQREDkEbgAOcc5JBGBkkn6V5T4XnZo1kYM0Lg7pGJIY9go6rtGQDnB7gYr1G2uhDp6pbqu2Rid+zk59ORz0GMZOO3SvmakJO8I+64Wcnfq1Fu7e17rb/ADOmMpSbcVy9bSjFdrvX8La9kdRp92V8xW3BFBVVb+In7g9885wDx3rS3XNx5URYMN+7AIAwTwoPOQuOeOD1rAsbeZliIHmysVcgAq4Xux7blB6ds/gfSdFtYUSJplBYMihS2S7Hswxn5CDk5xz9a444epW92Ds21eTdmr+aXTe3Xq23pcarjZ2je3vcr9LdXd6J/nujZ8P6bcSOEcHLpwGxtjBGc54xkdznHHBJBHpWlwR6bGpc/KpId1OMo3U9SfmPBxxxxzisA6hYWMSMrKSFRuQAC+OVJz93nkeo6HpWHqniYyWzxwO3zncVHIUDPyqeMAc47ZzzjFe3QwWFwNJT5/3rtNv4ve93S2ml1v8AfdGbrVq0uVKShd6yVo2+Tte3X5s7bV/GYsYZrW3CLglQ5XOQRwF+noMc45ArzyXWjdKzPJ87Esj4ydxJ5znGBkdfeuNu5Lu93NI4UM2F4AIAwNxx3YDkcA+vBpttHJGP9YzR8biDwN3GFPQDqcd+mccVw1sdVr1NbqnG9tlf4baWS66rtZ2ejNqMacbOMUnZJ62d3a6snrqrpu19PU6B7w5AVzLIQNxCkKTg5xnOcHqR2IOAavWSmb94+zOSCMjcAvTI9u3rnBAxis21ildsxhGJUquVyw4xkY7qOdw7n2rctdOliRt2QG6kcZJ5Ix1zzz3HfrXnqM61RtpuMW7Jq1rtWSb8/O60s9TvjiYxtG7XK9Jfdv328uy12swxg/PErAbsHaOAPcY6f4gGtUae05SR1EoKqI4goOCMjJ4x1PJPY9+BUdtBJsVIkbjIYkYyccjJ6k8f49M6tvMEJVgfkU/KRjB7fXH
sPQDnklWXsWlytedvtabOyu9Xs7+tmdFKvztSur3Sd+1o6tduq7X7jl0xgApVFYAbQR8qcZA64zjHOf8ACqx06eMSAcuRjIJHPOMEe3Q85xnHJFa9rLcTOkbJlS2Uz1wMkZb+Ic857dOua6poYREhaED5QTjr05Of6AZHOe9b0aVOtC6dnHo76u/XTsnbze1rI1+syhJe8pRd9ErJtW2vrt2+dtTzL7NqEDgRsZMnBLZVQeM57EDPJ7dvWun0PMnyyKys/wArMDvAK9wMDCjP06HHXO1LDazI0YZYVPBAAUkHu3II47jjjn0qeyFjaHy/MijJAG/OCTxxnryO4HPbvW+Fp+znGUakOVtXUn7ys4p2vqtNfLZbnRLEqcbWvL+VK7ei0eye/n6nSLbS+RGtu8joqA7Cu5mbr8uT2yMDt+WdOyfaoSSMKYwTKrEAsepY8Hk9NvJ4xkZqSG9tzbeZZttmVQoYLu/4FyepyD39DyKoql00qvNGJVkGVdernrgjgHrj0zzg8Z+qjNqMZRacZJafEk3a+tml03fey6Hnc7leM7RfNblulK+nS11f7n1XUka7t5Xcxh4lZcR5jwMjgYHHyn1H1+mO1pdyu8a/KGJYJjJbPYZ5PGAoI7dc12Vv5DGIOqB1XaVwMp6Zx2Hr6du5oalNb286upDMU+XbltnXjIPp144IrKtTi43lOyjrpo3rF2afl09O9jajWknyxpO9kuaV3flaSd2klfbrfdnIwaSLiOWNiI2G9nY4BIXB4BGST/DyM+3bKurQQQyAXRLRjhWOcYwTkdxjjHHTpzVy7v2NywgkbzGYhyMgKCcliORjIOO3BxxVIxNJcBmmEiM2WccNgjPK56DPJJAHpXBJwULQjLeylfpdK7WqXre6W3l6lJycr1JaaTUbXb+F3Tene2ttbamKJHmDooBDoULY4IHXgjAI9M+3tXD+KdFmMOSiSxPjeCAVwQRkYGVYnnHQ45r0+7ElpC+20Mz4JQ4DIUJ+8pwMMR7EcdOK5KcXl6olkKxRpndBnrEucgMfl3DIxwMjPI4NeFioRu+dttttKN3azXXZuy2a6Jnt4HEzhJVafKvh91tq+i0XL22dtF2Pgv4heDJpZ79L6IbHVzayKmVaJ85Vj33dxjp+Ir+Wj/gqR+yjcaN4jn+K3hPQXB2o2vmziISTlszPGq84yxL7ud1f2UeMbe0v7a4s2iRX/wCWbk4CIM5ZjgYHOQd3PfGOPzw/aa+Eek+LfC2paTdx/bbDVrKS1vN6B/OkKsIzGCMkKc7gOcEc55r3+F83nkuNhVi/cc482/LKnJq8JLq2tVd2VrdCOLsjo8V5LiMHiqcVWdNujJWvCoorlnFyjprZtPRp2sfwVtcSCX95iNkJXAXDbhhSGXkk9QdxwMHHPS8JF3A7htZSSM9+MY9zz34/I19Oftc/s6a38DviDqkaWky+Gr66a40+4CFo4UlZiIy+OAONpOO4x6fJ1vIoBJOQHIweuMAdScZPGMc/1/o7AYvD5hhaeLozUoTipe7ZOEnaTjdO1/I/hjOspxmT46vl2KozhVozlGMmuVTgmlGcdUvetey76o6CPy2Hq2enoBg5we3T05z2zV2KSMMQSuSCMc/ePpk49OD049ax4ZgXwMFSMblB+QHs2TkH7oU89+2KtKw3Bhg5wAMHORnlRwM98c5x+W8k2tLu1l0e3fvt87bbngVHbWcZTk97NNLbvpovJde9zfRF4IVuhwBwCPUn2x0z/LicL8qhFPHbB2j6+mAe/Hp75sNxkJgsAuFIPfpkgdM+g6D1xmtW32tt2thckHJ6YxzyRznvz+FZOCau1zNO9m9Fs9et/Ta7vrc5XFKV0nJ2vZ76uOvZd16pLysxxiRQMnJB3AA9sZH5noSeAe+QCWzjYZkyOcLxwy9MNx1HIHB9sVoQxKV5IDjkMCPu9zn8hx78VJIQpiDAuC2PQYAAwR33Z9QD9K4q02qcrO0orTTo+XZ/d5/
hb1MDRlKtSlUsouUUotrdyjZWV907aJaep7r8K4EtYrZDllLBZFHG8vjaCOrKpHzAYI456Z/RPwHGYdLSSQ4ikRASoABUjJXbzvDccZGMde1fnx8O0SL+z3IUAsGO3O8hj8wVcAg4C56k5b8f0F8GBVsU2oZBsASNnwQjDsMD1JDfU4NfkucN1MTN6te0lGTfqr6dddXqz92yimqeFpu948qto+VJxSst9b73/PQ7rUSgt3ZWKQzQIq4wS8j5+ZgeS7bcleny4ycV5Drz7mYu7yRxYRmBJL4BAQgDgHOeTwc9eleq6oFe3jVAjvHtQwg7lVwCduOMbDnk564x6+a30TyrPCyJH5pMi5OYhtPyPjg55OR3PXODVZYvZO7ile3lZe7ou+i3bbu9tjwuIkpUuVttq703snFefl+Bx1oTHLNAhYwM2XKrnGBlFAzw5yemTkda7XSVeaSzVEDxwAyuSdjoM5J25O5hgHHbHHeuRmhNtcMI5sRlYXxnKjcW3vLjlVYgYHO3Hvmt/SZw6yESMrCVHRUBOY2PChs/d+Ukjn9K+grRlOPNFa+el3aN1bfo/LZWVj8oxkWlJJctrpO2iemy1312vufTng64JhQPL5vIkMajaIyxAbjJOwgZIB6gYHWvo7w/cRJcQkyqIvLR9kh3b2IO7zFwCu7Hyg/dwfqfknwlqLLNiYBVIXBhOXVVHyhR0bGf3igjt1yK+ivD1+DJHuYOJViDMxyXlOQkacABBghmzwSPbHzGJpqLndu7drLu31suvSz9d7r5LHUfeuk37r5or5bL9H59ND6u8J6lvLshHlyAsyldzOFGF8sgrtGTzjGAR2r2DRrqQLFhyZHTO/IABXoD6vkgM2PTjNfMOg6hFuQYeFVdthiYqzMMZBwpIVyRyB8wH4j1/SdWwkO1wZTkby2d690Ufwk8bccnB54Ir4PMsMnKTStd2aVtNl5NPRO97O6PEdF86dnZp3d7a3Vlpay12069Ue0QagIpQxLF9pR2YYDdMdD827ucAkheBXQ2msshXL4yyrnglVXggDPAAPHPJPsBXlFvqJIh27y7F8yEgqwOMBRngDkBuvPTGK1LG9I5QlA8mJBnAZlPGSeUzznAyeOR3+Sr4Rvmla8X5bNNarb7vns0hwpO6vGWtn72luiXnLVb3bXTv9Cadq6xKn7wlGQKQ3OC33AOeCTnjAx0ya7HTdSYCIM8hy2dwA2hT0BOeO/zY6549fArDU1DiL743AxAuF8xuuXGDtT0PPqa7qx1BtrBGDsxGxVkHlKBjIJwcEjOB259TXi4jDu1o3aV1drd6X2vpfe927/M640m3GyVotNrZp2XlZ9N1a63tY+gLK+8zyzgbEA+UjnqOvIB4x82PwzXb2F62JG3HagyATjC98DPPTgdecDNeHaZqSlE2u28Asy7srkYwuOMcnrk9+9d1Z6pJFGrFTtI5L8lQvBC/wB7IK4GOPTmvDrUpKSlqneyd+rsr3S6dtFpbU7IUb3T97W6et2lyu2+2+u2nrb2bTtUGAofdGqFgrEn5uPX0/oMVvWupPvXzSMBs8HJKj2GOnAB9OnXjyaz1JjIpU4i4Yk/xZzgHHJyT8o4wOK6OG/2yMxI8sYB+bC5xnKjrnPPcZ4A70QruFou/a6WrbtpqtPvWvXe20cPJ2smnpdpWfTVa2em/fyPVhqCCJB5wIbJDFuCDjAHYEAAZOenQitC2v43UuGw4Ul0Zu+PvZPUegHX1POfMIr0SDGGEYbajHqSvHHt6cc4zk81oyag0KopcDcoZNuA20YyWIxnOeh7/nXQ5JqN03FpNbdeyfV9fl3PUweFc6kIWbTTbku65X/NpfW+i8tTs7vWfJO5GXDAhnc7RGBnK55zuJOAOMZPXp5d4g1yRjJEgLurNIpZuinneuNoKL0OeRnpzUWu6rGAoExZRGW2MSPnXBIY/wAODjnB5HHHXx3WfE6L55V+GUI6OxGF5ABcjg/geg7cB0MBLEv
nktOdOO6btZNKyt02Sv8Ar+s8P5OpextTUbyirJNdrtq2q3epd13xN5M2Zrhs52r82Y1x2B6YBIA7AE84r4d/aH+IVlPbazpl0GEEdoUdIvuXEiqcIkgPEZ/i4HvxxXr174guNQfV496qtlBM9vuGTMwBKoOcluwPB/Lj87/i1pnijWtA1mTzWgm1K9YQTMczCMFw0EeTlGZSPXp8tfSYDB+0qKKg4qLjbR3uuVJ97JJPuu2x/aHhFkmGp4nC1K7hCUJ0pRTfLo3BrVaN6aq2vkrs/PTXfEFlrGva5Htlt/Md4/JhPywzRkhA2AQySZ6A8Ad+K9N8D+K9T8C/YNXtylm/kgtIq7o0VsBw6fKTkbe4x1wa4S28Gto19Ot2xjMEpvLq5LbjdjJba6sAdxPBUMcZGK6zV77RV0BtQvLe5hMi/LA52mFY+FEaAfdk3E4yAMdsV9VOKg4JpOHKkuW19FHVdLpbX9L9D+68LOnOlQoKn7SlONOm07STTUUt7K+l+tujLHxm+NPiHXtEudO0uVtt3Gy311HHst5GnHz4JYlpSB8pz68Dv+e4spBJJItwbZUmMkjOPmJZiXdskMXz/F9cDuPp3VdX0u+s/IldUt13SfZ1Iik+0EZjDSgN1OSwKkDI+lfLvia9nGr3E0RSNCzKVB2oDyFfvuYgcnoTj0xXr5bVUVZtu8tE9UnZWdnZf52002+2yShQwcV+65YxldWTd9Y721uvO2i0Z0sdkZbd7x45ZY4ASwYlCy4AEgYZ475Az+HToPDNzazQyWrCXzoQ0lsRgb1H/PQ54AOPXr1GMV5cniQppclo87xEJucgksVGWba/QxnIwmD1681Z8Ka3c3F5E8b7IMGJW5MixkjO8dg+Bg8kYb6H0K9J1ORqNrNJ6P327JPVx300v521PvsPjYe0jGNSLk7Sgo6aWhpJbN2XdvcwviVbOlzKXi2yK7SiZvlMokJwFz2+XIyScnopzXixDEnPTOMcg4Bwfyxgj2wK9x8eSvemXzBuG/CFW3HC53ADAIAPJ9zwMdfKI9PmeJ5FTEaZZifbAxx0zng9M5969PDyVLDpSUVytRaatd2Td/O+61+5HgZ1QqYnFRqRclBxs1G+luVO6V099fLe2hh+aq/LjB59OCMY54OevPenIxLbiTwfp6YOM9Tz3OPyqu4AkcMCCGIAPYgc59cHHHfkDkirEOxlPPzDkDG0MAcHkn0HH90Z75rdpNaJ622WttLK7evy0X3nzNOc51ORyilFtxV0vhaT0/m3aW+uyPcPhrNFc3Ytrh2ijEMjhgON395znkZPGen5E73iOzgVJY1wXSTdjjMnXYzjvnqT14zgdR5f4PvPK1C3USBSdyYHAJYdCQehI64wcdMnA7bWru681mmj8tlADMXzvbkAYA4DA9Mc/XFfP1KDhiZN/alzJNtW0Vmr203vbV7o/UsNUp4jLsJPmVoUVTla15SjbWVlpbRXfrc4W43wSMSQMggZOdvYEYPXjr/jzjyTu0nz5wWwGzkYxnjucHr2Aq7fSyPMDgEMDuLfKSRkHC+nvjGT0xwc1lcMMc/xDnoDnB6+oHY4/Gu+lLlXI2tLXTa0287XVuun4nhYnESi/ZxqNQhNNuzbd2r2+T7a99dJZNgK4yVB644bp1GeM888nrwa9f8Ah5ffZ2mEcixSy25ER4OZEHBceh3Y9se9eNEO78jPIUnPIB9unJ24PAByPYd94IkUavAkpKReXIDk5U5A9APTjHPoByayxbapOUUm0la700tZ6PRu3f1djryvFU54ipTSTU4p3s0moOLs7fO9vTV6vq9f8RanM09rI6rG8rOSn9487g2eNw53DPPA65Hjurys0shcsxY5+c52kAgEHHcjn1Ar2rxBpsXns0as0BUAyg/fcD+HA4DE8DnAP1z51rOh3At/tCoGU9WXO8Dk8jByMd889ecisMHVppxd1eVm1e/ve67t63s9dWtO+tt80hPFYdqKjCCinGN
nG7VtNrdLK/Xdao82ZSxyPU5z0HTHbp2A56e/Mtv3yflAPHTn/Hng49cdKstC8YMZBAOdy+vBHJPXqDz1x7VXXcMgeoBB9ycY5z19PWvpKc+aK1TaWnktL6/p19dT8+nQdCqnJOLv719PefKktGurXZ33fQkTcT2+9x0PTt29x15PNOeRt3zcjjPXA9RjoenI9R1oGByc5GO+MnPT2J7e/U9KryM+e+epB9PT1IOOMf8A1zotdNrf8C/bTzV2ut7aKpNUYRd5czd3rum9eno/6RJMqfexkPhkCnJ567scqfXrg54JNUs7SMnHOODn647fj0NShmbIPb8vf1GT/ieaTZlS2R8uePxI47/5PqauCcVyt32VtV0+d3rrbbe5xVb1ZqdNaat6JbNX030vpra/qV2LrIdhBBGT0AB/Ht+IB4z61biu3bKFjgDJUcA5GMdee2B+dVmCsTzk+vGfxGT39/xpVURk/Tg5HTHYYGMccjtn1p8qdrt3ta+urtfr/wADz1OJU6sKrcajUJO7XNZLVJJJXstPxtuxJWYE4xnOPyx+JxzyOD0I7VbsyDgMcHqATgHGe578ZHY5/CqxRSSSSem0c9MjkZx7nAPXJ4qeIov1PHHGO5/p0+vsZnFOKTu7WSfXpa+ny38ndtnZhJVIYjmk42091u8XquvRvTttfY14lVzxnCsQx7jGAePb16g4rotIghmn2MchgSq9ATjkZOfl+nT3wc87alEI2kFn3AKVyuMYyPTr1/Ujmul0ZEluY49yoUGdx4CgZy2R/D6465HvXFUXKtVezT7JvbV22e22uy0PuMnqJ4mi3GMpyunDpZJab6PV+b+80zaI29Y1YEOep4Cjr1A65GCvAqNYdkowoUgdznB9QD0HTj1zjqa3J0Fs4OzBwdpJ+UqRy2OckkA5PX9ax8StM0ucHJbGM59TjPbp19sEVx1ZJ31itvdcdLqybuunlbfyR9bzUrtqKvZpppRmno72S2210t+BZRGUtuBAXBU56twQc9x+GOhzVl5iq/dIkP8ArGHQ8YAC44Yg9efbNLE6OEWUnGDvfoxcfdwcf+O8fWq1wCGDEg8nGepjPfJyGPHJx+XbGCTd5WXa6sr20afn5aX1MasVSpNylJNrTRtXeru16/8AA3C2lmiOFkK4bqDznPyqT174Bx/hXqmg3G8QpKjM8gwrjlixxgkn7yJ3HbIPGK8gSRkZoicjO4j1bqMnjgE8AY6kZ9O48M6jN9ogt2OGwzoRzjbt45JwuM5yev1racErVGlvrZpOz5Vqmrrrftfdl5fPmjGLk3vy77pLvrbVK11q7rues3kMENhPEY3aMKQYy2RKWGSy9wM459sV8z+IlX+0ZijM4L/MzEFi4J64Ayen8scZr6Fu9rwLIs2xJUBGTuUE5DDr8isR8q44Hc558O8Q2ZhupZAMEOxRhypwcEgk4wc8cc8+lXOrBckFJOSSul0VlZN6q/fujpxlDmpNJb8rktfJvS/Xe/ZavtlQ24S2RgSXdgQpxn2yfTggD68g1VRFEjdQ+47lI4APU59sdv5cjUQo1upLZkYcFWwwfkhfXoeB/wDXzWaJNxwGEmAWVu6jOeef06jkioltaSbb1v0e2nVf8Mvl59fDxi6TjBLlUXdNu1rLVdF/Nra1l0R0+nGNolXkrGMBegk7AjqDj9SefStFbkqs0DAeV82Y/ulMDufU54wehPSuf0uUFhH0WNSQvfB9Dxgj179eeatMrlpN0uSTkkDLjGcZIxk44J/XpXBUjdLlVlpFau76362389T14TlUpUqkacakbpfu7RvZRs3ezf4NPfYozFZnfkBDnKjgKuOPxHY9c5BFY95btHDI6ruGMg4wApOTk8AH04OM446i87qk8m7OwZBxwMg4+8M5HU47+tR3V1/o0qRrvWRQrAAHPJwd2eSM9hjkdjTpSa0vorO33XWjt9y+RyVkqtOUmuRJvRpXTtHfu1ol+uxxEuNx+Qgg5BIwR+X
4c+vf1iRiH7DHIP09/wAfw96kut+8kAHaclRxt7DPGM/pzyPWCABhyQCe5PPGO3Ujn6455wQfYglKnFq2sU3fVptJP8766Pp0t8BUm44mULNNT0ltzcsl5avWyaei1djTYlkDMCp4OezYOB06ZxxzkY6dKYJAGI4B6gjp1x15z7k4z7iqxLbVUEEhsKAwyQfbngcdc+lSrtUlm4OAMEd+4JJx07Y5wc9qidLe1m7Jb3u9Lb26rR7aXdzq9vKc6bho42U5O1tbXvppZ3vbXW5ognEe3aV24J9euTj2z8zE9OMVYikwQhfAGXKKPlI9fyP4/oKEbqyqSTkHKgE9B7+p7Ajv2o3MG3AYAOBnrgjv6nvjnHWuWSs76dLp6ptpJq613+V+x2Qr/vE5tum7bLs46JPzs/LS6utLNy6YZcAhuhP8LdCeeh5GBz7npWBdqWVlQ8JyeeTnOcex6g9RjsAa0ZCWQkEkkjg9cZPI6HHQ8nPuO9OTbhg5zuXIABAyOB7gjnJI9eoPO1NO8Vy21WvldPeyVtLeSfZO/HjuSpCoopxTTvJpJu/Ls/N7777HLTI4ZvlzzgHvzxzkDIOeOAefXBqjja2R6HbjHX3JI6DgZ9c+1aFyWViowRknOT24AB4x6HjsaokAnng9TwuCOOh6jod3IOeeea96g+aEe6stHZ3sldq+u2l+uu7d/wAmx0IqrNKMubnbblra7Stfqvy7EHPJIcMCTkngc8cc/iffjtmQfMhI5bbySDjgYB/U46duD1qOTjBAJB6DJyfUY+vUn+VIHZVO3IByMZ5BGPUDAPb1PU130VZcvVu62vstPvv+L0Pn8RKNNt8vM5Jp2VnZO6u9FvstvkJPGXiAbk4LBgOQoxgbuPp07YrxXUYtmo3EYwqiQtk+hPT8MDAHTNe0OzyAKGO3ADc5wp9DztBPTrz+NeS64hXUp88sz85OF9h6jPOT6DnBr6LJmvaVINptrRX31irpddrWP5i8bsHGvRwmIjGyp1uVtq2krNqWlrPZf8BleBhjbzgKSAej89MjscevFSAB925tpx8yjhQQMZ4HJ6H1UdDzmoYwCuOhAwTkADnnAPXH6Z6etlVUH5uQwGcr274Gfu8cc5+oGK9uSte+l230V3fVq66aNW1+Z/N7oXtZWslpbe2mtldeS/S1movzfM25STyGPB4woA7dfT2A5qdQXDbeOpOBg57NjqT0zxjPABNKoRWCoTngruGQM9QfQ/genSpgGUncvTK8fwn+HBI53c89PXnk87fTrb/Jbffrr6Prrh8Pd6xck9Lb7tWu1ay0/wAr9IXBHz8sQPn754wSCeMDjt+YNV84IOCnzZDZBY85VTgDv1/TnFX9hYlWVicEgK2CADwCPTHXnk+vNPNvkFQpBIzk8nd9MjHX/HtWbqJLVW7fK3b/AIF7PS12ehHL6iXM4tK14tW/u6PVNO3b0WyIk3N5ZJKhx82QAcnjPUj8e/pnBrZtSXIDSLtQ4ABIY4xge+R16Z9D3yWjk+QsFYqwXYOCQM9h9eeQTnAFXrZtzrlAu3IGcjLc446Hnv1469TXPWm5Rvppe9tdGlts3tr2s9D0cJTnCcU20rp2aetnHfp1T16vXodLbs4ZUbPABUNypGeo9c49vqK77SHiYxhh5bDawUdGbpkH0ODxn0x7ecwyOWXB+YdjyAODtGOmfx6dOa7PSHkY242qCGz1+Yk4zjr93vkbRnjvjwMalKLezT1S0fTXVW9dL/gj6fCTlGVre7JJ35dZNWunLX/h1Z6anpdszyyARPHHGOXTGCWbpjnqeeO3XqK9P8PLNtUyAkKR5XBO4egOTjrkceuehrzSxhTejbijyKN/ORuGCBzgr37cj26+neHWnXyAgXlh8x6KE5IBwRkZJDe+MfNXzdeG929GtHZX6/O1u7unY9eEnrJpwXup31beit/wei7bHsGnW0LwwySRMSVKqVwSrDGST6ZxjpkAYx1rtrKyeMDarRFk/ds
eDtbuDyNxxzxnAHbNcZo7N5KCWQFfvfKOpzwQR/Ep7fTHrXpFtf28Vmg5lliTcFdRtY4+Uhs5wvfjHPGK40k9HdK+ri7JW369Lb21ZjWkvd3u21e9krWW17W2e23TXXQdpoIctKHICYUHg4yfnPrjnryBWfq07LZu3BDR5KnBIHUsTxgDtkcfoc5tRa5VjI6xAuThcYCjtnI6HHb6dcVxmr63cM3kRuXMiNGig9cdQx74yCcgf4dWFXNNWu7SW99bOz6p289FvoeDiJNOdnH3db3Tcnpt0sr6vX/Pl7u7e9mkghZ3WFHUMTuByOFQnqDjAHcjt0r4C+K0sDeKL6FWEkkTYdzgbW+YlQSTkA479uvp+hV9bQaV4X1HULqSKKZLSaRpcD5cLlVX5s7yScDk8dQMivzB8T6g+qazqEzMCWupCJD98hWyMnPf8h078fd5DSc6rqJP3fu+KLvdaO/RN30Xqfzt4uY9eww+FbSnUnzuO/uxcVdq19Xdp9LaaI54bd3Ukj7+ScBm6EdicgcHHOOc1Iyo4Lc8LkZ68fxe/sP5c08xoOGCnKg9cc89e/P+P4sbcBhcDd0weAOuPTB9xjjqO/3dK6S30slbysvPffbvfdo/n9X5Z66u107Xae/lZXVtNb6XKzBS2FBJI5LNgMfTbjnr17YGfUFPZctwBkLkEdx2PqB6Y5/DoV6cJPlWrW3XyWuje+/9XfC2k3dR116/5P8AM8bkR88AFVHIPJAJJHQ+/JHXqBjFJGxXaePkJygIyzHjC8Dj8PwGM05mVQFDHdtO4Eg7nPQ59B1xyQKaoO0sMtIgYtkDgDqSM9RkYz+PbNyg3tve97ejs09F33/KxzqFtLJq1k1ZNX0d1169311Hsw3FjgAnk4zknoCOhORkHA+nXDxuKF9u5UH3yACwGMknnJ5645A/EQujnc+AFYKVUjJz/ePPB9h14qVSDGylzlQCMttBbqFGMk57fSso0dU2la66bPRaeV73vbVW02E0kkl1stul12S8tb693pZkRMRDsBg5Dd9xJ44/Dpz+lTKGETrnBbdIrYGM9lPTGOf0qMjaoLBhIv8AyzIJUqejDsOPToKcDIUGX2q3BAHzADsPQjvjGRwcY46OVJNWV3a/W7TTutVtbp0ehtTdpX0vLlTfkmtk97aW6WLloX/dK7DbvGRxkqT1JGepHPP0NeneHmKSwlAVKMpXeM4BI5IJyucHkk844zXmFu6CQIGJCjKsBwQMcjv3/l0zXe6PcgSQyGYpkKGZhkkjsSDzg9Bx1+tebjaV481le1m9Xa/Kk1rbXRW69z3cBLlqQnfmfOlq9dbdL2eitsm9rqx9Q+FtS8iaBCWMg2JEWIwCcbUD4ODknC459ea+s/CuptLDAJ2XzdzAx9WQnbsKHIwRg7iM5yOBXw14evmkeBQF3oVkVm5wqAHfkEFSM8dx054NfTvhXVEjESkjfMQyNIx3CPIJyTg73xx3GO4r5ipSvzRtbdX3bW+r6bPR26vQ/SMorKySlytyXk7e6kn10afn87X+5/COqiKW1USec8QdWYDbneFwCpJ3f7QyOR2OK+i/Cd9JNMcPiAOAVcBkVhgkZz8oHXAz3FfFvhDVoWuIeMTIoZVkb7h2jcxbjdkAcnGDj6D6B8L66IbqHa2xAr4hdtkbMo++TyGaTOQeOBzXyWMpNtq15K6vbrvvbfXdW6dHp+hYOsnTjF2d2mpX2aUUmumvqttE3dn3J4Y1PySFDJJJMWKwoc5C4+bvgck44x65r3nQJkmjjaUFNzKfl5w2eVcdMD1zzyBwM18T/DnxMNRu90x8oLcGGN1O7cRgbQc8YPXrngYzX1n4c1C3a5s4dz7mJdypwrsACinrjecgDJ5BPIwD8pmNKULxklz8rlzJtaXTitNL6tfruj3KNTmaa1k17z87xva29+lrd0t0fUOjXOy1tERiA4V4mX93ujPBLDuW7cZ5yOhz61pcJdIUVwdiksD8qx4wSTn
jK/jg/XFeKaBepeXFl5uENvGqSRAD70YxgcjI5647fWvUo9agtkRfNXdIdsibsHthAB0Y45GfQ96+OklO6aaabba2fa7+Xl3vsjvlJ2VnJSglq72fk30V15eutz0uzvI7FQSdzqcoScEk8EqQe/rjnjoOtlNZcyAiUhs4Q7c5Y4AA5A/MfXg5rzP7dLcMMSlmXO0AHBTGACM4445yOe9dRZrutzM4yRhUY/3j1IHfHXOfoTzU8l0mlZp6JNpXSW+r1W93fVb66w5p2akryl7z62aXW27s7X1Xe53P9rPMDHJKzlVwFOR8/Q9CeD9eMcdTi1aTYGN2V5J3NyPU8jJ6deM8AAZNctpseWBMo+bGSeob+7nvyQPTjGfXqkhiLKVdSyjJA5BI9cY6+xH9KVSpLVNJq/fW6s0r/Lz+S0HGpGOiU3rbfppfR+q1d9h0kqlgpwy8AntnJ4Ud85wPywMCtyxhtTCFcAjaegPzMdpHHYr6+w471irA7Txts2plVZTjBwe56fTnp19a62xtwHTcB5ZHAGMKOOevOep/rXB7aN+X5aNK17Kzell59L6G3O+j92ybbez00t3Sbs11XoJYxxxuojJ4PDdCPQc9Md/qD2FdTAqphpwzMTngfLjp3PPGM/T8s54YI54yuNuQc8ct0HPGAMnvjuTmtE3EYWNQFPGCO4I6AAcEc9sfXoK0jXjSg1KScr3tv201tey+THCTbive1d31S66+Tv8APqrmukySEpGqRYXjaoGcjqTkg56jGOxHeo87H3EBpOrHHAA6Z4wO+c/pWBLqEsTbAoDrjaMYBU5IP1watQ3V7OrFQCcZZSvVR2JyMDGcgZHT+8ceVUxLxE3Hkm5J3hFR66N3/Fvqk7XPTo+4k7xSlvf5a36+idupvbpJGDxHYiAqyxgcntkZB9Rnk9Tg1JPq88EQVgS3RSVywA68ZPOOvoMAd6x7aaZtzKHjUZJj25BbgEHB5H4fjitiztZ7hwZYPkbG35eGAPOSTkEn6Y796ul7aUkk5wlPsnZWVtV20V3brp1OmM4K/PyNL3ruVpS2tZ3XbX+r8jf6pqs8oNnEXUYLMVIIU43Eg54BHtjI5PNdFo9hf3wWa+ulSNiAUIwSwxlzzn5eB0PXjoa7yHSoorUF4Y1V+SdoyR/dz/d68EdOoOKwri3upCy2WI4lkByCAVP9V9Vxz2I613rAyouNWtUnVb5Zeyi2n9la6tpX9NrlwxntE4wUYcslFzlZ6e6ruWr3su/3adja3Flp8QiV/OmCEEKCyMPUnOCc4JOOOvetG1vJp8rhY4yP3TbRnHqPTHbuP5cfY+fbSH7SgZ1wd7Dhh1OCc445Oe/510M85IV7diN2FByNqEjgAEd+w9819LhMTOdC0YqEIWSiviimlu2tbbLXby1OecI8yko803K/tJarW2q2Vrd/zGXV6LCZxNcxl3OAMggbs5wcZHHQcd8g8VBFOksqsG82Nud4G7B7/UHoPp9M48/h67v7xbi5ldk3qxBJC9eBx1BwRn/DjpRpD2fl/Z4i6hQoAOUX1A4BzwMmslGpKUpSdoX69dt3fe/du3XVHWqtH3Yqo3UlHVtJQi/d07J37dNwTSrdw90Ig6uSHGOcHHzA45Hof8KydQjsdreQpikjJJjXIVhk4DDuV7sDznOK3La4eDckiuu5TvB6FRnlR2I+h/OsC6u7c3yRbW8iRsFiOcZOTnjvkd+Acc4qayp8lou3No3o9Xaz0WvV66XOjDupzNyd1BKSale8dNPRLv10tsc093I0vkmRjGgyhyfkP9w84AHHAzxgAnisy9aMQOs2FDkq0xHO3uAAQCx6ZOO3Wuwv7KCF2EaglwxVgvJ243Adeu4c/L37gVyOrWclz5kQiZXWIMiBcByRzx3OACemRgc9a8bE0akbRTlNxs+ZtJO/KmtdO9+uvbf3sJVhOUVzKCbW7Sa29Nm3fbzSPOPElhbXSC7ji/cJCIpEAwGA/iz3/wBrkde
Tk183/ELQ4p7ZmtoI5JGJETA5aHbziJSMKy598+3Wvp6+a6t7cRNEQIeWViQ23nKqMHIwQMDGTXHajoFvqNusyxMDKW2Rsux0c988/LxzwCeKypRmpO8bx3te6i9Hst79N0j3FXcIq/vQTspOSldaO0mnaza7P531/n2/b2/Z6tPiP4G1UDTd+pR2kilzAGmlmjVjBKpwCVUlssDxnnuK/k08YeE9Y8Ga9qOhanbNBdWF3NbyIQSAVYqjKxUDBXBBx1HHt/ocfFv4YnxNZSRraoHRfLm2Dlo9pDMUwQzqMfMDk5/L+XP/AIKKfswafoGu6v4mtbB7YzJ50MsSeVDcXC7t7ShQRHKmRhMkZbO4Gv1rgjPlhf8AhPrWdOraUWkm6cvdjZq+1rbetj8Z8TuD6ed0v7Wwc6dPFYaN5K2k4JKSUpJPq92r9O1/w0gllQHcOQQGDA8dBk84x19zjnua1EJkUEFVB+YgHnj+IeuM+w+ndb/SprSaSGQNuDsO+xgp4+bgbvQHjrmq8YVSE5IDLwSQQO+SevscDPvX63TlGa5k24vXyatG1vLW7+fU/lPH4apharhVU4u7TdvdurbO17Pptfe9maUDINvXle2R07nJOffjuevffgIkYHIVR1B55A5OPT1Pp098CJAxG1cgEAcHOM42kdAPQ9c9vTo7eJl/hJUnacccZ54P0z0wMc+pUmkmtHe9tF5We3kt+3XU4o03JKSVoqPM331W2zvvdbK/fU2rIggZYlWAGMeuQQOoB/UYzzxWnKpLQjA25UA4yoP+0Wxz2yO+e+KzrV1A2YAPUY4I9vUE5zxnHtU88hRo2LkBGIbBwFJPTHd+vIx0/PzcZCUY+bT9G1ZPb1Tt1e3l7GA0nFP3nzxac1ZpXje1+2yv5Wsz6C+He2M26swMyDeBjhgxGGOfYcAj88ZP3t4MuFNtZSLwwiVSGOQCw/vZA2rjjjvivzu8CagwmtGVk2hkhdy2DnuFz/FwAew4xwa+6vAd3lLZGCFYWJyr5Z3G3Zg94RzuyMHI4zX5bmtFqtNyST53tdXTtZ3W9113Xfc/bsvaeCpuDajyQvfXRRhZtv1su3W7PWbuBwzb9qId07shIJ3fdIftvwflwOBzg5rgrwyCWXcAF3EO2RvQ8ghVIAAOeTnjg49PRJCzJJjc2QuZF5XHOQg/ixkYweOoB6Vx+oWyeajKFVDg+W+Pvk8FunDcnGflx3zWOBqKDtNLRpLumlG3ffr1PGzmk6sJtaaJ21vb3b+nm3t26HF3aJC9xKC9xLtVJRgMpHfyumQnGRxjcOKs6ewZY1ijdFcb0JPltlTkc5bCLk8d/YcVJfxmKWWPIiIXzDkgEF853nB4fAAGflxyeRivbNPEwcCN8OiMhABiQ5LljkgZIXHHIHtX0UX7Wmru91vLTtZdE7XvtttvY/K8fS5ajTVr7t9FZarq7t30Vtux674fuHjIMWWVWQmQH5VK/wCtB6e2OmfTgivdvD18d8SMRBGctGWOSEIBUevm9ffBr5s0O5EBzJKf3YZ2WM5jYnG0gdSeDk5yeO+a9c0XUj5UR81A2yNgZPnORuHXjIPQDGRXkYynZOKS1e+t29Nb9rr8PkfPYihd3UdfvS26Wsr6Xd/lofVGg6qYvKmeRfPkRVRywZSWJztBGVAGAjcnr3ya9X0jUkQQzMUXeSpV/mDuMFsdMKuQUOe5Jr5W0LWHCxOWR1I+VN2flHAQNx8y9SMenXt6npGubYow8qgjLeWCWyTjBVedhwDvIODgdhXxWOwzbl1b+G297Jtd9Vrvs9kzxalCTata/PFtJJpw0Ts0t+99tN0j6JgvizibzNo2kuxPyqOgAXjGBnYecEnrmr8N46MZI2wuAcucZOTlgvoeNp74PtXlthrpkgCNMCQhLRsDtfP+rwePnA3FwO/pjNbsWpBoE3SbjglSV3YTjAODk7cYUEYGSc9h81Woe+0lZ9Ove/po+iuvxEqLu5cvVtaXvs9
kkle2/U9l0zWGyH83ytg2l2+ZZAAAShAGO/QHk565r0LTNWjJiwZBIMndnLMTj5yDjKcEBfTnNfPFpqqpH5SsjFwpQEA/NztXjoe46e5zmuz0rV5QpZ5QQqlQpG1mHGVLjPI7gDoOnNeVicMr3krXaVuXSz5Vfyet2n1bszrhRb1fKtNFdWt7qaW2t1dv1R9LaXqzytGoYbVYqVQgMc4/EE89OmDjtXotjqWVi+YEZIG/neAMMWBIySD19Rjk8D5n0jVmjVJlfYBk5znfu+7kH8fTnBHHT03TtaLCIu+MhhJt+8x4BVAfcjnPYZzXzeKwycpJbRtZNrZWv0b6beXVXOqlTSTuklH3bLqm1pfVbu17/wCZ9Aadffu0KkLKo3cj5dndiSTkL2OB1PauljmD4kWUYZl3hshhwcybc/Mo4xyM5JxjivGdI1YwRhWk80FXQAgDaTt525yW6emcHPGa7Gz1VpA0Yc7lHULkdCQW54AyemcZ7jIryPq71drrSWrs76WWzvo72132OqNLROzSdmlf3lay97TvdpWtfTTr6RFfs0qKJCY1LBip+8QBtGPT1PNTXGthVKm48t4oiCpIPAGSFPQv3wPrmvPhqrwIhbY5dHUgDbjGMP1OT9cHiuP1nxKsLgSnBSSTaS5ZSMjBZccg+p5OOcYralSlJ2lG8U1dLd25bpvR9uj7I+lyPBPEVVaKvFqy6uzS2ab+Tdup1ereIDNHIZpUjeMHBU5ygbCuzcbj1LDAxtwT2rxPxv4hsbMyxy3sTrIof5GwW4JLEZyqjnAOevXpXO+NfHEViFkS5ibzXKiNDuZQoG8EcbCdw2qcgHJ6ggfK3i/xFea54htrKJ38zEQUZISVZc5w2cNtxyfcEj0+hwUeRxSTd7ONla1nG+j1fX732P33hjKJTnRco6aWjZ2v7t2r9X2Te+j3T9rvtZvJdDGqQQzRu1zIZL8NuSe2UjaDGRnEnIzuHIBOTmvl74heOtL1NjaNILeNZkEnl/fEw3ZIXPDSEYwORjqe3qWr+JJdL8MT2c0ir9nDrJE7bWLIvyxo3OME5wF5yQORXwH4xudS23F7JiEQXLzKQ3yEux8pBgjdIuDk8Yz2zX0WDqSpS5Z04uVR3ty2vH3UpcyV0tt9O/l/W3h1lFObSqp07cvsmpWv8Ft+3a6tZnMePkuL7V5r6wVLa2t4t09tK4COADlpRja7P1CgjBArwnUfEE11HcWMhknEQZVbd8sMTfeYkcBVwOfy5rzv4j/F7WrKe80+IhIpncXEufnTBwzFs8uc/d6enHXzfRvE2pXNk1ykzpZTxuryMcuyIfnVhgnJJGw56A9a9eeHcoRmo6S6rVdNFbR6Lr3P6vyGjUpYelCrVpvkVONJJu6SSs5ytdt22aWvY7jxRqUVkroYmWCFy1rKjACULjMzdc5JG3g9D2GB55fWX28RXKwFkn2kSztkEfxGLgcEkY6+3fOHeapcajfGNnV7RQY7eORirDqCFJ3fMf4cj2rstFsr6/tEgSGSdIFPldcxhBnYCOqAtxnGAB1zw4U1BK8XFeT1Wqsm+j6dde60P0bB1rU4xunblU5bR6J6y00evRt9NEcBqejTxQzCOACNVAZlHygnO5FGenA469+c1a8ChoJ2jeNjCxG4FctuXPyEkZC+vXkjua6yexvIJcTlha+Y58lhnY/IYkccjsT0/E1t+E9OsY5Zb+aNUi80gB+QV5+bHGFPGep6ehr1adVSUGpWcZQer2a6u3V7P5eZ6saiVqnMpOCtCUW2ru1lpdvby6pnH+KbO3le5EKYdgSuOgLdkHX2BHvkcVwV9Zz6Ho8hmUGS4GcZyFVvu5zyDjkjtx1r226gstT1ydACsMIU+sexMkdh83PX+eMV5H8VNXtZpf7Ps9oaNUWdlYfMyghd+O688cHn1yK6YScoy59Yup7vrdb9XFeV3ud9WvGGEqSnF80cO5tpv3ZcqfKru+r6W76dV4k7mR2bklmPQZz
z2P8Ah1P0qaJlQAEAgHBH14xnjHfHXuegyWxwkRhyRnnj1+9nkZB/+t17VWBbeSAclj1Jxxkcnv369K9ClLnk0krK2u3ZNa2v063vp2PgnOdJxqtNyqydr66Nx1ta7btte1rX0bOs0G4ih1K1lclY0kUkjJHUdfpkcdee1eo6qxuikiYaKQqfPLZLv34Hc9BwFHPGea8Vt3kjkhGD/rVYBSSSB27+nX1PA616uryTW0AUcEKysuQUXAyGz/Fx9D+Az52YU5RlTmtNH13+G1rPVt26W/E/QuG8Q8TgMXh3FN0pU248tpNVOVW3ukraWtrfc5nULeS3Mm4kqJGyRgjnkAE46c+wJwfbKEjbGUjIY8A8kHcT1xjPfHvXYa4AYQmAuAArggFz1YkYyDnpnJyPWuUhVcopOSATzyQeSDnuRySSfSpptuN7J6pyaSb1Sduztor+fdBisM6GMjRpO8alnK+qTtHR3b0Wy+5O4sEJOC2Tk87uueOG/THfB+orrtCxFeRENtUEbmHBx3z1wO5Oc9sVgQAKwUEPu655wc8AcjB98HHXtzt2ZeOQumzpt2Hs3UFTgce+M89uTUV3zU5Rd4u2/To9rX7+dr27nbgsNDDSVRtJqd5KFrtSa7avXfXzfc9XuIvOt1SJjLE0qF2UggKCRvXOOOeRnA/EU6VrCy0u7ikiWYTRvEiuAQGYfI+3Bwy4Pcj+mVoM0t3biDJXy95fMhCySMBuCnbhEOMAHI9wMmsvXLxobdlYAoGIG0neBgnk468HnGD+tedQowmnKKs7NWV0m1azTet9F1a7ve3pVcMq9+aXNRjryqVndcrV9d1a2nrd9PJ9YgWG7nABC8kAkZGecE469gOn0rCDBUbPfOM9ASeoHUED+uQOa0dUuvPuWcg4JAxkFhgdD2JJ57H6ZNZRG/GTwd2OBnH4devc5wOor6XB05wo0+dtyaTd7vW63b/O+y+T+DzOrCeJqRppNRurXv71432s7X03b72HKDs5cE5OSMnB7Dpz6Ajk9sioJGYDncG5GT/d7fQkY/DPqaux7PL2kZYjoRxnAweo5HOM9yc8VRmZS4/ujGe2Rn8s47+/4n1IpqKb729dtbdtt9V6HiYiLjT5pVG7pKKutNFdb9N39+g1DhiMHBHIz17jnoRx+ufQiRskALx/ePY57Y6dvbOKgPU46dR64x6/Tr09xU6oShYE8YzwcDPQZ5Ax1/Dn0ptq6011tt8931/qxhScpQcNbJXdnyu11s9n3tp1760GDFioGDkjOeM9c4PP6j6cci7hyxzkEDPbHpkjp2B9Md6lfByScEkc9Twc/wCPIx1qMZJCkkjLHrx6gjjGexzzxnrkB39Lab21frul/n9/nyglJL3pNyVrNW1atzJfnr036PEgOMkEDg46nb055xkHGec4zjoakUknp34Hpg5HJPIP9e5FViFR8gYGepOPQ4wOPw4x2qxu3c9en4e3p9e/58n9f1/Wu+5pSk9YyfvRaSjf4tv8lsrX7ux0WmpHJGxIPmqCFXGc9MkHp3x+OOoxW/pZjjukdjtjwAwH3gD154z7/pWFo0ZkjlbK7lX5VPIdf4huzwRkHGO3OK0oSA5YAEDIGeAMcYzyQR0B/SvOrNNrXltLZrXRppp9Vr131sfb4Go8PDLcQ0vifNKzTkrxiuez5e9naytre1jt72YS7HjAKRoI/N52kDjaBjJPPX0571kxhhKB2zkFepPqT1wPTnHWprcq0C4SVkUnLqcqrHs3Qbf/AK3pVYO0LlgDtL4TcAfLU+pzkNn647YI54ZtOcrNJWtb+Z6dL7Lt/wABv7aTi506j5IrRt7fFy6N36L0VloackMoXAUYGGOMELnqzYxnnHQjHYdqhkZkB3qrbF2jIwQxwRg98DoMcevOangkD4yxIKkkE4wT0X3BwcdfbtVe5R3BCq5CkhvUDPDY6c885xx71MLt++lZNu1tdOt3uvTzt51UpqMeeHNUjZXi3dfZ18/
l0Wytplyl2YO2B8wIJOAc8bTgHn7vPPPUkk519HuXguUAkIMhI2hhlX7c5GF6hu2QOKyXCDh+VDHaCeAV9fUnng8ce2KdC6LcIUG3cwGedxP8S4z945HU9vpWzadN3VtH8Kbvta29mru/TTy04KemIhVc+SN4/un5NX6pK1t1bT7j2mxnWSNBI7Had3l7d3C9Nhzyq4yeOOneuP8AFrQOrlEAZgAGHR3HQ/7+MA569e1PsroQhHR2bKELJuJKo2BhlPqfU9wcisTW5DKnGSyPwWGPlOec5xk5yT6D1FeZF3k4PWV3y663XLrfbutVp010Pfq1acouf2uVLlet1pZ79OvTfojllkURlDIysSf3aqCBjgtnIPTqB/hVlbhY2jAcOTkFj7/3s9h3BOB79axpJCjtxg7goJOAOOSTjnPoMcc5xmpbdtySbsEKdxz1x/EAefT0PFd8VyxTabWl9tNrLS79dtGtdLnhLGpu0r88XZpxeiVnonronra3TyOntDGrCQLgg4x2ZD1I9R05/nVq4YeYrRnBIG8DkO5znnsRxg4PfNYlrJuiEiElFJC54C4OCOOvfj8OlWVnJn5OABhQOpbHfvjuD+PsOeqkm+V2T1fdNWVvJfP8Ez0aGMhCjGlaPLOTfMotNOSi9Hra7v69r6mddrKC24fd52Z45zkhfVsDnOOMdaqGR0jYZJDDJXGMH69v6nHoamv5GWRt+RKMq7E9DxwBwMkYGT/+rNnudkRRTncuTnrnJI59+f8A9YxRCM2lazaajzJWvdLXTV3Xa+qtbTXhr1adOVdyc9k9W1fRaLWzVtfxsYE5cGTnJJJwT2/l0z9c9O1VkZjJjPTO7ocHHPHoOfTPpxUU8rl2+bknB54/n0AHb9RTbV8HJORyMdScjt6n/wDVXuUYctNNpPSPRt7R9L9O7vZbPT80r4pVMXGCc4pTera2un663d7NdtbI11ZVCgHnJwwz6cEe/HOO+fTJcFdgcjcOTu9Oe5ycke3XuetRhd6rgE9s85TJ4Hbj0IBB9qVPlO052g8gE5yOuQOvbj69s1jOzWnTXVapK2t76bPTy06npxbm1CXupqNpQbV3aNr9Lt/PTQv2mwSESEBdpB6Dk45A6ZA/X07STKgYlXJRgDknlj3B6YA7+n04qsSxYFAMYAbnA9uMHJ4OeR7VZVGdSxBI6cAfy5xn6/WuNzjHRpO7V46p30+Vu6s9LPW56tCUHD2ad3Hq1o3p1ej36a+hUkcqPlx/dwMDHUZPGABjr7+pFVGyTkqD6kcgY4OTjGBkfiOvNX9qk4JIG4k8EHIHQ98k9iP05FV0I74ySMDgjI9B147gc+nqnNRi2lro7K/Zdru19LdFutzOvTqT3vy6cqdtNraavX8eu13h6pbFGWVFYQyljGSpAO3qAehIyPfpmsR1Jxjt6cZ/z7/TpxXsF3Yi88FSOIEaXSbjeJlA3+Xcn95G3BJChFKHOOpNeR7efl7kjHGVHU545JI5GPTjPX2MDVXIrNNuEZaOyV+W6+Vn63drXPz3iLAyw+JjezhWXO2k0le1klqt3bWyel7LQgIcHJBJAz+XOPbHfHuTxgiIlWDtu2uDuwVyMDOc9fy2+vPpYcOoODuJPbkjkYOOCGGOTx2zVcMCW35YgEkDuQep9e/1OMjjn1actnbTZWtre2qVnv5P59T4jGpU5KFpR5l1V9unR3fb5PQjgRstlgAcluwx7+v6cda8z8SAnVHDBshQc9mI4HQ856E9RjjNeoQEmTKsREykEcZJHf1B9weD3Fec+J0kOotvAC5AEhIw204yCBlgMjj6+2fbyifLjYtpXlFrXV3tFt26WVvNs/BvFjDwlkzb0arLZc2q5dHrdWv0WjvZnPxox+UY2Kc8cEg85HYgH/0GryxkgMdpGP0549gPQZ5+nFOIbACDu4wec5z1x7deOo56dr0WSCh6EHIB5GcZ4OCQPUnnmvpqsr6t6uKWq1ton5O++/5H8wyp203
a01u/5X319dbXV+rUsbBsEhFA+QEL1I9QOecnuCPUcVoInmMFGcY5AUHJx3GfXnPX2zjFeFRuVRtJxwpXIGB1wevuM49+K2beJAyDIJbgHgHnrzkbORyOcEdcV5mIqKF9NUl9909V39baLfe3pZbh+dyukoqSU1Z3lH3fk9/wvd7jILTkEod2dgcrnIBOAM9SvbA4OecVtW+gNNBJLkjdgx5GFbruJJ5Xt8uDnpkdat2NqkvVQSGYq5PA6ZHPfHT+nNd3p1g5RCQgVcMuBwcdh9eOc8+nGa8itjHFtqXK7q9no77/ANL12PvMuymjUim6anGSVtU+XSOjk9r36ar5s81GgOjDcjFSc5xhSWxwcZYY79wD6mtaDwxcX7/u4Skw44BXdjODjvnOSeM4HbOfabDRUv45h5AGcMi5BBI/2uo+mMcjmug0HRo4Lx/MQNECvzyJ9zaemeu7PIB5+gGBxTzFyTbl78UlZu91aLsmtE9fnvbQ9GOR4aLjCUFKM7tbxta17yXZ6Jel9rnzrL4f1DTZNstvIy5CAlSuGONucDKjPAIJ6HGMV0eg7opEWWIK5fOXX5lKDGQO4yeMY3fz+sn8JWOrE/uY327VGAASvfb6kZ7/AJ4qre/CqxjiMkUeJU5UkDc2eAD064P+Oa8+pmMasXFq033ut7Jeer/RW6gsm5JJ0GopW9yd9OZRWjWm10n6N6nllnaIoWaSYN5qiQJ356j14+nbqTXpHhwxxW7EJuXBK4Ukj0PXhhx9f+A5qF/AOpWSo4RpEwPKU5OMgk+uQOOTnrjGB83SaFptxamJZ4mTAKuMEDJ7nqDkDr36/TyZz5ouSa+Jq7enpb8r7aWfaKmEnSko8k04pO8lzJ2to2r+b8vxfTaK0sEe8o0wyArHgnPTK+oGRng/Xv1S6isMMsbjAaNg5HDISMcd85xwPTn0qDTliAaNo93UqwGRu7FvUZOCTwM9O9SXenySbZApOzLPFjlxk4Yd+BkZzjB9OKxUm5W06eXRX/qy7vrfx8XGcXUae97WsuybWnRJro9+hzyzP5LsZpJFD7ePldlJ6Hrz6Z684rUstNkNzFMqrKTgkSDIRT94uCeGAwMjpnvT44IvKVmUJtPKY+ZSvc9cnrjnjt2o1zWIdBsWuSyBPKO7DjHmOCRtJx8x2nKnp0Pv24eM6jhTgnzStF6K2trWS7aW6PTXU+UzDEUsPCVWu+WnTTm72vJq0ra6PrfXZ+R5B8e/EFnpui3FhaTBCYl3xqc+eGDbxhSMeikk5yT0GK/Oe6bzZnkTADOSCBtO49sY4H4ke3HPrXxS+IL+I9fvI42cw72h2qd0RSM4DDBwd2cuBjBAPHOPJ0EbKDnBPVcYxzyQeTkdhwcd+w/U8lwMsHhIc6/eTSnJ2b0XLa62d+3fbofyD4g55HO84qTozUsPRvSp8vaMlvLma7J7b9GM8osOdx6Z5yQOpHcgHn26fi9Y+Rk4xwAO49MYyBwcZ5/GrcUO50BLFWxg54IyOnT/ADmtBbUYJCEAfLk4DD65B647fX0z7idn+fp/SPgItd3olpq1utHZWaSWmv4o56SLEmdpUEcemRzkdc5HQY/kBRWldQODuUABTjqCy8njHbp1P4CiuulUvBe9aza3te3X9Pkc8rSk3dvbaN+m2qlbzVzwJ4iCVCEEsR1LkZ55bqBkZ9+g7YVA/wA2cKEOHHQse4PHPTgdP69Rd6XtkkUIwbAIZSfLLDp2PzdeR0J6dBWK9nPGxJjD84C9WwO54GRyefTHXljpRxlOdveSd03qtVpq3e1tdfO/kc0Jcya2a1V1q12u+6a89L73tTZyi/KSdqgOw5x9OOuOp4x154qIFiU+ZGGcjJ+fOeAT078ZB9eastE3zhkIVjghTjb046Zx0yMnrge6BEbKbUAUhtx4JPUDOefb+RxW/tVd6v10a3T1autummnTdNtNated0076W1d7fzdtunVX3KgU4DYG50JIHPK5xknpkcg
j15xEQXwCoAz8uTknk/XrjqRyc/jIVlJPy4+XcB0G0d++DnvnPB5PNOCkxoCMMxByDndnJGCOMdcZ/U9MXX21u9Gkt7q1t3rstNHr8wpppp336xWz0u0r2f36dBYwBJywjUELgDrjnIPIBxzjGM9Oa6vS5j+5jKqEjwd3QsVwCVPHXOT/APWrltqswXjcDkqoPbufU45JBI7DOK2rJsYAfIVwMjgk+x6DnqcegzUTm509dHo+Vq6V5Npp9bdFsephZOM4u6et2uunKlp5efyfU9g0e/eCa22zJtlGwqxwdny5Bb+Fn4yFzjGRivpHwlqgkETybSYvlRiQ244G0p0B2c56fTNfIFhdhpYNw+ZCAWOMAjoFPfAJ5GPoa9/8J6htjKKQx3blUcnbxkr6g44BPc9eK8SvFRVmrc0m0+i91LuvXzvqj7vKMTeSjbRSTcr2T2dlq7yVlZ2dtt0fZ3hvWfsii4aTd5qbdrnG1eMGNsHk84GOMYJHb2vw9r7SiNRKxlkjcL5q4BEgG0Rvk8ptOOANp6E9PlzQbwSwwqyFnZQqqDlkfgkL6AD7ue2AMmvXdBv4YbiKGZJjM7AsVb5UQAYVT6nP3cHHJ4OK+YxdOMXL3Xdt7JWasrNX0fRt9r/P9CwdVuMVGXInZtyd7t6uL38u135qx91fCCdbS1jPmqZI5ZrtiVJbDMu7zGJOSMfKccAn6V9n+CLyO6+z3LbRLvZyjAsVVcc5BAHfkZ749vgH4f63CttNAhYSbPK3cAgOQAox1A5zjBz1r7I8H6vFZWcbJKFeG3RCuMZZwcl+eEOOuMAjnjp8fm0Z+/KUZczso6a2XLbRb2vv876H1OCq8lOLTjdtP35PX4Vol32XTS+59T6D4h+wS3UnyqhbCAtku4GSvYoCOcjIyOTnGOssNdOqXcNzCUMa/MF5AcZxkjJwy8e545Ga+NtS8WX8ckUMMoEznMckTFMqx+Y7e56AE4zzgADn6F+Hl5Jc2kU8pk+0OMTLjCBjjIQdQpPU8AZ6V8jVwrV6rSSk9LJXVrXUlf8AJdOvX16k3BJ2abs+ZbO9tJO93bRarZO+59T6BdvKHkmUxheEyAd4PHBz19z68YwK9JtXWUW8G3IIAQKOg7lvqfUZ6+hrybQWlSGFXTIDZVieAi9RuPUZ6DAzjrmvTtOvQ0iqZBGNmAo+8R3wPTv68c1yzSgm0lZ22Sdnom3dX18/PyOSVXmknFQXVtPRXSS069X1T3OtjhEEYkwQoBWMDkO4PJGO5zwOvHFdLptuZYnlOEOACpIycE5yT1PXt0+prmo3MiIEAxHkrnggnkEDPX1z09K27W5aCHaxyOmepY9CAOmT2PSuKrTT0Wz1fmlrqtNdOrsrvsY+3u/ivdpvlstU09b9N9ehupDFsBwQx7H7oHc/ieAfWtK3kXClSyhBjaSSfl7f/q56DkVnxyCRFQsfMKjYOgVeMAn+IgdDheoqyUKpk53Yw3BUgd2+uCTnvntXmVsLzNyjJq2sUlurrv5X8tvM6add6Jt2bvpbS9rJ6tv/AIL8ixJfSEkKygISD14IGCPxJHbHuKfZ3EkrBsnAztHJznOf06j39qx2spWJeJm2cKwyc5OTnHXOOpAAwRnrXVaPYxqYt3Lvwyt69hzxz0A6Zz2AB5oUXKacnLmjbdJJ6q3fb8N7o7vbQhBO6afK/cXvPRaaO6svPdeZegt2umAMO9sjkj73/Aj2PTt2/DcgsJoXIELAFfmwPuZ6gD1OOB6irltCVwIowTzjptAHB5A4xx244ArVsTN56LLkIGP3lyMfXGD6ZPTuM5r0qME7Jq8uaNpJLRXs7Py2vp3ujF4l6tNKKWkJN828bdbJ9LENhZh5Gwo2kY2sMFSexOAQeh/Dp1rorJTZq6SBXwCVDqMKScYznpxxnHPtVyeS3h2FQjHA3MnO7GOp4HAJPvz9Kq3Ect2qfZQCJCAzMOgP8Cc+2N2CeuMV3ewjTScJXqxSatrZvlW
ltG073aT1t1THTxEp2dRpQaSkrt2as7LW7fklZ3a1NeKNLyBY5JY4ywChAcDGD/COw57+lNj0uGzAKJJIxcsACWjfJBJzt5B4wMfjzio7TTQu0b/MlVhkKThSCM5Pcc/X3HUdxBHCyRwSBXlwgJ7AdDx1yepz3B65r0MJhvbe/Vdqmii5PWT0W0vK1n3eqLeJjTuotuL95pJpJWSa1+S69EcwukC4nEsqNsK4CKNsYOeAOucd+B1P0Nu40m0hU8/MGPEZ3ABec4xgP7f1PHosOlW/kCRH8tdpOxuAzd8LyQCBxjJ59qwLvTkaQtEWAJO5jyMHGcA4Xb7f4Zr0KmDhQjda80k3ZpWat0Wlk/P8rCw+NdZ6zlTjC6jDW0k+Vpp6pbepzMVzaSRGOIudoAxIm1yRkYHqG9hyB0xVhIrmclUBhCjcCSQwH0wc5x+XNdLDptqVicov2jdgFV4BB5LAd/QEZ6n2rXGnrggBD0GW4JHHoB9Mj07Z4weCq1eVOdo/yxt1a130Vr/pY9COMoUlywg3KyblJ8y3W2lt7pP8Oh5vLp1x5i7fmDAhnbGQGA4Pp7dcAVBN4X8yRJ9hUoBg7cqwHqMgYz69M4xzXsdtokUsZYqFABXacA/LjJGOSD1PAJAPOKsvY21vGAR8oIHJweexJX5T39x2PFdlLJI8ilUneK5XFt2T1T08ldv+rFTztpxVNWcUouyST2+XktLX6bHj8+jTmZJdh/cpgjytwkwCDnBABHXOD1JPTNYF/Yk7ti4kztEjKPkHOEXOdp5yevX6V6/duqM3kkKz/Kc4yFPB2nBwxzxxwfeuN1CwE/mbBtxkquTktxgj+9nnHIPr74YnDUYe7Be0bau0r2fu3eu2rurpb9WdeCxlSUoynpeN1rbTTVvXT56/ieS3vhgXLNPPErsE5UgI3GQpXru65Ixz6YzXn+saTLpzPGI2MDfKoK58teQZF7bh/CfUn7uBn3u7guYFysHmSKFHzYJQdORwMntnOPXk1xniPTrmRFPlNOqxqVWPkknlstjoOMkjj8a4I4ZKV1G9lr7uy01b0dr/ANKx7lLMK0eXmmnHbRXjpZtdldfn1VzxO70/ToIWE3lSQvEFQS4aTzJBwcHHzjn2Hfrx+Of/AAUL+DekeKfB+uS28Mck9tFLONi4KIwZpgJPmB3bUUHbwOnWv198SW2yVlzGJ4m3LAW5bGSWx0ygPXB685r46+Odnpmq6F4givLaSUXGnz28Q2kxyu6MFVOCAQ2QzkHtxnitqbng6sJ0vckpc3NezesW73s9tbXdvmdlJUqykqnNOnUThOLV1JNJ3WnR2963pqmfwlfE3wyNM1m9sUiMKRTyxvGy42yIx2uRjgg5xg9/fjwuaF7aR43CllYFMDg56MD3GOSeD0wAa/Rr9qjwSvh34gaxAlvtje8nYBOEUliVyP4yecnsAB9PgzXbfN20iq0YjBE2R8zSD7zL0DcAAjp09ef2bI8yeIoUVJ356cG5X2do3Sa6/Z1tbqj+fOOeHqFGvXqqEWouU4QSs1F25U0+vmtuxkWqthWZscEhh29iBjntnrjp3z0lvE5QEK7kgliOTg4znpgjjnnv+GBZ+S0gjZgzFVwS3Bzzkg8DuePTOT/D2NlEqeWcNjkHBAJU43A8HPb2yM+tfRSly6P7Vne3679bdfkfjroNOV07xduVt6r3Ule/T11VrdGXbW0UKkgHJwAM4Ix2Jx82egGF5/iz1ralsiXKjkMGkBHC9clsjqO3U+laqvtBXKqoAKk4HJ6Edyw9eP54wtYnKxkK+4sSNoIHUcHODnOOvbHfNclWMpxvdtrRb27Wtvt1t572t1YWMnVp6LWcVrbRO2jbS7a6duyPQvBFwyPGScIXWYBgWACkbiefcY/wr75+H1zG8VtISMMiAu3zZJHJbHOOgB7nHHFfm34Qv5IWiO8GTzNrYPO4HsMfMp79BgZNfePwv1MyWNoXeJgBkqpyR0yEA5frgHA
ye3GD+fZ3QlCcpWSfM9NrpqKtfXXdq97edz9kyySlgqaj1STV21ZKMfdbvd3u9dd7aXv9Tq0sdtcH96owDHzvJ/ulTgbVPJxyeee1c1fyQhwwLoyxqzK5+XaxJyp9Vx8w6cjGDnPTQMHsonBKh4gzMx4OAN3GPvEEbMkcZ9q43UF2SbQISu8E/MQ7KmSVU47ZG7OOo6V4VDlcvee1l52923zXfXS973OLMW1CUm7Ra06K91ok+q2t3uvJZF5cCaSXG6M+XgznBWVx1VW4ywz93t+NUodkjByjqzBgGJOGZMYYvnBAzwuPxp2oAsjI4LxSnzFjj4Cs2PMOf4cgLk9SBjFUYpAMQhZNrnYQMMyjPYfLtDY5znIAI619Hh5R9m0mo2stOqaW19+n/DXR+aZpRvUUrO+62XReaburu3on0Z29kyxjZCSzRqjJI3ClCDv83k7ieMenNdvY3vl8upijG3yyCcHbnBwMkjkknAByAK83sbmPzniJRhHEdwJ+V3ONp9xwcge2ccCumsrpg8IkdHJBCIRuZlUDciDIyhG0noeBjua5q/vp30s0trt7K6vp873+9HztWjdyWq2vfpstHdbb76/ie4aXfj/R4i7EyAhXUgKzMOoBPyqccN1HXtmvQNM1HfhVZncbYxIgwV25wrc/MSOnA6GvB9MuwsZkwqyZHyEksSv3ti4wg9wTkdO+e803UHKRXCsYmG8je4kjGcgfLhcNwc5zgfkPlsXSeujt089k7u66av8AC6382ph5p3erto7aNu12+W2vm9VpZHvmn6sdpjdm8zK4Y8lF7b+3H6+nFdzZ6qrctMjSiLbGzfKp45B5OMdF464wQOnz5Yaq+IwWVSQpdm5Icbs7c8HcSMHtius0/WZkB8wxfeQRtuyzQ/xOAMZKntxu9q+dr4WW7atfVK+u2nk+97a9XsZex+G705dVq/e0v3von5v8V7ba6jGrRbn+fO5W3EPu/v8ATqRwvoM+uK67T9Y2SRme5XLKvlKX4bbkAsueDn8uf7vHitrqjnbGJWmEi7kk4Qq3OeMnhcADkDnAz217XV0jmg3SBHY7RIW4B4+QnGCe/OAMd88eNiqTceazaWy120VvLX/h7Kw4UbWnNN78sd1q0ulnzO1rdux9J6XrKlSjODldzZ5BZfup7AZOOuOuQK9H0PWvMgQGZTL5jBlA+bysfMNxIwV2jJx6decfNOl6kJCQ0iksV+ZDhQAT85Hp6nknj0r0LTNWMVwizFiWRtrBcBgcYzg8qO3I4JyD0r5DHJxb0cXrzX16Kzvp2026W6ndSo3UeaLjr11uk00mrdL3d97PW+p9QaPqcNzHFIGBDKSmW2/OMg7iR1GemBn1xnPdWGr2yurGUlXJEiklSNg6KozlPxH54r5r03W3YRlZdkZYhASBtckbjgHIBxjryAw4HNdxbeIEtmiG5JJJw6kHG0R8fOAeo9W4z09K82b5bRkrSaWjur2s769dUtHre/ZHZ7JtpqNm7JJXbS0W3bu9E9dT1/8AtuOZpE3qVSNmQAkfTef4hnqOMY6nPHmPinV5HW8mgCO0Masyk4fC7g3zcgIM8E8dKy7/AMR2QZds2IlQkM5xG0jY3KpH8KY4Hvx2rg/E+spb292q3Mbx3sflhYxmPa4OwtzkOSDtUg5xyRxntwtGbtOMZpt6RkrRcdHzX810Xf0P0ThDBXrQlODTcor3laL1itGtE2m9Nnt0R4N4q8XXE+tvbGVo4HkZV5w6FcblZDz82B82RzWtbanYizg1CePbc2m/YzjEiM4G3aTjeo25Azzk9OlfPnj67udP8R2lxK8yS3EapFIr4UK7c+enYtgbT+orqNS1p7nw/Fb5BuRtVZgcMq4+ZzgDKjI2ntk8c19fRwPtY4WoqaimoqTVnK0Um768zTVtH3Sex/SuSZbTk8K43jTtFSs0pc3u3V7arZNrXX5FHxN42tdRvbmznnknLv5zbGICNnIjYjq2B0/rnHz58Z9
Vt9N0uBrOYi0uEEk6SyAkPghmweUySCuR/e+tO8QyQ6LqV5cSzRwReWsscssu0K/JZzkfMSf4c+/pXzb8TPGsGsaPcTtcPdZlNrG0Em4siZwUA+6nJBI4xnJwua9unhOeoqqj7sV7OO97O1rdEtH0t80f0/wdgeSeCqUnalTVPmTV1KUuRKzdtdpSs+99j4++Is1ze391dBt8Zmy0LNz5UZOxk7OxB+YccgE57VPBmuSWwFoSTbNG5EbjcXZsYVgSNsfHoeB6Gub8V6mWuDBA290ZhJufJQtgeuGb1xjA7c1zMF7NAYXEpEjLtZwwKxr6LwCMc5OeOgOOK9mnR5qKSSjy2to7q1o3f6O6urJ3P3rC1IRlTfM0vdU1bRWtut0vJPrvue0+dBFeTS3ESmK4BKPGMrGf+WTBR1K85wR26HNfUfwwt7AWdv5rec12AXIIAUEEhf8AaH+yCM8DOAa+LtH1SSe4jW4cSCEKqJg7ZQf4g2efoQO/fNfSNl4n07T/AAwq21xDDeKhAVT86nnGzBBUjPB7k4wDXOsO/fb5bK7km3fmTjZxTs7/AIWWp9Rze2w8KUJSSrShzSi3G0brW99EnrvfV7Fvx5eaZNqN3Bp8QS6afYII3GO+0bRkKeDnH69K559NvrXRXlRme4WJmKxnGM9RnOCF55x657VwVlqcl74jiu7hnbEhkLZIDNxhnXnP+3k/hjBHa+J9dfEGm2K4uLkoC0bFQA3UcjCgYz1PrjoK5KUaahNyTTvppa7bVtNNVr967Hv4OqqUKWHpSclBKcpVObm91xu73u00rJfPYh0SJrbQtQvZoj9odn8maTmWQrnEZz0C5ODnDDJwRk1876/aXdxd3N1IpZS8jOWyGO4nkZJyRxg+hPrur2nxLr76bY2On+aQm1RMxXBLDHmbx13cgKcZPJB654PV9S0uWIKMMJYh8wI3DszHjrnt2P416sIfuYtte5GLSja7k+TfTVJWV2rp/M9SpUWJg6Up2TcW9oqSVnFPuvLRvvo0/KnR0QJu2Lxuz6jP059vb8qZR2cN1UHGeBjjOcdMHt2P5iuwvNOikUtaAzR7VbggkD3GB8vPI4xjgVjyabJBGXZWxwcdcA9z6Ec/4c10YatBOKk4p2V07J9Lvuvltp5niYrL6snbeMNYuErqys42Tv8ANLX8CnHIweIqSDuADL94+4z/ABZxnGMexr0uxu5YrO2bdkkfMQeSynK5z128jnHXrzXmEcDGeNcEZYYz1Bz0HfPOcf5PqCwpDa2RwH3IBg9S3GQR29B35xkUYydOU6UeW6W99rXjdq+lmtPL77exwxXr03jJwjJW9nFya1fLKKlbo2rPztvd71tTkMwBwM4JJJ+uMD2zyeck89a5wNtlVQCDkY9BnIzjP69PzBrpp1jJLsGTd8pRuN3XIXB7Dnp+grCljXzvkGCcnI545HYD8OfyzXOpRi3pvquydlo2rp266W6M+gxUak6kK7q8l5Rurp3Xutaapd7O1n6FmBUWVV+9IWUg4yPmwccduOvTmuws7YBNzx5Zjk4OWHoQe+B24+XrgdOQtm8uZTg5U/eY5+YYyfQjPvz7V2Ed0CqnO1iMjp16Aj8+vsKwrv3dbtct9lfotLXa36p9baLXvwHs/Z1JufNfXpdL3d+t/wCXtv1OvsJIbWxuRKChdN0eBxIOcKxGMFeO/setcNrd+JbZlbO5iSrkccZ+U54yP55POMVuaIZdQ1W2sPMMqXL7HRvuRr3O7sR1x0Pbpmuq8d+ELfStIkZl2uI1ktgi7RKnJLN3VgOccg+1cWElzTsoNRg1KXK2klo/e1s3J7rSyWx04rE0VRhThU/e1tIuLSXMkoyv3Wju7db3Pm2YFsnphiXGCWOScd+e3t3HeoFKgHPUAkE/h+uB3/PoKszhQXAyQTkY7Ac5PQe3Qd++aols9DlV4J5yMk4xzjnrxz19q+poNSjF6W921klpZb/e901p8z82xlqVWd7uV3e
7um3bbbTZ9b6+o8kBSVO7kDI6jJ6DnA/+vzmq7JuG49CSAe+fcZ4zzjtx0OOXNuGR8wAOQCTgdhx/I0znJ/Qen49812ppK6fz030633899u+nkVG5txcXZaJK6Sbs76va3q/0A3AXPIGD2/AcDIxjjtzxzU6PiMgchgePck4+n1xjtxmqgXMmAPmLHPU4B9D6c8DH1PNacFo5AOQR/dHXA4GT/e54X61E5dP0S3/Tdf5LRvBxqTk+WL0TV10ScbX23W9tde70zZRtB3HkEjAGR+fX/Ajp6NRk45Gce+ev6dB19R+Ml7hZDsz1+bI59sg8AnnjqMe9ZoYhvmztJP6d8DngZ56evFJS0s9JL+a3k0nrv/w9zirz9jXaetnZve17K6fa+73TVnre10qGJOMA5wM9xkEfXk547Y4pU68cYzn1I44xjpkfn6cVDuAAwcrgc47nnv269ORjHtU8YBIOO4yfXP8AgPShTSSu38/O3fu9Ndbdb2RUIqc1yqKb5btW1Wmq+7p6b77unXht2BcEpyrc8kNgHB78gev6k1rQTDzQzY8sn7w6KCcEYxx0xnnp17jmYZCsijACg/MOPoeeBjOO+Pc9a2UlQSR5+4wXfjnj0xnnHHf8+/LWtK7tZK8rrXouv3aadH1PrsvrOpQjRqVE4U5xUVKNmr8q0fbdLRet9DvLOcGLyt4FsVY4OFBJPyluCGbJIAz/ABevSo7DeRvwjHLAnOB6ntk4wd3HTkHplPepEIUVwRtBOOCSOnH95eO/P4VGZw25klwDliG+8R1ODwe3YdvauB04ttpSSTbXLypX91u913e+l738j7J4qhLlhKykox+1pooq1rtL7rtdDoPMSNRgbVzhTjJOcYzyMdD3xnJyORUvnJPGBubeBkgcbiOgI/iP44HrzXMvfbo1IOdgCBQOQvPIB/iweTx2+gfBfKzgBwu0Ak989xnPJA4AI65+p0jT0VlzKzu7vRu3TRJ6vutrbGscfhlKNNTTTSS5dlte7vpv+CatcnnZmYAAqVYjGQVGRgkk9Cf6ccgVXjkKncG+dCcKR2BHJOc546/XnIolnjYsd2R3KjjHGCefXvnOfbiqBnjDYxuD8Z4HA6g/z3d+3OTVRp3updFsvlZLf1vpf5aeXisR7KvzyneF7xau02nGyVlderbVlY9G066WeGHGNwUKwXBLYHcE4C9eT6dMVU1mRFRyBkkkbjzlcDC4HKsp98dfXjJ0W4RSUXHOSQOPlbllyCNoOBnAzjgYHXV1K1ea2J2hXBZ0xyuDgBN3HJ6YwB+lebKmoYm7509HFXtbbdXs0v61PYjipYigq0FFqNJJevu66W0T63t5o4WaQM5Zvyzkn0wM9M8nkEc/g63mRH+bJUE5wcZHTPGQfbr+Qqpclldxgr85ABB+UZ6nrzg8dffpy23PzqD0B4/XIBHrn/64zXc2lFNa7NWVrbX223s99XbVHz6ry+suy969nfa7a0s++vX73quijuhEjcgwnkAjqefvKDgnnjHf64pYry3muYWdwgQAkn7pxnkn29Me2eKyLp1UoFbPAzjpz2wO4479vSqBlUMDuAxwcDBzn7x6ZH/6+/HJNN3bSjreyV9/KzuttdF82yquZ1KNeNPkjpKLf8ra5d2rrS/3/cdjrluHRL1HLxyAEyHA3YGMhehAOAO2PTGK4u43GNsE4GTyQDjk89eo9PYA8ivRLhIZfC8EhYExxcEN1UHPI7MOcjqSRzjIrzOaUkEc4+7jnke49eue5+pNd9Gjb2bUk04RnJJ/BJpaW13Wu9tWk9NMc3r0o81pyk6lP3nfZtRbSV9kmtt97JGQzdWIxg8EkHPXIb/PrzzTYWwRjgbyScc/n1xjHH/18pICNwORncQOo/n1wRjr/iyHO4YxgZJ7njGD+eMfjXoRXL7q0XVX0tp11STWmn/Af546jdanZO/NvbXeN3+uutvRnY2qiZFUDLlcr0UhhyC3XGAMge/pUckISVlQMxU
ZO4jr+fI/QDGck5qrBcNGqMCCe5H3uOpBz1HJI/mcCtOMu53KpYt044fnPOcZIGfQnGeK8+tFxbs1q9rtOytdq+nfZ999LfdUJUcXTpU4tRqRjFyaSTcUo9Xd72TXdbEccYJz0OcbepxnsOODxg9+PSuit44zCI9oMjr0AyC3O3jghhk4I5XPTFVLW3WUg7SWIJQhgpUgjKn+8euAcd+cVqQqkcvLZkzlsDKp6gYzzjHJ+mWBFedUm03e11Z2eq1suvXXXu7dj2MNho0IXt8T+Jp3bvFWd1dd7/gjnLq2aPkMPlYjJPy54OATjOOTnvnnk1Xi2EMjA72BAJ/vHGOfx6e/eun1COExPjgN8wJPOQeOMDJ69OgzwDjHLKyiVFYgKrcnPGO59cDoPT2zURlOzbinbW91tZPtrv1WmtrGFWEITTcl+8la11tprZ7aLdbd+h02lKyWt1YPIWjvYWiljBIBYj5CSOmO2AepGRXlt7Zy2dzPEy7TG7ocYI9AQcDhv59h1HskFukf2S7jIaJyuQTwdoAO9f4vRRx1J4OCeV8a6fH5v22BRsmVDJtG1Q3TJAz0zgHqOAec42wOK5Kyi37slbfRSTS5baXv2769DwOIsu9vh5VYrmlRV007txUY6bN6JN936bebOcKzdJCCeRgA5GCPc9T354rO2PncR82cZPQqemc8nj0xnnvkm/OXUICcDIDAcYA6nPfPU8fyzVNnZSQ2dqjg8YJHGOgyfQ8dDx1r6yhK6TTi76JO0n9n1V12ve+vr+PZlJTs3zNRdrap3VvW9uX3mrfo3WaMJiu0HPbHygfjxz2HHQexrz7xeqLqBVB8qhSMcBc5yMDrk4+nfHNej2QkkfhlVcEkYGc45ORg844+o/HzTxWZP7ScY+bocnjAHGOOpAOck/jivbytP+0IS5lGSg043Vnbk9dbPyXdtn4j4oTUcjn7js68Em0n1V73XXpra113MGAKMA8D7yjPPJOMn0HOBnoD1q6oUksAQMHkkg5XjGT7+2TgVnRMXYAheTjuDkfQcDGMc9+2a1YCoZt+OFyEXhcDqfY++CD+FfTzau7O9ld3vo+q1tr5fPY/lWvK8ublsuZKyVk16f5rpfqSQD52/hORhuvHGAPUHnJ49+a37MbuGBJA2KAADnsw56H16GsCJyzF1ClOwYY56cemeh9x1HSt+zEhZGCkjGGKkkkHg/jkE8knpkHivLxbunqkuib7Jb7rW1rb9t236uUyiq0W7yldOyvqm46NPS19rq/ZPU6/S1VggKlQpyD054wGzkE9eMc569K9J024jeAwMsakL1AOCccEjv8A7XIwcADjjzrS05jBChecA8AN6k/xHuSRnp9T2Fom2ZCMEZzwc5HBJUdsjgcc+nGK+WxbXM027p3Vr+Wi+6z/AAufp+AkoQUeWL5uVJp2aTSStZaW7aPU9O8OGWJXKsJA0ZTYRzg5AKsd2HOeDgkehJru9F0+eZo23L5blmZSBkqT0PQ+ZgenU9OM1zHh2GOWOPJKlsbgpG4MMcnA4AGcjHOO3FejwhrcwiApK7LjkgKAMfNjoeAM4xz9a8GrVbm+XRyutUnppr8ktf1Z9HGlBRp3ulyt8z2u0k9357/jdo6WwgSCQrHG6yBSY2bLJIp6k9MNjPPI4ODiu2S0nuYFkb5jtGcfdAXBwD1zj3571m6IjTwrGcZ2s7NuCjoOBwcgcg88+mRXXQsyRqVAIUYxt+uQF6ZPHIPHuMCsZTtqndq1ru+qav6Py/NDdLsuZae9ddGl1v69tLbtIzFjjuoxG8YKxhVC7SCRnHUjnOOfofxvnQtPlhjleEblAIAbaM5xtzt6d+R0xnk1oMYVMPygKyEMzcEE8AAcc/8A1yMECtT7PGzQxoy7XXcefvg++PvjAI6Y59cDJzfuuTer0VrXtbW6ve+m611POx3LCGunxX5GkpO8dOqv0a1s9fMxToiwIXi2/Mq42nIXPUNwOeAOM9+OcVS1Gwk
t7csFaeZlO4KN21D16cDbgg8cDHHGR6aml8IjzeZ5yrhB0VOxJHRh0IHI75rK12xisiUgcMTGVeQ8I2RgDJz6nj3OehFbUm3NXemm6te72Wv+eyfkfFY+rBU5NJ+7fWTtr7ttktb7v77N6+PKI4oyfKYyHhI+rHdnGM5wpAPzEj9cV8l/HvxnHY2E1lZXDieRyHA4Cvg/IgDDbsGR15696+o/Ekx0+zu3eRVuBkwsOA0fzdT2JH8WONvTk1+afxU1R9R1+4BkZ1Ep3L0VFycBVH3s87iDzj8/ssgwUauKpuyUYtN69E07PdLor2u9k92fzr4l8RVMJg6mDpVuWrW0k9YyjB6XTWve3ovU8bhSeWRp2fazys+4sWOWOeAeQD0/wBxW7EX7AFiM/L0ZTg7h6sRwwzxx65qgR5eduCDgjoCDnJAGOO3HfHSrdvIOhypJJG04+hz09+v41+naqCjZWsk1e3u6We/vbWbWunS5/L85uz5m5Sm9Xd3bbTu3ffa7UWrqzsbkDgCNRwMHI6kEEYwf07j3rcXa6lVGGZeT0PyjgjgE9eeAeQQcjB5iN84Pofm9cg8t/tZ4yfx4FbNtLJvOWOCo28YxwMj0+vH046ZzVrWemj1d0ndW1u9fXpfTvlByi7pc22itfez0bXTz63b2HzwoShzySwkDc9O56/gcdc9OaKnOwkM4ySoycdQOuBzx7fXkYorNTa0tJ/4UmuyW2+33/fLp3bbqKF+mvl206/h6s89mhlvplSKDavIUEAh5D6E9S3bJ4wTz2mj0J2hKNEQ7MVKKN7GROQVX5SVXPXIxk4JAr17SvAwlhYndGY3OcjMisD8z8c/LwCp6564roW8PLAyRi2UMrqzXDtgpHgEcEZJbjAyNwBHbJ+WeZr4Kb1ile+mqtq29Hr83Y76GXVbc84tpvlW0rNW3SV+t9Xq92tD5svPCrLIGETllQs+ONwOAq7eRgch8/dOBXK3OkTxEsiqIweRt+dWz09hngevcc8/Vt/otnGJZdrNvkAfd80eWzhowQNjHoygkDAGec15bqdgC0sQWKORizqqYZVK42gnAxgE59B79PRwecVtIStKPuu7T0vZvq3o10WwV8DGmnadmlromr6NPVX63avftokzxOSKRVBwV2jY4XJ8wnqD6dMkn1/KPymVQVUlT8pJzlR6Y4/PHXtzXezaWqlspulVWJZRtDg45AyeccDgnIP45slhEiksXTOco3zEnPrgAZ7EdfavepYqFTVXTdr2va77+ne2rXTQ89wcLtrVPl067K+/V/c7+r5RbchhIq4AJ3Eg4IGMkE856fj1Hc6tovysMENlyN6fKD/tdsY6dzjqOMzSwKkgIDArwyEjaDnjI/izx6Y59xT0ZVVlPV+wU4AJ5x7+nqe9dHtLxTUU9Leb0VvR2vv26XOimrNOzi7pbLyvrpbdfPWyZdty0ZWMHdgjO0d853cjofbp+Ve0eCb4wEZcErGxAJ3ZkIG0pxwRzkc5zjivFIiocPucqSAVXON2O/XvnnPbBz0Pd6HeCGe1xlQ0iBmQ4LkHIDAZPOOWHXjgduPEQbSbWnLZa+l97JOy/zex9Plc1TnF81opp9rptWuui2vZPy0Psfwhd3EjRiSTczKAu0cp0w3Xg469T93sefddGcQMtyx86RZDJyeBF/ET7DAA9PxNfKvhTVZEkWVXdYwTE0oIIRyFBjXkbi2Bz07juK+hNJ1SUwQpHIMMhWaQEFRnBKDnlsEZ6Ak+ua+bxNJtydrqLUbt66WSs9fLVL59v0bAYhOmrJp8t3o3q0tbNLTXp1dvM+pPhxqKXDC4AkjiEjtG27crNGRknj5QCw3ZPOOlfR2keNYbS5jjW48wyzFZTHIQW6CPKjI2gZIXvg896+TfC1wLHR4GzuklTzDtBACsDubI6bgAM8ZwCMd+88E3X2zVYnnBEZuQ0bHB34OEDDORtJbCnv+FeFiqEaiqTcFKMINtPd8ttnZN2a027aH1
GHqzjGneUJXS5lJNpLR2TSTu+mujT1Pt/RIzrE8VzcPvXzYgMrnA54OCMKPqevTpX2N4EhW3EKRqhR4cAlRgrxuLN3K8YJPGDxzXyR4ZC29vZxqyvuhSRwRtPbcxAzwB+X8OSK+pvAU8+xVBYL5bJFuOVKHG5g2OCeD3xyK+GxlJN81K6g3K8Hey22b1e1236aWPW9snCLhJ8trckkr3drta3dlZLR9Omq+idBBupIt7nbEWCqCVDBcYyO2T0zySfTdXf20htLhHl43NwGI46kAZ6jnrgbhz04rzvwzdwQjCuk0rZO1DuwOBuz/8AWP8AKu9njad4Jmf5Vx5gK4+ZcZHXnPHHUc8nJx5VSnvfy5bfJO3a77rW2nc45VeaT5uaOl4wWjfLa29r37fjpc7/AE+6W4lZEJ3FQU2A7QGHU/Ngnrxx/U9HbqsZQztvCZwp43HqCPUg/h2HFcRpE62siO55dcqucbewO7GRn0xnAweK6GK8FxdRxzSKhJBWPuw52nOTknkYHT34xxygle+r5tFZJ2dtXbXbVdX57lqb0crJSSfLqpXdkl6t7rotNdL9vZzhV84phGYKoYcgMQARzgbR1wO2c9a6RkiVUbIYyKCFAzu45UZzg5/wyM1ytvIjo3QiJQABn5j0OB39s4OOODmtS3WTzBIWYqQNik/LnsORjPtx6ZHFJw1fMlqle67dnr/w/VAquqVnGS2u372zutrtb3Xf5rrLO2t3gLFP3h4SPo3rkjnK574z056iug0qyE3ATa6ZYA9gP1yB1/QmuftxKIlkiVXYYUMuDgKeSBgcDHXPc+orvdIRSiMvMsi/MF5wOpOfXHfGfeohQU20or17JWdk+mul797q9yvbWTfNLR3WrfvaO1rbfd6pk9jGVdo3AwmQeMEnHJye3HP88Zqa6BRoyjmIfdAUYBJP8XtjGD9cHrWzJDDbxQMowzjacnJcqMkt6deB69DzVJ7X7TJwp2tgKCcAMNxOMjt1rSdHko8sVd3TVt1e3Va+dktvK9inX5pJvpZ3euumq2S+5r9HWUM8xEbBXTv3HcZDHqeOncE9c11ttaeXH5ZARdhVCRk9RwCcHuR3/KucsBJBKUcl1iwA3BJOeQT/ABY788AZzXRHUXWSM7kO3B5HAGB/9cc+gNThalOMXzKfPGyu1qkt723V7d2krWR288p2UYxemiu1rottWmtNVfTXYs21mYiwUsC5JMhJLbvb1HH4810enaXcbjcxTEu3IL9x3OM4yMce/frWQs0lyUERXLAH5AcA55I6cjkH6/iektLuSGJUyMAbSSeeOvpgnPp9e1ephvY+0V5S5eVcruneTab3vbRa6u23knec4qF1zacycr7WvdO2j27aLZG1KkgjjVcvgEPzhc554HH15GOOetV4dhb7OwXDhgCRkqSDknnoueOoPU9jUcV4JF8tXwGU9eCOuV687uAQOnODT4oZGuPkwMdGz27jJHQdeOpOK9Kc2px5NVJqPLJc1mlHVdNd1Z2/AqHPGLU0opfDLRaK1n679ToLCyghjBkAYliSehweuCQcY45x3GM1PLbxtMrIp4xuHtx17HA5H69RieG2nCAkglgB6AAdcf7PHXj6dqtwOqowKglQRzj7w+uevqR25712wpQUVGV021Z3207KzfXf8HvMZtyvFuV1rdvRO2u2i91WV9E7+RTDFVxiQ44weAOnzY6EDjn2z2qCdo5UZWZd2HypOCCvVhycnpjkeo7k23Uy5bqcEKegXGM8DpjgZ9c8ZGK569jmjkJ3Hpg4wOoPHfPTPb5fzqqlSahyRjzwemzSW2t/e2e3m297HVRpQnJOdRRkveSva7TWluqa0/PXbn5p2jn80gOiMwwfXPO4EYIB6dsHHSqsmLm4SVVCMmTtAwj7sZ7gKTgc857YpmobwwC/Nng84Xjkgj8yc9e/SorabaiuQQy8BC2QF9AAORnPuc9sZrw4KalJS2lLZ3dtte/dvZ6I+lUE6UK
kIxukopxsn0tpda3063WtkV7q1d5pImjYq67iwOQ2OSM4yMZ+g+p45jUrArFO0Yc7Y2Hl9cZyMAgqCcY3AjHSuvF7GZW+dkY4AVF3bfU+3ofTPrk09RHIFjEYyxwXIB+Y/wATDJyCTyMHPTNehh6cJ25ZKV3yuySvt/X4bjlUqUlDmpqOkW9bfytuT106W7nyR4m8LzxTjUJbdk5kCSknLAdFYn7y4J649sDr83/FfwzFq3hy7CWwS7ghKsUTuQQdo4GSOpyP1r9J9a8LNcWFw8iI6oCxjIIRx/Ee5XAOVAySehFfLni/wo5t9RgRIorfa0hD/wCskbkhFBHLHkA5AGPzWIwE6UXFJvms25KTdna9rX1XlvfsjrwuZRqq6ahOnOKveyfw2Vm7vdeW1tj+OD9vHwDBpXiW81KGxmjM1woNxcAsjsWbCR8AtnnkfTjt+OHjLSI4Z7hWV4UkPmIr5DNv/ujnCnqV9x7iv6kf+Cg/wf1LWLHUdWjs4zbafcF0dVDPEFJ2lOByxznI5x14AP8ANp8TNEvLG5kWSBo5LW48uWNxmSA5IIKY5BGPmzj/AGeK+kyGrKiqcVKypuMXFtpr4Gt0rLdO6t03PB4vwCr4WVVx5lKnzOcVu3a8W0vebvda/LS58wMBbSSjYVVWAR8khlBPIbjA9c49gK63TNSjEaIWDAJjGfnBODuA6jPOc+nToap6vYsqSyCMiIgPtwc4IJaQ9xnC4Gc4rlFuXspztVkDbdmeMds9DwMdOB1r9Pw6jjaKStzxS0T1bSjdvV282ro/nbHYN4WtOM42TfNG60tdWvre6136Ptc9NmnEsa7CNoPzeu49SRkcDjJ9+nesW9ljYMCxMgGFz91hn5eR1K++f5EY8GpF94MvlhgWZB8o9tpxgg9/0NRT3kkh2KMBGwMsAD7g4/iHQ/QDiodCSTTSTV1pfRpK1297eXlqcuGpKNSDUuZcyeuqXwtpXulto07LRX6nZeFgqkpy82S65Xrg87mzwFH3eCDk19ofCW78iNIg/mNlm2hc53Yxxk7NvP8AM8hRXw94XupEuvKL8uWTG4bNwxtBO3hOWy3Q5AwMk19bfDm+MFxaruX5pFTKsUUK2MbuDkNgg9BgDtzXxOf02nO+ilFNpq937v8Awbo/S8oknh6cdVGLtrpouV20dn5dlY++dJnjewjySzeSVMeMgHC478Yyee3fjFZl2geIqqgsGeQbhyHByMn1BJ45BB/2RVHwtdPLAiRxFmwcAthFOB8pzwEPdsndgcDvvXMDkkqFUcK/zYVHOSEP+8c84wDjpXwSnyzceZLVK90k9rqyta/R2e3zN8ZSc4u8VJN3+JLtqlZ+rer697eeXkMyROQ580He2SSqgnqF/vcc44Xk4yRWWhHmibGNjBmOfmcHBDL0BCkdcDJPTmusv4C7To/yoMsZVOQDjCqRgblY5yO3b0rjZtwkBkwoWTDgZDNtyAT0HQ8Z4I4717+DqqUUnZJWu7t6WS7va/ZL7j88zXDqFWTfM3ZpJ3atZbeXdW+b1Ogt5V8yJimyMI5VzxljjcWA6jpglj079a6C0lZFeWJgGbaQ5G9CVJyACRtHQDDZxx61xqTmFUDsWQPsUcHg/wB33PTJ64Pc1uWt0QgTg/IH8vOAGJOAuDyR39fxrSq1ytxvffyTbWySv+G34/N1qN7+80mlbR6/PSyts3d6X8l31pdPCyzsxXDBmBOFDHAyVGcLnpzx2ruLK+XJiRw+UDHHzJ5hHHQgDac8Y4BxxXllpNLgMwDFojvx87LG3IbHGXI4GPT1GT09hdhwNh2o2VV2+Vgi9GKjox5DHPOB05ryq0HOU1u1t98d1re3bscc4K2yvfS+i9Hf017vTrp6VDfSbYVwgKncwJyfVVzjovO49+K6ex1ULExLMRGykKQclvmyFbnEQ7D8+orzCK5kY4Cs6oqlyh5OP7w5GeRuOcjj1rpbO5coIgSXk2lCSGEW7P3sgcjHJPQ
57YrzatHmi48t0vistVdataJadvLW2xjCF9fZ3cbtNu92ntbVeffS/keqWGrSplmkZ1MW0xDggt1C8544yf64x0dlqBACspdNpZdy/dyeMc5IyOWyOe2K8kiuvJ3pOxEseGwp5kY52ueTuB6/iemDWxFqXl/Z1y0srMuSr4/c8jyhwc5+vscZNeRicG5xfLFPZLyWjV9Laq68r7o6I0facsuXlaWmr0krWaXf776a3Z71oerCNvMkl2l1PyKAweRsbdiZ+XOOcE9M8mvQrPXNkaukjGTLGQu3yopwNo4JB6g8nBNfN9jqnmyJEh2MHyu3ATB5A643DGCffsTmvQbPU0whNwESRQ0iZ3hsfebg5UL2xnqeeK+Px+Wu6cldpu6tdW0l0T036b67NHdDCqXJzcytFud1o/h0fW297au3mfQVhrUwgD7gibsghgXEZI2SAjGQcNg+x9wKXinx/cafD58EscT2zR20zFgHbGSHlH9yQDAwSBjuM15VYeIwkRAIltwFIBBYY52M/TgnPOe3TtXmXxL8RWw0y8LiVlkeHeTJhzMm/wAry1wNynLblBHQfNXn4bKZSrxdWlz029ZWvvy2Tu01tpquzPpsjyZ4rFQU6XPByjZxvdRfIrtW1utOu9+zPdrP4vQapO9vdTBZoN+y2tkLeWU2kFmDZ8p8k7j0xyCK1f8AhMbG+unjEskkcNur7GYHbMAcNG2cFkIBZADtyDnJr8zJ/iPHoHiKSQXVyZr9AqzoPliRx8ytFnOH9NwI29T0rvv+E9u0tYUs7qWOW4TMc6HBSOXlpAuSfMbHOScV9THJ4QVP2acYuKUObZfDey1tyrRrtoz9oyrhKUHF04yhCoozi53srcvNpft0XZPsfQPiW+t/EGuC4kl864tUC3EMihSIo92JJF53GPPQYIJ69c8tq3imLTLO5jE0dxBG7AISSyuhX92GPVWz93AJxjNefaT4jkt5pNTuJEmkuYxGFbJ3kg7nUc4mBHKndu4Hbnz/AMT67Le3RgYNGhczAAfKcEEkEEfPjk5/mTXq0aX1dRildK1lp1tdXeltFfruup+v8P4J0JU6c7ShFRTslq9L8t+12ulktLtnLfGfxXJeRG4tWaSOQ5PmZVEkcENHsByoXHGSR698/NGiyXF7F/Z7su0NNsj3bXkzyEyQTtOfvbccc5HFeg/E3WNPe4S0MxeF0D7lYoXLDC7gNxUggnAyCfpz4xBd3GnXtrcq5RolA3/eC+z9NwcEc4GSOe1ei1C9lZpKM2o68raTSSVtNlrbXTU/obhrGQo0KVGDin7ru3o3aNnfmu77KytrvY8o8aWVxYatcRzQlMO+0f3GPIAPBI9TjByOOBXFp5jkBFLOWGxeSfmO3B7leDnpxwK9x8ZWj+JNSt7tQDJLHGt08a4BfByEXPJz1wR6c1zV14cTQhFc3Kq8jZVSf4VK/IxHHOAefXsOCOrD1IJezu2tG27PVpXindXa6Wfd+v6tgMV7SMOeSU2ov3ZXjP4U0tLbpvXbojHsX8lLYSIqgnaWHy/NjvwCF+nGeOorVttXaK7likLOh+TH/PNTjLgZ4UDGfT8a5S+u914jF8DJITqFYdVYDjec5xnjA69DntdMly86yMCWKckAAMFwFBHXjkdgevTG1SjCcfchye6m3Zpv4b3s2k9NLJfNM+jo4z2cotSs9E1dtWtHmTaaSbtdW26tWPonS3tIpbYxCORpQjs6c8cfNg9ieSOc46jivR7LR4tS1vTpzCskaRhzK52AlMboz1HcAdT9RXzV4d8Ry2l3CsreYrqoCkg7WXogYH5QSeRg9vTJ+gLGe+EFnd20xM06pJIi4BiIPzIy871IA2tkE9QM15nsVKSlyW5ZxfJG93sr+jttbZdNz6XD4lVKd4StHlf7xyu03ZOL12Xlfe7OO+M+muNQju4U22wVVKgbFkYgAFUHG0EffJ9eK8BedY5MMSwXIKls9TywPTnpjnAHvmvqrxYU1ux
ImjYvbRszY+YJsHAVf4Qc9CTkDnIzj5hudOeO8kRk2gyvtHVmBOQM98ew789zXTUUVN1Ip8rilytJOOivGy1Xk9Xfyud7nKVGEou8o8kXUTdpJcqUna26Vm2u2x03h3Em13+6xYiE5+YcdCM9TyB0+nStK7gidpgqFACwXA3Y6ZQngDB6jHJ7cVlaPcLp1xFJKqMI1KndggI+P4cdRjjp9Tg16AbS3l05tQgCsrhiyjIwfUjoDyccnvXnyozleVJrnpy5pRuk3Fcuy9dXa1738z3sPJPD07NTTjGM2o3kk+W7Uk736K1uzZ5sumiGWOQ53bgVHYKc7cgDIPXg8DPtiuthge7eFTtCghSCfTP3uFPp06ZAzxk428/aQrkmMNu5HTHrnI6Hj8cY4rqIY1PlyRkfvegyMYHBz7nsc5wOnOK5qlWpOSTutFzOzSsn6dfS+v3elgaVCm5+xuqDkpTto07xvrrfW11bToQ6tpJFqTsY7DuU7gDsxyegyO+7ueveuIX93I4YZHKLkZJJOB3zkZ9D144Jr0y9k8yzaNd27aI9xbGV7jJ9ccc9Aeccjzq/jaOfADBAdy4GAQehzxkg5Hc8ZyM1VGrNqUZ3sv5l092zS13Wl72117PvxEoScKsHCpBSjGSdtnypNX0utb+a82iaKIuQQwHG4hjjGScnpznvzg+pqR7kwl1Uk5yATjI3DBABwAMcZHbHTHOS9yyOhDEBzgNknC98DGCR/nrT5ZY2Zj5n3QCTnBbrkDp+I6/zrSUnK61220aaVtFs1a+t9NEr2scdSpTg5+wtdPlkot2u0ul7P8Oh6N4AuvI1L7QVDuqMAHGV3HARs9iPmxwa7zx/qE2o6Okm1yDJ5RLHIVz1C4zjoP546VwXgWS2mkaMnZcukjQgg+XKUX5VY8Y6naeeSRgmul1q0vX0a7CFzCGeSZOCI2B6hzgDbzjAGMniuXDT9nzqCfvzak+kIpRVmnrdfF2SsvXovTp0qCk4ud9G2rrm5XFJPW3+b3Z4BPEEZ2K5VmOMA8DpjnjPqfy4qptXBVRwTkdyCAcng8Ec/X2PXVmMQQqyndghR0xju3qc9RjnB6YwcnJXAHBJPbnoc/T64zzzmvpKNRKnBJ6cq1srNq2l9Ht6X+Z85jKUIzbaUkr3dlzJq17Wtq/Pb5FdyV3CQj5uFGPTODwOvHfGP0qAIzsME59OvsPbp1GDjmpJQXOTyQccnn1GeBk469egBxxRHlWGQT/nI9eOMAfh2NdKmrK2j0e2nTVa6pa2W/yPAkueqlJP2fMkpN6vbfTdXdrO35kqQiNsuQXB+XBzxwBjpxycn6e9aC3CiNxjDNxyMjIHDAjv6dQSfwNL7xPHzAfxDgDvzx1+h5wMVC8jx7lPBbOG6AcZBUDGOOB/TjFRlJ2Svut9L3stNbX6p9PuOmFaOGc+SDVNJrntfVpat6O6b0b01W2pRnlYsxJIyST3JBPUnPOOo44BJqkWBfcCT0yOM5IPQe/tkDsc1YkZVBIO5j2znr1IPqOntx161GgRjgDaevI5Pv8AqMD0+nKd1fR9rrd3S1tp3W19L7HzteXtKvLzx1fNyvdO6taSum9Xp3+RqaXps9/OsUalixGcY2heM5J4A+oPcYJr0uDwiI7CbzbZfOViYmHzMy9CCwwFYZ4655PAzWf4KtI5mQltoCOzvnr5eAF6HAJOPwJz69rdTXaQLaqDHmYhm35IjPAXOPmJHO44Jx7Yrza9eo5cilyxi1dX1urNu9+q7dfuf6DkGV4aOEVapTjOpVje9S0uVOzVk9rttJrur9Dxm5haGdlPBVyp+bJ+XjJPf0yB1PamrKEwWJJz09vzzg9Pw/Lo9bszBLudQN7cHtjJ5wORnOT1/HOa5aYx7wAMEgHHrxnOccdOOP5c9NOblFJq90t97NLR6q+vla/Q8vG4eWBr1eSaiua8bu0UpW0itNtNm9y4bxSynJGOAT24AA6fr+gzVtZCYtwbjr+B9uc
jqCOB9a5ud1V1AyUJAzn3zjsM4HX2xWpazK6hVOeOR9QOfqOvX071Tp6X1s7aWVltqrdXt+mtjnwmYyrVqtCpUSlHlUJRb5npFr7V2lbXv6FwThQQCNrsQMfeU8857dsDnp0qm0zI3GM5wTuyAe3Pt69v5q2wSZHAOST+YP15wB07DqaZcKuA6ng4PAwc9+FJxjjjkn86pRatvo9e9k12tpdrbsTi51pQtCsoTpvdSaTXu6v1a1TfXfqWEvDGhBwSVxjr+Wfp1OR049GCf5lKsA2RjngZOAD0Bx34z781nhkLAfNgZzn24xj1yOec49wRUuFB3Z+XII7Hvx/kH+dbckU2opO9m7JXd0rX669L/mZQxmIqRipTUlC0b821uW7V3e2m99NNTpdNu/InSZmx3I/hOe5x2PoMYxnPWvQbvUPtNhAUKL5kfzMxwXI5JKjqzcZ59+leTQSAEEjheCRwcHvg4znt16du+9FdEQg7jgEBMjJVccEDsDg9ec8kcVx16F5KUV7yei1306d766t+Z9XlOZwp0pUpJSTilHVtdLq/l03Y29jZi7ZzgjkEkcZyT0PpyfyyazlcpxjvjgDOfTPfp0/pWs9wjw7CDuIyS2DhueTnHXJHtj1rM4U4znJySOSAe4HGQOnUfWsOSWzTbi9YvtZf12/XSvrUjUhNLntzSurq7Xl0XR/la0bSdS3VT06/Q9fyzjPXvkV2BySB16DqV54z68Dr3781fliQJkMCTgjHU+x6EH069fpVEMwBwc5zweT9cA8DIHf047VSScXFpq6V7306fp6W6anFivcnGM2puWqlGyu2lfrrp20b21OqsruKDS7uKaVn3RHy4WJ+bI4KkZ5BGTx6n2PHsw3tjIGSyjgY5yBkk544xg9sdObwdlhGVyQTnJ/h46dhnnvj25qFUVkkY4Dgnau3nHJIHTA75GfvcjBrfDqNKKSd22kmt7O1k7avrr5vc5MdVliHThGNOKjFO0bprlUVzSb6u1lr03RiXKHOSSccn72Rn36+gJ6cdajWThgoADADBI6jI4/H6dfzsysGJA69hnJ4Of8AOaoujIMYIJ5GcdCQc4IyMjn6dOtejZSilfVWfra2lvL5b77X+Rrt0qs5wu1LSVtbPRLWzSVt1a+uty/aSvlAegYjjknOOhzjsAQB9Dnr1kFwViRe+GDEds9x7+oz2965LT1JbGctg8H7uPfnOB2GPwroWGxEXaQSdwA688kAccHjn+YxXDXSclDS+70u+nfVrbo+99D38nq1aOHda7bl7r095J8uitdLrayXl3OhtZUVBukBPQccj3U8cjqScZJzjvWratGknzHeXDcgZ2kj7wP079PqK4lLoh1QqAchVAPp6gHr1ySeeMYrcW7kgVCCqNjjnOT1AbI6nJ5IOfXqK4a2Htaz+932s0la/wDV72Pq8NmMJwV5SShZvmu9dLrXS6v6v87GruilVTc3OT36g8YHUg9BgcfpyGWecKuQS5x6jLDGcevXjjiugnuFuVd2P75wQOmAoz0/PJx1wASMcZlgyLMRtBlyWBbDYYE4IycY9seo61i4cqk3e+l0tL36ro0lvvpscmJq+1r0lzKMJSbV0nd3i1bW92km1fy3PQ9Njc20Fu+HOQ3TG3HJY5zx1HXvnPWlvLKKWS7sriPfHcRkwHGFBIBBU9iCTkY5/EE0lvZbSCC5ypDKVOTnB45A6Hjue/FXprt7uzgukKkowDtk7gpzgBQM8dyD3Jxxz5ns6seedmowakmk027xfRd/nbfY7atSFROLi+VRUJpq6knFcraas1fRLdvrseKa3Y/Yb+W2kUqsb7k45KZ68/T+nSsO4VSjMT8rjKD3HByP4Se4+npXdeLYWeeG6GHZ4QJGyTtfBIUDPTnGcnJ5xuxXAXDs42tyBwV5z0wMAdO34dq+uy2bqU6bcndKLfk2km0n1T87PqfivEeH9hisRSjTcU/fholGzas76W0bVldrRdLEmjownDG
QuMkADO7gde/A7e4PPevOfGBZtanBIZFCqpAByeSfqeg5wR+RPqvhxFkuyqgrlWZcgZ2qDn0B49OcevfyzxaiL4guwjK653kLkbN2Rzk9cDoOcDPNfU5W4/XXs5KLfNp15NHZ6a327b6n87+KsZQ4fpPRJ4qCk+9lok3vre69LswolDoQSckZViO/GcDI9ef55OKs2xWNyzMJGGVdSOvQ8c9SQQRgHgnJFQR7QCSWyR8oHIAPB59R29OfepMRghvmEmQVTHDNjpnjrjv057k19PJWWm+reyW273TT/wCCfyxip/vF7ybVttuWyteyfTTT8dzQjwxyWO0k5UdQBkdPbnjtnPbFbmmkorRllbbkqCDjnleOuRzkd/XPFZFsiSBiylZDyFDYI2jglSDwcjI4zgfjv2cIIC7W34+Qjp6c4H5npjpxwfLxaTg1rpKL3XS3f8X/AE/Syy6qwqLbSNr6PZ67P8rM6G2iZxEi4ySQcnb8xOAM9cc9Oce9eiaNbSHyUzvZTlyeSenQdCvcn9M1zWl6eJY0cjDLgjHfGeBxgfQHnj059N0uyljWBvKKbiMMeoPGc5H0/qMdflcbVSi1B31aemi2d0730XZ/i2fpWBTvGc+WzVoJJ6ax1Wu+/S1/kdjoVlcWr7i5EchCoB83J6Z9OnUfXmu8snlMqK0auTlQM4baMZwccn1xjj3zVDw/ZHUmWAt5TIcAqMJJnjKtzgjB98noOtep6d4UIAZUdpIyrb8nlQDjacHg84OOuTXzsrXbek3ps12287PdLfyufUUZyjTSi2043adnbVd7vTySWl7F3SYnltoVj3RbgxOchlY4OB0OBjGefXHeu7sLaQPGj/MGTIbpyOpOSQW5Bx/9as2102SBDsQxso+VmwOvBwvdsDg5GACcd66awUpGEeTARTww5yDnIPHU+nrUXTUuXz9212vh1b676eb8kdFC3VvdKzdt0ldq6tpazatfSwy5tY4AMquSg2jO4YAOMYwVLE989OnQ1asflWNXRgMkox+8M428jBCk5wfbvUqBrhljdSA2VBC52gE/M3PHPPPPPsQNeDSjHGZdvmrGwLbSMuhJ+b2I4yuTjHXJqKSXMue7S6K2iul006p77eV7+DndWEFGKclo0vV8vZv8HrfRas3tJWJFHziSRVwysQQpbq2PU4yeMenesjxV5KKzIY8lAcyHCxnHUdcgnjnpz65qATJHIGtwcPnd2KMO7c8qM8nqOw9OU8VXUgDuWeUeTzGeV24+bA5yOQcfT2r1sNR5qtOFrp8u920m1tayW2nS2+zPzvOcU6OFr1VJNKN2pP8Awva/rv0Stu2vmH4v+KlgsLu2jIhCRtEJ0xtmJzkqwwWz+h44yBX57+ILlrm7mmfl2YqsnUkgnGM8kHOR7/jX1B8aNVWa9e0gKspcv5KHKqw5cEdlBwSMkd+MV8u39uFywGQ2HIPO3Oc8HoOuPx681+iZLh4UZRkk4t+7svi0bf8AhW9tdep/G3GuZTx2YVovmlGE5Ri3Z7NSbvbz7217HISRgsx2Ek8kk4Ibue5C5wcHOPXuIUYoemQBnOOh7AE9h0OeeucdRpPFmUjqrHIUnnIJHzYPT0/Sqs8IXcoTA3cdeOpwQeAOOp6cZ619dGTvZ6tRV9NLXt5fjvp8vzeb1u2m72um9klfZpadlq+quFvOytuLEdRxwM8fpjr64499y1ugxAU87fnJIPJ6fQHn1rmUQq+ARleQp4AAHrgdvrnpwKvQM6NuXAGMA7s46ZA7nB47enQ8Dad7rTsvl5O/+Ylpo21F9Vo9WtNL8ztbX18zqBLI2UOBgAjIwfqADyP5gd6KyY5gAq7jnuR7HnB9vaipStfrdt/j6LoJyjfTma78zT+61l12029D7FsdJjsW8x0D2w3q7ldrBudoCnJYt1J4IwM9RilfWW6YsEDqyFoxKmQysQGUHI+6OUJ6c/LzmvZfFWi2ulhthgDEKZl3nyyrZJMnHyK2BhQ
SV5yfTwbXdaNu9yzssSBHaHYTsUkYVVb1IBwxxjI6dK/K8NOVa04atu3vPXXe68te29/JffVaEaXKpJWT5rRel7LS7e2u/fV9lzniBbWK2uwZFRYVwqqvKnuS2eMdCcc5xXhWsXCLKCqgBhuEzHAKN1GOOD2PqB2rU8QeLdpmjNwixsHVQjgYbPyoc5LsS3zOeCccda8i1LX5bmYfMJHYLtVjtXYpO4KOQOoOTyMZGQcD6fL8BXdr3tJJq6s7NLyt6/dfqfOZhXg5cqUeZPldl03a7aW0trrds35LmB3kTa4JQshQ7VOOmDyCOvA/HvWbNKjYXCg7SoHcem7nOffjGCQPXAN/N8gRsLJlmUHcwX+NF7KTkZOe3pmlF9EVD5bfkABsng8HOPTnqfy7/R0MLOFnZpv3Wnezty69X1/4KPClUjzys27taNaPbe3np0/GzmuEjyXCncdo5JKnr7ZIyfQ9xyTxT2hfmIIJbuT8o9+Bgr29enGcUk1zu+VZA5xjCfwnqQOeMZx378DtWM0jEMGV8cEMeGUEkjPcA98evFenSptRV2t7pW6Ky9emz/C5caiu/PZq/wDdSWnW33vXU1Ldio37g2XA3AA9vQdP5nBxXT6cyh4X3DKsvJBJypGAvPGRjHfBH0rkLWQ7WI2hghIjHRmA6qSQFxj610OlyyN5YLqsmGcYBwQpB4x/y0GOvqD1rGrdqSd7p79krdbv56O+i8j18FVv7NPfmSu20tLXd3r6bpX8z6I8OzsI4cnKPJHKob5QF6MWHqc8fTp6++eHJmuGWGJtqHYGU5YHH3sHP3R2ODkV8u6FqEjW6MHBIiC4xxiPg4IOdwydwwCMjjFe++AL9jcLK0mbS3jcgDnzJMA855A47+/bmvGqUpNu7SteWz2dnbffqrbddj73L8SlyR5mr2TSu78qjd3+7yv00PrLT9We20/7OrfKkXlhCcFowB3boOCRxk9iTXpPw+uUm1CykMreSLjMwK4kiRSDlBnj5vz79Dn508P6iNSkcSSEwuxRMfMQASD7HGRjnH0619G+CUjtFtiCrSvLsc7f+WTcBic9yMD+vSvIxdCFOnPT35a31s0+XS1uVaLa+/4fUUMR7ScOWUkotXlJ2i1daXtbbXX1Pt3w3r2G8pmOYQFTacsw4wmR2HG4fTGOAPrfwZqA+yW4eTmSMlixwIzgHZ7D/Doa+C/BAlvL6FZsLEuSccCV0xtbcDz1HB449q+uNB1IwRQ2u9HkXG9U5Khsc5OASAOn096+DzGhCLSi2pOTk7L3UtNtN9fLXXc96nVje1pKKV4zTbV1a9n2ez2ST12VvpzwlqKrPJySz5AYZICjsCeASDxxz6d69ysp5JYYmB3qCDsY9TnqR1zjoe2R1zXzf4YvLe0jt2ba8zrl9wwR0G/+o/u4+lesadrckyxxRvtyx2bQCF6fewe3U5PIP5/OzjzSvFNwb1ct21u7N77XvbYptySd7rS19J7JWvd26PTpZu7sesLcRfuI3AMjEknIJC8fLxxx36dcdTxYgnklunkQbxF8iMSefQL6D9MD6VyNtdoqKsbsSgLSyt8pdm5dVOckFRwBnjvk112mQzTGGaFiDKW2qQdqxjGWBzgDnKk5I549OWpTjZXbVn8T1s00rW0T09GuhUJKPxWbvHlb7q13d9u2ttNru3o2km4l2MQ6u5Vi3VeOvsTgn0GenTFdldalbYgghIOEAkPo4AzyMZJPPU98Yrh7DUfKhjtISBvJEkp+YAjqQ2cDPTr7k8VMJ084JDuZsKxbOS75PA9BjPHPHGc9OKtNwtyyTbsmkr66bt6+nRa7I3hCdRuclJuK0atbmbV7u+7u9EtFt5+raHOrpzuMYBUn+IEY6g+/+ea7rQ2mtmLMTsORGW5zk/e5HB78Z6Z9K858NNHLEVxtkJDyDp93op68+4GT9RXXjVQUjUHymikKuoHGFIAz0Pzdfz9qdKpyqLb3vd6K6u76q6ur7vqEouX
Mn7yb2b7W2VtOmqW976M7CV7p8nO51Hy4yRyTlgM4zjuex78Vu2LhYY1lYMwGSc8/yGOvPc4HI5Nc/p06XkRDkHdko4OAuOQOAeB1Of8A61an2WVmSJCSTgh+dpUc/e9ccAdMYya2jJ8zqU5Oo2k7NaNOy09NPJ9iUoO8ZRVNK12krNJRsk9NbPa17W3LUkczzMI5FiRydynAxgcfMOpPbgYxzxVxdOkaCUISzn5lIbOT9Tg8e3btTIIyJVWdzgYGWODlfX8PXHJwQK6K2kjHEBVo8bWPIK9Mj3I/Dnr6VnThGcpOd+aSbsm2ouyb6aL5N2e/fojUcILkS5Uk7u+u1vW+6XTW/Ur6G8saNCxx5fBz1Jxg4J6H3J+vpV1rh3uRGrFVJIZgckk8YzkAEjjJz+PdvlKrkg4WUgdSMEnj3A/T261cFtHEN6A7wRu/u5/vKMc54OT7j1z0RotwilOSUXdLW3KrX69dfn3ub06i0bUrz0Tt7t3y3V9Xvr5+ptW1o7CNySXDAjtkZ6HIz6564PYV0MYYFc4Bx/DyPoCOpx2P0OO3MWOoHIjx904HHPv0x/nPPTHYWKrMm/G7Azg+xByOeo9MflyR7GH15YxvzWW/SyWqa3V/m9763FNclm2mlK13q+i6X16vr9xs2DLOxjUunIA3A4J+nG0n+vqc10sWl5hMoVXYAHaTkk+4H+PU+uMZVgYEUOwBPHKgHvzyPp065Ge9dImoxKoAAyRgMowMdtwzgnqByMZ5OBivpcBhYVYtVpO8Umut17tktL/ftrc5q1WasqatstXa+zsrd+2uy0Ocu4TagnaMkE7SAAM4zgjHTH4fz5S4M0rkeW20Dg44YnPXHoecZPWu21SQTgmP5iVy2OoHJOABjrjPOf1rkJHa3DqX+QFhg87SemT2+n15rpr4aFJSjD4VFNzTv/LfX8FdXva3W/XhK87Rbs5p2fNpJtNPTu77XS7nLzWymR2kXaCOc4x8vGMdj9OuDznAqnJZxlGMfzE44PKkc4UY6MMD/AV0Uts0pDFtwcM46YAzggeuc845yQeec62l6dC4IkWMjklT8vIHU5BwPQYz3z6eCsFKvWcYxXvXSlLe2mq6W80z3VjHShGrKclZx/dpX6x73vtfXydkebJpdz54dUYJnDNjB3Hscg8cdT049RWsum3DPuQsFCfOFGSGI5OOMe3PGTgDivVE0eBY3YFVQkblxkHuSoyMdhgnr161NbaRbkOFZYoWGGbG3dnphTnPTPr0xzXoYTIpx1lOUbtO3Ps/d89m3t2e5nVzuFS94PRJO60fw3W2+l1q0+2p5Bcm6aBwC5MRG1FO12VepCgHLHjv+Pr5N4v0m4ullEVvuWcAu0igAHDErgdAeOMnnOcYzX05qui2qO0kR+bbywBwTg444xk+/rnoa8Z1uymlNxE7+XGu8Id2QxUcEdMg5PPoO4rsxVCVGk41ZczikotTTVrx1sldX01vrZNvQMFiadSpzQSSbXMpdJS5dLbdFr39D8pP2lvhPa+IfDeuWt9bvE11ZThbZEBhkk2kxT8DqmDnnnqW9f5Bv2qPh2/hbxNrduQxjN1LblyvCurMYnbIXc65YFhwM553V/eX8SfC66xo11GkaXcnklZGBBYQ4ZZMAA8hfQj6Hgj+X7/gpT8A9P0e8utes9Plgt5FLpIMqZp5NzMrrtzsUg9T82evHPkYStKGKgo81ptRly3e1viTer9fL1X1FWMcdl0qU1FyprmtLX4FHW9m0mm3rfRvQ/md1O1miae3nDBUaRcRtnJJyjk45PJyQBjjjnNecajC7SFyuFQhTxk56Y7Y7g46ccV714s0ueK5upfKYoZJYpADkKYjjIAHHU4PIHPqM+LXsMiNJMQREp+ZOig8hjnjJGBzj15B4r9VymraKkmk7W838K18/PvZ+v4PxLgVCpNKM9JSbva1tNU19lXaWl7J9zmlMiyszs0YByQTgsvGMDoc45HHQemaspK8kgy
4wGXIBwWx36478/qemIrtWXe6DercscgkccnaRzxjnPuBmqMcrq6hQFB79+Mc4OTkfqM8+vvNXi2/nzNNbbfLrv12tY+GjFwkotbNeWnu20vdKz73VvRHbaNIYbwebwC4O7oQMjBxjkgcHnjjHByfq/wJcRh4ZS2cqrDIA8wjuOeAufT9RXx1bXEkctuY3XcxCuwPLDPJIz+RHU8npx9I+BtRbZZkMolUlSXYjrjAQYI3Enr6jpXxfENBuHOrXbtpvb3dLa2Ttf7+qPuMjrRTlTT1Sjo5O+qjpZ62dt7/AKH6AeDbxUWBFcgTBWJJO4hs4D8HjqAvYA884HrUluj4mcuIztOcEAMwHbnBOBsPOcHivnTwVqgEVixkAkVCJCTkMBtAWJMdByQc888DFfTFvIsljA7MreZbxsu5chiQcs394KBx068da/IsbP2VfZq0rPl3lblVvlays15rY+plTVSmna7lFXu7JXs7dXa21+l1c5rU7Bnt5GaPG5RJEw+U4Gcu+DyWyDj8c968+v7Yo5iHzufnViMjaCeGIweO47n14r1XUFcwsQxbB2hAeQg6tjkbf/QRz9PPb+OQzSYxsUAjLdjnJXg4wR1+o65r0cvrNu3NvrZX7Rvvrsr6dW9XqfHZxhYNS5YxbVk7au2nXrZeRzAZYnIIJkJ3oW+Ygt6DgqeOOuPU5FXreX5mZSYyG+6eG3ccLyc5wCRx278GCVB5pDKBJt3CQkhcex5wR2Yc9M8iqq5RTJn+MMWbje4JwYz3Xnr3x6c17CvJXs7uz76ddmfF1qTg7NNJ6J21vo7ea179/NnYW167NHwF2ABig/1iJ1XGcE8jP410ENwq7GyUjlGH7lGPQrk4AOeg47VxcExj+RTuG7e2DnDHlucDCjjI/I1ppMxwo3BZcZUkEkjJVgDjCk85znrwe8She2lnq3or9LN2tv8Ag9Dy61J3sorreTd7fDst9Uujv16NHoNteP5IUs8hzvjYHDEdySPvKO/Xj9d22vJVYFnRBjJdfmcjjaM8Zfk5J5PtXAWt0uyINyI+QoOcg9R67eBxjqeQa2orxQ8QZmRAwUKVChiR7E5IA4HbrXLKGsklzPrdq+6Wq1bf+dvMzhC1rWdnZRV5JPTa71vv69Nkejrqm6VCjNhEAkJA2vkfNzknd0wPqKt21zi43HCp1y4OHIP3lb+Hv0Gc9MZrh4LldpLyHeF3KFYBSR/ExHQDjGdxznn00IL5dipvDyKCdknPHUYHt+B965KlP3bRVm0ls7XbXpe91rstL2ud9Om+sVd2vqlba+1ndLW3qelWt66SuxJjiwF3M2SzHIOAP0PfuOtdPZ62YnWPzFBixt+bJKAnIC56/wB7PTjgng+OWl9MwZHYYDGQANndGv8ADnA+dc8n36Cty0vmBaRWCoWITecPg9drYycjkjGemenHnVsFCSk5LmfK0l0bvHZvfS+muj32PToUoyk38WqSjZJ9Nbt/r5vz9uk1uJ1CG5+zxzxq6onzZkOfvgY2qxBwOMEH6HyvxjdrqGkahJBPBNJbqbiNpFO5hBnzUMmcJImQVTaepwc4rPl1VUdwzn988Ziid8ICueM7SQGzkDPJHbrXOa1dzQ2N/FE3mpcW8yukAA3pMuFLnJ+bKncQOeOOlcdLBRpq1Nvlck3Fp6Ncj0e9nt89ez++4YVOniaMU7N1YX5lyq143VnpvounbufIWtajc3GsNqDXH7uCYwwq+NxZCQ+F/vg42tyCOeDkV3+ka9cSQ6dM1y7yFAjxu/FvFH92JRjgHPPPbrjIrzbKpqEtrdQyK/mTeWGyTHk5QlcDcwHVuM5rd0w2sdi7F23+ZJHtzkru6MBxzkdunUZzXrzjBQpwcU7Rio2it2lfr8+nXWx/Q2Bko0oKykuSNopLqlrrurX2+d9j1ybx4JBBYwFVkSRTcCP75I+44G44z82SDx3GBWfr2szW63E/nhmlQGHcc+W20kqvvxzxgY7145awTDX
HcPK0chUGQElcc5BPb+WK0fGOqSRWc0AY+ZAAGLZy24EqGz6YIHuTXPKhBzioppS5XrpZO199LJ7JNW3eu/v4GdJVYcraaS91tp7xvvqr26Wtu9NTw3xV4zutQ8SxxSMzRwOVMcTb0whG4sveU5GT39BXokscV3pljeQuGWWBVZCMMnQkFfr05z9DXzRrsnka3JdRnau/zHAbld/IJJ/jJHPpnmvXvB+t/bbSK1mmGRFhVZsAlceUi4zgnLbjgnge1dVfDqmqbpR0lFKUlu27S66NWVnrs0+tz9O4eryaqQm7uCTjdu7h7rdm3ve60/M9f8PaCkhhE0S3BkZbgRoRkxEncxfGR+R4z2rA+JmlwiO4lt1KGNFEY5yYyPmHoDGeEOBy3bOK2rHU7rTokuYSrmKMwsD8xRDjJX1AA5GMdOlXNcSLWPDVzJLKjXQhMgjyEO7nJzz8o7jr0rjopxqOPL1jq7pWSSel7pu291e2nc+9wWYTpVYJVHyxcYqMpNtNuLk9bJqP5ep8r3EEL2rSKreaGJdvvSMvIDZA4Gc84PPPBFYLzW5CREAMoyzH7425xjsWGSAPrgnGa7B4kiE0Sk5G4Hb6DOFLY+6uT785POK87vFdJGRmIYMfmUZGPqMZ46n9K9mMfaRi23FXV0naTta++trWaTvr2PvsNjYyhGatPms7Xd3bl1adm1bZtepftbqR7uJYXKfvQQ3BZih6EEjGf/1+309oOsfZo7I3jSMQsYn+cAbAPlUIAflbPPTPPfNfIRuAk8bISgjI+YZ65BJAB9R6dunNeuWnieaW10+BWQyFESSfvL5f3QfR13HcecnqKxqUXTnOcb3lFJWW2z1fTt83ax7OU46EnWpe1UU5xat8LUrJpJu6fS1ra32sz2fXdRIkl+wTusN4PnDnaozkY75wOnAxu6HJrzCJLdb2Y3ab1TJTMhHzEk7l+Uls88dePzdJq0qSRyTSF4yCMNkhSANxGP4T39QeOTWTqmoCaRfs6BP3eJPLIA3MAMhcZ7cHPBBHA5ricpTnCVuSMY3aa0bdlaW/XRaq1+zPsaGJsoRhJOnFK6ejTSjbTrfbp2Kl7OrXMhiyiKcKuTjIyMdBz2wcnng1rafrd5DA1mH2wS5DDjdk++cDB+ueOTwa5FZXaYE/MMkYJwS47kY5HHzH3B9q2beIOEdAQCQXVjyMY4Gf1Bx1rkrStKT1i2touyW0UtGnbvbTod+DxNT20p4dycJNKVO1nC1lpG9radH6aG+EaRRJGDu6Y9Rxnbz36nnJx1rf0+RjnJ4jXZtbAwy9SpHc46d8fngxzBYhwHABBBbkHI4UDoQc9f67qtW5kO5t7HzASTuxnAICq2MAgfmecYrhkk1orN6O1tE2tVa7fS2z1PocNWhQlywlLmnG86cZXSlppZuyt2eqd1Y6pruG5t9nl7JeSQq4ztPDZJ6H04xzn34bVwQ+/njKgZDYHIGcdOBk/qOhrRg1AxF84CrkDPAwo5we5PPT0wO1ZtzdwywzA5DMzEKBkZ443EjIHHb19eNIxajdtOyjG7vdpJaPvrvo/luddTEp0W4yjG795Na6JfZTWiau3pr62OYkkLyqGzgHaQSRtyP0BIx6561MVIYDlstg5bt059yPX2HOeajBw5PIbOcjnPP3QemfY9OQOK3rC1MzRlosnB/eE4csR0J/unpt9ccnNRUhK94ySb9EraNtt9O1umnRnl0FKvKfsqso1Jyba6W933vLV323fzNHQ72SxvI5VJWMJsIzt2lgBkN0wDkng5717CNT87w/PanLCWGVpDuH71jgqu7GQoP3hgjPTGePIBbKJlG3aQwyBzgdcN+GMfXr1ruobSZrSJQ5CTMqqFB+QEHIK55IwDyc59uvnxq+wxDkmndOUkneMlaOurSel7a366nq0l+59jWtOrTnBxmlqrNat72Wttl6njF4zieRcAZkfjIO3Bz17j6565wOtXbHSbu9j86NAUBZdxO0MQM
kA44I44xknjFbWt+HpIb3dCsn2d8OjEYY5yMbf4cYOBk4Bxmum0exubTT9vljDKDID95EOc4yOpH0wdp7V7Cxf7uEoSWrV07aJ2vZJ72t+FrXMIYCdWrUlVbdJv3HzJKfwuyvtZt6J9Ur2PJ7hHhmaJ1IZHZSCMHK/Kee+M9T7dM11WheHzqgZ9oHlqxAccPgclW67s9BtHf0rVufDRu7tZUQqsjb8Y3P83UEYBGPcD69K9BsbCLSdIkkMeZUIUYwGaPkNj0cfLycg/N0GSNJ41csIwb55OK0tdX1ad9FdtX6WtqzmjlsoTqupG6Uo+zi7O692zafa123a6vvqzxDU7ZrC4kgZTuH3Wbjj3Htn2/rXPzs8jZGM8gYHBx/dH+I9c4wBXVa6bq4vpHlQhixVFYdQSRyOxOOST+ormbiN4ZArcMBnHHHA/n6H+tetQqKUIu/vqN73Ts1a6e97Ky0vs/M+ex8Gpzik1S5op2suy01tq72a02suhnTRuODkEc8DOcj9R3/AF7VHFu4OM5+Xjrk9PoOOScdcd+LM5d8dCemc4JHoeeT6nHOelOt4mO0cE5BAPt7+oznOfzzxpOpaFm79eui6K7f3d+u54MsO5Yh+zUkklq+tnFp6NrbS62foewfD27jhuLWG5VPICMX28+mARnktzk5B475Net6lZadcxR3URVLdhllk4+fvhj2UgEYHHOK+bdGuZrC4VwCpJ+/1AIGQASenOD0x04rsrjxFeshVrgmIHKxYOAe7DBGCTgjHUfp41RqM5LlcoyvJSja6lpu2mrP1vbotj9JybFUXhadOo5UqlP3J86TTSUVaNrJb7212auyTxnHAZCFBLRjCydpVAOMH+LaAACB064HFeSz5D4bIAJJI5Pp0yePr2HPPFd5qN4L1IC7lgMKqMc4HHHbrx65565GeT1KApL8qbc4IA6fNjjoOemTzjuTwK6sNNtpSVnK7avouVRW2u7vZdF5WR5vEjhUUJUNVGME273kvdbdl201utFoktsi6ZSsfycAYDjuQMkDGfxJyex60+3YRkBWIJ+oP8u3rjvVe7z8uRwDjp0I+8O/Occnrj3xSxMomRghwBnB5yR1JORwee/+z716sIe6ndtdbO607pa7vTpbzPh4V2sXJ2Sk3Tim1ytX5U7W7W9d/R6ZcnGMgjr349T2/T0qVQHjIYkEAnOevOMY4Hp0579qqTSfLlcAOScA/dGegHHPP1x6DNMSdwQpI2nHfJxzgkc4IHA45yfTJHFct76Pe/Z7rfda9vwseosXTjW9nP3lUik9NOZuKWttU13drvQl8srz6e4yBkj3z1HJz9fSIs45OSAeB6ZByc9MAkZxyMjjqQ6YsFBBGWBOcj3wfbpz6Dk8cVWDPjrkdcE8EkHpk8Djk89Md6uC0u9Outlva2t9enp52VuTEVqdKTjBSiklfld1raza2d+qWidl10vwylQOeMk59SfT69OPfnrV+O83DYTgHqV6g88n19T1689cjCjYuNpyMHnBGBg4xjgf45A9amQsFO0YwecYwFPtyOpJPTn86p0oy1attZ6K2zXXS/XT72a4fMa1LkjTjJ02lzS0Wul/P7vQ3g4K4DZbcDgEMGU++Bnj0HNKWKs3TIIHTjBxg9wef5c96zIrnG049cc9+549M+3rjtVhpiSG3A9ck9emeB3yAcVzTp+nXvrdx8td36a7I92jjIygpRnKTTjdJt2Wi2bdut/NbamkGDoAeucZ7YPTP6ZPHtg1WUbWA6fMCcDp0zgc5PcDpzz2zLA6OCGK8KxHJH5e46j344ycxllDDkg9ADypOeAM4/xGe/biqpQd2n2stVdWd13/ADuvU9VyjWVGpKSs+qttaKd77N29NGtLl54CQFUbt+GBI5Kkc7uRgjvzxwahe3aNJQBn5SAPT0PGeOwI9hx2sCZniVjw23Ct1DAEevfgjGP/AK0iIZjGoLGUnG0nhicEDJ9CTkY55rnp1ZLWTSS0s77
rz+9el169XsMPUlNwi5ylFWcXfZJXs9Vq3ZLR+Vjj5CQ4VhsPzDj8cn9OO/0HSJiGbHJKg/NnAJP8sfzHStXVraWG4JcbTnJB4GCDyBjv+vOMEVkKCSeT1z0IyfX+YPseM8ketTmpU4tNN+Tuls7bW39Ht00PhsXSnRxFSjKLced2ckk5LS3lv2T6eZoWI2MrMMckEA4JH5dPxGOfUVpz3TBMAhXGcEdk6AfXHPYHOPpQgQeVljtQkc9STxkj068jk9uadcCMyAI7EHGc9MnA69wMcH6Hpmk+WTbaTd9LJeV9fn/nszvoynRw/Km0tLJuzaair6db67XVkyzaoWkR2O3JByeueffucE4/LjjSuMSPs8zJGM8nr02qAfpnrjsOtVI1WKEb8bccMevIxjoDjPbqOPx2/D0EV3eRmQZjBJwehYEY3bgeBxnsfUciuHEzjG9RtpRTej630V9bPpb/AIY9fBrncMJzLnqJSTu0+W0W35vzegQafILZ5Du44UEfMQSTgH2wMjvj3NZ62MsdysYRxIzfL9CcjHfHPHrz06V6TqUK2ltGBEqlmLYJzkH0xyN3G3rk9gOazraya4urZo4WMs0gC7gSOfYfNwB0HrXixxsZ87b1Urpp6tK2miXdvbW7voezUwFFQpJXlKmoy0u5Xdm29XZXVr/joirrCxWmjwLISJWUBTnO5hjI4xg545APX6Uzw9dQy2wt5V2mRZAxzzvHKEDOAOvTJ/U0fEGJ7NrS2ZCpYqSAeh4zlcAjnHqOuK5y3861e2UZZmC4wOAHA647j1xke/Fd9OlCrRnGMmueXMr3bskvPZNa3Xd9Wjza+JnGtOCfupQWqfVRelle6s127k+qpbyaffRTKv2i3kLRPyS6KT3zjIGOB3J6YxXlJdHMi5ywLYYjg46H2x09e/09K8Qkw2sjSkq7jAB4yTyeOp7fgR9a8uURvvI3Kecfw5H1yQOufb616OWRcYSd72nZWu0l7q00+HyVr/I/OuJ6kqleirQVWSmru8XZNWvZW116atdmzpvB8Gb6cliSkLbgBwu4E5Df3s9Bj3xXj/iV0fX9RVFHmLISWbHJyeAOxGff0r23woFt3u5FYMBASQeCflbr6dsDGevuK8D1iSa41q+lYKA874Knjgn6e4PJwRnmvrck97HVZXVowu79ZXhuvu2XXzP5n8aa3sMkwNBJOVTFzcle94xSXutN6K6u+pmDerhSuWJznpntxz+XTjODV3IIBYbWX5VYDByD2GeuCOe/6VHlSoZ8g9VcdMDjp2yCeDkHoOlOYhpI2BUp1DHqSPbHHbBz14+n1z7PdaXv28uvordb9T+Taskqsmr6tXb7aXSXyte/fytqWaEsn7wjqDjqcc46c9efWu80W3jmmhik8xUJyZAQAB6ZxkHPT2OR6nkrGMFoTnOcEKo5YnHoRgHA7cD8K9U0ewzAhyMt8w2gblA6Ejgg54POTyOO3z+ZVlCL13bt3ura3V7Xuu/+f12Q4aVSSnJaR5Hq7p6Qdm9knra93r02XbeHLWCFlibymjDkhmHJJ+6QefnJ/DHuePRrHT991GFZzBJ/Aw+VeRwvbOOgwPr3rz2zlwI4VgETRspSQjBZjjDE+uO4JGM5HavX/DqGXyHkcB4wODgBwT1x0GOzZO7npivjcRKfM29L66Wd46O91pZq/b8T7/DyjJckHblai1Ztxfu7Oy79N+9z0DwxYpHzKMMrhYmIAJyflzz9714wOfevbdLmhKCIJm4jCEqCdzFc/fx1yOT7Z5xmvMNKitZ1iYq0Lo+3dHnb82MMcYyWHU/0xXo2k2ojn81JMpJgeYOfm/iBwcgD0znPpXC0p6LbdJJeSVn03btt6q569Ks4wUZLmjs3dqV3a+ive9undX7ncNZw3sUbsGhWRQCFGWDA5yfu4XPXkk5I74qktkgmEeA8YJUSAYIx0LcZ9D16Djnr0mnxIUCfNuAwXHQhuucHrkd/yrpIdFjeOSSGJSI
hukGR8y8knBHJwDxk9sdRTcLtpJqy3WiWqb776dv88nmPsYtycnsopq+um97+adrfqcIthLCQUwU8wAkcBeeF/wB0e2M/lWgsr28d1GrsxVeAoznceR3+XPbrznvxvSLAzNEEKEHCsqklnHrz1z0OePxFWVtbZLOUyKgZsuxzlm2/eB6HHXgj86KcY3V39pJp26NdVumnp2/B/O5pmHtEueLfkrNxS5dbdd9O6TueeXUcmxGVjH8rZx1YcduwHY+2fSvHfiD4lbSdKvmAMWI9izu/33YMAintjHzn6DFeyawHjlMkCs8bKMDJCjghgzdFGQOvHv2r4X+OXiW53T6dFJgmbZLEpJ/d5+bgAYC8fMDnntjn6fLMOp1ITdmvdtZ3Tvyq17X5d9rbarY/FePeIIYTL8QoTXNKPKnC0XzSjG0el9Vq++lr7/L/AIl1O61LUp7qeQ/vZZAvJbC7jnB6EHjnHuOK5WZDiUNgls7Qfu7ccdM4J+mB9cVq3BO1wwAwxZN3VCB1B59efqPWsiV3BPygHIHGOnrgDPPrzX3FBqFk3okuW1rW/Cz03/U/kfG4idapVnVm+abveTbd3ZPV62asrLaz3tc5mWAIzMOG6emBnj8Djg55NUrlg52gLuOAxHcDOfb6dq6WWAOpYAsSCGHbPQHGO2T6EHtnms99PYkMQq47nuCOQMDnoMY5zj0r1qdaL332XktNNr9e7V+yPHnBppRjJ3e7slstY3/y1+854xnI2/xA7gep655OfT0z9eyp8pwRhQCWOBkHjJHfj19c98VqTwbCwIwAM59+Oc9/XOPpis1wPm+p4PqDnn/Z/WtVO7Vlppra+unW6Vtez/QE76bSjy3Wm3NFNN2162tdq2o9MD5gGIAJzn+ueR+H4jiiolJxkklc4CDgEj8eg5PP19cFbKN1fXe3T/NEVJqMrPf8N3e12vy+/d/oh448TWsKTgzJJKyHemMIVfBRgQeS205GOCB35r478deKvMgmVnkXfvMahc5lz87Ahv8AV4Ix2PNWvHfxEjmeQCUbfKwNo+Yvg5XeD8yjPB9SfTj5m1bW5b4sDK6ID+7CMcpknKjOSG9B355GK+PyTJJ1JxnKNuV8zvrpeN76J3v6Nb6tn1OOzWL92MlzSh0aaUnZa673a16W1s7Eer3y3Ep8ssxUK28E55z1XOFVe3JJ5PQDGR57jhjlgQWLEFiRnAGR0AP3f54AFQElucEZJClvnZT0wMDjOc9/pzT3JD8gEEKW3feOSRgYIAB4yepx3Ffo1HBwp04wspTj1tou3ZO+j89bny9bESnaTSUr3b6627p9b21WjTb6lh5cPuUkb8gK2QOcfKM9cnOfp1pUnbKjAByQMDhiMdecc5x74znsasmcrnbxjaqkHBHqB0/2vwHYkopAXAA3hiMdDjIJx3zx685696uVOKUVFRukney20vq/K3o/vOS7au5Nu6d23bW177Xemqv87lpHO7cgUsSQwzxnpnPHHt268Yq0jkIeATjBAOcE9cHggDvj15wKqrsXncu/b8oXGSx6AHjnPUYwODT0dQQWyMk5GDjPYjnJIPoQvHXisnTUnpG70vbqrpWtvby/DcqNZqSst9PS6W21uz06JrY0ot4UcksBgsuMgcYGR1I7dM88YrobKSRXGCWAxux/dIGcdADnr1x+Vc5E6kDBOSc8DO9uM7jweOO3HHHetq2Zdue4AzwcljnoeOQev496460Z8sk4u6bTW3VW0d9/+H2Pbwa+Hmu9E1a+stHv0u07K3fbQ9N8P3ypKoYkLGG4GcNuxgEZ5yBl8+1e+eHtQMUCxx4iEsZaQqP9Yzn7pGcgkDgc5wegNfNmiOUeIhQ0hYAN0G0n5i36fT6Yz7DoV2Fnj2EBy6mNQcqAOynA4OTnjHHfFeZUhFyaSurdXonp669+mmnc+uwFWUIppxctPlpG66NPTr01PrnwMUMUfmFFV4TIuFwN3ykEnIw2M7q+jvD021DFAju
6hZGKH5tueVB5I2YGAADyRXzB4Sm+0rYQ7mRYyJJmC8Mxx2yMqCOuevPGK+kPDFy1o6yEfLCDk4wHD42g8ndnBBHGB3548nFwfK01fS1vR2Wvnrpt2aPrsHV5YpR97n3vrFN8rb/7ds/LXyPrb4eyuWglEhGYmdSxPySR7RtbpgEnAAyOCe1e/aDdf6VAYpP3krZbr82OT82MEdsdvpXy34Y1OMRIXlMQABJAHDNgHEQPKDuQ3f15r3/w9qflRwMHR0JASbZnDNwOc5wv93vu4I5r4fH0p81RtKyjy2dlZaaaXv8Ad1XTU+kw9T3VGE03JpyveyvZu19bfildaWufVnhzUHlIEinzEURqnfaOhHPA6544OK9Z0l5Ld4SjMzS7mKKc8jGec5wRjjHbFfOXhu9igFmQrSXU7u8zbuqcYAU/mPXJAwRmvZtInNxMu2d0KIcED5U3YyCQeDx0x+NfJ14SpuUklZ3smra9Xr9+muuh2x5ZpOys7JeumqTd2umvz2sesWuqxfaSjtIEjGHC/LhsDkkZx3yMc9q9W0fxEI9NW0jdQ4haFXwBhWPRG9cHrjv3r59aWFIgbdXd5CpedjjzGJ5x1+U8HIzjgcZFdDo93dPIpDsPLIyAejg8IRnAA7gc59uvk15tpxe0le2ml9LtLbu9fPyXXSoqbbnFNJtU4pJNNW9LN6u3/DH0Do91tsblHY+au54tw5PPU5wSfQ9sZxxXXeHohK8QuJQGkyxcjG1iR8oyc+gIyMDHOa8p06+kbDTtjJCl+gHA4IHfdxn8cDBNeh2d5ap9nAZgRgb+fmkJ4XvkHHt0xjrXjVZuMrS5nZJrlejatZ9Ldd9lpodagor2d3BSad4vXmaWr3+JPbTa+p7BbzRaUVHmby/zbs5yvHU4HXOQBz6EnpuQOlzG07AfvGUYA52scEY4xkAZ7jqeOa4OQicWrLl0EaByOobByPTk/nznua72xxALe3dcrKgkUgjOBg4J6jPOPpU08QneCfutf4t2n16t3VmvLcHTagvZpSnqnzPXo9Ot9n5W03O08OrJG7Qof3KqDtJ4Yc5APcjAI6AjPpXpWnmLZtkAcAHyjgfK2RkcdRxz07Hk4z5dp12ILgbMCLGXzgFccFQOck4+X8e3T0XSp/tEJQ5DAO0ZQcKhxySDgke56njjIr08BUivdTvK9nFp3aumkndpW081p5HHiIVElKaWlm3F3Satvtqu66Lq9C3dwxvJEUOGz869AxPXB7ex9D6AVsWUSRxnf8vyllB43HGB2JJ4x9eozVO3VHeDdgkg8MeZNuMYGOSSTnnGfxrQaNNkZBY9cE5yvUAE469SK9CFFJyqNJt2dlvra/zW6V7/AKZ86agm52touVuN9LO1/npbf7mw2z3LSyMSjKdqL2BHALfp/XtXRW1jJLGVYnJAUY5HTk8dScnv+Paq9nbFo93+ySVHUt/X0HT3wK6HTwYowrrxnjIyQSex656nnHOPQiuunRg+VvmUm7vRbO1rNrfbW222xpCs1eCaTvzaapONrWvdp3u/O76XMuLTpLedVRdxIyx9BnJBPqBn8u2a6jT9yK8ZyVKkAYxnqcc9Mcdsc9KsW8KOQ4++ecYJyOO/f8e+OM9LzCLKqiDcOuOhx7dORnJ7dWGa9fA4JK87uMXaze7WmidtU+unrsEsUpNRSulZN6p36tt6LS6Wt/vGWkskKNGG3NzgEHAzxxnI44yfbOR907VqS0IBxvO7k4wMdSBzznAx3HuMnGciN1LA/MRsI5xgkjIGflAOMdff12bVDKF3Aqq4yV578YJ7eo6e3WvpcBQim0nNtpO2mi002+a6fijKpO8VZRsmm27uSkrPyb31tbTyElSRImK5IYNyAckseRnHA4HXg1yE5k81oXXHJ5wDgHON3XJB7evavV0toTbsGRSQPmz8ufoSOMenPJ5GQBXJX2mNLI0yRY2DgdmOeBjofXPHArtxGEbpxamrJrnjLV2bTs7rddr
6hhMXeclKC6NP4XdNJOzeuut1u/S5gwRBYl3AAoMkgdQeyn9SMjpj1q/bSK5XgqwPBUA9O5Hr1AxgfQ9ZYoCMJJGykAgkH5CBjgEHnH8uvTFSKqQMr8gE5GAAMdByfukfQ/rXPTwcItOL5eZK71Wmlu1nbr8memsTZO6cm1eN3fWystE7p2td766aG3FazywjlsEgqOjYHQ5GeCfz/lk6xdT2gj+Ty2iJBY5AYjB3e+Qef0BNdJBfWa26Heu5MdBxg9SfQDjPf2H8WD4j2TxAoVdZFG4Ic468dOBjOT2zjNPE0oU8POVKu1Uh73LfR2UXpq+tt3bXbsYSq54iMatFezbSTa93ePfye2mq3vti2+qw3shEzE7l+UEfKB0GDnByDn1I6dK5XxJpsRjmIVfLmTaqg5AY8AgjkcH0PU47VVTdYzyB2PQlG6fKM8Z5H1OPfnFaMxn1CyJztaMfu2BBO5cY49x1IwOuK+cni5YinKNTSS2ez2invvqvLyu7o91YeFCqp0Vam1HmSS5W2o9bpaaJWbt06HjcGlfZkura5RXidnb5wCzbicBSeRjGANvfPGK/M39vn9nSy+IngPWGtbRbi5t7Tz4FUbWjCK5IJxyRnp1GOM4FfqhrsF2yLLboRcQuGnI+6yqOpX+9xjPUDsa8q8UW+ma/o+p6fqkIMrW00Yjbk4KHAJxjB6ntwBx35aUXSqQUW01KLb2drpt3vfSzv+OyPThUqwcakJJJq01zbpqPVJ7efTyuj/Ol+MPga58E+Ltb8OXlnLbiG4kgKzqVYZZxG3zDguNxB7gcnNfGniDSnsJLmOf5mR32KEIjkBPysfmxuXtj+QzX9FP/AAVC/Z8m8OeLp/GWk2pktTcyRXMqriRH3HY2AMEruILZG3IyM5r8HfHlhgsCNs6SeXMHH8bcENjlieOQBjoOTkfo+WYyLjTlorpKS7SVrN2X2v8AL5fB57hXVVWUo25byg+klouVaK60ve13romj5+k2ujoT+8CtjA4/2c+pXknOBz26VksNuM4LAntgHnHXvg/xEjPtW3dW80VxLGyYKnIccDb6Z9wepznBGOecmZfm3Zyq4BGDkZzknv8AqOnGK+vptSpqSaaaT01Wtu2iTvez218j8oxNKVOo/aK0rtNWcdE19672T1ffa1aOu5CThg2FHJJzyCOeCOw4zke9e8eC7gKiI4IdT5iEABsEqOWOfu8YwD3rwGFlDLncMjKgcAkYALDj3zwPpXqXhC+lt2t2yrAYiwzEADIICdd3B+boAR0GSa8LOqPtaLa6Xd23bSyX4+fVHqZPiFTqxu/ecklbomot38ktVfTdXPvLwPeK0UEK8ylQ8ZXkpwMAsf7vIyQcZ5znNfU2gymSxiVh8rEr5e7BPTHln7xiBzvHG3I6gmvifwPesrWRPClgQ+cfIQCcAZAB4/3ueR81fW3hG6P7uISJJ5hKbs48tXA+Tbg7c9MEmvxjNaDjNy2d/ta+9eN0rpWWi83fft+iUXzQ3bUrPV6JK2murWuybSs73O+M2Y3jdF2JxGgAJMpOPl9BnHfB4ORjB4fU4UklEuQdpbcCpBcsBlRgnGMD+XY16AYDGrDbh2G0bRgooySxBPyls9ep5xxzXJX0aCVpQMBT+8jUAFduQCD2wclsdcjmuTCTUXdXUlqkrNJ3S1Tv1erv076HkZjS54NKMI8zUnre+kVe+72ta6urXOAu4ZA7IRkcMSxJUKeq56knj6cg9zWeCqFYmYhQpwSCw3D7oY8YHUD+9nseK6m9j37kiBUOpLsBkFgCdoJAyQM5IOM1zjRkxMgxnBIY4BbbnJJycEZ4PI+ucV9DhqnPH3rJ3fe7Ttsv0T7WurHxGOwz50ottS+000k1bzve+llYSPckWC43Mys23G4gk4YnjB9R0zWssoU4Yu5UAKykEhSODkZII7gjoRnrWMjfJuUFgecDB+72Ydcjrj8u9W4XL4UkDahAYDOMYwWPHPY+g/vV2KC
aW7bs7+em9kteltdFqeJUpJX1UrPl0vdPRXS7Nrpp8rm7FeNGTt2bHCgptyw/vnrwM7SD9fXFasdyOG3b0HzLuJbBGDkL6cjjPTP48qrHZuyoMbAMPQZPzg988Z7dOeObUd1mRQGXZtBZM8nOflH93HU+pwPYZNPmXLe7veVrqyt3Xz/PoZRpt68nKls7pN3t0Vnu7t+TfkdjaXbxO7MWKspUrGc7pDjO0HkAZGRn6ZzWmt2fMjySJAgKtjOQ/fPqMc5HJ681x1vO4d2OPLQbQCfu7+gJ53nIOfwyMVeivGVi+8LHCOcAZ2nAUgAjgdsHOT24rlm7X1T7JK70ttp1636fh1Qo1LJJO7eunRtac2tmu+j736dnDduCY3yHCl4ypA3AEAhiOoOQQD1xxgVoNqLh4gzCPaDImR8rZ6YUHG444yR64PIrg11FkIw4CEjL7QwcfxdSOemD+J4zUsupqUBLtuwCpZNxx2UEnkc8HpjOBkYrlkpNPbXVtXTjt2Vlfqkr27nqUKNTRKCT0962v2Va7287PT8u7uNXjjAjkcM7cKxxt8wj5FDf3+CQAOOvJ65LatvjmYCYsyNE0Mh6n++CvUDHTg46V5/eX7F1i2jeZA8ZkkLFVPTJxwR37jp60yG/miad5JWwCQ+5sZkYfdBPOW4IyOg/CpjTjZWScr9NJN3S1110XXa2mmh9ZlinCUJRXO+aLle2yaeju9NL+TTa0bZ4x44vblPE7XxkKFkCh8AK7DIIVcAAYwM57nnPNM02/doZZnO4B1xGOfXJ2j0yO3U5xxWN8RpbiW9iugQVEmyRlPyqAQFAXjjnBYk4/Guc0zWltFUl0YSIQ8b5yp74Prz1Ax7da6J0G0klG8UnZbpaPzfRtqybWp+5ZZiozoUG5pXpwslb3ZWj1utV0Wy16WPWodTfTYZLiPbK83zRBiJFQjkE4xjPQfjwMcc1datc+IYNRWXbHOsQwFA6rnIOOnGMHPPfPNU9EvRqbT2MsuMxtJbnGcDqGB3D/gQ9+1UHmOnX11Gzf8fEbgLuwvQ4wfVgc4weh9qmNFSs42srWk72utbN2XfbS1vU9+jiFCcYWuvdk5xjduOj+LdWva/f0PnvxYUs9UnUuZHcKw/2jnpjgZGORx7dhV3wX4jWfVbK23eWiNmTHOXBGGOMbVA+91zkY98jxgspvZ5JNnmCVsSkbgqNnHzZG3I6HHqeled6Lc3Gma5A2flMwD4b5QHwRknsR/KvR+rxq4Zp6zhG600bXLeysl1XW7vs73PrMvz6pha9CFNRdCcoU5yv720UnKWj1e+qT2W5+ilutmbS3naNpLO4U7mJ2hlIG1h975WPoD061h37yrpF00MmIgRGGU/MpYnHUHjAHbn+UGhaotz4d0xV8uUKQEQH/VoQD5YHU/X34X034oU+w30coAgnVmAC/cOMpnnjnPOeD0wDivCjStUbjFWTTbaaattd36WdnffyR+lUcZ71J813zU3dvo3FpddXZ3ez02Z4OiiK9bz13RPKcsDtH++R82Vz69+fccN4shSG9eSLHlsA2QPlBP54Ax198YOQK7LW5orHUp1BBQgCM9QDyQR7jB569z1FebazevNJJudnRyBljkL15HuPTH9M+nQXPCyjfaTVknF3Wq7rpdL16o/QcNjY/VlJz5bxjKKTSfM1G9+2un4evMNMQ6gcruweM9xnPPQ9vfOa7DS5T5Srt4jZWjBGG2nOdxB4yeg64+mK4O4Yo/y5ALHGBxnI4HPJzxjj0HTnW0/UpY5I9uML/rCDy/T+f146YxXZOg5RVrWabatbaz1/Fa99FrcvLM0p0cTJYmVnKUFCztdXjd76626fLU9Ve4U2vyhvmHzggkovGArdOM8H1PBrJedgBtLbAShVjhiRyQenT69zjsams52lgG4HJzIoOPlzzg88gdQOh/AUw3NviRGCNuwcjACPjrt5B68Anv1rxq1Jw0jeSbV9FotPi6vra/l1P0mniPa
KnKFSMYNRbSdrppX+1u76uy3vqWrKFpX3IpL5+Vh0QZBKnnqDwD6Z9TW4FCBfM6g9PcEA57gnvnBOTxnNZunlFdAZDt2cDuzfwH8eevpj63ZS7krglt2RuJBIPYg5A9CfpXjVU5NNuy87rTROzaX5Kze+x9BhsVGjRXI5atfD8X2fiabvaz0d9OjN2CAzxkpGHVcnIboB1LH3OOR79wKuiOTyGYAqme5AIHI2g889Cep7ciszT5TGu0sSoDbhnoc457n8e/HU1sHfJHsCkp94HGBgdm7c9B09vSuRtxdm7JOyb67PVu3o1e+mx9Fhq0KkIVI3Tejd/TVvfZp6WfRnKXd05PlEfKpYbcc4zlc9yffHJ5xmsn7RIuAcgDABx8uDkDnn8DnP0FXNWZvtW/G0gADHAPQ8AdR69Pcc4rDeaRpD+H3hgEnpx79M/TPTB9CnBOKbtblva2iell2v+vc4Z4hqu5Oo04NRUU/i+HdX2d3vfzt1vlpHdXxgqflIUYyOM8cHjrkZHHXJrqbCaTZEFbDDDNg9SOMjHCge3Gaw7O3eSDcoznLOpxuUDqfbPv8Aj73I5WtlYIeoYAHnbxzk8c8fiPYVz1Xz8yXLzR0Ta0tpdd/O2vluevhqjpOVSquWFRc3M9LR91p+vTdfeegaRHbynMyl3kY9QTjHckA4/n+groklktlwhO1GJ5Hy5zgEEjAJx9OAfWvN/D+qukhV8EISCc5CsT94ngdfz75xmu/Msr2jlU3blLkgcA9Qc8nGM5JA7cCvGrx5k1GNnTfxRTXRPdrz0X5o9mjiIVKcZ0VzwUlzySXM1eLau201trZ6NtHdaX4bj1q0ErW7TPIjMgBy0bJ0IyCdo6859O1dFF4bjs7OS3Mcb+buRjKm5kYnGA2c5ByBjgfQ4rK+F1zPPLFI8iosbshVzwwBwcD2GAoPDduxHonime4tlNwoVQHxHIRtXcMEZ4HOByTnr06AzGopUlvo93ZNSSV1003S87XYqmIqqvHDxlHklK8Fs1tay00T3879mzkm8HJpVs2o3MULu6AjcwKhTyrBuikDndg4zyORXk19dO99JCEym5gqdsZ4Gc4z3x1bt3Fdpqni29vIzYs5MeGQlBjC5H3j05/hwP8AvrNZEq2xs0uGgZWLId5HzF1ztKnlmBJ547dK0pzUZQk4pJpLTdy01vfWy5vzVrkzrVaMWqkoznN7r3eVO1lrq0ra7XWtzzjxDZB4muBC0cjgmQEZYvx91eqjpjrXm9xptzLLtiR5GfBycMUQ5yGIHAyB1GRmvUNfuJGErggRzOWYDkKe2O+Tjn047nNcx4ZuoRq6QS5kXLMIjyWIzy5ODsHUjGRx9K9vDV2oxs0o3UXdNtare1lon10fXoj5zGVISbhK3Nq0teijprbe913WqS1OPTSLgeasiGNoVLhWP3sY3KpGckcYHJNZ0a7bmPcpUB+Qw2EYI9eg45OMd+gJr3zXNMjcLcwRRw7wGD4CgnjIwPr0/H6+V63pk0crXKjeqsBIQMKM55GPxIB+tdaxCmpLmV1o1s3qlpddL3Tfy3PKhecIShHllCpGTho3ON4/DtZW3SdnfrYW2hSeWIIMsz4OOQMgZwB2PGORz+FdRqGiLYW4lbEpl2lXDdFYEjPcEHtzjjk1zGiSPbXkJbLAPwpH8R6Hr2HUfTAGa6rW7qf91bEgiXaw4GVDc4BHTGB16/z5ajtJWu7+6k72SSTbbXlpvfV2XV/V4FUZQlXmkm4txTjdJ3jFJxT3118rbWOQeOVJNmAAMkDqCeOjHhT3HX8QatPCBayu8Jd3UkE/MyMBgMOBkAnjORj8SblyqpHknJAxuwM+o9T7kZ71mz32ITFE253QrjIznHqRxkcHH/69acryilTvey5rP3dm73emuuvc5cdSi6U4uSvFtrl00bT0T6J9u3Tc4bUAwkYR5ySS2SM89SO3Ppxx8veqtuxO0ZwwOMnjv29+3Hr+VzUdwkOcliRuwMA
g9RjvnjB6A4B56U4VXcNvBzz1ySOePbPTgcdK9+nLlpRelmum/S/yd9OnzVz89rp/XmldWklJXVmvds4WT1WreunzNVonEO4kHJyBjsckngcduOmTVQD95yWHfPpwRjP4nPA5q00pMCkE/L8vHORnsMn146D9arKXLhemc4JI4Hc9Tz3OD9PeXe1tlby1ejtbS1tfX5o76saPPSVO7a5PNJ6a3b6vtba3QnYbQrBywPHOOh/Hpk8+nQcZqFuT0I5GST047gDjt1Pr9K0GjUIOSwPft1OQe+f8fzr7QXwcEHnAI7Z6gHtxnqOgzxWkGrbdErdtn5rbZ6vUrEUJc0UpJSqKK3btez06u+3q9GUxGynkkeoXkcehzyOeecVaB2oCASCBn1J6kjI9Sfp1ODgVM8Sq25c7WPHUfKezDuTxk54HrzgCDbgDB3cHHQZIx+I6DofxqvaRtuuzv10V3vdu60a6eZNPCVaN4Ju7iuV3evNypWvp30897lZWw3GMAZBU5wCR3/Dk4Hr0zWjGMqOSTwRzyccYGPU9ARn+ZptEVJUIQT6jscDGOueMj06nnOLMeVGOvHK57/oMe4J7daynJPZ3fdp+V/n5+vz68FCdOTjUUrJ6r3tXpeyvqktbdNi2rAY65HBGcnPAPf3wCSO+MU/cpcZyCpAypwBzwcd+w6jgjPTFMjV2XG1dx55PYjOeuMeox6c+q7AoZSAfmwOTwSeB0zyR+HH0rgqa3l12enp93f5+Z7kZ1LRTUVCLT5WnsnFb2d3b+tktmKN5UXAG0ZGV+UlgOSuM+vI6flV62dInEe3cy5YZIOT6k4UE4z24zjviqemKMMNx5DEKTzuwMYPr3HX6A04ypE54wSxDNn7gzjAOMliBkdPTjFcU4XXLZNdmrap9HutLWS6vS90fR4apCCo1ZJRUlyya1S+G1+1/uXYqa8y3EqTbslVVW5A+dfvDGOvr6E9B35l2BYbSM4xgDGevJBHOf6c11V5AsltLJEAxRs57lT97I/PP0HrXKFV3FgvAORkfhk+wOOPcDFdWH5eSybvFu1tbbNp9b9bt9+2vzWc05RxN0opVOWV27aLl1VrrZ2eut13RNGzPGCWCqG5HUnPfGOfXt356YtWyrNPHEx2hc5wOTjB55xkk9v1OTUMQUqx4DgfKvY9c8juMcdM8etaGlohuI9zBSGJIIHUdgep74/pgCtKlRqM/iTS0av8A3b/JLffV9Wc2HhzVMPFyUudrmctbpNWSTaS81007s1G02a8IaM7ogwUKvDZHUkZxnkc9foDXTaXpUlrNGGYR7CD0JYjIIGMjOcnI/Xmtzwva2V1uJLAqXkBPB3jqoOeQcDtmrrLH9ocsArZwqj0U8fnjOeB6dMD5vFYqo3KkvhULS2+JtNOy12u9V62ufY4TKaVKdHGSacnJRk1KTcbpO1k9FbdfLcr6m0l2w27t48tduCRgZxg5+U5zn3Pau78JWbCGO6mVB9nYuCSCykZxxjg9eO/4Zpui2Vld2szsg8xw23HHzr907s8j9OvPWtFruPTdJ1GKFgW8ty5A5BA457bcjv15FeHSqKNaEEmk5a9Wm7RWuiTu7q7svme5HCxp885N+8pdNopXT3T621d7aW0uvBviPrCan4ilIlDCJURgv3Qy7t3T+IDGcDg5wegGVYXyP5UYI89Np3ctz1xnPYcH19q5S8L3N9czFTgyyFmJJY5JwQfyJJx7HoRqeH7eSaaRgDshy+8YzwOQDyRj6ZxyPSvtqVOMcPHmldxgrtW5nKSW/rrve683r+aVcfVlj5pQfsqlSpBOzbahs1ez1V7vZP8AC94pn+2KQoUCNFDgHq/QkAHp3wCDw3px5xJEUQsxC8Ht2OecDqenfqa6nUZpEupVfIiZtu1uAOcE45BPYdPXB6jlNSJSR8glCMqMcAepAOPx55zxXqZZSaXKrNK0u972Tu9dVpqtD4DijGxnJ1uSanFeyi5O693S9+m2ttL9tjo
/C83kWOr3ZwQkTrknb1UgKpPTqffn248Hu5XN9PIxVg8rkj0BPOAPw+te7WzpaeDtRl6GXcwUDLnJwGXPc5+brjivCpBF5jMjfMxzhuefQ8/zxkYz7fVZHSftsVUcXFc0YxaWjsoN67dNlbpd3P5Y8bMW/ZZPh1UjzShKvKMb3jztJN31dlG62+7QGCuRI5LRAY46gjoT6gcjtknnrxcVUARWORtIV85BLEcZ7AY4PQde4ApmNSQQx2t1TPJ+i44/McY5zWhCkYAVslgRgkE7QOijHr3H146ivo6slFNptL8el3oun5d9j+dqUOapaW7tq+t7LSydr3v9z9NvTYChTgEFwAefn3dNp7D3xgcAA549a8O3BhKK0eAoKK2cgswUbWGOQedvIyRwc159pcUbOm8ApHtUHoc469Dx+PPXPNe0aDpNvcQo6KvmAhQpxjj+LGc4/ljNfIZvXi01K+7SbWl/d6p9ntY+/wAjoTpQ9x81O0bxk05P4bq7S2vou1ld7HS6a9rPMitENpX5iAANxxxwDleOB/D75r17QLSA27GVGTIJjGCN4HUqfy6c844rzXT9Hu0lAEDdSVPB9ODwPfDfjzzXsnh6HyEhWRQwjGACDxjqO+Wb0HbvXzFWScfcd0uVvW+javZ3bba3XrZH1uE9+UnytOyVmkrW5bX20t/ldXOk8NPdxSujwhYQ52mQYyM8e5yD97jjIr2rQIYEAjlVTG+W3EDKyHkFRz/kj6Vw+lwG4KO0eYtysjY2r7Lj35HccHmvW4bAbbJ1QImAMDgBlx8ueM56HOOAO3NcyndySaSk0tNHFPW6Vr+S+7qd6XLT1lzvVK1k03bTTrddvPsjdsIEiiOxThicljub6gEDBxnGB7Ywa10muraKU25PKbSHGSA3UHA4A46j0yTVO5E1uIJDEVRkUAKc5bH3iQQSe/IyBk09tUTH2JISZnQ5k2kBs8ZDDjP159TWkZNxvflSvZddLJX82n6LRnjV7q8nJKFry522ua60T7q1rryv1Eso5HQmTaCzZZ3HA65wccex/kazLppoJJY7ol1biNkI27MjkkY9jkdO+eDXYWXkQ2Uizqzh0JAAywY8hsk5GCCeTxzXG6pcwi8iQHJePaFk6ZP3RnPPftwAQcnNb4eMXNK+sUm7q7VrJ3+9aebfY+TzOu1GUveV5aWWr2u0306t+b6KyZrurWGk+G7+9lSELDYzfvZIhgSFRtwSSS4wccADuRX48+P/ABUmo+JdRmMvmRyXEiLg/cG4/Ngk7s5AxxjjANfeH7T3jaDwr4UTTYph9r1BV3qrYCr82FAznOed3GTjvX5Vz3sl1M1w7KwkcyMWBLnceV68dBwT/TP3WS4RqhKo9pOPK0927aJ200Vm/Pc/l7xQzZyx1PL6Li+Wn7WahZNSbt712799r/NG/eszKSrHBG8HgnnPUY+7z/I/WhgMi4+YkEktxjGcjPJOD9D1HZqom9RgVy45ACjoV6EAgZ/EfdwaY9zgYQhQRgAnODz04785HI6d8ivYjTqJpJNtvRfcte+72fXpoj8XqNScpu8trpvqrO9rPppbzvo0aIjyPlGCRg4J5PXp+Pf35yeUljwACo7EZGT7jGOx549+4qCzudw+Zifm78n8MZz9f0OKt3LcZzjdg564OeM8jGMnB59666fNG6d1LmSTWi6brV/p1v0Oeei5lzJtdE7atKyXTR62XztcyL+1BjJ5IKg4A6A5HOfXnjpycnqK5WWIjKgtycAZI6Hgn35P1x+XYTsSoyWcY2tt/HAz78fT2rm7iM7iCMLkAfMd2eSOOmck575P0r0aF7cr11u9dttel15Pb5GdRq++t1za+atzf1e3yKqIB8rDG0fLjgMccdMgY9u2eeTRS4kGeflIJPqo4xkkDjjJ9O1Fd0Nvn/X9Izd29k9tW32W2j/4fbRI8o1HU7meR9ztscbSnVOnTBPAz15PY9ay1VWUyOCRwO+D1ycc5K9AOM568VL
cKQ5IKlck4bkg/wC0OynscnofWoUwMFioQEkIMYPYd+nB6gc12UYQpxioLldknZJN2stVv810T7k1LtN+d+jetur6t9dF3814DBWPOMq2eCOqjdxxwenfjrSOSWY/cOAc4GM9ifQt39x7E03Jc7FBbbyCxwq46AEDke2DyDzyBTCXG5gACSCBwSACRlT2BHU8kdsYrtVS2tleTs3vtZ3btuvP17HOuaTV7LRbtLZpN7J/h1fccFwSxYEluf7rZ6/mOQOmKcAq73Xrw0e45IHRicfxdMenfPWmsxCjaykkZYqc7ORkDvuHAPpnOORlVdgBnZxztUDkA8Ed8c8n/GovJ68yv20XRem23lbQVpLa13ppe+/T8Fd7Wt2bT5gR1BLDAP3hn68D29jVmPdhh950Ofm5CgZzkevI568DtUBO5kdicHgrjJLcYOcgYx7Z/TExbbwHwzdeRtK4GB069M+uBz63FrT7vNWst0ujd7/OzRSSdtlZr1VrJp9Xb5dC7bb1ZXwGPJIIwOcAHJOT2478fhuRBmQqq5EhLbv41bIGT7HAxjOfzrAhaQ4IbCg4BA+RuOg6Z5HTv1AArehlIXlgoKgMSOCw7AnH+R0rDExu7pJu1nq3fVW6rsuz126Hu4Nx5U942tvvLRaeVtH9/Q6zSpJElTICxgou0H94Tg5Occc/XjnpxXrGhTGG4j8wEONrBiQBkkbApPXgHd0+nr4xp0oEiv5gHTlum0Y79z0x6nvXqujTgrGzMxU7AZGGScHCnaecDOB26DmvFrpRlto+VNbdr+tnps9UfR4SSsrNrRJq+qsl0e+2tk+l0fW3g68kAt2BWR3ZSyghVSMcbVHIPGSSCMZHrX0To9+wl+ZTsRQck4/eEfdxnG0enA7V8v8AgFo4I03yrIdvmFcn5s4BIXnr2XOB6jt9GW0TM2nSLMQrxCSfcDg7sBUAB6+g9z1rknCM0tNHe9um2mu/ZLq9fM+mwlVqEXZpvbe7ta2297bb630R7xoGoZS0gjbdcSHeAflHl5B+UknLjqVx6V9OeFLoGO2lMrPuKqYD1UxZG4g9Dzwe/Jr5E0GcwzQhmzOVMyY4URrjaGPPJBYleOMZ4wT9JeHLtmkhQMF2wqzHGCHcDBUZxnAIU56nkV81mVFuLvFW1XM7OVtNH16W19ND6TCTnyxlO6jGK2XvSlo1Bq22u7S1bt0Pq7wveLJc20oXlVYB3/1asAAQ3QZI4U/lxivdrC+itbdRlT58TGPsSzYKljz1zkdiPTrXyvouoSxGCFCrAqGRFbDbjgszH8y3Az24r2iw1bzhbwsVBWMAsPmPH8O44wwPXGc5HHHHxGMw1SUk1rDVRSWiWmvntdLXrZ3PXoVbNztN6J8luZJab9U7Xu9b6nuunyy3FvHbg7pQgCuB8kankrk9MZBIzz1967TRbR42dPvhiC0gHBbJz759cY/Hv514aExSIiUsXQtJuG1Ttx9ef72Dwfyr0zQ5ZDOFkxGhJIAPQKRyT175PGAOO1fJ4yLbk4tOy1W3vXVlbd7XXotLHpU3zxTd1FtNJfFG/Kr331S9F06W7yR47W3t40YE4DHpyR1zg556dcdfqdfS7uWcq2TGElwiE43AYycEHAbuee3rXPrGLh2C4c8AN/dA7d8EZ/z1ro4AlpDCsm0uxJAxztB5OATkkEd/pnIrxJxlLS+zu737rb1v+p6FJq6hH31G3vSsneyve617/wCZ674dnnmEcTnygOgzuLAcnAGMg+vX9K9N06bzJYwWzsQrgj7o4PGSOfX0P5V5P4KdJMySlgxQlFbhRj7oyc4PoOcepr0eAMgaRAdynIbJwwOBnnOevB7muKbaalFXScW0kuXSyv8APq973s+1ycZSlBpJyVk4qyeiVnrurX2fnY7aBPMudiE7VXe4yQMjpnnkcEA/pXo2gz+XEEZmKuu1TuOemAv0PJCnGehPevMdLlM0RYM4KDaSTnLZwRjrjtjv1zXaaW06+WD
kRow2kc88ENxzg88j0456+rg1RhUjVpp/vHd35nZvl6OyWu2r+WpxVW5xdNuPu2XLLS6VndLT3vV/qd7bsweJZPvRsdjAf3iTjJ78E98578V3iRRTWhBXnYGGRyMdOf4vfoT+tcnaWoaNXUEl3BYPyWUEEkD044Of6g9tAY1tSAAQybcHJK4757EdD7DnHf7LAYWTjKVVwkmk0mr/AMrjr6+XzWp42IrK1N002k7e7ay1V2tfuu15jNMkiZAjHEiMFAzxgdAfrgdRyOxIFdHGkTht2FdQcx9iFGc9Me+ee/sa5OyhkSabA8wOQV5AAz7jHTGf84rp7SJ4uFcsBxhjknjA44Jx7nBAGPSuilSblZ072elrWSVmmpNW0Vn/AFcanzNKclBpJqKb5uid9Nnr6db2C2ld5igOEzgPjj6f/q5J69a3fszAq/mMwPUZ+8Twc5/Mnjj0qK2gjLMxjC55IwQN4+9tXsCSM449sGtCMsz+WyZQthfbtkHkev1zxg17MKdkk3a7tt10WnztounUHWc7OKcOVJvRO8bxbT21dmv02Cyh8+QRuhwcjPfcOQd3p+eAPU13enaUzKGRAwUcgcnrznA5A/XnvWRp9gG2McBSRjjJHb259fQY5JzXpWmwxRFVyGBU8r3JxycZzx9Bn8K+pyXCKTvVSjsoXe+vZ7LXXXTvo7cOMxXKuWCfdpXbfwt3u/vtp0d7pGZHpweFkCjJU4ycY7YGeD7Lgf72aoS6UzEIY2ORtcj5cA/xOMdschScfQV2z2wZgFY5ZSQwBG49Mbh3Hp0PX0w5bSRTmQb1Yc/Lg5PQZ/Hn0yeM8j6ytlkKlJJUoymrPmilZ7JK7/Br9NfNjjJwfMpq76Wd18L3Vr6Lv+qXl8+jLHu/dkqcgEE/MCTkcjvwR+ODXL6hZyxeZlSR/CpHK54244GfX1z1617Hc2TGRgwVBkuqjOCozty3bjIIwfUZxgczqunpIu/AL5+YAE7iM/jjHX0yO1fLYrBcqqp3p8iejaurct3F76PR6NW0voe5gse3KmmuZPl6trVwtq21fbVPTW+x5RC0qB41L8kgAg8FuvGQAMDjI59c04RzMm2RmZQDx0Bzk4BGfqOmea3rizA+6hVuQfTAyR06Yyeg74FZ8ayNmI7V9B2/2evY88D29efisSlCTjKc29Umm3dS5bfpe9trK/X7LDSjNc0Ixv15nHS3Ld31SWm3dHHahpz3DsFRmCAYK8BB1+bA556/jjrWOFvbEmEOdnyhRngA5DdRzx054xn6etR2jeS2VCuVJKkdiOOc8gk5Bz6evPG6lps7tIXQgqWKY4BGBjGBz37jnPvXNHCx5lNaprtJq++2l/LZau1tTthinJOE2rJpRTs9nHXbt6aLruYEWnLeQTMykoxJYgEYYZyWPXDd/wAuMivBfiBpJsbhnskJVyA5BOcEHPbkDkA/X8ffoL+W0t7iEjZKIyxUjKuACBx6/TPLcdM15/cWx8Q2N08iBXEr+XuUArtzt3E4AD59+nU1pKEIppNX1d/tX2dnZb9Oi17F0KlRVW204OUXy7p7apXt+Fl3vqfjB+2/8FrP4hfC/wAVlrILLb2k97bhY9/n3EKsd+CMqUzzknOc4xiv4tfix4XvdG8SazpN3bSRva30qjOWaNUdgxL7VJ7DJXt27/6IfxR8Gm70+8025SV4Lm2uYblIwBjeuDFGuGOXIBY9yPev4v8A/goR8IJ/BXxT1y7gtibW8u3WZ87GjJdiX8vYNyvkZYkfdHGa6stxUqdaNNNOLXKl151KLTXS1npo72sdmNy+OJwlSqk04apKz5ou10m7t6vRRWz1vsvxf8QWgidpBu2uux2fg71yAPY9TnPB7c5HFMimOQMCJN+5COjc9iTzgcDIPpXtHifS5FW6DxswDM0TInIA65OfmxxngHnFeTXVugDbQWK7QRGcMvPz7jg98bRjrnkda/T8ur8+HitW246tq1/dSXVpWduy6bM/DeI
MN7HETklZb2bejbTV9tWunTqjIkUKVBYqRg56DGCSRwcnjn6dcZrsPDlwP3JO1VWQgMeW4/iBzx1GByTk81yFyuGOz7uAORk/NnjOM849B+Vami3CxSxRnfht2Rn5SRjGRwAMg+3JGBmtcXTc6NSy1SvZatO19k9u3ptseBhKvJXpya05uVrS95ctno9k7aevY+zvAeoKIrVAzec2C6g7mCqAUDnAwBluBkAeo5r6/wDB2ox+Zb+W2xwiITjcDIOpcZALDPXvnp3r4P8AAWo+W0RG0B1WMlgDh2BDkMTkA4GDztI6HJx9UeE9QCeWGYLuZWUoSgZQc7vMAyQcjdxyRjPBx+O51QTqzunzOTaW12rXWjsn10tbzP1DL6nPRg4tNcsddduVK3rb/gWPr+BleKOQsXZwFuJHYbs85BwOZM4wAOPcmsHUbGRGZvnAI5B4BU92AHPTofz45n0K8hewhYkMNgMZbjHbOeSc5GDjpn8dTVADCjo29pI080lt29iOAnAyF5AAxjOfc/LUZSjUktE72eqejae+/wA2vl3vFUlJO6loly/Fa2l32e3fY84urVkW4dVJUEEBCN0XzHLEdyf4ueOM1yt7G8cjylAEwFP8Jw+c5GOny5B47gcivQ3iB8yGXcGPzAAYxn7oz0ZRg56f48xqNspnXewEUhwQ3Qlc4HcHg+2O3Xj3cNV5ZRbta61W+tvVdOujV9j5rGYdcsnb3na0dUk1ta+1109G3fU5VWUMScqkbkoykqGx0zwARz+nfBqcfdxkhZgXEgPcdQMdCewAGRzUlxa7BMAFlBIZGjGBt4ySOwHbA49+lUy2GKsW4wERflX5hgAH0HQk9e45r1YVIy+BWfZ6vy3asnr+Ft0fO1cK4yvKDjJ2Ta0te2llfVv5/mKlwwOHUxhgQBn7wzjDHuRxke4wSeaejqs3HyEEFkC8uc5yrZ6E9wPqMmqzA7irKMBi3PHk543A4JO7v0HHTiomnmCtMFQcryxG8KfQdTt9Bk8jPPFKak327O9ldpXaste176r5M5JUJ83M2ox5rJWs9FHd7/hfs2dLFI0jseQmNyliUIPo/bHYfpillmVJVRZRhto2knBbqdxPRDng4PTg8kVh29y6ruZ3YMo8xepJO75EA67vw7cDPKy3OZnQ4RZEVgXUj5Dnhv8AdIx1znqK5ZQk2laytZu1luvwSTfX/PWnRWj1STTv0b7a7/ltpc0XukWdcqVhYtllf5O2xVJ7FgdxAwfyItyXyABZZCFUjbGuANpHyc9sHn8c+lYMjARjPyR8gN0JQcnameM4+XnJ5PtVK6uUEKlABn5kEn3xHnklem/gEHPToORlOCm435opN269FpvZLZdFr6nrYelGUopNpaK7ty9NUn1877a3Rdk1OEyCNpNwjBd2PIIB+657kfQbeBznFP1O/jNuZ4tyLIA8gIzvf17HGMevTHXrwdzfl7hRHlDMreWEAUMpC8MTuwx5xkcc8VLPeu8SwMCdmFKg/IMckDsSQcZHPy49KnkVlZuNnfzslolpo113vr2aPrsJSp03Ts7JK7W7aVr9dF8vk1vR1ywj1GxuZt/KqCMcsQM5AA4789x78V5FeWn2aRDIrgnDQq+d+0E4LgnAOfevbhMJbOaONcqjqWc8BQc743znOeDtxxz65rz7XbcyrLN5LMI8qnGWb6EYI7dD6+vE80k21Kbu1dvdJ2T2avstVb8D7HKMdyP2Nab5H/DadnFe6lF/NNu33W1ItEYttlQlZQrLvU8ANgFRgHA45A49hVHV2u4tQjEys0bbWVzypBOcdd3I9f6AVf8AB4aIyZJCkyMVkU5DDgrkng9MZwT3911mN7lRcRyADDIMnkDcCvy9mB7ZPAB4qoK7ioStBSTlF6Xd0mkltbf538j7fC4iE4pxq3VuVJ730s+ut+703vpc87+IFlbRRC4VMRyRRyKVXAYEZbj24Hp1rwe+EUd0txC5BYoRuOTvAyM+/wCOfxF
e2+Pbhk021KSiVo1ETqxHC9gv90g/iMjg8V4U8iXQ+YAOvAXOCMc59MDqRj19cH1aPNGnNWcYc1rLZJqKV09eivbd7HvYaN+SMfdkuWrd6JtOL3T3XdfJH2D8Mb6K/wDDtqJnL3IV1QBd0m5MAMvOdqZ6Yzk9eK7691SK10y+hklCTxqcKASZAM47n5gDzyAM+teAfCG8doBbCQKsTM24nhMDnB5IycZ65/M161qgnubW7xGd0bO+4AgSAA5AI/vcZGD78V4dWm1Vna7baUtdF73XtutLX00Z+iYXEOXsZuXu8sXK17rlUU23tbTbf77nimp30l9c3BYb2DO6njdtBz6cEY6884yBXGX6yNIWAOAMbT1IJ7A8dgQRk9uMZGvftMb2RULKyu6sCcHAPTaR2GQTnt0zxWFeSyNIY2OxVyWfdjB4IwcZx1/unIyBjFduEjZKyUny3bervZaJu1rtaLXvqfoGHxcZYWMJO7jFNa2ckrOLg7+avdO176ow7kFmDEMoDYz0KgcEbSc56EcZGevBp0e2OULuxj5gRg7j1B/qRz7HtTbjY0gYM4DDI9/XPPQ8dunGKVUBaNgc/MATg5AI5Hqcd/pXqwWiV9k1ZfJu+1nZa6bnJDEudVVVTbaqU7K7b91q7Vlda9vysei6TcebbRkli7LtLE9DjHfPXPHXocDjmKRdsjKfv5OQeQe45/vfex+A9xNoUKGJt7ocJuWM8HHZjy3IONuOvPIzgV71gjsVByXbLH0zyVHHI5z6+/by6sE51ErKN9L21dlfrsr66W63P0qlinPD4ZylKny8rb5nfRQ1vr1+/W+lmattdfMicI0Y24Y8sSe3HBHbrkH611NuJJ3jVlbOCST1AHqffOPr6gc8FBJDmJzwW+YEtuPPQHptx3BPA9a66zuJEwEyzE5IZuWQdT1zj3GD+PA8TGUZWg0lHR2TV09r6pu2rtr0bPscqx7lKCrOKhJqOj1bdt+3lvffR79CsflxM0SuuSQCTnHT2DLnOQfT8xNHdSJCxZ22Dhovc5xyevIOPyAPdY2ikhwxcO3QIcBQB/EcEEDIwMAnBqs6qm9WLkbvToSOCwB5Zc9BzjJzXmQvJuM4t3s7JKXlp10019XrbX7KnOUJL2bbglpFbP4dm36K6bvvZMwb1/NPmEjglcHqoBA6nGRxx0zk8dM4sSCSctkhVYlgQcjByc/TsB34yMk1ragIow4MocFQVCHKkHOMjOQ2ByOcdycVV06KN5t2ThuArDKgZyWY444Ax647YrtVoU+ZxcdNN7dLW03W+263I5VUxVPnptNuL1ejei1s1b5eTOhs32pFnBRmw7g4YDrtxxww6cnv0qSe3MhPlofLLEr2zj727Awcgj8R1xzVi2sS2UiQyqihmIGBg9vfHy9CMYxjmuii08CFcgsQDjJyB074wD6ce3SvLqzgpScZJNXk97623u0r7O2z08z6VSniKLpVYxioRST1SaSSVmuuil5deluU0+3JuUgGQZHGQvC4BBIfjnPp1716xBZTm2jQuyMybGXOGCcZJB4HtzyAenUcHHHDb6lbMxJVHUyLnAyCep74IHGK9Mkv4JDBsVQ7KvmHOcqPQ468jtke/WuCXJPmbbivd0vrJtqyUbaro29XvqbYSEcPScIT99taRb5dOW7l+D1vokzq/As0Gm3YtZtpidgGYj5jzkDr2JycevpnHvGu2VrqWlfaFKNFCULqCMgjjfnn6DjJx2r5Sk1OKzukuGfyo0Knk4HGc7u+c9ieM108/wAQdtg1pFM0qyK27GWUDH3gc9fQYB/AVlRjFU5xnF/HJx2vtFJapXTeqSW/mbV4RmqeIjWVOrS0eq967V7q7d7O6t6Gvqun2jTMtsgB8tQxQAs6jOGdvTHUCsTULKf7IqQht6KSip/F7oMnsOuPwWsS01iRmWVZ3WNh+9RmycE8jByQO46jr16VrrdyvasbdJT5QZy5JYBT05wMDk5Pc1jBVEvdTjZ
632jdKz1to1bTRW+44qs3OMOWaVpJ3a1d7Lq7NNdn130OVu9OWeL9/jeV+ZH6q47NjoMHkDnJ715xc6X9g1eGdNyRxyBgUOQQTypOMlenXsccV6JJLIrgu2UZzhs8j+9gnghejdMc9KuS6TbTIlwpDFgN6qd4xnlweNvI4GTjOSSenVCvKELc1kla+127aJddtGkckqEJ2m2ptSaumrx13T2sl0b7X88vVL+XybJCwZmdC7ZwhBwCApGB9e+eaj1a3tDp8jHBVhsAUbSRxg59fX1yOetM1qW0tjCiuJvnRQeAEI9fTbzn1xniuf1G82q0CuHBGSwIIXg/l2G4Dgjt1HVhqk5S5pOLU0lZ7rRJOztqn5vW9/KPq8ITc1OPIop+98V7L3Wr2/4JySyxw38fOzyjkHPVlPBbGM+h/l67D3UuoXsG87juWNGHQ7emMAY64znr+FctOqT3Kk5Vt3XJyNvPTqQR269PQV1mn2Y3wSFyqRENk9GIxnP047//AF/Q5uWPvXvZau+2mqv0303t03vGHx1W9SmqcI0lNapu85NxtZWat10vbZmhq9i0VruZSpC8ckk56YznIPc/mDXncpYSYHBGR7+oOfbndjvxXrmqNHPp7guNxwoYEEjgg8D0HY4xwcivNfIWW4aNdxdM5A+YjGQcnpx344HvnOmGrRi+XmurK6SVlsmnva/z0N8RJ4m0ruDWmkr22fvdFa2lndKyu09ObukLje6sWP3TnnIPf8/UYyOvanFG4kCgHc5wB1J478nvyeMd/Ya16kiuyHgZOAcdgM9MZ/PnnmqcKN54O4h8ja3PBB6gcDb/AD6HIzXtU5pwXLJWtey8rdVf71+e/wAvXw98TGVpOSlGPNZJWaV7vq306FyaHy7XLAJtYA8ck4+ueT0PcZ9MHIWUqwAyTk8HkjIOBnBB4wPTk9etdbqMKLZLI5/fOufTcRgEgAnGOp571x2MMFx8xJGfpn+I5HTjOcds4xVUpc8W22k20tt1bp289/MjMVKhWouDSi4Rs0mrt8tk0la+yX6dNqFmdFAwW55HOB9eoPXPXPbmnCIg5xyTweCeT257dOOe3rUFuSi7V+bIAbsAD356HoR+ftVhZCR225x7gZ7dCR3+vvVcztvo7Lprtb9Netra2senSlTlCm6rfMoxbbflFb9W/KxYkQlVAABHUgcn69z1PT8fWmIpB5IAxnkAc9uOh79/Wpod7sEZhggnrxnHHzYPB49s+5yEmVcHGVUc7ehOPXg/McDkduveoXNezTe2yflrfS6vq1bv0R6EowlFVkmlG2l7J25bPRdL6/jqI4VuoJYjhl655565PfIz+NR+UqYBLMcDPIJOewPbGPQkHH0pqEsc9F6cgj0GT357498DoKcxIJB4bOOfcHg+3pjvx9HGMm32XTololff8H6dSJVKUlztK9rX+ab16tPVPfSzuJE4EhLcrnjnsO3T0447Crh2tIrZ2oefcgADG0dyBwOg6Z9KKABjgDPv3GCM5PGPw989jfi2MD84DLg7MjORnp6+oxwcgdQaipTT7Wdk3p0t8tFbrbotd6wspVIKEmtXpd2bTataT30Wmjt0ublptMKleGycAA5IPUnnIJHHfj2Oap3WwSKmOHYFsjPseevHtn8eauWzQi3dyzBkUhVK4EjcHqeg7HjIPGMZxlGb/SFY9AD8xycZ789foD6/SuHlTm2lL3VazT1atqtW3s+9r/d9DVqxhQpUpKDu4tSvpry25mtN1a9999Vc3Zkih0+ZVJOVO3A4ZTyM9cH1weMenFcFJuDMACV3HvwM4Pp/9f61217N5dsEVvMZ0BZunDeg6Agd/wAMenFFWaYKCcMcNjnHPOep45OAf8KvDU3BT3lzS5td9UlrrfTTRHl57NTlQilaSiovk2d1HZu6d+unm+w+EEKBgnkK3fGfTH6/j0734VZJBtBIzgEnpnHsBjnn0yee9X7OziE9vG7KYnZSTnC/N3ZQD0we4xX
Q6lpdva3Mf2dla3KIWeMZBYjLA8AccZ6dDjg8Y1ayi+WUWr8z6q+i3T16va2z7nNSwVR04NTinCUE01aX2WtbaKy1d7pLS2xe0rUP7LgilcgKQQwxgYxyAcnB56H164qOXXWubh2jcAyAhCcfKTkHJyME89ieM8gnOBrNzFDFHHCckAZGMAn0xnGeO5A468ZpfD1tFeXCmbksSVVSM9sZGcADv1z0PcV5lXDQkpVJJ810011WjW610Wt7WWl+3qfXcRUrRwNGcIqnGMpz59LcsU1va9l2/FXPVPC11diJo2kOMPgnja4xnJPUdx079ecxa/f3NppV7ljuuw6hhkZQ8Hb6Fh29vQUzzI9Ot4Y5MIZHDOVYjag4UEYPynkHA5OfTjmvGmpma0jjhyExuI784GR3646jp+vnUMNGpiYvkjyc6bauleLV730a8vR+Z7c8Q6OBm5VZTVOndy5k2uaK0fXl+enz18wjnVROhBbe27IGdvPf6c5zn88Guy0NY7DSZrqQbmnY4X0ycAgY5I9CRj1zjPFWFtLdXsNug3GaQKQD13Hnn1HfjI7+ld74rt5dJ02yt0BV1A+YDAOcdehznpn+mD9JJ0706Kkk6k4ytdXUYtdbWSutFvbv1/PoVGqdXETi+Wjzyi7PXnkm33krO1vWy7eea5MrXhdJCEI52/dB74JP3gRzxwecenPTy7/kUl2ZNqsxGARjGfTnvg44/G1dyF23S7m7YPQnHP8AL/POMmZC08EKZEk7hSg52qxGMjnk46cgAHnk19TgIctNNxuuVtO29ktG99Hr53V0flvEmNvVq8qcY1HeDbd7yt9nVLZ73s22i3rss1p4ZigIBkmdvMwScKcYwfQDPAABwB715KFy2SrDP93Ocd+eoyPYZ9fX07x5clGtdOXaPKt4/MAJGWI5bgZzx056444NeahmYkbsAgKcDGWGcc9up9ep7Zr6jKo8uH1i1Kc22kttElfze+1j+Q/FbHfWc6jTV7UMJSpRXM2rwcdUrXTe+nW71LUYQ7cAgr8oJHz5AByOeeo/yK3LUB2Q4VsYEgI+ZdncnJwfXk9O9YsWMLyqj5QU7ZOdzDPOT3Pvge25ZRlHLplVGCeCwZW5JBPZsHr29uK6cU7Qdt7Nr00X4/PbV7Jfm2AalNNr3bpPo3tZ23aS69VZbnXwxZMbRKhRsPwcE7uCNuPvHuc4HBHXA9H8M3E8DKWLKPMwEHJwcd+MbQB14PzdAMVwemDcqjsGAQj5m5ydoPGGGOOQME+uK9c0OxgeFJZIxHuwSQcMucAllAGSOMjjHvXxWYSVnFq9m0k/lq7vrfdem591hIzS9pSbV4qTik1FL3V0XVdG7p6W6HtHh6/t7iFVnAVgNpK9Sw7H2wSTk9cc167o+jxXDQzxFnjxuZdudo43d8+y8HGc8V4Po1l9luIxErbWIZQ52gqewbOSDnPbB44NfQ/hTUhbiJJJUAWMxOCnBZsYbIJJYHqMdxzxXgTVk+Wy1Sttre+nTra3na2qPosul7WLjUVpQcWvsubdr6Nq6s9e+vqejaXs2x2wURqpA2FAnA7jrksT0Oen0r0W305biJYvOWE8bIywPOPvc4xz82eevvz5sswa4hZM5Kht7LxjuEPIByTgHnnrk5rtLeK5eexdJN5f5sKSMYx8p5JyD9cnGOc1zNW1d30t0vpvfa2n3O1tz08VKzi+VQSSd1s2uW2ndeT0u7ao6waZco3lTTCXy1Hlhn3Fuh+XngcjJAprW80fmKYAm0nBIztOPvgngKe/XHbNdBLp08ItJZJWd5Iw+EyQgwMRuexHOc9u5pl2FNo1szF76X/UKCflRvUgfKRxkYzkcHHNaQezabm0kk2lGztdtPayWu7s7o+ar1Ve7fNF33t7r0fRK2yuvOzOOmuL22t5I3ZllkRlidXBGDjngcgAcY9eSa5mZ1mzNdHzVs42cyE7WjWMZGW6H2555GDxnptRTyGjiuUkEsCK3zDlgeA
QBwenXnrya8f+O3izTvCPw01nVZLhbUyWjJE4BDy3BDBY/vAlsnHbr3xXtZfSlWrRpxhedRxV4qNnz2Xd9G9tL67tHwfFGY08Hgq2IqTjTp0Izm5X5YpJRlvdvZbaO/TS5+aH7UfxAk8R+N7rT7e++0Wdgwhwg2rDJGCGjYgn5RwNwxnqAK+arO9dY3VyM9sElQe2O3vj1znHahqOqT61qV1fzktNc3EkskjHO8M5IJzkdOmSe+DycinGThjwCCMEDGMkntj+Edc9x0r9YwmDhhsPSptWcYxUlZX5mk29dttN1+Z/D+d5pWzXNsXj3P3atabhe7Sp6KEIu+ySSvfX5XeibubdnjHIZsHgHnOe2MnnB68Z61OZmfa7NuQqSB16dxgjGPTB69eMViuxyCC3PUZ6Hpg+nQdDjJqZGbHXAHJXsNvXB9D6/ga6Xh1eLTSa6q97O3ey6dtOh5ClyvmlfW6TWqu7JtJq7SVn21eux1FhOigICQwIYHGCQBnn3x6denOK1nmRiuWwOMKejE5AHHX14+vvXFQXEiBcMcY7nB2/XHTsepz9K1hdsQuMEck5+9yOx65x7D2rF4dqbte97vvfTfRr+na2wnPmj8Tbi9bqz3W72a17dt9jeYoAc7RuHyjIOeOp4GDyeO351k3KLnOMAjo/Pzf0Hp3z39HJckty45A/hyVHPAJx+I+nU9VmKyqwyCQAM45OD1bPc5x/njppR5fJ6PXdrTayem99b32fV8snJvmkkuultNVs/XunuYc7HcTnBbI2f3enOOhznBwRgDrxyVals3ZsbTjOAx+9n04Py9sHJI9xzRXTFxs72vfqvJeRXJVlqoyafbRdPPz/AD7Hmj+Fr9XLFCw3BcEEBt2evqB3HHYccZpSaHNCzBon5IUu6naA3dR0xx746DrX6UXnwYtXWXy7RJsM2xQu3K4G85OcDkFT83IIAGePOtX+EaRJhLdH2naCzDYrn/lgh24JHQn2x9PHjxFSuly2u0tUtE7X1V9k7NprXrtfGliqVZqL0cot6NWuknpaya+abt6nwY+nsBJt8wBSCu9SoIwV3L6HsF6tzg9aptCUjI2/MCcZGTnJIPpjnI5x/OvrrU/hskUEgkikSU5CxCMHCjqQwOGHIwcAHnpgZ801nwIlrIghQPj5N5OAwOcAr0B+U5GTg+uQa2o8R4ec1GTWrVrvT7Oje9lsttdm7M9CnhHWipR6pNJtXtZLutNL3b07anhIR0Clgvykg7VUFi3TI6k/Xp+OKViFXiM7+2BnB/2uRgHnkfkK9GufCjQbpAryOGwo2bxgnBKjIy3bPbn6Vky+GJojlgVU8Ej5kBbJzuB6r0IweTwTmvTp5vg571Euj38nrs7Xe/p2Jlga0Pijd7pKS1u1orX2vqu3lq+JCSMVBA2kYYZ6Nn0OP6/hVpCCfmBJChd3OB1AAHHB5+uMjocdEvh6Yhtysqrgqx4D++eScEDIIHX0xSNo10oVREdwb7wAUZ9DnqfU8Hp8vGK6Y5lhnypTjLW29tPda8tNG/T5vP6tWur03Fx1221S1V9U7rW/rsmse23KSg+bpkcbg3p7kce/TOOK2guQin7oILAZ3KT1z0JA79aqNYSwPnkMJMbiflB77/Q/TNWY8rvG795gEj+E+y4xgc/yJ5rpVWNRxcJKSfVNdlvq0vvtv8u3DQcFyN2vK7d7WldbK1unR2N3TpQjqrkFRn05zjYAO/fPJ7nB5z6bo84Lw/MCkihZEwG5OBnGQQTx+HXmvKLZ3VlYq/mZGDkcKcY7cDrg9cfXFdzpEsiynnywGX92DkPk8AtzkD+I/d6ZrzcdT0vto7PrZct1t39fTq/oMHUtyx5rSdlZ/wDbq691o3959ZeBNQhEsajPmpGARkfMc/KAOcgjPzY+UZ45r6m0N0mthIZNp2HYgbcFIxhs4+6fXgHGDzXw74QvJFnjTAjcbI1YvjCN0VXwflYg5bHBwAOlfWvhK9822QStsCM
qsVbLOAMjY3dW5yDknB6HBry4TSjy83vayt113eqd+l0tdHqfS4N3um1pJJXekbKC066NvX0sz3nQpFjljZ0y5hD+YzbSd/RR1wGA4IHGMc7sV7l4Z1EiRXwRGQFkAYOysuNoA4wvJ3HJzx6HPz5pNwJZ1kBdmjUxruACKgH8IzyynAXjAyeeter6AXWKKSJ/LdXUgkZDFCcBh33ZyM4HHOa87HU4VItu1mmls9LLo7Xb/O6sj6TCuMbfafu2T1V7q1vut0t11Wv1L4f1CSSaMvhMbVim7SA53KOmQvpxnPuc+66PIJlsppG3REq0mE2F3Q9OGbGcjPqOfevmnw1cu625kkZWIBnyvBzjoP4SBkrjJ+nWvefDs8ImQLI5gChlVjtUbeMtkcAnt6Y5zivi8dBK6i1Fxvs7726dL/O21u/uU2re9Pkurcre/wAL93XRaNW06bO7PpjQL3ZDa5kKIVJXoDhyoVCP7xxg9OnPavVNHETypI84j2hj5RQ42+oOR9R15/HHhGizJLbQbkZoxJtTJ5DE5zx1Axxjpz+Pt+iukUCSyur4jVVQ/wARY49Tkj0/UGvhcfTUU0007tPvZNO9tU+mtrr8Tvw84txp680WveSvZaK0u6d0/nfyPQtJlha4FuJVkBDsB0dW+U468euMnPbNbTuks6MhLGMD2y4yCAB39P6gVx2n3EYuQw+9IflOMBDgDHsx6AdM8V3GmmOa58hQSXIMjdQD1BHXPf16V8/UpO+t1s+mztd2VnorW/NI9CLtFrVNPdRTVtNN1b5btbHeeFXaN13EkFTgM2SpxkjOMgHjcMHgDHSvbNIX7XY5fYcHG4L8wX09vf1I6dM+KadHEJ0ghVgFBMmepPcZx3wOPavafDC7Y/LJLKwyykbck4GDnIJAHTsD1pUI2mqaXNF35m9LdbPW2q3HUn7inZ3iotLls+iundrrrfp6q3WWtrJbWrJCrOZW3BwPl2noCRznK8ZHJ712OkW8lygt/nhn+U+YoxwD908/xEjv14rLsXyqpyyrnK5zgMRyPcY6DPGcZ613mjWkcZF0XBJUnyj94nGBgDqRx6V7OBwXPKNtKUF70E9JPR3VnumrW3Vzgr4r3W+Vc907q7b2XvPlt+X6nSxyGFYYyF3Im12YcuezDnrnrzx09K6TT45Z4uCG6HGMj04XI5Hfr0+tcfCHMcnnZU7j5RPUZIwSe3vnnpz3rpdNvvsaLn98VA4Ujv03dcc9eD719PRanJOrzRhFJPVqyi0tVd+Xl3PMnBqPJBvmumrPo7W621v1fXZ6M7HS7fyBL5qh9uW5AI56gHnIHHUd8e1TxRNJL5pZoE37UIHCgdN3XnGfTqOg4qpY6lHckjBVj1A5HX14+o7H2rpLOKOQAsMpgjaTgEjJJHU5PGOPm/CvSp8tTljTk+RWaSVm9Va/W9tHr03uR78G+dRUrJu6u3te13Zvy8m+19OO3UIjcSEKDuUDqBnJ5Ax65x6extQRhihQAE4yVHpjIIIJzz19fpUUGRtRSCpGFDZ4OeARg4J5479a2rO0YYYg7zgk45I4wQM9eeDnHPAPWvUoQc5QjGnNtNJ201Vku61dn+pi58i1k2mrrtbS6aejS6P0NiziYRoQpBXk9OenQAdenQj8O+zbXLW8g3H5eB1yoU9c+/6DjrxVW1hmUAeWwUHPBwSuepPPp3H8q2Y9Pjk2yHBU4yjHjcQc/UDqc4/nX1FLD1ZKm46OnGLd420STeuz6fffe5wSqQbd3F3atZu9movz7vX9XZ9ZpZiuUxkcgr83IAODuB/TgjHGeOK6aLT4GRQpBDDhiQSWP3u3TgceneuAt43smwTmElfmV+RnqOmAARx17jANdXBrEbIEVtpBVduOnbcBn5j6kbeoAxxX2OXY+mqcaVZQjOHKr3TutNErW9G3r0seVXo1JSc6TaXlstu27s9Nn8t26jZxpvBjV2HyYHdeRkDqpHGCT39ck8FPa7neFX5ORuX
qvqRnvnHGevX0rt7i6xLlhvLg7lyAQP73VhznB5/HvXPXEIlkdowUJBYAcMp5yMf54xxivMzmlSxMuejFKVmpKOjleztJ6b7pJ6XPRy6NWFlKTS5VJa6X91dVdJX7ficRdaZHGSSwdz8uVHUkHkg9Cfp7VyF3arFNuAYNnjjGMcjAx2/+v2wfQbtJYZCWCsTxlh2PpnBGOecY5POKoSafDcr5jRjLKTg8HHfb/dHfp6da+IrZfTqucVFxqQafI9201pfW/dK21t9T7LB4mdFR9paVOSUbJ+Su/JdXfVWvZdMKxdJBGvWQckYwef8AEcD06e1Sajpi3KEKgDbcHA6k56ex79ecnrmrUNisEm5Efa2Ru9Bk4XOMAfnj2GRXRJboFB2nH8W4g4Ixk56AfTjOeM1FLBStyONtU7tK9/d18v8Agpbs7KldQlGcJOXMr7q11Z2tfbo79DxTUPDpxLIqAyJkMG4GOMlvX2PpxzXMJpsgSZUjCBwVI24B6d/7393oTk8E17nqtskJlZU3oRgqvzEn0AA788A9VrzW+CW0wkiLPFMB5sbrhojyGXJ5z6HHOT0xXPistUIttvvZa2+F+9stenRad0duGxM6sZPlS00dlulGy+56/h1Z89+N/DUhV9sSzSI3mbjgq0WDu3DB+YA85PPfGK/ns/4Kqfs022oaIvjOx0tme4Ky3LKhAi4OVyF+UjJ2rg5JIyK/pf1O3N5Nd2wZ1QYkhIGdobn7xx8rnhjjggcdK+Q/2iPAFh8QfBmvaBqcMMtyunSrFHMAVaUI3kGE7SPMQ7txHTPTjFeO6PsKkKqTvFxdnqrpxbvbVX+S2PpcJjJqMaNRJxmoqfVrm5UrW0spXu+9tNj/ADkfir4Ul8Oa1dWhV44JMrCsq7TGjbsLJnlXbHTJxgZPU18zahZ/Z5pldQDMG2KigRyA9HUZO0YByo6kEc1+xP7bvwT1PwX4w1+K+sTDJZ3kot3I3ExFmKsJdoDbuNuEwMFeK/J/xBp8hP72J/NhaVWRPvRcjA7bQOxyep4PSvs8rxqXKnK3NZaNO9uXVPya19er1PhuJ8lTnUqxg5QcZSptppN2jzJpWUtdU03brZK55RdIqFmO9cqCqEcH0O739/pzUNozJOjAHcW3KAO3f8QAQeMmtK+jbaflJAOCp/gOfuk45UZ5xkmshlaJiFLAowDbTlR0IwMfKWz0zyB2wM/U8yklrrJP8k9+u+/y7H5BUpOhWs4rSSe6VkmtNE0td7/ftb3bwXfiK6SRnVY/lkEbt0C9W29x6LgFT3Pf658JahFNFCzBj5iApubI3fwsR0AXnb16nPWvgjwvqDwTWyMXZmkA4bc20YJIGBwM8k47YHHP1n4J1NpVtxujChhiOQ/MgPUFQMMD0J9vpX53xDg2pyny9U00knbRLpq9N93t5n3/AA/ilWpxjdqSSjbSy+H3mtW/+H111+4PBWpBLYQhTPcYZBERiIjsysSQuOuSDjivSEiiltCiHe2XAVTkAttwykn7uc9yOMHivB/Bt4Q4H8TITv8AuqobHy8fdDdhk8j1ya9y0lQ8G5i6pDjLI2AGkziNsZ+XI4P51+aVf3dZ7XdrbNNuzS3vZ+unfSx9PXoScedSbTauulvdWnVpadtO+pQNu2G3AGRQGVnbBeM/dUrgnkAgc5H61zWoLGglOzch5ZeSQyjspyOpP3TyMnjkDr7neHlbywXVhvdjwFb7yrxw2ACO+QVx6Y9xaSSRsVVZXclo2CgIQOWznv2YZOTjp0ruo1VFJt2ezSem6V03116aLdHk1aEnCUeVerSlta6V73s0r207XOL27c5DBGDEx7Ruy+Nu1QcjpgDn0z64r27QsN4HVirgYIU/xH1YdCcZ9cd+tkhjJxsbey+ZgtjBTduB4PQngf8A1ycKULMisJF3l2jKAbmAUgYAyBkDOQCR6EkYruhWt8Mn095q7d7bW83rr+h8/icLpeau1KK021UdXpraztbVX7u
y5y7QwuWd2AulBj5LR8HJJXGQM8gd/wC9wRVUuixYd8puJA5wzfwhexxz69RzV+9jaVgRki2QgIFziMEY47lRuyffjnpgzlyrBFZiqsVVzt8tSeWxg46AgntngYr0YTvCzd72Vr90n08no3fzd9TzKtCMZOM4t31jrdXVtOjXTS17fchGaCNmEzoxkDgsN2ASSAQT3z1/Mc86xlVYSZJQzeWWWQHdndjafwOcD+hrnt2QARIZpWVV3NwSeoUEH5McA/U59HJcrKpB+8pMaR9RtT73TqMke345qnGKtdWvZ7u23na+z+/tvjGjG13q722vbrdLTz2sr9NDZM8ZC4lWS5AGYx932LdBkDr9QTzVGW7ikEiu0LTjOwDOUMY+7kA4B3ZzjjHPSskXRS4lG1fm/hIwQ7jAZCDkqduWx0wN2aqh5k+0ALkMShZgOS3VRk8Fuu7JzjPGM0KK5L2erbT0022dutt9PnY9GhTUXHXW6kuq0s0tdPW+5z098ILsiTeyqxCMBvYbOeACMKM8Y6YGM1rxzR3EiS4kRN6vgrtjIPB+XJywz6+4zXMa8HjlUxpiRpAqtHkctjO8HgcdTzu9utammXjgiKYEqoxggZXAAyeOpPf2BzmpjBuPPJxja8bN7aKzsr7u/TRre90fSUHKSjN6WjZNa6aaLb8d7PXU6V5DapcMFPltICquMISM5c46k5BxgHpVFY/tjMXTCybuQNo7bs9RtAxnt6Gq+o3m+3OwEQxLljn7zkckD2wuDnuPYVi6Tq4WdIyxdOeGySh+bIP1PcjPpjpWapOSSaVr3v2uo6N7drXd+7PWoOUkuVJvq30dr6dV6dVqrm5Klrp9vdSIFRYoS5RRguR99iRnJyR/iM1zdvdW99a3KMQzSI5hOcBH7b+cEnPP/wBeqPi7VniSbygCHjZNo+4qydcZP3zjqQQMZAwQa8+0fWkik8lg53goFfkoWPO0ejZJz78citIUmp3Ste3ZpK6tsrpq13vr0PqcuqSjGMZS5XJqyT91pJOWl9NfXz3sZ3iqIPY3CFfMeIsSQeck849h+JP414VIjhnC/KgbDYJ3DBwQSevt2Jr6N1iyeeGZYgGWRNxCggDdkk56n39M+/PhV/YvbTTb1wAeh7HJxkdOMcZ45OBzXpUo3g9t7Prflad0rLo2tlqfdYOu5xptyb5EuXWz1ts1fZ7q60VvT0b4Z6lHZXDIXKp8wyOMKTghwTyTjB5wMc9sfQOo+JLV9N8iN0QgENtAILDqxYnndnoOBjvkZ+NtK1BrJ3be6g55UcAfxDIxyeM8fqa1z4nvJAY1ndI3OPLXOM85Kgk+oPft05rjqZfOdaTTcU3Fuz3+G6a2va/q97H2mAzCjyU1UsuWLgtrJuybkr3v2Xffz7q9lgutXkkRgys43kEEdwVJGMgnr044xXPa1bsk5VEAj5KkdBxnGOT64z06jPavo2oRyXsf3t2SGBPDHjknOS3OTn065xW3riK8ayxuRHIArN33Drxzxj34OOvNbRwzoSShzO0UuZq6vondJpa9NF3vrc+zwWNhVo0rSbjblWumnKk423aXfRteRwsoCEFuSSRnnHoMdPxxz2NXLICQBFLGTJIOcgYHJORkDHTrVO8ALgIAQMA7uME5yxGckngk8n164pdPkeObcMHBwV6gk4Gc5/LnOc564rrXw6qzW+lrrt330dns9Wz18JX5cVGEklHZtppu7iur1+/pZefo+mxyxxQyIjbX+RmfnA5PGO3YDt61oXFnJMhkWEgbiVcjAKqMSBTzhuh4HPbpVbT43ktodu5toLuobCopxnI4Gfft1GRjPSRvthjhBAV/lBJ3HLfe3HG4sfU9R3ryK8pXbbi0m+a1k1e3e13p21s/n+i4OcJ0Ywm1LZqUmrcqSsrJ3ejt/wAGx56y+XOOu0ORhgQR9BzkjnPHToABXT2d28apsG7C7eeCM9uScj2IOOhz0GZrdqLSZHUFFYgqMZDDoDg/MAc
jHI79OBV+xKS+UFKrkKxBx5jc9cc8epzkcD6+bXqc1NVJarZO2iUbNtp3V27Nef4+jlcqscRVjBt3cfZqbdre7blvZK23TrrvfudHcuhUuGaQNlAM7RkZbnGDzx3PPFbE8cSRbw67ypGDwS3cHpg+2DxjpXN2r+Q+0Ao5HDdFZSCBkZ4zg8+nHvU9zduybSqkqykMMk5AP646kZzgYyRx4vI3NzUpK70jok1o207/AI2eu17afpWDxcKdGnCtUSqx01d29rO19u7109DndTX5mbJILEAE8DkjGOeBkHqOap2N49sjoFyHLLgryGyMEHGQfTkfyrR1MARx7MvuBLEngE/wjjOQeuf1JrBiR3kWNeCWyBydp6An6e3NdcL+zu1zdru+1n06K2l99txzr1aGI9rGblOolGnGyaSfL0u0tGn5fid9o2qTREh2BjOc8DIzjr3PPXqPXmult9SlbdEuGjfIBK8oW79Rg/y/SvPEt5oIAcgO+QSeFx3Htn8fYcVsaZKfJUliHL84OSMHp+OCOO2PQE+bXpQf7xLVvVbJ7a976NaPXWy7+7hcxxEPZ0a8JuVS+qaaSfI02t09Xa3ma0qzRzxktkSuNrHHIJ9scfiPz5r0C3SOE6fIySbJQUZ3OFLfLwPbnnj1Ga5C2sbnUZ7aGIAkuu0gYAUHoeRzyOef1r0nxPpN7pujWbJhLiARMAU4kUZ3lufQj5h144A4rzazpycowpNPkjzTtdQd0m+99Ena+3mejCvCi404tr211GU2lZvlT67bu3V/e+C8URTRZkjIeNSSQGyPnAJxxhiAOuQenFc5pFw7zbCSUBy46HII4PByOT+R4rsdTuLS70xIm4mCrhM/OW9B689RgduOuOJgWS1mIK/K24cAbgRjB4JIGPvHPOOvGa6Ixg6cGuVycY2aj1SWtrvW+qbd2cVXnWKU5TlKk42bT3fuqKfyvt6HbwTQCRUEhVjjeC2enRl6cHoR24PrjsY9YtY9Ne3gcI8kTRuSQCwPop6Dtx/SvHhdFLtAwbftypH3TnPyk5PHT0571ce8JmWMq/K4BXojH1OOQMA4PBOeRWNSi5OF72irtpryTbWt1ZadF0Rp9apSlGU7qELKMddJK26a2/Kz1Z2lsks5MCkOP3knK52rgeoAPXOR6cgVo21td21rcyqxdY+SmSAxIJA54AA/n785OlXi28kIfLLsyWIXIz1XPv6H169K09Y1+GO2aGAKnmjYzuCBggjn1JGQB3OfSuB0qrlGMIppzTvbVpW1afR6d+vkctbEyppxptOMvek00mrW01tvrva/U8i17UJTduglPlK5wvdTkZJJPJB746/rVsbxHl2lyq7TuRvmL+hU54BwR374565usPEb+baxkibb8xJxkkkkD0OMEcdeucZzxcLCyuMqSwRX/hUZHUcYP9fWvrKOEi6MFypScItNJbtJ6pryeyv17Hz/APaVSGKVSU26XNF25muW9kkm9NHe6s0vz6GSKOW8jKgoFYZJyQc54HGDnHtjrg5r0B7eMadAyrtyFQgHaWkwcg9Qc/Lnkce544OGVZUgIxK3ytvyBuJ52+g6jB6ADnJNdbZySXcSI5dCrfInOFOBkA9CePx+m2sK1OpayaSUfXrvq099ddrn02Aq0b10+VOooSjfXpH4W9OZ9bPW1/SOfbDbtHJIUyMNGxG7BGeBj5geMHjq2DjisjQ1EVxeSlVcHOCeeOckZ56/n7c1Y1cSwko4JcdCepUn5S35cCtzSrCOLS2upBltpJJGCy4GCM9e+B2/CuV2hTd5Nyk7tre0Wm7Ps0ktXqtDsptSqXUHGFJ2kmneSaV3ro7ry/yPMdZVpL+Urwu7PfAJycYzyeD059jUNnCrzxhmK4IwT3IGc4wMAnOR2PrV/UIgbiZ8MCztt3HAAzwCMfePf0FYlvc7b2INgKJFBwc5G4g5xjPHHXpnrg17FCV6KUb+7TT3Wza63evS9/lqeLVq0qeKlKal+9q
Wio/Cvh5Vs0n3tZ9u5sauAiKTuADbR1OenIOQMenBHv0xy8rrkbeT059uPrx0xn+tejeJrOL7FaXKo2xwhTaNvGOjAZ554P1wDmvN5lAkwoI2tk/KeAehx9SePw7114WanDW6abTW9rJP8b21a6dtfPzOFak4qXK4y5LO95pPls1ftZ6d9e5es35wSec5UjqOwJIPpnnjnGR1q2m1n5wDuwB1OMjAHXnGcnnrj1FU7d1yA3AxhSe5A9Ccc8jAOMmti0RJWwOWB/MDJY4PbgYPIPPAwc6N91K+jtpZtW23N8JCNSFGLqqTjLmd7Xt7t09dLdH3ZM0flqrDA3DK4JOF5O0+h59z7cc1ZGcsOBjOMgnqexzjP4DnGT7WriXeAq4Uj5TjgH6+mD1B/LHWkEPUEbh1Pp+nXHY8nOR60RUl7zer6X1v067/ANLrb0a9R80KdBuUdFKKs43Siuuuln1asSowJ2EZ78eo785ODnoccemBUcjEnJblRz6gdOmOv48cdKRsoxwCrZ4Y8HHcjnA/M9TyBTCwbPoCST3P1PTjPYcZPXFdcbWut7aX06K97rW/fe/QxnUcounNcknpqrdunl623tfoqEhS3XjGT0GcnnP97nA5zznnkIshDbtuCOAR0yD9PwORjjr2p4ZWQKpHzYJx/Ee2Sewzxz+fSiJDuDZG0nBB6/5JHH5ZxxWVRJ6Nfjurqz/DT9LkxVS1FUp8yi03bZp25rW/4F7vobBuN1vEig5x85/hwcfNjtzjOSORge9VWPXaxXIA6ZAPr2+voBnHaoHnKq6kjGQDwM4/TqMY46dPWrtg0b3MEeSyOwyOijOBluTkcY9Tj61zOEk0rJ7/AC202ts7fKzdz2liFWcaVSajLlildR0d0tE99V/nbUrSXBVXjGcEEDOcgk9+3GOPX8KpWqkTbznJ+YA89MDjPVv6cdeK39Ztlt7hY1ARWVXUrwDnkYHbkdP68VkMfKA+U7+hJ6bckcjH+eeMnhX+y1aSbv3aVmnppre+3bsclSEoV1LEVE3QcVCVrRcZW5dNNbaPR+fQtPNIhLIGLDoCOmMcjoQMjnqf1q/HqNw1s8cm5gQSBjJBGcemepH/AOqsn7QJAU2gE/NnoVIzjkdhj2Aycd6mUOYchiG7OOAQOTyc+3UDt15rGUU7vljdXTbs9Ha6Wi0dvvT3Z0QrSg6k41XUhUilaKSV9LJbJ9k1bW/axmTSSyElwRyQpPLLk4555PoOPyOT3PgaOP8AtCJ5wNgyGYnJCkfMeMcAgbhknAz3Irzu5kkDEEgfMcjgbSPT1LH6+ma7jwf5sl3bMCdqh96qQGYgDg5H3TknOe3Q8Gsq1K1GTuoqzs9mkrPrs7O1uj19OTKsTCeYwi1JyTUZc1tVZXUkut3p2t8n3Hix1a6hS0RmRADnJAcjso5wo9c8E8YzXAa9LdeQu+NlLDbhlyBjockYB5GDz+ODXqVvptxqupRrschXVI8DKAZ53ZIwwHH45FdF4r8K29vo9zNdW6qUXCDI8wsPQ4zznpgEc56ivAeOoYSrRozs5Tlrsmm2tN7NPq0tLelvdzF80akKEmlUgoytrFWsuibXra/36eD+C1WPVYJJEDOHyiMMg4IwD2GOuc9ScdK7j4hK0sO9Y9o2KRgkgEg9OBjp1wOntXLaPLFbXkR8sJ5Upy/IwVbAXPqenT2IJPHZ+IJRqGi3MwILqvIHJG1SVxgjAx39hxXVUVSWPw9WEfc927bVkrxe707t9fmzxVCNLATpO03GnOM3vfVOzute3S2tkmfO944clASvl9cZ4bPX1ye+P8KZpcKPqMTuwLqRIwYcbF6HJByB6frgYqnMWaeVGyqhmySACSCfvfhx+I5GObumxh7wRuzkyAhDH/cA5GR1Hc9MckV+iYZxhRhFNfDzO2mrUden4PW2/U/AM/xXtqlSWsJUpuKg78q5dmkt7avpfTUxfEOlajqc15q0ah7eGRkkkIJDBePlHXHTd6E
9K4Mk4aM8HcFBUc7hgBsemc5HsAeK+x/DyeHT4aPh2+SN7nUo5bm2lY/OLluiOSM/KeVBJJyV7A184eJ/CV3o97eGGJ3jV3c7V+ULnJ8s4Ixz2x9fT1MqzKnKVShJcnI+WnKScef4Umm7pp7aXXqfyz4gZbVq16mYUU6tSVSTqpPmcY6W5bapWVt2tOxxkaIZAdwO0ZbGS25eMjrwvJ/ma6O2nYCJI9oUcOcYLsAQMkjgA9s+31522GwklDtyygEd2H3WweAe5GR6cDnpdNjDpk7EKDdgEDBByAp7nI5wATxnFejipR5G1drRK+t7JJ3+G+2jWn3H51l3O5rlXK5OzuveS93o9Fu9X3skju9A3gxoCMljvwOmOjbTg7l559/xr1jSWjTYXuDlxhVYZQtxwp/vHnjAx+ZryLRt29CASOQZPubc8bcdzngHufrx6lo8knnRRTRblkKlMn5WUfQfKcYOe/OQDXxePg5Tk009nuk2lbbd7ei066H32DcvZwj7zXLZyvtpHe2m+vk9D2XR1/498RtKGA2Mx3le4KnHC+3b9K9b0S1lXaY4z8wDkZDDA789MHp0IOK8i0WHbtjDEqoZQuciMcfKMAbc/jXs/hyNniiiVnG0Ak/88+mV7cH169ceh8SatJtvVN+70t3bX5Xv1PosHeTTcYuySve7Tsu777K1l16nr2lWzpaRySMJOjHoSp/hBHfGOc+/1r0Dwvf21vPiVVfaciNxkox7gHoBjhQTjvknNeT2UlxC4iDr+7ILxZyTH1LAHgnryBwT16V6Bo6W01utzGsjTM+0ruw6t/ex6HjnuPoazSbd91e+6fKrrW3dJ309F3OqvJqm1OL5XbRSTSfu2el9kt9W0td0fQVlcadc6fO0rIJSjCNCAxR1GFwAeO/Tjg8cADkCIY3nW4+ad43eJxngD7rKB09wSecZ44rlEvbiFHFtv+0sp25JHOdpO08ZAJ6HkHttOd3T9P1C6szKJs3atvIfkGIjMu/n5c5GOe2e9dMaN+W7TTtZLeS93V/f5d073Z8ljIex9o+ZyjJ80btu3w3d9FZp2fW1t9zkdWiv9Z1Cww3mrEojlCnDlEPznHG4AYIzjBwOSa/Mn9tv4g2mr64vgTS7ndZ6asZu44s4NxHu3JIAwAw2OOQc9TxX6WeNtdtfAfhTxD4kusRrp9lMEkdhtaVkYFoyM7juA5HQdeDx+EniLVL/AMaeI9a8SX4Z31C+nljZs4aIudvXnp1zj06/KPtchwkOaOIcbRopPom5Xi0o79HtstV1bX84eLfEEqWF/sujUaq4pqU7PVUVaM4tR1SeiV79TytMrGYlG1V6nGBnk4BPT1A64zj1p0TsuQzZDLjkZUE9eRn0x9MDHFbGqW0Ucj+WvyjkKOoPIJK44/DOc8cisqOJT35GeGJ6AjJ6cA9u4I+avu6U+eCnu5K1n1Vt732t6d72TR/NdlFKDstW+XVJa2tb06263vbdyKSQSoPzYLDpt7AHjvjtx2ANS7VO9Sp6huDwBxyQP6fn1puzqEz0H5cZIz0znj8KHOwBuQxHr83GevqMEjHTPB6V0x1/TW9+2tvz+/qZOVvyu7/qtlprfa9n0HFkUbecYAyAecc/5z/SpopVzhT3JXd19Mn9f/rDpRyxwSwGccHP05HQ9CTyP500OEbrjBIHfI/mR78bsHtVqL1XS26vrtfW2t1fTa/QLvTW17XT0v277Pp9+m+0rgMCPlbBHzA85PXknr78jnrV23lYO6ktgjp/CAAOfQdv04rnvP5xuDDGPQg+3PbtV22nYMo5Axjk4PGc9eOc/oKr2bd7auy3S2Vnvul81rbtrrFJt3V2rOz9Vft5WWi27HWW7x7QG6k4PPB9DjHGMdPcUVn21yjHnnAzjPU4x2GccHPQgYx1orJwabtzL/De39fqbJx/lfTdu+yv+N+5+tQt4skMLaQBdpZsAx59fViccdf5VxWvaO07SqI1ZGUsFCYCscZKnP0
wQO/XkV49Y/GVTO6PIDnd8u4YwvAKd2DE5B424Ocg4rpW+KNndWuwXqq7R+X0BkiPGfnB6npjIJxzjHP5XUxFVWkqVndNPW6V1qnFPXXtrufG4eFanNOSne3uPTkt7q1fzWmzfTtzetaTLHCQibGRSsqt8zKB0UjaMDGcMCfxJrxvWtCLSO32eOSJySquOScjJzwU/wBk4PfgEDHrGseN4JSyoI3kcfvBIQBsXJ/Et6ev0NcpL4gsbuOR5I1EkoyyMAzBjkYHTgD8wc8isHWr2Uoweslqr8yd1forJ97W1ep9fleJVJxvK7UeW9+ZNtrSKbtpZX0Wmx5FNoaeWiLGFKPjCkFjnogbsPfB61RHhuNpBi3RY925i3QL1ySR8pPBIHt1J59OuYdObdInlk4DKoYcls8NxwoPT0549ahtERgysG+T94GOVTOCMrjkYxz6jpxWyr1V8M5pyV/eTTW2j3v6vXbfRn1VOvSkk5Qi1FJ6Pmbvy6qybT2vtv8Af57J4ZikiZUVUHB3hdrYJ6gc7t2ACOBjr0rIuvDUYCkWyo8ZO5gcuAuD8qgAAnPBzg9ugr1l41kiKEpGMbd0Yycjq2epz3J9KzZ4UAG1R5oQAgcnAztyPQgn3HTHataNfERk1zy37u17LZt9dF2FVlRmktk1ZRe/2Vq2m9NWle7t8jwzxB4fhih8xcFWAO7GGdiCfnX+9kD+LgdDjr5gY9krD5gACoOedw4I/LoM98cV9La5pvmQttj3sQWEZ5AXGSABjkknp+NeAatbCG7kITy0PzRowPIOc7hzjHBX15xgV9xw1jJzjOlUk5NXau9l7ui21t6b+l/HrKKqWjJJe7Z2Wt0nfzSs+l/v0oxsPMQg4JAGcfebHRvXnOfTjGQ1dTpUhjmUyNwCWZc4O7spPPHqpz1HPNcnGEBJPTqGHp6A8/ieCK3rBwrIQRtYHrn5j3APIJPrjHGTX0+JtKnzProlbo7XXW1rLZrTTzOzDSXPG+rVldu1rW6el7Lsu57j4Xu8XUUgfaiMCxZc7gerIQQMDC9s9sd6+tPBd2rwxl3JG3erqAULIR85OeSNwJXHHrXxXoc5RoMENlxuRWBDKvXJPPTHPXPbmvpzwPqZWO3eORQp+Uxk/OgOPkXj5ScZJxk+lfL1VKnNtXSu9NLp2SS323312+X12BlG0U93uua3bV23euuyfzPrTSbqGJoWEqKxjUOzNu3Nx8g4HJHQY4x1xmvafD15BLGqYODGXR8Zyx+6fYDnAB44ya+XdKvgWhiaORWOCFLbsupwsi4xyQc5647HNe2+G9QOCjuqgDCFGGQ4HIHoOOeuemOOeOtLmhvZO7263ikrabW720+Z72HqJuPu8ra6uytp089m21fS59QeE70W8cUc1xE3ktvJYBWdW9ASdzYHAJA9PWvdNH1W3HmBSGVkVlLHBOR0KjtxwM5A3c8Yr5G8O6mblkfOEYsqyhsncmOO2TnGBxj5uK900a/SXyt5Tc4UsCMMoj6MOTjOec57cdK+cxdHWTe976XtZ2tfbe+j/wCGPboyS5G1Kq9nJO/LrHRW2Xlvvq9T698JXX2mzWIsVOC4OOAR0XHy4AwRzzj1r2TR7qUWcCMAAhA3ZyzZ/iz9FwB0HQDJr588CXkZtnmkcEm3YRoT8rDg7sei4569favcfD0pnttgOCo3b8cFFJAUZP8AF2wCeOnzV8fj4Wcm422autXsnZu1tO3S3U9Oi3eU04xlKUeVPdpuOjv2+XXsj17SJoQ0UoO9mw2CuMnplf8AZGPoM8cZrrbG7gE0QRit3I7kFOFCZBQE85I54PT9R5np12+6EZYRqmzyhldoHXAxyT3Bwe2eK7zScrdOFJESEKj4yN7c4Ix91jxk9xn6/L1aSjPe6T0Vujtsr3S8/K9tD0oXu+ayk9bJq+vLaz0stO2i03PbtBMMiQsm9ZGG6R34YkdSBjlRyc55z6c163pACC3nQMUYkSFjja3T5R3Jwc5
6Y55xXjughvIicAblBJPOBnG1QvHHUHnqfavWNJfEMMeSFZgzBRwHyOgJxg8984rFwUJWTu73aWt27K7fR+XTe+hlUcr6p2f8sm5XVumnfo+nzO0haSO4ieKZlRwNqKDjJI6demMkdB69K72DUZLeKFfNJm2bk3dGIxyRnHGcc8H+fA2tzGfLVwrqpyncqwP3h0wD9fy5rWmZ7pkK8Mm3CKdu4DoD1xgdccYB4711YWbpczhKTlooxe0XdXfMtH5JfjsR7O7XPdRfS6Te1rrey2d2+9j0hNRa4t0mBOSoSYjqD1JxkDb056HAxwCKltLqVGSJWyXPyMGJ3Zxyx7Y7YHXj6cTaXske6IkKFjIKk5OMcqOmRjqRzj61v+HrpJ53ic4WNnK7hg7hjuegPBwPTt1r2aVaVRx5ruSUVJ7aJpN313f4+T0y5HTUvcgle8VFyfMuju323328j0vSrqZHaNiVde4GDyD0GTwBnBwPU8V3dheyuMSAiIHhgO4xySOoJ79eg9K80jlAkjkjOGyAzKDjnPXrkjBx25zxXoOiqtxGdhJYqN4HTr1B9+cdRXt4J1PaKMJXTa00129Nutkn1tqcdbk5OeUVFaX01i9E79tOi667s7SynieaFThlb0HcdycZxyeo9a7q1McUkXIbcpOchsjjJPvz3zn6CuG02xZCmBlx8wzyCT0wQM498g12tpBKjpIVK7NuScEHGOB7H6dhntX3GWqoo3lTad7Xtppazd2r2+7y3PGxM4NxipNpLTu7pJPe2urtpr62Ous1EpwqnngADAYZHU8HaQe2QOBn03IoUDAMCM8gA8gn+HpwByPb3ya5221BY1UMMHOWGDggEjGRggA9TjGNvetlLxWKPvAJG7b6D0GeoyeRxnJ69K+sw9WjKNnOPPaN0klb4b8zttsrXei+S8epGaatdK9uzTTVt799+9lfqXri0R4nbkdAFJz937pBx0PfqT9Ac8xe3E1hINowMHJz69cHp7EY9DnjFdLJeQiHgqzEAcHKj1AHH4fjg5xXJa7IkyMqnJKjzCOSOp4A5yM9c98DjJrHG0qKp+0hNxmk3o9Luz7r3b+T2fz7svjOdVQnH3XLl185R1fqvyvrsaFnrkd0FzKiMoIKMM5wR8wPcnHPAC8844rQe/hyGDqzZIfGCpXjnJxyfTPYEnFeHXWotau6JIyITgOQByDluc4wMDAxx39a1dI18XmN0pAy23AJGcHoQQcHHP5Z7V82s8anGjUSc1aKknpK3KrtKN21+t9rH18ckcoe0gvdaTdorb3W7NX1e2j0Wvc9LnQXp3KATnA288KeuMjjnrzjJGO1V1t3R1DAbcrtJGSVIJIC44A9+x4FWdODXMEbKAAULYBxuJPG088cdOvfpxV+VWRDtQHaB1HKt14btjHvk/kfQdqsfbNPncVJuKSUkuVrz/DV6eRyqXsn7JNJJ2s09LWV79PNXXZ7MqtaIi52ruIBwDxg8kDI4PHY8+9UBMjTfdKhSQUxwTx1yep/HbjnPJq7HNJISpYKBnIIJKnjPPTGMEkjp7nmnPC8QaVFzzk8ZAzn5x1wRjrz9OK5pV4ySnShonao0tL6b3dtrX6rZbm0E1eMpJN25dGr7WWt12/yI7u1S4Q4UkhTwTyCvIY9SSRnjp1OcgY8x1a2ijucugI3544BPOAw/u8Y7+3UV6BdX0sCk5VgR8y85GQSfqSf1znvXB6s/mzCRS37wE7cE7FJ6Z4HcHofXkmubF4qlOm400+dJKTfS6Wy6K6tZXV+1rHpYCnWjOTcn7Nwaja+6aa3sr262tucHrlpCqS3MSbGY7WXdwAwxtBxwo68Zzkj0z4l4o0trq0ufOtRJC4ZVnDFZg+Dtw+GGwknkjB7Y7++30BdJndmMI4K4zgDqy88EED3rn73RlGnsnlGeGcgYVckq3c/3CB6H/Cvn6sVUl8Ekt2rdLLa9utmrfOy2+hoT9mlGTTl7sbrS1lF97XXfZpq6P5
j/wDgpl+zjP4y0HUfGmhaaHmtIjDqs8CDJlTduYqB1YDDnt153V/JZ8Q/CtxpOoXavbPAn2mWCdGzthkDEITlRhXIJVicjHQ1/pBfGL4U6brmi+JNDurQtZ39lKphZN0dy8qtsdxjiQEZJHPv1Ffxaft6fs6az8L/ABz4jMmlXEGm3N1cTWsqoWtSjMTbbjtUZbc3P8JHQ0UPbQqKUbJRalZu0mtLrz0s1pe+3Ze240Mdgp03rUhFNLq5Oy5ratK6tJp6X18vw21WxliecOjkJIWU9FZB/EG/iXn5VI9+cccxcKjO5QFQVXcDjb5ozyx6bj9B06dc+0eL9IksZXYq8T4ZWQjdtPbzBxkYIOeg/GvH54PLEuFQ4fBKcgAknJ4yB0PHp1r7vAYhVaacnZqMY76q/K7u92tl+WjPwXiTL3hcRO0X70m3paW6euvd9LL79a+nTm1uYnLDaXG5lOTtJOVDdMAHAGOemfT6Q8A6shkicSMEYYHmAhyx4Xgnsck55IwcZFfMpVQxY5Az0xyoOOnvnofb349N8H6mYpIX7RurozAE+UDhgy8Zzn5eevfvXNnOF9th2rNNac1ls7arfbf9NWefkGL+r4pKUrc0krNaNXj266f5M/RzwLrCsYwroC0arK5AYl16sCWyTk8jORj1JNfTPhu5tZZkUyMYbiJXQZ2iViDtyMYGTkgc46V8I+DdRP2YRxAZRhJlT+8iVsHrx8hAPrj9T9VeEdcxNaSM29kSOMlzuK7sqyRpjBwAvzbs9sE8D8WzTDOnWnKLk7NbKzVuV7rdXX37dT9hoOFakl8SlBa2dr2Wkbre7drJtrtsetavEm92w3kybBIQOoGeSwwV6jadpBOfpWC7rEAoYLEu5ZCQRsjPAXPO53x04Axwe9dnqCQzWqSIdsU0EaIG4Zs5JkUDPyKTyTjAJJHSuT1GCNFZECMhVQCBuMgOcSnbgZwpKnnq2eRmuTD1k4Wd7cy/KKfkt/lo7WOWrheS75ZNNa30Svbv1er01fk9+Wu1CyKqtGFl3BGfgKhHzBSQTljtwfXvXISrLAV3RpHG8ki+YjbSvzD950JK8Dt7+9drP+++9hZUCxiPG1nGCEI4OFHOcccjnrWBd2EzwPIqgPENro3GY0GWcfezkHpjOc16NKrGNk7NXTdrOyfLq3fVW+7b18SvhdUlBS2euso/Da60Ts9e7a9Uubkm8t45Mbo55PJlORlWxhdnHCtzkDg9sYrnNSI82aKM7kLtG+7gkdQN/wDcJJKjHYkV0UlstxHjZiPzokXKkyB0LE5H+8fyxurH1aELdLcMDFDsUfOuFmaMdFIJwRn3J/4Dx6NKcVJJ3ta62ba0dmvLbT8jy6uH5L2hzSurJpaP3V0+b0u9F5M5oZUx7mU+WXGATxg/KpbsOTu4PQYxzVWJ3SVw6qBtIUJzIquRg8D+MjG45zjnGKsXU8UckasQqTb3cgg5wQQoH/PQ5IJ/QCqWoP5KoUf/AI+VQ8fLkgnKk8k4yCq8dTzXX7RtRVo2kmk2rO2m1kr+b7fjyyoKMr6d227pbaLp5arZvew2eVUuA7u2T9xgvzDoNrD0wODkEnOMZqG/mdRG8JYMzrvYD5WY8DvglecGmXdyzqGLN5iqodQo5A4AU85Iz7Y/KokzKoTeSu4ADkFD13c5IHXJ9hxWqe1nZabbLTtp08teo407NPl00+Wq87q3TtbZamdqIMvlrK+QQC7ZwdwwSFx6d8Zzx9KzLu6W1iyJgDtxkAbmOBwxyBt4xwO5/DT1y2MKY4dsBkKMW3hslgTx047DivONZmc20kasVdeARksBzgZPQD8jg4Pc7YejGpJXdldXi9r3e3+SWrWh6eGjG8ZSdt4pbJtWaenVp6O34qxsT64GtZF84ZUj5V+8wbPHJ6np+veptLngnnjk8zY/Vk6MT1OSD90D8h3IFedwkyBYyyvJwWyMkhSc8A546ZBz9e3Q6XK8V9nI2xoQAckYxyo5OVJ
x+vWuyvQhCLUVaUd3te9ultVp5XW7PZoPlfVSum73tstU22n9/wAtjZ1wpdl4mG7GBgHHC52nnOQeenX0zXASwpaXqrtx+8ypHTAxwTxxyCT+orodYv8Add7xgP5W1SgJBzkEFe2McnkdR1rLdJnVWcZDHO7vtJyR2IwOuc9h248u84tNNLmVtHrdJO6eu2t+nzbPew87OCcvdSvF6813a3m9tNt0rs6Z7iGK3gZ3Kqy7WDfdDc9PXB7474ya8V8YkLeSOh4mIODjHH8QOe4OCf5CvVNQx/YyPCQ5j2lmGSQBxhc+nfnpjA9fMtct1vbf7QoDOg5IyWPBzjnIHAyPb6mvXweqd/s6N6N9G1tq/wANrpH0mCxLhblk1JNJq+n2b2Vra9r3afzODbei5XgsOUUfLhuoPPIOOST70y3jZi2Bghty+gXHzAn1PYEZyD3xUU0jIdoLAn5WBGTxnA7Y7cA88+gq5prEswLglgSD2wAcDJ4J7/rXpTprlctN1rqndtWulpp3e6tq9D6XA4mNStFa7pvpHZb+W3S/XXY0NNla1u422qwDbNuBj5sDH1GM8A5yfpXoN5sawWZQD8uQD0APUgE/l/8ArzwEIUToV657n1OCQcfe7Yz2r0KaSE6ThTltgIQHrkfKeew5x2/KuaUFOUtrez89WmtNOq9Omltj7zKK8oUp073inHlT+zdrZ3vZNvX10OKuHjKfKGDc5/iKjsc4HQY5P1zziqCP5bgIw35JwpyM9M89CCOfQnFPZ23uoJIBwwx90HOCTwAAMc447A8VV2sJCwUnGSWHGM9uv0H15AyKyUX3a0XNfvaOjb7O1tdlfXS/0NLEztBqN5Rkrq7tZNba6rTuttn09R0DUcxKrN84iIYFurH7qAY5G3IPb2PGLNxeTRTI6sqFSChOSpPoe2MY5PftzXCaVcyq6Ko6Hcx4yCcZAGew7Z5Ndg8YubZixCuwLKM4ORgDP+yOePTkZzk+XWoQU0nGyvd2avK9ne2miWq1f3bfeYLMXPDwdpRndXSb0b5d0vTt387O1S7kvEDM2XVQGy3UKDjYP4Rgnn05Ixmsmw1B4bhWYkCMlQM5bd3zkdMnPPHJ9Tm+ViEDqTuk2Nu253dBgDjIweh6jHvXJRv5creYdqhiTyc4XHIJ7jj09sc1zexpSUocto7KLVnfS6+em/6nV/aFehiKFWNVxkn70VeScVbq7Kytt+Wx63ZX/wBpki3HMqKFBIP3R0B5647nqcjkiuoEQVkWTaDLtbaOQu7+EDoFbjbzxjoc5rxW01KSG5V0kba5CgO3fA2kA9SBn6fiK9K0nUZriSAPli7AAsRjK4ymccJjOBXi43AypWlB+447app6Wv0013erb11sff5NndDEOmptzrSaVrXbXutJ2bTutNvLsndvrNS+I1PlgpsWQ85Oc5OO/foB+tZT2iW0qyFdpLcEHgnOdufQ9+CfqTXfyWdvJFG0YZppJVK8hgEb+EkepJwc8c4z2raxpBESsqKArKfLXBOMcrzjnH3vTjp38ZY1QcYS5rW5XzOz0sk79397ufdUZQlD2sVerG3u6tx+G177adndNeZnCKK7gjkdSQUCqq4GGGRn/aB7c9M/3hVqxs0SWOLaWLnaNuCwBOOQOhGOnf161WiheMIrExxgY9h9R1yvYZ/i9K7nTtKTfa3UcbiNV3yOx+XC/ebGOAAe3PHpU+0Ukm03a7sndtJJpPbV7aej10PWUo2him71LJRi3FpP3Wlyp6tdrp9OxtWVtFoMcd7NguCGUEjlDjHHXfwMjnHTnOa0/EXiR9asQsjDa6qgccFBHnCntznBPbr7VxPiLUGuXEcbv9nUDayjCMB1zz95eMe+Me+eMfYCqTtIwIP3sk568E8dOMVytczk4txUteRN8rvaye+q0drLXYj6xSqTj7SLTg1JVZJtQlo3HlW3TTd73TWmdeSDeyK+DGAwAGSRk4Ykcgj/AB570RQSOEuG3OhBJDH
qSR0HU5x379j1qW1kheVtyEzFGDA8qGHPHT73oOQfqBSQzW7wyRhj5qSNtRMnOOoC5JyAB/nitEnayjJKKV7rrotLeXn8tB061L3pynOXNK2jv8UtGo2StbXZ3266xiISTqEUqwGct6emSeD9P7oPHBratLe3eQIcLKoG7dzwQeckZPvkdQetYlpPC7zNIdnlA8ZwQwBIx6keneok1EF5ZcsBt4xySoPr6jPPPPtxndUXZ3i9UrK19Wlfe99NfL7yJVqagueajBSlLnk/i1jp/M73/DRO50d7LaRSCGKYrIBnIxz684HPtx7eg5/XL+B4gjSn5I1ZOnzPjtj0x1I544zjOSL2O5nLeYyRhjlm5Y4Pc5ByPfHUY71m6vJCz/JJ50Yx8w6H3I7855HfJ9a3oYVOcXKLVrJtJrR26/dsn6ng47Gx5G4O8U7JqWjbcYpytutNvK3cwJpBJOTyDngtwXXqDg8579Op9zSOriRc52DBJGSAD3OPzHf6VAXIY5wQCRkjn8CTx79D9KdBM0jMGJGANvfGPcenPPP06Z96MLLSySjazs2tku99N7/hex4ka7cUpSjKUpJNrXqmn0fdW3+a03bWUbkVeEBxn0x17/e6j1716npEUUNpa3UjK0TuSST9MBjgfQdewyc8eN25k86EMMFmCgjAB6Z4PBI+v4dq9ku5IbbR9Jg3xlnVWYIecnnDL2YdxzknAweK87GxXK0mk/ifdpOOias9HfslfbofVZLiZTpVpu0lTUEm+uq9be793fqUPEE0E91GihcSMi7x0wPu9Txnnr7DvXQXbRR6MIkYcRgIBtBZunvkk898Ae1cbcR+Zex7iVxhlBPBxwPcnpk9QOxycGrXsyxiPY67cBAN3UEANjnORzyQe/rXkukpOPLe8lzK8r2vy2Wtt0tbPv039+niVJV7tRTaaV95Ll1XR9F/w5larHFDbtJKDu2nap/iY8ZY4ycZPUL+YAPEoAt1HISVVSG565BB+Y8AcjpjnkH5enUa3czukKlByozkDOeM8dyMjngnp0xjmHhdvmIwiZIGMHH6bh046dfXFerhYOMPZylbm0d300WiT7rS/mtkr+PjZT9tTUIxk4yU3daL4WnKzasvvfnc9A1GUXej2jA75OU4PBI6Bev+cZ715vdRyRzEMpDk9CeeTwuAeB7frzXS6bcCSGG3LN+7Zmjyc5bjaM55A561g6wzm7kaRSWLcnp+Q98/U/rXRQpunUlHRRu2ld6tpXfna1n30XVizOUauGo4mVnOHJFKndqyUE7t6pLWzt1T10Kw3FkyF+Vtu0kcnHGDjjPPvkcelb+nIrAOTtMfQc4YAcjsAeRjnnPGccYkO1lCfxsRjHOfTnoMdSARjtyK6SwRY7YsxXcuVCnnGcdD1yMZAz1JHpXRNtJR0u2kvTRXtr1fddr6k5ak66d+Zzhz2dm18Pu3dltZ2XpoVpFyXYDC7ztIycc9/U49v54pN5jwrDdwD15GeRn3z27ep6VdZDIWVMGLh2fghTg5+mRj8D6Vjyk+YVJIXJ8tu+B3I9ff6ZPqRk/hbTtZ6JPZrRO9/wA9nZX0O5zWHlJqLbbumtrtqz/u9f60JZJN7ErjsDkAEED09OD0P50mMqcAsQewPBIxyPTPH4Hnpl1vGJW2ltwALEHoQPUZHfpnjngDmtRNPYIu5GR3bK9wEboffOCRkgd+Dg1oq0Yp6rRXe3W3TRu3frdX7GscNWxC9q0rTW97pNctkrNpb6X3MNcpjIwvPOD97g8Y/wD1e2eRpweXs5+8egB5LDoe/ftjsOmeJ9V082aptJKEKVPds8/MOef8njpl+YixtubawwVA4J6ZB7++M5HpTjUjNqWrTtotd7brVpbu3nZ+TjGeDnKNSOsYprmsk1ZO179k+71e42Rg0mwcF2yeQQBnuR0Hc9cfoNWyhjjuoGL4X5SST8v09D7jrkfiMGHMjqzHaHbaGY8Zzkknngd/r1zXTLbGO0WaMbi
i/O2euD2H8/w71lVkoPR8qd1dt9GrvXZ9vN2toLAqVeTrcsZRhNVHeWqinGSVvNXei222Rp6y0MtxAy8hVjyxzjC5AAyOvHbP0yMVkXkcTEeWAC2AQD3/AJAcj6Y9Otd55JZF+VtqrhVGSwPUk4756+w474tRQSmeEurBS4Uhhg4PGSBnjuBzwcZNcdSso3nzK9r99Vvpp09VruerUqwxLlGNJyVVRhzW0SXLG977rRatdErIy2sp42G7IyAeecg9ycf/AKs1vIttDZyJMR5zRkIpOCXONuOOfT8sitPXrYW6WzQoWyisJAOCp6r/ALOOCD15Iz1rjL+XKphj5gPI5bB64/3v6fnTw1T28U42fZNXa+59Hp1vstThryhlKrRS9pJxhKEZaxi5KOifRpvRLexlXeDIpBKndyrH3wBnuffA7jOBXpHgS6QXAhCfOyEAqOXf+E59Bk8ZHXsMV5rIPMdWP3l5CnBPGeCO4PP0x9a9D8Ao895JGCscoyFLYAjxj5i3Y464yfu4qsdFLCTTeyV3bZtq7uundW06anjZNXqLNXopKs5ST6LSLd72Sd3on+rPaNDPk332hTKwXeZQxwVmUfdX0GSPoB1HaTW9RutTWSzMuTISxDA4GewJP3xxwe2Md66LRdHjli8qKQMcsbhxwzkD5sH5tyAnhhzj0JzRHpFrbXtw11GFiVSIyzFRn+9kZG/1zjPT6/AVJ0p1pSd5zhyxi+W9rWd9XoreS69z7DENOLjzKE0mnCDveL5bWVrLfVrb8TyG98PpY2TXAOZBIHdiQDIDnJx0OSM+3OeaqXN1aR+Hrou2JyjfKTuznsDxgZHX+RrtvE6QJZX0kWWhiyYW4PmcYJXnjnr1yB2zz89anfyyWU0PmgKZDuUfeUZIP0AyMdRX0uChKvyRnNpxknFq70dkr20vvfsl5Hzua4ingsM4SfLKpGU1GVve0TcW9Hrfrvfbc4SV1kuZiXCgu3B9zkZGTxyBnLHjk+vVeAdNuNY8WWNnFbSzxiQqypnYIl5kMhwcJyCxxgAgZxzXMxxx5kkk+ZAQY5OeBzjdwOQOg4PPPHA+6/2O/hre6ouq+M2tEe0SR7G3nnGYxM2d2AynPQc56459PrJ4p0aMlH3pKMaSs9NeVNq3ZN9rWt5v8BzzmcMVXlK8WpckY6fG1o00ntd99FbufO3jzRrrw/qsUjac1stlcl7N4y2wplSqp0wck8Y568YpmoXFrrekJFdExX7hTggeag43bgCfmIAxz7gV93/ED4Z2Wu6R47TVrc2+tx2cureGnjjLQSyWILfYyQAA9zuUpxj92Rgmvzs8N6drmoX1xG9nIblZ5I5baRGWVGRiOQenTj8vWtKMualCXtIxlSs7qXvXbVnZpXW7sr677n4fnmHlRr0nfnhiY2dOSbt8K1jbdrRPXVvXRnFa34bt7e5j+zrKokUb45Bgs/O2VRn5lJJ546HPWsY2M1pKbaVWV1AeJtpCupyffnjBJ4xgdeK9Y16C8S7lt72B7e/snESLKhV4xg4V1IHHox59u9S6jpjvpcFyYY55WjCSlV3PC2OCx/ujpnAxk9K9anjpqlGNSTnFpe8ndp6b9Nlbe99D4mWTU51KtSCdGUGp7Wvdx0s3rrLffe1jjdM+TYFUnBG7sm7pjb2z+OPXrXq/hxSxiR1IZplJCnd+77BWwOFHPQHBHfiuB06xRJFUSsGUBij4BkPOdo5yB0x0565zj1Xw1DA00bfLvGNyvxlTjIU/3wR7YxjPSuDGTjJXit9et27Lq9W16WdjqoQaSSTvorvRtWjrZX6pPpdto9g8PQQAqMNsLhXkJyQxwcnjBPY9OwzzXt/h+3tVjZSyyy7xggbVZ1zjGM4z0zyOB0zXkWlIrLHtQKY8hVjXjPAJPTcT3bkDt3FexeHfNxHLJEhwV8vA6Y6kLzls4zyD+XPztWTW1lzPo9fspu683+mh7+DTit0lKN0731dtHfrurfN7na2emLM
Fkmj/AHyfKAMnEZ7HoSce34110OnTQKq2jMoGxlO0qADnAxyMnB59PzpbVrWGFGXa07qrgxnehPO4N6N0wPXORnOPQbe7tJbJdyRoFVULkbSjD0OSWI+nPWopzk5RtfR231W2rVtUr/l6CxVSpFb6WatayUlZa9L73fl98GmaXI0IeQlXKgv5mMgng4JGR2PuMZ7mte91xPDmn3Cpai4nECwEk9Vk4WQdmJHYYOMfhSvtV8iKEQgNuATeBhcAYBYjPUEkAemMjGDy2qx3GpTWJublfscCPd3UrPsRY7dSxLtzwM45znPc17mBpe2qwu780krvpZr4W+l+r0130PiM8xjw2GqVG+aVm1y3fLdK7be1rWVm1v6nxn+1541v18L2Pg60ulS71FUl1GFTuzayEkxyLn5TjoeSTkEdq/OZLE2VkyFs4PyLxgDnq3fPc44I9zXunxr8WjxV8SPEF7Fd+dYxXrWtuAPlSKIlAuMnO3BA9RzxXk8q/aGaMAbAoXKgMWPcg9D3yOufReK+5oJYemopK103a7drJXT6aX17O6R/DXGmdLM88xOJlOUlQlPD0kpOXMoS3Ss0veetu3dHnb2guBeSsmDu+QkHZgZ385HbAx6jGemOTfEbyLgA7mUk9R05OT26+nFereJbeLTrEJCwRnRWIUffVgeSARtbjoSen1x5UULSFmwWOSCRyQexHP5D0/P6HBzU4KSd43SXS2i376NN7Xv6nyTlzcjbXO029NdbOzs9Gl3XkluSAkKDnk5HHToeB14OB7emM1E+cEkEHB+Y4B+ntx2I5B561I5YDBx8nYfj/nPuKryM5yBuK5HXsO55HX9M+1ehHorvS129O2qv177+mhLjfe7trbo+q01/z/AgbcSB0APPQZ64wT36dO+e1MIwT1GOBnk8dQT+J/yeHnpgk8H5eec84z7d88DBA+oMHacZJPORx075GCe20d+ucVvGz2vpZdfLbfvv189DOOtnd3TtbWyvy669unf0uMIbjb1+vTI+g6Zye3OOtTxTGMpu+bHJ6ZBHU7cduvOM+3NLIoALAjO3J2nv6kDGMf4DFVjjG453ZyMfz479RkH26ZroUVonr0Xppppe+3b5WG5Xt37rdPS/yt0vr12ZuxXKAhg2BzjIznj1z04z15wKKxkkIbABGMgE9c9B3/wH0oq44eLX2l5Jbbaa7Pf8H5D9tNXsr38l2Xd/1udQNad3LGVlkHQoOQB1bcDjaeRjHHritW31+4VdizsofBLj+Djr1Gc56Hr3OAa4G3cyMgzhgOVJyHzz8xPbGMcZ9q1Nj/MyjOOQOoz0OR+P+Rmvj55bSjG1ltrpppZrdJdOt79e6mVNJW0aslFNaJaPVNq70b0Vup3o8QuyLG1xIzDaGdmOGbsQRkFuuR05xnHWb/hIZWUlZSZSAwyNpyBgkEHq2eMA9Owrg9jAbeuMBcHIB9AeOeeQRx65GDPHIfkMvyFWGMHJx7479CO4yeSc1xPLqSd4xvqn0XN8N9F5rXZW9NeKTlC/K2l/cbT6OybXdaWsdVDq13G5l8w7WckDqGLY4POAeODnGenHFdBD4guY4iXlbPQhRuz6DHXA9OuO+a4PzIVGBnBIK4JI6HB7YJzyCM8DnmpknZc7SQCMEHkcdTjPGeMde4qHg4TV5UlpbVpf3e7fS1ut/LaqeNq07KnOUunvNtxWm22mltX2bO6PiKRmXbLsOQM8Lyc5VRk43cfTB+90E3/CSKMh3YOAuGYA5POADxkfhgda4QnzFbd/Hjb03ZHRwPxyfTg1FLKFUqSTgdTycjOB659PXnPYlxy+m0rwdrpbK1m11S11asvQ7qWY1k4KVRatPVN2T5bX0Vtnpqzvn1mK7SRXYNIwK7BwM4G0DAPPB/nXhXilGF67soAZyOSOOenHJH06111peMjvhl+YM2T0BGOTz94Zz2POQc5rm/EcYmBmB3HgsAvTJ9yQPzPpyCceplmFWGxClBWjKKu3a6b
5VfXvovw3O1VvaKLerSW2iaurO10tFf599DkgTtGOAvykfw554B6Z9OT9emNSxd1CKWAG0hQBk7+MDdxjHPr29cHLBQNgn5QCQrcksR147jqOc8npVy1P71SH+RWGcZGe54CjI6c8demK+mupRXN53VlZpNa/e/J9uh30ZJON3a21tGm0tf8Ag7eR6ZoMiRmLcQzLkGPG0SO/Oc88YA3DgdO5r3jwvfhWhYMisrAMuOrnGGHIIIx1556DvXzvosx3owLOysADj5fLbuzZ+7gADHPHavXvDzeQVumVVCuqujHAY9FfGTwDnHUk8/XxMVSTcm+e6l7vK7a6WdnptvvufT4LEcsYq97e9d6uza92/wAvI+p9H1OTy2DEB40Vg+QwUdgDjgqCeOxxzXq3hnVGRkhJwiR5yDuYFhnDnu56k/z5z86aLfRRKiu+FcFMk5Dk4woHGf1/x9g8NzJvibzHRD+7LtjC7cEtjnccY24Ixyw615U4yi27W2vpp0ur+u+2r6rb26VbnlFq620u1rp966dbNKyZ9aeDrqNYFDbShO8oB/q9+BuUDnnA5yOpGCMV6/pMi29zGYmlQOq7ULAgqcdSexAPGB0znIFfO3hYw24jCXhE1zsMBWT5T1wWGCGVhjavGCCM17tpk6zzKsm95yFRgnCdOeRwOADnAxnvnNeBjU1Ju0ktkn1Stsu97f0z6jDPmhF+9Tel2rXu7PV7d+1uuh9V+F9WRdLiYLIsjMsUb5G1iSAzKe//ANb6BvpbwjfecsNkSV3wriQD7w7YPByCcEfUk18j+GLlUtbWDbtVCRHETlgBjLg8YbJ4469q+gvBWsyW1w+8bgVCWoYH92SD86nJ5HbvgkHJHPzGYwU4TSjbd693bVdr63WiVj1qLk7Jyi7OOrVmlZK631ut9Pudj6EsiIb6CNZMlcEJgN26MvuP5H0r0jSF864cnKpITwvy8gDbj0IPUcjnp0rxTTLpxfrLLKWchMMoJBGT36Dgjjt646+x6LP9mcOZCyzlghYj5QPu7cnHck9MYzyBmvkar5XypvRJO62fu7P0vdprbfa3q048tuROSsk29W00rJXTve7/AF7Hv/haxDWKpI4Dpz5nADr1BOP1/wA49DsUaDgfvFA+9xnH+zjPrzzx9K840C8le2ERO4tGGDDAwvBP1AGOSRyc+1ei2JLxRguCzjjJJJx1PHbv78c1y1FBxTlq01u2tbJW7O7/AK6mDglKUJSaV73bSlH4WrLRu/lbT5m5a+bhwi7gzjaR/Czc8np0B59PSu+01DMiM6gso+9jBOOmcn8x/SuK0lwCwfGwdQcAg5HIJ6Y6nnFdVaXJy3lMApzkZ6nGcZBHPrzn06mlhqnK1JaK+sd+1lvb01XRu9xWurK7i7PnavdK1rPe703a+7bTukhWZGXIcYyU+6Qc7gfXHYZ4we5rY06WIFWCKu3ALcK2AecnHPBx16nGayYSZCyOWOcMD3AHuByO5G38yKtxQuZgkRJG5flbr9PfH1B7V61Gs21Lkb1Vrdb2SV9tPn03GkmuVtxurqT1VrLdbq9+n3Ho2l3UKEFgWjcgDd1AbGec87s/rwK9K0WS3gKMj7CxDKoPBOeh74PTpn3548p09Qqxo2PlO3kYI9cDvz78dq7KzzFIu0kg4A7DPH0OMjrntzX0uAc4WqKm5NyTV+ZJXUV9/l59Xc87Fwi4q0nZp80Ul71nHvdW66dPR39y0h0uCCeNpxuTqGyMZ54yeRng4GT1x39tZ7lUvJ5iscFwRleOVIyM4BA56HrzXlHhyWTZH8xU5BPOQcdFwR1HHOe1ei2t6Y9okZML8xXOF4wcjuM9/Svt8HXkoRdSnJylv7rS+ztrZfdrfex87XjLmlGEkkmktua1orRrZ3vqkrO99Vrp3NqLcM0YHllW5Oe+e2B6Z9Oec81ThutuY94BGApPBXPoPTdtByQTg4xjm7cX0U0JVfvlNrfKeR0O7OQSeT0HQYzzXMy
xzLM0gG0dAR8pOMY9eD0/L1xXSpSU4zi3KMnd3TtFOytpv0uh4eDnFRqrlaStzK9kuX8dG9dtjsLdMxDMoLkncxYDI9QOnPf8OnWsXULfdMSu4gZ3DJJdj0+bgAKc8EHOe3FFnI5CEvgjgpnDcc7h6jPT8DxjJna5jcsrqd2MBj/EDwQw6jOOOuefx9OVSNWlyOLi7WV9unldetui2sdNGLo1HKLvbsrt6x1Vtrd306bHnmu6T9oABjyDj5wcOGP0HOc8n865a1tLixu49gYRp945KqCSDg9ip4yPYHPFexyWJuAWKAKf7xyf/rHrnjuOlYuo6S0aECDefldWHLFeuM4OD/fHfg5Oa+Xx2WJVfbxTTi1LmWqaum16X2drdWfYYHNUoRoy969003azVrPV69dFpq7o2/DWpgRqrEgRgDc2SB2O0dCD+Ge/TJ9IhiSVAQPkZMkHGcc8n9OB06eufGbBpUmChPLUHIAG0jBA4AXnock9e3J59RsLiTbGFVz0wSf4VAAOcAENjsOo5zX0GTVadai4zTly2hJ2Sk7W6enVdXtpZ+NmsUqynCXLzu7SScXqndu+13pZeW9h1zaLG7GNQSxwSo+ox6qBjjpjqffMuFljTYEyGOORnr1x9BjGMnBGQK6oorK74Yvk5RgR9ccjgdgTz271nyxIy7MgnklScE985xyfbpx2611zy6lGUnSqOKm17rSStpzeV0r2suvS5w08dNSjd8yjbbe2mr18r9E+vl5nq1tcIWAJZWUgZGSC3QH0PoR61zUUcqsI5vmTJJZlGSQeF78EdRjj68j0a9iSV2U4LJnjpkAEA9e2eOR9K528tFZsqvyA4Y9MNz6e5xxn9a+axOBaqSdOaaXvNtWutPdjq7a3u3dLoup9Phcap0owkmm0novhulZt/wA1mtPU56SK0SC4RlVlmPypknYeeg6gdPofY1yMUotpJrW5z9n3kwhhnax6bSPlGT35AHHeu0l05onbIO1skE8q2e/TnHJB57nnArE1jTpJIi0calolDKVIIyByT04GB0PSvMq0a/L7Tk5fZt8q3bTte+l7Pu9N1voenTnT5lH2l3O15SatGT5Ut77W2XZb2PKfGWmtqFvcuIFd41XZJwdseDhjgckdvr71+JH/AAUh/Zm/4Wp8MfEWqWOnA63penyXCtEgQXH2cMwDYUttIHJ5+nNfvVLA32CVJozIkgIywz82ORnIwAe+fTjjA+XfiJ4Xl17TtR0544Zba4E0N5E4B3WjKwdQSP8AWOvQY7YBJOa5ak503CcpRvON4praSstX0tZaf5XXp4DEezrWkk4pptxs7RvFOyu1re76Lqf5ovxU8P6jpWuanpl/ZmG6sdRltp4vvGPY7KVbIG5Dj5uAcADHPHzTq1lLp80yzR5WRXCmNP3SyjGyReeCuTgY4ycGv6Cf+Cqv7KyfCf4mX3i/QtOnh8Oa/OZ1eFSsPnTkl1dAvyuzAeXg+pzX4feK9JMq4TePLLq5ztdMjIEo67hjnoBj8K9jK8xtOKk24zSU7qyUla7T2s9Gk9015ng8U5S60Z1HFXs50pR1ThJK2lkk7aS1et12v4bJAyI5dlEikMRwCQ3PysOpOCDwPw5q3pWoS2zxbWIw2xcDdgZGNxOAuOn8WOe9Pu45YTKJEO8MEwcsQMkBiOPlPY9+SMVmn906cAKxHygZQEcnC9VPbHP88/Xu1aFm1KMkntdJaaLtvpa3bofi04zwtW8bqUGk766pxW1+mi13eh9mfDbXw8Mcbn5vLUMr87ZSOCWyPlX9M9M8H6e8O6lOotoz8zJKHiVGxuYEYBb+71yTxjoBwa/PD4f+IZLK6j3kkeaIyTLwgOCoA28A4wxyeg7dPs/wrq32qa2uZHcYX94Ef5RKAB04JjHZsgAnOB1r8xz7LXRrTaSUJxdm3vLR7Po3t8up+t8NY9YzDwUnzVKfJFuLtbSCs7u9/Lz+Z9z+H7r7ZpsMEkiNK52sycLEQBj
cckqjdCTnOOcUy8h2JJH93y8xYjGQFY5yD/CBjGOcdR1xXn3gfX4o7hYXdgrIQhlG5XbgMX5+ZjkbeBtwT06+q3cEbBpEkzGQjf3d+c+Yc5OUHHy+55xmvz2tL2FaUJaXa0t3s79enZdd+h9fOipwU4QbV1fba1tdbO9tfKxw01oS8jIf3pUb2UDAB5VsY6nnIzxwcc1nXIkdGePcu3PnN90vj7yoSMnccYAx7jvXXXMMbySEB1IADMmULbgcHuHIx145zxWVPZTCAnEmQN528lY+P3hx/Gc+/tngVvSrbKWnbpty31e97d+tjx6+Hvf2aV7PVaNapa+a6dehxU1p50SyWyygeaRKWAVjg4O3srHPynnJyeFArm/EOnyx2wjWNlEUSvGW+YFmJxuPGehz39xXpgto5nCRuI1IWQxsfmdhksXwOo444z69BXMa0i3NvOiksNki7V4YPH90ew56dRjkmvTw9WScGkuVW3vtdJtvuv6tex5dXCreSk15JJ3dtWuqutr+u7Z4LdzEk74ztBaOIKckYwCTjG3PYnI9M07KSKIJVIGAYMglgDypyDxtwfmx6VZu7eJIpo2BMySlmYH73PAC9SB656846VgNdsrLt2go53DdyqAEbWJBweeMevoTj3oP2iUYJNJJu6dr6ap+d7vv6WOSWDbjJO3vrrq0k007Prppb7tiX99BcSMwZVRA4zz1zkjPUsB044GfpaaSMzQyxKVR/wDW8DAfHK47Hkcc9e1VZJGAe4LFhICzZ58rgjagPHIPfpVa1lDKXJbIZWXIJXIJw5OOQejH6cDva3adlbdJu/Td27fa6WscM8NOF7ppKyUt1J3WjvtbtsMvppHmZch1Xoh4OCcDJ/Dnrj16muG1fyIjcm4XBliZUAGQpGCrAcdecnPPrXb6oimPz49xJOHZDuy+OQo9AcDAz175zXm3iYS+TFNGQ7xACbcfubvvLjvgbcnnHHXjHVQlaceZ9d9nZW5VfbdrrdbHTQp3cFNe6mvLlaa62S1a127PozhIbhYNS+87LgqNmcFjzg+x7cntgdCOiF6wf92GjeQqqkZXPA4DehPUY56H34JpZReoZiFdG/I5yu7oNpx39q9Us7NL61gmQI0sYjbgYKnJyWHOVxnJHYDAruqzvZptvTW+iStfpaVtb3+S6nrU3Zpq8leydlZbW16voyFIZZRG8iZYttYEjJGfQDIJ5I5Ixx9N2PT0lt/LRG37Tnvx6A9cgk/XHqK3I0giiSOWH52+beeRwAV6Y+VucdePSksr+A3j24UKMkEnBG3tjPqB6Dnnp18t03J3V9+bTZvSzWrevyNfbzTvGOsZRSabv0eqdrrpr31tuYKafFFo+o20oDt5cjRf38kcDnj1yex4zXiqxskN5A0mdkjBAfvdTgHJ6DHA+uDX0de28chuFgIAkQrk/d56kgHqDj2Bz36eN6n4Uuori7uUBZEJdjuARlP90cZIOMdMD1r0MI7JxdlKXLKzTbbTirPt6t6rq0e1g8THT2krSlJOKs7tvlVu13q2u666M8avk3XDFlOVbDADB9gMjHIPGP0HFJbttUhTsBzyeMPxkgH1754AxxWxqkai4lQccbiQeeCQevoMH8fwqkqRG3KlSOdysB1UYySecHPrzx6Yr0lUTi4yvdNbN2Xmuj1vtf00d/rMDUcbTUt0k+nROz1ul5d9LD7SVvNRScsGOARkE54JPX3I9ciu3RHbT2AG5ipyFGMjHJyB0z6Zz0zxXn0DbZQucAsRuHUYI+9jv9fTHTNei6c6PbLHvDb0Zc4wCcZGRnI9BntjvinZczikrSg0k1o3o7aNdV+R9tlONtBpSfMmotX0WsXrrq/Ra3+RxSvtmZScqGJYHgk54AP4f56B3mBt2AcMQCB2PY+2OeQeAeh6GS9t/s00pJJJJIIGRtI5APYjt39DmqVvIuWAXLFSACB36ZOeCOcHHXGR0FZzinvFvl0107bx3s9d/wDgL6jD4x2
hSqaJ7y1jrZXtqtb6XVt7NXLtpJIlwmwsV3NgKc5JIIHvnHI/HGa7seaY7eXBzLxgYyGIwcj0HGRznv0riLKMm4iEW4uW4QnAJyOcY/nzzjnNenJZyCCC5cEEAMUxkgdCSM9+5OM9vWvLxsor2aVoybsuZWelmkt3Z7N2sj6jKa01CoueXKno029E423au9b9dHvaxmpFIruWBYMGU4GeCB8vA+8cdAMe/auV1C3YzqUQoFYnkYHJ4+ueewPTjrXpi2qSxCWNQWPXkfMT1I9yDwPSq82hG6gd9p3opLAclFU88Hv6+uR0AAHm+3hFvm0d0nr9rq7Rtor66X7s92UlUp+zTfvctp2lzRk+Xbey3Tu3o9dDzmCB5pY4wpLBgW7KOeR7eox/QCvSdGDwS28UjEBmAVicn/aGDgEgYx3wSfpgaZaCK9CE7ohv/eP13LwAPUk9+2DnFXHuBFew7zuTzPvZO1RkccA4yOnHOD0OazryVZRSa5FF33b1S1XbrZXvpc+gyepHL+Ws5JVHKMU5q6T5oq61Vrp38+11Y9y0q7tLYP5qq0aKVjkbgI2DtCdeeuB/PFVLi+S6L7XGDISO4yM4HHQ9M9QMj1xXMXepRpZWWxlHyjCgYLDH8Rz29Se545pdGliuriUb9rABguDg5+9nJ4HI6D3xgZr5epgLSq1pRlJKSbjaytFxaavf/g+W5+p4XNqSVGn7RurWUeaWqT5lF93dbJX9HrtBrGoPHPABhFR1R9pxlwf7oHIb3zn6V6XaeIVbRoLMGMSOgRsAZKEDA9uOc9eePWvIdchWW+GxWBQjd3U4PUD154GCPxrd05iY1V3G3C7WJIfgcr7FeOOo7e9zw1PkpyUpRWrsnyrXlaUraNf56bHThcznUq4jD1ZNU006U3ZNtON2pPS+/wAr6u51D3FvEJYZCHjkLbNwwVY8hs9/ryeOlY9m3+lvBn5XfK8884x7DPB6kge5rN1KZkm8oZZh9wkkgjAC5OPlIORjnr360WvnGe2c7vNeVVJ6gKM5BJHH9CcelKFCPK9YJ2TbWmi6699tb32sdscc51VTpKM1JJucm2m3y2cnqr6Pfpp2NW8AsY5GYrl3Occ4UjIwepPoMH1wBVfQXhd7iZwfMKuYhjIYkY5JOMr1HHGcc4JpPGVwqNDCoyrImWHAYqOcgckjoTkZ7Y74Wi6iyOsSsQOQVPJYN0KnI55xx+IPaqeHTpzTum52bSumvds97WfZrfzH/aVGGJ+q+7GrFRenwtpRei2dntfXttYvSB5fNESFirMXXoeDyQBnOPQE+/pVdEkjjaQoWjJIx2UY6fQYz2x0qSVrmIzuzGKTeWVlIwysf64POMd+TWitzatYtC5ZJHUcg5PmkE7s+gwfQZGOwrWMXyrls0tE0nZbX1srvrorvboYYitGo23GdoO/KrpJvd2s01vql36nJMzRyuUB8pic7c/KTnIPPbv9c1VadgcEAoxK5PJ5wQcce/0A9K2oYFctboxY5IcngjPJP0IHrg457mqt3p3lzCMKUZPmJ6hh/s889eBkfhXXCSSu9GtG0nfpr8/ubvZ7nztarUdW0ZXpyvZWas9FotF06/Na60JbCZI0laMiOQkoccAdckDr1/nmqRh8qVyuSMk9MhgRntngenbt7ddqkcsNrbRgHc0YY46bSBgsPXHBx+Oa5plIzgE4J6jIyM+uOM9/Tt6VCs78qe8U3e13L3W79Vrol6+RgpRhaUYJ8zu7JtrVW0ukk1fSzsuupYsWcSptXcokHGfuvyQSuenJ47cdcZrrjcPtSaYkrHgLGSc5zzgevHI4/lXH6ZFK84ZCQC+Bx3zyTkjjHHqDXZTWpUw/MXV2BIXABbPQDB44HHA/Hpz4ucbpXjfpbrdLmV9Vq0lps/mz38uxVVUPcXK20uS1ote47tLVv181tqtmELIba5QZ2seGJ3ADBwV5Bxj6n05wcfW74SXcSKig7lDYxgY6cZxk+n9a6e2jijt
VZsKzExpnIGeAB0zke9cpqtoI72OYllTcNxPzKxPQj1GfwI9M15cKlN1HdNSinyq2j0Tsuu720e3Q+nhWTpqPLByaTly9G0tb972XfybMPW2dpYVPcIpJ9T0HXp65AzWbqSG3jRcEOwG7nIwe2MDk9ffkjoa7e40mC5vNPkDFoGKed/fIHPAGenTnsRXP+KIYvt0iQktGgCk9MkYIGcZJB/8Ar88120a0JTpJbJa6a3vezS6dl18jgxWlOtU55ylKEIpKWsVeMd3br+e5z+mzkTQqwIQOd3PAPfkZyDxjAPTNQ6s5knlIGRnK9s8H88Y69/5aVlaeWUZioDEkKcEjI4Bzzzkk/QVn6giRTupDBs7gM8MD0I4z0BOeO46jn0YzhKp7rV3G7e9tbvr02va1t76355U6lPBfvJtqTikpSu2rRV++j1SV3tYNIt3nkTAOd2MY5x3PfIA6EY6854rdvSsCeScoV+bOTkk55x6en489qXw3D51wjgjYisQo43HG3HGT2yT9Rxmo9fWTz3SQANu+XjgrnjGOo549jmiznP4knFp7tNqy2+e6t92p6MYww2Vwqw96pJ2Uknf4ou903ZbK3SyGwXh8vy9zMpBzgfe9D9evX9MZrPmkJkYucnkIp6rjjkcY49PyqSwQ4YHJKglT1HHv14HPocHjqKz7p1RpEx8+/hj/AHc5+hBxx6c8Vro5rTW6Ts109fxv1vbqZ1cXP6tCrNxi2k3o/sqLV973btqt7fK/ZXKxTJgjdkn17/XJ/wD1/Ud5FOJYYJD84Kj5yOA2emMn5R+QxnI4FeU2ys1wpXLMXwAB3JHHv+R5JAGK9QslkCWyyg+W7hGAHQnqCBzz3yRn8q58VywScbOTVm7JOy5btK33apaW16+tw5iKuKhVjVjeELODS6rltd9b623toZ+szb42d3HBCJk8A9uMdCBnH1znFcbI29gvPUDPqDnOPXHp+nIruvE6W0KIibTHtDJt4Jzzhx1BBx9MnPbHDQoJJ0IO1d2OOpPr6dM5zx+lThZJ0pVHeN5O2yTVk1ZK+vfvbW6uY5vKrUxVOjKSldw200uo2d29N3ra2luh0VlpTSLASVZSd6gjBAON5YYwFHUHJIOfx7OLTvLs2jXOxhhnUZA68jsRnqc5/I1Vs2UQW7urDyztMo/5aDGChXnj3/E5HT0yzGnvpBUwIsqxgqmcSFj0xx8wI6DaPavMxONmlb3pK/Vqy1S1td9vxbWp72Cw9OjTlOjFK8YKcddUkr3Wtru9+lvJnN6N4TAijlMUbCUeZlsMyL/CzjqM8464wRWP4gtfs92PKjCumMhB+7HQ9fU454OSK6SDUZdOugZTIkTIQU7quBt3Duo56+4xmkuZrW5SW6MbSrICAzDLAnuFA+uBznn1xXnSqzk1Jpyi170U9XJuKv6WWiVrLfY0pxk/cpqLSlBXSaXxJ3Se9lv0XW5UhhuNV00xG3VpBH94AbgAMjkdOD2/mK8r1myS1lmEhKvGerYLE5OScHqCp647GvoLw2ba10u5R4w8k6MFLD51Zs4weufTjj06Y8f8V6VNNNcSRIz7CXl77QT6eo/me/bfLMZavUpTjKnHmSjrZSXu9d38m9H5XOLiCCqUKknTTnTSUWr7cqtL3eie293ojz6KPziXDbdp4JJ5A7++cnt6Yr0nwPpstzcOIpFQZHmMGwxQA5x1yTjkZAPHTofP4IioIIYqMqcgjBPPAH+cdeQa9s+FK2qm6e4hdgEkSHGNzyNxgk9jjHX8a9PNK6WGm1PqkrJuKvy8t0+1+t7X7XPmsooRdWlOc+WcE5XhJpvSCV1/ieq02s7HrXhW7Fve+U0oENp8sk3ABAxuUjOGx6cZzkdODxxrltctIulbVCRhpCgwnmHIfIzwenGSc9O+eR1DUoNISd5T5MbM5BByxPXKnIO48cEDocnOceZ2er3uqa4Y1nd4ptyiEcIQOM45G/nn3x7mvk6FBzjUk4p87jd8t07
KOsWtm76drn0FfG06Si6yTqpWgoK907NOUVZtu1/L5WfciL+0NDlZpTIUDIyEdME5PJ7dCO2MDpXz1riw2moT2wcOgO8MpBUo3Y+pXkYx3yM819Katbro/hC5ckBpEeSNcA7t3UjnOTxnOevfJr5TvLiaa4NwAAqk7g4JdVJySw4zj6jqOgOK+iy+MY8sFdcsUpNtO8m1ZN9H2S1Sa10PheI8xpyjTnUbqTTcPZppaPl96/dLytv8nLZNeXtppdtmQ3s0SQxqpBZpmC9Bx1PTJ4z16V/Q/wDsyfCqx8A/BHR9JuIon1G5sxe3lrNCHae8uVEjOjbgVeAAbVIIO4jIxx+Nf7KXgSL4hfFrQTe24k0rSJXvJcx5hlktyjIjknhWJJOcjgdQK/ok0LTUjSystPjEdlBAHZwNpWMqoG9W4Drt/dru5JPI5rvq1PejST5pxUZterS6W1S1f57n47xPiacqEYwSjztTbV72TilF2S7W3d09+hwOl/CrQfGl0bTUwljceTthlC7ETbkFJzjBcqfufxc7uAc/Knxq/ZVtPBdzf6n4ItUubi9lTN9sAQuCzMyxYODJnAIORjvmv0og0s2E8EkVuridUlZ9wwIxnzGKjhZDxtGTggg89N99Kg1Z5f7StxJZHb9mS6Vd0DcmOQZzg5HEZ7dT1rqw6itW1KT0kne1rpqLvq7WVt+lr2sfmuLraN1JKdN2UIzSk4tWWkrNpWvpfz22/m0+K3hLU1vpNa13T5LS4vNtnNEI/KkgmgUqsjrgFg27LHABA65HPiy2F/bQ3kOS0LRASFVzh8EoASc7T3btjvkV/Qx+0D+ztpvxQSCLT7a1h1wbI7m7jjEShEBxIygFZXVT04Lcc1+QPxl+CviX4SaldaZf2t5fWrNuTVGiYQ7OqCVMERryw3FiAD7jHqTp2oKpTkkk0pQV2ltd67dFezV+uljwatKnKTesXLW0W7cmj1vfmXXo9FskfHEUE6TrNKwjKsUjbbnIJ4wepCgYwcZyfYH1LQFRAjsFcvjIX5lUnAMinjaD654ORzWRLpYunaeNi9wCfMgdMxkAjIUnAweMDGcAn3roNI0+e2JNwQIm2lR90qGzhRjOBxnYOg7+nFVxCnHX3ZKyt0uklp169/u6ebHCunUuk3GSbTTv/L9lN2b21+a1PTNLmlgnRYV3IUyqFsfLnkk4PBJ6j9civcPDVzKIQgwI0G4HG5t/YA8Hg5446k14Rplz5aq0YWTYViCsNrBe/P4dSPwz09e0Fw8UDwMY3Ygyq3JB7EA8BR/XGOeOCScveTjJ63W7jtdtWtppZeu6OyEnDmckmrPlstYu67Le3zXXpf3fQYZCAw+64yN4+6W4J7/Nnvj8OmOoe3LRyKW2BPnMQOVkdjkODnIKnnHOc8+h4rTNXVLONGIEsYBYj7srdtpAGMk8jHPHetyHWElbN0Nj7WMUackquMFznqB65+oAxWdOn+82lzbtvXS0b216bXd9vQ48TVjKLk5J3STu7u7tdadX+D/GxrV3dxW8cC/u22qELNgNEercjOOMBsnuTgYavL/iR43l8K/DbxRqLgQu9nJpOnzFtyM0wILpjBdiQMnIx+IrY8Ta9FLkMS/lxbNin50LcIABjcc8lcjAFfIf7U3jGSx8GaD4QEvlz3my8vCSpMiscxDGQY9oJyPmxnivtMiwrnOnJ35k7t3TXLaLcmtdFtZWeqVz8R8Rs5jgssx0YSkr4epFcuklOUUoro1d7W/4L+NEuElmvLuY75bqeSVyfnMjuxJJORnHbjvkmr8KZK7WHTHTo3uOvGOeR96uc02RvLCM6yFQCMYJx3Ldc474xjA4IGDvmSKMh2OxmGdqtlcMBzxjk46YwOcE19BWVqloOy0SWysrLRP9L+m6P4sxKnUvUnJpybb5/ieqvK663aej3fnc5bxQN6tljleDnGCDnPry2OnY+ua82LKgI6Py33sjrxgYA7YP9MV6drnlywyvIpAK/Kx
6EAcknn2PAGfpXl9yRudQp+Xow4yDnk89M/jzz6V7WWO8JRdu7+aSte+n59CKCkk7vmbafdrZafO1k27rutSv5rFiTzkYHf6cd/pwPTBxUbSFiRkfd5GMYI6H6cY4/XrRjHzfxBvu54wPY/5z1PU0xgoJJH3sYAPA6EenGPXB/IivXi1dfKy07r8PT8irSin7zd3bXpey73Vvnq9iM56nJ5ILA568evTn2yD+NOJGBhunIA6Z7dj175NPVMg5IAPIHX+pyPfGMDkdamVVC7cAnBBJ54HTB/njHXnmuhScdUr3tfTZNrXuvu/4Gb+L3VrZLV9dLNNL1trbVP1porMx3chj/F93Hp/9bHXH4XgItoBAbGOSCd3+7zyR/nnimFAASQwIznjnnGDnp9eQe/PZF2lVUHGDye6j8ec9P611Qtfv1XZ+frbVL19SLcjk5XvdPe+js+VPok9Onf0R1UsWUEDHPHJPUDB9TjntxRUxQZyMlc5AJzkD+Jh/L3HBorpg1bdbvqQ2m73WqT0UZfjfUxLeQgsCAB/Cc5YD39D1z7+9bsN0qxYZg4UZJHIBOOnGcgcnOccH3rmt+xhk5DKCcEYUdzjr+fcVPDc/LtGMBiApOWxx8zHjI/2fTvXzNWldfE7trTp/Wt/v2LdZzWvw26evdK+/Ru1+qOodkmXcHYB1OemRjOG46Z5zz24qJ5okVQASygDI547kdOemPcHueM+K5Zk2bhuCjJQYwOPc5P5c9qe86LggnOOB1DAnrxzggY+nvXN7Nxdr89nsl6X1Vl+aMZwUorlTTTt73W9vO99+nyJzOxVe6E9cgMPf8R7gflw+Gdg6n5gF4yDwQeoOe/qDweew5oeYhJO4t8wJXPAPtjt9epBGeM0hlCqxXOd3Q4OAc49OfbK555HWhdEovVq/Wz073772Xl3WUqN0k7q3Xq9nv087a9zoxcqQMO2M/OQex4yB3I44BB568mo5ZDgMpJUEBf7xJ6dOnPTPf8DWVHdqEAyCx68H+eeMd+DjI4q6J1ZCCBuADKSvcZxjkYb0ODj8eOiEUrK1m/L0fy1flt3s3PJZprW6s3vq7b9L+d1rr5kM80kRAX5DuBbjgZ4bODgN05Hvx1xBK5mtZgxLEnj+IEA56nvj2OOelUp5SWPmMWJbtyeCCOnPIPH+NWAEFs4yfnUkqOSenI754OR65712RpxTStZOz0+T2utV+Hoz0MLo0k3y2sk9unzT6XutNWcjIAZBgdGIByDjOeo9fr156d7UJw/yn5g+MDhQT2HXGe/px64qoxUSyRq2QpOHPbueTnJGfpk9ARUysokVS2VbDcHBY5Oenc46/wAq7Gkklu1ypPq7W69G/wDgnu0npF2e6TfRvRN9PN2XTy0foGhOWG2QcgAqRkgbcEAlsjC9u3bmvcdBigkhDOjPldy7W2ZYYw5xnKgfdGemema8A0diqDyzlS+0hT84B64689ycHivb/DU3ywkyNlcBol6ADjjI5I78DrXnYmlzNu/Le17t2srdNbbfhr3PXw00uVW0Wlr3s/d1W1tno/L0PTrafzFgUxbQEIRTwF2EfM3qwz178Y9K9f8ACl4pWIlmZW2sSzZV2XORHxhSMjAwM9x2rxxMARh43IVt5LZC+WcYyewOPQ9MivRvC9ysMKoQhOBsZjnDZyAhPcE8/ljvXmzj7st3a+r76apNddVd/fc9vC1Ze0inJNJrde9srcqve7vvst7WufW/geWK8KOwXZGRsU/NIpAOQCcHPfAAwRwSDXvnhlJlvfMDNsw3mFjjOQNq4z/rODnHQfTFfNngG5ij8l5WWSSRCUZOAQ2MHI+8SMdhj0719K2TQwwW8okP76LO0EEtIMbA+MFSeRg49uua+Zxy/eOPddU3a3a7/wCGufZ4aXNGn71k2nZJW+ytbvy/Pse7+HL8pYRTDDCO4CNMDuyrv86FcZBAAOcr2HQmvojQXBltpYidiRq7EjIJIHAyeBxx1x+NfLX
gu5NxawwIUg3uxkjI3JgEZY5xluD0xjt0Ar6L8M30MsCQRSiQwssb4zkHPYZ+7+fvjoPncfGKhOSVmldq3R2te35dNPO/uYNxnNOK5lGVpebSjd32fS2rWtn3Pd9F1BGuY1feiyZDvjcqk8AD0Jz1HTnPBr2zTApW1QMXWIgKSckhzyeBjp1PI7GvA4kaysoJIwGaWRGG3rtPQk84Dfjn3r2rwzqSG1yYiZk2gEDI5HUEDjGBx3z61+fYmpGU24tx8k3Zpb+b1T0v+Gj+gpxlZODjFt6RTXMmkldpvb79HvY+h/C07QFwxLRRIAXyACSD8oyTkDgY+vavStPvHMkDKFaMHDgff2E9lzwR7Z5/DPjml3Si3iK7kLAGTcTtJ49fTn8+nHHoWhzO06OG2rt6twCBgEDHrn0/AgZrhq1L8sYyTd1e27Wl7+Vno7a29TOpTsm5dU3d+dlqu6srWt102v63DGxiQqx+cB1CnBAPZvX8cAZHGCTV+1meB1yCqZIHOfn9+OT0ODjPTsKztPnfYd6gqsY2A4Oc9cc9B6/T1FX4oJrhhtbADEgdFIHPPY555JGOeOld0aUJRhOHNzpLRLRtcr9GnrbTzvocl5RunaUV5pK2nRW21+56K51NrceZJGHZc99igsAcdgT056Yxnpiu605YCI9x3YwVZgN2PlwT9PTrz1GK4HTreaM7thVW2gZ7g5BC8nqf0Peu1s2jiVPM3AkYCpyQex45OSPp17Zr28t0fPVgtHF8krpW02eur0vp3ehhUa5Uk24xSSUWr30Tvpq7aJXt5nXxRq0iPERheGVQfmYEZ+uePp2B611FgZiVJX5EAGSoy2OoUAnB44J7jOecVg6XJEBGxKF87mVgAO/X147cYx1GK6uzY3D7IcDGWfAAGD26YBPbHII/L7vAxpqCqSlGLaTVNdX7tttUrPrfba6PHq1Z+8knypfFK+i0Wm+uq02tp2O80W7jhWLKLuIHLnBwQDnPHI56jkmuvsp0unRVdSznbkDKjGOWIzk9PquCPSvMkhnQFi2TgCMDlVPOBjqRjr2I9q2tP1CTTmBIG8NkuQHA3YIIPT14xjnB46+9TxShKnGpFwg7XlK6Vvdslpfpu9djzJxTbcJXb66222dr201fm7dre32tokMSF9oLYJAYevUjGB0zj6cHIrN1IxEMyBV28BgOCeeOuAD0z6Y4Fc1aeImkCCSZm3YOMDAJ/EYHPT9ar6rrYWHKsJHHJIPr0IUDHHPOc556jNeliMVhoUr05U1FpOWqve0dXfb0XTRp9IwlKcquqvZ7t6Nu2ifa+17W10uWhMwkwrgHpgZJBzyCeMf3uevOK6O08jChkJkK53HJUjjr1Az9exHGTnznSr83E4Dgks5LEdFGRnIzgY/TOOO/rFpbQLBGyclwPmLDcw46jHHGMc/1qMvVOsnWhPmhokpXs9tbau1k9V5Xffrxk3S5Fa1102totbPte++u9i1DYySjMY4AHygccdDjIz9c8eg7ST2hK7WIGMnL4GB0ODjPXtketblhcW6KI3CnYrgkfeyccdOAMc9ecHgdKcypPcFVcbG6kDjPbIHDYwCVHIr1qmGjVhH3ottpKmnvrFX2vZfi7q97HNQxVWMmpaQXvKy16Wa36Xtfozjp9NP2hGiVCARuPXPIwOM8cYzkdOT029zpEDoiBjhVUcH5XA7rz2HoQcYqxFYxKiHau89ST1P8Rwegwe/HUUlwoiB8shMc5GQTnoQCORj6fTkYrDZbHBuVWLSjN80o2u7+7eyesdOvXbsa1sc8Ty0m37tleSV38HTvdvtqvLW/IVUk8YyfmU8j2wOvp+npWXKheT5STnGW6cHqRz2Jyee/40kM6vuEjuwILY3YcEYxg84Xrkg8/SqF1PKjL5WAcqFAORjnk/3iex/+vV1qispW5YKyvZaqyeis/wAfIinGSlyr3V52Ss0tNbvS2yd+t+9W+t1Ri5Vuc4cHIOeuT6DHJ/PisaS
0kkAeA/Lg7gTwTxk9gQeQCe+K6qFfOjCu5ZmViy8BhjB79R7cf4VzB5SsFQ4UkheuAepwM8DsPX0rzquHpy95aQkr6qzurWeu+2y072O+li507RV7ppXveDSsm7aJdPndnOGy862Mco+aPJG7gN3HzD198eh4yK428G1ZlXJChyVUdcHlR36cnofavRZZGljki2BQw+YoCOOctjt3JI7846mububAKQVGVbh2Y85P6Y474x+VeRjafPBKha8U1J2tzX6au91ffo7W629rAV2nJ1Xfms4pPmW8bN7W20St5anmS7JYpYVP7sklgeSoHTA64HO4cEivLfF2j29vFPceV+8dSCeqkOMGQEfd24+U/NjJzwa9xvdGUTNIgaKJ/v8AUZJPzEEcjHYdD0HrXN6/piNaSJs+0iRNqPjdsUjGMEcE8H69elfI11WhGftKWsL210SVtbPfto7eR9Ph503UpuM3adubok3a6ve3ovRs/Dz9vr9n3RfjB8NPEGnXGnx399bWcs+nfIPOt57dXKtbSYOSpb5uATwa/h8+MXw81DwD4p1jw5exTQyWd48MrTg+aUVn2yOpxuOBjsOOcZAr/SH+IXgC/uN9utsrQSOTIh+fMTD987KRyu0jJyOo4PNfyqf8Faf2O7fwz4mu/iR4f0xl0u9hWSdooyEYy5E4IAIVYiAUzz8xyR358HVcKkW1JKUo+7ZcsXpaT25dt9V116fRVqFPF4OVNJVK1JJwa96Uo2SlH7Tbs02krb/L+XvxJpqq00qlvm2so2FUkK5+dWBbKgEHGOOOa85uPlcOWBw20seWBHJRSMYYcE8D8a+k/Gnht7NbhNqmGCUMijhhET9x/U8HuM46Dk14BrNlHbXDsqMIph5ke05RS3DFV5wMgY5P65r7/LMQ5wjGUrrVdHeyjbVXd3e+zTvptr+FcV5d7CvKrTpOKUmqlk1rZayve+vXf8LppN75N/CwdlG5GkaPCl9hyTx0PPcc/rX1t4F8QrMbVxNiGTai5+WSQnhtwJ4zwCf6g18Yx/ui0o3Lg9O7Lgfd+YAE454zwMcV654L1tbS6sd7AxAjaS2GZcj5AB90qTx1LDp7Y53g1iKDdk2lfo7Wt+O1tNte5nwtjpYTExhdQhUnHmT0Tk3FdLWT6p9T9D9C1eaOW1RXUGOZSnzEmNO4BH3wM53fL19ev1hoN5HfWESBgwO0MgbaVRsbT0O1T82RnsPqfg/QdXVrG0mjuFd2VTKTjcV4xhecMgzgg8k54xX1N8O/EJkS3tskiWNRI2795IVH7tiMYK8sCBjNfiWcUZq0lf3KjT7r4d9nb8H5M/dcLaatFxcJQi7P0ja261utrd/I9YvLVVBMSOsRjyhb/loeRuPUq64wpwdwzxxWVPuhiVwBKwTZKg+Zl9QWO3lRyeBt56553biSOVlaKQsJ12iIAqOAN4xk5IOPTn8azjFjepdSF3NsB4yccbu27HC+ozznNeTRqxS1bbbSSk72tZbvTXZJ3890ZYnC6JxjFN2Vl7rSvFXu9+/XVNO3XlnicMjEKAkhKupHmOHI25I5AB3A59R7VR1DT3uYpgkZG7MgCoVAZM785yQCCDg/e654NdJNDGBMrIVZDty4wgY42t3yPQ9AMjnNV513LgToHztcrkgDGCGzn5jnBPUZ59/Vo4jS0dLeVk7WtZ3W1lo1a+x5ksJNqS5WrXvdq8tY2d7XSe+1j5r12CGK4lVVdCqkIGGFdRkNls4zkjHHP4Zrz2SPHmqmMtIC747qTnJ4weOe2OfUV6942tJYryZX3eUC7Iw7R9QxPTa2cqeQCD715c7YjMagOB8xduAznkKCc/N7cDnrjivocLWlKmnFe9JLduyaSfNdPS2lkt3toc1XCuSik9VpJK/M3p1a1j2v53u947lC8ClztD7GVFOTKoB+Ur2yeevQfSsiORllZRu8rYwWIgjyx1K5zyT1ORxjqetXjJOP3krKuzbsUtzjBxxjn6A5z+F
ViDJItyC+W3M4K44OCMYzge3PoCMba7IzmrRm07rfXd2Tvdp66av5X0PNr4KcXeWkWm0o+9FN8tuZN2/H8B0CzN5sSkuFYSgdFPryc4ODzxzx3rmNWht5Y7qN9igqzqG5w2emOMnnp78GusMhjUMuMNuYswxkN6DjJxnAyMYI6A1xerSB5fmdQsrYY44C5IPGeXGR16Z96qNRK1raSjqr62ttb5au3Y5qeHkk5NXjrdxs2pK2iWu2y28+545fReXdu/3ssASy55B9fXHYZ7juDXaeGLi5iO6Rj5eQu1TlT1AUDjg5PHTjNUdTsFaXKAtG7iRWQbSwPB5GTt4GM8A+2TXQ6JZKihHwAAGUNgEMem48e+eOPT17VV54X57dGuy0vp52t130R0JNN8kXaSXuapx+FX1sr7+mmrsdb5mTHuO8M7EAYJVWAypIPGPcd+OCcUWtxBfGUK4XfgOeFP0OCSuSPb0703YwnRhgKGRXbHX1C9zj17AdCK3dRhiitodrhmkZShXryeQ2e49AM575GQ4zTk2notUtuq8u2+9/z5dnFrmvGybk1p8PRdd9/LfREV7LssmlCsCE5YHIGB97JA/Psfc4rznWNQu2tBFCMRMxDknBYHHA4HvkHn616TdnzbQRKAxMYVgFwMHO7v1HXP0wDXCLaqzT20gBTcXU9QCMnryRnP6c1rCpFXatvZvS+tuietr32e2r3PZwdRJR5rNxcXZu8ot2s0k9Lb7NrTfr4ZrnmrdvIPunG8cfKeeD19ucY9+CKz7aZmGHXKnIx2HsAOo5HORjBznt1viyygjuJnibIBGcc5OCfbp1PfJ4FcjbKpJG45blQOnvg9D044+oOK9SmlKnFtXWiTtZtJKzb1TfW6etr27/AFOFrJzTScVaLTvtZRevW666NrpsNZ1SQFcAFhzgnBzzx156Hjnue9dnpc6AQuWIHPLHCg8cDsRjIPXGQD1rjpI1V2Uls53ZxuGMAtg89QM4/EHrW7Y4MKhRuZDlSM5APTnsO568HJ9K6EotRlfaSs9tbq6em1l3t5Jn0mCxLjUa53KMrXs72vbT18vXujptXtIWt5JlT/WICofoSejDkkeg/lXARrJG4JKqy5BII4OcAdBnjHHA/mPQopXuLIo43hGMeV+8AMgL06Enjj156muOu4wlxLkFAWBAI5AxypHY55HHAOcHmsa8oyfMo2vpJ76Jq9769XtfTS9j6OjiG+Xnk4yUk4vZNaK193fTZ9dNDS0PL38EZUhi+TjoemBnHABHzE5xwcdq9cvI2S2hlRMKV/eDOc5GCoOB17cY68EmuA8I2byX1tIFyN+D3G0kdgM98HsDkV67rNnDGrJEHePZGzKuON33sdgRjkHPJA9a+azCpfE01HaCve+lvdTWr6+ul9z7/JKsXhXzOb5rq17u7a+Gyu11vfp8jA0qB5QMsUiDthSfuoST8p7H14+tWtQvIYIZ4VzGArjIPzMDjq2OS2Oc9cdeldHoNhA7woWBWVDlWwMMMfMT1J6ZOM9Oa5vxXZrBqIjAJQoeQRsII49vpnkHPTmvNlTjOScvhjLVJtdvJ3unu0mkrrbX6SjO0Wm9LXildtJcur21T3e976W0OEtrl455SQdjltueiKehI68Y68nsPUMuGLyqw5LFflHQjJ7HP1/KrTWhjy2Dzk4HGRzxkYyfTjqO2OYZYWTZJhxgjqM4PGAeh559ietd9P2WiSSa369rdd29E1bY9KjKUoQg56xacYO7cmmrPrvre1jXkmfEcbDIjUBcjkDqeegz0wc/rmug0oshkuRJs2xnAHcYGcjseMY/yOOupZBGrrySoyehHXBPbjPHpn069BotwsmmXXmZMxjcxp1LYxjvgDGScDHr61y14tRk7JqzTTV73ts7Ldp73fSx9VgMxc6sYyjadLlUb8zSUVFOTV7ryfX5GkJXnuVc/Kf7wAJyCevOBnjPX+gsfv0aPHzBWOcDGD1y3zcdcD6DOMjGZpc
8Qx5pIc9W7A59c4yfQcj1FdFBDHcOWt3MiNgOCeUK9TjPK5P4enFedUmoWTg7KKV+VNaJettX3tddFo/pcNjFWSajKU+blSbcYrWPR3sm+u2m2l3mXFyWkTcc7SrMw7c9Mcnj6c+vJx0SOCtpMiBdh5GBhnPUg+pOOoyc8cmudv7JobsLltkhVt3GCrepAHpx6c45NXGDpEgVmMZdQeccjgZI6EE+mO/pmX7Nx91JRlFXvpf7la+mitZbdNPQwWLcqtRKMoOlZNRbs5R5eZbL018la29HxPLNLMHZeQqqNwHA7Y9c9Scjt14xi6dHMjrNggBhhiOvJ4xjBB4H+J6afiKOWJY98jF2UYzjGOmDj7pGOuD6/Vmn73hEWDljuYjOD0yMnoAB6d/oaqCSg3eKvLXzSUVprq3stb9XZnL7SFXMqlZu0oOLSTWl1G100tf01Rs6q++C3lHLSRhJSCCNy+2cDBJPbqOnWuZmvJFCoWBUEDGMEdiQM9eec9Qo4HSuv06wj1Bhp4YCZyTEpOPnHYtk4LN0HfGMGuZ1rSL7Sr94L2BomVl2hlG0KckZJxnI6HkgZFOlKNm07a25fPTWy1u9H/wLWeMxddOEoYiMZaJ002nKN42bSTurK2t1590E0kLpNGzb9uS+epOOvOeMY/IcVfjupJXWeWU5UDavBJAPU++emc5yOlZ82xIIwiKXZSzOvzDGOn16j2yevNV4riVBlDhSCGUjJB6Hr0PHIHU8dOa0nDmjdap21WlleL97R/Jdzgni3CcfaXfNZNR1WlnZSvZvV7PdJanTahfR30duFYiWJAjHGMhcADHtzyR7+orEADs0Cj53IyByAOuM9ffH8waktF3oz+YBJzuXPJyRyD1zz05z0FW7Swf7Qsh3ndyrdW+Y85HU9cg9unGDWEnGPNJtJxiopPdySV+mugKsqjja9OKdlzO7eq/T5dvKe0sWgSOTIB37QSeOvIHqDngnp05BrrLSGa4niTyyHDYTHKso7r7jIyenPSm3OnkW0KRgyOi+Y7DgfNyU/wB5cDGMnvjsNfw5DJ5qTEkLGSNrE5B5OTx1OcDPXng458vEYiMoSnzRc1fltpaTcUlfdK6S1tbz2PXwOK5XyN6vZK1oqy95u/XV2Xa9jQv7R7ZrSF4hglZlzgjnpvGGPPrznupwKwfFFmxjhNsu7zNjnHB5OcH0xzxz14rs53kmuTNKHXCKkTnITavYnnAIOPU556UfZo9SP2eQnzA+5SrbNqpjHIDYUZ54yeOmMnwvrNWnOE568r5pbuOvK7xd9Um9bbeR9HTxEowg5PncXHmas0oNx6W3XXy7tHNeH9PnmgeaSPmNCgDAZGMnjng478ZwcDiuavdKeaa+dodqxq7wsRyWGT1Iz+P+Jx7vDph0/Spmd1UyEeXnnlB2JxkkMOvWvNtWEsSXciRtiVHQHaSCDnJGO7Y54/LkV0YfG1atZpKKTnF8y/xRT+Vl1VnrvpcqYr27m0o+z+FSd7NrlT66679Va3Q8NhujHfrvZjHHuXHA+dTg5znIB45x0xzxVTUZpJrlnIO04CcZyATjA569up7Z9b11bxx3J7HcxJA/vHt09MnrVKOMyStu3NGchWAyM8YAxwD79uOucD66k6fKpdfZq+i01V35vW776pnkuVWUlSq1JOLmnTtZprRJS62T00V1psrHU+FJDbsztjaRz13ZPpjqPx4PbvUHiC6aa6mYLx0U8jA5AA6Yzk59efrXV+GdAlkiDBflkJDFjgBRgDZnOScnv+NO13w0weaSNx+7UEngluu4Fc7ifxPYYNEcTRVS2/M1yu2l42ula9tb6a/O919G4VlgKdKyuvdsn10fVdVq3t1t1OEszJHCCxJDgkZHCkDgA9Onbn/DGumLOVIOCxzt5KjOcHnr2z78etdBdxtbwsANqrw3c8HB4/Ec9hzXMQlnuSqbjGG+bcPTklu2enoPxrsppS5p7X1T26ryaT8ra/ceLi6qtSw
t5JzesbttK0XfzW99NF8jptCgTcuMF9wKE9j2znI4/XPevSzaNthmQFCSDg8HcOcgdMNxyOuAK8utZHWRREdoDLyOxB5x0yO3+NeswPL9ht5pjjYh8vPRiB1wepHOBnHfGcCuDGRmnGSafNeKTtdPTzskut/N20d/t+HJ0KmGeHpqpGpSkpylblgrNX5pa9Fb3n6dzg/ERaSRy6kbCEX0IwSxIySOcBSTjrWDpyhbiEEZDOfQ8bgATxkEEnPXjHGK0dXuvPu5Czlt5IOOBgHHQfxEcY/Dr1k0eB5buLbhVDfISMk45HB55/pyMVLbpYdxkkpNa62T0i9NH11T66+hwYlxxOaKFPmbjNRbaVnyyjs3ffvo++x7NpehG5tbQ7UKED5BgN0HBXk49ycjt0JpmpWF7pt4iqGjiHMTYHfoCB14HBPTHtT9O1h9PaJlUO+VVlzjCr1K4HX1wAOnSvTLG2ttag+1XVusjRruQPhQMdBtPY8jv2yOtfIVqtejOMpawlKy/wC3uWyXdX62aXVH1zcFFzhf3IqU3FpS2imndq6Vr7veyPDtUju7i4gclyxZVZtuD079Nx5PcCvWdF8OxSwaerQh3kKoyvwPmA3Sc9lwMHgc8VoXWgafHM1xOvloi/ukB4BPRicfNtwewOOCMihrh4IY1s5W8xMbHOSu0k/LuGMDjoP5k47ak+akryipKFrL3XzXV9tNNvO/ojx3iKk5ONKcqUW43m07Je6ubd2tf5tbrcxvEunnR5pI7XCugym05AYg4KgHGR/k54ryfVbqNtOaOQP9reXc7hhypJ3BsD5sgdzxx2zXuENg+phxcsd+eS3zORzngkHGeSRnHbnJrz7XtDtLO4uN6Ku0F1LNhceozweccDqeM1yYSuotRqqTjzKzt7yelravW7ffz8niq7nCFBTdWcF70pK7nayTejevZ2vu9GeUS28UssMdvETuClkHUNnBJ4wFJxwfU+tfQPhHwle2ekRTLaiLcn2iNiMjyWHLHrgnsSa8q0bRzf6zBHaltskoaQMSV8sDOASo2qfTAB/KvsGwgmh8D3aiEH7LAwebeSyRKv3B8uQD2OeT2rfMMQ5eyo05NxcHJ3u7tKLWl91ZevZvQ8ubjh4wlGnBVKklGT25fh1i1Zad738mz478d6kINQkgkUyKQ5iADbcjAbcMHLg4xnpnOSKtfDCx+36pAUA8xQz4cjAbIwpY9s9jnj2yK5PX5J9Q1KckBlhnlWHIBfaWG8MemQAOmOuPp7P8INDhYyXs0kUUVvG8xfHzSKB8yhg2Bjjb/e5HIGa0hJRoUY6KT5NFdJO0dLataW9fXQ+fxWLbxbrSm406MPZqV7rmdoptbWe1te+2/N/F3VF0/wAvTY2CyGNlZA2EXhepxjLckDGBzzXzJd3YjZirO0svBRxzvY4BHAz+B969z+MVxBqGryzxL8tpCSr5IMpjPzbhjoAwwMk5z0ryHwP4bvPHPjbRNAsIXne8v0kmiGV2W8ZUsSRnAx1GDyQT3x7OWUoqE6km7KUpSbV25e7KK130Ss/VadfzPiTMa0sXTo3XK0orWzcdLysurXmnayaP1N/Ya8BvoPhq28TXNmDqGr3EjiRsgRWzEFCxKkL5h46jp0r9U/C3iCHzJBfmVnljdSigNFE+MRsqjADLztI9SMcV8zfCrSJPDnhGz0SFYbVLeCGJ40QNI4iUZJYbcNHlvL45LHOOteraPqFm+oxQozLFLndK2R5bLjJY5+bcc7l4AAwSawlOTrTnOTTcnytWs9FZJ2vo+vXbsfD5rP6zOUVFuFNJNq7eijLrq7dLdLvU+iNE1KVpDLcSJLDHIPKiaThY1zhScHLNnocYIxn09K80Xlok8IWIAp5iuuWEpO0Nt4yODg5HfHYV4lYXlvYKiyCOfzmQh9+Ubbg/KMcOQeF/DJrqn1iSVXe3k8sbfmh2/NGBjIZc/M7Z+UkAjBPtXZDEKlHllL377bO111ula3z9D43
F0XKrCSSjG3uqW7XurZNa9eva9zv5iun3SXF2imVnVoXAxG3+31PPAGwH05HfkPiJ8PvBfxH0i/s/EaWMT3enSJHK8Kt5zup8uNl4MUmeGbdlTg4Oahl1hpooFnm8w2yIAxGcFjwOOC3HTjHrzWZreoTbGUKpmmMcqA/MqwqCcgcAMRzzyMYwe29DH++1fR2V2/dT0s9HZq6e6vseZVoNuNrqSumm7KztbXso9Nd+tz8dPil+ytqvge5vrvSYrxtPmlu5bdFUyW8kQIK7pR9x1BHljacnIJ4r5eNjfWUphu4RBLBIY5YnBE6qud7FSMMoGDu4+nFfu34kvhqMBsL5i1sI2QK+3LoQcbyQNuT8pPPQZ7Z+SfHXwn8I688yvYyW140UrCW1X92rjHlo2AoIfJLN1BA+XnNd7hDEQU04xqJ3V37sm7Xukrp9309NCHSj7NQU4wm3Zt6Jr3XeNlvfe+/nY+ANLsIJQJUAijdAx3/f3DuBnhc8EdOnPFeh2JS1hGZC7MuSFXBVPRecZ5HYYHFbWrfCrWPDwdLOFr60hlLCVRllVDkKSP7ozg9+cdM1nRwPBGjXaFJZA8SKQSSY8ZxgY7jBxjBIwDXJOnOnpJWu7JrWLWl3fpfRp+WiRwVvaU1rGzaa538OyV763vd2W23V69hp15BJDDEDIFmAQSZy0YHIZxngeuDjkZ6c9BbXLRrL5jAzeU0aSHBBwOi8jG5RznpgDqa4eDyrRIpC6l26xAnCq3tgDnORg8c9R1palqccEbFZ3j3k7ZGf/VA9wO56YHP5c1tRpczSUWnZJtPd6Nayaa76PWyPn8ZUjShKTdlu229Gt3qte9l5O5buxBqmt6dZLKVjaQ3d0pOCyW5JdQQTkcjC5y3PPFfnN+0j4qtNd+JGqW0Mytaaa6wQKpIMboCoTqduMZI5654wK+8Y9UtdO8NeJ/F14RGNNs57awnkJ2TPgrhG4I5wT68ZHANfkf4x1Uar4m1LUyxk+03Urs5HDbmOSVGc+5GRnp6V+icNUHJSc1ZU4OCv1d4+tr+enfY/l3xazK1BYaMkqmIqKqtdeSMktLWdm2rtpWtorM29P1FIgFV1+XGGJ+YE5AHQccDgHj8c1uJftPIFcbQRj5RjPuOeQPbGM8D081juFYbecDlAODnv9F7Dr8uMV02jsZnALEjGMAfwnPOf0PI969LGUYRlKfupx1013as4vun6bn89VIRlyczTtfltsrpaa7q+t9eu+po6vc/6PtkYmMErjGOnYE9znnj61wEr75DxhWPX0HPHIP8Antmuz1xCsWEXfs3Fs/3ecE4+hx/SuMXbuJYHoWCnsOnI4IPI4Pvjua7MvjBU1Jfaf3aqz01s311f5E6Rbik9k27X7aNvVLTTp95C0eAc5PpjHQkY4HUj19uoJqDaCc/OxzwCMAj+R7nH1qxLJgnaQMrwB0xgkj19P596gV2GGJGOcDPIJ4JH48nPavUS7320SWt7r0vvfR3t1RlOXr8rvs09PTTy16jlQjnkcN8pyQBj9D9T64zSHcGXhgOfUc9uRnrx1B4/SyroynJGSASeTkcep7n6A0jcgFG+Uqe2SPoB29AMH+Vax1XX5p6vdW3+Xy175u9rJXV0m+vTW9k1q1oraNXGs2F4z0wffg8YPPB9zz0PWoCGAGMht3U5OM9B7HBHf6debWwcfMGyOB16456Dnnkn1601kAzzkk5AHJzkYzxkYPHbHoBzW8Hpv6rXayt9++nyHbyuvm/LX116/mJG5BAbIIHPGST9Pp/jzRUZV1wcHeRySeAOeD746Hjr2FFdCSfRP+vO9u/zv1M3BXdvztby2f8AX3vlFuE8zknOMlM5QnttOeR6jjnHPTL0ulfzQQFIG5SeMFecDGct7Ace5zWIzuGIA3bVDYBwRnJLHPUcjOPp3zUZkYoRuPzf3eSvTng9Bk/jwa4p0bxk0lq1vu2rXdkm9dPLvuzJRcVKy1fK7tptLS7S6PXr2e9
zfW6kQjDAg8sd2OAORjPI5HPIOcVcjvFxtJywXgkcAnrk9+vYYH4VzMc6sViUliRkNnC8AZz9OMg+2eoFWhIGUqM7c8kcng4JUZOD0z054rF0YvR+7LTSK6ed3f8ADf5hZbptO2t+mze7stF2vsb8ZIAdWy2PmHXOeuD65IAzkEcHjmpi4VcldzHBPUE+pPP5cc1z8V3ubCSBFAGRzuJHJJxkADb3qyb1iCVZS33TjGMkADJ7A569jjik6ajfTVW6W7WSVv1dreatVmmrSUtUnbRvbd376+WndGg12FO0MAA3GO3f356cH2OelWor4ODubjB7ZGevGT7YHH8sHlGckLuweSc5yepwR69Opx0wRzw+OYx5JbIXHfkg8cdyV45HX05qnRjFJq901Lms3bRdb6avZ+ppy3XvO+ijayT6Wd9nay6vXudVbp5jl2YHadwPbnjJ54J4BOODj2zelik2A5wcAr05H/6j78Zz0rmrTUYlfaz4BXjOCBjnLAHOQSM+hx25rRutSUxjbIrjHBHzY9v9kH+h5okpPRJ3drWW2q+HR22aszSk+R2S95ySXZq6fXS7+XysYN2QLhkQhBjBbseeSSO+e+emeM0REbd3Bw2A+OijHIJPbPHqfbkUJneWbcRgO2OvRRkgggdc8j1OcmrcG3cYhluQuB0JP9O5/wDrYrsjTSjFN+8kmlprpbp3vp19T16MtEnK1re7a93ZJ7X6rbqr7nbaE6D5S2TksGBwCxGfY4Hp6nmvXfDl4kbwyuxDLtJHYZ43Y/iGCM/UH1rxjSZFjYZ29CqlhjqR0Oeoxg8AD0ya9K0WR1aNymRvCYJ45wBnqCvckY7dcnHLVSlFtqyfne+tv6Xkuh62Gk29LNprTRX2V/v9PW7svoAXizQRhX3ebEm2QDgsMnKnj14HqByTXdeGBEYJFJLPCGIwmTjjDNyOM46DIHWvILa6YxxRyouyD92qjqFGDmNgPlzkEnBOOK9V8HXCiVwrtnyipV3yX3jATtkcYJ69sDJrya1NqDtdu7s0tHbTf+vXQ93DNSlFW5ZK131WiTV1dPR6X9bH0J4C1L7FLG6yKS6iPynwSJnPIjB42nAwO2CM19M6ZdXEke9ZFlRFUlVOSMDhlGeCP73Pp7V8e6DLse3hEAysp8py20xjIO6Q8nYuTg9wTjvX0b4W1GBZFmEjKxBSdFOI5H4CCNcnjruwew9K+fzClqppNvVPrf4evS6v/wAOfV4GXLBQnq1aWu6vZ6X1stdbd9GfUPgicrsmkZjE8TeUsgyytjBAGcjk4BJJPfvXuXge3JuV2yNAJpfM3FiQwzkD8snGffnmvnfwXds0kR5ljkk2BAoJUn+HAI9M9eOa+uPBlpHGkjSR7tgKAspzHIwG1lx1I6ev6Z+VzKXsqVRW+OLe+3w62ejtfWy9EfUYJpunytKMZJza2afKmtV36t7O+p7xpbCe3hjLebtiRC4ALq/PK/3T3789+a9f8MWhght4/vGc5JHJAbkbjngkA8Hjrjjp5R4Zsy1ntiBWSMl2lPAKIfmA9yCAOwOeuK9e8NwzmWJyzhACUJ+VGxjn/AdBjHrX5xWpRU5crbs3LTTtonfTd9L6vR2PoYTp2bTvryx12TVr6Pa2rdr/AIW9UtpGhhit1ySyHZjuD0+pPJHPIO3PFd1oN68CYfLMDhS3DIG4y3rgZJ69sdCa83ilke9gjPEabcZ4BK9eeDzn1xz2yc91a3ShgwjAyQWQAArjjAwf5evWsHTmpNpqN1Z6K7202vfTa9vPXSW5WUVapGScXdpJPR8z6991e2m57ZpVzH5cI3kl14JwSC2MgAHrnOB3x7c9tpsXmBHQlcY3ZHPB549Tx34A+teGaJqR+0RLLuVNwCn+6B36cYJ7dBySa9w0e7hlERjLEuNuAME4Axx3759RXpYLkneTcmo7dGn7rfT5dm1drqefXh7OSjBp6ata2vyq909Vq9Glrrrqegw28SWyMBwFJDc
4LYwFU9sYPqat2xQoFDDcGyCMnAHftkA8HvnApunWTXsSAOxyQGTJG3bjJI7de3071vx6BLHIMR/LwVbOCQ2PlAxjnHB5xivoYwrVHB06M3TUUnbVyuopttabaXXf5HAqtKneMptuTd07WVrLTXTv2Xd6FqzjKIpWRlLDDbjgAnrgf3T0A59MV3GkKYZFYluF+ZVH7t2ONuWOc85zx6cgHjn4NN8op5rsFAGUIPLDkKpxg479sHnpXZaakbNGo3KxwoAAIKk4+Yn+IdznIBAI717+ApyjOMprlasopvfazslrd+r6dzjxFSLju7O9278trppbPstr77WsdbZQtNhnjxGcggYC4GM4HT5QPfg5IPGLd5YwGJyI8D5SuM5GM46fiffHGBU1nF5ADswKEYOeAvrnOMAcAjOMHPvVozwyuIY1GZAIyTyBu+8R0yeOOme3TFfV04e0glUsm4rXS/S1k9b67aa79TxVKXO1Fe6m22k7aWT69mrJ7eW5xks08Evy7wFICgHJOeDxgYXn1PqAa2bS0vNQQAhixUAA/eIPdj6+oxxxg9K7G28OwsEuJvmzyQ4wMDsQRkY9OgxnPat+CK0tFxFHH8wOOhIY9fTjgHpk84HTOVPJK05c1Wu40201G9+a9rJK/bqtn2s76LHOMXTpxvJP3tU1p66pddL/AHHPaHoQtAZZSgI+8obbn1PPfjpz/Su7s5UwBH8yEBcAZZCccDkYHHuR2rL+zXF0dsQK5IJUDAI549uMZ54z611On6OUhZnBVwuQoOCffODyTnuOmK97L8slScYUlJU9FJuzfLpdRelvXsuhzVcalG9WV5WSUb27PXe+j8utloMVJEIkBZdwGWJyCD26YIOOv6c1qW8XmKHXYvAL5O3DD+IfXPbvg9RTJbWV0OAVUAAgDOT+XGec55GCfWqxtb6MAh8AA/KpBY8cE45wccjpxzXswoqlK7i5R2uviWqs766q+qbWv4Y06yqtJ1FFXVuZ/ZXLZJ9N19/a4t/q4smClkABxlj26ehyBxg56A8k9Ug1OK9VVVsllJ255yM46k5z3Fcxq2m3F5LvdjhcYTkE7c4wc8AHJ7dz2NQactxaSjjAQ4JJ4B7jODnGOuM9jjtxuvXVbl9m3Tfq7xdknbZO2y3XU96lhcPKimpp1uVtu6vze60rrd+j1/P0GK0KoJgMEhsqvAOc5ycDg8Hrjr7ioJkRnA2rngDrjPTnsDg5J9+T2OjaNNPbBdq8rjnPuMqO4yDz36+1WRp5IUhDuIwzFh8zE8KOOvuM4x6V6EMveJpqpSb2UeV2TUlyvbXXfla2dtDyZYj2M5Kc02pWTUtLXittUkrPTbp0ZgtbOrJ5QGGwcjkKT+WCO/rnn3mAlR0RjveQEAke/I9gOMgAj09T01tYjYwYZ2lsgYznsCMZPfGOPY1VbTpllDGIhc8cZbBzxx0OB1HGfevExuGr4eqoTVRJ6xbTa54uOlktb7Waene52YfFUpxa5op9m7Sadkld2v8AdoteljHFgzN90DK7SCMDPHyrx0yc9eCelRy6ASjP5WF6hCMknnDEFhgHHTjHvzXSrBJHKjED5SCAwJyPQjtx39M4742Siuq4BLtgcevPpwO2APcY7V6OCwOHxEZcz96XvSTTjdtx+G/ZrW979PLGvj6tJxdO3K0ovlbd7NNJu+6Suuib8jyi60yDyzFIpVF6ju3t0GM4644B46Vw91YwCRlziIMSqOQCCTgZ4LE85/E4HWvdNSsE4yqjIzk8/rjvjGO3BB6CvN9UsFaViiKXwSc9Pl75wQByOp/OvnOIcNHD8sacKbSajfVSlFpXu7q9npslqfQ5Rjp1EryesU3zWaT02TVrrva76dTw3xB4cWcyzrG/J8oAj7wJwHGT93AOT2yOMivgX9sf9nnSvif8I/FeizwW95qK6dO9ojoDghHMW3IJDLkgnpkjjiv041i0eS3PyFJFXa+z5lfJOHIGMsO/QYxjnOPNtR0
qwuoJ7W+jd1uYplkTOXcOMEbSDtA4JPzevY18RXS9onCMoqUfivaCTaavfqtbfds9ftsux06UoVOa6ptRlFaNpKN766LTfRtXvc/zNP2hvhJq3w48VeI/Dep6fJbG2v7lEhlBkjhLOwt2DlVI43bX2/KOOckj87vEmkyxvcQT/K8c0rJ5Y24wc4Lc4Y9zt5AIIyeP7Fv+Cu/7KM9h4g1fx54e0pbiG4+a+lhHMOMlJPLCYlQZO9g6FOOCDX8qnxH8KXFpd3Al/d3Ak2yxOMhpGJDbOzqoA3Dj0z6evk2MvKMbKLT5XZptNNO7WjV11e+nmi+KcppYnDLE0oc9OrBTlyPV+0UZODaSTSb1vq/W58myQEBxKGBH3UUZAIOdxJPJ55GB7dcDR0q9NtNEBnKsAgAB2hTjrxyB3HQn3ArR1OxWC7eFYmXao2lhgMjZw59jg88njpwBXMzQmGZlQMAAHwSQCCSTnvyPx78V9raNanyt3jNWd9Onnrpu3662Pw+rSng67cbRlTmuVLdxUk1Hu9b3er0ufYPgDxLFJbpDI+A8ZESyDacHGwDnljgk4xt49TX1R8OdakS5t1jnG9ZCuSQQSMeXHj0QbtoHXcelfm34N1x7SRCVkJR1VWJHyKcbo8EnG7AAYHIw2ODivrfwL4kKXSeW7hXaNikbDaJTypCc5IIJxnA/LH5ZxJlDpOs4q6d5JvZtWbTe/Xddeqvp+xcN5osZSoKUvesoyj1VlHTf5P0tc/R6yvFljinYqjmLCRMuJCcDKgZyHGM598elOupvMcsgjUsobLLhQvcsc58zJ4Xv06GvMdD1qSaOzlklDO8YJDNzGmB1BB3Ocbj0x69a9Cjuo54wvmkbkEqk4BU45IPU57g8Z9DxX5dVi6NSzSdm9N7dX36/rbVq/wChxoqUI2iqkuWNr9G7763S07ssrH5wkIkzIYVZlfHljbk7uegOTxgkde9ZgCQTSrIgZXDYKg/OXxlsc46DB5zg45q29yGLJFlSyhtxyFCKPmyCPmDfwjIIAFRKSZmAbzcYWPIwBuzls9lGO2cfhW2HxCvaUmk7W0emzfe68+vVtI5q2DSak7e0bScL6W921teid7r09PKPiHZEpHPCSwkjVJCeMgg8e4A6H5cdB1zXhpg3/uhkJuLBs/IyjPU9vQZ7cV9NeJtPE1tN5qk/K7IgGA7diTzgDuMH3614FcRSrPNGybRllXoDGFPHPTaO3K5AzwK+iwmItTaUtLq2umqitPe++3RW0tY4amEck7KMWrN39V1vd+ui003Oa+yqYZTKVIRwCpGAT/CAvbHr+nSsuOGS3mlfcXRsqR2I9Ac8jOBjGeldUsJeQIoMgdF3EgYLnPIUnJBAPORznrxUE9siyYddsZDRxjhfm9W4OfmOF6Z4I9u1YjpzaSSurq+rV763sra7Hl1sPFxbjKbTXvXV9uXTVbuza1utTnZCpO0bmQg7wOWHHIA6hQc89epxXHalaRDzty7oyc7gSCcnGASCQB264Oeuc13Mlr5W/arDOcE85yQWAPGAeB1OelYUsK+bIJ1cKwbDDhcDux7A5Bxj8BW0at72fuq11fR6d9/P0u79/PqYdctl+7TaWm3Tuu270s/uOBltzE/ztI0JIVFJy3PHB/hHB7HH878SRxj7QglG4ABSDlH+6G4OSvPPNas9oqYdIXYBWyrP97P8Q4+UZ5U8/wAXB7V5Ldo8NCok8z7qb/lTqegH4Y+XHUGumFZPRtpSevR2aXZbu2+/Tsjz503H1SS0XvJaK/Ty30elupMJGVcMwfzCHDq3AZTyAccfeAwD047VqReVLtjfO5yoROW49R0AIxknkZ45rPjiR4VGBvD/ADLjJU/xY6cdRgcDGAavxtHbskkmOpHA4BGBsLY4b0AHHrjraqX0jd2tZ8yu7Wum/lqmtredslTSXKrtaczlu9U9nbt2d/xL6hIMZVmYNtUAEgqf7/pnv06Yrlb+GN5pNsRRwXLBcgE
EZ5GOV78+5967YSQTttOQ5jG0Y+7n7rMc9j19P1rHurfE2XXcZMoDxgkZycDAwcj6nt0raFRvbRJXvdPtfu9F5dNb3N6MZJ82m2r0vpya9W0l69Pl4P4gsWaaUiPchY45+UDpgn0PBx/D1Oc158Ue1uMFAASVXGcj/dHbtkDv34wPojWdJgkaYDKAHgN/G4HIU55xkY9cn8PLb/RC7yP5bl4y23byQBnLA5xxnnIPI68c+vgsfaHJUfNG0VeXRvlSa7Lvou19me7h3VaurK270s1pa+3TS7V7nAzO4lOGL8c5PCn+H6Y7joOPQ1raTPJFLGsh3ebkeqjcRgDjBUc5OapXlr5TFfn5GfMfguR1BxjBB7c9vpUFqXM0MitjDlOCSFZQMALjtyc5HXGK9hThKDsl7yV7rr5aaLXvuvM9PD4qVKpHmtdtXSsov4e0tvnbfroey6fHFaxusgj8uYb0GMruYfIx5+Vs5x1A6Z4yOF8QWbi6acAgO3IBJGc4HbJPPp2FdNY3b3ENuN4LqFVmPc4OMfn6d66STT01C0MU6AvtyrY+ZSuCuep7c8c4GDjivEqYiVOfJPaUn7u26jrr0fr0Xc+vpVI1qKUJOLSi01s7KNk233/P76HgW98sRR+WC6E/eAzuHTJ7Y7HP4HPPbatqMlsZ3cARyjnjKJxgMvPGBnPGOcjvXJaPp7aZc7GO0B1Icn+HnOemADnHXP51sa7i4WUxneoVMnOAxGeRnj07546159RKpUbUnH34pyk1a1lot9L7eq1PrcBmtSOHptrklBWaWivaKTavu/Xvs0JpmpubqBo7ggEgBMnkHOeOOO3IywOa6PWoXmVGeNmLqDvJz8oyV7EgHryefQ15dpgcX6tu2xqxXIBIVsj5W9PQHHtjnn1qWUtZRLu3GRNu89RjoPbkkDufauerTUHrs05f9vXS00V31tfZPsfT5VmssTFTmouUZKKTWi1ir2bs9lvvfXVHn1032aRFYqNrKy5+YHBwRjjIOR6d89DWtcNbTWm8RKSyjoACHHGenTuOuaz9SsmlnUck5A554JPp24OSSOuORWpFabERWUiM4ymecgYwDgnk9ug/ng+W0G7tvfl0Wy39PO3V9rfT4fEqVR3v7jTTutVFxul2W+3XTQ5ia2cowCkoR2HI/PqcnsO3ajT2e1SUEEo0ZQZXld/655OMjscjg10ojiG/fEwVSWQk9QPU/X+Y6ck4V64k3vDhQvBC8ZYDOMYAP4cDGOoqozbfJJXXMk3ol9lqy1W3XTZrY9vB4j977VxUYpNXU/e15Xqlo+z162IoZHCsuSxJIUHsCcg8c89cZ/Guj0WeW3uApBAZScnlTuxnPYg/p7VwCXs3mliQNp2gjpuH+z0GQe5z9T07HTLt5ZoCcBDgOABkk8DPoM559faor01Fa2cdG1GzdraJL0dm3rrp3fuYDGxnUXLNqV72XxOzjpa+jatur93a1+tkuIXm23DHavQjOUzz1zxnBzjJ+tSNZhiWimVkJDEOcDAPUA5BIHcHHP1xTvbQwyRk5LSBWAHJKnkY4HuB/Ks+5uZYtzKzAKVALZHseOOmeexrxm2m1By5Fok33sttvNuz323PchmFNOLlGUHdxUlaHutx1elpfl69bXiKGIRRK24ybVXdn1yAR2OPyPOMVa8P2Cm2bOCfmOW4Zt3Rtx4GOPqT3yKw768FxHEkhEm7aFYHkDj06YJ6+vTmuw0mycWdskTGWNhnzM/MM8gNjsBgYyM44NW6klBqel7PTRJJKzutdWtbd9diKuJpVJVakKkVFKCck7SbtHdpatPXqvLYgs/9B1aOaP52t2E25PmUBTlgemR689T+NXfiRrY8ReReQ28cLrFCkzKoXzDGMDgDC+5OSfxyJRp5SaY+YUBwGB/iDDBH44GcD0z2oOn2txHLBKjbcOcAH5XAG0hsdyT2yBxz1rDmftIz1STi7PRN9bvrfu+nW90cU8XD2fNBupOnaMZTXNZ
Pls91e19+h5bbysU8onPXI7gjpk9g2OnHp6CrNv8AZ5Ebc2HByVwAfqOucnrn361n6lE+n3MyBlI8xsNuwAGOORg46dOg6YHNTaYVlmiBywZwCy8gDPU9CV9fTrwa9Vq9L2kL2sru+iTSdrWt6fetzhWNjz+xqTbqRs7rZOSi1rppdta7PZq+mzawNhmIKjoMDIIz34xzwOcdwSRnPa6XGqRReYvzMw47qc+oGQBzg4+oNPtdOgECNt3JIAFZeVLDsewAzxn8/XorXTh5cf7o4AHI+YgjJKnI6ZHGeT79a8LG4uNS9NuV1Jaxa3vFPXtda+buUsbaycovlavd6pe78OqTd7tp9b9CCMPmQMjGFDzjqGP3R23A8ZwR0xXQ2BjAgjSL/j5JPygAqVxycdAe30xisGVnhkBT5GWUELjIHGMN6t79sj6l9m1yskTmUbhkxxgg7VbBxgcFz2xjFeVDDOt8LUrOKsrpaW7N3fV3Vtj0MJj0paWakkuZu11ptfrrZPptr17e/EUNmzuqIzDCswDFTx0Bxz6DI4B5rK0p40uoZS20SAq7kMO64U+mcA5IOOuehrA8R6xNnTbaJdr78zhDkSPxliDjBGDuyTnPsK1LZ3ZbeVlAJIbYAMtz85xkYOMHPPUkVhXwsotRlJLn0STt26W1tfXp6XPo8JjYNyhzyjyRSd5K1mlu07N381b8+08UagrxRR2xQJFbICVHys7DmQjIy4HfHX3FeZ6qZf7OKlztxl2HzNjOCAR78Z69CK664mgnWdbgPGuxQqkgOdoIBBGQRyQR6HFcde3yfZZ7QYDMjKpBPDH7pPU5XufXJPpU4KEoaKN2p6q7u9Vr30fXfbS1jpjXhGKjF8yi1K0dU78rbevW7vu7JdDx6+tw17wSFZgcH5jtP8JOc5zk4xjBwDzWzpmjyXM6Ih224UucjBKjqxHUe44wOoJGTXU20VzknfJCGMysd25mOQy8YUAA4/Gun8NTo1w4++jhyfmG0YA2qRjhSfQ9sDkV9BKrVjSjyrVRinzJO97apOzejbTf4anZllShUrqNVqU5uys7vRxaUdNLJ2drXujutKkTTbFkwBgDaepYrnGD6AkkdcZ7nmua1O+mM05EYaNsjg/dJx14PPfgfiOtSX93IqCGMhYyeGHVSSeAB05A789ccYODeRTKVZXZhtXeSf4iDz7e5JxjArSheS96yle9nqtWm3a2j7X+elj6rE1YOmoQfwxjeLSck/dv1bW1nZbX1MLVUMdvJO4DEgkrt5K4OMnPOfoOvfNcBFcNFLKinAkJYD72OpGT278enc8iuw1W8eVGjOeBhkBwSBnI5GBkE8+x5riIY5HZtvykPnGDwOerZycfTB598+9hkox12tG91ZXdmpWXTptfe7TR8Vjq1aWKoqlZzk5axVntFNJ9/J9dddTe0iZnvrcMC+HBwOUGCMZHQ478HJx07+yeIZtuk22zbEjxgttwPmwCwx3PTg9M45xXmPhfTWfUbeRl6NvYkEqB1wenBPAP6da7HxJIrGQFj5UeEMa5ALBcfuxnIbgc9/euTFuM8TRcNoxbeu7vFJavvpe2+vR3+syOtPCZXjJVXL2ldOPNfll7qjdpqz03dn+LOasIkmLmUKXILAvwSOwzg89OPT1xit7SLWRrtiFIU/cOQdrDucDgHgZPQfhXMWF2AjqRtIYEY7DuCPc810ujaiILna7DZIw4645zwvOO3TFceK9olNq0ttGnKy93RdtO/bsdWBxWHtSakotSbbb9+TskubR6N33v1Ohm+0wzRqVIbd8rDocc4zzjngDHqR0APo2g3t2Z7QOziF8eagyAp4BJ7EAdu/XvVHTtJ/tSeGZMyRPHuVSMBe5LYzg55GenTJ6j0C00aWzjL+WihTkO5wxznoeyk9PX9a+YxWKhNJWjzRTcr2smrN2Tu09v0se5LEqi5e837aPwLVWtFbbvX8fwt6myPDaQwhXacqoOeck4IYdAVPb9RXb
2fh3T7DSXn1CON98fykNz5rj5NpxggkHsMHpyQa8oZL3+0921vKjJ2RnkDf1/HjIPGSOmBg9HqniGdtJktUWR5I4ioRR9wdCynOQe4I64PA61xU8Te/Nyyk4tRstE9N7vWytt/wAP4eJxDbp04zlCLlzScW1KUrx93y3StZbablVL61t7yVzhdm4BM8+irtySfrxXn2ukazqQhgLYkYqEIxtJ4IYHOGbjC57dccHpvDem3l5HG/zTSTkLF5o3S5yd5kGRgKCOefc9q9E0fwLBb3E+rSwKYrdd8jSSABzyWdMg4KnG0e45I6EZycn0kle97RTXK7tPrq7Pq9XcdTMqNJJKP73kjBSls9VZet9v+GE+FPw2itZZb66KSSSpIvlScBS23YwJ+7jopAIB9c11/wAQvFVl4K0DVvD0JiSS9tZIpFjUEv5qkIwBOSw54U8c9c1geHPF8tje6g8bKLS0DKNzcjYG2hhjBJwSu7v1r5n+JXii+8T+Ibm5lYyrGSkKAHEcZJG5lH3mPHzcdeBg1dFObjNzm6nOnCO1oqyu7a2tdabq3Xbw8XiqlN1a+Iqc9J6UqUJaqpZWk1e6ila+mnyPNp2aN5HVtzTOxYuc7WYk4U9RnIzx06mvU/A2tyaHpd48gLfaI2jaMEkMHACiPp8vB4HsTXDaRost/coJBI4RwSGQlCo5wWyABk8Dp9K7uPTooLeUOip5DloImXJlK8uSwYbQMgBSDnnGMnPsRg4whvbmV2ktGrX1etn32XXXf5OtmSftfavlpys3H+V3Wi+7e/TVNnj/AI4lmuXkumlfZJI2B3Qt1BU9zwCeTgdK+mv2K/hpfXerXnju5t0CRMYrJ3+VY4jkNKuQcq+1QpyM47AYr5z16wfWNdstJtRJcXGpXUUUUaAoESQ4JbhsYHGeo785r9d/gP4Nbwh8PrDR4VWCdLTy5IlAZnZwGAlk4P7o5KEqchmzivVhP2eF5EuZ1LuTTTcU7NJvq00rW7+h+dZjXjLFVcUpSm/dhT5tEo6K/L0tt562Vj2rQbuGS1u7eVZDNcI8eB8yq+RteJOPu8ndngHp0I6bR5IYPJYKrzpH5UrFCpkbPL43HDY4J7EAj24a2t7yG+RWDo6MpG35fLt3BJldehUYAwSCcnngk9tbW0oDygNKxkzvxjMb9UAH3QOnf35Brz3O7S7aa7b3bum9ntdd2eTGUpcznJLm11tu9Eu90rWul6nqemNHNPb37ufJVVQwyD5BKM5ZQTkEEjLehHFddcXLxlJBJHH54Z2KYdZIgBtRemJAM88nJ5HrwNnG9paCZCpXYCsOBtDtncoUk7iOx4I6+tdFasLiBFRik+zDAsf9HBxu2ZwMH1/Q81jKcnKyburu78mn3a37/wCZ4mISd5O7lFKNnrpdPa3n1emu9joYLy0uoYo4leF5Q0kjHgK6kESkHqpzwufXk5xVK7lhHmpc3hlfDYljchsLgqAATwfT+dYUt06PInzOShQSMfmjUDk4GODxyDk/UAVh/atlxLGZCUEZRtw5JPUqc9OgPbj8yjN3vdt6XvK6vpZaLrrZKyWh5s6Sld6x+GSs9mktFdaprdWXX58p4l1ISSxMnAhuCpZj8ysD8oI6nGDhuhweleWa5rd4rym3YSzcNIzJneozvY84LY4zxkmvQPEnkYku1b91CGUwtyruOjpnkkc84zzznBNeP65PJDBG9tkSTqSMksI16lD3DY7Ecdc9RX0GFrq9m0m7Jray/wA7rVu2px1YxlFOUEnZ6t6K1pXW1vO99fJq+ZJrKrHciRBJHMhLR7d6STOejAkbSpB2jt2zXlGtQ6bLNI7W4hcLKysSCscrY2KFUDZ3y2TwOwFdhdX0MlrOgxhUV5W6usqk4IJ6A+oJ6e/HnerO/wBlleV1fcMlh1Zee/Y4Ge4PTg13uo5Jxcm02ktXpdpq1915q1n5HjYmpDkkpXVlzK0rJv3dbPe912tpucNcStbs7STKyOxBGRg
DOCQ2R14PQd8c1wPivVbWIBftCurx5jQ/eLkhQQM88njjsareLfEFvEj7AyDeIwBJh+M5ZeO2eSR6dO/iX/CQRajrqXEvnSaXp7IJC77S4iyWKkjBGQMgkdPpn0sDhZylezSWzbW65bPSyT1319NWflvEefUKNOdGFRSm24SvJ3j8Lu7vVW79OyudB+0h40k8LfDDRfB1jIltf6vBHc3MeQJJElyZGZf4S/GD3x681+cxeRYxuG5wMtkAnJJ++cdBjrjABz1Jr1v45fEGTxr4zuLyW4M1paJHaWGFwkUMKsqxD+E7SADgck++B4w140gYcAY2kD+IDp+B9/17fqGVYZ4XAx54+9P35WSavZJpt66L0T1Sdz+SeNcz/tTNasnNOnho+wgm7/Ba7Semsrtbuz6lqIvnI4BYd/mBPUnnnHHb0GBXc6JKqqqAsSgJDdCcgHp6cHvjoMHIrg4yMxndnj5gBjPPHrkH69fau00tkeMFTjCcFfUZB3fTjOM9MjPesVBVY3tfTW3Vu3ntvZfkz4V2k7q10r2ktGly7aJX26aLoy/q10GhdRwf4skEEgkAAY/Dk+tcZICzMw2gEHgkBuOx68H2P0Irp7+FyjbRzw2TyDnHQcfUHHr24rnpFCglgM+pIBz+p45+v6F4aMaUFFK1ne3XS3+adn6XMJybd37unS1t7rTTpra33szHI5LDHIXA6gfn1Prz6dc0wlAMAHPUdM/U/XPU9fXjIkcrluCXBByT1/DPXuc/41DnPOfmBC56e5BPc+nA46DjNelBK19WtH59GuvnfX52trz3vfz3s9L9vl5iKeQMnBbB/H+Hv049vftV2N12ADBIHXAzjrkc/dOPx+vSiSAcEHJyRtB4x0PoG9+tTRkAKTnOdp55x/ex1z0xzj61rG7a3/yTtdq39X8iZO1mtdUn2s9L/Le5YAYEHjJPfsSSOR1/T8KnXjlsbgOoHPtzjvnnPcj0qs3BAyBn7pJ7H+XI9Pz4qRJB3JwevXOR7n3yef0wM7xj0V3e2+tul722/wAhaLR68zUla27a/D56ardarJuLdxkDORknPqOmPcjkZ60UOxZhg/KQCc8ZHPcdM/16c0Vp7N9Xbbz6fL5eRTSev6L9V/wDyZpsqc7gcgFRySOykg5A+pPvTd+WX5QzqwBXPBQdAD0PHXsfxpCF+YHhyAQAfl3An7xxxz09h1xmgDBVjlXGGUHkN/dB56e/YdutbumrPz3dkvn8tfPr64p2SaTstVr6abLpZf5bD1YhcoNnzEE5y2e2ORgE9Tz61Z86TBwV8zaVKJg/MRzkdjgY/riq5Gd8reWrEAgKcAOOgwO57jocciq4LIyMcbmbJJOWOc8dsgD6etc7pJ3stdGnu7r0W2myV/Mesr9OrStr5O7vb8uhol2XC4y3G7A+YA88nPU/06UiuA5LFoxhcEjuehPPfGPqM9MCq6TckFiGcBWGMhifT068cjnj3qfauzBADYzkMGJI43N6H2xx7mkot7bbPy010/L9DSNklfRu2nm7X1vrvZvX/KyGVogHYA5yPr1GB7/nVUysCARtByNpbnjsBjp9c+naiRnIwpIbAKDIBK8849OeevvweWnd1KnIxuBxnPfP4ev0PNaKldNvXo1922m33aaDvdJpXWl9Wn8knfz0fpuhwkKAOmACTnnOPbjGM8frilW4lZjmTCsclcjGD0GOikdOMjPrUSqrNngKSWIPQ4xgcnk9Rn5SO2e7NqLIduWC89RtA9xwABxx1/nRTpxl7vK12bi7t2Wibez6u+iskVBpSVrK3dOyWjS1+53d3foy9EzM5XJJ5YZ54yOQff6+mc4FXIyqzZztOVwcZyT6cfQkd8DJrKjkbJwwIJIzjkAdgMdh+uBjitS3Yu6qeRgbuATnnGCOgx29uoxgbyoqCV1p7qb2Tel216q2p6FKSaSWrul99uvnbo1dPfRW6C0lbzowAqqAeR/EcDJA5/z68mvTNBfPlkSEgEN
Iq52jBOF284I4IAyOeDXlUBRZo9wZVJIGeh445wcZJ9MkjqRXpnh6UqyHIKvj/fwvVm9zkY649s5rzq0VyPl25trW2trtfW9l307nrYV2lK792KSXm9F3+VtdrevsFlIHUblZo2IP3cbsYyRjPtkHp3PSvUdBlhXyki3LMuPLbJDuvQDnOW4IHfoSBXiOnXk6XEasQE25ReAGYdCucgKe5IIODx2r1zQrth5LyAecgLRvwdoGMMDj+DudvJPPQk+ZOLXLdva61dtvXo10fTW3T38JL4eiTeutneS33TWu/l2tf6H8PW8ksMe4SNLMFfkfMAMZVh2QZx1yM5717PoUZ8y1so4GWbK+QrHAYcYYnGDnHfqTXjPgq+MpjmedSzxmIR4yNxwPlOR0/jyPp2r2nQZZV1SxSQ7V3EmZSSVDYKgHqMc8dueynHi4tJO1trvXRXstmvW1u/m7n1OGlZRevRJ2d9EnbTe9+l99ND6k8CWLCdJUR1k3ozKpwpcY3ZX5gP5YwcA5z9r+H7WZbSGbdzdxBogD93AGcnGd56j054OAa+U/BEJVtLeK43G4TM0hUBQpC5TAON56Ag+oxnIP2L4XjeS3tIwGLW0YZgeSqZHy9Oo/rjHXPwmbylLli2tW+ZWs+W6aV/Xv0s00fWYGVoq0bJwXNKzdm2vdttfutXf5s9k8IvLFZIzcW+CjMT87Ochg+R0GOvI49xXqWjySW6rI/wA1nvOwgcR7iOAO4z0OePpjPlOlTo9r5MAMbM/yq3yZ55IBHscE/wAjz7V4f0ya706O3UEOpUyDGS6dd2A2cDDdeT1AAr5CpSvOV4pN2cY2Wtuul1d3XzV/M9ONW0LylKLkuRrRK+i5tlbTa2v69hDaIZ4JV3BZVB3dgDjAHt9M8nuK7WDT9iibJyxXbggEFc/ex6ggMOOoOSKqWFoHiVAQjW0ajkD5lUdB78+/Hcmuo0sLOqqCMdGDDkbeMY6DOevbgnisauFTS5lytu+mmqcWkr63t0317780cTK7Sc+WLcZb+8tL776Pa93+cFo8i3MCKFG372AMt93HcYxjk9/wr3TwxBNIEkY5XgbV++M+3AwOe4x6dK8rt9Il+1q8KEANt3E5QgdhyPYZ69wOcD2vwvB5KtG4IOxTzner91GOx4HT+lc+FpypVn7ROMb3i91KSaet3azWqN6laMqbVNpPS+jUns3dabL5bs9a8PXS2zoJEBJBAV+ORjuBx1GD9BXqdh5V4ELIBKoz6rhuoB4yR2yBj88eN2EpRg+dvlgBlJxwCSAuc/NleRx256V3GleIhbgrJtABCgFcspIOOnTpnJ9xntX2+WZhSjGMKr5aeiUtH2TT2lvu776WPBxOHqTfNCN5aXsrWSat5dr936HpUmmwPEvKDBJYDC4GRznkE9e+D+QqaxNvaSK2wuU6MOSM4OSR2B6n+IYwDXNNrjSR5ikDAqepz8xHc84PqMf1rU02/PlLJOgVt2WJAwyjuD6DjHGDn3r2Z4nDurB0U4y5Vyzknay5do6LXvrLXbqc0aVVRaqJy1V4K61dr79Xr072Z1E09xcFEg/1RxxjnGegXPJPYZGcda1LHyLNhLclS3BC5wCw6DPUMM9j2rmYdSicnyejcIQMnJI+UkdMHHYc102mae92VuLtlA3fLHuxwMHOTnB46dfxOR62BlGvPm5pVal4uzVoxfu6qytbpZW7NdDDER9lFJx5I221TtJJuN7K76vv6OxvPqzXCKYG2x4KsAMfoTyRn2PXtxWhpkM9xKHkY+UrYDEkAgcAg929enccVDbR2SzGNlLRjg7TwxGNuW9STzxz+eeutki8lSibEOdyKBuUnA3DHr35P3R7E/TUKLqS5p1FaOnKpa6WflZX02s7PS55M8Ryw5YxUU9E2lonZ3cr7q2vT5G3bxxWsMYAEkjEcg5O316jB+uOnOK3rVklVVxhgoDAjDAYHBP1HGfqO9YtnsUL5siqoGVdvvEDkDr3/XB
HWp/7Tht5D5KLKzNkE+vHIxkD19D+dfR4dTUeihZJJ2Ta0Ss95LrfTVdNEeVUqJvR63d25JLrr1fdbnTGFFUcjcMFueij069e5H5AYqmxibJRcgZyQeB+Hp6Y59+lUo7ueVQZTglck4K5HdST6dMe/PXFXLKRChbA6kspXn5T65J4yR05469R1pJK6T6R6u70ta1r2Xe6Wl2Yxq8kkppyT19167rV6rRLZee3fKvLbzMukZIAO4gHcevTHXk4yf8A9fJSOkVy0bhgu4na3TAHUqeOhBzkZIr1ZoVkVWWMqWGVBUhuTyPYenXHGM5Fc3qHh5bmXzfLG45OOdzMD0/HngjkYrmqL3k/ZppWeqbbs4p30V33S9L6Ht4XHqL5ZSaVlG6alZaK72tte9tNmSaPqFtJHFEFK5wME5J7cE9jz+Z6Cu9tLGOeJWwQCcgHgDGO3Ufz/OvMrTSrq2u4GACrEQBk5G4YJUj1wOnP06V65pjSOieYy8KNy7epxxjB5b24465Oa7ac40op07RTam09JWSStd31dl56vzOXMo8vLWpzU4ys3qm4t2batpqmrK/T7ohp4UtLHhWGRjqDgcMeufr26nFRvCduMbWH8TcgnPP8PGevTP8AXbkaKMBSANwyBjI+mM9+g6jvziqMgYqcKxIOVyMZ6kA/jnnJ6k9ayxMpYm8ZK6ekea2idubbXVrRu+nVHLQru6TbabWrWi223v1vond3Msw5RtyAkcgDlj1x8w6EeuO/GarxiaMspAVQPkIGXUn+E9MdADxnOM8GtJZG5Vl27c4YADOOgY9wOc56nJ5zTmjD/PtPAyDj5eO/XGCDjHT05FeTUwFWM1OnXlHlVuVK0XqtLrtrZvS1tGz1FVjJNSS5XouiS076N66Lp9xh3UEksEm9iygHB24z6Yxz7d8+grk5tJuBl0A+bghh8xByMYPTd1H/AOo16CYyXUsCAnXHQg5zkYHOeg/POarXdtGBu3dcFWPy44PboMD3rz8VlyxNp1HeUFvK7vZxesrX6Wb67J2Z24THrDy5Iq6k07rXfluur7O66dDyK80uORGgMflEEkjgg5xtLN1x+Hr0ArzbWtJtrK9idzthdlHmkZ2luGGf7ufu56e+a901aGBSWDAM7dssCBnrx9MAnJFeXeI7OWWKVY4zNkgg9cKDg89QBjJ5zz78fE5zSpKE4xhByppbWldKzVkrbJ9tXZbn1+VYipUqRbm4wqWUldre19ZXtZvp59nb4K/bC+B+g/E/4a+JrWPT1mWXSLgRRtECs1z5beU+zlgyNn+I43fhX8C37WnwC1nwT4l8QaPcaa1u1jqc91CHUtcCESOVXzyFDYH3l2cggA1/pHeINQs76zbSGiUGSNbeX5Rna2Qdow2XPc8duO9fzI/8FS/2UIb3V9Q8WaXYyxxybkuXQ7WE0gbbK4CHfFIRgHACYPXdmvkaeJWHxNOrTaUZcvNFSdlZR67Lzu31Vup+nZLOOKw1XLca7Ts5YeTaU2lypJa2d4u+2qS2P4m/GGkSwSIY4WMkRaFvNGHiDEBSq4YqBtO1iTwegzx5VqULxFlZC7xja5bkqzc7uOo46/kMV9y/F/wTfaTq+q6dd2Ys7m3maNyz7wWUnygsm1d/AJZgB1HAOK+Qdc0qaFp0Kur4GCxJU/3CTt7jPOMDoeor9IyjHxrxjGdotqNru6s7Na36LvvfZXZ+ScXZLUwtarUjC3LKSbSs204tO2/VNXstX2VuM027ktblWG7cX+7gkEg5BPI5XnB/2sZPFe+eCfEP2a5TdMYwSCBu2BHfHlEA5yWKkbBynPJz8vzzKjxyOhPzxuQzg56Y/iAGR0xwM810+iamUnt3dmLQkANgncwI5PIGfQgN6cZyOvNctjjaEnFJ+602ktVbdLXyt1136Hz3D+bSwWIjCUmlKaa3VmrNve12k1bu+x+nXgvxQ9xaw+bJ5k7RbCcghTHtwIz/ABFQ37w8YBXrXu+m3wubaMS
EeaHZmVAW2xrjy2jbI3KctuGBjPPPX8/vAHiaGWARtN5e9VA8zkox+8ApPA4BPPOeoxz9aeEPEKPsWdw6og8sgFg+8YXe+chOM+/vivwfPsslhatRcrjaW1rPVRafpptbvrY/ofJsdHG0qcoVrScabvtZe6tdlb8brse6w3UkcoWTY2wqQ3ChWHHzNzjIPK4wOc1dkfzJPNjJGFxIpT5XL9CCW4UYOGAHPPWuWsZBcGIsykJkyFRu44OAc9V449TyRxjoC23zmLFw+CgHykRAfeC85zz7np6V8jz+znrFuStq9rv7rat+d/U+jdKE7+43KySqNpqWkL6pX1a6W6pa3SraiXmjaB1WMRg7nyGMm7qeCPlIHzE/dA4zkmvFdb05IrmVkdWGA2Qch+u5lQgYBwAPQgn2r2u/iVmgaFEYTKXZ3PLkgZQDIwRgngnr65rg9YskkneSOLfI5XDDO0EA/KgPRVwQ2Tg+nANethcQly3bi3Z9OVPTdaWvsmtbW6pnLUwtOV3KKc1eySbX2XaVlp176+R4zJDNHNLIqlXUKIFJPzBiRkcfwnjkg49BVuaIuER8M+yMuSA3zckqp9vxyePSti6t5AjfIwKZLELwAp4K8jAyeByW65FRNZuyx7mBB2uQeSpycZ6fMMcrn1/D1ViU9Xaysm00k3ZJJeet7a+mzPJq4WcX78U76RjDs0klbV9f16HMGBbhzG25YyWUHqBjo2cjlsHI7eoGSas+ns0U48stEg3BifvdsgYyFPYY65rqZ7RY5JUjcM7AozRktGGOOB0UE9Se2AcHNQlSqhHVkeRdhVDkFWGCQegxyD1P4kCm8TLpomu6Vttbfhv2XW55tTCqV/aU+VppWbbtfla10u2rJ62WvRs83vbWZIVki24yQyEjKoeobjG0AcAdu+c1nx2o3BAiuXAkBQHcpU8hRwApz90nnsTiu5vdPUx4HyIgwwJCkrzjaTkHOcEDrg9qxhayIrEblLhWUSDG0dNgbOc4xjjueG4x0U8UmopWVTtLS+i0e9772VtTya2HcajTi7RdlKN2mlZWs7tWvv8APyMb7LFCrSEMZZCGijHTA75xw3GQMH1BqBtrAtICqqrNs+9+8/2xgZJ4yR0x+e/JCGXIw80ZeMhFzs4788EcZyOfxrGvIWjH7tCwCBmB/hYnkkZHHGM4P6iuyFZStfR6fL1fzVvI4Z0rNprqk393zS/rcqxzvBibG5WUqpDfNt78D8DjocDNWHUsizI+ZD1UqdxH+x83GAM4Ht0qJFSZDHtYFccqOIyD91Tzhec98c5OetweZDtCxiXaokUEAsEIwGz3b1HuOQBWqqqzXO439dbNb2TdrL5eRVOm7pW7q3e7W73S+f4MzNUtjNFExj3Oigu/IOT1JGfv9RnP4evFvbh5ngKgI6EjPDjbnLHPU4HPGD1znNd/P5siuhbAVupIDq6/dBbuGyQeBwD1GMcdqTPGzzQqrkjY27GU3dXUYyGGAGIxjNbYepOb0lGztZ2ej0XVWv1S9U9kd9NRi7pzvF2bbvFpcu11Z630+9ao8+8QaJC5JhQuIhgFVx1zu9eRxjjg9cHpxcWnIFkOT3YZXByeoY5HQ4I79AcYNevOouIdjlWcnj5sEMegHHzKBkZzgg9OMVxmo28kD7pIxGNzKWO3nBHz49D2Psa9jDYiorQcpe80k21e75U1ZJ+ey2dr6I7ac0nzWv0jpdLTrp573sulx/hi2hlQwsRuG4A9OV52gnn6c89a7JXa3k6EKnViPvY4OenIGORnGO/WuX0ZZLeVWVQ0T85XHIJ5Oeu7oPX24rtLqDzIPMQAlwpG3tjqvqefboajEyUptSndtpNptW27tK++yV2tT2cNWnGnGzad3e0m97aWd/kvKzexO8UckXnpksygtt7dckDr6e3fNQx2ZkQkt94cqfm+XsWz39uvr71rCZ0b7PKSNmcL6nAAyccq3OM9wavzTvBMi7NgcBgw5A/yAPrxk5r
lbeqT2Ttza3V1829td/wPZw2LlGcWppQ63Tdm+Vbdl87PTYqW2nwrdsGyMcbQuQe27knJ7n0/IVvXbxxxxqpO1TgufulhjgDj5uTkHHr1ANZ24rItwhJLZBAHIDHB+nbufo3bQkt2eOLIO2QgsDyR7nr3yeM/Q1zufO0ptP7L1dr6bLv3e17XStp9XgMfKnGUYKKTV25e6nL3WrW2fp9wlrFFcXMAOTk5LDkr659h3yfQDuK15rOBGUhP3W7Bc92OASMDGTkZz0xyeeKFvbSpPGsaYLjg5AwD1yRnHP8AMdOldU1s32cBVBKcsrEAk4BOPfGPlHNZTcY2im3La8XokmtHpu9ttrO59Nh8wmoxkqmrUYy12lJxvf022v8Amc/q2nQx229GVw8fyqpHHqCRwDwM9685NuuyZAWWVmZlXORnqc+x47Hj1PFevPH9ojWJgFzwkeDxt749f/1g81yGr6RJbs7OhQMvyY+6QR1XH9efSsVVXu6NWavbVt2WtlfTbe2uup7+FxyUqdD2klom3zXUn7r21T1323v3S8yW0XbKQRvVye2MD7w6cnn3/nW9oK7ZF+YAiUHLDPA6IB3zz6fQ85puojdl5UkkHjKjJ5LerjjA44J610mjwwBo5WkVXU5aPIw4z948cdDgZOeBkit5u8J/Fa17trVe70sr7XaTXn1t7mBxFOnVjUpzlJxTi7PRN23S6J6abfcdpdEz21tM2DKAsYwAdqjOBjsQDjPOeeetcxq00TMERQu/asg7g5wSw754xz+GOa6mPZNBgKxQZVRn6bSeOPQ+3tXD6qNl8pTJZsg7s4bB6D+8eue/tnNeRFe/7zfLd9OaztFptvy2+7zPa+vSdBuo41LSXLJOy1srN9LPySv5XKogYSqqqvyMNrY3H3/IEEDsRj6+16HFbjSo+CZo494ZuCxxyCOuF7c+4PArxxZXVAdmHVg2Sec/Xuo/u8dua9c8M3Pn6fA8qEu+8LwMYBGTjPCDgZ69xyM0V+bRxlF3XLs5a6N30smrXWtnt66UsRH2U+VqUnJO26Xw6NeautbaEyIk5YkNuJIUgYCjPBPX5gen54rSj0maO1Em0yGRwCxH31PqfXHr0J5GBWlFCglJCqu4BnAOSp6EqvccjHcE8HvVW+1ZohNa5VI4kL7t2DvB4Uc9WIOMYGBXLP2llebcrqKs9EtNlt37+qM3XqTukoRipJztZNRVru2n3t9PmeLePtNhi1G1MA2LcKHmXB4ZeMY/HgZ7dgTnG0rTnN1EUACxZw3IXaedpB5OT168j6iun1KUahc+fcIxIJjQEEhIwTgqv94dSc5PX0NbOk26iFjHBu773XAYdOp6gZxxjOP4ccdqxE6dHklLRRV3paTsrtduzeqfbWy8StjFLESdOV0lGm5t2TatFPe33vfyNzTkVYo4WLlRHhQPus/8KL7rjBOQSD9MdtbW5SLYynJ2qpA+ZAOvHfdwSe2AcmuSWQwbSQiGMqwPB2H+IDOPm6cdOelb9pqZcIxcOSdrBhhVDdSWzjI/IHHBBrwa8JtxcVJpu7fVK8W7aWX4Wbvez0xqYp6pt8ycWotX1TjfbdadbaO70avSvokgebzUYuyExjBIAYHnGMljj5T1GD26VopLW3tUkkV/NVsMS2TIzEkbc/dCDknngitS8iuJZZGQeYFjUjI+6pHPJ9OCPQHoc8cHrFxcWkEyhcgKfLx2Y7sgEjqeBk49wO/bg6d1fS7bg7vrJra6tp+mnn14TMLS96bbgk3GLWitG1k9G2rfcc5q2qfa9bijE+2SORiFUk8AjIIHBJJ+bnk4IHQ12dvqMsrQxAZcFN2Wx0B4KnB2tj73c4x2ryXRF+36gbhm+cy9H5IOSMknkKOOAOcccCvWNH03zdQjwWdckTyE4UquNqR9QeSenp19OnGU6UHaWns4X1W8mo3SdlfXbv37e5lmYzd6tSTSqSdoXle11ZPs+yva3Q05byWCHbMhkDKQhBJ2erF
uhAJySR/jXNarIsdq87BuASDnG5fYduuMk5NbHipzZTJZ282FaLLHHA4GVPpnOFB985INeZazq0yQbE+cEbHyeVU5BBHfOTx1HoOtc2Dwsq8ozp8kbyvK+/LeNrdFo7+d9dWfSf2go03LmUHyJ2aXNaye7112233uYkE4kvZNjHBBBBJycn5lY9gAcjjnJ+td7oCNboNqKDIx+bpgE9Cc8bRnB7896800ti15G7Lw7nqMqMcDdnuc+nHPbmvT/NaK3jWIdUBIGAATnOT0yfpnrjFetiqfKlBJK6in5tWvtbd6pb7NXe+uU49K2IqQlDlatNLV/C9tLq2raf8AwdO5WPz2cMdmxSwyGDMAc4JxxnnGMe+OlOacSQNHHhpQCpAUHJ7DOc8c5GefbIrNa+fL5wojHCkDk844Pc54HUnkCktHaabDKRGW5KHBz7gdGPHfk96wo0mo+9J2Vmtk38Oj0u9L39T6ihm9KvJpN800le1r2SV9tL63+eiRgX9nIYvOYgPzuVsg85xj/a5I4ycH3weXgxHMQ/KhsFP4sdeo7n8fU169eafbvZTBxjYh2ksQz9SMn+InJzjtjkDp5xa2yNPKp/1gZigIGGGRgk4PqCvTPau+jVtGSldrRpaXUdNVZXVtbf1bKtRbrUZ05p+822m21pHVuyVnsl1u/M7bwnGGJmJKON+1G5KgYweo2g9M5zwcDHXJ1q5klvLiJifM3EKCQUAycMSCfmOMg849K6vw/Bbxafc+bLtnKMCAMEgY2gHJ78c5/HqeSv1TdMzDdMGOXY8n+76Z6n8DXPGSlWlezVtLau/ur5rS+3onqew5zhho03UUlHmc3bW0rJ3211+dvmZttCqhsnMykBuwXOfT17e45rTtbf8AfRgHkknI6AkjjJ7dMdRx0HWqFnEJpY4kjZp55VXepAwATyRjkDnJ5xnivW9N8KTeRFPJAQhKqjsME4xnB/iOe55PHrxz4utCkpRbto97Leybte1r2+5ejVGNKVOm4SUEpKW+stY3VravXa/z7eieBWtreBJbl3VI4gsisR++P8IQDP3cckNg56cA11V1rdnKlzGtwiAZYIGy3y8AA45OPujA4z368FNGumacVLiKU5WJVOTk446dTxxWDYyBtVSK6dkjddzbsj5SQQzZJ5HP8hxjHyVbBe1vUjKSjOSdk7rRLp29E1d26Hqzx1JzhzpymopRVnyuC5dUr3u++nR2Z7f4dsU1C0Fx8oZhJJycSMExtUsRw3OR16noBxxmpQ3cWpyQmVUi+cKABgqCBtbnB6/Lx6nB4r2TQjpNpoZeadEjWPMbbcb+MjIzlgT7jJ5GK8q1+e0uPE1ktvIrm5RWk2nEZII2ocZwTuJK45IwGHWuWeHdNQbtdPmVo3fKu9lfRvR/M8dZhF1cS5xap6+zbWqknFtRaur/AHWt3Zp6ba3kLW/2V0VnlVWkLFI4jJ1IOPunbknt2zXUeIvEVzp2jyafE67yjLO4bKuGHIVh0z0wfbHUAdzY6Gkml26qqyeZCjyME2uoUE7c/wB1eitxnNeN/EC5itYrq1OY2aKTDR9JNmNiSnj5gc7eR+pz0U6bXvaNSs21Z2bslo1o0+q79Tgp5tTrylHlUvYx5pc9lonG0nK+t76JapvXqeOaj4kuLK0ura3nmQXh/eorbnTJOCTjkMTjHGOmfXE023lvp1lYSrNMqrJITu2sw4C8DKgdRxg8gDu+x0e41aVmDHy8CR2dQV+XOSWJGFORt2+/avUtC8PRW1s8jmNfLTzlZh/AnVVY8BxkYwM/lXo040IU1bmcrpN6Xd7XTbVrJ9En6s+XzjPI05WnNNXilBXa1ajd/K+r0a36MemknSdKDrIizeSyxN5SsGPAk3/NlW+6FPfJAxjnzfxJqrWNs8qKiysFjKq/+qZ8759pU4AwM4PGeM847W/1IwI81y/+hwyMERWyWC/wsONuSMgAEgD8a8RuzqPijxFa6XYhrlrycQRhRyq
FhuZsdkXGSfUdM8+lhKM5OPM04NK++mqbum2mt0ul9GfAY3OVCFWUakXKaiqcLcylJuyUUlourbetmlpt73+zT4HuPF3iceJbyFvI06Z47SaT5oZmGMSIWGMKMng98j0r9U7CK00u0hjt2kADhHlHAkZgBjbzg5BA5/OvA/hH4V0zwb4UsNLxHFKkUbyFAAGlIBkYt/vEZ6d+K9qieS4RZF+ZUZWO87YyoBA4OcsRkgdCRn3rrq05c3NaMUrJR1aUdLNcutn62b8mjzKs6k4RVSTnKVpTak0rSSaS2dlaz7fcdrYRC6uAHDOGURyyZy7Bc/MWOAVGRgcYrsLGOWJWjtY5DuUL+8AIVD1ZRwQeeDjj0HGeK0ETrKfMb/RQBsYHaxZuCijB5zg4z2HQ12lu7RzJL5roqLIYmJADFsffHVl44BI9+TmuCpHe6b2dovTpZtPt+PV6nJUlbSNnb4Vvfby1t3XR+p1sen/Y7J2mlLErvRGbcFLZOSOMH+4P4e2QRUUEEsirCkhYvh8k4KKTkAtnOFz6Y5OKdE8d3bqZS0hJXeQQVJXqMcc5x3yMc81oAxQOnnJIZJxutkQcrEuMncOnBBCntkE9K5rJNRldJq/M27N6bqz00a8vxPNqVLp8zfM95aaNW3XRW3013fYzpk+z3EgmlEoPzYAwxOD8uATxwecnHXB6DldUvLbzCXYwsqk7UySEHXCjlmbPUHtyK6HW2Xz4yhKoqZJYH5nbGMkdCMYxz1/4FXA6pKyTpOZUJZssMggBSBuJxjLZPGMEg8+tWa5Urtq2qutLq2juu26V7voc/MnZtttq99Wu6jtZN37tfejC1S484SPGHaJCRGrgtuQj7zHPbGduPXnivNdWVXjmaZSikOlqwOGjJxk5wcBs4xj/AHa73Vb/AHRSraxhSVyWVOJM9CD0HQjAz9a8rvpJmZIrhQ0SNmTDfMcZ5X17EdyePXPVhufnb5lZNNqNnJvS+iuum176u7PLx9TkpyTs5Pfulp0Vttvle5wWpTDPkGMwySRs5mztTZH1BPK5O7oc9+vby/xBrbWsEwjVXXyiqZHCswI3L24GeePpnFen60IDBcjzGKM26HI/eJtJ3RqeCC+RxjnBr578e6g9rAbjyABBkiMNsRQwIYyjBCrwMsSevCmvqcBh41pKOl5Je87p3birpba7aK9kfm2dZtOjCpaajaLelk4r3Vr0/LZ+p82+O9d8qZY5JN05kbaijCSGVuCccnpj8T614z458TnRtDXTLaRRfXLedKUPzoGwWB79CNvPPIx1zoeLfF9vPqtxcSFTDp/mDzVIbdMM4G7gFFIxuI45IHXHzPreuXmsXs97NKQWZlX5tx8vJCAk4y+OpwMgdOmPvMryyc6tFzgoRpcsm1eV9rW6LTXvvufzRxXxVCEsT7ObVWpUdOCundJK821qrXSVvVmdqDNPLJNM2Xf95znccnJyB075I9Tz0qmjY4wc9SM+meVyeDkfTOeOtJJK7uWckkrtHfIH3TnHQ92xj0HOaYinIbIAGCQThvXOecAY6YAGc5r7VxajazajHlildWStayWvS7smtreX4jjMQqlaUm+ZzlzabuW7k9e7V3r0VuhaEjKQQc8gDOM49QfUAetdVo94qqBI2WAIGDxg9yMnJAH+Htx7MpJBBPsABk+2Dxg8Y461ctpSj4U7QRyTwQeMEnkDGfTP65mFPmirpJ6Lbppbzulbtr17efzW1bbaekHZ3bte+l/R6+T1PRrm5i+zAgqSVHQ59cH6dscdTk1yc5bcemOhyeD0A4z0x68cZFQ/anKsrN34BPIA6EeoPoM9s4yKTzd4BZvmAyf/ANfqB1zxyM8VKoOnO/R6Oy2207dVrvvprYxnUTceaybVmlsrWu30S1KchOcj1xjtjP8APPH1zwCajUEHkYHPGRwT3PPYd+g/HmWVlOSj55yVwDz6/gMY9ufU1CpycHJBB4I5PHftyM8H+ldSTav8m7W2tq7
Jff8AoK61WnR/Nta269NV9+g4gH5iSMcfqOR6+oJ/+sVycgKwOenPIJHP/wCvkdBjFMOegB9x2AH+Py9PXqc0Z2/N024Ud/m+g7Ek++fbitIRt5t/NLb8b9r7aDJWZhtLAhsj/aPfGOcAcY79x16TIxG3O3BJyGHPPpnOe3/66rl2OOcHAYn254A9RjP+GRlyFywJJ25Iy3b1zj6dCR2710xXKtbX77t+W3ZbbaEvRadNLetrW8/VP8WW8/MeOwxu+6Py/THqOuaKXK5ycj5RxjIYjqTjPufY8fQrZU00m29Utm10/wA7j5o919546hdHUlcrg5weGJHHOemM59PwJqVDuD/K/wC7O4MPu+zepx3x046cVI0a5ZudyquEUnYM5yGIHI6dunr2RYpdjbVA3Y/izt7nIOMZz365PsRXNdX2s7NPytf7k1+VjLez22Xlo1d6u+kdPV+g0rIVY7gckNgHOTyAR1yCO3H5ClJVdwZSXGCAG6sePlGOMH/PczgIF2HCsoBYjkFiDtVfcc/7v44qBtxkQHZgsMbziTYOw65wMYJxmspWVrpx0e/XVb6LbvrvqELt2atrborpJNNXsl30d+5CqHALYVySVLHkDJxz6ntxz29pgrqzsXOcjPrkj+EZ6ccg49fWkUKGDSAkZIAyMbuNpB6Y5wT356YzUkm0biuPmXG4HpnP3jn73YkAYzx2rJVIydlZq9rp31snoutvkzS+99LJq9la91te7v591ZX0Dz1CgbgGTAJ9RznPPJz24xjGTUx+cghtyOASQMsx785Hp6cke3GaArKVKAhiNrluDgjqT1AOPpzzxVxMOHywDKpK4+RWVey4788jufTArtppO2l+uttFbtZLd9LscVaO0m073dtE7LZLTrp0V/NjWPIHA2bhuPy9B0Aycse5+vTmmgHcz7jlyFCEckdDtHJAz3/SjcFGH5OSSc4U4HGDwM85J68DNL8rEkZO7kMcbDwO/OT6kD734Vo1yra+t07q6+HyfXy9fK4L7TW9l6u6drXT0u3tpvYkBwhUcuD1BwQc/TtnPHH4VftlbKOGCtjcc8tjpnjv2A461Qhgk3KyoQcYzzgk46HGD9O/8riu0b5bHHBA4IHQ8Y79PxrKpUXI0mpPTbV620a19X89D06NNJN9eZS1stFy9L6ap9NjoIyd8anLAjIJHPIGeewOOOmfzrvfDsvlSfM+QqFApHLMwGABnB6f8Brz+F2YxFSG4J9TnI6D/O70rsNCd/MyQFIw29ucjHOCePpx37duCtFNW33d2+qt09PS118vRor3ovZyaae/WNrrqnps/O56fYOrXVvMQzBGHBJw3PAwOgGeCM5B654r2jTo5DCkikJuKvlmyEBGWQDAxuAHGeccda8R0dmkuY8kKqlW28LGeRjAyfmOePzxX0HYgvpsKsx8yVQVPDAYxz2wTxx+fQV5VdqLSun7z2vorr07a6dl3PoMIm0+Z2aim7LTdN7d030Vrps9W8EMJ2soJDtjhnVn24LsWPGMHoMdPw+n1VotmIp7SRsiNkTcxHI47DHBbj8+lfLHgGD7PdrDIxyBuDYycZBBJ/vDqDxj2619faNGs8NrOspm2xpHtGPlIwN5xxlfX1zkc142J96V0k43VrppXtr6W9bbPTY+jwN3BL3W4yWjbVkrWs72emrW7+8+p/hvPClra6fKAJY5HdARltjFTHk5ycDOemM4PrX134S1KJL2SGJ1IKBdpIPzEY3E55A7+30r4q8DStDf26N8ztAjIzsM4cdz/d4JzyPXGK+kPCkjJrVphNxMy7WVjhif4CRn5eeO3T2r4fOKC5pPmtdaX21cdk99rff6P6rBSVSDdm/hla7UuaNrWStdJPR7a+h9O2dwyXsW9UC+YuQeARxwB0Oecfz9PpLwZPHFPKNp8sQZUNwAxA6ckH889fevmdN32i0Z13I7o4dR90D7ynAxjpntX0LpMqxWFo7MoZgCOQJCdowTjOR3I44
I6da+Y5f3aS5G7tRs79lq99Vd2+7pbtnqnHWLcVJSdm7pxe6V9NU79D1aykjLEA5dt6FRwCTjHfn1PXvzXRaNayGaYcB1BZnz8pIJOARwSOee/PIxXklrqe28tctIA6su8N0ckYDL02A45z6nocj2DQrjcUG4yNIuWIPIB6KPUD9Bj3FY1IwvFu90k0raXtHztutdvMzs4qMVJOTjry3km3ayb+Hfzv5HqGlQLNDHlCJAARICM5GMKeMYPpznsRjFek2cbExlAN5jx6DjGe3384we/Q+teaaReQxiNFOx2I3ncMIAwyOhwc+oGOcYxXe2V0rOrCQBCgGBjr3bPqeufw7ZrmqyVW0U1dSVrq1tY673VutrN+ly6al9qEm77rVWslfTe912d9jtUSTyhkMjEAk84I6Ht94nOB0B4rYtLWSTYURizLnIPU44zxx9ecYOe9cjBqUbMPMlQRoBu3H5VC9W6gE/1HPTFddpmr2uwASRhcgAkgPnnlQDz+Z/CvXy7A0pybqVYpRWq5opqTs7rV2301v8lYVRVIL3YSd+rWyVt/Prbv8AI0lF9BIgK7Y85JLYPHTjrnv/APXrpoJ3aFVeQrGQAw4GBzk4ByenBz3Oe1cs13E7ktMWXOADxx1698cc4HuOKtpe26AhZEHoC2CAM4IPoeSR2HckV6soU6PuxqcyumlKpdr4W+Vrvfa70au9NVTpVKtOL5VZNL4bS6atq7vdWTW/oenaZeWVvFEcR4Yne5PzEnHTpyuABjBwT3rfTX1jA8nBGMZC5BJ44546/n0zXhDa4izBEbzIw3Q5wScHOMgYXjPfPXpXYaXrHnRBIYw7E4DMDjd2P/ARwCe3fAxXv5VXjKPLTspKzfI1rqn6tabNvy0358ZlzilN3k3azm9l7r0bd01fV20d7bnr1hqTM2HlIUnCs33iuQxzk5wD0PTOce/YweJIrWNQuZXPABXIBA4YDPzAE+3Tr1ryrTLC9uERmZmkG0AjAB9AT0AIPvnHHSvR7DSmKIZYVaRQgUDleeNvPXvnHv8AU/YZfg69Rrkcr3V5tLV3j5rS2mqevRM+Zx0KELtyjK2iiumi776+W9zobTU72/IRQ0asdpfbhcgZAAPU84A9/eu60nS3kCEIfmABZ/nAk2884wM4GB06A9M1T0fRFkCMygKNpEYAAEg6hj26rnnB47GvS7O0hiRBsck4GFzgBfukdMnk+mPrX2eDwVoJ1pOcrJJWVo2aeut7O1ra3V9lt8bjMZBVOWDsla1++l9vztor9d84aKzR5Ys4RcCIcHcOCcDP/wBb24xp2GhyRMJTbqMHhTjPpsK9ARg4Pv37dPbWwYArlTg5JyMAduefqePc4rWitiCDknk8j5c56ADHY8dyPT17pRUeuySWySWmlreS69TgjiZX0bfV3b6WstU+1ltZaWtqZa6eQqs8eGKcAcYGTwowcH39ulVjoiSSeYdyMQB65I/vdPfJ4Bzzg9epEWOvI5O7I4P54HX/AOtnq9UAyCozgkE88Dv2yPpkfrWLkowlGPVK75Vsmn162VkdUa817yck90uj20a2bvvpscq2jRKSSok24J2nJLd+duQegxz0BHQYyHa9tJspBtjDEqSRhhx19uOvUdx2PoEajBHljOcZIAJxnPOTzg8AfXtxn3mnx3KuGQk7cbN20ZGcZ4Pc88+2MiuahFRmpVXKScoxavey0vo2lp5pW87noxxNWcXF25JpXUm2lor9Xa3ltdanJw34llhklJDOSwjHIXHQE85BJ49voK2mlSRV24BYA5C42kE/yz26mkh0JYgWG1cjGBhmAOTt7EAdjxjPbNXRHGilHGwKMB+2R6n889vyqcXVhGSUFpC+u1k0vX5avpq9TWko+60uZ7tXa93TZJ/g9/ywLuKTzNkZDl1BXA6A9evTnvzzwBziiHdGjQuw24YgEDd6HuD3GOCOc471qSyxxJ8+Nyg7D14OMc8cYHoT+QxzN7eRMSY2G/j
AHKEnOAT2b2+vfNePOs6UXU502r80Xv0ei1V3be34np0ozq2g4rkTvdLzW70svK71ui+JkBYM4YYPUcrnnPfn0J9SfWsy9uFeORSAOyknkDkAk56HtxnnHJNUJI7lomkDbW7gdSO2M/mD7dQKpOly67mcjsVIznDDOPrkc8cc54FeXiMzqzjKEKTjGas5Ws3FJax7Weq1tZnqUMHRi1NzjdNOzsryVna2nmlf1MqZJ9jYAdC5PONzZ6beTkjke/OcdTyN9FcmOUKuUORjAJXOec46+vcj6129zcQpG0ZQhgOBzhueCMY4I5xzn15rm8PK5KSYXOMbcFjkkcd+SevQZIr5HGxhaCU7zd1y2s1pFW179L9nbufSYKpKLvye6rNfZd1ayT1d+3y7O3hWq6RHZag93PGQpBZWGWAJ6nP1IxgYH8vm79pP4WeHPiP4A1QXVpHPdGynS2RlDNNLsOx5ABnchGV55yc4xg/a2uabFepcw3ClX2kjHAyuSMkYwC3Jx3656D5/1O1MjahpMjB1VHZEdvl24ILEkHg46YyfYV8RjYRw9SUIKFqjfvN6qXurR6bvdPpsz7PA4mc50MZGpKFbDuDfaUPci1o9U09W76aLsfwUft+/s8ar4R8Q3+sxWDrCl1PDqDuCzRTxuQ0gwoznK88H06EV+L3i3SnaWVJI5YmhZkkbZkMy9G6j5Tzz275r+6z9vf8AZ8tvHGi+Ira2sfIZre5lJjUFZrnBaFiNpOHw2459PpX8a3xq+HOs+C9a1nTL21lhFpdzwyM5JikUs3kZyoIfAYmTtgcV6OS5lKNqM3BTpSVrt3afKrWtro79vxZ72e5ZTx2FWLp05TdSmlUs7wg7J3iopyXRy5nZX67P4F1S0aNpQqMQy4fzBkGYZJKtkYx1HHHrWFbl4ZApYAgncFPUE9j3P58gcZAr0zVtOaSORWYAxyt+8U7fK5z1xllPOeMH8K85vYpIZ5CUcMpwXPKEHJDE+rDkDr16mv1fL8RHE0VG/vpLTZtWj11779m9Fsfz/m2DnhMS58vLHmdmtr32Tvp37PS56V4U10WVxEnmFAGGxsZLK3OG6ZBwOOOxzX2b4E8V+YsGGCR7UV9xG0yrjbGDxgNnkZIHrmvztsL4wzwtkYRgJVBGSAeSPoPyyMkivevA/iV4VSNZSitMHhz99VY5JbJOWPHHGB0FfIcV5JGvRlUjFSmk+jtfRbWvfrdptX06n2nB3Ec6VWGGqyXLdLmbtdPlSXS/ZPr01P018P38jWyyebhm2vIqKCWIz8wOe2Rgfxd+lehRXqeRBGMnaqhnxukLE/dboARnhcH0yB1+YPA3imOaC3IlDeWixFS+8lj/AAgnB46kEfjXudjetO0DKUX5Q+GXJkfHytuJ+8OcEjBz0J6/gWYYeeGrVIzhZ3WlrXSs0ttVbve1tz+g8vrxr0Kcqb51KKas27WjHq3ovl37WOnP7xyoYMyjCLgqcDqCueH5wxHTjg5NZ9zZuA75IfAZNp3GNTncpHGHPGDk45qyt2/AK+XKqqW80BgwO7DcYxvyNvPb0xU7SxT4DBoZgV3hU+/Ec/MeRkLjGODz1J4HHTquN9U7a31Wmj00V1r59dOp3+zbteK1Vn2+fb8/noebX+nosqtl40c4wVGWbnPmDpjrz05I9ayBEXDR4jboeQS+4ZwQe/H3QODzivSb+zeX7KWUKo3JgIMbVxtkVs4OOScjqTnpzy81g6M5WKMSKySJK5G8IS29mXkIoAyBzn8K7qWIVrN6addtEtO+mltb+SOWphryk4wu07ednZ2Wzst1prprscc9sQjKsZc5ZUKjDe7kFuSMjcSehGKrCCSENEUV9iSJIU++rkDaBnAB655AHvmu2e081XJj+Vf3kbFtol3Dll4yFz0Tgc9awLm18wv5SkMnyupX93npgnP+s98cenStqdbmfZXtffTTXXR20vbax41eg4yvJ6qzs7NXsuttdf8AJM5G5s5CShQ
sQSZBuA245GAQQSuTuA9QaxZYGlmjtzHsQlsSgbkJHKyN0Khsnr3Hvk9tNZF4HkZWDnlnByAo+6AAMhn5+XvjjAFcxeQyKU/dlSTJEsatgxocY3qF4PU7ge449dqVV8ySknZqz89OnZbOztrsePXoWu7u11tdO91fqunr5aNIwRCLZpX48yUlTt+5I3GWQdAD+POOwIrKmUOjoVKlXxuUHeFOc5OOmOq9fxNdK0Tq3llXcqcsYxuRAeuD2KkDOP1BxSC2h3TxuxiUoxdJD90joygdzjpnnjPOK9SjiHFeb1117Xutdr+r9Dy6mGb0UW9/ebu3bt26Xvb0OSa32b5IWkVJPlljYbHjK4ywHOVbrjuBwelRXEkTxxwZ3sApZ0OSFGdyufQEDg9M49a2rmHBfaHKJGGJIwAFyCGIzuJyMY7d6xTCvmyGFAIJFCOpI875c529wCST05/CutVedXUm1vor66dtum3rZ7mLpcmjUk7qzs1fTd9VbtZbv5VN7TSJGPLbB2yMMFjt4ViufmGc7uc8ZPSsTVbNIElbawDlpFXOVYt1YY+6AAOORyeTjnZuIzDNuQGIqD5TYwWHHU54DfqAfXmK73vCixqTLgxybQCrhhwDnsmD6dfbJ6aM7Svf3bLtZ3tfRdfnb7hxTSV1K0Wk2rJPbVWs77WXV6Hny7Q4XACk7sHqO/ToMdj2ycY5qlrVs19B8jIXU4chRufb3wCAeOGIPJI44JraubNlDMkYZySVC9h69vl98ep471I1VWlhZXA+disa5CuwGN3OcDBLDGQAB0PHrU6vwzirvRro/k76dfPR+ZpBtLr7y0TWy6X1Wyvd6213Ob0F/KZrdmJVCygy9Qf7uT07bfx/HvbBXeNkYg+W/wAqZztXquTwG/THGM4zXJrDBb3BdnZkKsp/hDMcbNoB/hIOTxnuO9dVpc6MjB2VgRtwwyQ38J9/qDxj34qvNTSlZrbdatu2+vnv00tqerhpWjFNqXdpu13bpfreztsvXUnCLMs5TbnAY44DKSAcdQCTnHJA5/vVNcr5kSMuCCEbLdQVzlQAOBjHH0z1p92jNGgBJZWywUbi6A5wOm4ADk8EDr7yxReZEqYGFwFwMEDoA4z16g474ya45VEviVk2k3vZpqz0vr383Y9SlJKyaXKmtumidrNPfTr2ZlxzNG6rIMN95RkEBRycnIxu449c/SustZoZrdQ+NpUkFudjkZHboOnbg+uK527siNyspTABVyeXc9FHHXnp7VQh1GW2UI68qQrA9QTnkHkAZG4nBzilye0ipQsnvbRtXS9b+a336tI9CjjPYyhzXcJNWbl8L0SSvf8ADVvVdj0LS4gs0KtgjeTuOQcE5IYjIwM84x07Dp1dxZkQSSIMZG9Spzhuc5HGO2OuOee1cLoGpEMfOAkJyF8tc/O2AOOyg984PTg8H0ZLiF4VTcJHYdEztiU9Wfnk4z1x07CsKj5Wufl95Wurq0vdu3ot18nrqtj3sPjb2lzS5U0pKOrs7O60te2+j1/DjWV1nSU5UIeGByWycAFeuD3J9OnOar6kkk8EhZTIVPLAbgqnOVHpt685xnua6TVIbaIoyMGDRruXgEsc428cjkfl1rEuG8qBgg3Fxzz1IBwpJBOTnn0zkZrj59XFPVvRu1t07vrtd+v3nt4bHtSUldWso2STto79dbN3stbeh5Ze2qQtJvjIzh1JbAI5yRxzjHQ5z2961nKiyRNliBIoxu+7zj7vUrwMnk+o5rT1aORiSR8ik9DwFJ5DEgHgHg5zgdqoW1m7IJoUXaCSRn16YB4xnnOTnt6jvgk6b5ppvZNvS1k+qur7adevQ+iw+aSXJyp3Tjz+6+vK3JtPXqr7u/Z2PTrOIS24MZJUqQAuQCGGAcA4+vPHsDxyt9ZvHdFWUggFUZlwAWOQWPH5k8cdsZ6bw5cnyYlbJTcVYMc7ihAwBjhWBIHJAA79at67AJWaSJVYMis5GTtIHKjjkrnB9sD
pXlN2lOPMrbve2/bq1Y9+GYKpSXLKLpyak9bKyavdO617L71a5zKaUrQNNIiyEqCQOpA+8QR1X8jxwD0rtPC8SxQvGoGMBdrfNtB/hHQhQe+PmJ9OuXZxs0OG+YHCgAdAD8wXnqBjHPA444x2OjWKoMoRGxZcKx4KcksAeCew5GTxnIxWc5qSiopuya1aSd+VbeS13u/RnRTzDDwkqkZu80opJppaJp2tsku+5sm2MAedyS5B+7jBJ6YGMY6YwOgPI61w/iCOWWa22q6tIGdlHAUcDEg6kjgDuMfl2moXaeUIwWSRBtZ8YDsRgEjnDH8h1PavOr2eaN8zsSys/wAxOdoXAIYdxjGDnnPT5TXPFyc4RjKLnb3r7LZJWW+j6O3m+s1swi6UvecXJuLc3ZtNaW7XejVvLQzm05onEzEyIXwFOMYI5OeyjjdgHqMnINbOlzFAbeaMNGxJQjkg5xgkdEHf6AAHsiSLcKkSopyn3wMIQQOARnAI/UnHFaml6ZMISEQYVvnJ4bAPI5JyOw6Hn6EdE05QcZbq2u1lda7qz76P/Ly5YulT5b8vvfDd2tJ25XrZ699SrqVuRHtOdrFWU+pzxg9fTjHIye4NWbIx2luocb5ZSVc8nHI2jHzHge2c8jGa1J7eORVVjkxAqqpgN14JXABAI68A9earWtqCXZyfkOOSAcn7xA53ZwO457iuTRrl51o0+XR6u3Z3dtNlq9dDjnjacZNtv2nZt6LRptvbd3du9jo1ube3s8ELI0iEMTgjJxg+xH8PI9hjNeY+L2YQOkZ3vOhIAH3d2cAY7oB/3yec557RQC7oWKIo4DNkuT90KuBx9OTzyCMVm33h5r/dI4LAcKr/AHSOoJYdMZyOPxrSE40nFN6K0lzLrdat72v1srLddSKWYQhJNyjea+JJq692y6XS62V/PY8h0SyaKVGdjukdULIBkFiQd3ovBGScDnABJr6c8L2dlBZxtIsbOYcI5GCsnY56ZXuMfLk15VH4ckgyY41jaM7i+7cSp6K2cbs+vGcetb0mqXVpaxwwlRKm0bc4KkY3HGO2AM8epA6VOKk8VJKM4vbm5dLW5X56W/panu4TG3gqSq31T5lpyu8Xo22l17X0S3MvxzZFL97lZcKu08HhgfRfQY4Prn1NePXrJeSSoHywYfKo3bsZwfTBP68DvXeeJ9SmnhdWJkfZ85LFgxAPy9Bg9c+nTHXHltlI6zPL92AF+eh6fr2/+tXt4Gj+5TulKMVaySWijdPrr2aeut7n0WHxsW6dGrL2qnd8zS6KPK9Piu33emljQtImgclgMqdwbI5B77h6fz9q0f7RlKkOwwrjavv2yOCQOOPw71gG/Kkk7lyWCyDnAPTPuPoMHmpo2VsOzjaSCxbjIycEDPQc59flrtdBz1aW99em217669vVJaP2I4yNGnGFH3laz5m9F7umrVnb7tOu/R+eJw2xs72G7aCW3D054A7Dk5zXZ6Fb20YXzATIVJ65JbqCffGc4HOe/bk7IWaIfMfJ2BlA9RzngYI9vwJ4NUbrXHtZkW2kCpkgyYzjDc8dcd+1c/sE5rR2vvtqra6O2uq1XpZHfSxdOglUq1Yw5lFRV9Yu0bLlTStro11vrozttfu4I4JVR8CNTvVj98jOFUgnqcE/ywM1w+jYuJJJwCWzlRnPIPQnsp7/AE9qy7/VJbpWG8yszE5Ixw2M4Xt69SMj6mtHSEkhtFIX5GLblX5WySDjPX1zWjo8kN0m5JJybWmj+b1aeq31sz2MJm8q2Jp04PmjGEXKb1u9EvtNXV3bu/LR9hayHbKylnUO0cpXgKw6jGeV/Q5571i6rELm5HkhigUAjoQ44YYxypzxzgn9bVpCzuBCDGhkJIJOWLdC/HKsfunGB+JB6bS9DmvLoNKJFiyhKqONoPLrxnce42gnPOSa8+rJYecptqPutc0mkt1/w10/mz2quOdSDoJu7TjzK1mk09tbarV20ST6l/wB4NbUbm3
mnjKsrl1QqWITggYGMDsPbr3B+jtai0+z0+ziULuh2o42YIdQAw2g4KrxkkjOegxml8E6XHa2xaK2ZSmQjOwPUAKA5A2EkHAwSvI5zxyPi2e7kujaBdsk8pMKx8d/mYk8g9ByOe9fJ4/GzqVU2kotuLm27NO2q0d0lr08zCeMUIq9rUGve3Tvy22te3a1lq90Zt1DBqWp2iKMwxsrzZPAVe/cHnGcdOhOQKpeLtLa3ilu7GELNCibipAJAyVIT0x33c8HFWrK1uLBlmdtxDqjKHzknPDLgZxjp3yMn1s6tcC6tb2CHzZHl4eMgsvz8IIunIwePqMngUYWvBvlledmlF30a0Vkr26b9XY5K2ZzdSnOLSsko66t3jrq7aLRXd9rXOG07xfqF1app/nTq6YiWAplg4bDOsmfljbPyDBxtODgiu/8M2kdy8F1IsjNG7RbmGZmYEbZFAzhCSfnyeh4BrA0Hwm5kY+WyPv+ZtvmNsc9c/LhCF5H8OAOcivb/Cvh+CCSOLyFVWwICeBIeu5zz8reowM44pY6rSpczi1Fq0YxbbaTaTe+qu72tZLTpr5WLzzkpS5pXlCXM1HRN3S1e6vddddW/Ls9P1e50vTXUrG6leIiA7AOPlRT1GAuSOepr5u8eXMut6mEaTbCWYIFGSrAjexGRuGSA4xzxk8V7n4xmXT4GgtQrSlVeUr8vlhQcEZJ2d9pAOMn6V89QW01zqbXbSHyFyzRv8wVedzZPVjgEnA5wRXnUJqpO7rNRVvdb92942ukt7pNp3018j5/EZ7TlFTS9lUmuWUo2Sfwtqyls2t2nutNma+k2lrplkomDPI4KTKn/PM7doH95HI5BHB74rsI443tjLGrlShDqQCoz32jA4HXnk89RzmeHtPk1S9k8lFFtCS43jdvRO4c9jngEHOPWtXx1qNn4c02VQqx+YodNpCF3VSNnXIDFhlcHdjnGBn2qEPacnVpqKjpq9Hdrd3v0+aep8PmmcKdWcp1LqnG7ldWSXLpq7XWttm/LVHgHxK1ZEljsbHauAHuGTlhIhYuCg7EkbQSQOcgZzXp/wCz34Nke7tfFOoxAjzGFuJMD5TjLBCAOQB3yTyTxz4X4Y0u78Z+KbfJla2Msjzgn92q7skKxBzuGMgjORx0Jr9B/BemRaRpEdqsSqkERS32jcqsNuWJ7jPRSM85zmvqqFN0EoytLS8m0uaKcYq3dW3v89Vc8XJq9XMcRPEuD+r0p/ud/wB4otWe9rbu+qs7t2bPXUa3v7u3+yjbbgRsyhCkiOv3ieepPUYyMDIxgV63YyRTrbQboiAIw6DhnCc8+vGc89h0zXk2hWbJE1yiqrORsSRsK4JO8q3btsAXJyRXU2l3Gt5FGQEUvtMqtuaJ+6s+OFyen+NTUjFtv3XeyV+ZWslZPa+3/B7/AEvLzOU+Z6NpaLba2rvrd7b327esWD2T3C+YjR+WzfKDtiBGPmY5IJY5x0/DrXRGWK7CSuFjijbywwGDJj3445OT3yMY7cZbOFt1Hm+amdwbG12Hy89STzjLdOc+9djpgtpLYYZZJHVisJ+Yg9mxnj/PtXBVpylzaJvRb6LZ6arpppu7ed+Gc92lrJ6XveOivrp3ukrXtubNmCuQGK2bHk5yQwJ+Yc5OP0HOeK7DTrhNqwSsjiLazSSAFipBwqN0zxgL2wM+3PWdu0tqsUMKCQfP5kpwVjGcoByNx/HPHGKnZXdXt1jYSBVVQAQSueWBP3j1546YFcsaeq20S31TbSXa6s318zyq9RS5ru9203dRtok5N310a022L2rtbyoZI1P7xSFTqS6njcO2e3P515VfWcTi4aScqykq68lXKgkhOeoyOnrzXoN/dNbhFWFg6xhfmUgKFB+cg55OeT+vSvO7u5ZD5E6h/mkeSVRxGpIxkA5yBnjOec5PberR1Vm9rWV1e7SfXfZfO9+j8z2zjHljKyTjJ2bvp8tfPRq1ro426ndYzBC4EKhgSQAQ3Xa
x65J6Yzkg98V5xrCLI63CEnylYNGwKrK4PDZBOCCeTg/mee78RS2sUsBTeY7phsQsQpYZ4JA65yBn3rybVLySN9QiLqtqUYIM/OpTq2eD1OCR7Z5GK9HL8LKU4tRsrqLTXa13bZvonole3r83nWYQhCya2UpSctbNpN6W8r9uq105HWbuG5imcv5YTKFI+XVk6lcd87fmz8wJ3dBXxb+0F8RE0HRXhZo45J4zbNg7jI3ICnkFnGcsABjIy3GK+nNS1AafZ3tw6holD7A3BcEZZi2cgtgkPjjA47n8hvj54qk8TeMry3trkTafaXBeGNflCSDPmBssQ/IHoD1A7V+jcP5QquJg5RtTiueV0rtaJp+fls7JH87+IvFU8uyqu4XVXEv2FKz998ys3F6aWe9tL9TzfU9auLqKdGleOO5kLNGOWlDHJUtnOwDkjuP1wcqPlUkgKMnGckew9OOR+XWqio2/nPXpnGFAz0zwp9Mdjz6WsAHIG5CgIB7Z4/DJxnPfHQ1+jKlClpTSjHlUUrK7tay07Lra97bPQ/lrEYqtXk6k5ycru3NLmtezb7dbW7O4/IYHGTtTIAxjI6jJPHX17fSkX5mwQeM7jnv6dwcHr19M88IEwMqOM8jOeuCT/wDWzxzxnOZE2gYwcgZ5/Dnn9M9M9fVrWy0snppZvpb8bvW/XU8+Upc3NdX8mu9+mqTav+AhXB5GecZ5JxxgduPp3zj3nRsEE9MEfN175yO+T0z1J9qjJJYADjHB4ABwec8jn3/TgFQVAwc53dzz14HXGemOMd+OKte613stbvul5fL08zJtuV+ZvS7Vr31Wt99umunrZ3RIind15wD1zkdB3xnPHr060pkJOMnDDIPOO/BY7hg9D3OOPWqjYyCTwAMBQd/1HX6d/TnnCAkn5SxBORnjpjIJ9R7jmnZPXdPp09fN+fYTSk16b+Wjtbvru1on1JuR6Zznp6njPTP9e2e5G4BG/GBjOMdAOo9PfB4xTG465CgHjJLdeQeuc8fU4HHdgwQCM53dGHbt7enf1pKFtbu79NLWsuvVCUGtXO73Tsn2S69+vpZd75O7G0DPYjsfz6dP8MZzGyMoyVPYkduep47+vrnOeeIEd1JPYAggY7c4HI6YJ98fSrayFht55HJx+fU+/foK1hG+/S222nT7+nbfceqdmmknpbZp2t189tvusQ5wQxHAJXB44HHTvn/JGcVLvABAwBjcAR39j6c9OenWk2bQByx6leGHXk5PtyOemfxXZwWOQBj0yFPoOQR1/PA6cbLS1um3X8yu2v5a6f09B6s3GcEYPTA6Zxnk4zwB2oqHI6jJXGD27A5UcY+o9+lFdMLNdNNO3Rbe9HTtpYThG7uvxf8AmedoVDBsnavBTGQ5yMHI9BwBjJ45zVpvKEedxy4AJX5k3DG3PQ57A4wOneoiiqeMBuG2qe3cnHQHjpxgHPWhAxYBiPLYEqQ3BXJ25BAO3tu4Iz0BzXIno3dK1m01rslZX1v5t6+djFptXVk0lvZedtN3v3v+cbRBgoByGGQe+R90E5zyegz369cqYSUX5QSONvQsc9M9/QY68njjEwVQpOBvHcHIBB6jj3wOO3c00rMFyr4JA5XlxgHBBxyPT3x6Gsqta6SjJO29/O3XvbRpb39WNP4WpJLmutWk2rdVptsnvt61pIzu2om1QMIGByB6YzyMEkngnP4mIxlztOVG4AL/AAgD8O/U89uRW5HDvKbgWY7c4PPBxjA6Dn8KsT2OckRDIUMR147kjuBwScjGeQK5YVYxt7t7u13Hq2tVr376WdtmXFt22ta/Mk7S1V93q9rvz03ObEKYIZSSP9WxBXIAP6846+/GKcTtU7sZwAowM44z09OvOMYOOc1amRV2g8bQ2NpA2nOMHpnOeT+H0TYcBmUhThc4LMc/xj27kHOO+a9TD1YqLUne7u27K1ktdtvuSd+uhrZ3iknaWm+rsltfvq1339azoQodS0qYU4HO1Wz
kEY4PHT8u+dGxsGuZEGMJ5gyQMsCvVcDgck/KM7unQGp7OFZSynAU4yDgAdgSe565GM56nvXZ6VYbJUMaIQFDMrcjA+8+R/eyMHPBGMcVzYnGqF4xd5WdnorveK9Hr07HbQw8puNtPeStq5bpXUU3rZ/F/mX7Pw8sloXWMPtBLBR85K9Tnvk8DtxwD347XNOFnIGVdoBII4JI4+VuOMY4/wAens1qRDaZz5Zcb9udwRjhRjgbenB756V5h4pO6ZztwxYnH+x2OcdupOO+PTHjYSvWliHeTcbtu71abinG2115a6Xsey4QiowilZJc0tLuVlpeyfl/SMCzlcpuyBwOvVRxwP8A9fUV1+lyNESc5QKQAexIHA7kjt6EdOa5CxK4w3BKZ2n+9x82OTgZ4+ua6C1kbruB2qcbTjHTnnqe5HHGRxXsSd732asl6teas2nvf0LjaMbtP3baparlt93/AA92eteHZw9yitIVGAxJ+6D/AAjIPJwOnbntX0JZTH7NZySSkRGEqAg5Drtwy88HnkDngV8ueG7pVuInYFmYYfJ+Uk4wQO3c9T+JNfQtnMzWVoYyrPGhYr2werdenTn34GK8rE037Ra2je2tkm2o3t5X/He+l/bwc007fDKKte9tbd9pJ7r7l1PdPD16YfsUsZ3M6FGfOPlAXduOBycjHHAH1r6Q8BaybpN28xRsywxxhgVYqcFhxjDd9w5wOhr5A8LX5ms2IUoys0UeSCcggMQM8LnnIxnBzzxX0h8PLhI4I4xwUmXawHCqDyM/j83/ANauPEUo8kkn7yWmqd1dXtbTXffpvpc9/CNys3ONnZKNnrql312vbqnt0Ps/RLxkvLWOPKzNaIkTgkAdMDPQE87c8jBweePqvwGZHmszIW8xTG28A8D+6SD97jrz9OlfHugXsEltBcSFiTPFFG0Y3MMHA5yCMHv7+9fafgO8to7G2wAZvKDfOAcuADnnnGMe2fU18HnfNGMfdk7Nxslqm1Fbu6tpvdrTbZH1WCnflS1Tsny2Utkmrp7PR27Xsrpn1FZQSyfYJYx5kccYEqlc+Ypx945PzjHU+vAFehC6mSGMqpAij3RxnIOzAwjDqCMdev615l4R1aC6SMRyfOoDyL/CD3Hpt45HB7dq9Ls8XVycsGZgEAAJHc7dvYYwCfoeAK+RhFwbUkvdd3da6pXd00r9+nR9L+po3J2uk1H3+r02T0v5/gbul3T/AG2DcSItiybWJB+bOVJx909Onbkda948LX8cgWMY3GMqC3Y9BjHIHPDDnI9ufEYI4oxcF9i3e0JEqn+FAQEU47gk5x27c567wpfywLJvJUBjgnnGDwozjA57nGfpXLirTj7ielrd3fl1ut+z7N263CEdOVrlvJSalfm5Fy2t+l9EtNT36yU27PIrAByWDM+RufGVxx3Gc9veumtNVEBG85XA3KGxx13deh9D0wcGvL7PVYZYlLuWdiQoBP3hjgrnB4PXI/EVuOzyRK6syOVwyPjAXoAvIA4yQR78GvGryVFNxtzJ815pq9nFNJve7fqvM9bBYaM5RVRSjGTSSSSbv16tq19Xtodrd6/CX2+ftXqIycNuPdzk5Unnvjj6VsaPrM0rgB9oByDkk46Egdcen4+9eFXkksdxkSEpgA9ycdFHTgZ64x7Yrq/D+pCGZWaQgr0zznOG5OR+A9MZrlw+aVudynCUIP7Wtk1ytu++7va/Vs+i/suCgoqzm7O0lrK/LpZ662eltdD3qbX51jQhiCgA5H3s/wAR/Lnr6VQHiOV2GZgozypIBPTGOe+Tx7YzjiuWOppfIEjKhyNhZcE7cem48nJ+gGfaohpEbyK6s7vkPhmIAcDOQcEEEkjHbGO4B9KFbFYurF0ZSnBuKvFuNn7r6Xvpvrdrd9HpTwuHw8HHERUJaW0TbVoq11Z6vVJ6dT0ezvpL64h+YlFPKgFS54wSf/rfTHWvePCUMCtE0wLEMCufu56jkjHBHf2BGM14PoF
pIoTeWjZSEKABzwemPlx7tk9fTr7j4eiuWKRsWCgADjGAcYPr6DrxjnJOT+lcO0J0UqlRXnfm0Wrsov3n5u9lpZd9D4rP68G5QhNqMOaK2Td3HSNvuvZdrbs+g9JktXRIY1TewXATJweM5HbH1yfTivU9I0mSVFYuqxlQShGGABwACc/TPUV494Ygit5YXcsWyCxUEjnHOefp+HcmvobSru1aOKPKjCDkDGSABknvnufb1FfquVV1NJTSgotWW2/Lr077vrvsfleaucJctOLk5Xbe9vhsu9116JvfU3NN08xjaq5TcMZxkqf4hk8gE9Oh4OK7qzt9qhnAAC4Bxz9RxnHQEcnn3rnLGdHmSNMNJwQFwRt7At0GM9e3bJFdXGWJHIzgkgYxk9Vx0B9OSOQPWvpedNK2iVrtaX2su1/PZXPk6kZSm22ndarVyT0vfb5X+4vRBQQ3QkkZHQ+mRz+PpwMdqvoeOQBxyD2+g6Z44zxWcDtBO7Jzx268flngg46j3qZZX2NuPzYJ+gHcf5J9KxqT1ja7u7PR9WtW9retn1HThZ3SXRtvs3fSy8raei1LhkjUMCRnBwD/ACyOBkgnjge1VpblcMFYh+Bj+FSO2PU9M/pxVdGLkYOfm6AckKPXjPB/oasqoyCwUsOSSQckdPxboegA+uahbu76q3lttr1ur7X2OiM3F7ea09Ndv63vuOgkd0GWJwODgBs9gffqM8/WrRUFc4XcexJ7dSe2OcEnnOBzTCcLuUDJIK4wD9fX6jOPX1qv50ccpaVxgghh/Buzxk5xnjjseeelc1eq4NPRau7WrsrJdN73t6fM7YVHU0UeW1r26tW6aa6N301LLgbe3XDAnIIycn1I44Pbnn5qozICrFvmTHqOT3PfGO3rzz1qpdarFCwTcCxztAGR8vdiDj0Axz16jpX+2vdNsgIYYyQFx8pxnIzzx+fcAcVg6sHzLmbTd3y6pX5XZ3tbe2jWuiVzsp0aluZNxV7NqSdkmm727Ppp2Mu7AbzEYkIylVcnhSexHTI4IBOT1AwDWJGggyj7GIbG9sZPuwPYcnj1JIrp7hU2Or7GKg55xg4PUfXnj1PcZrkb2a2RWUYLHIZgeVI69ugJGAD64xzXm4qpThHnnFKNvd1Sk0rPbbovNrue1hZSqLkp88U7J2XN1Sb8k7vbq93sWbh0jjYx4U4XIBypODxgYwOh4Hf61RF1blWB2q209SAGA5wOSecdMevYVztzqUqrhXbap+XOevbdnv1z68Dmuee6uJbgO7Minpg4B9CR3GPy64yMV8zis0XMvZq6XLpy7JW6t+TvZee57+Fyuc1ecmmkpJ8zu9rJL5O7++xtahdwrN8pGOgOMg9ARz0HPGRg1mXU6i3SRF8s5BDDPOOmOpBPufzNULgyeaqr85IB2jnHqQTgHH0OAec8CmsJSeXyh42kZCH+LGeg7dOOe2M/LYrEznOcZfaaSSSTUmo21vt9z2dj3sPRjThHVuyve7aa93SyW6V9tLPZI5+7F1dysgnIUg7uOdvvzjkZ44559K8f8ZeGCj3V9aySC4jRXBDEB8EllYYJZfUcevpn2a8RraQzRjzAcgqvO0dDnoPTtnJ9hjE1VVexkbyhKsiFX3HLR55YEY6jpxj9M18vjKEaqnGpOSqRnzJp325eWz6LzXl5W97B13SqQ5EnCajFrRa+7fm018na/rufAnxV8HR+INJmmuLP7SHQRuyqA6lA2RjBKnk8nd0xj0/li/4KcfsqS6fqVz4o8PWW61vvLe6k8var3JDmSJwF2u3GEGVxyASSTX9jPiTQrxYLg26qlncPtlUkEtGckMGIwF65btxnFfBn7Q3wM0H4leCPEOiapZQy3RtpbjTGkA3C5CM0bRkLkncCT6ccZ4rwqdSphcTCq5LSUd3dS1imm+ia3V9LaW6fcZfjITpvD1bujO6coyTcfaJW0bavF+TW70P85zxZ4WvNNvJo720nthFI6m3GYvLckhR
twd68E87Rz69fEdastz3GEYMuI5WdQrFwTtfnBOeQSowABnPBP67/ALZnwT1fwB49120u9PkQQXRV5MYYMruCXUookLjGx+OFJxxkfmR4u0ia3kEoxKdzCdBnfEOMqMDG7uScbcjGc8fquR5m26XNyq/LZJpxt7llq27/AHO9r3Pz/ivJIwjWdOEnBpuMkmm72WjVovV9NVtbU8Dlgkt90ZJ3qSSy/d56EdvYH0+gNdH4f1aS1uYAzH5SqOd20ueexOcDHUdOwyMiDUrVo9zMCRhSwBJbLFuMYHT17cj0A59sxSZyVOAQOm09R3z6ZA6+1fcyhTxdKzs00r21tftf12379GvyVKtgailGU4yUk/5Wlo7PbTa+umvZn2f4A8Q+XBGwYxbp1ckDlNp5CtkcNn7uPmHTpmvrPQPFAMUIMzq8SLlmYM2452xADjPcqPu+5r8zfB3iJ0lgiaSQsp6s5CNtI2BsDG7nCgn1J6V9Y+F/ErPHaIPLcDahAIZ4um9EGRul4G5+M8cZzX4xxbkXJWnOEFa7s0tbqzvre3V9NdtT9z4K4ijOhCNWUpSUbayvvyppej9V8mz7RstUgSGNTIkkso3CQEvIGPJZfRATxzxyT1zXVW8sLJy6TThiH3rsEqHB37gTgKMAfU8+ngPh3VlZC4lXdOSfLLZBZ8bPKUn5VO05UE4Hsa9T0TUlmaVGZC3lMGjY8kKAQEA6MuCTg88Y7Z/J8VRdCUkuZWtfRbbX2tZaX6X2aP2TCVI14Rkr3aTTd7WSWul1q7+q10OvmjjmRzkgIjNls5TH31P99V+XauB94nNc5KjEsqiOJYiAJGO53A684GQcggclffqOhtJ/tCyM2dnlSFSqgB0TaCGx95+f+BYG3GMmhKpkcowIQuGhAXDPgHLkDoi8ZGeRgZ4xXHCtZtcyWy1Tvo1d6+Td7/cdFWDilZxi3JN3jd2smktrX21X53MWZNkhKq2yVSGfqcEDc0QGMA8BenHesmW1dZA8bOqhmEhZTwhyFfPqD1Pb8a6lbZY3JnPytHl3dsmSQk7USMg7c/3M8Y4OOkUtmrpICWgjkXIcfPIGGMKq8Eh84I4yBj67rEOKauku7e9uXs7vo3tZvXy8nEYbni20r6O7aXZNvWyura766XOLlg2qRIis5XEqO3yyO33NxwenJBAx7da5m8snMvmOG2o7LJKzYVRgfLGcZcR8BiAOxxxXaT2sziZfIRQCoUlwxeMnhi2BnABJPG3p71nS2u5ngwSi72Eqf6pnXbl14wqkHHpgcdcV006y0d73t1SVm032+/W/keRXwqkmnZ6rWOt7OOq7X67Wv1PP5rVPMys2YXUecA5JXP8AEO+8kck8ADpzVK8swhYiXzRJuaLd94qMbSffJO08ADt6dFqFkLfYkQHmXYZVkVt6yHgsOg2g8YHJB61TlIk2oySCWIKjqi7lERyoOMc7D971zXpUa17Xco27u1tPl/wVfc8yeFitW3BR919tbfiunWz3MQwqY2VnfzWj3LyPJPq6/wB4AdT3OOOlYjWiyZ8mVBOpAmTbjeFJ5HOAoU9sg5+tda9liMMpMjJkBtx2IDwSEx8gPGEycYODzVKK0aaRlMiRlkYuzMQWIxhSccDGMnk9OOOfQp14pX5mkreb7XXnv3807ac1TCptJJyd1aye1lq21rqn1t18jlJohL+5aP8AeW6hVXbgORnh2znac+nbvVQWLeXIwAiU7zIckRvIMZjC9i3QLkbsHmuqa1MdyIWTYkmSsikh2VMlmZsc5JGMjk56U42k0cUjuweMMMBwFBdido28iWUYO0DbjJreOJ5oxUbO9tL3vZLVK6ts7tLfbUxVB+9Fqyum99bWvdXa32879NDy3ULGRZxK0Zh2yYkB+VnUjG3aOi/LkdcZOKx2tJRKAIy/mEt02sqgZDYJ+Zlzw3A5PGa9d1DSklgdy+bg4WaNcM4VR0Ax0UdTkkZAI61xd3pjqxfDJuQRx+aMusa
5wGOVyhOdvHHPXt6VDGtrk5lF9tkttL620trpe/yE6LSk2kkmoxbe60+/fVHBXllGrZVQfOJMKg7mdG6qMg4PHrx+VVLbMGTs8kIQFLty2z7x4xtXpgZJbnnjI6+eDZvMkaRNAyxxBeZCpz8yjHHIGcZrDvbBsRuq7iw8543bBIIwUdOc9Blif613wxCaacldW1fmrr7/AE7iVKUZRlB2cXd6O26vd3srLyW3S11qR3cU0apswRlo5AD5bNxlU4GSOMrznPXgZvWksfmIrE5EeSCMB26jcc/PnHTjHU+tc3aeYbby4mXLAsU6FAnDFCfukj0znA46k6tq4ZE2svDZ3ngDkbtvTn1UnBPfNYTlFt8sd09E766ata309EnttZejTqNqMnHfl8tVypO66bpfLR2N+VVuSUZQCHOSo6JjgnqCSOw9Oua5mfTAs6Ao7JI4Jf7q4z0Y4JGSCFx15POa6eymhmQIrbmPLuThCB/ssODycHJznpwaVo0LEIrsxP7sMAQsa88r3x6jt7EVjCrKF09F+D2vbqt9Hbvp1Ojnvba1425776WbST626K2mr6ZtlA9vIUUlgOV2jaQFwflGeSB07euM5PUW92yyM3nNHNjIgIwpQj5j744AwB1yBzzSERVPNEZ3EDcVxswOyjr6Yx6elSW8Kxs0oj2rgHzGbaSRxgjHKtkn3weAaxqy5k3dt6aJWbV46N3tra2i/wAzspV5QceSpZ295WfLvF+7qn93TRl6SYXEgV8MEVVVgM56kHOc/KfQ559eadJDJJExILRgEIBgZxkbQAOvrnGce2KY0Y3L5RwGL/OrZV2wpUKAOMZ685J7ZpzGTYjIMuoBkUEgAAcooGcsB1x1BHTGK4nKXNe6W2j36a3vb5X22tY9WjiZtxvJactne3bfW66b2T7anK3tmSOYjs3txgZZR1XGB8w7HvkkZqvZQRx/6Mu3BBBU84B6ZxnkZ5wOK6iS3DsGRWZ5DukTdnaoByVXPyqo46n1xyc43kQxXG5A0JJb98/Ck+inJGCRz/dI5zxjphOLVnz20fVJvRX0u7a22T9b6+7h8ZZON7ptK13He2zbtpo1bq73bNLTofJl8osAueTgEKvrnjBHqevpjBG3d22+2aQH5VOGYjl1HRlPPHJB/vYx2qhbwAbmeRZJGK8I24EN3bAG1TjOeeSBjtW60fmW0YTcNilSAeNnG0dCMjPB/wAcnlm05SabsmtG7Pp5a2116vfz9OGNVNK0+Rdpyk223ta+1l+lzlxdBPLWOMoucnAIDEcYPIyT6focCu10W4RgEKswKsQxyV3cHA4+6Oy+vGeK5t7VgwYhFQOAu/hAMdWODx/e9OO1dVpLrEjoPLZnPA2/MFJOCpzkjAJBAxx+RUd0nZRUkm72/u2bei33d1roXDMajb5XCEE/i6P4fh876+afYvyW8ciSrMCzEEoQPm3AZG5c9vr+BIrzPWre4aQeTnPIkORlg+MhgeC3AAPHevTFYp5rEYVCFaQkDcTnHynIDHge+BnHIrA1W1V3aWLdn7/zY4HA+UZ5U9evHvk1zwcubmfKuXRt9U7Jq/V39O5lLM5yjKMr1FLVSvdJXVtYvX8LW0tuc/p0RVApGAibmRzhn2gZA64bngcHPNa1tqyQQyeWflOd0ZBJDHgAAcnPI46+4FVpIiEZnBJaM7ARgux+7jByATkZ7dABVC3hy29x5e0gIpBH3cjnHfn3/TnrbTi2nbRXtJNNPdKys3Z9/wBTmnj1Gzc1dJcqcr2aStonfT/gXuahuJ3Jlj+Vn+UbzhQG5wVx8p44JJqzbSzso2rhlKvnIYljzkcDg+mOD9SKjZUSBPNVG3nDZfBBA+UcD5c9+v1zUOmAwlhvAjYsGOM5IxtUZxjqcDnHPP8ADXPBxi7KPKkkrys25NJJNpXS1XXZdNlx1Mzc9HJOTtd3touXRLe+1tf1RJcX0rTqhXZIGUggfKcEhgfQ9ByeO1dO1+rWIjZcSBA
flHcDPPQkev4ZrlLuMxTozEB52zGQASR3APQYO3PXqO1R3E88IjDsUVyF3Enj1AHHPrxnn8K2q01JKTsnrZLd3S10em2y0W9u0UsxdRpyTtFxtytNppxb0vfez02Wj6pb8d3AsDCcDe5IHUkgYxk8kevP9CK5m6tzPMcse4A568AZB6H2HHX3zFeX6Q+Wm/ezDKuOmDjJ4HUdh1+ldLpBsnMX2iRWkMe8IchhkDO7OeWx0PA7k5rghCeGSqNPmknort9NfJJ7t99Nj6rLcdHWMG3JpOUVq46Ld67K930a6dPM9ZspVkdJAzsy7gTgKqn19eP/AK46V59fwSwjCAAB2JCjgDjAx6nn644xXu+u/wBnzTytHiMKm10buV64OcdxgYwMn3rzC6gimNyFiwGDNEwwDnGdxOOh645x04Jr3cBibxi3Bqyjf/yXWz+d+jfqfV0a/NTi1zc6itnvortLd3trZ6PWzSPJbq5mDhS52lsMg/hA9h0J/U8mtGGeWZYyo+QLgDHOOOeew7+3P1ztTtkiuCiEt82ST7nkYB5A9+uc4FdHptuDAcKz7UxkqVOT0GOfyznrjpX0kpwVOMtNrp2s7O1r/ht0vcywtavUqz/fSUU0rOd3fTa/5XfbVjba/kWURMXK42KMZyT6njC/ex069TVa8kYyrhhjHJPY9lAJAyehxntgZxh4CRzhWyMcbiO+TjB6k57cZA7E0l8IlcYOThS2Ox5OAPTPfoeOPVKEZWcYtX97RaO1rt/non67HqxqTnTUJTlNqS9930jpZ36NdO+mli5p5aaZY1ALEclsDBG3gDJwcZ/DqOgr0m3silnGgVdsqblJA3A4B9wp5PfnrXmWm7/NQRDc7HavIBUdiPp39Tx6Cvd9MtFOnxtcI7sIgC2QBuJXCBQDuAGcjjOQe+K8nML0uV8yUXZq7a+GzV29L9E9b36H1OXZhTw+HUpJzlGKUnGzbty9tXo7W16eaF0vS7ZIImlJVsnzlJDNjjYAccEc7vbGK9K8LR28dwka7WSVPKLMu4SHjaFP+zyCMj8+a5tbKEWc6ICH8knyzwXOMBd3YHqG9RyMk463wFaXMMatKADA/wC4yMvJIWyQzEjhR06/XtXy+IqvEPl52+afJZtXb0s1pu+1r+Z2/wCsFKEaeivO94ybi4PRpyasrvotrvWx7e1rb2WkxSJcokxHmSxLg75CCQCoxgrj5Pqe2M+E6pdS3uuQXbykSWzmLcTnlWOSwHRj3POfr19bvbizSyugJgbh1beqnKSOF+5gf6vaSMnt0NeMSTRrMQiiV2Z3uJScYbP3VBzhV6YBPUmvLzei6ToRhyOSilKMZX5X7q15lo9NXbd92cUs6jKHJHmk1Pmlb3k78tlqlovn3SZ0gD31yqxh1kZTIVVcltgzvBzzkHnjKjOc9a6KxsLe6gnkncpNGv3ANp81SMSFsEMR3HTn8ay9HtLlsXCAiXyyoLqSyq/DlFyGHQc568kYAFdbHZGKERAMoYqFkzhwzZzkYJfOOTkAjGOteZQhzaNvnvdcskmpK3bS6vtpot9Dwcx4hpRulWUOWy1ktGmr6X26JrVjtOsYlaPa0iNKDHOQx3Kh6KFx82/B6EEY61szX02nv5jID5WI4UB+ZEwcSBTjd9cjOfpm2kKWlvDc58y5bhyACABjsANuMjB7E5xiuV8V6iAFkUqhZVjZ8jLzLksqjuF4BJ67gMdyYmLrS0UraLVO7ty3d0t+unqj5LE8SObcoTg7tXWqUrWSaT1v287s5jxR4ke5Z4nuX3SIRK56AdkB7Bd3I556VyFpO0NpJKjGTcNjgAgKp6HaMk5+vHpziud1Jjc3TRNKGjlkDlRksCMnKtxgA8OoyORz6dbp1uIII1Kb5pGBMXUEY+QnOOAMkDt9DmvawGXU4QjWcZapOXuqVn7vut7tu/rc+axef1a83GDtbVWdktU3K/VJW0XpdHonha8g0mye7mUMDGQVAztVgcK3OQc
84HAyO9fOfxI1y/8AGHiFdOtwzwhisJXPyL0O4A4b/ZJP69Ot8X65FZQPp1nc/wClSYmlEPLK7/wLg/KqY4Xvn6UzwZoiy+XqF4jSSTg4Zh8wfjG88njJ4AGR34xXr0aUcNbEqDirv2aldNuytJ99eltdddzx8Rivrk1hKcnKpPldVxk3GNktJLz038kdp8PPDkHhzSrcGIfb3l/e3HBYM/3QT2C889Oeeor3/R4L67tygmIZpd6BMKdo++R1yOm4cc49MVwem2EbMkcKhguwkN8pJx8xQfxY4xjJHJIINemeH1+xJDDKQEZypycmP0DHBJBzgjv7V2067quU7+890nu9N+y9baXWh+j8PYVUcPClB2cFflezaUXdWe+7tb0ud3plxMDFYLNvO5QN5wVK8vkE4/8Ar8/XvbDT7dCbiAiRSQJo5G5ZwPmKA55yRgjn868+h08pqNpISssTHe2Rgox4UBwT8uM5GOe/ofUEEDvHDArKY41Z3UYRjyCMZPzDjkEjknrjG6aaTnzWeqemj7eiWzXe6se1jXDkg01FpXdtGnpb1adrb/idRYqY1AD7UcqsYJz8w6qQcYHuST6479RYqEJZZTExxGW9HbPT/ZBB5Pc+/HG2ot5poIJXdV3hwFYqxZeRjA5z9Bng9q7izSGYyRGOREjkClicu3IAboB2z781k1zNys3G9l/MnZLqn11Tv5W2PCrTa99Ts9L3Ss9k3bZPo++27R21rcJHGiyPh4dm9y2PNyD8v+8eDjnpzVxLtJ51dZJAY8YEmVyDxg9OAf8APpzKwW+5lkkcogV1ckgeYmSoY4PckHscjnpW1G0V1B50xaAqV3BTgMByCox39M55wDUSpQurKT13V7pq19L2t6K3oz53FVHG0rbq97qz23t2uvz9bd3G17FL5j4llzApjwFVB0J9M85J9Oh7cRqmnCGKUCQLNFEXkJJzIoHIGOpPAwOw6Y69At5F5xKmVYf4VLYeQ5+97AY5PfJ5zXF+INeFvJceVHJI7RsBuHyqhGMtnpggg9cjr2rd04yaio3dtt/5VffR3v0sn6WPma2McG7PljdpaWbat2/S6066HlGuys6PMdixW4ZYA/VXH8Z6bWHbuM9K8R8RX72qK0kTySiQmRi2G+zv0ZVwd5br1zxzjIr1bWtT821vI5vKRXDbGLc7hypYdiCSffj0r5u8UaxLBFNc3royQRyRxMz45j/jdsYCKCMHHt9Pocnw0r2nCyfKtd0243avrs9117o+Gz7MkoynOV1qrRWqjo7+S8+yueF/tHePJfD3h5vs13BBLNbeV+7Yb90gO7coP+tXgbuBg4xzz+SlxeT3t3NdzsxlmleRiTksC2QxJznPBA4xznmvdvj14/l8U+J7yytrkyafaymMAOTG2w9ep5Jxn0IGOK8BiBweMjsOAPbg+h7jHJ7V+vZNgKeGwqqSsnN6NXvy2i7dG2+rTX6H8e8e54s0zedGnUlLD4WTgoubcHNPVqDbT3tfvotrl1STlyCcgDdwM9hk8Y6deMc++JFbuRjHQjHHsM9VPHfnnkd2qAFA7EAkfwg/j+ec/XtTyFxjPBIK4Oc59emOn4D6DHozs2rX32W6T7/LufCOWj8lqtei9Laq199umiHbsqCAAcAjqST2JA6H+9xz74zTPmyqk8ng4IIx7Hvjg4HTnimA5AxwM7erZByc/wCfU88Cn7j1GARjJJ9efccnAwcgjvnNZJtN6eSsubtZ6X6/jb58snJv18ldLRa23tf/AINtCQls4BAULzyCTjGCMc9D+fHB6uGdo/EsWzwM9G544xgEDIznPWoww4IOT7eg49vTHofWnBgxOCQCM+h6e/A6d+T9OlQlzfZ7Xi/JLb5d3r32IWujXZfdZNfdv+lgJI7AEHA/I9SegPb0H4ZcGcZOVBGcDjBJPOBk4P8A9emdSMAZPr3A9fUDpnjOemc0oC85wGDcnOMc4yP5DP8AhWibva3u20vp1WiVlp+
PnqCVtn10i+julZW3W97ab6X2tBiSC2C2Mk9T9Op57kc+mKcVU5Y8AckDg+hIOM9eev8ASqakAjBJJOCTkj+RPGPp647TrkZ3PxyMcfoMfh378n71Ve/9drL/AIb0ffW49XoujS76eWmyvrZ6NEpIAwuCOcHGSSMkZOcn39SPbJXLBVIHUcsOvDc9+3IGM56Y6ARkjgAnb7cEHnJ+nPt7dCKeqnH3sAehJ4PU4x34yefwFaRaW+7f5208rt29dOiHdX8/L9bfr6kpkJVQOD3YjB9j2OOgx6UK7EbeDgYbjryDwepzkcHvTPlGMNkgdjyDnqeOoxjsc+lKOVbAO4ngnr19cYH16+oq77+u/wB2+r8+3VuwyZUUgH5gP7pHBOeevUnkgeuPc0U9SMAMTnHA6Bj6Z7jj0Hp7gqk2v+Hf6NCt6/e1599d/wBOh5qpZZFMn90ttB4bPf3U4PHXgYqVGUBtwJYAMhA3KB0PGOCDjkd+3FDMrEEcMEAG0ZyRnhvUDj269c5qSMERiQoGJIUgHg4xjA9jnjjr054zrtWaWnvJbaaNfh1/Tczt1s7SSW/W+yfkvnte3RAjqQRsJOMnPQYGCOeox6d+ccA34WREKSIpkU5UgZww5yO3GcgdznFMKx7AHAMgHJQ/xZ6ep69MYHqAKNpJVFRxnlSeuTzjIzk88Zzjn3rzp31Se2u1tOl9X3W3fTWxnH3kuZaKyd9L2tf1/Nli2V1YNnJJIyRyenGehPXB9sECtGU4jY8liMNjJ7duMYOOx9vWs9WdI1U5BDZ4bpgdMcdcdD/gKtBmkU87gRj5TtP0JORwB0weCfTmOa7Tator9Lv9OmmvRJ3R1Uk2+WzcU18r6LTR6a6adN1Yx7qIP5bFThgw2kd/Q9/XHTvnipLSxchmBMgU4A5yPXA7Edj2+p40WgWR1DAqc45HyYB4yeBn0I4Hf0PR6faxLFypJYEYAPzE9Me46f1qateVOmlFu92notlZrzTavs/O+p2UacZyUZXSi7O3xP4fuXzXXS5j2mnN8oMaxgyYTDDzExz8w6emc/hjoetssQ7Wk3fIpViDhcHgbGzyBjBHbI5wadFZJ8reVux8pUnDbhnIxnnBPJ/rUF26QxgIASAW2EgoFU9ACPmZt3P4A9K8+Nf2rSfM5b6bq9ur6K/ld69j2qeHVOMWoxTS66vl0vd2Vr7X111vojZk1GIQsrMVAJ5Q/Pn+8vPzDpgHHfvjHnet3/nswQjcvyk8HDjIAY5+bHcD880t9fsFwr+XvxiMDd7nvxjHA9Qe/FcrNIrzDYz5PzbXzyQMHOeQSeg6cYzXqUKSfvNWer2e+mr27NaW676txJp8qStypW6Xdlvq7rR9LJ7vtetchCxfcSdpx6nuM5+g54OK6O1dHiGDywBHPGeckZ6jPQepz2rl4VwGwcHdnJHAA5I6HH1OM9uhratnLRLuxtGANpw4I7L7DjI45+prrlB8t272s9dN7Ja6q3zTfXUum9W9Xu9eui0dtFu3t0sdlpDbZIwr4BcHgnB2ngN3wN3Tr0wOhr6F8PzM6raCQ7ivDj3HAz3HXOeeBnmvnTSZFMsIOcKNzYxwe3sT0yTjtmvavDd9MuGVFdVQIWDANlTgngffHAI7ZOe1c9e3vXi9Emt1fXf8tddHfa562EaglrdSbsuqfuuzW7slpfZns/h648uV41LMkZYFVGNjcehPUc56jnr2+i/h/riQRTxFFcTMnkpjLhyeh7ZHXn3r5k0OKYGa5WQ58ssUxw5wAGA7Y6HOfy6+1+AroRokap5kzSB8g8rg8bT2PJ4PB4PQc+dWinTm7X91WXXot7JWVrX9E+x9BhJpTgo024ae9dNxd1rvt5a/dY+4PBmpoCYiNyqF2oRny2PJPswxkHHqc5Jr63+HmoEWyCR97vFNHHG3Xe4URjduBAAU+nQ84Ax8WeAZ8X0glQbzCHdX6cAENnoO+AOWA/Gvo7wZrk63UEWwKu8ssiEgrtY
cADBIAOevYevHx+YUudVFFNK12m9b6bX07Xt01PrcLzNK9lZqStdNK0bXsrXt0/Cx9k+A7qW1+0xuQS29Q/Qq7EFQfZf4frk4r6M8OXkdtZSXLyeZKgG5RksScgEdgAByfck182eC1EiRXTAusqchSNoY4+Y4zu9QfU89a+hNKU/ZPLWPJaLAUHJbOOxAP446D16fHYpLnabveynZWa2und+basvwPapw5orVuPW/Rqzu23r36/I9EsnFyY7lhh2AIB+9knjII5PH4Z9MV2MLoIwgG2UnL7ThT3zxnsfUZ9+SeT0FFdIg5USIuwR9yx/iHbI6Dr09q6VbWQXDBiQrLgtngdORxwc9yefTGa8erJRkklpFqyvfRtW+Wqfz6HVTjZN2Una8W7ttJLRbfNWu+1jqdNmUwErIAyk/uzzt29CPQ+pz9K1k1+WOHyJHJiYBVkIyy56HcDzt54zz/PlrbT7tTthk3IwA3E9VY87j3K/dHTOQMZ6bkOmgqASCAOVfnJ4zxyTz06HtyCa8bEQq1JW5JTvNctley91+Wj20/Q9zBTo01FxnFzlZyjJJunblT32trbq9/MuLOWAcStICScsBhvbBznOMHP07V1OlCG5h3KxjkU8x9CdueT6DHbpjjrWTp+lmQKrx7V+6pyWwMjrnoeT+XU446yz017eUBFDoSAX9iOh/Hp7n3FVRy2tKXvU4yhJrmTT0+HVK+jtfy0toelPMKcY8rknJLmVTbblcVdeevlp136DR7WNSJCrjLYymcH1LHnj6Ht25z6fp1rE4VvmA24O49CepUEfMT6kcA4+mH4ftUZVTyxwR1A4x19cjHJ6dPWvQY7MERmAKMZ4QEDd3HqfdvoQO4+vy7LqeFhGdNKVmm0vLlaurrTo1dL7tfl8wzWrOTip+7fSV9bPz0/FXWvax03h63tlZMWvmKGGAHxnsSxAPTrj3Gc17voNrAY0KR+Xu+8hwTj0BwM4zzz6npgV4x4asbgPuDOh3ng9CfTBPzAY61714fVozCZDl+McbuAQemOBk8jnrX2+VVE1BSg4ptWdtUnbtdu3qktL9WfGY/EyctXKTXM073s2k7vvrt6X0vZ+l6NCY44/3YGQMFgMkDGOcnOOx9672y3LtY/NkcBeSM4wMcYxjtjH5Y5e0ikkWJI9mSo5AAPzdcewGO+ffPXtNOiSBUErZOQGbIzn269MEc+2a/QMHQiowt71ktWuVbRvd3ab1u21fTre58hiavM3s2ru2/Z6rz0W/q779RptzLCwcthQMcHBHbAOOOMHH145rurPUAFAbcTjcGIBJyc/M2eOnHTOPrXniyWi53sT0BVScMR04AyCex7ZJ9cbFncSMcQkqjYXcwJyvOMZxwucDHr34FezFxSjDnV1bWOvVa9rWfz2XU8apS5pc6Si5Nt83VO10lu2ulrW6efcteqRhTucjKA/dBPTkZI68jBPv1qWCG7cEzSgB/lCgZBHYg5HIHb65rOt3SFEeQLwobefU9SOeg/PnvzS3Wr29sBiSPGOACDsb+6QDxnnHfHXjFNzV3dpRWt9trf8ABv33CNCUpcqUuVrVpNbcrs2n1fo+nSy30eOBCMrleDuOcHGcg8YPTBzUTSvJysgWMcFQM5JB5zkYwckdeT1rzifxBJNdBVkJJf8AhBC49W56DuT7da6C0vLloo1IYlgN24njBGNoH3uBznGDxjrXH7SMpKMW1e7vZK6snf5/jpqdKwv7uySVrfFpf4dNXra1vR2OqSWR4yDJ8u07crknp82ckjHHHcCs+dNkfMhdmcEtyASM8kYIz0BHQ+meKbDNceWcRAsvAYA7cEeuOegOCepwKttayXIw7hYmADfLgFh3+g7/AC569+lypqcXqmnbWL8k32V22r7eppCPLZqzVkny6a2V7a7vfq+19zMAhdFBBOM+Yw+8cY4GME5zjqO+SegWNmhYpEmOMxuwIDfU5wOPTPr9egisLeALsAfA5Jxz689CQRn
tyODmmXQQRsSoBUbsHG3HGOV9+M892NYQjGnGUUrKSV27t2XL8r9OvpZ2OynUhdQXNNN3d1pd21tpddLbXS7HH3kd3Izv5vl7sqVABB9sZHT9M9a5ia2lRy0jKwJJYAcfnxyOp6Y9Diu3ubu1ZlXBBfpxnLY5GeTjnjsOnTmubuolnmdI924gjnO3j/axj6cZ9TXj5jhI14uUZXlZNU1K+tklp69fO57WBruDUXTUYS+J8q2TWqd992nrb1OUuhEX28YGAMjA46A44x159T+eZNAmM7lRAADng8A8DjOCfX8fbfu9OkZxtHUckHOD9exwenXOOM9c+40qUICZOXwdo+b04PTrzkYyOmOePlK2FxEVNyw6Tik7rqtHdra1/v8AnZ/SUMTRvFRrON1ZJtLXRO+tl06a9zl2mRJWO4lVIAJ4Yn09cYHP4Diori5fH7olgcZBGMMffnBz36VrT6fu24QYAICgHaSDyzccA/XBxnms2W3lSUJjMeOBj7pHQEjHr6Y+nIHw2aLGU6l4KajOVouOvLolrdbPvdWPew86M1HlcW4r3tfdaVrO2t2+rsvu0M1JX8x0lbGQcggEE9BgnBG7oT+lYt/HJKskkeQFBJHdl6bgOQW6cj24rprvTzOYpE42jGB13HjO0c8YwOQOfXAFGSJrbCSHeQDgqeDnIKnjHbpxk+hPPJShiJxkqkWlpapJaOLS/NuzaV9Dsi6NNqUXFyvZ2dnpbTovRdOnY8m1CNmgkWQ+ZaOjJtZcsGPGDzwAe+32HqfBdY0e3t3liuys9uC6ozZTKMeVDAMdx6DPocY619OatBC3nYUeXIAJEIxt6jIA7j2/nzXkut6KtxaXEEaI06PvRnOBLHzlD6nGOOOn0rwsbTUW4yupXdraJ9b2d9XpbdrVLuvosDU51/LGST0ve3uro7c2r6PR2Tez/ne/4KufsvaR4y0Ofxl4S0tY7qO3jk1qaIfvXlVXwVQL82wFipDDkk+1fyB/EPwlNoF7fWUsciItxJBidSZoxkhZHY9C5yCuDgKMk9v9Jnx78OdI8XaBrfh7WrBJ47yzlgdJ4wyKSrBGVzn5lycYBP1Nfx3f8FFv2OLj4WeLte1S2026i053a40uRojtlswWaVt+Bym5djFf3gZvubRXsZBj3TksM2/cfMrOLdvdVntt9/qz6CthqOYYT2LTqVYJtKUW3KKta1k29t1t17n86Or6Y1vO5lDiOTftYrtRnH3ZEPO8HkHgYwM5yK4W+tv3jScfKEILAAtkH5hzg5z1/mc19JeMvD5DtmJhNbSkeVtyAi8EtGMArjuGGD1z0rw3WrFrTcRG4yowC28YPRQuOBgnHJIyQO+P1zLMYqkYK7Tduq8tHdtX1fS3k7M/DeJMpng6k2k+VNvVSurtNpuz0W7vrey225OxvHspwUG8YBK5AV2yCCTz9zJ7cZ7DOPe/CHiB3giJuikrNlVABBIx91wQCw6EbQBn5c14DNAGbIBVdu5RnaRjoADkjPpz7c5rZ0XVfslxChLja4wFyFZejgj04A4POPTmts1wEcdh5Npc9uqV3bW2q1t07adGjwcozWpl+Lp3lyUbpS1dtWrLSztp0tvY+/PC2vpcPZBHy8WQ0gfCKcDCkEfMBg5fsTnGK950DVXLC7WcIAQGYKGGxuGkDE9cDBXHzDoccH4T8K+I4i0DIzDABjJYLsYjgY56+ucY6Y6H6Q0DxBH9mt9sxc4CXDN+8zMDyVGV+bBA6kZwck4r8G4iyidGpNqD3ktVp3tfXZLR9Ln9N8KZtSxOHheUZe7H4ZP+5Hppb5u6fXQ+rtOv45GG1mTagZJN3yASD5nRTjduwB2HbiuhhdJQ0KshhLhVlIDYKZJK5IKqc/MMntg9q8J0/XnYW4PlBVKqVcYBiHLELuO4twS2RjPA5r0SwvlnikaE7YpHXK9EBGOY+6qOMrzkkcjHP5ziKUqWlmle93smrK7drO9tV0eiXRffQ5KkeZe9ey9
5Kz1Vut09ba7rTzOwktDOsbLHtEXzlHQbi2eq5yMJwQRjcGI4xzPJa7MXLIVZFDq0q7QmARgLkhUweByeexpdLuGnIiWNlUbpEMfzsVGN6KxxtTAyRg4HvXQRG2lhYBTPCImAYZZld+hdTgnGDkg8dh6eTXxEoSsnK6aXV/y+mlrW31WnS+VWim9Hbf3ZO6e1172i+/RPbR34K9sIXZZEjaLyNylGj4lMnWSM8YPGABu49sAY01lIGKJGY08pWcMQBJIM4yTzvOenPuTkY9Bki/dYYLLHGWYOijzFZSCnBznBzn6Y9K5y7t5kLtM0Ykjw6yPlHZDn5Qx++xAyRjKnj0rpwuLlJpOStFu6bd7/AKL9PmefUwy3a1crJJrXVaJff1dunn5xdWgDuWBClmAbAPks/VsDBU8DHOBz1zWN/ZzQuyqHDlVeObPMiPkncuD8o2/KoPIzzXoF5Y2+IpmV5HnVy4RyYyzYCPuwfkXBzkckjpxWFPa4KIhdwE2xEDbJ6suOdyj5QoyMZOT2HuUsTJqMXK3dN2Wydl0trtf9WcM8KmpScXe6T5+9k1ZX6W7fgcsczmSYrDblXXzFiUsZUfcGG3orJgHAJAz9arzWPmkrEyRKqFzO/wDq3J7joeRkIn8RzyCK6E2oSR/LWS3hWMo8u0GLA6y5yS8j/wB3jJAOVwKgcHymRYo4lBCkFcB1fqdgztfuFycknoevbSxDfVLTZPdbW09Vv8rPQ5KmFej91JaWVlfazv1e9lf5nKMjFMMwjCMIoYwd8sirneUYD5VX5c8EDcKd9mDpDLEJBJNEZZVnbfHDIhOJUXH33zhTnAx3NdAbCVYMIkk4jySEwrIpP32ByVbHXk5479KUtqYJy6hRnaFBP+qVs7kkGOXfqRxtK5zk10QxCbSTabW+mm107NN37LXfyOB4eSUm1azbe1nqrN7O+ttLL5mUkEkcck8OQ+fOnK4VpHbglScgDj5kwM469KxtR09JivlIDIyeVOwO9nlx0VSBheck+3cZFdAQxlf93tRVZuDn7Qh4Y44wq8EdcZIx0y1okG2VmjVPNOx24Z9oOxkf1BJ+XAzx6CuylVUY3u7u1lZ6PS9r9tba6Wem5hKmmtVzaq2nnre+36o8znsXjPlmJnZ2Z8gcEIR+8Dc7QmTknuR04rnryyEe5wpLOWOS2SgONqHjq3JIwMY4zXp9/YyJtJYsJAyTO2SsaufuBuMORj5QOPU5rjrm1DOVlGFU7hg8uI8iNiccLy2/AO7j616dGvJrVt+fe1rLfVWv+fcxnTtaKi7uWj7bXd7b6WSXqcM9hJliiTFijOQCFIyVIYE9YxySe2eR1qRojysifOSiqiYZFXHLEcZ6cvxj0NdFJBllTyggVSWYZId+qsp4VQN2CvIPv3yXtJFcSSxxjaxO5iVeRM5AV8HCj+IY59QRkdkat12677batXey7Py3uaKNuuq2d9tFsr77O2/QLWRWBgdeFGxHiyzLt+4xfjMbc5OOMdOOelh2uFj+Vwm7MgOECgLux/tqMcHgg8bcDOIsADqyrhXy7MvzKNw+VVAxgHB+YdcfnrWUcijzDgJg8dmc5P4scdAB169axqzS95y30adl0Vnfo7+l7X01GuZRTqSXM7Oys7q8Um7728+t2T7t5WPZ5TgfLGRtUx5wXY8/e6jNWUhaSAsNwZdzNEoJyUI52/xbs5AHDYI4xTdoCkySbDI2EaQ5KbSNyKMDC9gT68cddOAYkV5CHcH90VyFljI6YHDBfTge3auaddR0ctrd7dLO639b6O5Scl3Wi101Wm3e1lpp+ZBBGGjG0blCs3B8shhjrkH5TnnOc4/GrKW2UAiHmupZmy2wgNjJOQemBjPfoauGHYhlQYeX5njUhhkHnjjlvbA69uKR42UyMI25jA4Q5QN34I3dPbbk5JzzjKtGTave71Vr3Stu9NNlutdDeE0nabtGyeve6XTXtbRJX1sY9xaxiXCiTzAdzKvJyOo
LAjII+7+PGACcfUI0UxIowilgkjnapZgMqFwT8pAyQcD05ropE8verbFQ5Cqp+ZyP7x5IzkA4yT64FZd5EPLtlMZOxWBV2JaPaVJVFxk5H8RPbIGKqNVSdk+iSbenSy2T2vbS+vzPQhXsrym1FNWto1ZLZp6dGr3XnqYaXTWczNFLuWQ7JAhLgq2N5A42gjB4BA6d+PQrSaJreJT85dCcA7yd2NufRBgjOOc59Meex2ziZmVPkfcytnAUrjcfTAyPl75PIIrobKX7I0UpwPtClIgTkMOA554UEkYXPr6VTt195tWeuz0XTp3buvPc66WNk04tt3doym7uy5VZtdbJ99vIv6kgMRwMSBd4Q889wOnzY/LPvVLSrmYFZGcoiNsRsZYkcYPOegwAeuCccCtR1kuow2FZ4nJBVPmZed2GJGCvGeOuPxzoQ/myRlR5IHmsVwGQoeMHB4bJHGMY69AG9nFO6S1XTWzbV3r2b112WumkcVUlN2qJxjpODd9fd1Tva3y6JI6hjJLbq+7YpG5f7rHjLspP3+nsTnBzTJIkMIEs4Xoqk5yGJOAAM7V44GTjPXkYypZiAhkkCeam6NA2VUDnHQDf+Hp1PFN+0rdAqxLbc7cZUYPR2PQjPXpjGee3PKnJttLTq2tdbapX2bvtfbYzrYyMbR9pKPw2aS2vFdVr9/8Ak1uLF0Vnkmk2rgjgE7Bk7xjqvHXIxjHbFQQvAsyqXEi4Pls3BDMDg4PTnP8AnOSaVggUuwdQIyc7hs5+UtjLBu3FZjSpKhYxMzDeqBnCYHGWX0J7Z689K1pxm022pLRaK1tnfXq1okeVXxsYpv2jVtlpfRpXsrLd21XrctyFHkSMBsH7oJyWdeTtJ6E+/cHOOa2bKIGFYyufmxgkCRXHXPXOB0HQ5wMVzEMk7EFRJDGGIjmcj5mHQAdh2DZ5HPPNaa3N0kUkR5K48xgeSrfxKRgK3BJPP4dadSm4L3Er6XXTW2qtpd/PXvc8z+0JOTfMnLXl5ns046RS8vxsWpZ4I5iJyXCK+07QBGRjbznqe/UnHHvi6heRSRI75dFyoC8kNnsM8gHvkDk+9F7JOEQoGkUpnBIwiY5Y9M5B6nHqK5maSbDOojVVcYjaTDY5y2Ofl5OR+HSqo0ZTvOc+XlautGlqrLZNNta/8E6aWZSppcsru6bTbbvdcz0el03u7b+Ron7m5owV3AgZw5Q9ucHABxgUsOoi1uJZIJ0RFwo80fNu6Mqk/wAPY9RgcVjJdTCWUMN0aoGwe5xx5fOAMg8nt19KzfOkaT5ypQFl3HnryoJA4I59OOldkaCndVGmkkrt3WrV1d3v2v3f3+/gs2a152mnHVfE1dOztZ7790bl/qJnmVs7nAIzjEe3A+Y4PPuT1BHYVzepXbqGjjICKuGORgFgdwGQOuB09u5q2rlDuZgYgPm6d8ZGMg559Dj1rK1k27xeapKoFJK46t7ZOTyRk+tddKNOnOFPkekbJpJx0S9E/K3XzPrsHm9RxUnVkqnKmubSytGyXnZXs3q09rHBJmXUHUpvG4sTn+EYJAPPzDJ9uR0zx16xTi1YIm0fK7PnYVX+6PqOBjOOnPfM0Ozgur+HK7lLkAjndt7Pz0yQffpwK9B1CzgUQJjy0ZSNxXG4gYOEA6L0Xrnk4GK6sRiIQnSptOytbmtZ2S6X802tPPXf1KGYUlFKpJudT3rq6kleLb3Ts7+VzzS7EkThjwrELkgZG7q3ufTGPwqk4ILtl2jVQMsPusOcrnOR0B4A4HHJrptW07bIBG2YwFKb15mLfdAByQewPPqenEKaFNII1O8yk7yg5ATqoJ7sefXoB7130sRS5IyvsrPvurp+bW2iS+Z6eGzGF1CD5k4p/wB5r3Vqrte7bVu2u3Yo6Lva5hYowCtlXGc5yMZyMEEjkfjk19F6JZz3wt0TPzRo2wrjaw/ixn7x7Z6H8M+S6bYJbzQoxCGNshXJGcDPJx1GMEDOffkV7d4cv7d
kebcUlWImPnLDaNo2nsGGf0BXtXznEGLlOkpUIOSV7SeqTSVnezv57eiW/tf2osPQspwc5RvBaK17ayWu3TS+6d2jt7DT4WEqXDh57dgjIxAE2eMd+PUdMnjHSu0022EFo2y3MZkVzGAoCjpyPTd0JwckYHPXzjTp7u4u2ZcAyDCybzgMTnLjGWPQN09BwK9khsrmK1tXaUPL5aMQOBGOd3B64GMZx345r5KniHCHvcrlCSqXV/ie6utLp7XtdWZ8vjc3lCUbVudyknNKTTi/dbstGo6NJL8XocLfxXEcMgjR0aU4kcejZ/AOemRjkHis/TPDTXM8EhRRECJpsk7goOc9Dyx9eDjtXYajZSTzoAqPBIGaQA4KgY3N0IXGeTgjkY99S0KWtoYFjWMriKUkh/kBGwBjjIOTlcfp18+NeWJqyqqSb52mns43i+++9tGvPU87EcVKlFyhUk5Q3T0XuqKu7vXbe/bztoWKG0tniMAUFQqOVG9kcfdB4yxxwD0ArZFg07QOdogjTLxgY2j+JicknHYDB55PNR2NtJcJHJIWUPlYXcZXdHjBUZGE56noc46Vc1HUobJSInTdk5RiQAAAMjGdxY5wMAAjHOcDV3nVioL2bUkrJWvFct/m10vqt+h8Rj+JZ42o5xSpuDbfNJvmel0vm09duxkXtz5RcRoy27HYhIIEmQSCDjgE9Gx/SvFvFd1NMriOX92H3eWTyAPvbT1JHyjsD65Ar0PWdeK2/lNKqKQdzcbguCVUHPAHJB7HjkivHZroXk8sandGvCPj5Tk8FjyG6HOB785FexRw7cqVoNuLvKLXRcutvyWvnsedRzZ80pTqSjNtNKUnyvm5LpK2iWiV/mtyjo9gLmbzWAyhLxMxz8v+0xxzn+Eg9etN8SeIrTw9ZzbZVS8l+ZcEbvMUHaq8/L1ORzux75GpdS2mg6Y88jFA6F41HDSyjk8/wqTjnnkcDkV4VDa3fi7XZdRvC402JnZVIIjJX7qkdPlxg9j6V9BhaUKjUqnu0aaUrLRykrdru2uj7Bic0lGMaWHkp4nEPkUlF2XM46t30Sv1a19NdjwtDdapqTaxqEzjzXZokYkoNzcMVxwSO349yD9N6LZLHbR+VIqxyAPtRRwxzuIH8I6HoT3ryLw1bh08hIAkcRPlkgEKynHJOMRtkYHtjNexeHrG5WWBHkRFYKwVjlFXoMknjHccde4Nc+NqPEVnyxioQSio3VmnortWu1Z8z1v8rH1vDeFnThF1V7SrOzqTWjUrxcnrdtK97X2R6p4cgiMlv50RMZ5QjhnP8JPBPXkr19+lev2PhqK+3So5iIKLLFI2CD3cnH+rI/L6EGuI0dbeCCMSKriMIwm2ggHJ2mPjIJ+p6cYGa9I0221Aq7xOuJyHBYZZoxnb3HPPP+HVYeDgnzWvGzaSWj6ydr6J6vTbW12fqmHfs6NNKcYPlSUnHV6LRq19U9dGr3T7nRx6JPabmTbcQRAJGVG5zjO4g9SozgMeRzgZzjoNGdYS0M9uQFBLNgM3PIJPckg5PI6daZospik8mQlWRTISeVYEAkEnJOSOF7YPrXXaebaZcqgWSRmR3GMhBjIcc7cjoBnvyetd97pRi4v3U0klZJ8q7avqt/yIqTnBzhUUqkU01NNcurXTpqrW0WliGxsorrU7eYBjAFLSt18thjHT2PTvk13fkRJLG0WJIygEeBty3beMndg5xjpjANc3a6cY2m+zTeWiAs+OW57HPY/hgdM5rcdWjFm6yMuMecFPDL1wo7kgH6HqO1EUk+W75tHbVXdlt5a387K/ZeTXm7NtWTTXLLVWesdGrtptf8MtLrRxySRrO5hhLDe23AaQYIXIOQB6gfQd61n8hbeR5JDiNf3Uakc8/Lkjpkcgc45Hes1mtZfKJSQpnb15ErcKxP8AdBBPcdeaqXLC3aQTs3luRsGcEsOm057Z4GRThG7tZp3a6v8Al8r9NNfnZHymMrRu4TqVJNLaLaT
d07O+21n+dylq13KGhVkZJlQfZ02kK6MRhyQcce4/A15t4o1V186NpAGghIkZcEndk7WJxyvqeme/IrpNc1yVQdgDfZh5bNMVB2t/CpI7gKBj06V4B4t16WaO8KOLYtFKJnY/JnjaD6KvzZPQ5wDnJr1sHhalWVN2jbRX93mumla672S89D4vNMaqcJOSjzKL0uvLbrbTTXe66nnniXWRPFPG8xtV88kHfh2CE5ZuRgH6e3UYr4g/aG+La6LpFzo9hOiXd1B9jVlYNvRwQxByCSMDf0wSMHtXofxJ8dwaBZXd/dzfIAwX5yC8RB3sg7B8Dawz0OK/Lrxv4suvGOv3N/cuxtkmlWyQZ2Ip/u/NgZwCSccgdMc/oWU5VGc6cnFezgk5XV+Zqzts277tLRdNkfz/AMd8XrC4SvQo1H7erGVOFpLmi5aOSV9orX+VPU5WaZrqaS4mfMskhZ8kliSxJ57gdc+uODUkaqOQM5XIwcMxP94dAfUZ6de9Vgp3bdwYEZzjB5ycdQAcjI/pwasopADcnJIJJyvoR7dDnjivt7KEYwi7bWjpaNkkrdLW0tZN9r7fzTVcp1JznPmnJtuUmm5OVtW093d/qS98ZKjII9en3eeD9P8AIMnkE5PTn7oHbOOOe2M9hS44BIAAHfqDnrznj0PQ8UwtgnkHI+7jJyejA+g4IPbjoOmP9dvyM+aW13r999Grfdfr5ASq4Ugjbg4AOAfQc/p+h7gYHPUg84J+mMfXHY5xjgnOEyTgk/UDGQB2PHIwPp9BRlcAHJJ4GOMDJON3Y8nJxj07007P187dV1/r8CZNLV7bXt/VvP8AVgo24PQ5zwCc5PzDOcHjjp7dOBKOoGSBnawI6AfQcHoeD9PSmgBQOckHhB6evbnnPb2wBTudoPTOPTPXHA5I9Pr1rZWtpqlbte+iXbXb0BW3Vtdb97+vffsOPlgjBJbkAjOeOOTjOOv65p2GILEggHqMA45B+XIzwOfw6U0qcbsduDxke3f144P9KRdxJGNyc49Cew498cYGT0wTTDVeenzb/KwoyVyF4LYU5xznsRnHT657nGam5IBICkAZA6epPuSOrfXoc01dwAAGwZDEdMEHGO5BORnnBNOGSRk5Xvnknn24xioSte71b5nbysku7X/DdRJW7Xdm9vK+3ZfjuCg5A7A568Y9yRySevXgDPJGbAJxx1xx3zkevQ59RUYwTlDlQOUP971PTpxkZ49c4qQE44GeOcY+nB7Ad/UcdaE7tauytddW7r8F1dvyHGW6v5b66XWve/VWXTUSNSCzcY6Y55JHIA9sY4PX8zcHCFTw3G9gBjPJ9+p7evqapRsVG4rwCeOPXgg8+uT+Az1NXN2VGDgMDkgDqffpnkduPfoLg5XkndptOPk3y79HZrvu99S6SSjJySk0+qu7fklotnfuyCR3BwAMAnpnPHfPXkd+nJ7UUyRMtwR9D6EHPJ4znjnn9DRXQCjFdJW3VrPR208nbpt8nc8/Ve2CqMTnndnnjnjaP8MHNXI9wVl4O0ZHH3d3AwM859cn178kir0yo3KpVU5AOP4zkY9uDxTU34VyAQGIQE8noVzx29cHB6c5rC/MnKV+bS199ld7Lo7L5mDba3StZ269La9WutiTa3Q5Vz0HUYPQgjOC3oc89eelqMMY1+fayHLEnjA/iX1A7Dqe9QbiwAYkP95mQdBzxk8nOOg7U5ZcqwJTHPDAcp3C5569MZ+nFYTi1dpJ3vou10lv67fcJaNdfTXW+uny0vfurX0nVgr7nLspB5b+9xwB2PH45q/E6nLrxvB27hgA+oPY9unY9ay3J2oxJKHnZ168AjkdeevuMGpo3yFAbC9F+bJBHXjqeD07njPc8VWyTevbRXX2eu29rvTur3sdNB8z3tzJbWTvpZO7vp1e2xs28fmN8y7uhDNxx3JPG48jjAOc8jkV1+nwsRCqDocBiPujjAOex5wMEfnmuY08h5UU7mV1IBIPO3Gfrnj
AHUZr0nR7VXlQqSVZQPKGSpDEZZcdwQOPr7GvOxGIjCM4vSyVndtXtvd7bJ32v3PoMNhVOVP3eXla13upWbei1XZ36rzROumsqq7oUyMlzwCTnoRnG7seuQOa5fVraZRMCChGXRiAADxwvGCP845r3JdEaS1DbAWAxJn7yADHC4wcg8H15rkta0GVkIWIjGBIhwXyueOBwGGCMHnBPbj5yjmcFW9+UVFSSs3a2sXe789+/a+3uV8Oo01KzvNXT2XL7q1t387tX11R853ltMzlChTurA49eDwR3zk/rWT5RWQs45Q7cnOSOOB2/vHIPP1Nev3vh8sZB5Uv7w/6srubevByOMYPIA9OnY8vfaIVhdkiYOOAGGeRkZY9Fx2478jJFfVYPNqM+SDdvh1TT3cdb3e+3lv6+DVag7NtX0Wmy0S87N69/wAGcqmwHqFQpyASS5xnnpggAHPt04q9bSIIgoBf5ugGSCe4HGOMjgkfjVF4zHuVgOCVOfv9ckevc4yOcH04uW+WwuwgZLIQANmPvdD+n07dfoYzhOCs9NL9dLp6a38raNfMum+azTd5WXR3skrtrvt87dDr9JBEkIVcsTgD2HQ89j+vQ4xXq+jD9xG0beTl2L9SS3GSADwCSfXPtnFeZaJCjzoQCw+TjOASe4BHQeme/wBa9l0WySOzJkjIZeYuThivU8dV5GDjkjrwa8+q9Xdtu90ktLaWtd7btW6vvv7FFWaTdmkraaNtp32V/Xr1eh6jpd1ILWJQ58xIAknGN+8gADsTwM9Pc9CfWfB8EojeIOUkjx84XBVD0ydxJcZ69P0NeQaMkv2VBEoaDdk5GXXGMqDjjGenPPSvbfB8mLzySC7zQknHLIdowX6cDB5x645rhrSTjJJu9rv8Leel7K/yPdw8pqSVopcsU+VdHZXt0vrfyd/T6J8G6lJbziKWdmkRFiDl+ZDjhW6buhz14OOmK+mPBOp7pZXJwkKFXQjkEehzwG6Y/E18beHiPtKRO+wxPyV5bO7IY89uhx7V9U/DuynksdTu2u03wAPskYZmQHg4PG7HbnpxxjHz+YU4KLb3cV0b/lt1u93d97a3Pp8C1eKjT5uVpy95+V9H02Tv00sfcPwt10/Y9gkwAxlkhZhkLnhVOcgDGcBcdB0HP0z4V1YzzTT+cwUKBC3UL1xHjpnPXueuOor4P8A6uquImLfO3MitjCHojED7p5x+NfWXhHUoWhZIuZItrFU/iPcAZBwADz9T3r4fMaPLKc+X3Zcur2veOttWnto0tHpdaH1GH5ZwUXbkbcnba91ZSfmredl0SR9IaRfM8i5kwzOA0gx8o44wO4GeMk/lXqtsWfy/IkFwWQFgeM47HPPpz7ZAyRXhPh6/s2CzpMqljtkt3HO8YyM54PvyK9d0XU7YtHNDIEkUhWTIOAeC56Y9M8Yx618/OmnNK9r6K8b3d1d3bWyXpezR0P3VHljJuPwt3d72d02r6d0rXuvX0zRoUlIiaJlKIQUIIbJHHHOQfXjgfWtFoo4riMSpgb8EYOR0GSOPcj6c4qXTrm2nKSI6CfYoc5CgkAcfUjnPJPbOK1b23WZlkQhmAXeBzljwdpxzjP49+uK61RUYK9m9HfS9tLLRa7r1vq9zmeJkm7Xhd+807SvZdbaeWy8t7b2mJblAVaN1YEbCACOMccgg+mfywcVsBERgBIU5AVQuQckEbmHQ+v59a4OK5uLF0dYy0QIzkc46dAAT1/8ArDoO40rU7e7eOIxkluBlORwOckEDn2q6bUlyuLhZ3Wlkr8t27vXy020vuJ4iUI+5O0H1fvNt8rsnr0+7pc7XRzPlQoyBgYyMY45HTjGc89V78Z9W0YJhC6jcWxjsc9cZODx1Pt7GvP8ATLZQsbIrKHySQThunBPQZxwOPxxXpWk2fCum7LcMc4Kj2BGep9f5GvUwdSUEo2k2nblS3+H5JbaW66rY8yvKLvHW7srRWt202029tNLL1R6Bp4tlKBV
CnIOEGAWHXcfT/wCtntn0rRrwROpYLtA+bP3ucHAIP55H0PJryaxilSSNI0LAddxOBn1J47DjH6V6Do9vOW3MxxjBViByRwRkDOcEjGM4z2r3sHjKzmkqMoyjNJNJ25VZaq+i81t10PHrUoSjJSlo0mr3v00vqr21Xm9nbT23R9ZjIXdgbQQh6ccY+oI7dew6A1vPfCaRESQgsByOg9B6gng9+cZz0HmVo5VFzlADjJbGAOnbnH1wTiulsZ4ll3bidoJdgTkE9Av94Zzz6joe/wBlSzWtCjGEpRaVo6WUk/d0s0r6ap37+h5E8DG/PFSTV1pZpLRp2utuq1Tt3PUtFtw7iSVy+MsyMTgge/OT1/PjPU9W94kChk2jZxtUDkdCD3BHfj8s15pZalN1hDLhfmLAjIODjryD9Sc/WuntpJplJkBVSOB2YHltvv07D+p9OhmEKkVTpxcpvRtKTu7K9n0Vn0S1020PNr4Xkk5VWmrL3bJaXi1e3zVrPp11N+51GaRAsbBgqDO0FQo/iUHcQSOxAGB1rJWG4upsrlwG3FMlht/hyex5JJ7Y9uNGzsJLlw2zZCQq4PykjODg5+8eOvHfiu4stLhhhGEGSCAcc+hOfXoBjGfYV6NNzqW5naFrq6aV1bTVWfXTbbU4pzjB+4+XXW3mk0mtVddddLvSxzWn6bsYedGFbIG4kHOR9xj1B9T07DNdZDBE5LMMkDIROnHQk8dT6Y7ZyOtkWUT43IcE5JJ28jPIO3rjqfT6YN2AQwBo1C4GeDyATjGR14PGcc9evFaxir8yUUoqyai01rbba+i8vXUSnKd1G/azd4tu2ttNHpu3621JbeF5Q0ewIp6FQRnnPJHv17Hnpg40jaKq4JJQYAQEAbgeGzz1PXp6YFMhmRFAZiOCGxgH8D0K/wCJ9qpXGppysUqAAkEu2AjDkg5J9eB1z7VaVNe9KSinr0S1tro7Jta36b7lQjNtWVuj5Vpd2u++m1+l1vqaEiqOG+VeC/OCBzxnr7/5Fctq2psGa1tkUykFQDyCM/ePUeuMHgnp65OreJDbCU+cJWIAO3PCjHQd8+3p6msLT9TjuXWaYum9my4JJZR90qDyMkgH29eh4sTiYJclKopzkrJqzSWl7vfVO6e6+Z104Sg4ydnL7KSbXNpe+z208lfqbNnp17JhndJN2WywCeWOMg5+8enHBHGPazJaSoCkZHTLMF4ZmPIHBxkDnoP51bstQtm+UMflJwQOG3AcN35PJPQn2rS/cSEPGwfjlSuQPqDxkZzjrzxkjNedh5R99qfNK61b1jqkny/y+XTU9L2s1ZOKcVqrR3a01a/K1vM4iaO4iZiehJ3DGeDweO2RyvI9BWZcXCKCn8YydzH8iTjpn8efau31OAlG8tUXGfMbbgnPoMjH05xxxXnGrRTM2yJcYGHABBZj1J6856nI65I7V5ubYqpSTVOKatrLlvzLS9rr731PRwTjXaU7KzT1fV232stV5ffpTnuo1UyH5iQyjA+XH+yRg5HBz7fQVn2QF3I56KGJA6AhT0Oep6/pnuaetvPxGTvAByM4bJzxjnjGAevvTRaTxNxmLHOVOVA7AY6jqMnH518NiZ1akqdWdNypJ80qai002oq7lre3n5LTr9JRjCMJKE1GWnvXeyttrq7q6a072Lcywwj7uV7jgkdeh9vTHOOx5rnLoRSysSCsQJ2lgQRj1Y5Hv0B6gDnNb8aMZljd98ZGWLAbgRngnup54OOe2Sah1KxRmQIAFJww67h2PTPB9uec9OXOU69PmhTtGNo8jXdx8tVbbr2tc1hVVOoo1Kkvei2pJtpbK7VtfkvwseYa1aRpKZlkEQPJU5wWHffzjOehGM981w2vWtrLbFk/12xT+5yS+M/N3wR7DJ55HBr1vVtLCxyGZd0ZXgZAPH8IHPvxxj+XnN1BHbRyM0MggbcsMpXcI2JxhhxyT07Hrxivh83oz9vb2fs1bm5pSekly20aTs2rK2t
vkfWZZiE6cJxm3KLjCKVkpR000u29lqrp3s7niOqafOsn7+3Eti8izLdF9kjqvOGIBxz0z1GRjivz+/by+Aml/GXwHK9vBAdbs7SSA3IRVLwyIxaHysHex2KC+5ccHHOD+nWr6PcmxnghLXCRoJBGB0QklseipnJA+ma8tn8OxanYahBe28U0dxFJE0U/7tSg4DxuQwWYclSFycY9TXj4arPDYqFZRldO7ctYva61S2V7dL2+X1dHFqEqeJi4P6vNc9OCbbsorWPMm16a36PZf50H7RnwT1n4d+JNW0u60uaxjtLy5WZDkpEzOfKkjBUZin2kq4yBtOAea+A/EOnyXBuozAA8TMY2hUbVK4G9ieCO7Y6cdRmv7Sf+Ckv7H2n69NdeI/DcImkSLyLyYpiSRUVzIrxhT5jjIAO4Y9eeP5Qfiz8Mb3wzrGpWM1pd2dvHczZLoYgwYnb+7IOA2w4Bbkda/UMozijU5OWSUrRundNSTjpZvXu0tErbkcS5NSzHCLG4enzxrU1OpHlvySlyqV7pWktX7z2PhXVdPmt5DIwZnYKGUcKueuRjuPqOoOTWIybd7cqVclMrtwgP3l789OMZwa9Y1TT/ADpZ4drHDEK0RGFAyBtyOU672Oe2Oted6haMjuFJYqpXbj5ig43g5yc9h2GeueP0HB4pVqcIzkt00mt72263vtd/Ox/POa5XVwlV2X7q8rarR3Wite+601tsbHh3XjbzxI5YurgKR90knB3DOMDPJ9PTHP0t4Q8QArsWT53bekeMIGwCQDnowxzjj27fGjkwyZXaCuCCoO4ZzkdCeMckcd+gr07wdr0iNFCZTGWYq0jHnHBZVB6M38IyM846V4fEuRUsbhpVqcffSbaS02WvW9r6LTrfQ93g3iqvlmNhhq837BySg5PWNnH3ZX21ta7Vrb9D7t0nVIEAlJZxOMgM3LMMbkiAztVuOOQMYyT09e0TVY22u8sihI1eKEsUWVj13tzlh1C8bvUAcfI3h7XxIsSl2aS32xBAciOAdFU8cdSWzyfbAr3Pw7rQdo4iypAF3LNJwFYj5GVcnlsEAg9ug4FfgOeZVOkpKUG+V33aaVo7aa3td6PXa6P6ZyXOcNjIxs024wdk3e75d9Wnre2is2lY+lNM1NZLmHzZiFwGcoTHgsAJFGM5ZsLkZAPsK7+yuA4hcukced8IXCeWO3mAE7zJj5gcE47dvC9GvsWjyLPEgULLtk+XLEn98v3iFyPzIJ4zXfWOrbYvOhYTMVVJInG4xdd3ljIOxf74xwT8tfmmPg1OaSe/RNJNcr/4e2t30PqoJTjs43fW70aTVnutOu97X00fosohjmUmWPlQwEZwrE53CTAOAf4Bz39qx7qCLCJK2+JFKxADcQR9xHIGS4LHnt+VJpmqPdGVFKKrZMSxqJfOVMcq2V8sHOMYbBGPYW1aMmOYBWCguVJORsyCMcF5CWBDfKRjv1HFh63LK0lommtr82i213btstresToRSfuuUl7qsrJt2a17p9d0+hx4sTHFOm7DyqdyKdjsWOTHGpB8tRj5uue+MVgyRQgrKQRh3jLbsSIyYHzjHyhM8tnDZ6cV6DcxKz+Z80csrsBOx+RGfaxiPA3OMALnHU1kXNgBFK8IX/SlB2FsqACd7425ReeUxzxkivap4tWs002o2u/d0UdUr/562Wuz55YZ8qSTbum76vpfys9fxXc4iS3RAMW8jBgfMdP9U4H8Sjgb1z1zxk9CBWakTIGhkjU26yhojFzcDd92Rjj/AFS4OeDyQAB36x0DAeWirHANscbKd+1sh2KHG4NgdSCwweorAuI5PNjVWXzGyY1Ubd6pjEa9Qic4PJye1d9GurK8lra2t2r20b0vo9r7t3ffGph22+aMHHlSSas1JWtum9emzTXW1ytHE8bAzYuT5B84YwzO3RGUcgjHAz15ziqFzZPMo+STGHMkeCuxQRtdnJOABnjHTAq63mx3LZDbpvLBTdtMcvP
ykhThV/izyc8HFTEMVXessgCyebk7YnYldiuOSRnOT346cV0xrcqj70nfa/R3ja13Z9U97dWtjzquE08k1fpvZrd9+nVrW1jk57aLfsMbKf3QRkzhR837uXnhD+mPxL0igVXjuBFNGjKgfHy/NkxlF5+dCDuxnr9K2PJ3hwFYqd4DlgB5Y285IJxycHHfIxyaqzQookQxKzGTfEy4IAyAWTgdTjpjAHA7V208S3BSu7ppKzVtkmtbeq7b7HnzwqjKT3Tdk+jfu9/V9Hfz3eVcWUYZm8uRoXjcIC+4SyuAeRgcLt6DP171w9/YRuoWKMlt5ZAcqgYnlQcHKjue/tXpdzayt86hjKixh0U/IrnPyKB/GAOuevsRWLcWyySDgLHbHJ8xxG0krZBCgqSqLg5YZz1xjNd9LEvTllfmWq102b66vdX6bdzmnQXwtddWr6aJ797PbXqlueZmxdJWWeJkARj7bsAEnjBU/wAOeoH4CvJYDyi0gLPjCoF3DapOWODz+X+Fd5NZuzziTexcAbVAkIUdGDcAg54bHykfjWVNbeVLJKRkoilIyvGxsghjzuIPXA9PrXbDFNpRvZre7fla1vnp109TB0owTtZJ6q6vq0lp69uv32463tRboJdxLqBsjAwsrE8sxGdpXIwMHGfXNaywhUV5Y4t0wKuFO8RyjG0luMbucMR/hV9YGfcqh1iIJZV/hY/d+Q9EY52kZ6HjgVS8qRDsaMRyEFSgZmAKf6uQOAMOcncNpAOPYnSVbms29fS9krW06OzdvNdERKKjK9Re80rPlutOWy00S2d9vIaLRthEkZb+9tOWAXBVgTxgA7nOOhAI9NG3BVhG5DpCVAJASRXP3duScKe55z9KdbvEcRyo7lB5jNkkNIMAqRgDZ35POB35q60ZkKs4YuspCySEB1znbgAY28cDOSM1xSqyatzdUk2tNbK76/otL7aYbXu3NXbir6JaWa66Wbtez09CFEliLs6kSbh5ZA3AjPDDnGQMZOOeOKvlVMXlblUlQ3mlvvt1YLk4ySBhM/j2pNhJgViDKykuYslMnGUB7PjoDkjPJqWa33Kyuu5l2GOBRyiDOTtx80p4LPxjGNvQHB1XzJN6p9HurLTXRdHb16ClOUVzNOWmq3S2XXS19euqv6UWhiuFBZVaaT5xsGNqLj74JJVkzz7461nSWEhmLiIlgofY3AZQOcN/Eo4zx044Ga3o4vNEjMGARQF2DbI7P0UNz8vy/OdvIx6mog8IdfMEzBIwoVckbycfOP4lHAkXuWHK1rCet18LSt1/ls07pdNbjVdJXi+2932v6J7NO/knpfl2sJC7qcxbUAIB8pA2c5x82/dnt6DnpUcVpI7IbmIBEYCIY/hHSQN2OcnpjrjpXUeRJK6Sxqw8tnCvOn7uQHgFRkYAxx13f+PU828zqsLoI2JkeSVV+WTldmw5wofL7U5zg5rpp1f5mk2ls9baXt53Xn1+VrEyd0naNkm099Fflv6Wemr+ZmxBoFmJG8IGUzE5VxxlVGMZbow55xyapfZ97pLCzSZOXRfmwoxlQcdFzyT1zjHFdStiVVEYM5K7TgbowFzs344LcnPTAHWoFsnt3k8yLBdzI6xrtRg+PqRHx2z/ABAjNdDnaNtbPq7c19knbRb6u19fmTLExVo3lFaty+G+2jaVt7d15o5+4tWjb74kidhLHGw5C4IBjGeCp5PTHHemwIoLxyOpSNQWRvlZAOVI4O8nJxz2NbptowhwsjMhYBixLIzkbkI/u4A2nsB3qhLbqzqI4TIwJDCNshWjwSrcfM4zkjjGepzQ5KVo+87aNp9rW3fV9HvfS3XmqYjlvLnckrJWd1523tbz1sl6uheRLLl45EZEGA235s9Mvz7cenXjtky2xEZdJVYghiDklXGcgDrsXsO+e/FdKIA6lVZo45AWnj37cMvRsgcZBJI+nSsyURIHkhXz0bcpZGJC9MqVA5YccjGOx440hJXb963/AAVpp00
1/pnk16/NFzbcYpb33WnTWV+1rXV0lbQyI2R4zHIGdAysQPv/ACliAGyMEEnj37cGq6TyYlV2GznJwdxjGAkZPdhznHOSBVyS3jkdhHGXKsCRuwzE5wDj7oHcY9s+sUqRsjSS/u3U7oYo2GGZM5DEAnJ3DqOeeorrUVK90k9NtHpZp72/A8eeIk3zKTVnePLdaabp6rbRaeiKFzeMiLF5hZlUqxA52HpGTgDjOCD1HWsKZo4IomaMSrG5Z3xztJ4XOSTnJ5wD3HGTWoV3q2FCwuzCR9xLjnO08DAP970B4rPuYgGQoC0TYCxE5DnnnOBwf4TxjrjuOilBRbdnZ26JXVl2fXXXT56E08bOMneUney3dlay0te600VttPWvcSean7oBVmUuyKAOSB8pxjngduuc+orWrAxLEWj3qWZ1K/Mq9iGH8Y+meeMipzGS0mx1VoztVGkCmNh1J4IIwTnt0wBzWfO0sLTbHDM65Mh52qB8w3Y78BfTGc8VrFJpUo+6nNNb6XaV77Nba3v1a3PawuZuEo62jF3vJ63Vunb0/wAyDUbqOOVBbOVLxE/OCOehByTzgdTj881zt5dXEnykligDeUeRtUn+Xf647CkvricuisBHyNjZ+Z1PfPt3bqevUCqBnlXDAAs5YEkjgcZGe3Tnt2xzg+pQw7VNP3ZSWzb5r7aq17bJXumu57lHOvaVIqU7U2opyi3FczUbcrUtdtfLzudP4ZtpZbpJQiqrNlFVe6jLnAyAenOcew4Fdzqbwrd25OQdiZYrlCFGNpGeGBxjk5571zXgq6KNvYb1RGZVHLFjxhjj+E5OSACTyPW9f6gkt08jSBlYqPKGCFI4Y7Qe2RnPy+mCM15mIg3iEpxT5Un7t7Rvy6dGrr/gI+ow+b04UopzkrR1lK0m1aOivtr52fS2hqajHZTxhlf9/DsUEgZBwTuU4IbHb/DpqRzaYlpgFTeLHxvGSWHUjAAHGR3wT+fJz3ts1vMzyFAikIqqVLkD74bPKjvj8MVz0WoTtIxjbzIgpKnBJwOpJzwRwPr+uSpTmmozkox1V3ZW0+12Wj3b9Ga0s5pqUYqpJxUrx1s03y6b2ktdtuxtyE3N95aKCFfg4IYtnkHHOPQ4x6V6F4ajlt2lD48ovwFPzBW5Kg8EgcHBAI6dTXC6RtS6hkkViJ1Z0ZiAFxgktx0HOT75xjJrvIb9UnjFrH5wkUb0BIAYjkjAwPXr14rizGVVwWGgrxcY80n8KSUW23vr/wANbY6Z5+1OXPJuCjaTulbRXV77paN+vket+HkMc0bzRbYHkUo5IHA4OOO2BwP8K9C1fxLHlreykQgoscmevA5weoGR/UDGa8j0YXklmZ55SrxM3lRZOTFkBeMDp82fxrUt7C4lu1fY2G2sEJOGVskkgjtjIye/Svmo0f3lSlzQ0VnJPRtNWtfq7pbv562+ZxnENNKtNSTafuJu73W+vSytZnfaRfi4DGUFwIpI9w+4S2MIfXkc8g9uozViePz9qbXxhS78qBIp6HsQM8dMe3eKyjgtbdA21EkceSAMETDhix6gcgAEfj62rm7ig2IjhzhXcg5xwScrzjPQg9ODg5rD2NGMm0pQkklFLRN+7rpv+S0duh8hjc2VXWU5xc05O0nolyt2emmr6ddjcjuWsbcAvJIqMpR+nkhhgsT6DHp6cjtzV9qkUpdMlSEOC+CysASMk9Fzkk7eMDrisuXWpJTJEcJFudSAcg7cbQ4O07ecjsRxXL6rdM0Y2TtjzA7uExtUZyrEc5bPI7Y7rmuvDUveXPGzbT5r63bW/Sz39O7PJhjuayg5ODs7vm2Vuravey/pGFreoJNK0P8ArN8hUclSGH8HUjngjOAeT0qGG3js7WG4uGBkZWmgRjzETjLOxxtVeNuRzn2FUBDHJcyXUzIEZw0MJBcuw+68jZGxB3bBOWAwKx9bvrnUna007fI05MIWIZL/APTNcHhVzyemMZ4r36dV01CmmuduKbe
yjpopPV/K69badkMdUlNS96TSSjBXbcrpJL1su/3mLeXWoeL9bttFsEd1uLlYC2T5KhyBjPRcjO4jgEDivYvFvw4v/Bekw6JZWxkuZoIJJp0jJT94CZMcDOeNr7uoYgdMdB8NvCGnaFYxXV3sl1jejshAYxbuWAbtJHxj+8Tn0FfSV9JB4tsIIrtXmazWOzecqBcRLgiNnIzvYfNvU4wCOSCKjE1qs6kVRSUIWjLRpyknBtpWbSeyV3fW7P03hXhjFYvDTxmL/dVqsY+yi7t04c0Xd3T97l6rq353+G9LtLrSSrTmRlCBBlSFLNjAfuvIOeehzmvZPCd7HPcKJwr7MMmH2o2B90cEkHOAOM9hiu/8R/D+1trZJXmLwFlSK4VABLuJz5h7BBgH6nNWdG8FM6GXSrQXSFh5sVsC6+YB8rgDkKDk8fdOe/Fa0YOcou8VKTjNrW/u28lq+rul95+lYPLngoxUGmk1zc12+ZWTd9tequ+itY9M0eO2ltLYyoY4xDwGUF0bAwqdNyoT1xnnvXrGiQWUdsBOqvuQGPYd0iuw+QjqVLYY4xxjGOa8q0rRdX062iF5Y3UIibD71OO2TuwRtIA7dh3PHd2d20SxMNrgIVQlSu1QMEZ5BYE4yenoCa0qe7JqL5XfXV2srO/XmT3etn6Wv9Rhmp04K0aiSSdnFpO61W/w6q2/frbsrG28uUNMm9hvLSvjlBjYrf7WM++QMj00QPJEj2dvKszBSqKPkc/3l9SxI9B+FVNM865jtnlbzANypgfIHBGCcY4Xsc9zwcYro4ra6gJlZVcSArDwCo6bi3+0B06Y59K6ou6WijdJ3SSTS0vbWytZ+rvbdOMTKEG3KUldKKjF3Td1a63vdrpo29b75trceIVl81bcrFna2W+/v4+bjOBjjr78de5sIpZbeMXTeW0LF3yMZP8AdA5z6AH3PFT2TotvcCaNAQgOT8ys2MHsCAP7oHB6YPNAv/KVjMieQyEGbbwcZ6jPUfT0zmoqVN+WLUtFzN31urvsne9r9b67HzuNrzScHukmtVorJuyet9Vu22/xrTXlrHIwZ2VhhEjwRgnoXx0JHTPpXO6teyZjkDMfs7ZAkzkrj9TgnHrn3wZ9S1C0jCOkiy3EzByDhgUU8AHjBB4I4OT9a4PxNrJTEkJAUoPOjOM579z0656dPSuvBwlUqQ6OWjumk9FonbTV6Ja733PiMfirKTbUaau5t2Tu0l0tZJK9vV9zE8X3qziW8ebFsY/3kAOxhOv3ZIyPvgc5UDHTkHivlf4ia/DZWsxN1lJV8yYGTbIkYySm3sGI4B4ODXpviLW3WOVp382OIl0jL4/drzkqcbgc4GSMgEAcGvzX/aQ+LIthPY6ZdLJeXL+ROI/+WMS5Cpwxxtyfm6MTng197kuXSnKnGze1+qje1k7eV3um9EfiXGfElLLcNVrKcVLVQU5Jc17La71Vtlq76W6/P/xz+JM3ijU5tHs7pvs0B8h5I+FKpkCEsD8wBPBwDkn3r54ii8uPYMDZ1GOMDr83XcQc88H8KmeVrqaW5kJ82R2kYkk8tyxJ6g/59MLuO49wwx6BvQlQe2DkdgB9B+k0aEcPShTileyvJcrVrLvqn0W/4O/8l5xmmIzPG1K9ScmnJqK+yop3slfTRO70s7W8lRQAHIALDgj17YPXnjJ6dhg4FSlgoC4+Zuowe56gDp+WPfjIDnaOeWHI7AdQw7dunU5pVU7lYgHpx3wM89OmMev61Ults27dFfS19nf02Vr+R50XFrWKlJuyvZu26a1v0Vrt9vIUk5AB4IB5HXI6dvYZ/X0iGAx37txzjGSQO2R6Zx/npI+CWByuOhPHA69Px5/pwYx0zzkZII5JGe564I59vQdRlZ9Xft9y/rby6ES3s200ldO+/W269dh+BgkZwBnn7x/D0+gBzxj+9GDg9D3PToTkkEdOhwB7/SptoOSSgO3g55PtjgdOMY4Iz1wBFjJXBIycZAx8w9+
n149CAMchD1ST/wCGb00v/XzHhyOg5AwfUYGeueO2Rn34zmnh9x5wO2QBgDsW7j3+91GB6s2Hv1PzZB3NjvuHXOOuB19yMAAx0PDdwSSPTvz9aqMuWytZad/Lz+fntts0lFJLVJef9f0/JlhOVHzcbSRz8voMjHc/h2Ip4TAzk/KAcDkfh2PXj889Kh3cEjjna/cBfpwMnnkEk++OV8xmx5ZGCMEk/MTx3wQM5Pc49DWwFjjBOTyAMYJOe/GOmf6HHWg54IGcjOAAOmRnn6cj9OKQFtuTjcPQZ+gP4e3X2AwgzhSd2QSehI56fQ/yHagAUsvJUjIzg8Ek5yOD7eowe561LG4IznaTkj6ep9/XPPYmmblJ+YsWxxwevPPPP+frloyR8qnOTux1Gfr6jj8Sc4wKhx1TTa1v3vbv+T6u/qQ4u6cbJpr0fk+iX5lhYxs+8SVOcZPI3df+A855z17VYVc8DJOMrgkDpz1wPU/MCRkdsiq+W2KAuDwDnqeSMH9OPXv6TK7AcnBA24B5zxjGRn6/y9d4LR3W/Tp01t30327baPmk9GrPRdGu9lp+CtZdUKQu7HzbvRhx0/Mj0yRxjrRUvyMAx+9t4zznt/8Ar9x6EkldUKba+Ll125rdu/3fLshObTaUmrdl+OrX9I86U7XDtuG3jYSCrEj7w9ueR1APfoJ1kUo28YYgOhxkBh91hgjAI4z0GPTNVsBDgA7s8kggHd0yecdD2xx0Jxhm587HbG7OAvBCjkDIGMddxx2x2xXDJOGj07LfVaJX7q9n9+xhB3je2rtdveyfa3a929bJrzJl3MPmkJ35PJORg9N35kD0+lTptMYQhCVIO8HDfLnO088e2OefxrqVaEblzIxCowJBGOp2jIYe2Bx3Gc0pcBQgYbmGwMMcEc7cngHBHrznt0FaWlmn1W606p6a3T+a0dtS3ZWtdu97art1XMlovJ3B5UynI2kgKBxgDttBIJAz7eueacDHvQ5O0sR06EkYJ568nA561HIFKqVGAOAxILZ7gg9B6EfSpoDGZo4yQN2MFlIzg559+w/Lqa5MTTUW1FK1nzaO2y79ddLab37m1BttK7ab00vZpqzuuyb0vstdzvNHibbGy8hBuVsYYHjJPB254z19+M16v4St99/DlSRIDhEYBByONxHU8kjAPANec6JGgjL/ADyx4w0YGMcY3Rrzuz3HGOD3Fe4+BbBLi+gOyTeDtjJGdjNjLFQQSUx83TAIwM5A+BzXHOjHEJrRJpPbVpO+jW34bn3mXUlN0FJ/CoNSa9125V3fM7eV+z0se2WmlQw6fExjRkcFSuMhzwVcKe45yCQTxznrBPo8BglZokzIdq5XKhyPkcg8+YvIYdu/Wu6NjsshHGkZurcqE+cqGQDgQqQQTg4DE56nOeuZftIkMWVjR1BlcYAdJz1SJe0hGMNg7sthcg5/OljXUndPTmaSuvn+KT7/AK+xj1y0+Xl05bRcVorOKfd3lp21PGdV8NRxYkZB5px85TEWGyU34B+Zu/sBx1Nea6xocdvb3fmKpkEbMiqoWM7urFiTg9McHOPrXtPiTVo44tgdDKI8Txk7nZ167TwC7Z+Xpg5wCM58M8UeJMwSxRyJG0iiOaENh1ZecNjO5zn5uBjivqMpeKnKNnJrmik7O9k49bdEtdNd76WPgsVJKo4q7UnzNLVNK1vudtNvVnht9FFDcXKhRkBiqcbck8Fm6AqOmARyT6GqttuDqWkJOcEADr0Yj+8QB229Rjjin6nK7XWcZZ3BPOAq84yD1A9OpB6cE023+VwGwWWQZKkFcEjoOBnplhxnnkmv1XBTl7OPO9XFLXZWtfTrrfz3R0YRt2unutFppor773vfe3Y9P8NWZuGhlUYYHasrqcEZBCg9MgED3zn6+02kLR21qJQYyokFwemcFeVXBznIPB6c1zfw9tLaaz2Sxhm8gSxbhyjHnc57gc9efQivV47CJrRkRdzxEsA3J7b1916FemO
R2rac+dtu2t/N20s/XTbTpdWPoqUXa67+enw+l9H81ex0PhuCBIreIBWEoJVVAJ3YyGc5zg85HbA7iu40x5rXUAVfbLOpjLggCID7v58gcD6ZrkvCVmJ3USZjQowXAy0ci9lHUDnA557muy0+0m+0uZZVMgYjLkZIGSpHT5sdRn29QeWpKL01ulqn1el0l8/npq7o9WlGVtIu9+VO/Sy1t2av5+ux6h4eTCrctcs108u0sDhZSD99R6c56dj7mvprw3qCLpsSRXKm4dVSaJZArNknORwH4+mQccda+ZvCkaSQXEkhxLbJI2GxtYL02nt0z6g4GemO88L3e6XzA8oVrlUSNTukMmcEdsKOBzzjPpXiY2ClF6tNJO2l02l96TT8z6XATjTdONtanuXu07xcWm7u+t0vLTdn258PiLh3t1uPLkBV9pU7SgBJByehB69BgcHFfSnhzVG06RzHI5mCt5TqMq0YID9+TyMnt6/er4u8K6zc2c8ezzJZJECs4wqhCcYPHzHnBOfbjnH0l4V1uF2EE8rxxbQY/M+VwxGW2sQc5I78YAH0+IzGE25Sb5o+7eMV8WsV1tfu9+rPrsLGMY2Tae7T1T1Xbf8Arzt9X+GtTF1LZqrgrsDyIBtbzOxbJPTnJJ9PTFetRal5JWSLLKW2FlB2nGCQME4Iz1PPtXy/4d16Fb23eNS0gBEyA4DIMAFD6kck+3QAZPvegztdxxqp3JITKFHHlKDwD6k855yBjPWvk8VTcXdylFO1r6NXtvbzVrW2flY9WMXZWaaSV1a1k2nZXtqtLtfLVJHtWi69LtHkyN5gYYjyCTnqSSeMd+Dz9K9JsPE8q+VGVSVhgsNw4PrgjJbnnnn3r55hllt7ndAjFcqVPJ2knBJxgds85HWu40t7hLu1Mjs7y7TwAdvTg9Rx6EYIxgdq82nXrqdrtPmjZp7Rdm23tslvpp9+VWlTV5Pkej92y/u/F2bst35H0fp7jUWXzEGWx8qg4wccAZzu4JI/TnFegaTpohYMkDKTjDY2nORgZP4DpjGcYxxwvhhV/wBHbJ3FctlSckEYGP73PU/TFewWCeaqgdWzuC/KCTjtk8nBzg5HHXrX0NOXuxlK8pW3Sfw6Wtbez89dr3aPmK2JipKCjBcr0TV5R21jZptW0tppf59LpSmF42ICkcgMOOcA5HAGeMfh+Prmk+VMifKpbaAcDAHQ5x0+vOD17ZrzvTLHescjpuC8FCMH0Uk9QR+vp2rvrDfCqlOQT26Lt4xx37d+o9xXpYWs24+7JU1q3a1vhbs0lo0tt9L7nFOrGT0k3L+Z2SWyVknd7N91p5HoOnW0QZd6DaOnHH+9nncO59fXiu5gSPylMKAYGdqrtAPvjPB6j6ehrzOwv2XaZBlFbaqrn5h0GfUH3GeOnQ11lpq7AAAADgLuU5A7jqckn8vxNfVYOvSUEoNR21aTk9t2+6vpf0vsctSnUlaTUZtO+ujtpe19Hb10tsrnf6fYvcpuB68Kv+yvX1A6jI47dzmuqsNM2yKWUbeMjjnA4zk8g85HGc+1cho2txxbQ0bF2U44IB9wTngDHbnjvXc6fdTXEqkKETkg9Ny8YweOB6H8yK+gwuGp1+W9224tpp2T913v2vq9L2djz69WrBu7jFWir3d9UmkrJXdnppbzZ1MNvGphVFG/AwBgqfTOQNvT069T1rr9J0952BkX5R/BjGMYy3B7DHA/SsfTI42AYFWbbgnH3SMcDkcdQfQjrgiu4091jQHaMgg7unX1APOMenP6V9LhcB7JOV204rRJLVW1bTutU3ura9dDw8RWdRtXbk+ra6W011v03fy1NWC12kLEp5OMHI2jIYlTxgHjJP4+tbUCmNSH7FsZIPp1BA4HY/4kVlC/t7dUJcZIzuYjA4zg9O/B9yDk9sG+1873FudwGM4+6fXv9c9D+FbzrRp2b5nskrO+mva+qstb337s5aVCdVtWcUt5SvbZb9G/+B1sdhNdRKu
N4BxlQCRznvznrkYP5ZIrEm1IxnCurOTjzScAgjgYz2xwRj0A61yjatISASG3HJ6ADH3ieuQueRjnP1xk32piT5YiJTgNhQRyOq5BJB45OBjjjHNclavzO7fs01pFXvbfa+7tt6PRnoUsJ7Oyk4tNatbRSta76b9fWx2U+seUocTYYbcLuBYsCc59BzwM+vbpympavcXDNFas8hkySqDHPGGyOmO4OSMjB7Vi22n6heyh1V/ncAorfLlsfKBg7QcdMEjBGe9ej6J4ZdP+PlRH1GR8zA8EqMDkHgbs/UdBXOsLWxCjao6dPTm1bdm4rRNvbfV2Wvz6FOhQuouMna7S32Vn17rbql2Of0rw7qOpBZbrYBxlWOWwcdQep+hxnj2rpp9ANtASke941wp52lR0GAPlPbvx0zXf29slvH5YUAKMbkxk4HGfUjPHGDknvmnyxCRSuAADnBBPGATzk888f/rrvp4ClTi4JuUlZc+zV0rvpfbbtv2OOeKk5pq0UndK2iStdv7tdeq0PKtOhvZJtjKbcAuq4G7cqkAArxnB6dD1NegWy+XEu/aSFHToT6k46n0OcHjNQT2oQM+wfLwGVQN3dQevUnIOcnk44rmrrUL+2YwqDHkn5jxgdS3UZHpgevtXBU5Mvk5VISmpNe9Fau1lqrp6XbT672O+KeJjGNOSjonLe19r3Vm+jWmvY6m7MTRsCAAR1BAA9Aex6Y69CB16cXewQyOxUfMSRwPmwc5P6Dk9e/GKeb2WSMM8pIP8DDB4zyB0x7ntz9a4uIjt3sDIwOB1Jx6+nX0/DjNceMxVHEUbOnGLi0oykle0uVNWbv5Lf57nVhaE6E78zk97RT6cu/6bW76GI9usEu9sMVIAJ4+9kZJJ5xg/T86lSKOdzuKjjHGNgAznO3oenXPXHTpcmspLhXdcFcbh6H0HTvz646niore1liQnBXBIOWyM/jxjpyM/TjjxKPPCqofV4yo1G/eadrO17abqz2dtluetzqUHJVeWcVbfRbaa9VuvV/OOfToYtjoAg+UlieuOc57j8PzxVO8VAFIADADB9ew49Px79T0O3c7GhVcOSFyQmcE9cY/p9BVL7DLcR/vCdjDCjb8ygeo4xwfQjt71lipwU5Qw9BScktk9HpsktHpZbdFcdFzahOtVekknfto9d91r0ul5o4zUrR7mBimJJFGWx0AB54P168ds+lcDqWnmaJonTaM48tTgEgkhuc5GRwR6evB9shtVhikjlG8gPg9N+cd/XjnPfOODXm+s2Ny8x2LtCsdpAxtyTksOzDsT1+hwPjM6wukMRJSfPFxnTjHncWuXV3dlayt5rVH0eU4zkk6SmuXn5oNtx35dVqnZ9uyV+x5Ykc9tJPBIQ4kjaONCvBVhn5iSSQQOQeuMZx14bUtJkiM4nHlKWVhGvKIpzhgMDfnnjjGPxHrk8cUF43noTIBuhZiAMnAHOQCOAeePTuKwtZto7x1fYkgjRTIiA/OBnJBBOdpGSuMDIH0+KqNcsk+Z8knyp2uoWjvs7prdfifWUa7jPVWhOCblG9nLS3L09U9mvv8Ahj4reA9G8S2Op6ddWEN/HOsymNkBl3uoxMox8igjgnPPGODX8t/7fv7H2oQ3OoeI9F0tpILSSSW6EUeX2IW3sIwoErDICtuX1xwa/sC8Z6E9uXe2hVTcE7pS3KR9kJxwF5GMdz1yK+Hvi98MtN8daJr1jf20Ua3EVzHHbiPet0wUhJD8vyAkndyfU8cV50MyqYKtGdOWjet3ortJ6X0uk7vfrpsfdZRi+WiqdSXtcPUShKNS0pxbsml7y2tbVPfyuf54/wAQ/AsulajPBLbeSYFZzE8BQsCSMp8w2beS6EndxgjHPznremfZriSILEwcqFYjgBR8rLkHkk4YDg4/A/v7+3H+ynf+ANf1G8t7OaSx33Miyqu9baEkMIDKFBKtn5BtXGBgnOa/F3xjoMlrJdGS1KmC4eFEVd80bZxkdCiL82S
c8Njjmv1Ph3P1XhSbqRnBuMUrptP3dHqnvr59drnzfFvDWGq0qlbC0oxc43i3HmpvSLvHZJtK2l9d1ufMd9aPGS7qQDg/MMHIJyqHAPQZxzjGOTxVG3uXtpQy7tgwwjyVDMDw2euV+bb7nvjFdxrNjJGskzqx3H5oieUYkYUL3B6r7DdXF3UDRucnOdoYdSM5ycYGMdj3PbgkfpuGr08RTSummuVprSzsrWWlrPR76p9df54zDLsRgcTs7xlzJq6tZrdLorXdnZbHs/hLxK5SKF5TgBUZt2QOeQeBuOByOO34/Qmg660rKUZMpIMhzjIGDhV6Fc8hsjABGOa+GtO1KSwnVgWwGyARwP4s8dcc9x+GPl9w8JeLNqRo8oYvhkdhlgxICEjPBHORk9+2MfC8UcOKcalWhS5oSV2rP3XLfp+WrWu17/onBPFUqE4YbFVeRxlGN291eNmr6N7Lb53PuzRNWYxwnzGbeoM7RENEG7wEY6DjC9snmvT9Ov8AzUi+zPlQy5jkwC0JP7wZ5PYYOOOnP3q+T/CmuB1i33bb03Osf3TKSQeDk+Ywx8qjGVJ5r2vSdd8xIXSWDzFPlumAMqeo6nDLgcnJycY5r+fc5yipRqVV7O6i3b3XfXd7avbz890f0llmaUsVTpfvL3jHST3dotPR3Wtl56W6HtVvfrukRRJD5ChjLtKrFngIhzgvIRz9B6c9dYasFicMkRKQqjRyN5gMmDtMfAxIMkkc5yOM15hY3Ud3DE4aRpJw78YHC7cytnAIH90/njJHQwXUWRH/AKpolRi5XBUjOxt+SAznIzhuhI7V8HXoOE+aMWmrNp+Vt7+q02S9T6im6bSe6eiVtHJpN22a6X6a+h6DE8zW7KZkkdVd0GwfKCFwxJIAkxkE87fcHFRTARqytIFmMflRrG2d+/lgRjCyMB0yQMdTmsuHUJVZBKilk8tJTK2QzNkthuMKBg8gZIH0rTVVl8szusnmuduU4IT7rhs/NgZ+UgZ4GaVOUtXJ2t0V9eq2vZaNvbYFRUI807O+qULN/Z030u76a2fZ6GTdWkUmwqkkgcP5Zb5VZYsBWZx1j+Zty4+Y8ZGK52dBbuyyFBHgLGhXa65yS0YwSFXumTnI+YYrtnIjlbzFRkjV/LUr5W4TYGGXLbWO3IUE5PI7Vk3McQjJt2kyATHC/wA8waIjPykfNjd97I4PQgV2U8Ra2uiXbe1tPS/4aGTwynDmitmnySWmqXzbu7X218jhZrOQIt8u7fLEWcTDEg2kBB5Xdnzy2QeBTGDQEu6qzfLHKsx27EYcIyEHfI/93jaAeckVtXsUkcgkMaPNcYBdz88IX70TY4JbIyuOqjDDrWRKqyqkk7PLPKWTayEgbcBZQMgBcE5bsB/tYHbTrXUZO/kr7Ky1W7tZ79bdDz6uGgnLT3tFa+70Xzt5IZJAJIpfNDzCUSGM+SQh2bQjqQ3zRfMeM8Eck9BKsW2LymxcRwNx5ce0kSgbdhOSGQ5yOeo6cVZhkaKRlkKsI0YCEDy1laTGxolywLNtJdc44HOeatRxBdkswYDdKiFR5avv2kFF58ybgbcnjB64FdH1jlSlfW6tG99Fa6003t835HBVw0Vebjfa0HrHmdkrJ6vXtp8tsA27qHEzjYUbbt/dyxk48tXGWDSYz5gB4wOoNZdzY+coZ1ct55NzhfmD/wDLKTIPzAgEuOh4x3rrZ7dFMLgoftKkM5G8LtyXiTgB5WyM44BGc96pGEgvKwCQ5AMWSWwPuZTtgbuh75HTNa08U43d9b3tdJ6232vr/TOKph7yd4xTdml8krdk3vq911tY5iazeGSczAGNI/lPRy0wyjoMfdbBBUHAx19MK5gnhU+Tb20ZMnlxpKP3gi5Mj5yQAMAkfhmvQZYAi43FUwqSu6eYojlz8pJIyePlQ9OfmrHn0s+U0iwbEkPySkgqACcSR5zgOCdwx6cgDntpY5dZKSdr7vVW3tb70cs6DVrRTbdtrWt
16K93pa/4s8+ezKzC7KlJAxVnwNs7EfugjDhIx82AVO8HBxjmGS1lZIpGjZcBpJ5e7byB+6GcqDtwFzzk11D2ETF1CmVpjGdskhVTINxEES4wojwTt9CBmqSxvITna0yyMrRRKS3lL91MZwoizhx0GRzniuyGLjrZqy2Xra97a+V30tfy46lD3feSvflWzsrq97W3s77er3OccTM2yFLdIXdVn3Ac43bwRxw/G0dsHOTmrcfkoHhl3r0uBtTegCj7qnPyOv8Adwc5+93qVrPJjYCSMyqR5ap+7xGflYHdyTvJfgZwOeKdkQTOrGErjyklZcBlI4IXnG88YyQuOeabrc0lrpZ6X7euvZtvscE6bTejW6Sa7Lfpol1fy6EMLuq+crJIqs331wyq5G0EZ+Zxjrx3+pu/aQpjmdVjuFYb12Z85WHzKhzgdByRxnuBVeIgxlAROx+ZY0G5gF6HB2YReSc/hjNWbaNWgVY5N0js3mZG4KOoUKeQRyGxnHHHNUp2l3bSWqV7O2ivfRX69OutjlnFpvR72t2Wl29+7fnq7lsC3nkD7Jrd974Cny4JVkClCG+YFWwcDHGD6nALKRjDNtAdRt8sgFMr1OcfeHGeCCDjjGKktzbiOVJJo5SP9Wr/ACknP7oJzhWX5snnGRxjmpo5YU4Mwfy432Bwd7SS4xGx535xjHHTrQqr/K/y0elrarbps9Tkd7636vXT8L9f09DOmhbES+X5TxA+esi/Jh8ZKYOecZQYxxwc0+ONNjW0ieYg/fLM8hJ3L/q8MVBLDJHTnjmrc6B/LO3e53RtGjbpFRegY8DKg9OMc8ntBNECiFWKNFghVGAiN94k5O92wNx4C46E8V0Uqt3ZvRK9tE22lb03smv1C/e99LbLt1e2lrP9Ce2uVEjKEiuUTJdMhZlkH8Ajwd7AE85GeuRzTp7hGZvKj2l3wIigG6H/AJaBeeRyAT/D2HPFIxyKkk4XyzJIpWSNMrKzDAcODmNSAdzBTnpirhKRRqZiqyxqTvVvNcSHG0pwMAYPy+/XqK7adVpO27srPV7rb8lp1fo25Skrvyvu+i3e297CLZiVUKxssrI78/LHEi4IZyc5VgTtPfByMkVTay8p2YRgMc3CrHnLF8eY7DsrEDjPGDzWjZTblwJjI6o4w4AbDY+Qp0+Xoo3Hv15NStdGEOMIzAKI4/vSAchiTxwvGVHTI5Oc0KUu6W6s7LRNX0X4ddLaM5p3T01u+191q7XXXrou2xzN9ZmIQSCKMI4ZnVmyEDYyoHqcAhiT0IINZJRlkKJGqxy72Rhw5OAWZeD8vPPrxiunuowDEqK8jSFikbMN5jH39owRtXcCTkH271SltZPLjSD97IMsxCgbVHXb6Ouef5AcVrCesU3duys2nZL12vbr12PPrR5ppXtZJtLa6S0s3973uu5x0tvGkyyQhreRm2MmdwkPGCCB8qNhuMc4wSKpyLvlPyiKRHXfhflctnAXkbQSORzx34JHUHTX2PIi4Zz+9Vz8qsh/gGOG57dfbFV57AItwYwJVCoWdm3BXAOdpx8pUZ+X5scc12xrQ0tJpqytZNWbWj69Xr81seZVhJN8sHpLl+G6atHVOy+W9ttNEcTcwzynywGji5afKnBZfumL6/3QSCOhz1yZ2eNAoiJNuFQmRcHYcjzVPI2r3UjnPUda7VoSFXJBhDnauS0rE4+RuBnGMk8jkZ71kXFqJpFjMYVSxQEgKVHGPMPPP9wcg8jIzXZCotfh0abjLrt/Xy9UcUoSV9k1ZK+nxW79vl6Oxx5tiEaQNIJJD+8IGd7ZyNqg8fnwQDjiqE8UzAO5ZAh+YHHzAZyCCR97qAen612S2m0yyMDK8asvkAbAXAAAxk528fXsBWHe2imNJuglLh4WOcyLg4AyPUY47ZHIralV5mrTSV1d2220j66u/dajjzR05r2Wrb0v0XZ99eq7HEXcUUkrMEkZkOd2CEIYcbexGM+2fbOMhbJzM7NJhWJ
yGIC4GMcEHB6569ep6DvntlljCGNkdwTGoKgoOxbrn3GeuOTWXLbQxuBuClcMd3Jkc9FYdlPPHPTFenQxDScYqyad3o7q61WltbdLL0NYVnFr3r2src1knoru6/Bfitq+hyzW0dzFE235TtLDlwSQArngDLEgkNk549c6eC7F2ZCWBLEq2TtIbqW9SMZH15x26/TFhlcxlVC7hIzqPlIUnAHYLkn24/GukXTorgu4j/d8DgAM2M/MGPABx8vOMZOOgHFUxPJWlJ03rGycmvLe733svO70sz2KWPnGlGPxJb2k7dPmvJ3007HnF7JPNCkMat8ieWQcjO7gjp09O5x0rR8M2rRwtFKpDbXOTzjJyBk8fvO49j7V1UmmrJIkcsSsgfkIRwOcbuMZPc59h1NdJp+m29vEZGQAKyLESgw+cg7eeTz1AHpjNc1XFfufZxpvmb2i11Su/VpNpLy2vprLMYpO6kmrTSvbZR7aWez6vv25W3sL1nRV2iNEZMEbdoYkgADJO7vkg8D6V2/hjT7m1lSW4RHCsxY5BKHjb8oGWB5x83bHWr8dnGweXDjzAASpwVI6rjBAxke34V0enWoQIqAEEFgeCSmOpP8AERz2B69zmuL6y/ZVKbpJc+jVves+VXu7PTX8L9Dhr5vVqRlFQUU7Ny5nd3tvdtd09O/klvWzMWjuVA8pf9ZkbRnPQpzjHqfc8Y52UnxKZElIfCFiyjCrgjauMDnPXpx0NYYnhhdoywjjL/vVJzH0yCpPG5umMHI4ySBUy6hDISkewBgyB2AAXHcnJ3Y7AgexNeZOi0l7jUe+l3dq2rT07aprdnhV8bq+bsnZSVm/dStbd9157mzLqDRzMQT5jMiqM5AUZ5Ho3zc8HjGc4p9zfs0ZPmqjqoVpCAN2eMHk5cYH154rkry/W3kADpPLOocBRwAM5bec7cZBxjnFUBeTSYaVtigk7t25sDnf7gZwB74pPB3Tk7W3V9Hpbp8vzur3PPljKsqlpVG9FGMbbR927elkv8vu1rm5Lys5J2kB2Y/KJBkcjB6Dlgc9c8DrWffX8LEQecigpnf90OWxgP8A3enOCccY9KxJ9UmlMRhj3srlNrEbSqk/NtIAMZzw2RjGRnPEvhjwn4o8f+JNO8L+EtCv9f13X7tbXTtOsopJXeaV1RRtVWCopOZCThQAc1tDkpRdSq1CnFJ889NraJWV79Ipt9LNtHRh6lWpVp0aEZVq1SSjGEE22m1fRaLW1m7a9tzNM97dsmm2lnNLd3U5trdbYNcTXTSMFjhgRF3O7McYAPJzkck/b8H7EXxy+H/wcm+Mnir4aa1p9rexrcWSXcTCe00uQZGpi12blDLhgWYMRzgYr+gb/gmt/wAEddG+E+n6L8Y/2jdFsdf+ILxJqGh+Grgi60vw4z7Ht55IWXZPeIv3C4Xy26q2RX7CfHLwjofizwRqHhS/02zuNDvtOewNv9lRUWNomjSPYB+78vAUBc4GMDByKo4fN8ZKpiVgauHwdGMZQqV4uFTEydrRjCUbxi42km1zWd0l1/YeGOHMvw1bDTzapPEY6c4tUMNVUaGEXNBw53KE3Vq3dpRjKMVspPp/n/eC765SaUTJK8F4JHiUIHa1kUjzN65BySF44yAfTj6A0u4tdEQXJnt5xcW6O9tHl4yX+5I3TZNncRHltp53HPFr9pj4H6v8Bv2i/E/g+a3Gn6RPcyar4elZgbSSzuizpGrEAblxgg8qT0HNcnodvM0TR3sTAOry2obhI2jwQwmOcLJ1Py4OOCDmuytV5GlCLUpKLkk78rSXNHp7yb+5NeR+7YLCqhTUIwcVy3TavGV4xa2s03FprRpW0bPePCWlaZ4qtJNMmcP9qdpYtsYC2wblssWztbjeO20Ada1NO8O6l8PNYZlZpbeZhJCdqtbTRk/LuzkKowQF546nJ58y8PyajoTJfRhkZ4twiVvkaUEfu8DncBgg5yeTivqDQrmLxroSQX6rHqCrutJ
FX5WZBwhI6hjwVPTHXBrSlKMpRtZScVo7JNq2je6fk9N9Wx4inUjJVF71JtRktbx0im9N9Xp8mdrpOs+HPFmjXGm6jodrHOYyizRIEYylflJABPXkfN8vqMnHlWveCb7RJWeBDPZfNMm1dxUOOmRjGwDIHbJ9cjs9D0TUdIkEpgmdDLsMmP3Jdf4lbHUDof8AaOcnGO/uLn7VA6TINxCoFxlgDw5fAHPHoPXjNb+yjKmo1Fapqk9brZJX8uita3bQwpVJYSs3Q5ZUbR54tq92k299Gm299XbqeC6JefZ4XVm/dGMkAp0l6DH9wnn5ucdMcmukttQkjiZpCpWHLR7Tl1Ldj6njk4/iroNT8OQ2yi6hjV4ghMlvGQBlhkMEAJJB5HQnOa4SRZLMht7NHcMzLxu2qn3lZuw55457EYIqqcZRjyXu1ZRv2bSV3qt2dNXFwnSk5Q5V/MmlK7adl1W97pba3sdM90ZoTIsu13UcZxz6Y9Tx9cg+lZ2pXciQRws4CMqkjOdxPJ3L1z059Paua1PUiojeCQfIrK0aY2FiQDzn7+BwfwPty+qa45Ad5iJliBCE7l2DrjHCnoMnB45Het1QlVlF2s72sk+ltbq1nv8A8Mz4TNcbGF7PmbTSfM5P7PW7St0072NbUdUSwDs+xjtbaF4LKw6r1wTgHGeTgA15NrPiCSRXlWXLK5GAQzCJs4+TIwygdT2+hzD4i8Tx3Ee3bsdIdjIpBZ29WGQQR7duvfPzn488dWnhnT7meaYRTFGdVL8vIc4iZs8EnqOcdSa+lyvAXUbwlz3SjbVNvl0S0+9pt/ez8w4gz+lh6c4ymuVQ5pOTSvtqtbNrzWqsnbY5/wCOvxVstA02VUuo0liiKfIAWuJznMbYYYYZGT27bhhq/JXxLr914h1W6vrqQytPPIy7icJk5XbnORgY/Piu++Jvjy58V6vcZuJJbQyP8vIjVskYHJ3Z4z0ya8lKrk4PA5wevtkj0x9c81+r5RgVhKMZTX7yeu3w35bXTVlpZPrr0P5I404iq5tjalKnUbw1OTWrTU5XtdXv2tp8uo0RgMB2IDYGM5xxuXsD079fXrNtUDI+UjkMeML/ABc8456CoycnkH0BDAEgenr/AE6dqUc5zu4JG3qSPbvzkc84yRXqSd7baaaf1t2Pz5Wdum3l2vv8+vyWzerAk5yQOR8pHBxj2x14zjrilYknIHBx67iT/DjjuD6+nAFRqQQ2ODwNh9sEjrxz3PXgc93ktke/8I5znGAPb2PpyMYNR1e367Lf+trDbae60Vrp9NPmtvK7ve4jADqOduDjnPtwePfuDntShfk4yrdQew7deQM8988+/ISDnnGQcdyD689ewJ/T1F3BfmOR3JP1wenY8t696zlFRem3S2mvr+OtvLraHLlto2m0tL6K6V29dbq+vy6DW4IyxJIzjqccdcLTxgj5eMDI4wcj656nHHQ/zccdDnIzhugIHTGO34Z5+lIu3GSOV74689vXBx9D6GpSvbzdv8/TddGS5WaVtHrfa2vX9e+oIGVjJI2ckjGOM4PPHYdx3OOMVPweFI6E/KO5/POf6cjioCSSNoBUA9T8u854OR1//VjNPUNnJOMZGAc59OC3fuMcDsQeSzt5PX7tP19PO+gNtO6V00uvnppv16LrqGxwu4EnJPoep4B/p26gVaQAJt2jcg3cYGGHPfpg/wCI5pisuADnIJYckAEdMZ4/z1BzkUkAtyQzEZBzn/63B9B7egm20o36PW9tGr3/AE8kvmlZXbSckubV6JtrRK/ktt+6FCMCGY9Wzu7EHOB0PPbp0P5PBIySBtAPUDPsfYE/T16Gl3LtAJG7ABU8j/8AWf8ADr1CnDMox/DyAOOmcAn19K6HvZffv+Xdetg5rrr027aX3va3Ru3k9xoPOWXcegwOQOo4XsfpjnpxUvGzGACwByvUsvTOOnQ56Y+lRjYOzHB78+3fmnAHJJJAJIUHsexAwMDH9R60iW5
Jt8za+5dLW1s772ejWpKithS25mzkEnI+mcdeCB/9c1YCDO45yRkL3B6cgdjjkg/Q9qiViFC5GcDkDJyT68YI7DgepFSkuMsx5OARjkDpgj6D/wDXW0Uklbqk2Ju/f5u/+Q0jb3+9wAMD0wQDjj+Z5OMUUkhXCnBzgYODkHp+nr25PTNFdUVpq19zXRdl+eojzpmIkDBiygKWySFY9kb/AHee3em7wvzRlB1JwCVAb7yA+mcdD9e9MDKxdcEDBIJYnIGOd2B7cY/Gnc7CqDKqu9SeWwO5JAwucYH4dRXNWSk9HezTuvVab7tW2/Pead1ZNNJtPbRp63b2a/r0f8+0vtwpAyU+8FPPygc887jx0HtUfHmA4yhcEEtggrnO4YyH57nrnuDTtxBBJUdFZiuSAeABk88dD7H0pdq7BsBJL4BIyDn+IZJ49MYHtXNCTjKz0V0/m94pX89+nzHJpfhtoraLv0dt97621Gy4XLK21ScLt5H07bT1OecfoL9ixeZEY/Odu1+qqo4BPbsemSTjoRWVPIoYb26EKUXgHaTkY56cZPPXNaunsplG0kxscF8ZJbA2jGRkdefbIFVio3w8pb2euul01Z6baX02ub4WMlOD11dtVZJ3WnW+19tX956voEZjWOZY2bY4BIOCVOBhPTdg8HpjPUgV9OfDLSWnnjm8iRIpZAUbb8wYH58P/A2cDod2c54xXz14YtXYQsh85UVGdW+XKfxHHOWPAU/wnOM54+zPhPpksjLLEwicbZmgmbMMQ/hWbIG1lAOSM4yODzX4bxVi3ShiE2tZOPm1pe+6vfW+nS293+j5PGzouS0aSW7i3FR11Vr2euvfTt6pNZQLA0LKqXUaRtKd2dzAHcVmIwEhGNoAP3yK8r8RXkUK3IkuFUZecBTkhUA2pJMQMtF0OFGzcDk5r3nWrOSCy8qS3hiUxmSOUNtt3icDCKBzIZeWZiVJ2Divl74i3ccCs0YjeFWEIKqMOj585hDn5hIVXcd3zbAeK+OyS+JrRjq05Kzumub3dbvRea0e3kepmtoU5K+qtumrpuLta6en3eaPAfFmv/JcRRzRr+8kdJVOGCL0jaTnEgH8OD/LPz/qGqtNM+6V2DSFvMxljuIG3qeRj1wMg5AzjrvGF/ELiYqSFZlwVHloVbJxJEGYq7YxncenYV5hLIN7NnOTlQR8oU9crkkEd+/NfvuQZZThQjNwVuRK7tpJKL+XV6LW+7SPzitb2spPVp201ttotbLTtpd99DXkcy4kEhf7m5iuZGK55x0OQcH6Z5yDV1BGMOr4LMrAn75z3buSeOlZdq6sjEOqYAO05IZhnJXOAP8APqat2zqRjAIDHyyM54PAyemOM/hz6fQew5VyprT3krWTtb3b7edvzsdeDadrXTdr3dm9nsna21/L53+gvh9eyRCJmffuXyXRsHYDwoA7lhnJPQAcHNfQGnpJgswCiQjBznP0PbtknrjNfKngy/FtJbiTd5hJIJbLEgrsHTnqfyHbkfVGkTJNpK3UBLhkAk284bnlfQA/iK45ybd4q0Y3UrJvt6X1tqtdfNN/TYaLcNk3dyv0tp1Vl3XlzanqnhDTPNVpTwH3CNzjKjgPnGevGD1wDxxitnUtMkV5GtZmDRkbtuQCQfmJP0JBx0478VleBGxKgMpaOdCSpbcFOOccjGcHg5+vXHX6jILKWbepSNyRlPmALADce2CeD+Fc0pXclHS0eb3t+mna+vm7fM9fDw923vRts23daK99Vrpe99NNGrFvQTcQ27xiXiaIh2I5JH3gvqRkEnp+eK9C8LxwpdRmdZ0hKMDtbaFmwNkhAHA6jcRgda810i68i2luFY+Ujrl2GSEY/eXjlRznvycDNetaabWKCK7iZWEiqHQsSXEnVk45I5P4jHevNxSTUrJ9I383a7Vn0u0/K99T6HL0rx5pLRcyet1pG0r20lpp+h6npGrTwmG3MudrERtnMmMjOGPJ5A/h43evT6C8N3M
1ylvGBIZljZnLMFwQFO5R3GMfjxk4FeA2Giq1zZXMOZ4ZEiZrdTucuw+6uMdxnb3HGfX3jQbS/wBLa3e7TyRcyKqpISGWHj5WXOQuMYPQ/XJr5THUnbmUVdJuWmult1fp6dVofW4SSbTk7Ky5Xf49ru+zWy0attfc938PzXsL21w5LBlC/MQCzDqoGMkfp1+p+j/BOsZaJGkaMfMWQYJK8HAAwQSPwzj8fnXw3Gl9ctZIfMMRUgjhAwGdqjJHJPIAGPcc17X4djaylR5FKNu25wRtUgDA6cMB949cd8mvkMcozUoyS5ktEu2mtt+zvpptsz14OMr80t1ZNXTurOztdOyei26n0fp97CZ0VI28h4+GkXc27+8ehVWzzjsO9er6DZ28lzbuyklyuFUbjgDPBH69Mdfr47oEwkVZfvKYwgZRnGOAoHrz2x1r0/w3qk9pcxAxedyVQbdxJOAM9AMDnPQHg56V5OHcOaKtFLbS7bV1rd2b6X8tup5WNk/ZycPdqK9rt6q6s5d07+mnY+m9GSCEQNFGCyIFKlcYwOCAOp6jPt9QfSNJuVeWFCoj28kqpILHaTnooJ/MY55JrzbwzeLqDRCWFoZPLBJVeG9M9sHrjOT256+p2EXksHESZDZcHHzD1VehHseOmOa+lo1PZRjaDs4qSstfs6vTr+Gl76I+DxOIk6koz+Lmu2rvtazunZXs9Onkej2BCQK6xSSEhgWPQE85J7HHcDB9OTnqdNSSYeWpG587TjaAVIJBIP5E5+veuV0SUTZiKhAfmwowSSMZOfT059utegabGsa73KuoOMkEHgj8z059vatYP2s4Qh7RQbvL1unve1kk79H+KVPEtaXi7qztFp2SWzfXTW1reisXIIniySA2D2OcEcZAPTB4OBxuArTs45JTyuOeccsMYz8p9BgfQ8Hjia0hS53bCQNoA3gcZ6g5/i54HYH611+k6RFHH5kjbTk8luBnrjI/AemOvWvpcDlc6s4zjOXJHlbVrp25evNbXXsrW7O3S8bTjBJpOUtnrdbX0W1mrN27201J9PiMCxGRi0hI2dSApxyTyAMAADBxg+ma9F0p55gqq6LjjsCVXGCBg8DgE55P0rh5DBFIIoyGJI6EHHPPpjPGegHv1roNP1GG0HmNIfMGFJYgAKOQAMYwPx5PXivq8C6VGfLOryxjpzKSTunHRvrpfZ/rbzcSpVIuUY3lPlai7NrZW7q3a2l3daM9b0ydbZS0jfwncc7VyfTPOSQecdfqM3/7eO07JljCluFONwBGM46jngfXPofJ7rxPCsbILhDIy4VV5GW4AIDDgc5/+tiuZk16WRjsl2hh8xLYH+0ccHHYdfw7+li8/wAJhIckJqpJf3lGUlZXbd27Ws9bX69TLC5RUxElKpCS1vdJPqvn1a36XWqR7LceIY5jh5CUztKE4PYZPJwMdDj8scUJdfhhSQCX5XQhgvBJPO5WJwSemQAfvcdq8el1ry87S2HwTg/eGOCBn1J+bvzjHNX9GtdV1+ZVXfHHkpu7FQeCRjI7hQD26nFeDLiKviqnJhoc9SVrRipStflV7p2W/kuz2t78copYam51J2pRV/eaTdrXTb0737pHaJ4hur6URW3ygsULINzPnBAJ7kDGB698cV6T4c0W6lVZ7pCAeAh5bkDBPuep71H4V8I2dhGJ7iMPN1G/lCTw5UdAchSAegyDzivToJIYo0C4UZC8DkkcH8sgcAZ6V6+CwOMm418fVnF6SjBaK3u6Wb00drb9NVo/nsbjaUqjpYWNoQ3qNR961lpu7La7ttpYvafYRxKpCohACKQvzEDuT64/iP4dAK6WHaqjgDaSSw4BJ9+eOuc+/Fc1HcbCSDvTdxtIGM4+pJHHGR1zn101nDKArHPByB+YHOM45OfwwTXsU5qUrJtpXSTS91JpdF2739bO55FRPRt3tpr+Pb52738zbRk5IPJznJx047cemPY+h4a0kYB5/wDrHpzn154785z
znHN0BiMvjJ4Ycn6sB1zx0Ax1xVlBJIGBf5W+6Cpz8vVs55PBOeO44rf2nLo27W3abfMuXdrTbXqu+mpjd2e+m6+S207NJfdomWHjSRGUgYbnGeR6Hvxnjv29awNR0yAoXaMsw9T7cHPcHuD1H0xW+mEOGYYAY5DHORjt/Ujjt3xg6xfrDCSh3YJ3Z6gc5wOegzg/ljOK4MdXoxot1HGUknaKd+Z6b3vskttd30O/Be29rGEG4pyV3rZfD1Vruytdq27vc8z1qeWzbZAoJU7tuchcnpgnBwBgggfU54NFme9mCuH3tu68AFMcdMbTu57evtT1OW6vpXS2gMjuwBKjkRjOTnnBA+8T3HT5q6fw94duisc858vqvysMg8fK2OSOvJ68+nHw9KWIxGKi4QqypOSfI46WbTTTt07/AK3t9ZUr0KWGs3FTtrK/vO9n0f2u3k/I6e1tUEChkHIGeMHrjg8nn9Kp3USoGIRyqng4wpx1OcDJ/nxjjFdStoyqqHDbR36AccYI6jHPJ6mq1zayMhHYngDscHj0J7cHtX1/JGNKK5VHl1ScXpte62vpd2fe9zwqeIjKbbldSet5PsrWunrp92l9TipIZskgBFKlgM44OAD3+bHHv+dPgfaoV1ztBGO+M9cgnJ9RjnPbFS3MUsTgy8KCFznoM9vwPA9hzycSWiDzVfl1dehXG047nPf0IGCOOOny86tOGIahJe0lNrZq2yu10V1otNulz1nJumm37llblundWe/ybb+fWxjXhjJYKHjIOBxgEZPX3bOBzx3Hauev44fIcPg7ucN15HXJ/l6+9d3fW6AGRVzkDK9cenIPT15zzkZrg9fURImwDzJBkBckZOPlPXHU8ev0rzMx56MK063JU93SPKm224tO3V6We7u9+p2YJqdSmoTlF3V22nrpdN76XV/JHlPiGMM5IUgBSFJAz/wEArk5weBx271ydjeyWcxSXbOgVgI3X5m3D7yk5IYdxg8j1wa6fX1vhdRSkExIMMD328HIPYA53ZOM9+a4eSdY7wkwsCGDI5JKh2529vlH6Dk1+QZhUlHGzq2nTblonFqLg7OzS067vyt3P0zAU+fCQhJxnaN201dNNb3d99VZ7vbdGPq9jLqv2yK5Xy1kDJbFUxgkEoQ2R1Oc/QemK8H1vw6lpO0N7bpKZS0TFWG0g5+YccM3GD1POewr6a1DeEgba2yU4eUfNGpI/hGOMY+U989K8x1bSorq5lmZHmeLJw5wEcfcYAZyevHuefT5/GJJvllKU37zSXu2bT7Lp2tpt2PfwFV25W1GPKtErO65bSu2kl0e93bSx+O/7YX7P1p4/wBF12CGx+13Emn3CKIogiIQpMbSAg528mRs8HBxkjH8gn7SXwMvvAWua1ZTQyW8cMspWWT91b3AZzkj5SWlHReRk5yR1P8AoAfEjQLmeyuozbI0d2gicSAbljIYOVPXJ4JHTpzg1/O5+3r+zB/wkFjrmqR2Ub3VvFdXcSwxglbcfNJ5swACg4GUKEn1BBrbJMzeAxcadScqcKso3sk93HVq9trvXdb2aPtcLT+v5fKjOMKnKtHG3MnGMUls9dW3y20Wi7/yG+ItITzZo0Z2njLiNVGQIv8AaHAyOoOa8mvbNAZAcbwSr5OcMe4GONpBP1wc19jfEjwedI1jU7VoTbyWpkRVZCq+WpI2t0JYkcnJJwORivmPWtNjQSeWkZcyHEgJypBOAXxyo5yAvfsa/f8AIM0hVjFOUnpHV2s/ha8tV6tu/ZH4nxhknJKcowjpOTbvJNpWuk203vZOyWm1tTzG5gID9coAfmwDtGcNjnPPTtwemM1LYag9pIjKz/KRnB+8BnIB7DnoB06beCL17E7F0MZBCjeSep7ds9O/YAcZ64EqkblAxt42D7uMds+p9CPujOMCvvYQhWpWkk1JbtXTWmlttmn2fysfjmJlPCV4um3GUZJpN2ad182tlrfZdz3/AMJ+MQPKR5CJEwiuGyN
gPzK354zx+A6fR/hrxLAskE67hGwzsXlfNwMPuJO0eo55468V+fFnqMlk4ZCcqfufwkDock8MoPycY68dj7L4S8YYMcJuJEI+ZFztQ4xkSHOMHPTAB6/T854o4VjWjOvQheDvzKKd9N9I9L3e2ibaufpXCnG9ShKnh8VV1UoxUr3i7ctviejsrPTWyW+36LaD4hEm2bzlRHjZWEvXY2A6p0zu4Ix7445r0XTtS80pGXTyAS6zFgcgEdGxkEHoOSMnnoK+OfDvihZEh2yIxjAVVcjKgDOAmec92J6cY5r3rQNdt7qGBA+wmMzOSSoMgxtGw9AvOece2Tmv5+zzIZYac5KLcU+W3Zppppd9L/f0P6LyHiOljacFzQk+SLV5Ju+i0Xva3bW/bse5wSRblkdd3mFCoY7ysi5AyuR8sgI68cd8Cunt7kJNMZG3gorqjqE2lx8mwZbBXDHbg9ua8nstQjV8+YXeQCMBmKhWwSZAcHh8YPIC7cZwa6+11JlXznddwZUjGdxZAOZF4+YIOBnGSeB6/D16EqequrNXUlbRWV9np6fM+uoYqnNqSlZpWcZNcyd079L3vr67o7p7gkEAgG3RAzNFuWfzgc4Xd8zJtA3ceWTn5s8M8kOPLKQkzqcujAlSepx1Vx9evasmO/jQqUJWI7WmMyYfYQc7RuJIJwSMf405FUK0ihfMeRWn2ZztOQrxHPKIP4MAknrXI3JNPVXaXu7JaWurO7v1XXXbbrVVz1cr2skotaWa1atrur23v8iC5sUuZI3VJtp3ArI3712g4PzY/g3A4wfMyORgVzNxE0P3lW5kYmNhKpWRVyOJWydhbrtweg5xXcxSQSyCAbHihbJkMmJTu5aNhg7VBxggn6ccUbq1W5wSjlY0aEbjudkyPlkbCl2HZsA/nXZGrZK71SdvO1lbZaXd3a/VeuU6cLOXKuZrrdXelt3pbdLqjkVgjchZleWRZJfLKqVjZH2+WyjPysoBySf8K1IQ6jDgSupDFSnliRfmwIjubZKBxxkd+M5E0lt5Hmh1LRRLney/M6sf4ox0P/PPDEcHrT0STzGjeSVtn/HvcSny3WF8FiVAYMEwAGyCAehzWrrcyi0072vZq3u20tv5NNnLOknaTSbTvezVk1svLR9mrq+yIFiVY0ERkKIdkESHLgvzuCHHzcHecngc+lAhQRyQSGJlwXVgcgbuYyRjIKndk54J6N0q40EUZWUEtJMSF8mUsscpzl1+XDIeoBxznqODEbdnleNFeQk5UZCeWU5CqTkPGAxL9CMjqTwKslutNk77Xa16rpez0va25yVKSWtlo7bd2vW2mtlvp2KMttCxmO5pInUbIQcElAcNL/eU5OBxnB9M1RubHzIZjEwyYI3iS3JVVBLb4pExwAAAeT6Y610JQEXCM2JgyBkHCGVv+fYDOI0A5AJ5J/CpOXhdQoWOSJFEbsdv7t8+YxPzAnpnI6Dpk8SsR7ySd3ddfnr2tvftomzjnSWrtp66/jpr+uutzi5bNbhYtvzI4kEflHaVX5Tu2kDD5BwxJzjoMViz2qxpneIp/LIkliBDyLJjCuedrEKeeSefx7mWMI8QMaujrNskztMwTbvSBQCEYFhtb+Lsuc4yrpIVeAQKqLcwNsdj8k8bHmKXg5lfHDcbcEkc12UsW7qOrStfte60et7727PXU4qlBNqbtFKN2ldXenTS71d+nTyXEvEil4ZXlU7Q8JjXe6A5LLMcgB4/4sZ+8OOKz5rVHSJyC32dNpTBfazHljnBcjHL5GOOK66W1iYSLHArbhGhCsXEY+bfIHx96M9flGd2O1UGijdlAt1jWICWNkkKs/twpDR+vucYPf0aeI1vdc2+r/Dd6LT+tuGph+ZStrfVPqrpWTVu3RL8jkmUxXLTsjom1HMiLtMjnO2F2/ghfqTg5x7c2lHk3Ku2XkVWRJoPl3Ehd0e3kM54I6E4PQ1tXMMqOzxmJlmQAxnEihYuGG44xv3jHy8FTjO
KqRWubZJokVmlkJSEsS0hQ4Z1BAIAz365xwTXWsQpJa2d1HRq72+evk9fLd+bUoTSSb5p9UtVZWs7d91532RAJsTRi3BOCwSVkwjiQDcqgn/WZH3sjBHHOcWkkYsVaIsyAgEsBDEyj7xHJBOTg8gY7dmvA0wQRArJjZ5aLloBF99VHADc/NnJ5GOlRRwSI32q3SIDO4vM5kBHTKcAEgg444yR9NIzS1TSur691a6vvr934M4pUWtHZXcUtHurPR62876snij8qKSVYiQrBpgpxHPK2QZN3O3PZcEN83pT1eFHRHCmOGQnAYk89YASMFgDwT0AIFWomdmkj2+WAA7llyksjA5EfzYGOduR8uTjJyKq+Qu6MMQocFtrDCSbDwsT/wAbfN854wCOCea6KVTVvS94vZ2a01WvRb9/W5n7Fpt21TXM0lqrLZvsnrovXRjwsAeSSFZUSTcpUAsrAgYxggEE9AAAMfiYhB+6Ejl9x+fIGWCtyq5JHzLk5BHGenWr4AScs6hgSUWLdiIlcbVUAHBOSemCR2rQhjgEXkCNXZ1JYP0ZuOSCBt2euSM8Y4wO1VUle+ismltp93Xaz1vfdkypNvs927WsvN6Lt+iMN4iZoyAY1RiFk4Bd8D/WAdOOh9Ce5NTyRFom849XWVCrDcWX+HOMjdnlc8465zV9LbYGiSLzEAHzKAhLZJJYc4A45zz6c1ZjtxHDJm3kl8tVMj7gQSckuq8kkADHOQRkelU6r01W1td1e21rtN+ffSzMp0W7Pra1lqvs2te62T237GMlr5isqiQojM5KfI2x8ZjfIJjCkYzg9cY7iE2sUbl4BIqZKSEsRGGfuRjO2THzAcArnvXVJbwgpN5chFwvkRSzLtCnu28Zyp9dvGM46UXFtbxvHLC6MjKkSxgFkMik5UjoMnuSScdPXH6xySdpWWiWuuiSX3p9dt3ucMqCV1q1fRykvLRPyT6db37HGm3khyUgikRGXduIIRXznBAySvUnA6jJ6GqhiJlmt4sGGSN33IuVkbhhtB9Od/cfLXVSwJM6y4QMwdfIiJIkXjMSj+F4z1PfdjOeKhlsCqmRFMXAkRAu0xOpP7tl/hlfjJycgbsECtqeIhZOUrO61tdtK35X13W+upzyw876NSVtFFSTbTtutNrX1tfq+nFiwfyXbZEoK43k4Zgc8qMZBB6sOpxgd6xLmwgRiI5EJyqpHI20tIOwGCWAznPHU8d67S6tbkSQJGo82UiUru2oIudy7tpDEeoAPOMc85s+mt5y3RiUuoAdNp4duFbP8G0g84bIx/FxXfTr9ea17fO2ln9++u/3+dVw0Vo4Sc92tdNr6prXul3stjiZrN45pJiGLlShKrkITncRzxn0A5xke3N3dpIX3EFTExTBH3FP3Gxk4brg5yccDAr1e408hHXyZJHj+aWbkwMWz8gPHzLnCj3J6A4xbnS7WXcQyiYqBKinksvILEjkrnkcEZHXqd6WKUd9bJL+bdrtfZd93pp055Yd2s1daJJ6duu6etvxPJLpZ1Z1ceW4DYlYY9OVJJ4PUEjjr7DnXm8q4jilII37lcncrheoLd+23n15OOfRNb0stJ5cfzoUyQ+QWIBJRSerHueoA5AyCeINgSBHGmZi77DguXRcAjccFSv8II7nvXv4atTnTu2o3V3srWW93qumndX63OCrGVOUYxXx6eaSsrLXz376F6zaGOSJwAsMvzrGVwHxjHfhT1C/Wu4t7iz+zshlRXZVACA/KSSVA5UhhznPfGCetefSI9vGGHysudgU5JI+8Dx8uRjjnPqMUyG9DMqncPMG5HTkgD7zHpweMH3IqK1KNVXu2k7t3Vru27+Wi6rd7CVflkouVr2und7WvFNaN9tFvp0t6BgAqsThjtJLMM5IOSCc8qQepwfbtVyKQTLEHCpGjAAggMMEfMM9D6HGe3SubtZGcxMJXZmBYr0MuOCvXjIbPYdc1uRrGF3I6gsSGRjudGGCQT0B67e
vvwK5HHlvFpuSabs72Wmt/lZpPTa/bWdR7q95aW1e9u73jvbR6X1udLBOtu8AnkUqCGRw3ysQCfnOOD0AyD+PONOO/tnYurOmDhWBVRlhyA/Qjj8D0PSuEkuNlypLMS20Kk2FiYruBDZzgjI2tnvwOabNdysSWx5SSkKqchc4HGM5bJ4P1GOaiVOEmnrG3xaK+qTdvJf8DsebiKyi5e/K8FZpO97JdH1flsdvPdQuw8ydmh/uEF2MiAkMTwQnP8Iyw6cCqwvHwJEYRwbQUB4YbTjc4GQDnhc4zzjNYkCTvBGiiRYCshaZh8z7CpZWHJQqGGxeQ2TyMEi/HA82Iyk8cDgF3ZSsbqDlCpP8JOdpHHXOe3PUlGK5XdprRO1tLa9ul9/JXucXJ7ZKXI25apJOUr2X3NbdPPTUtPqMx2+cjiRmKxIg8xmjbOJGbAxGSPmPbJ4JIojaWTy2ZQVMjeYu7O7PVFQDnbxh8+vFXtG8P63rOoQ2Gk6Xf399LIYLdbeOSYtGcARqyIQQuQC20YB6HOB+un7Ff/BLjxd8W9W0TxX8WJDofhSHU1d/DccgW9u4VZSrXfGYoZADjKtu3HOBmuWdSdpRhFyqP4ElrJtrlgnpa/Vvp1SPdynhXN83qw9nhp0cNBr2uIrXhTpwfKuZ3S5tJXslJuz0asfDX7Pv7I/xg/aZ1+z0rwB4XurvTA8Y1LX5leGx060DASSmdoysjgdVGOgwe9f1kf8ABPD/AIJ2/Cv9mJdOv9YsNO8U/El0X7V4gvoEmktZ5wpkt7ASbhGo24OD/CMEZr69+GXwu8AfAfwe3hH4feHNO8P6RYWqQk2dogku1KgSl58CRnJQEZBznP1u+EtXYeJHuBePGBcGaLccxlE+8o5HIzjPb3NerlGCjh8fg8TmbWJhKUXDBvl9hSqXg/aXa96UE7LnWj1R+yZbwvlOVYDEywUfaY6lTcZ41q7qXUeaNOHLdRTV47tb33Z+g39m2YtIlGXtljQt5eFKKBjCp0KjIGOMjPTFeeeJ9E0u7tp43hjfIb7P5mBkgHgv8w8wcELgDnrkVraT4vsptMbLBZo7faqMc78qMEdM9+g4z3zxz9tqMOopeRD96Vd8cYBIxnJJyCcgD157df23H1MunQpRpUqb9pCMkm0mmlFK70T9Xtc+KwUsfSxlWpKVWKpTT5tWn70dk1r0tdPt3t/NZ/wWs/Zxvbn4eWHxq8M6cJ9Y8D3MK67cwEpLNpcjkMzRqp3JCq5zuwpb3r8Mvh9dSeMPD1qwcLciEMzE4DzRj54JE/gDYAhbPJ3nbX9yn7SHw10j4rfCPxn4Pu9PS8TVNDvra5s3AaOUNE/2dgCpxLE2TIoySSBknFfw7J4U8RfAP4ueOPhrqllLb21nrN1a6bFOd6NbzTO1q25lXMSru5GOw9BX4zxBSlhMdzqMFCvFSg4u3vO3MtFZvls11unfe5/Q3C+Y/wBo5dCc5SlWw+lRqWvKnGzcddE9PJNdFY7Sye7nLQttRID5MkZP72Mjd0BABLHlmHAXpnOR7X8OjqGmXMCOwngnUs2eVRT6epA6Hjoe4FczpuiQX8EN0Ssj3anfGCFCSEZRs5Jz6nGBx7VraPJqGl3sdtKGDxAgbzlZOcgK+AGGAMHgda86m7TjPXlVpLTrpZW637ptW0d2fRVVGpB2vfls+ZvW6irqyv0vdee2h9Vx2xhsov8ASgtvMhYBfmUE4IJB5Dkk4HsSMHiqscVmCEtgJXdyblXOWA4O8nrjk4+nHPXO8J6rFrlm8NwscbQIWCAjouNgI6E8nPT1FbkmnyRzefbhUG0s6IQCcfxKOODxx39R1r1oyVTkcU5aK62d7LXZ79ddD5qcuRvnbUk2ru6hNPa/R+TfVPbZctr8CRy3kdqHSQxgMxX5NrA5xk4JP8PfHPPQ+J6pFcQW7NbZlSNmacdeHyT1PAGCT7e1eweIp7t2BjJwFwGIIzgnIJHceuC3uK8r1xpEtrlIlbd
ICZ2QhgQwIYH0x19yc96qlF+1jaMWnKzWj0bi972WzV7rVdzjxmKdKlyqUXFK91q03yvTW71V7XX3M8kutQWO5Vn3qrEsI1b5GK8YPUk4Pt2yc8VzeqavGwkMY8vzQYzI4GEc8ADPcY289fryde9WKNo2UbuCW3cMoJznv17/AI5615P4wvbWxjuJprnEIBlRCcBXUZdmAAKgjAHOOcZJ6/SU8PTnKlCNNqata7W/u2Wm66XWr81Y/M81x6XtZ1KrUYRfM9otKzdtelruz9bnF+K9bt9BtLu7up1LEM/J2l5WDHGTuwDj5eoOOvBr8xvjH8TpvEGq3Fpa3LvbrL8zBmCxt2jRR7E/MTyOo716T8evjGL+a40fSrok5MUr27btqrkLAAMBQhJyRycg+ufjG5uRckyuW805ZsjDbhjJLevOR3HPQYx9/k+VTpuNavCzjFJRs9E+V8zu9d9L2aVnofzBx5xUq9Spg8JVg4tfvJRbbirpWTWzb6Xd/K6vHI4Yn52ILbueTknJ57YPI647E90QkdRlWBHJzyeR6cE9e+fc1GgGMk53/d6474/lnAI/QUpxuQAnDDIXld2c8Efwk/3c/j6fUxSSsndPVX0WtrPfdfPb7vxSvPmne7avrJX1a30vvddbvS67DiuCpCkdOpJJHJOB0xjnrzSHI5UEnJwT7gDO3gDvnPHQ5oZxnuMdx1PfGD268dzSknbxyMcHqc8857Y+mOOeayb13tZrq79E0r6et/vaITVpXai0rfK6dt76a6Jq3boRDIPAOevHfP485/Lp7U8y9MKc8Zb+9nHtxkenHTJqMZCqWzwee5xnqD0469OPftIQMg5ULwRgckZP3up6c/Xp1q4vfTS7/G1tvLs7LS25hfXZ+bbXZWS1a1Vraoc3YqPlABYhvmOe4GCeoGeeQeelODKAN25gfXJOc8Zx37j25pjdWVcYwGG09iO4/p9ME80Kcg4yck8kZA6Yzx2PH8wKekl5Nf181+ZOr0vq2n3a0TbunZK+yt29SRiGKYB2qCDxhsY/M47fj06UAYBDH1wSMHjnB7ZxkH8ximnIwDkDoQOMe+R2/iAPGOuDSr975vmUZ688nrn3/XHTvRb/AIfS+6fa29/z32dtLLmt179Gmuum1ku/Xd6bTnBxgZIJITI/u8dT165Bwean2oAxL54yNpJA+p7n8+3aoWwpABUK3zADBXnoOnXI6D7vv2RHJO04IICrnoxHXOD0OP5UlFLXror/AHL/AIf530Er6N+Wrd7rTp07p+WvckBUYyXwx4I4GcHqRg8j2Oev1mXbsIB3MOQcnAx68Ad/f3poAC8jGOoU5X6/Xnpn9KbuAwBkZ64HJDED6D15zz271RLuk5X7Xst9Y6prTfV9dNu0h6BvU9T+qg9uenoO/NSIeQCDhR8x/LGCOQP8eeuKbgDbkkLgPheWAPt15B55z9OlPDKVAXd83AwM5IJxnrz1Hpx35ovf9Ouuievna/XtZE9Nmr69VdaW0+XzJcp8oOeBjPcr1zjofXGOPbnDmC8ENjIJAyG9/U8+w6Yx33VF143KG25yM5+vPT36n1pM5ZSfXAxxnr+H1HTsKP6/L/gf0i1e19bavXXXe9n5belvJypuBDc5yccHp9OgOee/41ZVu5b5sHczA4B9Pr0GfcjHeoUIBUljtBwAcY9MgAYyefqPWrICEn94u3GT6jpjHpjvjHPpWsXdPe6av32XfvbW3buRZ6Jp631s9+3l99/VkJBJKkZJJOTjBHHUj1/w7UVKwIbAbKttx/nGeoH4Z7UV1QbS0b6dX2X9dRvfX8dX8zzGVVVmU/u0ZBhV5+Y853fw5xyOgAxnOCBJESNUYbwvOcZYlh0H064z3z06xs5d9rKQy4Cjr5i84H4ZPpnj8RY2CMIwVUuNwPBBB6r9c9AeTjkCuamrwXV82nb4Uk77+S1foCi4xSk9ldLRbWW/XTvq9b+ckzCQx4DqjhgdyYGTjOOSCBgHj/H
LlSQxu2VGwYwDtGOckL/exwcHn1BpHVlBUlgFAcNydgOBhl7FuePp75i80qrFRvyoU5GCFOc/Lk8gDIbPOeB651fdtbe9uvdabO2l9LEa2tG1ubq+9nvpprZ9/UrsVU7gN4YkbeQSfQA5HQ9cZOBW5p8e6aNRgCTAjXrtIHJA42sf73fjgVhsTtXYzK2cbjyq5Py4PbIyDx79DgbumFhcKXbdMNhyT8qqPu7RjAbn1Puaiu/3Emtdt9dLK6ei8/zOzDte0inJpN6tu+qat3Vlffd9u/0Z4Hjt3A3M4YIox280n9y6Y5KJhgV5AyAM9vur4YW/mSwMyQosqlFJAaVJMLtYRDHnq3UqWTGBziviDwNHLLEiIDJOjb0IXDQ2+RkZyQnmd3JOcYx6/dnwxtTN5CCyMSrNCyBpPLETsD8rt8wSNsfO3zGQ9lxX878aRl7WrropySTadtY7rfV6PzXff9Jyh3hT2SUU0t/eXJd+V1v6ro2era3Zy3CCNY7aaZABtUbmujCD5zeWcDaNymNdw2/MAWPFfGnxR00Q+dNsZVbzliBQrKqH/lnFDk7D1CZLY56k192ahZzRWpWS1+zSlQ7zRtlHcA/Z41xzCJDv3ABt20AkAAV8o/FGykdJpS7i7jVpC0h3bA2f3QbABmXptAG3Pft4vDEJ08RTbfuuSvdX7dFpbR6u76GmZzThJOV5Wvq27bN6+W1+vbY/OLxgFE+8rKu8FYhKu1tq53blHVhn5TxxnGa87bYJdyniQbgGbnceCCv8IPXYSSMnnHNeq+PbSeC4xIjJI0kiursSYIjgx5JAwHycHGePSvKpF2yHcoLK4BJPOMnGw8fKemfXqOuP6YyZKOCptO+zjb0V09/Oz/p/BVP4km1bVdt1a23V9e9kXouY1LFSm5dxXgqe23PXvweta1uUdjHtIAH3ug4wQew79Px9jmWasUdfuhTuGeRxnGevY9T1HP0mikdGBOcbtuGyMjPPP90nPPXHT1PdWjzxaTs7XWtld2/PleiW+1i8KuWrFp6XVrt8utl+PW3TY73w/co97awBhkSKCemD7jJ7Z/8Ar54+yPh9CBp9xp023lXaLGOVfBVvYde/YDpXwrY74JRcpuRoJ1AIyMg45HXIIHJ449MHH2T4F1VrnS9Nu7dw0qgRTEEAk4H3hzwOQCeP5V5U5atJNKybdre80ua+l7Xte1n9x9fgZJvZu6jor/DJ2a5e3X5aHu/gXSrmz1G7bzTKNpZRnpEoOVH0BH1z+fZeIZS1g0wRsHKkAfeHsORx/wDqxXJ+Gbx4Lra8jCWdGMbdFBYAlcdlYEdQPu8jtXpc8UVxoLzeWrNEVyFwcdeQPT/9YPrjUVmpNL3nFXXe9t+zSVvV36nuQSajFaJWSTunquuiv310Xc5nw6r3GnvF823IB3L90E98nA7YJ+vUV6n4Wu7aznS2vNpijiYL5mWRmIGDHnbggj0OK870vULeBzbqCqTLtkQjaFBPyknnljzx6HrzXSWii4ds4EkMhZMAkeT03bgeR7Y4+hNclRJ3TumldNrRXt5J3XfzPTw7cbO17O7S6JNXbadr76K6P0Z/Zc/Zd8dftJWfxCvvAus6VZp8NdObxRqtte36QTvpNuGkkOmxMrNd3IVDthGzBOC4zWbqOsW194guLcXWRpBWwd5Ivs8kslgxjmLLucecWP8AeBcckjkV85/Cr4q+PvhNqWq3fgfxPqnh3/hJNKn8PazNp0jRPc6PqKhby2ZgxXEiogU7SRg4PJFdLFqlxp88kjTyTm7laUzTfO10ZiWaR3Jy0pZjlu554rysdCE48kV7rjBwdk5e0kkqqk7fC0lyLpd38vbwbqKdSXtW6cqcbQ920LcrXs2o3TbdpXetk+h9iQXMemvpV1Z3SPJeQQ3kEkYGfMGfMikGTlkJXIJ7jtXult4qbUo7VGijQiONHmVQhkcfedlzy3PXIxzweK+NfDt/dPpVtLuCtbSF0RzlwpwU2Z5
Ctzk5OcZGOTXrXh7xLJcrbBV2AyFbqItyu3rtHJGf1Az71+cZjgpOUmrNRk05O3u3aetrdNEnp16s92kqsYp3aSSi3fVfC9L/AJ7bWtex9v8Ahi8mt7KMks8DbTkEYDHq3XgDj168Z6V7d4VugbmDYnnFyGO7kIMck5IPp+uetfH3hHX5mX7NC5aALgxnkoe5yxySD3/TNfQ/hye6sZbW5Ev+vUKQDx82Mcds4IOenPIrxsLQdOs3OcHHpaOv2X3vd9bvqcOLU5wkn8Urxir2TWmrv1010sfZugajmWKE7InCgschTyBwOzE4z07dM817Vols92m8yEEHA3HjaOSeoyfQHuetfJ/hG6e8u4mnd1WLk88sVA2kjPAGe5Ge3TNfSWl6+IoYbZGQNkKCSQXLcZPXLHA9M47Dr9hh6lH2TcopWVo21vbl/PS/bfdHweKpVPa2pxk4xfvuKbTdo6W00WifotLnsmjhFnUNgKvLsuDlR2BxjJ5+g7mu5W4iaSFVYLEoyVX5d7ds4z+OCQfQ15FZX0gKiN+CuAo4Lk4y5Oee2P612aStGBhicqhXJB4IOcnsQfpRzUndQgo/D3tf3e2tlfRa3trYqjRb1btdWsk3rp8r90721Vu3quk3cEZBdjwrMEHBHAK7m54J/l14q/d+IAgCI42rgsFPTJwOME5I7fgcnArzMakttb+Ysm5xtC4bLMpHOcHcucYwMkgcnOc1f7WUFZHk+Y4cqSTlhkDGcfKMnjp2J7V6kcynRoKgnGOiUpc9k/hVt9H5PvqelRwaclLmblolHlb/AJXa1tPua2buemJrLK7zycopAYliOAD8wPOSB1OO/sMZN34lnd2WJ9sasQuOWfOOQOeCQcZ6cY9vMtV8Vqqx2kZGcEyBGHLHGAfQ+gPTkkgAVl2upTzp5as6tjLvnopJG0ADv0z3zxzXjYvNGr0qWIm23q42u5e69Gnp8909T38FlO05wUU/e1T9xJrfX0V9Xex6lFrlw8y/ekkZiAm7J2jHzYAPAzyeeeldHC906JjdJI+AVBJABB4/A9e3T8eM8OabPIyvKGCZ4weWU4xzx165yP0r37wtoCXsiO0YWJECAAYL44GT0OOR796eWYDFY+pHmqTblOO+t78vza/DW+ljoxOJw2Bg5KMW4WVraOyitHay2273er0M/wANeG7rUJo5LlCqL8uSPlwCMBRxx3JP49q+gtB0VNO8tlQK2ApyMAKSABz2x1brgjmrmkaDb2/kqqJsCIYyowQRksGA6fTnPPTGT2K2qY2mPdJgYYcDaOzDbyc9DnjHvX6vlmVUMuo2VO9eVuaVtW7Re9tFdfLVdT4PMs4ni5NJ8tP4Wk9LRt96XdXtfsiit1tdbZemcE7STnqcZzjoAOwH41s2sZYAqW3kZx2I/HjHIzg8Zx2GK8WlAylijAkhtx5G4/wnAPfqR6Z9a6S1hEakEFQqbeRjgdsnqMfTNerGM5NqScLK6v1slZd7a9L9r238lygnzLyTjeys0tdrWdr2Tt+Nq0cbg4YcDk5I6Zwcr3JxgDt64JzfR1CHHPXbnjB45A6gf/W54zUc7pHglxt5PJAHTPr94j1Iz29+fuNUWN/kkCqox/iD1LHn9RnrWsZU6KbqKMdL20u1pqkvJq9tWl3JdOpVfuxXS1m+tlpp0s3a233mq7YkZyfm5LbieMEnAA657nr7Gr9vqMYQIzbWGQhByrdfvdevv2+lee3euoCQZF3Mfpn0PU8jscZ6HgYFUxr6LE0YkBldCEx3Oc9e2P4Tg+leZicfSlLlpzjqumltV01d+2nVeZ20cBUjHmnC/Na2j20e+mnva9O/ZdxqGsPBmYSE79yhASDkHAPXgD15/Hvz13f3d+UFunmuflkT7uVbkkDaeQAckYxxnIGRS0XSr7VX8wyuAp3Mrng47e2QeCMD8K9c07SbeBVIhRH6ZwCfQ5IHJPcnsOpxXnUcHWxknPm5aab5m03KS93vpqrN7dd
e3VVr0cMrcqc0lezur6XvZ6f1dO2mNoWi+TAr3FuElYE5JDYBxkEkewOGAHXHBxXUx20cabQgGfmwOMnAPTHXgc9eOMCrYjVQcAHkg49O/boOOeR0z7qwIGR0PIHv/IcZwOfTPevahQp0aUYRgr00rtqzWq6bvo3pt66eNOvOtK7lu9EnddNH/wAN67lNgdrZABH3BjoO+fQnrx296r7WZSCRx8wUDv3ySevpjgVblcBeQA2O4457Z5xn0P48VRD4ZskYGcdOmAcZ/H+Xes5QjJX54q+id7dVe3XTVK/3a3HDmWzvu+/Zafr+C3ZkajapIhZgpboUAAHsfTPUkZGcdeK5tB5cwRX2qSVYk8g8YGOwbPHTPHHOK66SaOTcjHkAgYAPQdz6/p6k1z8yQG9UqVxn5h6nsSPc8D/Hg/N47AU5V6dSlKOripODXle6V++t/VbntYPESjCUKkZcqjeOl9Xa2m3ezd3YJEdo2VduNuMvx05yCeT7c5J46muBu4ZXnMUmCGdsEjJAHcd/cDGCOnWvSb3ZHCSWIAUfMM88EYGce4Bzx1Hvws0TXE4kBCspbHIBOOm4Y4bnuPm7DIGPPznDqEsMottpxtFJXlH3W0+6e+umr0PTwElJTkrLR2beqfu2aafl0W/nY4zX9PtzGGKDYqhXZlyM/wB7HsRnpnPpXl2rWFuqt9njCcc57kZJwCeBnt1BzweK9g8SRymIKoZXChh8uFJUHg88556nv24I8o1mVvLZJDgtjcQQp2rnOOCce2a/PeKKtGjCs3RUGo3XMrW0jaN2r37vVNW12Pt8hdefs+WbaUleN07pOPvNX26+r62OQW4kNvKGkysYIiiYA4I468H68enfOcV9Pa5s3nY7ZBOPN8v5WIycFgTyADggY7H6RzajHcPJBCrAoSoBwrH5cBmPHOehwDjA56UabLcWsUomJnGCGUffdhnGO5PAGfxHWvyKOYQr1JJ604qUZTS0i1otF1XW++u9tPvlh504xlGyk3GSWz1UW7Wvp6313Wh5T44sGufMjVNyMf3ZHXgEbgcfKP7w7YHJHX4C+M/gTS/EWma3Y3CoI5LaQXQI+d0KsHjV+SytgZTAyw/P9CPEd+kl1KSZF3BgsG0BoWcYDlu/IGemexxXy/4y0UXkV2HgCyFmyScO6gnn1JIyRxg446HHkYrGJSTptvltabezTS87XVnr06n6Dw9RlaHtE483JLSybT5U23d81utum1uv8Zf7d3wBm8Pa9e6nommMbY3IMkoQhY1ZpOGAGGBAyTnjGOQBX40eJ9Mmt55op4nHlFvlVdqRyD7uB1ZTznPUgH6f2/ftXfA23+IOjanFFpyYs7YvKPLVSURX3Mj4OJF9+eQB0r+Tf9qX4V/8IH4qvbe3hmS1WUC3nuV2ysGL7i428kkcH0HcnNfqXA+fxqOlhKsrVUoxjq7te6lu99V0tbbqieNOHac8I8bSpRcYqPtYqLd2/tONuvW8k7p9j86NUt8SloyCwIWR3wC/PIwRnIx145+tczPHukfBzhehHBIznB9Gz1xjPtivR9WtRK85mG9z5oQRKdoEeAJAeCynJyu0Y29TxXFXVsqMsQzziMydyRk/d79Tt5GeT7V/QuV4tTpKnJ2dlb52V1b520Wr7I/lLiPJ6lGtOsor2bbtponpdPXRLff8Tm2j6bcYOeRzwM9fc9fwz2FPt7h7VtyFgQw+6cDHp19O36irrwAFlXG3naR8oOOq45IGR/PHQ1TKZ3AqAB3z3Hc88DPXrzjJ5r0puNRcktY2Sa0aeqt8/wAPPt8c6Tg02uXW6aeqatZ6ebVl1evmeq+FvGi2jwRO8mFctkvszkjjkNjGCAOhBPI619O+FvGhufs8n2lY0PJ8zHmIWA34GRjPHzccZwoINfBIzEwcAjkNkdh/DjJ7np+PTiuz0PxZcWU0YMzuRgAM+CwxzuBBwRxz3/8AQvhuIeE6WY0p1MPC07XcWlLVpbarfV3a+R9
twzxhWyqtGFebdJSjyyTafxR3fdW1Vnv3R+n+heIreebLSmdmUhcnAZio4tx26fM2RjAycHn0PS9bjFukLFJm3NLtLZYHI2pj++uD82cE9uw+BvDHxCVltjJKUKHy2dnyVRwMqBxtJKj5uc44Ar3vw/4ytr2ONU3qz7hG5O1l2Y3EEnlDnuMkHtmvwTPOFcThPafupqyfMrfy9Fto1Z2fVO/U/orIOKsLj6dOcasG5KNnzXf2bp66PW13fb5H1ZHfiRpJd8jqSqliPnRlzk4/5aDkBRxwPxOpa3UqB2ecqHIbeq/MWPCrnPygA/NHz3O7HFeO6T4ls5CrLdsY4ogWKny45COq4+f5ieo746gV1dhr8YaNTKJImV/L+Us0TjADMc9Fyd/XGeAeMfnGJwtWk5J05Kzsrq172v21u7ea21sff0MTCajKM07tK6krq9u7ve/T0W2h6J5sMckexhjcN6hsh5AcgtIM5GCSX2gE8YGM1rPdBZHkjCJlY1aYHevlMMO3H/LXhcgDJ45rzuC6+zRySbowJExJ5YMwYZ4kj+YfO2cPgccV0MOoJCm9Dn7PHGoLkrtZ853R4b51IGCW5yemOfLk60HrzNd2rpfDe9ndLRafhrr1RrKV0ueX+J3193s+uvmvM3JJYS0iRmQCONSCU/d3AYEEE8/uTj5SR8vQDmoo1fEULkyW+2SVEMY3I8OPLiBzlcbjtycH8KeLwBcp5W6ZUV8qCA5yCXbIBByOgI9snkjTzPKWRzJJKku+VW2KqAqFWUj7qDd6e/GKpVJRSba10tbbTtutNPW/qTJ2s3Z7Xd/RWej166ryvZENpHIICA4DNG0zIi7AZFztyDu8uUZJJy2cgVKIpEtljkVjJEwmExO6aORj8m6PIJkIHDFuMZ9qaC8PFxKJCkPLbMDc5Plwsd2HLBTtGQRg45NNJy0sUxkKhfMRwmUeL0cAjIXs+eATwT0TrLRuzb6dd1f56X1+Quamopp3bd7XcrN21WlvXRLffYGjmS5kLIiZCzjCjztzDCAcne/UsRgAkZB7JcmSHfBsgiMEqRsrkSC4xuO8vkc5bhOxwM807ESLEl0owSyM/mb44ejRbDgZU88ZGCOTUEkMbl2EBHnkK/zM8xZclZ04AIOORxkHsalYi7tZ+t9LXXzfVaP1vqclScdbpyd7K7Wn43s9Oq1v2KhtJlgYBzICZJJVkTZHtGCGLZPlyRgnYAOckVlXEKxsp8xpCoLW8fljfEMfebn5Yzk5bqAAcHJrWad5nluChEbErNCMMkwyA6HGNoXAJGOOeTULrG7lH8x/KXd5oQKCjY2pHJkllUjB4B54GDXRGtazd9Nd9LK2z6ejunpfa645pdbNy1spLfRXdr99vS3Q5cW5hYyoGigEbmSWPhXfgRvIDy0b5ZQON3PTHOZNbtEVijAVQzxvvHyIHILbeMqT8uH5AJ6c12O5Ss1vcExmKLzmTYGBZfuO4BAOBnA7ewrNljjd3laXam1EbfgN8u7iMc8PnrgYI4zmumlipXvq3prd6PTazv3/AMtXfnnCNrNJptJ3etnb3k91qrXv6dEc55BieGRncy3DN/o7yjA3Y3MqheEAAOCfm9iDUZtpJmZw7BhHLMJ44RHGinAZA+47QeNoCHvnpWtHHEY3cKpGXImIxPtcjerMc5MW0CIZGQzc1LGYzPtV1QQxBxj5VdyMASR4+cvzhdwA2Z68V1fWZNaJvZt7aeXXTrr563OadKMm7qK5mle9mrJdem3rrbW+uD9lljkklLtGgSOUk/LI7NuHLjJYMPvjaDIABxjlVjhVWg+yhQSBlELHzhuIC8gAfxbeh9cVuyx53uJFE/CkRoGXgHcpG794VHQHG31JJFUTZXLKhSMMZA0nmofn28bQQeFYcjknPfGK66WJ5lZuyT89dV3SXp0/C3BVo2vaN4pKz0d9Ul22dl835GQiN9kiSRZLhXdo5HCbZC0bD5PvZDqW4ODnngAVJEi7UVo
kdkJKR7d0qKxAGeeCmBkg8luevNkQSSy4jSZcyhURzgsVBMmVA+XOeME+g9nwxFZI5bhfJ80kRvGcEnPBJAJiAPUHduJOMGvShNL7S1WiS2ur6u+jtZu+vy34p072aul1tpd6aXvba+ml/wAoYbOSMD52aRsMVZONzfxOuf3Z4G7k44ODmp0EamYllWZEjXPUMrbskN0DrjHTJB5q+EkinkkF0vz9+GUq/BVyQBvOASO3UdqAYwywhJF+Qgug3RruwSSDgEjryec+tbKvoveTSSau7XulpZdb/KytsY+wsm1vZtt3fya2W6t1VtzPhlkCo22IeXISsnmZdlJGQ64wFyOhzj1wan/eXDJK5RWdpHdghVXUgZQAMQQwAKAnjB6ZIqzcR28Ec4lYIuxViVT8svJ2lwOVyc8A9eKFuIok8t5leFkKN0JD8eWRyMEc5x19ecVUsU3blUnvru09LW/r0M/YVLR52nrf3bp7K666q/T7upPB5sghhEglgVMxAYDeamcYP8OOcHnPOe1Pgt5CIoQkfkys6xlG2v5oHBkGDsZc8nnfnA29TVGpQxLJG8qS2+z5nRNjkZ+QZyTv/vY/rmpbfUFEgYeXKHYqGRCJHjA+XYueNuSN2QSOfpj7WpJtpX6vTRWsrb9vTvp1zdGnBKKSV3rJW5lqknaz09Orvurl5Ld7aMyNBG5SQswWPBCZGZhlid2ccDGT1OCati2FwJGkMqvNLuWNF2rFnGzK84VxnkHscdMgXVImQxNEYrhj5ZhiUOAxxhpGBAEZGTtIOwjknNa9rdqkmxmguBKqwws4BYtCOFVuAdhY56AkjPSpc6ibctfPy0tby26a6+ZtHDRdoQlF3tzNq3a+rvvfa1rd9DnpdL8k+W8I3RvGIlY5LbtxQlsHCKQc8cEjPUVXu7SGVJJliUSSELNG8WEWRD8yJ/z1J4IYbc4I6V1bPd3Nw0jG3iGDFDAItxlH/LTBz1TgbscbqWaOaOGGWeGJpRJ8hO0AR5+ct13AgDIwMYHJJ56IYm9k5K66KW90rb+iv8r6bJ5fHldkuZac1k1ry35mnpr6aJtrdnAS28915wCRomJNiQW+AyqFC+Z8w2SHJ2nnHPHc82fDM0kqjypQ7hp5ZUU+U4XARC/8LcnchGCSOa9ut3sbrfEkSETSPJ9ohOyORkGSqkr8isSB1POfesm61KO3Msa2yKJ5CQgTfEgQfLl+N7EnJ4XoPcjupYqW0Iq+iV3pZ2d27331Ss2zjnlMZNOctrOXLpF7aJ7Xs2uita3Q8UufB97OwUrGojaJFebgKX3gl2OC5wBl8ADisG8+H95+9AijSQOV3RsGwU6MTgfMc8nGAOx5x6nrk+p3u4D92o2Eg25UkqcruIbqM9Mcjj+HFcxcW1/HGALmYzTu0lxcbidjHGyFU6qODuAzt4GTnn0qVetyrmqqOtuWPX4XdvZJ2fR79b6cE8soxd40XJK9pSdnd8rau093ZrprfdnieqeEbuwnO+VT/AyxndtZv4WJH3RjPA44PBrjhpVylzGYYJnkwYyrL8g7MWHQJyPm/IV7td6dNJcos32m4nY7jE6FY9jj5Wwc7GOPm6kHB7Zp9jpUrqiQ6fObsyY2IpclB3HygsigElsr6AE16lHFOMfeqRkmtrrRWWjvo9fW2z1PNfD1atL93CVOz5k5drx1T323v3vbqeYafpuoACNYJQ+3qYyyMD93YxxkjnsCM88Yx0en+FtXlV/JRI41P73eeuT8/wAxOfNXI2DHOTjjFeotouvSy4t7aMTMo8pRGAgYjpI2cNvwct0BUcVsaF4G8Uao0DlhabppWlk80BmcFQIvLA4Bz8oz83c4BzMsbFNylKMFdayle0fdal03269ttt1w5JSiqletUvbSmu/Kt1o7eaWj1vpbzu3+Hep3U0Ul5Nb7C0aNamUbzszkSsACMBssNvI7jHHb2Pwx0G3ASS/EcKIJri82sVEoOWWOME5Q5A3hvQ4B6e9
aJ8JdOsY7zUfEmu21lJbMnnI1wCwWXkSxPkBCADmMK24E/MpFejaXrHwV0cWuk6ev/CS6wlxHEkMHNvEFOSQvzBxnG9c/NwCRwKyeY0Ztqm5VXHlUlyJR1te3NfVP8baa6fRZdwRgq0eetCUZtpqMlGdSpzOOyaSW+t3o7dDwjw58N7rVZI4fDHhS+1gyyj97OjFBASAJyhQAQDOSxPBI45Ffdvwd/YB8a/EKe1utesLKC0VoCtjGm3ZC7cMxCgMrjqcjOzvjn6//AGX/AAPdeM9Uspk8K29lp99GN0CWyq7wx7drxnHzpMDyBtwQMk5Gf6Cfgd8IdKg0e2szpUVmHtfKVWUfupQBsRhtBLR5YoM8ZIzXqYHJ8TmyjKnFqCd2opqdk47dt9bR1bWiPWxGUZHw5SjXnhaNWtGScVX5Zun8L5nGKcY2e1rpHwb8Av2EvhR8JdF86fw1oEl/eoP9JeyS7uYLpU5PmsQYHBPDBWPJJ45r0/wO0fwr+INzpFxaFPDWsyMtrcgfuLOdyfLVeAERzkbQe3HBxX6RXnwvbRo1la1S4t2X5o2IVZNwO4oMtsI/h6/rmvkz4s+E7a9guoLeNYL+KM3FgQ3zpLbndGrPy2+LLHGOQ3BAAy8yyatltOk6dOVJ0antXGTcpT5Um7u2qaWid9NEz1+G83wmZ1a2Hn7OVCtHlapRjBxl7qi7rX3Wk3ok0ux3+p6pdRwxSp5TiSEIz9VRJB918/fyAMHgg44NeNQ3SxavgKYsN56kZAI3ZZg2RkcE4wM9BjArL8LeMLjUbBtGvLgyX9mBbz7/AL5kThZEOcmNhnI4246kVka1eXUWqWgZAjoShDEFSrdSem/HYEgEZ6daxWZU68KVazjKLXNFJ/F7q11013d+m1z6jD5bUw1Wvh6ig1JNQTveUXyuMk+zja/XXXpf6p8NeK1ubYx28oY7QmG+8DgruQHoM53Yz2yOBXU6BeahBqLlpGQTna3GA6nOD+Gen1yBXzb4L1drK4VZiscAZyrKNhLjBC7ucBict2xjjvXvNhq4luLSdmUglN5XqycZC9wTkds8cDvX1mExEsRSozlWlzwmlo9OV8vRvp122R8tmeEjhKleMKScZwuno7Ssna7XRp31Vm+m79OvYLgRTRXUZMDwukhUBV2up+cnktk4yeAR9Of5fv8Agqn+z0dL+I2n/FO10yRIrp/sWqXEBEUEoBANwjKuGbBwScEZPcmv6j2u4LiyBSQpDJGqymUA7UA+6nfcCTgZ75NfAv7dHwVtvip8HfE1gsRlu7SwbU7AIm6RmtQ7yPtByCQwyu7sp5xSzzDU6uHUm5T9k+ZSSTlG/LrF30V2rW9OrM+DM2eFzCNKqlCjiP3Er3ai2462fnbvbfsfy7+Fre1iaO4t55WtlaJ4Xlbe0a4w435+dOgwFw3TjBB9/j8NWPii1R7VY/tMKhmMXyyYQcAdc7s8gEEHrnivDdH0U2gPhqOJ4X02eeOK4V9rrBE5Hlzy46g5AUrkHqOmfU/BGs6nZ3stikAS706ZmLK37q6jOD5i5ABJAwMdT1HSvj6KTknJXS0aer0s/Lp/wL6M/WsRVfLJ0pe/RSUd9VKMXqlo+vTzd+u3YaLqOi3uGjlR1YK78gyFuoPTpgc98jivQTcTRoJDu3JGApbIABGGU5BBGcDP6dK6Ox1/TfE8Ettq1gLSWN1MV1Em35lJ2hzx8rE9CQRg4J6VBqWnLGssDo7oyYE0WHVV/hJPTjPT19ua9SjTcbSpyur8zgnfTRWtu9dLu1tr9T5uvjXKThWhyTjpLZqV7W5Xqr31S7aanC393DLFMksBCAfO+Mqshzt+bsCTg8YOOvr4VrlzNaF4XA8mV3LSFuRj+A+oP8IwM4Hpmvbr+5TT7a9t3ZZAYGMhYZD8HbnvvGcDqep6kY+cPE8/2yKSZJ0xE5J3Hoy5ycZGMd+n6GvXy/DOcneMWp2vdecbWtd2s9/kl3+OzvMI4eE3JtR
kpNWklbSKvrr71nayVvvtwPiia2BklaVEhSMsrYADMuScHPKk8kdMdz1H5v8A7RPxqi0q1vNL064jW+uHa2ZEO4LGCQI9oIwDk7uR1yOea96/aD+LFr4R0K6jSYLO6iKJC5DNwwYovJAbIw3OeRivxx8V+I7zxNqt1qV5K0jSTPLGxJJAycKpJ6AHA6/UYr9CyTKISnCrUjzRp8tre8uaLT0uuiv5Lbfb+avELjlYShPLsHKPtK3Mmr3nFSUddNeml7JNOW5l395Ld3E11K7mSaQyjPzENIQWJPA4PQcZ59aohDuJP8XzMMcDd19RjHU5B9eKjUMQc52k/KO3TgjJ4yOTj6981YAYeuNuAxBOPTtjGP0B4Nfa2UUlFe6lpFaq2ltvLt87H86Va86s5VJSblUlzSlKTv0ur7PV33XTccVJGB8uADkHC4Gee/U8c9QD9KQABzzk7QOh9eQvGeRnngYx0oOQBgjkkEZ6DkjaPfrnHTv0yAnI/vAlTg8Adsjrj0PPPc5FM5XPW+mjdr2tbTXve++u/qDD7x24UZwT94H3Geevfnn0xTUJ+YkHHI4BHYdBggD8fwzxT8/wtknuABgg9AeeTjv1/QU1tw5RmHqCRgdRjGCQOmc9c4pNJ7pO2qv3Mm4vt08tLrfb1s77Xe4/5ioGw46k4B2nqNx5wO3Jx+XLCoOOOfQ+pzz6EemDjpTwdoIYjlRux0OOmDxjjk/pweGgLgsTyCNh7kAccjscdePT1NO131+X4/h2s/MpyU1dJxs+j32dnptfy27aiKWB3EDK/LnPTHTA9Ov1x+UwZsEbgwcAkEDnPcYxj68nrx2qIbTyAxIyCRnBPfrn88Zo4G4gEAA4OCWHTb7YXOBgAnoDkGs+XRrzW2l0rW307trq+pXu2aUVfS70W1ttO9/Pr6SHJOQV6YYAkqAM8jp17k9vTmnBjtIB4znpwCOpyOmeORx0/GAKcdDgnGT3z0Jxnj/9Q7VYRgFKHGRk5x64xxz9T6Ee9EE02tWktG7918tNlq9CLLRWd1Z82mr/AD73E2sQWY8ZwQBxjjjHfpwM+2MdRMcYB2AnBx+h79e3T04NKrAnkg444AxtHIB9fYY+nSn4BIC5xnnrt9Tnvn/PWtBtKWlr6/ivTsTKdw25AGATjjPqfw68jqBx1phCkjnO3uvBPHIzxzk9/wABijBycgAdsdQOnHf3A9+tCrg98ZwO4Y++MY645weKASsrbK69el+u776babIchwxZuOTgE5PTuOc988AflzOMcnJAIJXjAx+vXnr0/OogAMgoSwAweoyevHpx79euRT8g45wcZC8AE5IwCRx3HXsaindXurK616tdVv0ffqhtpr4FeyTb67N2369f6ShMkEE5JycjnBznHsMYzwR+dTAYUADITG455Pc47DHTP9TxEM8fKe/zZx1GMYBIPPbHr83TMwJ2bV5JAyOuST65z+vXNaqVuid+re21vktXp222vK5Iq7Tb0tb4dWt3fVdLP9bjCclcKQMj88+ucZPc+o5OAKeu7duBxnGR1PT9fpjPTgkgUpjyAOrDkqOmBzx7jPXPPHpmnbFznLEZzk54Pvwc+x6njOBWqfndbaO+9vL8fRvyaSbu3zXadm7LfSy0td976k6OMkN1C4+YcfUfTrng47YAyUxVG7BVsAkkkdRgevGCenT25OaKq/p9y/VCk1zNqF07Pv0SfTur9LXt5vzySEJucrkBQRtbKAkHJZgOOowOmO2ahDFMYZMlW3BhxGTjjqdxI49eh46VZ3Zd4lJZCgIB4Uj/AGjzjge+fpVOVImBKAsoGShIIUhiSWyMspPA6cD88qXNyavTS2/ZK1/W9vn6GDeyd3ezd7NWaVnpo23d/he+7hM0zNGxAIT5Wj4+XtkEHPfJPT1zUSojKHJwuQj7iclhnOPYAg453dOAMlgU7S+3CsQwfn5VwRjHUZPqefzyqtktGQQhAGcBSW+pHAPPp/LJNNaKzu7tJ6t
NKzt1t1tvv0KSSbtorK9raPS7tfe9tNGt0LI6FemEHBI4wi9sf3vTB9eprVsJYFuImDr5JQjdt3O5JGMkFSWHHzYGODjNZUigKxQHYQMgHcMgHcehIyccDr6dDU1k4SaFgodvMXbHnjYDhgBjAOOpOfTms6kb0pdHd6Xtsl0fe+/5WRrRspJ63Uo9ra2vv10Xy19Pq/4ezRyxRxJLJuliQOy5YySgjaZOm5UAI2gjbnk9q+9vhbLbu1i8s0TlY2EUaAqY9gUBic/v5VJ4ACY569/zp8BSytEUi+RZXBjSLCS28akElW6xgjIYYIYAZx3+9vhnfRBkDQB1hCPA0TbGMDjEq+d8wDIQCTtJGT0DV+H8U4T2uIndfbb0tfePfS13Z+W+1j9Ay2o40qTvb3U49VolbbS+t976abn0/rO1IUaO6t/OJEVzct8wlkjHz+Yc4805URHC7DnAOcV8s/EcJJDcNcORHskLFQCUTOI2J4LuCD8vGO9fQc95HPp80ai1ktuYXgSUNK0ilT5qP1zFnJyPmyORivnz4oyRgNbxNA6JGsi7kBkkUK32iZ0yN8s+UCSZH3GwuOvl5Rh406kVZJxktbK+jj17vy207kY+q+S72d1Zu972Sv12s3e2ie1kfn58R7VZb+VlkLqdkclyV+edwDktz82e2BgdO+K8Pnij3yxKSwUkl3XDEg8E4Jwy+nPJ688fQXjwRSTyyCNwgjGxZAML94EMn8JYH5Dk7ucYxXh19bwic4QqG2srMcO2fUduRnBbt361+05JWaoU6d2opLTdPSPotU3btb1PknK9SUd4t7rpey73sv8Ag3KVmSAzFiy7SuGwCGHt37Hnr7cYnkUk8N8wkUr7AHIxnHGfX0I55pEjAkLKAhJyhHAPY56kDvkqc981eiRjKA2AApJJOQSATleOeuQDxz6mvoHqtXZq3MrJXSabXnfo/M6aMW3FWbUZJXSs3tZvR238m7u/n3On2MVzYLuG6VkDMFHznHXjp+XA/Qe8fBg25luNLmlw/mMY9+ODgbVGemef/rV8++GNRxrFpFJJugkAU5wBxwQOcAAHnAzx34r2bTrhfDvii2mgDLaXDLIJIzhDuxhSeMEnP5fQ14GJcoVdb2Uk7NuzTV2vlsl0sref1OClyuE7q0ZKnLmT+FJJu99WtbaN9Nj7Js9Ekh0+4YRO91ZyCWJxz5sRPKA55UDGPXvXo2h/Zp9PlsnZkEtuMQ45XHICjIIYEZz6Z7VheBNag1i0i0+7RUuJYA0LngzEj5UB/iDd+nqCK6Sw064s9XlhuYnhf5lSMjCtGQNrAehHf+L1zVVZRdNNNaJe6r9La28n1731Vj6eMeZRlTXNe0WlFp6JdtNra72W+h51qUYs9Ule2cy+WpIDn5CV6bR3256ev5V0XhrVxEomcKXWUBlcllMbEZO3uFx6/LxnnisrxPZtb6jdxbSgQMySLwuHHKnOc7h6jkjt0HM6PqD2qM4UOsMwSSLoZUZh8w68jq2fY157qJvXq0rN7WaVrrVX1tsmnY7qMVFpJpWteN27PSy1X332v9/1GbVzp8N5bkeV5qSAjDbGc9OcZX06Y+vNdDd3UjC0jkk8po0RoVYlS6gAtj+9k9QMdfWuZ8MXMd7o8diWDQTwsQmMtGTggE5B+Q/d9D16cVpILm9nj00znzNOkMkTFtr+WTnGe446HI9q5qqTTcpWe+yurSVtbu3T177X9zDu8FLRJNKoo6axSdmnte2u9/vPqjwNfDWPD+PtBjubBwkzKR80OSAwH+zg/mQQOo7rS5fsuoZtr6O7AZS6RsC5/wB5eSRnjPU9x6/PXw8uf7OvzbPcM8N6kkckQYj5iBweTznJGB0z+PomlQNpmuzu8rwyGQyJlyfMQchcdD154yf1Px+LpWq1IxcrNc8Y20nfkvf06Pt2Pbw6vJtc75klCKvy6pPXsl66dD7A8N6rHDIsone3lkw21jjaBjClM9Dn1PTivrTwXq/
28WttdOQsSRyiT7vytnaAecfd59RjPNfn34a1u11O7ie5nKSKUQKPlbCH5D1Pblh6Y564+yfBus2k8sNvFkyQwKJCpAyQBtB9f4vTH5185inKDg403ePNzSsraJa3t0VkrP8AAzxVJSpxS5ubdzS2a5dn/LulZfmj7G8Pah9kUShQ0bsV8084QYAJbIA9v8BXqGmapNdXFosLFzlWJJ7AggEgenOfcHqa8I0PUoIrSO0lk8xZbY8KMkuwBRc56KRnJ5x24r1TwbNFawnzZlMyF5p5S/RP+WaZwTwB049OlcMMbvFSlGzvrtf3b21tq9NOvpp4bwsWpPkbqN3UoxvFRbjrLXVrVerPpbSdUlOxS2DGq7Vzk56kN6dPpjoME12K65NMzYYIsUagjHC+pPIyxxxjPfivn/TvENuswRJjJ5u955OcIFxsUEeuSSQOeBjjnpDryxwhFk2yyg7yD87A/cAUdjzkk5wK7qeMXLbnUVa6eqlbR+um3y0FSy+KlpGdotSd2lHXl1dtNF0euvW2vpkGtzXNxNFuaOGJRsH3SwOd3XjHQg5/wpZdYyHiSQhwCIcnOGAIJJHQD8Qe5wBjg7C7NpaPd3TZaZNwyOFXPyDHY4PPWltdRa8mxGpEjbdzqPl2sThOOAfpkfnWFSvKfLeb967s7NtaK6T6trqt1Y9ehh025KDUEo2kko+9HlvbutNd30Ous4HnmLMSzSMDvzkM/wDdTGeFHAHOeg2rXsnhfw6gVJ5137yCxbO09MAjHGBk9e2PUnlfCukLfywB9saQgblHG0RjJJ75z16+npn2awWOPMEZ/dIhCMBhmbsc5Jw30yepPFe5leU0JWr1VdWTgnu5uzacW9tl71n+mWLzCSTpU5KMlFKau02klbZaeSfbW+pqwQKbm3tbVBsDRhyDhRnqB2BUY6Zzz0r6J8MW62MdvHhWkfZycAc9dwxwTzg55x0ryfw5p8KCO8nK7N4Lg8cA5DZz0/ljpk4rto9U33gkgYBVIRETjavqT685UADaR19P0DAYbD5fGGIrSUJScHRp80b8vu3emrXl3ufK46rVxL9jBvlS96Wur0sr3tp3vt31PoaweHb1B2qQTjHOBnB/mO+c8Vr2wxl26bjyenOMYPf19enpx5joutKihX5fdghm3BV5ySOQSeuDjGOvWuwbVYXiBEixoFBII25IBPCnt7fLjFfXYfF4avBSVRXSTabTau1fV2W1+vrs7/K1cFVjNpq6dtnfRWVu979L+mmp1f2lPugqT/sjO33PI468fjiopLwBWAkG5VY4yADjoAecbsZ/QdBnio9aSNiBIGyWxjnqe/P4dO/bvVu9QJDASlBIp5I4zzjHPXOeeh59OVVxVGzcGm1ro4206Oz8k2rLX0NKGDnzRUlo0mk1Z9NddbrVuyV35mhqutMU3LIMIcso5JIzng4OcYxn/AVwF1q9y8o3SusXVVByHOeCw9CeT9ccdob2efLRht7N03ZIAP8AEW6HA6H65rnriKeQ7Q7FgADgdRjt6D6kcjOBg18bj8dXq1W1Oajs7N+7tq0n0TsrJp29T6nBYWjRgrpN/FzWersr2038+6S13L11f3FxIIk+dm4GBgKOOcjP698Ywa7HwzoMty4mufmQOCAfmO0f3enJPPpx+WP4c0WSaRXl+aIA9sNzjdn29O3TpXsOkQw2ihFVXy21h7Z+XJ4wB1465x3Nb5RhJVqirYh1FTT5YKVm5W5elttNX/kRmWNjTpulh1FTtrJ6tbK/lLy0T+R2Gk2i2kQjWMIuBhl5LAgcEZ6joM88kmuhiYYC5KjkHHHPUdvf2+nSsy3G5B8wO0EE54HK4A+mavAFcHucdenAC8cdsfn6HNfcwjTp0oRXKkorRpJX03vbf52Z8RWnOpK7vdq7tZXbaTa13s/k7K7628AYIY8nOSex6kA+3Hv6EUySVEIDdwQDyeevHI79SRyOnSmpJGxHVj93kYyR2DDvjGR2zkDtSk8
gsFwAcZAyCOMgdx0xxxj8K4Krc5Wg03qua9t7X2tzWs/ltcdNNO7Ta7XSV3azTbX9L1tnzyuVcgbgrYbjJ9xzjjkHIPfFZFzKyxSSYI+UlAAMknAzjvgjOc5HJ6A46RhlDnbyQ2cAEnjknnJPAx/9aub1NDtfyVVm54zgBTnjnPvz2z2xXj4zD1adOc/aydviilt8Kut3bVq++j2u2elh5KUlBxir6XWrureVr+i7K+7Kdmf3AllILSA4HXAPQnn26Z65GeM1UdI2nDggEY3gnkn6HsOcDPGMeuMk300CvE5IWP8AizkghjjBx0BwBj0JPNSW0qzRGYvksCcbucH3656DtznnvXjU8RSqexUYvnhbmu39lp9dvRaPtoj11h5qLlaWrUVytbO1rJq1jS1WeP7MFTGdoBOAeVz16HPOP5ZrziS7mjuiC425+VSDkZ6MccEnP9feunubmOWOQA5K5x6cHqTnk88HGB1Oa4W9uYIrpiW+cYAy3ynGcHPAJHY4GBjA7DhzWt9Yq4WrCaSvGLtJK3LZO/lZW3Xe6s7ell1JwVaLi3Kya927Urwas76XV9UtiPxDcymNSrZZlyWPRRg5OCO3fno2fr47rLsWxu3BiMM3Ur9egx35J549K9N1e7eeAtGVYKMZHIAwc8emMf0ryXWrkGOWNfkO392D8zeYfunPHy5HT9cDn4DiylGvCqoVJSco3Wjm5NcrtF83ZWXRM+74ci48vupyWj6JX5Fd6LZ20tffbU5OaKBblpiB85OQDySO2AMBTkkD0zznNc/q98LRg0UhjfJJbJwcZx1zxxz14wPQVrgbIC0p3zB9xU9SPUNwMdMnIzx6Zrzbxbq8cbherbwgVTkFf7mB0wMkYHPGeAMfjWOoSwtOTt7HnkpSi/ibdla62XfVWvbff9QyujLFYinT5XJJKL0umlyrbZrqr2733K+pN9rKXTyCXa4EjHAkO0cMpHoCeBjd6ivCfHc08V232dBI821w0hIVUOeT2z3HHqfWvTZLqBrRpWnZVUgFBkZIBxg55XJOR246GvKfE+oW9wzQM8hmIBiKkk4GcFjjhOOeOQe1fP1kp2te91KVreVrPRrro7vVW3P0bKMLOjWvy3jBeza5bLeNtLvXrtey+Z80+PLVZdF1SOWFZvt6iOQgjKFg25YxjKkE/wD1881/OZ/wUE+A7anepqGnWkk5LKocx7fKY7isURAbIU7ssQA2RnbX9KWs21pdLLZyRM00jFzhtzHOMED0Tqoxxk4zkGvlX4xfCbSvEul3tzd2UN2sCC3gW5A85pTuEZj4O4pgk5H45rryfM5ZbjIYiMnZS6fatJJbJuz00urpbPQ+1lh6WJw1TC1qalDEx95O7vokm+W/e+qaVz+Ffxt4I1fw5qV3FeWkqOjtGRKhUIGJzu44cgZPGOB1Oa8dvdOkUSB8BQW2nbk45yyOMYA9APx9f6Ev2wP2WvsV1qNxpVjLc3T24lmZEJ2MgZiGIUdMgbuNvTByMfi34w8E6loMl1Fe2ktuVaWNkMZIV8gDPA3AY5wQPXrz/R/DfFNPHUKclOKqJRTV1zacmlnrrfZrXbfb8R4w4FpYfnlTpuWHqRc1dKUItuOt7xXRbrd67NP5tltWC58sjkLwCxHpkZAGc9eM+lUprTZklSu7DqRwCD2Ppu/hGCeTXo1/pTQAqQwLY3lV3KCDkuoGMbsjaO3PrXMTwyIXVlyoGAw5UAghQB/FnB56rnkV+hYbNI1uVJp6Wavr9nddk30100sfzrnnDf1Wcp8nLq4qMVddGnu7XXRtL11Zx7xsu4Kgbn7rE5Qj19RzwAfXpxmFotsYkICkkHIHI2noOAQB2/Mda3ZrZAGbBBYk/Kflb+8QT1TI79sjGBWZKpw4IzgYC/xEE8N7YA4I/LtXuUKsZW5ZLZN23e1/J+dtL/M/PsVSq03KM4cttk7Xto221ffTtt1ZLZ6pcWp/dyOFJLGI5Cu2Rhu/zjGVBAwM9el
em+H/AB3dK0KyOzMjBPMDFWZD98Ov+38o3cbcHg5ryNlYEAgD5QTjtnOMk88Dg+mOhpFfyslSQPmyFOAV9yP/ANZx1HWsMfkmCzCDVWnFykrPRXd7K7su669Ldzoy3P8AHZXOPsKtRQjP3ocz1V09G2u19b+ulj7b8L/EFXHltP5aOrCMJ9/OB+5ckj5ZejE5wACOvHsuieMPPtyyyKF4LGI5EQTiWOMZ4OCMsM564z1/ODTNels2T52JVSpIOA6ZG4uefu4GPXOPavW/Dvj7bgR3IiKZ2g4wynlndiQN+cADHqM8V+ScQ8AQi6lSjSi1vZL/AA2bS67Wv8+p+z8O+JDqqnSr1uScWrXd5P4b3TeiVl17NO9j9CNK8Q277ZY5GlwTIiQOf3sZC7YypzsYfNgDPfrXbafqglSKOKXc4LMhaQBo2GDmRiDvZQeZCQMdvT4x0H4hGeF0DxJIixvIEAyzoCEG7PBfLAgDjvkEY9V0Pxd5whUSRrIxLK0L5wEwVgk6YwCdx5OR3r8fzXhWvRlK9GSSfKnZt20WrS2t2+6x+s5dxXh8RGC9upOXLZRsnf3dN+mu79W76fTcF/DI6r9pXEjJ+5V8oZIy2VhbA2CTcS2Rj5R7itqO8xPLNuxhhGVf5pJdw55GAqsQMjBHftXiWmeJLbzQxuQAwVpspgMUHAhcMehJydo7N6118GvJ+7eKSMyFCszggkp/C8YGQ0ig9e+eMDFfGYrLKlKfLySUUl8SaV7xV02kne/XXrskfS4fMKVaHNCe11ySmt3Z3XL3fZ36O2qPSo7oNPBBGFV1lCyIGBil3AnYH/hMfGBg43Eg9amEyyWjIZI0Qg5Eh8x2fOFXOAcpzg84z05rho71RGsxkjXzCWfyzkvgjYwXqsgyckHLZ7EVcm1MR4WKJGknZXVCSsfluB93g7ZQVyx5xwO+a8yeGk2kopPTp6X9G9bfLY1+taNyaVvPd6Watfps+nnqdHN5biSCItLGiB2V4/miPXcxz85HYgAn8jSwyK7J5U7xmKAyCMoYwzgfdwc7FOT/AHiO/XjGGpSsFAfbyzzu0gYnaAGES4BZUyM8jII44FV31gl5XDxyhhhWkQ7wG+8sfPBOFweq46EnNYuhUT5eWzT8/K/Rp31+fnqZyxSVleK8+70emyb366327a4e1iKNnBjV12ytsSWYfwMcMWZsgD5RkA9AKo3U+1mMbkKyFmKHekc3U+aQB8xP3ABjAJPArFm1B4SCsfniUlVYSAiHZndxjIdcgbien8NZkupSo0kjAvbSRF1RGyGlzwQMAknHzMSM8dccdMMFUaU72d1dN6va9l0vfRW6O99Dz6uNhFvVK+nvab2St563vfZdb66l3dyQsoVkkdlUMXPMUbZMgVsne4AU5IGOgzmsm4uQ0YeQlxMxDLKcSPu+4YsE4RMHJ4LZ/AY15qewS4DtlgGIX5/KbqkbFvvL3OOMj6DGlvRlIFJRHO1JGbawzgqSTuOxcgnA6Ed+nqUMDJ7pvto77LyTveyXa1tFv5VXMXduM77eSumnq99Vo7/edMupedJEA6RxqZUWONwUmkXaUWbj5VYkgcnoTUg1IqJmJd2iwEaFdwiboShyDuIwC+PTArh2vIogQzRiRipMqPuVlQndJtx8gbOByTnPFVn1aOKa4kZmBMY2xJlgVPTac4KsQCxIJHHA6V6EMC9GoO+mjT6uKultqtunnqYf2po1OSWtnZtO1o2eidn118l0PQ4r/wAuUyFQQVkbg7Q7sB96TB5B5b5fm454q6b4rFcoLyLzJVUyY5Vyc7URc8kDIJGAe4ry5NYESqCWYuzgyfeaNyBlWcnAVeArY6EkjORUA12OKVWcpFIg3R4OFA7HPO7bzyeuPbhrAVHKPLF38lvovK+z31+elyObYdcqnJW0Wruvs+nfrouzPTfthEY23MTSRMSnJwyErtWQcbnOTg8ZyTj5TmKe/EKri5TzCMSRRrubHoBu5J/v5xj
Oa8wl8RwqsrG6iJKruZGyRtyWYsMAAg45Bxz7Vz//AAlUcaR79hV5HZDE5OUJ+SGfI4BOSzA88e9d+HyzE1XZQl0duXl1tFaPR6eXXfa5zVs5wkX8UZOzdk1zaONlbXW73fZ31Pa7rWIWkBSRXmEKFo4UyHbByqjcMy+r9jjiqD6yzsY1nESOTEwdg0kLDBBbkYzyA2McEYGOfDbjxhGq3Cx3UMLxq3lsXAwWPzESkkOVHB+UZz0A4rl7rxzY26mSW8AcFjNtfbJMGwfJY54BJznHIz3FethuHMXXlyKnVeyTUHKzdt7Kz1drq+r8lfy8RxNg6d1KcIW0fPKMU7W+S33vezPoWfxAiT7Zkw2+MojvlDv3BliUj5uFBJyMHj0qq/iK1WWKLaZFR8CUkqnJOEjUk7mXBG7pnHWvk7Vvi5p0SOIL5FmjDqwBEsm4YygfK4HK7RjGQ2PbzHWvjNfyuRYzNMyqFC7DHEoAOcNuJLj5TuwfTHp9bl3h9mmK5U6EqcdEpzjKOvu62W2+1+ttdT5vGcf4DD3tVjVkrJKlNNq/L5paXaWyt9x94XHi60g3iW4jSJUAUkgupbOEWM48yTIG58qQccHOKpH4g6ZAJUXUYI2eLPmSsG8vGP3YXcMEfxAc/MM9M1+al98QPFOoqyyX8kSK7sDGxDKODgNxg4PA69frXOvrmsyFT/aV2SGLKxkbgnB+XJ4yRggjkgZr67D+Fdox9ri1GT5bqMW47pXemrur27X7s+SxHiVVnO1LDNxTSTnJRldcqu0m77N9n2R+q1p8Q9OjB2ajGEKSb2LgNJK20iI5YlCcHDcgcY7Z3rHxpYTWxIuo7dizmMFg2yMEeaDLnhuVwNvzcgHivyUi8Q6/Bu26lcfeEjYdiWbkjIJGAfTPJxnFbOn/ABB8TWuS+oSspLh9zlvMDYzvGRtxjggYPYepifCnROljIXvflateyva226a/z1DC+JdSLiq+GaT1k4ybUdY200drdVfX5I/YHTvGMAe1haeOeKEeU06zgKrEDbFjBLIcHe2eMCuittZF2A0kto7F2WSMsJCsJOXWPLD0Use1fkrp3xfu7WdXkmu2lQAoh3NEUwQzFtwwQcFFx83JB459D0v49pbiPfNMGZfLnmLne0Z6AL/AeoJ5J4xtAr5bGeG+PoNulBztrdQWuzutVo7PbXbsfa5d4gZbWjFVZ+yk2rubsmko6aq27W97d90/1Vtbu31K1jtIpoUVWdyVkVNuMFTGvI5wd2GxgenWwiJHOObXYzGOSEANHFsHyyHOd55OSCOSM9M1+d+j/tB6OltEBcywspba8pIMUeRvBYElCD90EEtk+ter6L+0Z4ct54YpJzcQ7f30jy7Iiz4K7oSDtWTB3NuJG33r5qvwnnGEcmsNWaV9HB6pW31srq/e+61Pr8LxJk2JtOOIp68qScoKnLb3feaVm29emvqfWN1pk18jeXJsicMj5twSAnKsjhhyM/eA+YdQMc4x0ea3gtmtrNGvPMAeeQYaNATiZVKkFRnLLn5iR8y15DN+1L4T8praIo7wiMrDBDtjRjuMvmSb/wB4JsDJ2gAj6Cue1T9sLQ7e3kXT7ZS5RE8r7PuYBCQQsmeUY9scepyCJo5NnTaprCTs5LfTdR1d1ppqltvY9JZnlFnKWJoJRSbSlGSTXLdLV2s9N0unTT6OtvCtxcNDd3dukko2krIAoIB+YAYAQMSpLEnk8gdK7iOx8N6HDPd6jPpmnOYo1FzdSoBBO4OxlHG6VQGD5x1FfnH4m/bM8c6hbGHRrW0tIBI6hnVS6QYH7tm4OOhV88YPBPNfMXi34tfELxczpq2u3ktvLN5ogjd40BOSudpzlRn5j6njkV9FguEswxKj9bqRw0NHZTfMvh0cVdbd7bfM8TMeKsvwqf1enUxL5k1yJcm8bq+l10dm+9j9OPiL8Xvh74eldofEdrfPHGYhaacA0nmJnPzqx27yxO7BxjAx0r53b9pfxAZ
mi8OI+n2zzP5F3MwkuynZWQgbFjB9STu4PAr4ftYrmaNpZZZjGGyzmQtIzEj5iSSSncjPfk5zXoPhjTbq4kiEUTSvLKFTBJMg4EcgwOAvzYPX6549Ovw5gcLRl7ScsTONorn+Ho7uPfRN3bS8rtrnyvPMZmeLo06OHp0ac5RaSi27ScV0taSte+2t1fY+hpfHXjTxTdx/a9d1O7aZ4kYSTPHb7pSdqeUO/GCS3ToOw/Qr9kv4Dal4q8U6RcSadLcxBlnnlmZokX7pIgJDby/GR6A9RXz78AvgbeeK9U0+a6tRehpI52ib5QioQrHG0gfeHJYnrx6f0sfss/A/T/B2maX9psv3sEcEpE2FKRNgxQK+zJDc8YyMY7mvLweEpV8RCCSp0oyj7S0bc/wq10le3bRLXRpI/ZKGEeBw8cVirubi+SMm01eMfivom9lZvTpsffP7J/wsi8KeH9PkubQwXaxKImCCV442ACIGAXK8Hdxxxz3r9evh9pWk21hZSOiwPEFdw5BMrMOZQ3XcMcAjqT26fHvwqt7ObSrcw20kF9Gqw4RRJlRgKxIC5UDp0xk5HevtLw74O1a9treGAm3Eio24MSY/ViuBncPuxhhz3AFfrWQ06NCMfYU/aOySa12atbRe93b6O9+/4lxdmUMRWrKtiFQhGdldyvbROzbabsrp6XemnTe8Va5oSWtxDHlzEoTyUO4ljn5hk5IGOcdN1fEfj+OO+u5Zk2ALJ99z+9Dj7wjH8eARuPGeO4r6o8ZeCNU0mc3DSLcRFcrKcx/Ng5V48scHpjdkEd+TXyz4r02eFXNwq7i7uhBzjnkdcBcYzn29683ielWqQlUqU3Fculo2tZxeumjST/Xez9HgaGDoSjVw+Kc+flXM5xvry6LVW11t+p8o6/pyeHvEsOtWJEVnqK7J2AKwtM3DSbeduDzsHXPBwM1FrTSzp9tByU25l6gFe4P8S88AEfpx3HjSya50i4Bh80q37mOP76yqc5jGML5fVhkhs9RjnxqfxGBpxtZAYcKImUfM6zAkKSowVEnO/IwMc8V+NV5RoVakNYxnLnUV0kkvOy79k07n7vh5TxMKNaznUg1SlJ2leCcUnotHa1ne/wAjutN1CV/suxwGUpu3HIDn7xxxntgnnk47Ae4+HdVSGZRdzDYuzPO1Yyo+4rDcCregHGMD3+PbHX0tbiCDzf3Yfc7xvlkU4ITOB69AOORk17hp2r201taz+eAG2ecj8Es33ZN2cscg5OPw4NeplubqD1s5RcW+aVuqSWnVaX131PLznANxUZRdpqVpKN7NpNK7em17+t7H1qmqtdWG63diYmG+MYYlc/fAzz1HzAj2HWuW8S3H2qye1nkLQ3Fs9lPFLy6wzoUfKkHavTceeACQa4Kz8Rx2Cwo9woVlADLKNux8YBGOGOD8nUDHODWre64t5C1wI2lKx7TIANzr1DqgPKce3HfmvYxeaU61NuM4Qm+VSpp80baOzWui2u+rZ8fhstlh6sZqnKSU+eMnFp3XLdN3tbzWp/NR+0b8ONY+Dnx68YaGlokWmalqk+oaZBFlbYW96fMiIJB8uOTaxzluR+Ncpop0/XXlFvIbDVoFZJctsZSuMggnMo7E8e/Nfov/AMFE/DKamnhrx/bwJJPD5ejarcnEbjOVtXTCney4cn5htyOuSa/K6zW6gura4t2MN8dy3SSMFV5AQd4bHzBhgL9Dzxx4lGvHnktVdqUZR2s7atbvyT2totj9BhiHUoUpNONRwSk1rdxUUrrVadG7dLn0x4S01b2KeyvZgl2kLgspxHcFB8kikDhj1zycjp3qcz3NhcPavO0qJGwaVhujdVHBdv4SAADwce/bgfDHjDdJFYXSiC5icmScnGYmwCUb+JHAJBHGAQAOK6XxdqsVkn/EtnjvIriBo28sgxrlTuLnqj84OScHgc9fewUvaVFHfmXe2qa916aXvuumq8vnMxqSjKpKbtC1+nu8tle+l3Ju6X5
WPMvGd+Da3tyD5IRJTGwI8sg4yzkchjwckHPUYwc/Gvj/AMZ2XhXSL68vpliRg7gF9sknLEBVzyrHDH1wfbPtXiHxk1lpmpxXjQLERNGBI+VkIOUIPGWJ+b8B26fjN+1J8Yry/nm0PT7oTSpcGGcwuf3USE84XjgE/Nk8cgYGT9rlODcpRhFXlNx5ru9ldX31SSTW1rn4Xx5xL9Qw1VuaaUGoxbestOVJXTd21a1r2tc8A+NvxSv/AB74jvPNummsYpWhiIOEWOItsjAHUcnnOfbFeCsoOVAYqcbeOCCe/Xg9zg49OafKskju7SFtxMj7uWDNkk5z6nnI7ilztXHXIxgj727IBb1HXnj0HWv0zCUFQpQhFJWVtlrotWmutnq+ltW9T+RsyxtbMMTVxOIk3Oc21zPVRbslFXbS7p9RoUDB2gYIHBG0dh+Hqe/15qYMCpUjDED5QCVLY6g89enp0PWosEHIGSQpI4POcHPrx0z9CPUGQSffjjoD3PXI9O/I6811HmTbsnffo/VPbv66b9d3kgFcseANwI4+XnbwM5H+PpRkYY4XJ5Vs4yB37YJ+nOB05FRkfeJx94Hnnk9COeg9fpzjFKwbDA4C+qk9+fQ4B7gD0I7CqSVm3forrZev/Avv3sc076W18vmvv6aeQL0TPLMc7ucZ54zx055688Ac0pABOcDqd3Vscckdfwx7+tRB8oB2HzAjPUcAcYxjP69QaduJBJLEcA45J6ZBOOSenByRyOeaknll2t+PVb2/Ttewbg5wQNp4UjI5GPvZ64PfsPxp6lfmyRkA7e5boAQOoK9OPrgc1FkAHAI5POM4PGfXk/qT1qRNpJOQTwB0yc9QfXJByOPfHYLWlru17JerSvpa9r3vr8r2YKx4+Zhgt6gc/d7dc4BHXqD3FSq27IYnOCCRznr0GeuD06Y7mkZV2kYxkBgQCSM9vTdnqOcHB9ahLHr/ABbgFJPXrjPTpjoSMd89gssjByqsVGM89Bjlj0PPqDkjjj1Oe5HAOSeDj2PHbAxz3xUKM2SSRxnI/LkD1Ofzx0PWcfMp4GeDnOdoPTI47/55BEt2suvTTp0XlfTXYOqXf/gfP7k/8xRzkA4JGc4wO5PHrz1P6DFSEH+H7oGcA8qPxxye31xxnFMUEnAfDEYIHb3HY+uCeeM9qsqFAwCSwwQ2M54zgH6DHPsMdBWbk9k7Nb336b/NPSyv94Wt0tfX7+o0E4G3jjByOT14OcYJ9T6fk8DcBgEDg46dOp+vPB9eCe5QA57jrg++OBn0JwB29KVQw3ZBxnHtzjGOuffj1xkYy1KTe9ldOWrta67v+r9VoNxkknpe6dlrZaa83dbrt23BiVBwxJ42jHGOO2Mc+35js4YYBiOR+Y7+vvnvxwM4oOPqcDgc8dRyRnnoT7j2agbiA2AOQuMnIGfToPc/TnitLWvbq0+nW1/w+fzJeu1+ndaXTfbp/kSccYJJwCFBOcjjJz9D3z06dj5hhsEHPA9Mnj5QenXv69OhkXb1yo+X72e2AOevqeo6cH3DzyHJ4GMZznnggD8O3+Db79NNen3i12dul/6tZ3209etlIrZ4PH94jnk4yCTwMcc8fiellSg4+UjBwOuc9wTycc8e3fmqKgDnJJ7jOMnI9Rn+QJ69TVgYzyCBzhgOQPr745HfOfY0paWW7er3d9Nremq1J5bWtqk1be+ltNdLW66a6eRafaCGORkehwcjngD64/IUVWEhOBksqkqR/ESPbsM++R70Vqtv6fRbPdr1Id7u9/K+9nqv8/medyyEttACKF28LwSucMTnjPrnIzgc4qqpMbhnJAwSVU4LZztIIPzA9wOTxnpmnMSrgMSuRu2HIVuuMk9Ac4ORn+ke0BWYKSw6EchBnkkc5xjIyPqKmLUY2vvbRLr0Vl6fnfqRCOjXRpK3e9k1fzaTvttq7k3miOIgKh34H3dxDEkbQQQVI7H3J96gYtuQHlSSF3nAbb6Y6jn7x4HSo9j
lWZAdpwVOTkN83fqMg88ZPPSpCvAyDvXl+eE6/KCemOOcn3qrXd9Onndaa/hZdunlpZJX3u35a8qtp5dbPrv0EQyBZZdx4O1kGMKD3A9h1I9fU1NaFluFbBUbiVkGF8sH0wOvp1B9Oua0YKktKMpgkbQCX3YEYJGOAQevFXbc7ZF3sSWwNigNsQE4CkEZbnjp3xzisKsrQlrrZ267pNX80r7+W6OmmnfZbW0s39nbrp16vS7voe4+Cb1o3ilWRQQEjut4wAgPzIqZzKWGMgFSMcda+3/h9dxF4ooCgRv3wnlBEK24GW3rn7tweFOcjyz8pr4J8ISW6SRyl2XCqwjZSGDA4Em7o23HzjA4I56Z+z/AepqbUyhrZJCkbtkB5N0Wdz7vlysQbgYGNxxnivy3P6HPUlJLVveyte8W3tq7vv2XU+zy6/sbvdJJapaaNt79PwvY+kprxYbdykkUG8IrLayby8jElrgIVBCPxgZO/ByRt58H+IWsD/SFZxJJsWIwLJujgGDsaGTH+rGf3kfIXK8kjjvY9YDo80rRrHIjSWs1m2W8sKAiO2BslX5vM4Pl7gRu3CvEvGd0siyAxlCkMjW5YhPJUn5ZQ3PmOCDuBAOcdO3i4Om1VgnHXS+lusVv11WvZq+nXHHN8k0rO1rrytFr0vfT1112+afFV4889ykhZGWQn+8DsbCjPGQuSR04JwQCDXmNxbOyPM0YZ8liSACFJyHxn5QOo9SSa7jxE8rzSxI+452My4+UDO7cuc9PQ/XrXGGXIO58KV25HSVuR8uOnoy84HcjgfpeW05QpwaenLFNX1eif3Pz2XqfONNzvquu+vy1/N9bvcxhHskJDSYxnLAgqcenOQOM44Oe2KtHOYjnCtuOcHluMgntx+GfSpGiZt5U+4AGTg9CGzyMdR+Az0ro9F8OT6tZXFxFmRbUq7Lg7nRT87YGMAHGSOo6GvfVWEIXk9dF18kl023vptu7XPUwVOdSUeWEpNtPd7pL52trZpddbbT6ZoEbJbajEXQwSpyQdsmT8wBB4AxyMYIPXgCvdrqwWTS9OugS6hIyCFJ/eR9VHBxtzyckc5x0qp4Z0COXSpIEUSSiM3FqmPnIiH+qPPRgxP4DNe9/DrwZZ+L9AntoY2lliVxbxrnzYxEf3q46hVON/XgLxmvDxOIi5ScuklFemive6as7tfLtY+1weXydO8eVSnBS1v8AEuVu3RdErbPq9S34K8TyXvhyAW4SDVdKkDRupw4MbA7CuMtwDkcDnntj6O03xS/ie10vUSIjewGO1vkVQrq6HawwOSSDnn6gnmvmODw9c+FNYkiiinFsQzS8Er8xAdW4ADDaMHB/rX0B4G0+KNr2aFhHHc2b31pGT8rXCAElTjksTk8euTk4rkrV0otXSjKKcWrbOyaeqaWrdtb9+p69GlBJuTkpJJXjJL3ly3vd3t5q3l0G/E+3uNM1yxVEX7LqUSyl8AKrEDCk9MnnnPJ6Z5FeYWzxwaiYgm1RIfkwCVbjDjP8PXcD0yOnWvaNXmi8Z6VYtM++5tpBaMpOyaKSM4j2dT8/OR2xz1zXlev6ZLol8RNE6XCKAEYZdkxgsfU8jnk/TBrmU3Zx2nF6LlTW6u7vXZro9OqOuKk07pN3jytW7K+q3atdt69uh6N4P1tbXVbSL7QxjkBXy25GTtBVR3C9x0Gepzx6lqRtf7VOpRyPBiIJhDgS7s7j1+bOBx1H5V822FtcFLe/jBFxHcIUY5D+S5xt4P3MD585IyOa90vfNl8OW+oonmSQiNyqkjKjhx7hvXv1wKyqyW9/i5YttrTbRW01fV3u3uephb2fO1Fy96ad+to6LZ+fSyvoj2bwjbxxwR6krrJHAktzI4OTEU2kA8cg5OV4HTnsfRXvrPWrTTteikzPDIsNzGhwpjUlclB3Hr156YrkfhSLDWvDmrWx2QTzabctBbk8ZCqAuePmY5OOMHn5g3Od4HuH07VLzQ5keWFCxEJy23L
HeT0wMdPrn6fMYyalVqJNupSa0291tLfTd6b20t1se9QfL7PllJ2STlHpFKKabdrtu90ndXPevC5t11YSRrvUujbGOMhhnI9T0Ixj0PWvorRdan0K8SdRKVupkEYU5UbsYDg8YBJ46AHqa+d/D1mljf3N86EiO3861hX5kbIITaeMlSMn0yPWvV/D+pDVNOSKaUDUWLSwwKAHRARjaR/Co69MZyCAcV4VX3pq2sXBQkpavW1r2e276O7S7noxjzWtNONrSjUV+XVWej7dLPa59h+HvGbCVkldxI2yOFicY3LwoJPO05Oex6jOK920TxGsWkLCbiWW8njczSMDjnGRkHhQMY7emT1+LfCjTahf2FmSyyRuHlXlneKP75J4z1GTxgccmvozw7coqarJcEJBao/lHdnzAg4GScjHJb/gPbgePisPTpJNcqcmpSUbOy0s9m7vXa3VWZcqCi1yRi22lJRUWmrpKWqf3/f3f0p4a1qBrPzpJVRoomQqoBL7SCN2SMZyfX3Fdr4fuBf6hHclsxBc5JypC+nYEc9vUdBz8k6Xr9xPbM9mX8iebyll5GEDfvDg/gB+PNe46F4kSGwW3iZlVUVfkG6SQj/WHAwVUnByM+oHSvIdf2VTSXNBKzTfpp5fJX/EHgXGMvZ25ZS/eSaTt8N7pX/pbau/umtaiZpo7C33NFhUZl+6WPBzjoAenXnPPeu68M2EdnEksrByWDL5hBOV6Z4ySpzgemOcmvHPD0s8lz/aEx2wYVI4yw+c9nBP8WcHI9Pwr1bTrhvOjJGwyD92rNkKowMAD+PkYJ9c4ODXfhMTGpL29WEWk404JJtRT5dUtFJr+nuc9Wi6cY0aU04KN29XdtR0b2W1uj9VofQ2gXi2WnmfyyJZxgMM8qT1A6gHua7HSrqWaXfEWLsAqZPyjPUejZ9f5jmvJbCW4lWKOMvhY1QkkoGJGDjt7jjA9TkV6xoVtJaWouJiSsSlUHdmOP4vX147DNfYYTE1J8lOMpRp04pJ2sr+63fvv1f4NHzuIopc05KLqN6v4nbS3uvordr2v6HokV9JBbBI5HYj/WKvzDOOQBxwp4HPetnRpriViAGVQxJbJDEk56YyFYAn8ODjNc94eia5J8zgsuFOOQT/AA9MDj7xOeua9Ks9Pjt1L7TnC4OB8wHU59PTjgdeua9unh62KcKjrSjCL5Um7NcttUtN7N6eR505wptx5W5uzurLe1lbXS703s7botxanLayRKpJaQjcQchTwADxjA59c9u1dJNrDssce/DEZ+UYVgBz3+8ffvjOK52O3SZzK6odgACjkhVP657n8OhyKrOFu1DnEZAUEjACg9eT15GDuz3zxWjjWw7cVXlyVLKPNNpXbS01enXR67bkKFKaT5HzxV9k+z00V7b/AKnY2l/mVBI5IyME9QBnj2AznP65zWxeXcZCs8oWMKCfQkA4IGR179O2eBXFySJAgeMke4PGO+O5Jx9fX0rE1DU5pFIR2IUABfu4x1J9R36e+M9eiOYywkZwmlVlZWlf0XTdfjp0sJ4ONaUZJuCWjskm3daLtddl877dvLqETj5JA7sQu3Gdq9uuMLycnI5HbGafZtDK+DyQSWH4j5R1yOvH5H081ttTMIxv3O2cZ+YnPYDtgdDx/Wu18M291cMkzB8OeCR97noQeB09efTrWdLMfrNSnFQtOUk32Sbinulu99O99y6mD9jC8ql425k3u7JaX01Vvy7HrehhEhVVXDOpB4x6c564XPr1JJ749C0u2KqHblsgrjjP9M9MEfh7cfp0IWBERfnwC3uO5wehOf59K9A04EQoCeAD1XHpj1/Hrnvg9PuMupNuDqON1FNQS/wuza0S2svPyPjswruE5KNveumnvpa1ui6va3mbcSgjABGeSFyBkDnuMt7HOensbsXcBiT8yjcOAcAbu20e2Ow6VVgGByOuRkep/u4ySB7DoanzwADznOQBkHjJI4Iz0Pb+Ve1UhKa3tdJ
W0XRK1vLpe133PFU1fsnfmT0fRaO/Sy0vrr1veyABjcRgH73OT2PPPzdRkcYGMVC2S42nCk4J9c469e/HHr34pA5JHzc4BIHIwQOnb6HkfXrUoKgcjnGMnk8A9OehOR6c9q5pYfT3m1az0s9Eo76Xvdu7jf13vtCVlZO2qVvu2V+97Nbb6bCYJXGcY499wJHbgjp24Prg1iX64DMcD7wG7nk8k4AyMd8/jjFbLEBS24AdevI7g49fUnOOMdhWDqFxHlULAhs/eIOAfUZzj65z9M1z1HJxdpRjJJK8o3T1W97ay2V07PU7KKfMpRu2ru0el7dO+1/udzgdUsbhxI0JZuMsM9eckEdCMEYAqnbXBghEcjDhSAOBggYw3sD2x09ua6XUJlt4JJGYMFQ5PHze5OBjGR3ODn3ryOTWhJdzoW2qhOQMknnIBHIH68ZB4zXymPjhMHUlVm+VtNuMZXV3y33el9/wPqstp4rGQ9lyNwg7c1tbe6lrfbtvvoX9Q1Q2rShWBV8k7cELnOR69OOnOemDivI9c8SStfCNAAu4Lkdic/MR+HJHb0PNbt/qqvLcAHd12jHPpx6DjBA9ec15jqrmS6Z2x98HP8I7gAjJySOn/wCqvyjPcwxNTljhMTONP2v8OK1tdXS1u09NUlvfR7fp2R5RQV/rNJOfs01zK0VpGz1urrT/ACPSYdTUWbKcuXUbh2BPUg9OPUdea8o13UmkvDChCgORnGBk9vr1PTr16VsNqb2unspJLkDjGccnaFIxgcHnB44+9XmeqXshkeZmUEEsSQFwO4C92XIIIPrx1I8XN85n9VoUmpp01H2l1qrJcyu7u7S7vrY+gyjJlTxFVpXUm1FcuzfLrpZWT00LmrahLa2e0Or5B+YYJGe5bj1wOD16dBXhuv3Ms10wBBlZl8sEc7Xzuc5+XC9ckcE1v6rrlzMTl2WPJ+VQSwUH0z8pOMjkjrnIxXlHiDWrubUIY7dEyNqb8ANz0ckcl8A5IxkYHSvy3Ps3p4uceSU1Si1GzSd37uqTVnZ3T63aduj/AFPIMrlhXzNRUpRu5tPvHR311ata+tjo7sxwWMoMgdUjJxuyGkweHHbBPc9hn1Pl99cxQW0lwFBYls/LllBOPvZAWNcnDdhz0rpbqZGWS3N1JtlVSyjtIQSwC5Jwxx3wADyTmsxtNSe0uFlKxrIvlqGGNq/wo3+8epx749fJ9qpRbUZXUbNWeycdU+ur6W0XbU+yw8adGMZpOfNNNxStZ3jeW+itrvt01Z5gmlJeXsV2o8yaSQBgo/hJ5PYD68AEVk+LfDsV1FLaW1kbhoTvkRPny3Z8D+NTz1JGeRg16tp+heRcoyozW6r+8bO1ScYIQ84BB6dDjjqcVrvTJba4uUgikWIlminb7kZbOSzdWPHQ8cenFeZOclVUoyaVo35rpJtK6vvsv8229fbo11GSfMm0k027raNlvey1XXf7vhX4i/BJPEOma/NcabFcSXcXkrCsaNcBpQQIlyMqzYO4Y4A5GK/GT9oP9iGymn1eO00qYalDbvNBHFHlSsmSyTkDbtUYw3BAz64r+lXV9BvnS6t7NS0jKkpugflwCd0jL3Iz8vII5GcGvKPFXwzs5gL69tIZoJ5YjcSvhfNWRXEqNlWJBwPlHQDqAQK9fL81xOBrxqUcTKKT0VN6vlcGk0na2lrdfXQ2rPC4ylOliFTrQlG/s3BPl5rO6um01dPSzXkz+H34pfsxa34Qk1B5IJWhtZJCY44WCTb8EkMMhdmNqLgg5bJHOfjbXfDl1bSSpLbyQujOCgjONy9SeowQeeo4wepNf2vfFL9mLw9rqa3cRaaLiC4t7mO38mIJbOk4HloysGY/dJ35BBB4ANfjd8dP2FL61tbq50jTZWkZZbzzY4yIY4gSSuQvTGAJM5PZDjFfrHD/AB7Fz5MXOUXeMeeb5LO0U7627N7vXofmHE/h7hcwoyqZa6XK43nSlfn5rRatK1+a+iUrWt6
W/nzl03y45C+EwW+RQQrBvvbOuMcZU9fUVhzwEqwVcEcoqYIQjpuGB64IOTnA4HT7J+JXwI8QeDpbpLrT5zFG2DlCpUOWxkkEgcHZnrzkDHPzPqukvaySQKXQhhvYx4CMM5BU5KuedwBOAB2r9kybiPDYzkdKvCd7OLUk+XRb2d+urd/Jq5/NHFXBOKyypy4jCzhC+s5xbbaskk+ZK21mtPM85lgCuxwQSAVByQ4PT6E9CM8YHPNVXQFiFUgbeQOOe6984Pfj37V0NxG7tICv3ARhV2jAPGBnoecnPGPpjK8tvv8Al7Y9xw5ByyrgYJ6kjGQeuT37feYbGqpBXauut3d7fPXpfvp2PyTH5dUw1bljCUYtaKSvfVWtu7W01b+dzLKmJww4ZcgbcgZIGST7f/rJzSpcXCOfnwrEbgvAcp0LAdB6njp+d6SNi5BjGMDt971x6e5GOB0qP7Mx3LtyMZ9898EDnHbpk89+N51ISjyuKlpZtpN6dLvdbavtpY8qMK0JN024yurOOktWuqSlrpr8nc6HS/E9zbSQp5jxhCASsh2sB/eGO5xg9e+emPTdI8fSwtGGmKx4KHBJK7iNy7QRhmOBvP3QM8548MMDANtXPUjjBHHp3UZ4P3jnp6uZrm3HmK+WYKWVXyV67SBgcn/HI658DG5JgcYmnGEXLS9lfmdrvW++mqT/AFPfy/Oc1wElONWpUgnflcpJr4W7a630t031tY+zvD/xEMcyRzMyK4AQ7sxbUBKiT/Zwx8w8EkjjAr1XSviJBHDGzShhsAGG2yITnakRO7nJ5yDnp16/nVb63e2ixbppWTOZFL5AZvulhjJVMHHIBHIHWuw0/wAcOkqxySsGVlw5yNijB3ED7ydNvK4wfXNfBZrwDQru9KKnftHtZLp57vXqm7o/Qsr8RK1CMYVv3fS8k5P7Leuqu/na67H6Nab413Kn2iSExsW8sRttdGyPJSX0kHzbz3z+A6ZfFx4Yyh32qruDzlf+WMbg4YMOWIC7cDjk1+fFl8Qm84h79libDLtwwIIwXb+4xIxgZK44J5x19j8RJ5mii82RSshCM8uIwcf6tRjhm4y/PTmvh8V4d14SbjSfmuWyVuXVX1ttezevpr9pR8RKM4RjKd78qj76u9ItJq99bdfwPt9fEMf2cyLKYvnUjHMpUZBOc8gdhjD+xFWX8RRPhBdW844RggGd8Yzkvn5Dz83BAxjGDXyJZfEJwsYnkBlIAILb3XZ0DHgbQCcHqQT3HO3B48tYpUd9gZQCQDgup6nryx7nrgZ7mvBrcEV4SbdKTa1UUtHqm272e+mu/ex6NHi6hWSaqaXWjd3G/LquX4X2tbtfqfSEermONJGlMqB2L+UTl0c/xAkg8j5j14A5zVKTWoQJVCSMrOpUYyElGSHHTaOScjI9j28QTx/YujRiaQO/zAA7wzgkqrHjavJ4HQ8/SN/G1uSrCRJGAZAEIVlHZAMnPfceoAHByKmnwziE+V4aVl1cZXXw7bq7att6XvYupn9BxUvrEW9Frv8AZe7d+n56W29efWBGAs0iPGolbCkK2Tt2sRzk/wB7pnjnjBxLnWArvcCXzFVM4ccqp67VBwWb1H3dq5yTXlMvjWKOFiWSVsbVTgkA8BTk53DuSP0xjDu/HcCrKDKmRGFCKoYLu5x1G4jkkH8xiu+hwxiFKPLQerSenRuLfR/Drs9HbzPJxHEOGjCS9tG7bas02ndO97+T18rW6nqUniAQjzE2+Q8jqrA/MJRyQw6+mDk5496wrrxk0KFlJV8IGZRuPmDPIzjIHIKjt3HWvFdQ8ZRr8qziOIoMIDsVj1z354OOmT65rgtT8bb3KNI5csPKG75VbkMpYYyCMHn07cV9ZgeC511DmovSzd09VeOi6eb1ttrff5TG8WeyTUal3rFNNNyXTS8vK3/BVvoObxubcoxcFCrBlD/vJcc7GQZ3K3ds54rl7z4hRqHPmsM7/LlcZUcHKnJGVQHv3r5
1u/Ftw0DoHLDO5Wwd4bd8oHv1AIzjqc81zM+tahO65bCkn5mzkhsce4HfHJ9BgV9bguAsKmpVYRSSXfmvpurWva997LQ+axHF9eV1SdWV1F/EnZ6dU9NNtfnpc99uviMkLNiZsBTvWIlfM8zjIBzycZPvyMY45G7+KF66SJE8oMhAO7JXcmRnnHGCSAvTkg8ivJWnuJt+92DFcHYAA5HG5jyCSMg+px17otu2QcMG4J7D8MDJ9+RxjtzX02E4VyrDpXoxnJNWbWmlu62snsn0d9z57EcS5lVbUa0oRate8uZPRNXTs2vW2iO3vPHWq3MeImdAmdjYy2OffADHrnPbvXOza1qdyGea6mZnXDHcfmxnGRngDuO/r6VVtzsAyR8xZgTgsBnAHbPUc9s1LHAT8pXbt53Zwcc49jn0+bODn0Hv4fLsBh42pYejHq3ypvWz3autVov1PLlj8ZXf7zE16l9GpSbineKb7vbfXR7FXYzclnO4k7iSMkcn16c/qOnAcsBDKAGPcnOQQemCP88fiL6R8hGHH8Ixnr0JI6AfpnvVgwkYUJk4B7ZA4JAxk85Hoe1dUpQjG3u9rJaW02t8ttnbcKcJTaSi5N7Wu272Xf0+/pqZ6RP8ylTtyWGevqucZ98jg+3TI0Z3jKkdMN0ViQRwRxgd/X2HFayQ7XwUbB4G7oW7c9h7/nQYpnfZsOOQq7TwADu5/lk/3fw5Z10neLi1pqn/AIdPXfdr7z0aeDqzSvCd76p3bWiabukttPwdtb5/2Y7OSuHIztOW29x04A446AZ/Fq24JKjBPIwGGCRg4ZiOQOuMDnI5xWwumy+WsjBlifGWwWIX0IORxzkdPUnJp66LKWVQjg5JjOMLIWwRjnk9SSduPrzWLxNO93UXZ2e219bpaO17avsd9LKcQ3Bww9V8zjrKPKmnazvqlra1+7uZYiVmYMqqyptjXZ6ZGd3cHt/9am/ZNpIZAwYAZzt+XsqEZyo5zgZ64569EmisjN5k4VQAGKkyFR2Y9MEkYIGcd884kTR/3hO52y4AJfkdeB0ycdenUDpiuepi6K2le3W7s9ur7L10d+p69PJMalGMqDV3ZKUlbdWtfWXolbZvqYVvp58lpFlSJBglXJD4JIJHYheOuOtW47aWO4CxTSzYx5rF9sYUdVzg/J6Y78cckdFHoAdCMuV+V8bcn3yAeFyQBzyM10Gn+HFfYqEhQclGyu88YUbslmjPTt8wyMAV4eMzGhBSbqRbUdFyJpaRtdrW9vlfbY+ly7hvF1ZQhHDTilaXO5NQ1aTW+tk9l3va5y1vaXUxZg8yqytsIJRc9Dls/Mp9OM8DpnGulndm2eN9+4qu3K5kCc/vSvACr2Oecngc13qeHZXKJaxtcsV3SAjywjD7wUYIkaYYBcYzsAAOK7HQPCF5coskllcxBiyu21nZtxAQfdBWNSDluwOAOpr5XEZxSTbtTjrHRpJt6b6319Ot9np+h5VwhiazjTVKc248vMoT9lLmS+1tdO19XZtW00PFf7GuIUjzGZ0f532ErGVHIc8HHUErzjgk80+DRrq7VhDb7mV2dcAhZUX76q5HIXj5AAT6jmvsTw18Hdb8QXsFgujTzTTSxOI4wyyyWyZIltX2kMqZ+dSozngcV9B+C/2aNP8AGHjnSvh/q+/RNU1aaO30+7ZPssLX6DMyXjsAI1lDIuCCJDnBBGD51TOXJXhFSfu7PVtpaJLS9t7WaSTS10+kl4b4qSpNzdNWgqdGS9+cpWi9Z6OKbunuk9Fbf819H0QExwNHLHGZASfus7E/NGo5O3IGB354xmvq74P+Bje6rZJJAyh512yIAcSD/VKTt+VCC2WIO305xX6I/EH/AIJb+MvD3hu/8WeELee+1PRiZNQ8PJmXzoIQC15aNtH7qRcsiqrZweRXiXw28H3fhrUxY32m3Wm38bP58F5G0f2V0IDJIHAIZ2+6wwBtJxxkfPZxnHJG0/iaaa5rXfu9bau1r2f
k2kfacJ8GPK8SljPYupGSdJpqalyONkrL3Wlv20tuj9IP2VvB9l4bks5dRsY5bW4DC3hChljbKFJA+BvkZuXX5QMLjNfud8HrT+1GihZ42OxYYlcDch4AVCMZ2jAzgBfxOPxQ+DuqXFpaWawYzA43ovz+Xtwdu0njaM/MOoPTgA/rb+zt4tL6lp8t1titT84kU4Y/dwqk9V4wxOT0wBivNyjNY18TToSk4wc46JJWu4/E79t3v+J9hxLhqiwU/YuTcIuUU03zNWdlorpO9u1vNH68fCjQJtDh06K4iwJgSsiqC7KSp56k7uee+O2a/Rv4dWlgumx+bEJZXiMhOMGEnG1ZOuXAOfT8MCvz5+HGvWt79hnW5DtCqKAVBGExkgZAyQwAOeOvORX3R4Z8QJFpyOCsShFDEgLg8duMsCeT79PX+huF6NJwgouMoRinG9pN6RTTaT2v8vLr/HPHtPG1OaLhONWVXo3F3T7Xb1b8k07Fr4j6bZ3NjdzGFXniBDoRwYyOH/2jx7Ee5NfCfjaxS2S4xEjRujgo6ZYSdAyt1DjB7cj0xX2B4o8VyosyTR7xIj+TuX5TuHEjE9eny8DPOOlfMHieOXUkmOxcl2kVkwCChJypx0OcFcc5+ufQ4mwNKrhJxpwi200mkm7u2t+itrtY04EqYzAypxrSap88ZWc7pJ8t0u92nfVWvvax8b3+m3LedDIx2ymRIsAYfqVBPZz25zxn6/HvjhrjQfEt2kkK+X5TyI7AtAJB1O4japbPGQdpGea/QTWljF0I3t0WOMncRnDv0A2jkY7HPc9ecfHXx+8OTxWNzrFlvlbyx5gK/u40buVxwyjIyT19e/8AMOfYCVB1KlOTvTneS2unZtK63010vontof1hwrmdPE1IYeq4pV6ahCd7JzvCzTXW2zs+h89pqFukzXPmKUmbdI0cvmxhyclQ4Awh9NoOc+hr0rTfFyGOKNJGE0YVkjUHEkPBGwH5iye/971zXy9ol9IGjtI0dASXmGcMzISCrqQQq8kgZ+Yeh6eo2uvR6Wts1pJGVZcIZl/eRq2A4QEEqq8Y5I6Y618nTrOzkpcr1uno7q17aXS6t28j9BxmBg7QcXPTRt3VlaKu3o3d+u78z6b/ALUubm2tblZXEZ2kwuMSllHLDnj36nGPQiukg8TXDWsbwzTQkDyJmPKjnDR88DA+6OQ2TXgum6+88BEM7XDRESJLuwXU5yq554PVMYOcE12GgX6X1xcW9y0io4M0AU7VRkAKlzg5zzuUgcqDmrVedRvknJTdk5J2in7nd3t6J9NT47F4aOHjJStJReijq0m03uundPczfjn4Tt/iF8LfF3hu8hS4upbBtQ0cBf3kd5aBnDRuCSpBbuO/XHFfhXNbfbZl0pkWO+0yaaKTbJtuoZ7RisgnGASANuM4DZPpX9CTvp+xi0/mRMskUxBLffUo25SBt3dMYbIB6YxX4b/tHeFk+Fvx715orQ2ukeLY31LSp4iUtC75M6jAwrEsmQOuOpxXq5b7WNWmub2jnZcqbkla2rafVPy7o4KWIbjUaag07Rva7V0m2tXf00XyPNlmfTZrWeb9+ufLuI2O1okJAY5AJbGRtHBGec5qbVdc0+1juJYLibyJoMqsxysc5HKqScHvjGCB6U69jtpdL/tJ33QyoWX5gzMx4VUI6l+RnB6eleD/ABH8UWVrp6xCQW9lbwGeRWcJN9qRWPlZ5JBPbvnrxX6LlVBVKkVyR5kuZ6WfNFJp9mtVo1d3PkM9zGjSw9SU6jhGzU23o27N3u9H26WZ80ftRfE+20HSZYYZvIDIG3qdvnygt5oXB6k7Ru/HBxX4ra94hute1q91G6cu11K/AclEUE7AgHYAHPXtk84HvH7QfxSuvG+vajp8d1utLKdookXlFSMkGFjnGeRuAznPXjNfL0KlST8owxPQcL2weOevUd8HrX61keB5KftZRXNJaN6JJpNO7VrXs1az6aH8V+IHEssfmM8
LSnzYejN8zb5uaonotL6Jdd9dDT28E/eG7k4I255PHcn/ABPbFMZ8DaExxwQMcn7pzkgfz568jKFww6leArYGN/qznJ5Jxnt6npSNhgQMA8HqAu0dTxwT7cYOa+mhouXW6dne1+l9rvXy02aaSPzWU1Ule91a66222v66fdpYRWYDLE9yQuO+DkcgdevPoSKcXIGCVPYMR/e6KSOdwPH+QKjyBjG4gEn0xxkjrg9+uPryaQlixwOAO+B3GQfb2IokrPrpZN9LWS6a+vqZStGT00lq9et1997eQ4kkjPToW67jzjjnI9D09AKUNjliT22dyD0yMfT04xg0zoQwGCDg88D1wO/bkngngjNP3AKQMbhhgxPGRg5B45PryB6HFT39P179PlqvmKMG1d2Wi0vbe2mzV9fJ/cGSBjIAB5UDjBPtwfw/nUgHHAHIB3nhS3uP06Y9+cVXBdRu3ZyeTnIx9RgHuOMbR7dZlkbGN2SRkjGCTg457D0+lPb8H1/4H9bMIx5nva3pd3tfzt+vcY6lepPJJxjgfU+/bGfwGaljaNVbO3cOhwOcZ6HOSRnkdB+NMkZjj1IwQeuMHGB+P/18YFICDwR1HBPf/HA6HryTx3V2mv610t5f5+eoOKScmrWaXvWutd7u3XS6Q9SCOXJJJ68r7g9T+GO3TBOEJwpA5yTg453deD6Dr/PBFIPlzgEgY4HYHr7/AID1Hrw4gNwCQMDoPungH39uvGfXIobu72t5diUnrv8AO3ZW9Ovq9hgB+Vumc/jjpnOSfb05/CypG1ifvYJAPA6AA5H05zx6Ac5iKA/dBIByDn9CM9fQdvQgipAAMZ5Pb2BHf6dxzjPbNRN2fmlp5XV72t27v5BZXv8AK/lfsJGpIBOQS/3uemc4zxx6+uBmrKgjjJbB+/xntg8/X079MmoAWLhAcL1I6LnPH4ngcdfarAYKpBPQEHAJP58YwenXtweMZASAsAOeOefU9enfnnJ6H1qQMeVGASvPHJPqeoA54HB9qhypKglsjPBBwOBn8fUe5xjikygYHJB3EbeT0PGeuOf0zWihpdNptJ/l/X3di+dpNNXsrK11009dFpZr8CRcgA4G/PPOc+3QA9cn0p4Y5OQA2OvGMHAx7Z4H/fOPSlIyScjbjICnkZ6e/r3P40zA6/MCTjBB6Hjqe/f2/CtCF8K72X/DdXfzuSqpABGBnHBOcZyD1P4Y9c/L1zMhIU5AyOB7HHXGfxJ+hGeM1w46c8DIGeSf15HpwffuV3Z2nkc98jHXBI6e2PTAI6ZxvZ7tteT300d+jXXR/hdcqSWibVtdtV5/c3+vWVU464YnnPOcn1z9Pf0PYSkMdoyQQAGGOoPqBxjB9fwpoICDdyQfvdeCe3T16dOuOtLvwRh1OR169fp0PTjnpjnjGsXfVprXyd1pqv8Agig20+bdO35eSuPICkcEEY7cHPt2PXaBxnnp0KTczMc8nHDHONuMD0xx7A854PQroXpa+uvov+GH3tbfW6vrv380eaSSbVZiWZiF5C7hg5yCfyyR0/KoF3KNz7MEMOMZc87d3HB67jjvwKfxgAZZXOcL93PYkgA45Pbn+LFNZU+YKeOCDnrIeOnzZIwBwf6iudvTdJO+r87Wtt59f1vhDlirWdne7ae10vK3yXS/QlQDyyrZDHLBs4Xdk7VGODwDg+p6U0EiSNpTkfxK4wX28AH2Hr3/AENYGUkLkhi2B2+Ve2P1J9yME4xcQfKXfgrghz8xZiSRj0z1zyenXNaRvrrst7aa287rb5W3tvTVrbO/RX2sl1vvrYk2soZoxt3MGZlG6NfvcY7EZwpx1ySOtLawYdCqAB5NwYZ3yEHkbjwuM8jac+1W7a3lmDu0h2sMiNsBHPZe/wAy4BXrnp3FalvZyvKFhViU2sNwG3DdV542tgYYfiKwrz5VJPTmSva6X2bfdrbV3srHZhouTSSf2brs7q99dGk9OnS2x2PhvfBco2WUsdqFh8pjOA0
ZHOWfjPb5evp9G+E7sQq0LxIqQyrKkYcxyTRg9c4OYmz82OWwOOMV8/aQJI3RyAZEK7g5yI9m7dFggfKM/e9Tk8Yr2vw5dwraE7dszBbiN2+ZxKn3VLfwxLk+X2wWGCTivic0pc8Z3Wl0rrrrH8LLV76+h9ng4tU7KzWia6LRLV6avTz03bue7vqcgigSM7R5mZjCQ6O+P3kMceBsJG3DZwxGcDGG8l8aXsrBSsxZkjVnjLHEJbd5oYAfvJJOM4wBjGDW2t3OvmSO5h81QpZMqqs4+YvyeXCjL4+XHQ5NcB4kvI5En88zKsh2QlyQY0BO0FsHgckccjnHSvDwtDlrxsr+8r6a6tXXyd9b6owxq9ySvZvdPTRctndadt7aebV/EtZIa4nf96xmb+MbSAx6Y5POPvE8g8DArnV+VTGEUKGJGDkgr6emc/M2DnHOOBXQavN51xK2w+bHgZxgkrnHmHI3DHQ4B7Y+aucPmKwdwVZnYkkcktjIGTjoBgd+OvFfoWCjKNOKaXRqN22k4qyT/P16Hzl1dptXb08vK6Vt9vkQsWR/mAc52sgOVHPHPAwR+H0Br6E+B9omq+Ip9MdVKXdtMihhiHIRQinOcAhjk8546Yr58uHbfnaoO1c4wCWHoO57AZx1461738LdUm0PVvDmpJGpE7kIQceYylMpIO5dTxnpjOetVjr+xlyxk5fYjHumm9bb69bve6PpcijKWITbcowkpO3lZNaLe2y6Xtrpf6b8M+FkttRbT2tP3tqs0MHYqk3GyT5f9WQvyntk49a9i+AemXHhnx1eadNa4083jFWdcxFLk/PBgjgyD7p9F6VWivoofE+n60kCPpupvbSt8mU81wFkikGeCCQEU9eemMV9Uz+EIdJvF8U6dCfsU1rBPOsKAqJZBlJSuRyADsUEA18hicVNzUWpJTjazWqlBxet+t09l2vufqGCwsOVT5WnCUpQ5U9rRstrPa+voV/ix8IrK11i7W2gMVrrFsbvSWiGFIlTeuBg/PnOEBPX8/nTQotV8M6jouk6hHKC81zaEyjasaEqPLyRjacjBPXniv1I0/RIfiR8NdJvIkB8Q6I1jNYyTABpLYM4lV25/eINoZOh3AccmvAPjr8HHt7K61Gyjkt9c0qO3mu7LIQqzcyTKMDGMAnB5BHpXdGM50ovlu7rdNy5LJKXpr80lfS46lFS91wcXJKTi1y2StaXLbaWj13fk9PnCDQzHdalaRRpDdRTfaYPLO1UmiBdAuchlky2fUr061yPjiWLW7Wx1QN85jNrqqBAJbKWM4WRhnJVzu3cgjj2FeuAGwh0DUpgJXWW3g1BJBliu4JIG55Zt37tuMgMDjtz3xv8B3XgXXrm+t48eGvEFla6np0nAjIuULMpBP3gRx03cjJwTWcLc9O6d2tpJ2dmk0m11Wq5uy3uZtKPLaKUXaKvo3JNL3d0nv8Aq2eWaWqnRmkjPmPAcLIRuRScgLngc+gHUnJxivbvBtjc6t4J1DT/ACA1zbQuynGZQgJI+b+IjnsOOuM5Pj3heSO80zUNHCLJBuMkV0uEdCeQhIBOVOdrd85AA6+4/AjVpW1+90a9V9jWdzbDzFG0TqBjzWyd3UEHAB5x1rnxXNyzcU0lNSs3/wCA66db9nrsduFUXVipOT5mk1B2Su0ryWu22l/vMD4beKbvwx4uj0mcy/YZAbNlyd0O8EH5c9GIHPHToe/qMpuYfEd/d2QdZJg/zI3yqG5VcY5U9yCMEe+a8v8AE+gz6b45udSjjaNYLheFOEZif9YFxyG6Y4x7849JuLry7q0uonZPtEMJcY4csMMrD3yMDOT1+vzOOqr2iny2clFS0vzaRte762306M+gw0OSEo2ban7rev8AKt9Xp26Wurntmh6xNNo8E8wdUk3WDuf4HwBIUPXGCCMjIGcYr1vwqselm01I/vPKi+yorNlpI2JDEkdeSOcZHA9z5/o2liXw9BAiKltIIrhnB+dLl85z6qx
A446itG91C5tbvSNNsiUEeFu95G5gQPmHrzjjt6nOK8VVIXmneKbbT0s1JpNvSz9eifU9KzcU9IpyitUrtuy7atc2l77dND6u8NalD4bQ6rIyy31zC4tecrEk+0Mp92wD+BFejW99fHR5owz+ZKqSS7eF8u4JLhepOwLyM9COa8K8CQN4njktbmTYY1CI55KtDypHoMnA5/MV9GRMlnpq2hjVrgbVaQ/w7B69FzxxjB9q48RKEbOV5TdmnZ2ls1ZNdFdN+foddBKnOPLFua+KTXu2srJLXR66Wvc2dF1P7Pp4cqRFAh8tWGC5BAYqvPQnGeuT+XeeEdRvYQ13dB0W4YJApUgRxE/LlT0LD25xzjiuB0O3GpSiOYIiDCwsrYVmB+ViuMEnvzjgYOK9a0zRmutQggRl8m3VTIithGkAG1un3epxxjnnpXz9akpuT0Sm09m7v3eVLXT3vmr7bX6pShFe8oqM3eW6Tb6rp6bvW/r9EaBIslpY/vGWNFWaQMPmDDpsAP3cnAB44x0PHsHh3feXMbxvF5UW0OrYLKF6vjHHbI6dOK8Mt0ktLaC2ttzNsSNpegLHgLGCT0x+IPTmvW/C1o+nLuZ3aW6KqecjJ79eAcnjtnJz26sMmpQXK2ouKVtEpK2jstrtWTW3nc8XEtOM05Wtfl0tdXS2Wr0d0+iV+jT+kvDVpA97GDkq2CWPzAEDILDPC85I9cc9SPYIo4MRWYIZEZZCo6FzznvhTxgdgOBXknhkhFtRFu8xipkJ5AAAxjv6555GK9FNwYbpI4mJklCl2YHCk54B/D3P8z9ZhsYo0+T2ekpKMnZt3fL7vbovv1Xf5fEycpvdcuqbSSe17WunutHs/wAeztbtLOaMjAG7IYdMA9G6dem706HGK7STWhPFGsZGSq/KDxx1xwfTOOnPJyefPBavNAXlBMb7Qkg4O7+7kZIHH656812ekWSNbNK0bMqKQQOgC/xE/iMnv65Ar6ChOqvhTVNpqOrX8uy+6y1u/Q4ZRUldq8otW0W109lZ3XTXZ67XOisLmR48F8MwOSR/DwMk57jGPoBjgGsy/M32jKkMiAHcTw27k8dyMZPf8+IorgRuVVSApIIxgqSOMD1H49+OmcrUNQeDc7gkEkAnnZ1wze3t/LJrTExVWlHmqTXLZt6WWqa3dtP8tFfXow9NcyT5ZbK6u2m2vd9U3rddS5Nq8gbynfCoBu5wCcfX2OevZTk1zlzrUiu+D5gO7AHOCe+e+T34Pr0rDu7maRpH3FlkyFIHGT0I9O3GCOMZxWp4b0SXU8O6syqSScZVgMEk+mfwz+HHz060p1I0ac3KrJ2jKV9V7q5t+rasrL57nrqlTp0vaSSUYrXTS+iW+u/R620NXw7Bc6ndwSyBvKSYqPTK5yMD1zgdv1NfUOiaescNugUklMvs5KrgZ7AZ6AnjPzdhXD6DoENlFEyRLhSGZhgkE43E+3QDJx26AGvXdDg3xqQOASPl6nGBz9OoGMenavuMlyn2cU6jcqtWN07rR6P11+Xfc+RzfH87cadowp9LWvsr9lo1fsvx2NKsyrGRhhFICbsAY7H3wOM468cnOO2iVCEC8cc4AyRjrjtnqOfToODjQ2wZkUEhFwpQZBLjv6Y5yB0HOM99yFRGAuCGYgKSOh6Dt0HI9OvvX3OGw/sIctRX5uW767Rtr1Ter+/V2PicVVdSfM9NNtlpZdf117l4ZBXYDwB64xz1HTjj3I9MmlLBByQx5JJz36jI/H29Gpw+RQzcDb1ONpPUY9v6+lV5GWWNgu3DAgnHI45JJOc56HsMjmvQhJPWWmm2t29Hp99vO61OaEIyavdX1utlt27J/Pr0Maa8Y3QG/YqHPycnAP8AEOOMdSencDNXpNTSFVLSKT/eY8Dk8Aeo5yc5wQTwK5vU7uGw3su0yg9COx/Hvxkc8jk4rl59Qnu4y4b5UySqjPQcFR7c5/8A1A+fi8xhSbhGnd20iviule71b7XT7rc
+iwmVOvCFSV1SbS5no76O/e7fZ9bdXf0O81i3WAHeW34zt5wAPr+B/kRXnur+IVSU4dFCjAycE98Yzg+/Q8ZrkdX8SSWsTAOzMhxg8bd2ecg9PUY59eBjjpbq41AC4aRmUtyoP8QI6+3Y9cdK+OzTPPbShRowtN2dlvFKyT5lfZpdEr+TsfTZfkcKKVSfvRd3qkr2svv/ADvsd9eatdXtowMoAZMKMEZHbv39cHsa88liki85t2WkxvI5JXkHB55PHTjHUU++1KdESKMBQFCuOhwRjHcg/Q5/SsuTUQsL7smZyFQt0Uk854AIHYZyfavnsdTWIjzVpzk1FPmurdN3fqrv8W0fT5bR9k2qMYty05VZu2i10slpvfQyHjBedpMoI9zA79rNgZUA45yc5BA7cCvLb6/kk1FYlk+7IQwxxxj7w+6xJ46jpXpOsK5tZpUYgLHu+XjI2nOfRTngdeMD1rxdRM141wBtYOdrN90ru9OeSeePxxya/OsztTq06cYONpJ35tXG61iunRa7v5s/RcmpOrTqSk7SUEoxafxe627y3srN2SS30TO1d43tcXEowBwCQOV6AjuOw649zXmHiSZEhl8skkZLHqGJzjA6bQBkYPPHA61u6tcyOxjhYoAoZ1Bx8+OSPUH044Ga4DV52+zzGVm5XhhyeAdo9s+34+3zOdY5OhUp+yXPCDk6j3aSi766dO/W2p9PlGBkqsakpStKSbitknKDXezbu99r7Hn813L5jx7nMW4hwPvkE9PYdge475qRPDrzKbt4CvmBWjB4KIc/Oxx3OACByfTFZdstw927tuCM7KvZSuRtB7ZAJGQc8Zxg16NLq+NMS22DiELubH7sHIBHHIJHAyQMZzjmvzzJ40cxlXnXjKVODlKKjHXn93ltd213dlZM+3x862EhS+rqEpS5FJttpRvFW83d7dWedWWjwf2lNvwGbIR2bIU5zj9c9e55A66eo6bbJE4kVZNvDTher5+UYHfPAHT3JBzO6C2Q3THerbv3q42FyTliccEfKMAZx7g1mWFzeXkd1FIu3dKzW5AycJjDtwMH5sBTnrTx9Gnh1GEYShUndxjJP4bxS10abV9/zuycJiq1Wo5czioQjGcZNpOTsm0+ttb38n2ZpmxtksYgVKkRiWZQ3JAAwD0PJ/E555FcnqXmG0lUMI7cncIwOSBk8t1Xj0HOe2OO1vGe30/50Vp5FwX6Hp2XODx79zx0r5f+Lvxo0nwPqfhHRrmImfxHqqaXIyArHZJIQEnuOu3fkhc53Y7Yryq2E55U4wXLKUU3F33Sjrrq79r9rrt7NCu7xUpXcpqMXJpRbdtFffR3W90k2jvrF7mYzDO2GPd9niXnz14yHbgsnHQjHU5AwapajbLq8MMMkCSxQFnaJFChnBztIJIDKe/PXHQ8dPpht5LOG4V9sUloHjkUHa/mgEP6kt0XjJxXLazvsZLS2tPnimkCTyO20Ycnd0yCPTpkduMVwQhLnjFK0ueMU1sldX2s1pr2srt9reJlz6Jwa05k7apx0bXXo1pfrdbYt54a03WdFMslqqXUztHGiACGK2iIVjKQMAtkYXGWxyfXy/xd8IdG1DRbm1m0pJjcPHtuNoG+FNxMKkoQsb54HIPIzjr9M6XLZQWsVreLiFkI8wKAAV5JDAHOc4zjnBzzUNze2Wr+do0DKIAp+yoiBfLxy5klySwIC7VwNpz8x7elGiuVwcnq9HtfVLRt200TtucjzOvTny6KKb5m2r2The6/G+r108/wF/a3/ZE0zxJK50HRLZZpLdJZnaIQpNLEshltn4YSE5Xyn+TnccZOa/nI+P37PPiTwZqWoM+l3IgjMs0yw27RxwMCQIiMEbVxzISNw7da/vB+IWgeHb4RWrRwvfEqRC7gRRW8Ocgnbne2R7k/hX5wftI/s/eF/GGgeJbm2sbRJtVspLedJEV3M4UrCUUorJ/EWcHIODgnp9tw1meLyqrTVKbqU04Nwbk3yNx
UpR7Wtay302OfN4YPP8H9WxVGm4VFyxlBRVVVGkuaDsm7u271a0R/DVqdtcW8jxShxGztG6eWC0ToTneeMYPBODg+oIrAe2ldvmV/L/hTAAYccr7Nyc456V+jXx//AGW9d8CeJr1E0+ZLWW5Z7R0IaKZJmYqk2QNqgr8p53AnpjnwnSfgd4k1KK6WDTixhcxbnChBMf8AVqRz8wG4AZ7/AFI/ovA8RYVYKliOeKUow5tdYN8unW8rtK+92r2PwrNPCjMsViakKcKcaMW2qlSb5YxSTg5OVrSlG2m19D5cW0lZF3KwLPgg4JRQcZ24GQR0PU89wKtLpjA5IkMTYxt4OCB0Az1xyCeMA8V9CXXwp1XT797Ca1aW+t3RJoSACm/dhHAJ+UAblJPqeauv8NL5IWZohIY0AIRctETjKnP3lIHXjofWuqfEuGXLaqlGajbWzu+Wys15/fe21l8xHwxxNH2ntacHOm7cy1i7cvzfNvp5Prr82iwdwQEYMxYovBCqMY3ttG0DJwehzg+1dtJChxIrBVB3YBBJPYcHIznqcDB/D6Lb4dXKhQto8ZJY79/yOvGInIHCNg444xn3qlN4CvY2LfY8yo21OjIg6bCOrLwMc/Uc8VT4hoyslNK/K97OSvFbtta319XpqceJ4CrQinKEbxta6aittbattp6beXQ+bZtKBLDbLgjcgHyghezNg7iOycZzjINZU1pIobaGbaCrsACNyno2T24GR6ng19Dal4Ku4QWaHa6AF1VQwjXoVAyMhs9QQcDp2rirzwvcxPIVjMcSjLAnasgYHJY4OAMDAx3HPavbwmaU6yi7t6LXXrZJ+u6vbv8AP4zHcKV6Ep+67RkrNL7L5dlfZarur3seUxx3Odqho1z5ic5woAzzngY6A59OnS4L65iCSRmUBctH5hKs4HG89dmMjb/e7EYNdHLpMyZSOIg72UscEFl+6MDoT0B5wOcECqD20gUI0KbkIVgBl8A4IAyAy45ycFsZ4GRXpurCqtYppNNaJt6J3lvq19/yueHUy2rQd25Oy2tqrW6p33flsrbWIYvEd7bld0zsw25kZ8/Mc4ZwcHA54zj68GtIeNLyPe7yl5CAEULuYNg4lxwCoHU4wOO+KypdMUhm2OwdQCM5C4J+Xb/dH90d8dOarvo9yISVTCKAS+3kKfveYS3AHG1cDHJyR0yqYXC1LudGN3bXkV2tNb2e1/TTzMZYjFUrqFSUVFJ3UmtXZJP1urbXtvfU6NfGNwiZZnDtkLsOWKtjqDwAO5HYA9QTSDxhIqlkmkWQlsAkkHcRkkk4HABA5yf+BGuOewmCMWBJDZLAY68EOfvbRgEfTpVVraRUkPQKcuw6jJ6KQMAAjr61isuwUtVBduj6Ky2fpd9X235Kua46Ls69RqL0vLma2vZJLfW9t09bHZnxXcvuC3zElQWyNpOM4Oc/eHIHucZHGaL+ILqZwHlc5OFOCQFxyGXP0OR1PQA4rmFgfaQFJ3871IBK+jN6DsMc/U1bhtbh4/3cY2B/mc8kjPc9SATgeuDxW8cvwtOPOqStok7Ld2Tei79NW195x/X8TWkuarZbWvLXa6tf/Pe1+j0JL15AQZ5Cu7IGOGAGRtHbPpz9etV2y43MWGRkM4O/r94HPXHHOD2qyLO5IjAQZjQbwowW3ZBU4zx6n17VrWWhahdogS3dwDgMo5wewPIO3qGwOO2eurdKnH4owS87bpX6b2ae33HVSoV68lyU3Vbjb+ZW0sleWis9lv2ObSNtyx7TsP8AEeBg9N3cD3PTk8d3rDu4PyKrc7hhmHPKH1PZR+fr6ba+BdauXTZYvLIgXCom4FDnOeRlm6hu3PBB47Cw+EXie/WKODSJriSR0VUVQzbXJwGT+Ld2OR0PYVxVs4w1DR1YefvK920t5eV09PPue9huGMwxUbxoNWSkm1ZaqKs3fZX8/PseGRoGVcKQM5Qjg/L2YkZyT2x14Gc82MlsqqsH6MwXAHGMDuS
2T2xX1Nb/ALOPi6aaNDo9wsybCYtofcjYx5iggAvzjk4wSK6CP9mHxURIV0+ZtrKnUHyyecMcDJ5OB3PGcjjzZ8VZZBx5q8b3Tevpe+jfNta2nluj16Hh5nFeK0oxba1V1aL5dUlu0nrd7tXPjuOORto2HlyG3ZJAGPvdsdeeh71pxwKUDFXB/uhTucL1AA7ZI45zkjkCvr4/sw+JYN7SaZdRGIK25gMkN2fr1AwB0wec0Qfs5+JXkjit9JnkmchFjKAsit1UpkDBxk5PbgiuefGOXu3JXivXTTS/lomvnc9rLvCvNHUj7ZU6ib25mkleNmk0ldJq+uqfc+UEhYuCiF3IAxtyoAxwTkDJ54/TAq7DZszL+7ch2IOSNysOcAAdBz2A9+a+uv8AhlzxcpdX02aMQqHYYA2ocjLZOArZ4Ge2K9A8L/sfeNb82zQ6Dd3EU87JbuZB/pGcB41GDtWPjAz/ABYPtw1uLMvdPmWIi5K2mqe0dbuyV7rW2m+tj7HL/CvNJTacaEIJq0lJpJLls23dO++2ivsfDa6bJMPMIIXAZhtxwep6ck9+OfQc1o2+hzMYz5DqkpY+ZJwqkY5XI4HYnv8ApX6naH+wb8Rr+NDF4ddlUrFGflCAf8tFL4OXPGOOcEelehWP/BPvx9d3MVpHoUl5LGscZjjVdhY53rJJk5Jx8nCkkH3rxavFlJuSjOKj3U00m+V3dvXW71tbXQ+xw3hjGjJKtisLKaim+V2ey0et7Wvdq3ofkGvhu5lRTgrBk7RjYfl/iXAOc8Dge/Y1MvhbUJXij+zzNN0WNFJJzj5h0yTkZfIORnHr+6mh/wDBM7x3fCKOHQZHMLKu87cqo+8rt/CTzgYJJXrjmva9I/4JaeKZIfOl0cK4HlJJNIrTSSdwQFG0L0257jpXkVuMIxfuzjK117skrNWd210e7T17NnvUPD/ARUfa4ujFOy3aVvd5VHe/k0k1qfznWvgXVG8vy7WULIxEp5YoxxgmPAwo6bs4AySOK6WD4W3u9rhojLDGoaZY8kgE5MigDOBxkDJOecYr+kjTP+CW+pWl1DG+g3Dys+GDupSdXAzHOmBtBxxyd2OCK9R0n/gmbbWfyNpDwQh1cIFVIo9hJZWkOfJUnGxSrA88ivKxXGldp+zqKMWt9PeV0rXu3okrPbpbQ97D8C5NS5OarRnKNnFytKXvWaaupKKTs/k7n8xGn+BNVghtxDo87NNMYmEqlgh4MSjKglSN5DgDjoK9n8IfAXxD4qMMyaNe+YjSGOBYWJkWPaJZg45KAFSo2/vO2Mc/05aV/wAE6PD2tRC6ufDFn59oBbhDEscsxQYe7M4OHccEJsBz0PNeq+HP2N9P+Gksd3Y6HbXMj/IIbqJSbeAfxSHny3fny2AYttORwK+er8T160G4aT5km3K97pPS767LRO/qe/g8hyrD1Yr3aiSXLG0FFyXK3q48torba65tz+dHwt+yv9kktLbVIJ1SYK7PclrcxzdbaIKyPu2kSApk7sjkda+6/hP+xZYeKNNiurNoIr3VP9Du4LxAjWV5EfkuyCDstp/4GAwQuD6H9bL34EaX4na7mvvCtveGOaJbW2YrFcQGPIW4hkVCH2Nzs2jPGGra07wDqHw9W3ddHjVIJUjUTIFn+ygjzbe6AOH+QjyZOC3zfKMYrheY1asfaSc+e6bWqjtF6PXfTs9LH1WHqYbDw5KWGpcrSjDkSd2rXV0kleztJaa3fn8lfCX9nDTvBOt6dZeK/CGgahNayhLXWGhU2288eRfKAcTMQv2eUsdp3kKd1e4/Ff8AYx0H4kppPi7w1BaeG/iF4ZmivIxDAFXWLezZGaCdw0YaRUI2XCgFs52cE190D4a+GPE+ix3vh2W7sTfxQT/ZpnLtbXX3nKzEKQqMNyqB8u4DPNeyxfD6P+x7a3uZpI9XsoIWGoIdpuhEjbnkkAO1iuA4wfM4+6FFd+CxU67i0pr3vetdWvZN6pXTvv5b7nz
uZ5tRdpySapzakv5HGUVFpqz5bpxa33T0d3wng34Y3ep/CLTry4jZ/Eumadb2ryvEnnXlsi7LmOZeRNJGAoTON4Y8A5J+Hv2iv2JtD8faBqXijwxoSaV4xisHa+SC3VItQuYvmiZ41A+d/nLFT8jAcE8j9KvhD4jtTeaz4Z23RlgmdolMmFllG43Sq2CFWQKh2bSRjqCcH0vSNS8NeL21iCyjNjq+n3M1ncW9svy71IV7mYEYLy8EqR8u3ryQLzTCRq4WaqON4LRyfw35VBttvXZ9dm76ny9HPsTQxtak7OEaqmndtxhU5YrlV27d9bK66H8nvhzTPEHw88TtoXiO0vNNvo2YyxyBltW2HGVkZQG8zGVXau3aeemPvz4UeKpGvtPTT7hPOWWJxHkKDn77MM/NuHuAcdM819x/tWfseWfxA07UfEOh2sUfie1sJJo2t41T7XcBWYRzlQMOSAGzjBKjvX5LfDa41fwN4gm0jxEtxp2q6TqMthNaXPylijgN82SFaPA2cE8kcc18Jha9TDYunB1WmqkbNNWlHSzTuk76/f2R+m4arHNsvqYiEY89OEY1Iz0UXJL3uVp2Wmktk3bc/oT+C3i+e3tbKWSVH82NFeOUgKH4xgE4DDnd36dOh+9ND8R3VxartnI8xVeMDscfMcgn5Rnk9/TnNfj5+z94qi8QxWsQkZpEYlY0BYqECjeXyAcZ+Ube556Cv088LXYtbC3UsX/d4JfO4oQApA54bp159u/9T8CY51cLQkqsuWMUlNTioqSUVaave99LLTS/VH8+8bZZSlWlajH2nPJyvG10mmnG91d63dtXvc9L1rV7iW1QyOZGVVGMk5AzhQewB6ADP4V5XLPdSsqswEauSzKBvGCfkfnDDBxjjB4yetdBqWqQW9uUd3VJUwWk6KDkKAx/iJyMnHp9ODvrxYI2MbGKBwR5rk43ZO7cxwR2wMcA4HY1+gYytCpQk6mjStKzTadlzPl7d3fW9vI+GwOCVGLVKFk1e7Wsdmr9jH1rToXM1ykBPlkAIcJERggSA4OG75578GvA/idokepeFdTt0/fLPaTQXEQYqfOwTAQcH/VfNkADORyMV3eo/EvSxPdaT9qTzwGREUFmWUH5nkPG0AAE9cHJye3NXeoreaTebCs0M0ErzPEuY2OCGAbs6cbXwepwBX4PxPHCVvawoWk5Rqc01FxtONlZuSS0fbXW6dtD9M4Xr4jDV6LnzWp1Kbi9lq48rv21a6Wdj8YL1b3QfE9/Z3UyrHBciGSFCW8iIuxjim5U73wfKJAxhug61viL43vPCUNt4ikhVNFkFtbSTsMx2jAlWMpHGyMsu9iBncvocnxq1E6d8R9eaAbNTmZY1jdg0MwJby7hwAoaZVzl+Nvo2c1xXiLxJFrng1tC1mKzNoyLamxI3XBaBt2Ys/6wAtuLcZyuTX4jUkoOThF1km4ShFtuNpLmas/L3vQ/ofE1qjoUatOMZTWHpyknJcrtGDlor3ajdXdtemrZ9TeEfF2n3vh6PXrPcbiVI4pbYj7juBu2KD9wcN2HJxgdfSdC1dpQtzHOJHSXa0iAE+U+PMhkUfddcDBzg5LcAYP5/W/jo/DnS7GGGTzbG+MdvCXUn7JLJ8okdSTtckDYefuk+lfUPwbbVksr66v7hptO1e0a+hvxnyzK4LfvnySgHBU455OOKrAzc58sVCUW+VP7WrV9NG2rq6evRdD43Na8Ywm2rKcY2XNFNN8r9/b4l6brfY+rNNnivNMv5XRISqSNCxPzSFBlXLY+Ugk8kc5B+v5kft5afZeJPBFhrViBc+IvC96C21/3jafIxE0eBySAq7Tk9+OePrHWPjH4d+HvhXXNZ13U4odJiivbOSVgZQj7cKIQCCHdsAN/CQOCOv4s+K/2nX8U+IPENnqRYaRqmq/Y9OiIO0WayOFuLg4O1pFZSR3xjp0+uyzCyhONSmnKXtIxtF2ly80ZPRbvdeVnvqfnecZ1QwfOpym
qjjHk9nry3TUm220le/V66brWPwvqK3GiW0VzdvJFARLBFI/CyY/1UgODtB/+vzX5w/tjfFFPDUV/pmn3TjUJLlWCE4MJlDBl2Z/1RHEbfxfNkDFelftFftE6H8KdSvtI0CeG9/tA28sL25I+ys6uyuufuMWGCFBzgZ56fkf8VPiprXxT8Sz69rUpeTaltBFgonkRZCFlBOWwc5OCT3HFfseRZc6tWliVF+ynTjeLVvhcU13vo18lZbH8+8cccTnha2DwsuesnKnKTd0pbczadrxjZ91LpZaeZ3M/2i5luW3mWeVpZFYluZTycHPIycckjJBNKM8k46c7s5JPtjP1PB9qaQWcNuVm6gHr6YYAdB2OexJHSntjaxIKnk8jO088HOCCOuM9DyT0r9IpNKKjG6tpo+istH2W2i6aW0P5sxClUnKdSd5t803dtNu1/K+t79vva4GAc4znOe+D6Zye2eePyqMsv3ck5yFAwARkYwDzgn1/A9KbvJOOMcYyOQMD8PXHXn60D7248gcHj5gO+OB/+qu6N/na3zuu7XXr8zk5lGLsr3sr7pO+6s9d1+fXSVidvy7uudozgY7c/wAR/me1L1XODu5JJP3Tx1Pf6ZyPWo945Azg4xkZBz09MH/6x6Uisy5HRefblufmPuPr16ZrZJ/du/u9bJ66adrdSXO75b9Vf7rX9b6P/gaTEbjlsgEA8cY468DGDg8ZB6e+Wj58AAYyR6HA7HPHPPXvn3pocncOvGANvA7gnA+pBz0x6cNycgDIODyOhb0H65zznvyaznBW1va1/R6K+9tU769V2KUmte667/ndPQkZVGAxI/ugdMjjJxyM9foPrSL0wCSCfvHAOOwPXkYwOv09Gb2JHGWzjjnjnOB6YHOeCfTBy5WwSCMYPAI43cH8/TnOfxqCm00lZq3y00b7377X2fTVxXkfez94AnnJHX0xx9SM8c1IqYiBPzkN13YYA9iOOnPpk5645YWwdw7gY5wQSMD6HBIz2zzwaYHYYyAQ2VAxgbj1HoAOMn3yOtAnJ6q/Z9bryu/NdCYuqhSpzkncvUY+nr7fXvyFVixAB+9kcDBAHQnHf/P1rk4BbJ4HHHy5J6Z75/rjHU0iFuoB5Jwc557KB9M/l+UTdlpu32vt/X/AHZu9+2ravq7WXV9dWvuWhf4AIH3geCRyRj0ye3bnHfFKTkcMN2OmeAR3P06gZIIqHcVGcjdgKMjIyT17YP09ehycighlzjJyMdiTnBz6dc9j169c5NNvRdNe+1ur67dSCUNt5BG4k5J5GO2Ovy9fpnsc4sJlsHA5HLAc5z24OQeOxz254qsQASSDheBxz9eufXHJ68VKknygDIXGQQcMMHj27/XH40lvpvf8f6tuJvS617FkrwGYkkdPXHQ/U9sduntTT5Y6YLdQc5yB/ePboe365qIyEYIw3Ppnk555wOQDz65NCnLhSBliQFAzz1Jz+Zbk9+K6Ixdlp9nf0tdLv/wO9iedK19Xpf8AW/UmR23EsMccYxkjOAMfQ5I4GTjnrUpbIPPUA4AHUdyOcc/gD+GIgCu4kH5c4I7k8Kfy7HOcHqOKVc9WySxwO+M479uM49c/99TJ20791f108ut7ad9hx+/r0sr2dkr3tpfVCoASWY4OcZJxgY7Dkc/T04yKnABQbioxyDn+HPB46n1GB3x7w8DPGAPm6cnjrz1255yOOKTfggY4OMA45GMYAzkD2z6dqyvFX5rt6W1312VtdPusDklv1t20+7X13X4FgYK8ZbcRn09wR+A5yOvXqAowCrDBxxjJOT3GPx9ueOCah3hcnJVQSFGDjJ7nqV5zznjpg800E7dw/vHBGeh6kZPXngDscfXSF/NpbN28un/Bt5DTT/D8Vf8AIuAnccHIYbs5zhT1yMcHpnA5+o5KjRyVUZIP8RP3s/XHQ8cE/nmiupbL0+fz/r/MjnSbsr69/Rdu1j//2Q==',\n            ),\n       
   ]),\n        ),\n      ]);\n      final chatModel = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(model: defaultModel),\n      );\n\n      final res = await chatModel.invoke(prompt);\n      expect(res.output.content.toLowerCase(), contains('apple'));\n    });\n\n    test('Test additive bind calls', () async {\n      final chatModel = ChatOpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const ChatOpenAIOptions(\n          model: defaultModel,\n          temperature: 0,\n        ),\n      );\n\n      final chatModelWithTools = chatModel.bind(\n        const ChatOpenAIOptions(tools: [getCurrentWeatherTool, jokeTool]),\n      );\n\n      final res1 = await chatModelWithTools.invoke(\n        PromptValue.string(\n          'Tell me the weather in Barcelona, Spain and a joke about bears',\n        ),\n      );\n      expect(res1.output.toolCalls.map((tc) => tc.name).toSet(), {\n        getCurrentWeatherTool.name,\n        jokeTool.name,\n      });\n\n      final chatModelForceWeatherTool = chatModelWithTools.bind(\n        ChatOpenAIOptions(\n          toolChoice: ChatToolChoice.forced(name: getCurrentWeatherTool.name),\n        ),\n      );\n\n      final res2 = await chatModelForceWeatherTool.invoke(\n        PromptValue.string(\n          'Tell me the weather in Barcelona, Spain and a joke about bears',\n        ),\n      );\n      expect(res2.output.toolCalls.map((tc) => tc.name).toSet(), {\n        getCurrentWeatherTool.name,\n      });\n\n      final chatModelForceJokeTool = chatModelWithTools.bind(\n        ChatOpenAIOptions(\n          toolChoice: ChatToolChoice.forced(name: jokeTool.name),\n        ),\n      );\n\n      final res3 = await chatModelForceJokeTool.invoke(\n        PromptValue.string(\n          'Tell me the weather in Barcelona, Spain and a joke about bears',\n        ),\n      );\n      expect(res3.output.toolCalls.map((tc) => tc.name).toSet(), {\n        jokeTool.name,\n      });\n   
 });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/github_models_test.dart",
    "content": "// ignore_for_file: avoid_print\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('GitHub Models tests', () {\n    late ChatOpenAI chatModel;\n\n    setUp(() {\n      chatModel = ChatOpenAI(\n        apiKey: Platform.environment['GITHUB_TOKEN'],\n        baseUrl: 'https://models.inference.ai.azure.com',\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test invoke GitHub Models API with different models', () async {\n      final models = [\n        'gpt-4o',\n        'AI21-Jamba-Instruct',\n        'meta-llama-3.1-405b-instruct',\n        'Mistral-large',\n        'Phi-3.5-mini-instruct',\n      ];\n      for (final model in models) {\n        print('Testing model: $model');\n        final res = await chatModel.invoke(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model, temperature: 0),\n        );\n\n        expect(res.id, isNotEmpty);\n        expect(\n          res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n          contains('123456789'),\n        );\n        expect(res.metadata, isNotEmpty, reason: model);\n        expect(res.metadata['created'], greaterThan(0), reason: model);\n        expect(res.metadata['model'], isNotEmpty, reason: model);\n      }\n    });\n\n    test('Test stream GitHub Models API with different models', () async {\n      final models = [\n        'gpt-4o',\n        'AI21-Jamba-Instruct',\n        'meta-llama-3.1-405b-instruct',\n        'Phi-3.5-mini-instruct',\n      ];\n      for (final model in models) {\n        print('Testing model: $model');\n        final stream = chatModel.stream(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model, temperature: 0),\n        );\n\n        var content = '';\n        var count = 0;\n        await for (final res in stream) {\n          content += res.output.content.replaceAll(RegExp(r'[\\s\\n]'), '');\n          count++;\n        }\n        expect(count, greaterThan(1), reason: model);\n        expect(content, contains('123456789'), reason: model);\n      }\n    });\n\n    test('Test countTokens', () async {\n      final models = [\n        'gpt-4o',\n        'AI21-Jamba-Instruct',\n        'meta-llama-3.1-405b-instruct',\n        'Mistral-large',\n        'Phi-3.5-mini-instruct',\n      ];\n      for (final model in models) {\n        print('Testing model: $model');\n        const text = 'Hello, how are you?';\n\n        final numTokens = await chatModel.countTokens(\n          PromptValue.chat([ChatMessage.humanText(text)]),\n          options: ChatOpenAIOptions(model: model),\n        );\n        expect(numTokens, 13, reason: model);\n      }\n    });\n\n    test(\n      'Test tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        const tool = ToolSpec(\n          name: 'get_current_weather',\n          description: 'Get the current weather in a given location',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {\n                'type': 'string',\n                'description': 'The city and state, e.g. 
San Francisco, CA',\n              },\n              'unit': {\n                'type': 'string',\n                'description': 'The unit of temperature to return',\n                'enum': ['celsius', 'fahrenheit'],\n              },\n            },\n            'required': ['location'],\n          },\n        );\n\n        final humanMessage = ChatMessage.humanText(\n          'What’s the weather like in Boston right now?',\n        );\n        final res1 = await chatModel.invoke(\n          PromptValue.chat([humanMessage]),\n          options: const ChatOpenAIOptions(model: 'gpt-4o', tools: [tool]),\n        );\n\n        final aiMessage1 = res1.output;\n\n        expect(aiMessage1.content, isEmpty);\n        expect(aiMessage1.toolCalls, isNotEmpty);\n        final toolCall = aiMessage1.toolCalls.first;\n\n        expect(toolCall.name, tool.name);\n        expect(toolCall.arguments.containsKey('location'), isTrue);\n        expect(toolCall.arguments['location'], contains('Boston'));\n\n        final functionResult = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage = ChatMessage.tool(\n          toolCallId: toolCall.id,\n          content: json.encode(functionResult),\n        );\n\n        final res2 = await chatModel.invoke(\n          PromptValue.chat([humanMessage, aiMessage1, functionMessage]),\n          options: const ChatOpenAIOptions(model: 'gpt-4o', tools: [tool]),\n        );\n\n        final aiMessage2 = res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/open_router_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:convert';\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenRouter tests', () {\n    late ChatOpenAI chatModel;\n\n    setUp(() {\n      chatModel = ChatOpenAI(\n        apiKey: Platform.environment['OPEN_ROUTER_API_KEY'],\n        baseUrl: 'https://openrouter.ai/api/v1',\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test invoke OpenRouter API with different models', () async {\n      final models = [\n        'gpt-4o-mini',\n        'gpt-4',\n        'google/gemini-pro',\n        'anthropic/claude-2',\n        'mistralai/mixtral-8x7b-instruct',\n        'mistralai/mistral-small',\n      ];\n      for (final model in models) {\n        final res = await chatModel.invoke(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model),\n        );\n\n        expect(res.id, isNotEmpty);\n        expect(\n          res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n          contains('123456789'),\n        );\n        expect(res.metadata, isNotEmpty, reason: model);\n        expect(res.metadata['created'], greaterThan(0), reason: model);\n        expect(res.metadata['model'], isNotEmpty, reason: model);\n      }\n    });\n\n    test('Test stream OpenRouter API with different models', () async {\n      final models = [\n        'gpt-4o-mini',\n        'gpt-4',\n        // 'google/gemini-pro', // Not supported\n        'anthropic/claude-2',\n        'mistralai/mixtral-8x7b-instruct',\n        'mistralai/mistral-small',\n        // 'haotian-liu/llava-13b', // Not supported\n      ];\n      for (final model in models) {\n        final stream = chatModel.stream(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model),\n        );\n\n        var content = '';\n        var count = 0;\n        await for (final res in stream) {\n          content += res.output.content.replaceAll(RegExp(r'[\\s\\n]'), '');\n          count++;\n        }\n        expect(count, greaterThan(1), reason: model);\n        expect(content, contains('123456789'), reason: model);\n      }\n    });\n\n    test('Test countTokens', () async {\n      final models = [\n        'gpt-4o-mini',\n        'gpt-4',\n        'google/gemini-pro',\n        'anthropic/claude-2',\n        'mistralai/mixtral-8x7b-instruct',\n        'mistralai/mistral-small',\n        'haotian-liu/llava-13b',\n      ];\n      for (final model in models) {\n        const text = 'Hello, how are you?';\n\n        final numTokens = await chatModel.countTokens(\n          PromptValue.chat([ChatMessage.humanText(text)]),\n          options: ChatOpenAIOptions(model: model),\n        );\n        expect(numTokens, 13, reason: model);\n      }\n    });\n\n    test(\n      'Test tool calling',\n      timeout: const Timeout(Duration(minutes: 1)),\n      () async {\n        const tool = ToolSpec(\n          name: 'get_current_weather',\n          description: 'Get the current weather in a given location',\n          inputJsonSchema: {\n            'type': 'object',\n            'properties': {\n              'location': {\n                'type': 'string',\n                'description': 'The city and state, e.g. 
San Francisco, CA',\n              },\n              'unit': {\n                'type': 'string',\n                'description': 'The unit of temperature to return',\n                'enum': ['celsius', 'fahrenheit'],\n              },\n            },\n            'required': ['location'],\n          },\n        );\n\n        final humanMessage = ChatMessage.humanText(\n          'What’s the weather like in Boston right now?',\n        );\n        final res1 = await chatModel.invoke(\n          PromptValue.chat([humanMessage]),\n          options: const ChatOpenAIOptions(model: 'gpt-4o', tools: [tool]),\n        );\n\n        final aiMessage1 = res1.output;\n\n        expect(aiMessage1.content, isEmpty);\n        expect(aiMessage1.toolCalls, isNotEmpty);\n        final toolCall = aiMessage1.toolCalls.first;\n\n        expect(toolCall.name, tool.name);\n        expect(toolCall.arguments.containsKey('location'), isTrue);\n        expect(toolCall.arguments['location'], contains('Boston'));\n\n        final functionResult = {\n          'temperature': '22',\n          'unit': 'celsius',\n          'description': 'Sunny',\n        };\n        final functionMessage = ChatMessage.tool(\n          toolCallId: toolCall.id,\n          content: json.encode(functionResult),\n        );\n\n        final res2 = await chatModel.invoke(\n          PromptValue.chat([humanMessage, aiMessage1, functionMessage]),\n          options: const ChatOpenAIOptions(model: 'gpt-4o', tools: [tool]),\n        );\n\n        final aiMessage2 = res2.output;\n\n        expect(aiMessage2.toolCalls, isEmpty);\n        expect(aiMessage2.content, contains('22'));\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/chat_models/together_ai_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/chat_models.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('TogetherAI tests', () {\n    late ChatOpenAI chatModel;\n\n    setUp(() {\n      chatModel = ChatOpenAI(\n        apiKey: Platform.environment['TOGETHER_AI_API_KEY'],\n        baseUrl: 'https://api.together.xyz/v1',\n      );\n    });\n\n    tearDown(() {\n      chatModel.close();\n    });\n\n    test('Test invoke TogetherAI API with different models', () async {\n      final models = [\n        'mistralai/Mixtral-8x7B-Instruct-v0.1',\n        'mistralai/Mistral-7B-Instruct-v0.2',\n        'NousResearch/Nous-Hermes-2-Yi-34B',\n        'openchat/openchat-3.5-1210',\n        'togethercomputer/llama-2-70b-chat',\n      ];\n      for (final model in models) {\n        final res = await chatModel.invoke(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model),\n        );\n\n        expect(res.id, isNotEmpty);\n        expect(\n          res.output.content.replaceAll(RegExp(r'[\\s\\n]'), ''),\n          contains('123456789'),\n        );\n        expect(res.metadata, isNotEmpty, reason: model);\n        expect(res.metadata['created'], greaterThan(0), reason: model);\n        expect(res.metadata['model'], isNotEmpty, reason: model);\n        await Future<void>.delayed(const Duration(seconds: 1)); // Rate limit\n      }\n    });\n\n    test('Test stream TogetherAI API with different models', () async {\n      final models = [\n        'mistralai/Mixtral-8x7B-Instruct-v0.1',\n        'mistralai/Mistral-7B-Instruct-v0.2',\n        'NousResearch/Nous-Hermes-2-Yi-34B',\n        'openchat/openchat-3.5-1210',\n        'togethercomputer/llama-2-70b-chat',\n      ];\n      for (final model in models) {\n        final stream = chatModel.stream(\n          PromptValue.string(\n            'List the numbers from 1 to 9 in order. '\n            'Output ONLY the numbers in one line without any spaces or commas. 
'\n            'NUMBERS:',\n          ),\n          options: ChatOpenAIOptions(model: model),\n        );\n\n        var content = '';\n        var count = 0;\n        await for (final res in stream) {\n          content += res.output.content.replaceAll(RegExp(r'[\\s\\n]'), '');\n          count++;\n        }\n        expect(count, greaterThan(1), reason: model);\n        expect(content, contains('123456789'), reason: model);\n        await Future<void>.delayed(const Duration(seconds: 1)); // Rate limit\n      }\n    });\n\n    test('Test countTokens', () async {\n      final models = [\n        'mistralai/Mixtral-8x7B-Instruct-v0.1',\n        'mistralai/Mistral-7B-Instruct-v0.2',\n        'NousResearch/Nous-Hermes-2-Yi-34B',\n        'openchat/openchat-3.5-1210',\n        'togethercomputer/llama-2-70b-chat',\n        'togethercomputer/falcon-40b-instruct',\n      ];\n      for (final model in models) {\n        const text = 'Hello, how are you?';\n\n        final numTokens = await chatModel.countTokens(\n          PromptValue.chat([ChatMessage.humanText(text)]),\n          options: ChatOpenAIOptions(model: model),\n        );\n        expect(numTokens, 13, reason: model);\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/embeddings/openai_embeddings_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenAIEmbeddings tests', () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n    test('Test OpenAIEmbeddings.embedQuery', () async {\n      final models = [\n        ('text-embedding-ada-002', 1536),\n        ('text-embedding-3-small', 1536),\n        ('text-embedding-3-large', 3072),\n      ];\n\n      for (final (modelId, modelDim) in models) {\n        final embeddings = OpenAIEmbeddings(\n          apiKey: openaiApiKey,\n          model: modelId,\n        );\n        final res = await embeddings.embedQuery('Hello world');\n        expect(res.length, modelDim, reason: modelId);\n      }\n    });\n\n    test('Test OpenAIEmbeddings.embedDocuments', () async {\n      final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey, batchSize: 1);\n      final res = await embeddings.embedDocuments([\n        const Document(id: '1', pageContent: 'Hello world'),\n        const Document(id: '2', pageContent: 'Bye bye'),\n      ]);\n      expect(res.length, 2);\n      expect(res[0].length, 1536);\n      expect(res[1].length, 1536);\n    });\n\n    test('Test shortening embeddings', () async {\n      final embeddings = OpenAIEmbeddings(\n        apiKey: openaiApiKey,\n        model: 'text-embedding-3-large',\n        dimensions: 256,\n      );\n      final res = await embeddings.embedQuery('Hello world');\n      expect(res.length, 256);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/embeddings/together_ai_embeddings_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Together AI Embeddings tests', () {\n    late OpenAIEmbeddings embeddings;\n\n    setUp(() {\n      embeddings = OpenAIEmbeddings(\n        apiKey: Platform.environment['TOGETHER_AI_API_KEY'],\n        baseUrl: 'https://api.together.xyz/v1',\n      );\n    });\n\n    tearDown(() {\n      embeddings.close();\n    });\n\n    test('Test AI Embeddings models', () async {\n      final models = [\n        'togethercomputer/m2-bert-80M-2k-retrieval',\n        'togethercomputer/m2-bert-80M-8k-retrieval',\n        'togethercomputer/m2-bert-80M-32k-retrieval',\n        'WhereIsAI/UAE-Large-V1',\n        'BAAI/bge-large-en-v1.5',\n        'BAAI/bge-base-en-v1.5',\n        'sentence-transformers/msmarco-bert-base-dot-v5',\n        'bert-base-uncased',\n      ];\n      for (final model in models) {\n        embeddings.model = model;\n        final res = await embeddings.embedQuery('Hello world');\n        expect(res.length, greaterThan(0));\n        await Future<void>.delayed(const Duration(seconds: 2)); // Rate limit\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/llms/openai_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/llms.dart';\nimport 'package:langchain_core/prompts.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenAI tests', () {\n    const defaultModel = 'gpt-3.5-turbo-instruct';\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n\n    late OpenAI llm;\n\n    setUp(() {\n      llm = OpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const OpenAIOptions(model: defaultModel),\n      );\n    });\n\n    tearDown(() {\n      llm.close();\n    });\n\n    test('Test OpenAI parameters', () {\n      final llm = OpenAI(\n        apiKey: openaiApiKey,\n        defaultOptions: const OpenAIOptions(\n          model: defaultModel,\n          maxTokens: 10,\n          temperature: 0.1,\n          topP: 0.1,\n          n: 10,\n          presencePenalty: 0.1,\n          frequencyPenalty: 0.1,\n          bestOf: 10,\n          logitBias: {'foo': 1},\n          user: 'foo',\n        ),\n      );\n      expect(llm.apiKey, openaiApiKey);\n      expect(llm.defaultOptions.model, defaultModel);\n      expect(llm.defaultOptions.maxTokens, 10);\n      expect(llm.defaultOptions.temperature, 0.1);\n      expect(llm.defaultOptions.topP, 0.1);\n      expect(llm.defaultOptions.n, 10);\n      expect(llm.defaultOptions.presencePenalty, 0.1);\n      expect(llm.defaultOptions.frequencyPenalty, 0.1);\n      expect(llm.defaultOptions.bestOf, 10);\n      expect(llm.defaultOptions.logitBias, {'foo': 1.0});\n      expect(llm.defaultOptions.user, 'foo');\n    });\n\n    test('Test call to OpenAI', () async {\n      final output = await llm('Say foo:');\n      expect(output, isNotEmpty);\n    });\n\n    test('Test close OpenAI', () async {\n      final output = await llm('Say foo:');\n      expect(output, isNotEmpty);\n      llm.close();\n      expect(() => llm('Say foo:'), 
throwsA(isA<StateError>()));\n    });\n\n    test('Test invoke to OpenAI', () async {\n      final res = await llm.invoke(PromptValue.string('Hello, how are you?'));\n      expect(res.output, isNotEmpty);\n    });\n\n    test('Test model output contains metadata', () async {\n      final res = await llm.invoke(PromptValue.string('Hello, how are you?'));\n      expect(res.id, isNotEmpty);\n      expect(res.metadata, isNotNull);\n      expect(res.metadata['created'], isNotNull);\n      expect(res.metadata['model'], llm.defaultOptions.model);\n    });\n\n    test('Test stop logic on valid configuration', () async {\n      final res = await llm.invoke(\n        PromptValue.string('write an ordered list of five items'),\n        options: const OpenAIOptions(stop: ['3'], temperature: 0),\n      );\n      expect(res.output.contains('2.'), isTrue);\n      expect(res.output.contains('3.'), isFalse);\n    });\n\n    test('Test tokenize', () async {\n      final text = PromptValue.string('Hello, how are you?');\n      final tokens = await llm.tokenize(text);\n      expect(tokens, [9906, 11, 1268, 527, 499, 30]);\n    });\n\n    test('Test different encoding than the model', () async {\n      llm.encoding = 'cl100k_base';\n      const text = 'Hello, how are you?';\n      final tokens = await llm.tokenize(PromptValue.string(text));\n      expect(tokens, [9906, 11, 1268, 527, 499, 30]);\n    });\n\n    test('Test countTokens', () async {\n      const text = 'Hello, how are you?';\n      final numTokens = await llm.countTokens(PromptValue.string(text));\n      expect(numTokens, 6);\n    });\n\n    test('Test streaming', () async {\n      final promptTemplate = PromptTemplate.fromTemplate(\n        'List the numbers from 1 to {max_num} in order without any spaces or commas',\n      );\n\n      final chain = promptTemplate.pipe(llm);\n\n      final stream = chain.stream({'max_num': '9'});\n\n      LLMResult? 
result;\n      var count = 0;\n      await for (final res in stream) {\n        result = result?.concat(res) ?? res;\n        count++;\n      }\n      expect(count, greaterThan(1));\n      expect(result!.output, contains('123456789'));\n      expect(result.usage.promptTokens, greaterThan(0));\n      expect(result.usage.responseTokens, greaterThan(0));\n      expect(result.usage.totalTokens, greaterThan(0));\n    });\n\n    test('Test response seed', () async {\n      final prompt = PromptValue.string('How are you?');\n      const options = OpenAIOptions(\n        model: defaultModel,\n        temperature: 0,\n        seed: 9999,\n      );\n\n      final res1 = await llm.invoke(prompt, options: options);\n      final res2 = await llm.invoke(prompt, options: options);\n\n      expect(\n        res1.metadata['system_fingerprint'],\n        res2.metadata['system_fingerprint'],\n      );\n      expect(res1.output, res2.output);\n    });\n\n    test('Test batch with same options for all inputs', () async {\n      const count = 10;\n      final prompts = List.generate(\n        count,\n        (final i) => PromptValue.string('Output the following number: $i'),\n      );\n\n      final res = await llm.batch(\n        prompts,\n        options: [const OpenAIOptions(concurrencyLimit: count ~/ 2)],\n      );\n\n      expect(res.length, count);\n      for (var i = 0; i < count; i++) {\n        expect(res[i].id.endsWith(':${i % (count ~/ 2)}'), isTrue);\n        expect(res[i].output.contains('$i'), isTrue);\n      }\n    });\n\n    test('Test batch with different options per input', () async {\n      const count = 10;\n      final prompts = List.generate(\n        count,\n        (final i) => PromptValue.string('Output the following number: $i'),\n      );\n      final options = List.generate(\n        count,\n        (final i) => OpenAIOptions(temperature: 0 + (i / 10)),\n      );\n\n      final res = await llm.batch(prompts, options: options);\n\n      expect(res.length, 
count);\n      for (var i = 0; i < count; i++) {\n        expect(res[i].id.endsWith(':0'), isTrue);\n        expect(res[i].output.contains('$i'), isTrue);\n      }\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_openai/test/tools/dall_e_test.dart",
    "content": "// ignore_for_file: avoid_redundant_argument_values\n@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart' show AgentExecutor, ToolsAgent;\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/tools.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('OpenAIDallETool tests', () {\n    final openAiKey = Platform.environment['OPENAI_API_KEY'];\n\n    test('Test generate image returned as URL', () async {\n      final tool = OpenAIDallETool(\n        apiKey: openAiKey,\n        defaultOptions: const OpenAIDallEToolOptions(\n          model: 'dall-e-2',\n          size: ImageSize.size256x256,\n        ),\n      );\n      final res = await tool.invoke('A cute baby sea otter');\n      expect(res, startsWith('http'));\n      tool.close();\n    });\n\n    test('Test generate image returned as base64', () async {\n      final tool = OpenAIDallETool(\n        apiKey: openAiKey,\n        defaultOptions: const OpenAIDallEToolOptions(\n          model: 'dall-e-2',\n          size: ImageSize.size256x256,\n          responseFormat: ImageResponseFormat.b64Json,\n        ),\n      );\n      final res = await tool.invoke('A cute baby sea otter');\n      expect(res, isNot(startsWith('http')));\n      tool.close();\n    });\n\n    test(\n      'Test OpenAIDallETool in an agent',\n      timeout: const Timeout(Duration(minutes: 2)),\n      skip: false,\n      () async {\n        final llm = ChatOpenAI(\n          apiKey: openAiKey,\n          defaultOptions: const ChatOpenAIOptions(\n            model: 'gpt-4',\n            temperature: 0,\n          ),\n        );\n\n        final List<Tool> tools = [\n          CalculatorTool(),\n          OpenAIDallETool(\n            apiKey: openAiKey,\n            defaultOptions: const OpenAIDallEToolOptions(\n              model: 'dall-e-2',\n              size: 
ImageSize.size256x256,\n            ),\n          ),\n        ];\n\n        final agent = ToolsAgent.fromLLMAndTools(llm: llm, tools: tools);\n\n        final executor = AgentExecutor(agent: agent);\n\n        final res = await executor.run(\n          'Calculate the result of 40 raised to the power of 0.43 and generate a funny illustration with it. '\n          'Return ONLY the URL of the image. Do not add any explanation.',\n        );\n\n        expect(res, startsWith('https://'));\n      },\n    );\n  });\n}\n"
  },
  {
    "path": "packages/langchain_pinecone/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_pinecone/CHANGELOG.md",
    "content": "## 0.1.1+2\n\n - Update a dependency to the latest release.\n\n## 0.1.1+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.1.1\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). ([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.1.0+11\n\n - Update a dependency to the latest release.\n\n## 0.1.0+10\n\n - **FIX**: UUID 'Namespace' can't be assigned to the parameter type 'String?' ([#566](https://github.com/davidmigloz/langchain_dart/issues/566)). ([1e93a595](https://github.com/davidmigloz/langchain_dart/commit/1e93a595f2f166da2cae3f7cfcdbb28892abf9b5))\n\n## 0.1.0+9\n\n - Update a dependency to the latest release.\n\n## 0.1.0+8\n\n - Update a dependency to the latest release.\n\n## 0.1.0+7\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). 
([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.1.0+6\n\n - Update a dependency to the latest release.\n\n## 0.1.0+5\n\n - Update a dependency to the latest release.\n\n## 0.1.0+4\n\n - Update a dependency to the latest release.\n\n## 0.1.0+3\n\n - Update a dependency to the latest release.\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n## 0.0.7\n\n - **FEAT**: Update meta and test dependencies ([#331](https://github.com/davidmigloz/langchain_dart/issues/331)). ([912370ee](https://github.com/davidmigloz/langchain_dart/commit/912370ee0ba667ee9153303395a457e6caf5c72d))\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.0.6+14\n\n - **DOCS**: Update CHANGELOG.md. 
([d0d46534](https://github.com/davidmigloz/langchain_dart/commit/d0d46534565d6f52d819d62329e8917e00bc7030))\n\n## 0.0.6+13\n\n - Update a dependency to the latest release.\n\n## 0.0.6+12\n\n - Update a dependency to the latest release.\n\n## 0.0.6+11\n\n - Update a dependency to the latest release.\n\n## 0.0.6+10\n\n - Update a dependency to the latest release.\n\n## 0.0.6+9\n\n - Update a dependency to the latest release.\n\n## 0.0.6+8\n\n - Update a dependency to the latest release.\n\n## 0.0.6+7\n\n - Update a dependency to the latest release.\n\n## 0.0.6+6\n\n - Update a dependency to the latest release.\n\n## 0.0.6+5\n\n - **FIX**: Decode JSON responses as UTF-8 in Pinecone ([#236](https://github.com/davidmigloz/langchain_dart/issues/236)). ([edb427b1](https://github.com/davidmigloz/langchain_dart/commit/edb427b16e6cd938adcaaa7cf641f4df6632f479))\n\n## 0.0.6+4\n\n - Update a dependency to the latest release.\n\n## 0.0.6+3\n\n - Update a dependency to the latest release.\n\n## 0.0.6+2\n\n - **DOCS**: Add public_member_api_docs lint rule and document missing APIs ([#223](https://github.com/davidmigloz/langchain_dart/issues/223)). ([52380433](https://github.com/davidmigloz/langchain_dart/commit/523804331783970870b023946c016be6c0797920))\n\n## 0.0.6+1\n\n - Update a dependency to the latest release.\n\n## 0.0.6\n\n - **FEAT**: Add support for global headers in Pinecone ([#213](https://github.com/davidmigloz/langchain_dart/issues/213)). ([8e0d221f](https://github.com/davidmigloz/langchain_dart/commit/8e0d221fad55b1fa62d9ff6f97476ee647837c6b))\n\n## 0.0.5+2\n\n - **DOCS**: Update vector stores documentation. ([dad60d24](https://github.com/davidmigloz/langchain_dart/commit/dad60d247fac157f2980f73c14ac88e9a0894fba))\n\n## 0.0.5+1\n\n - Update a dependency to the latest release.\n\n## 0.0.5\n\n - **FEAT**: Upgrade Pinecone client to v0.6.0 ([#188](https://github.com/davidmigloz/langchain_dart/issues/188)). 
([57e2587f](https://github.com/davidmigloz/langchain_dart/commit/57e2587fa3849e7aea199dd52e2cb2ce4f61946a))\n - **DOCS**: Update CHANGELOG.md. ([5ea4e532](https://github.com/davidmigloz/langchain_dart/commit/5ea4e5326e706a52d157284a281eb881e05117c5))\n\n## 0.0.4\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **FEAT**: Update uuid internal dependency to 4.x.x ([#173](https://github.com/davidmigloz/langchain_dart/issues/173)). ([b01f4afe](https://github.com/davidmigloz/langchain_dart/commit/b01f4afea6cfcdf8a0aa6e1b11d3057efa6e5fc0))\n\n## 0.0.3\n\n - **FIX**: Update Pinecone client version ([#160](https://github.com/davidmigloz/langchain_dart/issues/160)). ([d15cc576](https://github.com/davidmigloz/langchain_dart/commit/d15cc5761563476fe7b5d66effd42ded077dbbbc))\n\n## 0.0.2+1\n\n - **REFACTOR**: Require `http.Client` instead of `AuthClient` ([#156](https://github.com/davidmigloz/langchain_dart/issues/156)). ([0f7fee7f](https://github.com/davidmigloz/langchain_dart/commit/0f7fee7f0780e5b650ec50307a7fda65e242e822))\n\n## 0.0.2\n\n - **DOCS**: Fix invalid package topics. ([f81b833a](https://github.com/davidmigloz/langchain_dart/commit/f81b833aae33e0a945ef4450da12344886224bae))\n - **DOCS**: Add topics to pubspecs. ([8c1d6297](https://github.com/davidmigloz/langchain_dart/commit/8c1d62970710cc326fd5930101918aaf16b18f74))\n - **DOCS**: Update changelog. ([df784ff1](https://github.com/davidmigloz/langchain_dart/commit/df784ff108584b0732ec9455f1531636256e9c4e))\n\n## 0.0.1\n\n - **DOCS**: Update readme. ([e1b5b295](https://github.com/davidmigloz/langchain_dart/commit/e1b5b2958bdf2b787c8b49aeeb6690c33c225943))\n - **DOCS**: Update packages example. ([4f8488fc](https://github.com/davidmigloz/langchain_dart/commit/4f8488fcb324e31b9d8dece7d1999333d7982253))\n - **FEAT**: Add support for Pinecone VectorStore ([#37](https://github.com/davidmigloz/langchain_dart/issues/37)). 
([e43eef97](https://github.com/davidmigloz/langchain_dart/commit/e43eef979c329fc72a3eed72d818992287838a80))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_pinecone/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_pinecone/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Pinecone\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_pinecone](https://img.shields.io/pub/v/langchain_pinecone.svg)](https://pub.dev/packages/langchain_pinecone)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nPinecone module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Vector stores:\n    * `Pinecone` vector store that uses [Pinecone](https://www.pinecone.io) \n      fully-managed vector database.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_pinecone/example/langchain_pinecone_example.dart",
    "content": "// ignore_for_file: avoid_print\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:langchain_pinecone/langchain_pinecone.dart';\n\nvoid main() async {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n  final pineconeApiKey = Platform.environment['PINECONE_API_KEY']!;\n  final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n  final vectorStore = Pinecone(\n    apiKey: pineconeApiKey,\n    indexName: 'langchain-dart',\n    embeddings: embeddings,\n  );\n\n  // Add documents to the vector store\n  await vectorStore.addDocuments(\n    documents: const [\n      Document(\n        id: '1',\n        pageContent: 'The cat sat on the mat',\n        metadata: {'cat': 'animal'},\n      ),\n      Document(\n        id: '2',\n        pageContent: 'The dog chased the ball.',\n        metadata: {'cat': 'animal'},\n      ),\n      Document(\n        id: '3',\n        pageContent: 'The boy ate the apple.',\n        metadata: {'cat': 'person'},\n      ),\n      Document(\n        id: '4',\n        pageContent: 'The girl drank the milk.',\n        metadata: {'cat': 'person'},\n      ),\n      Document(\n        id: '5',\n        pageContent: 'The sun is shining.',\n        metadata: {'cat': 'natural'},\n      ),\n    ],\n  );\n\n  // Query the vector store\n  final res = await vectorStore.similaritySearch(\n    query: 'What are they eating?',\n    config: const PineconeSimilaritySearch(\n      k: 2,\n      scoreThreshold: 0.4,\n      filter: {'cat': 'person'},\n    ),\n  );\n  print(res);\n}\n"
  },
  {
    "path": "packages/langchain_pinecone/lib/langchain_pinecone.dart",
    "content": "/// LangChain.dart integration module for Pinecone fully-managed vector database.\nlibrary;\n\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_pinecone/lib/src/vector_stores/mappers.dart",
    "content": "import 'package:pinecone/pinecone.dart';\n\nimport 'types.dart';\n\n/// Mapper for [PineconeSparseVector].\nextension PineconeSparseVectorMapper on PineconeSparseVector {\n  /// Converts a [PineconeSparseVector] to a [SparseVector].\n  SparseVector toSparseVector() {\n    return SparseVector(\n      indices: indices,\n      values: values,\n    );\n  }\n}\n"
  },
  {
    "path": "packages/langchain_pinecone/lib/src/vector_stores/pinecone.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:pinecone/pinecone.dart';\nimport 'package:uuid/uuid.dart';\n\nimport 'mappers.dart';\nimport 'types.dart';\n\n/// {@template pinecone}\n/// Vector store for Pinecone vector database.\n///\n/// Pinecone documentation:\n/// https://docs.pinecone.io/\n///\n/// To use Pinecone, you must have an API key. To find your API key, open the\n/// Pinecone console and click API Keys.\n///\n/// Before using this vector store you need to create an index in Pinecone.\n/// You can do that in the Pinecone console or using a Pinecone API client.\n/// Check out the Pinecone documentation for more information regarding index\n/// type and size: https://docs.pinecone.io/docs/choosing-index-type-and-size\n///\n/// After creating the index, configure the index name in the [indexName]\n/// parameter and the cloud region in the [environment] parameter.\n///\n/// Example:\n/// ```dart\n/// final vectorStore = Pinecone(\n///   apiKey: pineconeApiKey,\n///   indexName: 'langchain-dart',\n///   environment: 'gcp-starter',\n///   embeddings: embeddings,\n/// );\n/// ```\n///\n/// Pinecone indexes store records with vector data. Each record in a Pinecone\n/// index always contains a unique ID and an array of floats representing a\n/// dense vector embedding. It can also contain a sparse vector embedding for\n/// hybrid search and metadata key-value pairs for filtered queries.\n///\n/// When you add documents to the index using this class, the document's page\n/// content will be stored in the index's metadata. 
You can configure the\n/// metadata key in the [docPageContentKey] parameter.\n///\n/// Mind that Pinecone supports 40kb of metadata per vector.\n///\n/// You can organize the vectors added to an index into partitions, or\n/// \"namespaces,\" to limit queries and other vector operations to only one such\n/// namespace at a time. You can configure the namespace in the [namespace]\n/// parameter.\n///\n/// ### Filtering\n///\n/// Metadata filter expressions can be included with queries to limit the\n/// search to only vectors matching the filter expression.\n///\n/// For example:\n/// ```dart\n/// final vectorStore = VectorStore(...);\n/// final res = await vectorStore.similaritySearch(\n///   query: 'What should I feed my cat?',\n///   config: PineconeSimilaritySearch(\n///     k: 5,\n///     scoreThreshold: 0.8,\n///     filter: {'class': 'cat'},\n///   ),\n/// );\n/// ```\n///\n/// Pinecone supports a wide range of operators for filtering. Check out the\n/// filtering section of the Pinecone docs for more info:\n/// https://docs.pinecone.io/docs/metadata-filtering#metadata-query-language\n/// {@endtemplate}\nclass Pinecone extends VectorStore {\n  /// {@macro pinecone}\n  Pinecone({\n    final String? apiKey,\n    final String? baseUrl,\n    final Map<String, String> headers = const {},\n    final Map<String, dynamic> queryParams = const {},\n    final http.Client? client,\n    required this.indexName,\n    this.environment = 'gcp-starter',\n    this.namespace,\n    this.docPageContentKey = 'page_content',\n    required super.embeddings,\n  }) : _client = PineconeClient(\n          apiKey: apiKey ?? '',\n          baseUrl: baseUrl,\n          headers: headers,\n          queryParams: queryParams,\n          client: client,\n        );\n\n  /// The name of the index.\n  final String indexName;\n\n  /// The cloud region for your project. See the Pinecone console > API keys.\n  final String environment;\n\n  /// The namespace of the index (optional).\n  final String? 
namespace;\n\n  /// The metadata key used to store the document's page content.\n  final String docPageContentKey;\n\n  /// The Pinecone client.\n  final PineconeClient _client;\n\n  /// A UUID generator.\n  late final _uuid = const Uuid();\n\n  /// The Pinecone index.\n  Index? _index;\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    assert(vectors.length == documents.length);\n\n    final index = await _getIndex();\n\n    final List<String> ids = [];\n    final List<Vector> vec = [];\n\n    for (var i = 0; i < documents.length; i++) {\n      final doc = documents[i];\n      final id = doc.id ?? _uuid.v4();\n      final vector = Vector(\n        id: id,\n        values: vectors[i],\n        metadata: {\n          ...doc.metadata,\n          docPageContentKey: doc.pageContent,\n        },\n      );\n      ids.add(id);\n      vec.add(vector);\n    }\n\n    await _client.upsertVectors(\n      indexName: index.name,\n      projectId: index.projectId,\n      environment: index.environment,\n      request: UpsertRequest(\n        namespace: namespace,\n        vectors: vec,\n      ),\n    );\n    return ids;\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) async {\n    final index = await _getIndex();\n    await _client.deleteVectors(\n      indexName: index.name,\n      projectId: index.projectId,\n      environment: index.environment,\n      request: DeleteRequest(ids: ids),\n    );\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config = const PineconeSimilaritySearch(),\n  }) async {\n    final pConfig = PineconeSimilaritySearch.fromBaseConfig(config);\n    final index = await _getIndex();\n    final queryRes = await _client.queryVectors(\n      indexName: index.name,\n      projectId: 
index.projectId,\n      environment: index.environment,\n      request: QueryRequest(\n        namespace: pConfig.namespace ?? namespace,\n        vector: embedding,\n        sparseVector: pConfig.sparseVector?.toSparseVector(),\n        topK: pConfig.k,\n        filter: pConfig.filter,\n        includeMetadata: true,\n      ),\n    );\n\n    final matches = queryRes.matches;\n    if (matches.isEmpty) {\n      return const [];\n    }\n\n    final List<(Document, double)> results = [];\n    for (final match in matches) {\n      final score = match.score ?? 0.0;\n      if (pConfig.scoreThreshold != null && score < pConfig.scoreThreshold!) {\n        continue;\n      }\n\n      final id = match.id;\n      final metadata = match.metadata ?? <String, dynamic>{};\n      final document = Document(\n        id: id,\n        pageContent: metadata[docPageContentKey] as String? ?? '',\n        metadata: {\n          for (final entry in metadata.entries)\n            if (entry.key != docPageContentKey) entry.key: entry.value,\n        },\n      );\n      results.add((document, score));\n    }\n    return results;\n  }\n\n  Future<Index> _getIndex() async {\n    if (_index != null) {\n      return _index!;\n    }\n\n    final index = await _client.describeIndex(\n      indexName: indexName,\n      environment: environment,\n    );\n\n    _index = index;\n    return index;\n  }\n}\n"
  },
  {
    "path": "packages/langchain_pinecone/lib/src/vector_stores/types.dart",
    "content": "import 'package:langchain_core/vector_stores.dart';\n\n/// {@template pinecone_similarity_search}\n/// Pinecone similarity search config.\n///\n/// Pinecone supports a wide range of operators for filtering. Check out the\n/// filtering section of the Pinecone docs for more info:\n/// https://docs.pinecone.io/docs/metadata-filtering#metadata-query-language\n///\n/// Example:\n/// ```dart\n/// PineconeSimilaritySearch(\n///   k: 5,\n///   filter: {'style': 'style1'},\n///   scoreThreshold: 0.8,\n/// ),\n/// ```\n/// {@endtemplate}\nclass PineconeSimilaritySearch extends VectorStoreSimilaritySearch {\n  /// {@macro pinecone_similarity_search}\n  const PineconeSimilaritySearch({\n    super.k = 4,\n    super.filter,\n    super.scoreThreshold,\n    this.namespace,\n    this.sparseVector,\n  });\n\n  /// The namespace of the index (optional).\n  ///\n  /// If specified, this will override the namespace specified in the\n  /// Pinecone constructor.\n  final String? namespace;\n\n  /// Sparse vector for hybrid search.\n  final PineconeSparseVector? sparseVector;\n\n  /// Creates a [PineconeSimilaritySearch] from a [VectorStoreSimilaritySearch].\n  factory PineconeSimilaritySearch.fromBaseConfig(\n    final VectorStoreSimilaritySearch config,\n  ) {\n    if (config is PineconeSimilaritySearch) {\n      return config;\n    }\n    return PineconeSimilaritySearch(\n      k: config.k,\n      filter: config.filter,\n      scoreThreshold: config.scoreThreshold,\n    );\n  }\n}\n\n/// {@template pinecone_sparse_vector}\n/// Pinecone sparse vector.\n/// {@endtemplate}\nclass PineconeSparseVector {\n  /// {@macro pinecone_sparse_vector}\n  const PineconeSparseVector({\n    required this.indices,\n    required this.values,\n  });\n\n  /// The indices of the sparse data.\n  final List<int>? indices;\n\n  /// The corresponding values of the sparse data, which must be the same\n  /// length as the indices.\n  final List<double>? values;\n}\n"
  },
  {
    "path": "packages/langchain_pinecone/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'pinecone.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_pinecone/pubspec.yaml",
    "content": "name: langchain_pinecone\ndescription: LangChain.dart integration module for Pinecone fully-managed vector database.\nversion: 0.1.1+2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_pinecone\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_pinecone\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - vector-db\n\nenvironment:\n  sdk: \">=3.6.0 <4.0.0\"\n#resolution: workspace\n\ndependencies:\n  http: ^1.4.0\n  langchain_core: 0.3.9\n  meta: ^1.16.0\n  pinecone: ^0.7.2\n  uuid: ^4.5.1\n\ndev_dependencies:\n  test: ^1.25.15\n  langchain_openai: ^0.7.6+1\n"
  },
  {
    "path": "packages/langchain_pinecone/test/vector_stores/pinecone_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:langchain_pinecone/langchain_pinecone.dart';\nimport 'package:test/test.dart';\n\nvoid main() {\n  group('Pinecone tests', timeout: const Timeout(Duration(minutes: 1)), () {\n    final openaiApiKey = Platform.environment['OPENAI_API_KEY']!;\n    final pineconeApiKey = Platform.environment['PINECONE_API_KEY']!;\n    final embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n    final vectorStore = Pinecone(\n      apiKey: pineconeApiKey,\n      indexName: 'langchain-dart',\n      embeddings: embeddings,\n    );\n\n    test('Test Pinecone add new vectors', () async {\n      final res = await vectorStore.addDocuments(\n        documents: const [\n          Document(\n            id: '1',\n            pageContent: 'The cat sat on the mat',\n            metadata: {'cat': 'animal'},\n          ),\n          Document(\n            id: '2',\n            pageContent: 'The dog chased the ball.',\n            metadata: {'cat': 'animal'},\n          ),\n          Document(\n            id: '3',\n            pageContent: 'The boy ate the apple.',\n            metadata: {'cat': 'person'},\n          ),\n          Document(\n            id: '4',\n            pageContent: 'The girl drank the milk.',\n            metadata: {'cat': 'person'},\n          ),\n          Document(\n            id: '5',\n            pageContent: 'The sun is shining.',\n            metadata: {'cat': 'natural'},\n          ),\n        ],\n      );\n\n      expect(res.length, 5);\n    });\n\n    test('Test Pinecone query return 1 result', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'Is it raining?',\n        config: const PineconeSimilaritySearch(k: 1),\n      );\n      expect(res.length, 1);\n      expect(\n        res.first.id,\n        '5',\n      );\n    });\n\n    
test('Test Pinecone query with scoreThreshold', () async {\n      final res = await vectorStore.similaritySearchWithScores(\n        query: 'Is it raining?',\n        config: const PineconeSimilaritySearch(scoreThreshold: 0.6),\n      );\n      for (final (_, score) in res) {\n        expect(score, greaterThan(0.6));\n      }\n    });\n\n    test('Test Pinecone query with filter', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What are they eating?',\n        config: const PineconeSimilaritySearch(\n          k: 10,\n          filter: {'cat': 'person'},\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['cat'], 'person');\n      }\n    });\n\n    test('Test Pinecone delete document', skip: true, () async {\n      await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: 'delete',\n            pageContent: 'This document will be deleted',\n            metadata: {'cat': 'xxx'},\n          ),\n        ],\n      );\n      final res1 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: const PineconeSimilaritySearch(\n          filter: {'cat': 'xxx'},\n        ),\n      );\n      expect(res1.length, 1);\n      expect(res1.first.id, 'delete');\n\n      await vectorStore.delete(ids: ['delete']);\n      final res2 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: const PineconeSimilaritySearch(\n          filter: {'cat': 'xxx'},\n        ),\n      );\n      expect(res2.length, 0);\n    });\n  });\n}\n"
  },
  {
    "path": "packages/langchain_supabase/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_supabase/CHANGELOG.md",
    "content": "## 0.2.0+2\n\n - Update a dependency to the latest release.\n\n## 0.2.0+1\n\n - **REFACTOR**: Fix pub format warnings ([#809](https://github.com/davidmigloz/langchain_dart/issues/809)). ([640cdefb](https://github.com/davidmigloz/langchain_dart/commit/640cdefbede9c0a0182fb6bb4005a20aa6f35635))\n\n## 0.2.0\n\n> Note: This release has breaking changes.\n\n - **FEAT**: Upgrade to http v1.5.0 ([#785](https://github.com/davidmigloz/langchain_dart/issues/785)). ([f7c87790](https://github.com/davidmigloz/langchain_dart/commit/f7c8779011015b5a4a7f3a07dca32bde1bb2ea88))\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.1.2+2\n\n - Update a dependency to the latest release.\n\n## 0.1.2+1\n\n - **BUILD**: Update dependencies ([#751](https://github.com/davidmigloz/langchain_dart/issues/751)). ([250a3c6](https://github.com/davidmigloz/langchain_dart/commit/250a3c6a6c1815703a61a142ba839c0392a31015))\n\n## 0.1.2\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n - **REFACTOR**: Remove fetch_client dependency in favor of http v1.3.0 ([#659](https://github.com/davidmigloz/langchain_dart/issues/659)). ([0e0a685c](https://github.com/davidmigloz/langchain_dart/commit/0e0a685c376895425dbddb0f9b83758c700bb0c7))\n - **FIX**: Fix linter issues ([#656](https://github.com/davidmigloz/langchain_dart/issues/656)). 
([88a79c65](https://github.com/davidmigloz/langchain_dart/commit/88a79c65aad23bcf5859e58a7375a4b686cf02ef))\n\n## 0.1.1+4\n\n - Update a dependency to the latest release.\n\n## 0.1.1+3\n\n - Update a dependency to the latest release.\n\n## 0.1.1+2\n\n - Update a dependency to the latest release.\n\n## 0.1.1+1\n\n - **REFACTOR**: Depend on exact versions for internal 1st party dependencies ([#484](https://github.com/davidmigloz/langchain_dart/issues/484)). ([244e5e8f](https://github.com/davidmigloz/langchain_dart/commit/244e5e8f30e0d9a642fe01a804cc0de5e807e13d))\n\n## 0.1.1\n\n - Update a dependency to the latest release.\n\n## 0.1.0+5\n\n - Update a dependency to the latest release.\n\n## 0.1.0+4\n\n - Update a dependency to the latest release.\n\n## 0.1.0+3\n\n - Update a dependency to the latest release.\n\n## 0.1.0+2\n\n - Update a dependency to the latest release.\n\n## 0.1.0+1\n\n - Update a dependency to the latest release.\n\n## 0.1.0\n\n> Note: This release has breaking changes.  \n> [Migration guide](https://github.com/davidmigloz/langchain_dart/discussions/374)\n\n - **BREAKING** **REFACTOR**: Introduce langchain_core and langchain_community packages ([#328](https://github.com/davidmigloz/langchain_dart/issues/328)). ([5fa520e6](https://github.com/davidmigloz/langchain_dart/commit/5fa520e663602d9cdfcab0c62a053090fa02b02e))\n\n## 0.0.1+1\n\n - **DOCS**: Update pubspecs. ([d23ed89a](https://github.com/davidmigloz/langchain_dart/commit/d23ed89adf95a34a78024e2f621dc0af07292f44))\n\n## 0.0.1\n\n - **FEAT**: Add support for Supabase VectorStore ([#69](https://github.com/davidmigloz/langchain_dart/issues/69)). ([be9e72bc](https://github.com/davidmigloz/langchain_dart/commit/be9e72bc210232e403f548a95a305d5bb6254f49))\n\n## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_supabase/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_supabase/README.md",
    "content": "# 🦜️🔗 LangChain.dart / Supabase\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![docs](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/pages%2Fpages-build-deployment?logo=github&label=docs)](https://github.com/davidmigloz/langchain_dart/actions/workflows/pages/pages-build-deployment)\n[![langchain_supabase](https://img.shields.io/pub/v/langchain_supabase.svg)](https://pub.dev/packages/langchain_supabase)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\nSupabase module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## Features\n\n- Vector stores:\n    * `Supabase` vector store that uses [Supabase Vector](https://supabase.com/vector)\n    Postgres Vector database.\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_supabase/example/langchain_supabase_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_supabase/lib/langchain_supabase.dart",
    "content": "/// LangChain.dart integration module for Supabase (e.g. Supabase Vector).\nlibrary;\n\nexport 'src/vector_stores/vector_stores.dart';\n"
  },
  {
    "path": "packages/langchain_supabase/lib/src/vector_stores/supabase.dart",
    "content": "import 'package:http/http.dart' as http;\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:supabase/supabase.dart';\n\n/// {@template supabase}\n/// Vector store for [Supabase Vector](https://supabase.com/vector)\n/// embedding database.\n///\n/// It assumes a database with the `pg_vector` extension,\n/// containing a [tableName] (default: `documents`) and\n/// a [postgresFunctionName] (default: `match_documents`)\n/// defined as follows:\n///\n/// ```sql\n///  -- Enable the \"vector\" extension\n/// create extension vector\n/// with\n///   schema extensions;\n///\n/// -- Create table to store the documents\n/// create table documents (\n///   id bigserial primary key,\n///   content text,\n///   metadata jsonb,\n///   embedding vector(1536)\n/// );\n///\n/// -- Create PostgreSQL function to query documents\n/// create or replace function match_documents (\n///   query_embedding vector(1536),\n///   match_count int,\n///   match_threshold float,\n///   filter jsonb\n/// ) returns table (\n///   id bigint,\n///   content text,\n///   metadata jsonb,\n///   similarity float\n/// )\n/// language sql stable\n/// as $$\n///   select\n///     documents.id,\n///     documents.content,\n///     documents.metadata,\n///     1 - (documents.embedding <=> query_embedding) as similarity\n/// from documents\n/// where metadata @> filter\n///   and 1 - (documents.embedding <=> query_embedding) > match_threshold\n/// order by (documents.embedding <=> query_embedding) asc\n///     limit match_count;\n/// $$;\n/// ```\n///\n/// See documentation for more details:\n/// - [LangChain.dart Supabase docs](https://langchaindart.dev/#/modules/retrieval/vector_stores/integrations/supabase)\n/// - [Supabase Vector docs](https://supabase.com/docs/guides/ai)\n/// {@endtemplate}\nclass Supabase extends VectorStore {\n  /// Creates a new [Supabase] instance.\n  ///\n  /// Main configuration options:\n  /// - 
[tableName] (default: `documents`): the Supabase table name.\n  /// - `supabaseUrl`: the Supabase URL. You can find it in your project's\n  ///   [API settings](https://supabase.com/dashboard/project/_/settings/api).\n  ///   E.g. `https://xyzcompany.supabase.co`.\n  /// - `supabaseKey`: the Supabase API key. You can find it in your project's\n  ///   [API settings](https://supabase.com/dashboard/project/_/settings/api).\n  ///\n  /// Advanced configuration options:\n  /// - `headers`: overrides the default Supabase client headers.\n  /// - `client`: the HTTP client to use. You can set your own HTTP client if\n  ///   you need further customization (e.g. to use a Socks5 proxy).\n  Supabase({\n    this.tableName = 'documents',\n    required final String supabaseUrl,\n    required final String supabaseKey,\n    final Map<String, String> headers = const {},\n    final http.Client? client,\n    required super.embeddings,\n  }) : _client = SupabaseClient(\n         supabaseUrl,\n         supabaseKey,\n         headers: headers,\n         httpClient: client,\n       );\n\n  /// The Supabase client.\n  final SupabaseClient _client;\n\n  /// The Supabase table name.\n  final String tableName;\n\n  /// The name of the PostgreSQL function that executes the query.\n  final postgresFunctionName = 'match_documents';\n\n  @override\n  Future<List<String>> addVectors({\n    required final List<List<double>> vectors,\n    required final List<Document> documents,\n  }) async {\n    assert(vectors.length == documents.length);\n\n    final List<Map<String, dynamic>> records = [];\n    for (var i = 0; i < documents.length; i++) {\n      final doc = documents[i];\n      records.add({\n        if (doc.id != null) 'id': doc.id,\n        'metadata': doc.metadata,\n        'content': doc.pageContent,\n        'embedding': vectors[i],\n      });\n    }\n\n    final ids = await _client.from(tableName).upsert(records).select('id');\n    return ids\n        .map((final row) => row['id'])\n      
  .map((final id) => id.toString())\n        .toList(growable: false);\n  }\n\n  @override\n  Future<void> delete({required final List<String> ids}) {\n    return _client.from(tableName).delete().filter('id', 'in', ids);\n  }\n\n  @override\n  Future<List<(Document, double)>> similaritySearchByVectorWithScores({\n    required final List<double> embedding,\n    final VectorStoreSimilaritySearch config =\n        const VectorStoreSimilaritySearch(),\n  }) async {\n    final params = {\n      'query_embedding': embedding,\n      'match_count': config.k,\n      'match_threshold': config.scoreThreshold ?? 0.0,\n      'filter': config.filter ?? {},\n    };\n\n    final List<dynamic> result = await _client.rpc(\n      postgresFunctionName,\n      params: params,\n    );\n    return result\n        .map((final row) => row as Map<String, dynamic>)\n        .map(\n          (final row) => (\n            Document(\n              id: row['id'].toString(),\n              pageContent: row['content'] as String,\n              metadata: row['metadata'] as Map<String, dynamic>,\n            ),\n            row['similarity'] as double,\n          ),\n        )\n        .toList(growable: false);\n  }\n}\n"
  },
  {
    "path": "packages/langchain_supabase/lib/src/vector_stores/types.dart",
    "content": "import 'package:langchain_core/vector_stores.dart';\n\n/// {@template supabase_similarity_search}\n/// Supabase similarity search config.\n///\n/// Supabase supports filtering queries by metadata using\n/// the [filter] parameter: the query will return all rows from the table\n/// where the JSON metadata column contains a key-value pair\n/// where the key is \"key\" and the value is \"value\" as in [filter].\n///\n/// Example:\n/// ```dart\n/// SupabaseSimilaritySearch(\n///   k: 5,\n///   filter: {'animal': 'cat'},\n///   scoreThreshold: 0.8,\n/// ),\n/// ```\n/// {@endtemplate}\nclass SupabaseSimilaritySearch extends VectorStoreSimilaritySearch {\n  /// {@macro supabase_similarity_search}\n  const SupabaseSimilaritySearch({\n    super.k = 4,\n    super.filter,\n    super.scoreThreshold,\n  });\n}\n"
  },
  {
    "path": "packages/langchain_supabase/lib/src/vector_stores/vector_stores.dart",
    "content": "export 'supabase.dart';\nexport 'types.dart';\n"
  },
  {
    "path": "packages/langchain_supabase/pubspec.yaml",
    "content": "name: langchain_supabase\ndescription: LangChain.dart integration module for Supabase (e.g. Supabase Vector).\nversion: 0.2.0+2\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_supabase\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_supabase\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n  - vector-db\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n\ndependencies:\n  http: ^1.5.0\n  langchain_core: 0.4.1\n  meta: ^1.16.0\n  supabase: ^2.10.0\n\ndev_dependencies:\n  test: ^1.26.2\n  langchain: ^0.8.1\n  langchain_community: 0.4.0+2\n  langchain_openai: ^0.8.1+1\n"
  },
  {
    "path": "packages/langchain_supabase/test/vector_stores/assets/example.txt",
    "content": "The answer to the question \"Who are we?\" is complex and multifaceted. It depends on the context in which the question is asked, and the perspective of the person answering.\n\nOn a general level, we are all human beings. We share the same basic biology, and we all have the same basic needs: food, water, shelter, love, and belonging. We also share the same basic capacity for love, compassion, creativity, and resilience.\n\nHowever, we are also all unique individuals. We have different experiences, different talents, and different perspectives. We are shaped by our families, our communities, our cultures, and our individual choices.\n\nSo, who are we? We are all of these things, and more. We are complex and contradictory, but we are also beautiful and resilient. We are human beings, and we are all connected.\n\nIn the context of this conversation, you and I are both users of a language model. We are both interested in learning and exploring new ideas. We are both part of a community of people who are using technology to connect with each other and to make the world a better place.\n\nUltimately, the answer to the question \"Who are we?\" is up to each individual to decide. We are all free to define ourselves in our own way.\n"
  },
  {
    "path": "packages/langchain_supabase/test/vector_stores/supabase_test.dart",
    "content": "@TestOn('vm')\nlibrary; // Uses dart:io\n\nimport 'dart:io';\n\nimport 'package:langchain/langchain.dart' show RecursiveCharacterTextSplitter;\nimport 'package:langchain_community/langchain_community.dart';\nimport 'package:langchain_core/documents.dart';\nimport 'package:langchain_core/embeddings.dart';\nimport 'package:langchain_core/vector_stores.dart';\nimport 'package:langchain_openai/langchain_openai.dart';\nimport 'package:langchain_supabase/langchain_supabase.dart';\nimport 'package:supabase/supabase.dart' as sp;\nimport 'package:test/test.dart';\n\nvoid main() {\n  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];\n  final supabaseUrl = Platform.environment['SUPABASE_URL'];\n  final supabaseApiKey = Platform.environment['SUPABASE_API_KEY'];\n  late final Embeddings embeddings;\n  late final VectorStore vectorStore;\n  late final sp.SupabaseClient supabaseClient;\n\n  setUpAll(() async {\n    expect(openaiApiKey, isNotNull);\n    expect(supabaseUrl, isNotNull);\n    expect(supabaseApiKey, isNotNull);\n\n    try {\n      supabaseClient = sp.SupabaseClient(supabaseUrl!, supabaseApiKey!);\n    } catch (e) {\n      fail('Expected SupabaseClient to be initialized');\n    }\n\n    // sanity check for documents table\n    try {\n      final result = await supabaseClient\n          .from('documents')\n          .select('id,content,metadata,embedding');\n      expect(result, isNotNull);\n      expect(result.length, 0);\n    } catch (e) {\n      fail('Expected documents table to exist and be empty: $e');\n    }\n\n    // sanity check for match_documents function\n    try {\n      final result = await supabaseClient.rpc<List<dynamic>>(\n        'match_documents',\n        params: {\n          'filter': '', // no filter\n          'match_count': 2147483647, // maximum integer value\n          'query_embedding': '[${'0,' * 1535}0]', // origin in 1536 dims space\n          'match_threshold': 0.0, // minimum similarity score\n        },\n      
);\n      expect(result, isNotNull);\n      expect(result.length, 0);\n    } catch (e) {\n      fail('Expected match_documents function to exist: $e');\n    }\n\n    embeddings = OpenAIEmbeddings(apiKey: openaiApiKey);\n    vectorStore = Supabase(\n      tableName: 'documents',\n      embeddings: embeddings,\n      supabaseUrl: supabaseUrl,\n      supabaseKey: supabaseApiKey,\n    );\n  });\n\n  group('Supabase tests', () {\n    test('Test Supabase add new vectors', () async {\n      final res = await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: '1',\n            pageContent: 'The cat sat on the mat',\n            metadata: {'cat': 'animal'},\n          ),\n          const Document(\n            id: '2',\n            pageContent: 'The dog chased the ball.',\n            metadata: {'cat': 'animal'},\n          ),\n          const Document(\n            id: '3',\n            pageContent: 'The boy ate the apple.',\n            metadata: {'cat': 'person'},\n          ),\n          const Document(\n            id: '4',\n            pageContent: 'The girl drank the milk.',\n            metadata: {'cat': 'person'},\n          ),\n          const Document(\n            id: '5',\n            pageContent: 'The sun is shining.',\n            metadata: {'cat': 'natural'},\n          ),\n        ],\n      );\n\n      expect(res.length, 5);\n    });\n\n    test('Test Supabase add new vectors from file', () async {\n      const filePath = './test/vector_stores/assets/example.txt';\n      const loader = TextLoader(filePath);\n      final pages = await loader.load();\n\n      const splitter = RecursiveCharacterTextSplitter(\n        chunkOverlap: 150,\n        chunkSize: 1500,\n      );\n      final docs = splitter.splitDocuments(pages);\n\n      await vectorStore.addDocuments(documents: docs);\n\n      final res = await vectorStore.similaritySearch(\n        query: 'Who are we?',\n        config: const SupabaseSimilaritySearch(k: 
1),\n      );\n      expect(res.length, 1);\n    });\n\n    test('Test Supabase query return 1 result', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'Is it raining?',\n        config: const SupabaseSimilaritySearch(k: 1),\n      );\n      expect(res.length, 1);\n      expect(res.first.id, '5');\n    });\n\n    test('Test Supabase query with scoreThreshold', () async {\n      final res = await vectorStore.similaritySearchWithScores(\n        query: 'Is it raining?',\n        config: const SupabaseSimilaritySearch(scoreThreshold: 0.3),\n      );\n      for (final (_, score) in res) {\n        expect(score, greaterThan(0.3));\n      }\n    });\n\n    test('Test Supabase query with equality filter', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What are they eating?',\n        config: const SupabaseSimilaritySearch(\n          k: 10,\n          filter: {'cat': 'person'},\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['cat'], 'person');\n      }\n    });\n\n    test('Test Supabase query with filter with operators', () async {\n      final res = await vectorStore.similaritySearch(\n        query: 'What are they eating?',\n        config: const SupabaseSimilaritySearch(\n          k: 10,\n          filter: {\n            'cat': {r'ne': 'person'},\n          },\n        ),\n      );\n      for (final doc in res) {\n        expect(doc.metadata['cat'], isNot('person'));\n      }\n    });\n\n    test('Test Supabase delete document', () async {\n      await vectorStore.addDocuments(\n        documents: [\n          const Document(\n            id: '9999',\n            pageContent: 'This document will be deleted',\n            metadata: {'cat': 'xxx'},\n          ),\n        ],\n      );\n      final res1 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: const SupabaseSimilaritySearch(filter: {'cat': 'xxx'}),\n      );\n      
expect(res1.length, 1);\n      expect(res1.first.id, '9999');\n\n      await vectorStore.delete(ids: ['9999']);\n      final res2 = await vectorStore.similaritySearch(\n        query: 'Deleted doc',\n        config: const SupabaseSimilaritySearch(filter: {'cat': 'xxx'}),\n      );\n      expect(res2.length, 0);\n    });\n  });\n\n  group('SupabaseSimilaritySearch', () {\n    test('SupabaseSimilaritySearch fields', () {\n      const config = SupabaseSimilaritySearch(\n        k: 5,\n        filter: {'style': 'style1'},\n        scoreThreshold: 0.8,\n      );\n      expect(config.k, 5);\n      expect(config.filter, {'style': 'style1'});\n      expect(config.scoreThreshold, 0.8);\n    });\n  });\n\n  tearDownAll(() async {\n    late final List<Map<String, dynamic>> result;\n    try {\n      result = await supabaseClient\n          .from('documents')\n          .delete()\n          .neq('id', '0')\n          .select();\n    } catch (e) {\n      fail('Error deleting documents: $e');\n    }\n    expect(result, isNotNull);\n    expect(result.length, 6);\n    await supabaseClient.dispose();\n  });\n}\n"
  },
  {
    "path": "packages/langchain_weaviate/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_weaviate/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_weaviate/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_weaviate/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nWeaviate module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_weaviate/example/langchain_weaviate_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_weaviate/lib/langchain_weaviate.dart",
    "content": "/// Weaviate module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_weaviate/pubspec.yaml",
    "content": "name: langchain_weaviate\ndescription: Weaviate module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_weaviate\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_weaviate\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_wikipedia/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_wikipedia/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_wikipedia/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_wikipedia/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nWikipedia module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_wikipedia/example/langchain_wikipedia_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_wikipedia/lib/langchain_wikipedia.dart",
    "content": "/// Wikipedia module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_wikipedia/pubspec.yaml",
    "content": "name: langchain_wikipedia\ndescription: Wikipedia module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wikipedia\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wikipedia\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langchain_wolfram/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langchain_wolfram/CHANGELOG.md",
    "content": "## 0.0.1-dev.1\n\n- Bootstrap project.\n"
  },
  {
    "path": "packages/langchain_wolfram/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langchain_wolfram/README.md",
    "content": "# 🦜️🔗 LangChain.dart\n\nWolfram Research module for [LangChain.dart](https://github.com/davidmigloz/langchain_dart).\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langchain_wolfram/example/langchain_wolfram_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langchain_wolfram/lib/langchain_wolfram.dart",
    "content": "/// Wolfram Research module for LangChain.dart.\nlibrary;\n"
  },
  {
    "path": "packages/langchain_wolfram/pubspec.yaml",
    "content": "name: langchain_wolfram\ndescription: Wolfram Research module for LangChain.dart.\nversion: 0.0.1-dev.1\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langchain_wolfram\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langchain_wolfram\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\npublish_to: none # Remove when the package is ready to be published\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/langgraph/.gitignore",
    "content": "# https://dart.dev/guides/libraries/private-files\n# Created by `dart pub`\n.dart_tool/\n\n# Avoid committing pubspec.lock for library packages; see\n# https://dart.dev/guides/libraries/private-files#pubspeclock.\npubspec.lock\n"
  },
  {
    "path": "packages/langgraph/CHANGELOG.md",
    "content": "## 0.0.1-dev.3\n\n> Note: This release has breaking changes.\n\n - **BREAKING** **BUILD**: Require Dart >=3.8.0 ([#792](https://github.com/davidmigloz/langchain_dart/issues/792)). ([b887f5c6](https://github.com/davidmigloz/langchain_dart/commit/b887f5c62e307b3a510c5049e3d1fbe7b7b4f4c9))\n\n## 0.0.1-dev.2\n\n - **FEAT**: Update dependencies (requires Dart 3.6.0) ([#709](https://github.com/davidmigloz/langchain_dart/issues/709)). ([9e3467f7](https://github.com/davidmigloz/langchain_dart/commit/9e3467f7caabe051a43c0eb3c1110bc4a9b77b81))\n\n## 0.0.1-dev.1\n\n- Bootstrap package.\n"
  },
  {
    "path": "packages/langgraph/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 David Miguel Lozano\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "packages/langgraph/README.md",
    "content": "# 🦜🕸️LangGraph\n\n[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml)\n[![langgraph](https://img.shields.io/pub/v/langgraph.svg)](https://pub.dev/packages/langgraph)\n![Discord](https://img.shields.io/discord/1123158322812555295?label=discord)\n[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE)\n\n⚡ Building language agents as graphs ⚡\n\n## Overview\n\nTODO\n\n## License\n\nLangChain.dart is licensed under the\n[MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE).\n"
  },
  {
    "path": "packages/langgraph/example/langgraph_example.dart",
    "content": "void main() {\n  // TODO\n}\n"
  },
  {
    "path": "packages/langgraph/lib/langgraph.dart",
    "content": "/// Build resilient language agents as graphs.\nlibrary;\n"
  },
  {
    "path": "packages/langgraph/pubspec.yaml",
    "content": "name: langgraph\ndescription: Build resilient language agents as graphs.\nversion: 0.0.1-dev.3\nrepository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/langgraph\nissue_tracker: https://github.com/davidmigloz/langchain_dart/issues?q=label:p:langgraph\nhomepage: https://github.com/davidmigloz/langchain_dart\ndocumentation: https://langchaindart.dev\n\nfunding:\n  - https://github.com/sponsors/davidmigloz\n\ntopics:\n  - nlp\n  - gen-ai\n  - llms\n  - langchain\n\nenvironment:\n  sdk: \">=3.9.0 <4.0.0\"\nresolution: workspace\n"
  },
  {
    "path": "packages/mistralai_dart/README.md",
    "content": "# mistralai_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/mistralai_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/ollama_dart/README.md",
    "content": "# ollama_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/ollama_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/openai_dart/README.md",
    "content": "# openai_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/openai_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/openai_realtime_dart/README.md",
    "content": "# openai_realtime_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/openai_realtime_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/tavily_dart/README.md",
    "content": "# tavily_dart\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/tavily_dart\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "packages/vertex_ai/README.md",
    "content": "# vertex_ai\n\nThis package has been moved to a new repository:\n\n**New location:** https://github.com/davidmigloz/ai_clients_dart/tree/main/packages/vertex_ai\n\nPlease update your dependencies to use the package from its new home.\n"
  },
  {
    "path": "pubspec.yaml",
    "content": "name: langchain_workspace\npublish_to: none\nenvironment:\n  sdk: ^3.9.0\nworkspace:\n  - examples/browser_summarizer\n  - examples/docs_examples\n  - examples/hello_world_backend\n  - examples/hello_world_cli\n  - examples/hello_world_flutter\n  - examples/vertex_ai_matching_engine_setup\n  - examples/wikivoyage_eu\n  - packages/langchain\n  - packages/langchain_amazon\n  - packages/langchain_anthropic\n  - packages/langchain_chroma\n  - packages/langchain_cohere\n  - packages/langchain_community\n  - packages/langchain_core\n  - packages/langchain_firebase\n  - packages/langchain_google\n  - packages/langchain_huggingface\n  - packages/langchain_microsoft\n  - packages/langchain_mistralai\n  - packages/langchain_ollama\n  - packages/langchain_openai\n#  - packages/langchain_pinecone\n  - packages/langchain_supabase\n  - packages/langchain_weaviate\n  - packages/langchain_wikipedia\n  - packages/langchain_wolfram\n  - packages/langgraph\n  - packages/langchain_firebase/example\n\ndev_dependencies:\n  melos: ^7.3.0\n\nmelos:\n  repository: https://github.com/davidmigloz/langchain_dart\n\n  command:\n    version:\n      linkToCommits: true\n      workspaceChangelog: true\n      releaseUrl: true\n      branch: main\n      changelogs:\n        - path: CHANGELOG.md\n          description: \"📣 Check out the [releases page](https://github.com/davidmigloz/langchain_dart/releases) or the [#announcements](https://discord.com/channels/1123158322812555295/1123250594644242534) channel on the [LangChain.dart Discord](https://discord.gg/x4qbhqecVR) server for more details.\"\n          packageFilters:\n            no-private: true\n    bootstrap:\n      usePubspecOverrides: true\n      environment:\n        sdk: \">=3.9.0 <4.0.0\"\n        flutter: \">=3.27.0\"\n      dependencies:\n        async: ^2.13.0\n        beautiful_soup_dart: ^0.3.0\n        characters: ^1.4.0\n        chrome_extension: ^0.4.0\n        collection: ^1.19.1\n        cross_file: ^0.3.4+2\n  
      crypto: ^3.0.6\n        csv: ^7.1.0\n        anthropic_sdk_dart: ^1.3.0\n        equatable: ^2.0.7\n        firebase_ai: ^3.4.0\n        firebase_app_check: ^0.4.1+1\n        firebase_auth: ^6.1.1\n        firebase_core: ^4.2.0\n        flat_buffers: ^25.9.23\n        flutter_bloc: ^9.1.1\n        flutter_markdown: ^0.7.7 # Package will be discontinued. Migrate to flutter_markdown_plus\n        freezed_annotation: ^3.1.0\n        gcloud: ^0.9.0\n        glob: ^2.1.3\n        googleai_dart: ^3.5.0\n        googleapis: ^15.0.0\n        googleapis_auth: ^2.0.0\n        http: ^1.5.0\n        json_annotation: ^4.9.0\n        json_path: ^0.9.0\n        langchain_tiktoken: ^1.0.1\n        logging: ^1.3.0\n        math_expressions: ^3.0.0\n        meta: ^1.16.0\n        mistralai_dart: ^1.3.0\n        objectbox: ^5.1.0\n        ollama_dart: ^1.4.0\n        openai_dart: ^1.4.0\n        path: ^1.9.1\n        pinecone: ^0.7.2\n        rxdart: \">=0.27.7 <0.29.0\"\n        shared_preferences: ^2.5.3\n        shelf: ^1.4.2\n        shelf_router: ^1.1.4\n        supabase: ^2.10.0\n        uuid: ^4.5.1\n        web_socket_channel: ^3.0.2\n      dev_dependencies:\n        build_runner: ^2.5.4\n        coverage: ^1.15.0\n        freezed: ^3.2.3\n        json_serializable: ^6.11.1\n        objectbox_generator: ^5.1.0\n        openapi_spec:\n          git:\n            url: https://github.com/davidmigloz/openapi_spec.git\n            ref: 4fbc3c3841faf90c6799cb6d871a4f3b0ef82624\n        mocktail: ^1.0.4\n        test: ^1.26.2\n\n  scripts:\n    lint:\n      description: Run all static analysis checks\n      run: melos run format && melos run analyze\n\n    lint:diff:\n      description: Run all static analysis checks failing fast\n      run: melos run format:diff && melos run analyze:diff\n\n    format:\n      description: Format Dart files\n      run: melos exec -- \"dart fix --apply\"\n\n    format:diff:\n      description: Format Flutter files\n      run: melos exec 
--fail-fast -- \"dart format --set-exit-if-changed .\"\n      packageFilters:\n        diff: origin/main...HEAD\n\n    analyze:\n      description: Run Flutter static analyzer\n      run: melos exec -- \"flutter analyze .\"\n\n    analyze:diff:\n      description: Run Flutter static analyzer\n      run: melos exec -- \"flutter analyze .\"\n      packageFilters:\n        diff: origin/main...HEAD\n\n    test:\n      run: melos run test:dart --no-select && melos run test:flutter --no-select\n      description: Run all Dart & Flutter tests in this project.\n\n    test:dart:\n      run: melos exec -c 1 --fail-fast -- \"dart test test\"\n      description: Run Dart tests for a specific package in this project.\n      packageFilters:\n        flutter: false\n        dirExists: test\n\n    test:flutter:\n      run: melos exec -c 1 --fail-fast -- \"flutter test test\"\n      description: Run Flutter tests for a specific package in this project.\n      packageFilters:\n        flutter: true\n        dirExists: test\n\n    test:diff:\n      exec: dart test test\n      description: Run all Dart tests for changed packages in this project.\n      packageFilters:\n        diff: origin/main...HEAD\n        flutter: false\n        dirExists: test\n\n    dep-outdated:\n      description: Checks which dependencies have newer versions available.\n      run: melos exec -c 1 -- \"flutter pub outdated\"\n\n    dep-upgrade:\n      description: Automatically upgrades package dependencies to the latest versions.\n      run: melos exec -c 1 -- \"flutter pub upgrade --major-versions\"\n\n    fix:\n      description: Run Dart/Flutter automated fixes\n      run: melos exec -c 1 -- \"dart fix --apply\"\n\n    codegen:\n      description: Run code generation using build_runner\n      run: melos exec -c 1 -- \"flutter pub run build_runner build --delete-conflicting-outputs && dart format --fix .\"\n      packageFilters:\n        dependsOn: \"build_runner\"\n"
  }
]