Full Code of dzhng/zod-gpt for AI

main 9be7db3375f6 cached
22 files
38.6 KB
10.3k tokens
22 symbols
1 requests
Download .txt
Repository: dzhng/zod-gpt
Branch: main
Commit: 9be7db3375f6
Files: 22
Total size: 38.6 KB

Directory structure:
gitextract_3ud4kvde/

├── .editorconfig
├── .eslintrc.json
├── .gitattributes
├── .github/
│   └── workflows/
│       ├── publish.yml
│       └── test.yml
├── .gitignore
├── .husky/
│   └── pre-commit
├── .prettierrc
├── LICENSE
├── README.md
├── jest.config.js
├── package.json
├── playground.ts
├── src/
│   ├── __snapshots__/
│   │   └── text-splitter.test.ts.snap
│   ├── completion.ts
│   ├── config.ts
│   ├── index.ts
│   ├── text-splitter.test.ts
│   ├── text-splitter.ts
│   ├── types.ts
│   └── utils.ts
└── tsconfig.json

================================================
FILE CONTENTS
================================================

================================================
FILE: .editorconfig
================================================
root = true

[*]
end_of_line = lf
insert_final_newline = true

[*.{js,json,yml}]
charset = utf-8
indent_style = space
indent_size = 2


================================================
FILE: .eslintrc.json
================================================
{
  "root": true,
  "plugins": ["prettier", "@typescript-eslint"],
  "extends": [
    "universe/node",
    "plugin:prettier/recommended",
    "plugin:import/recommended",
    "plugin:import/typescript",
    "plugin:@typescript-eslint/recommended"
  ],
  "parser": "@typescript-eslint/parser",
  "parserOptions": {
    "ecmaVersion": 2020,
    "sourceType": "module"
  },
  "settings": {
    "import/extensions": [".ts", ".tsx", ".js", ".jsx"],
    "import/resolver": {
      "typescript": {
        "project": "tsconfig.json"
      }
    }
  },
  "rules": {
    "@typescript-eslint/no-explicit-any": "off",
    "@typescript-eslint/ban-ts-comment": "off",
    "@typescript-eslint/no-empty-interface": "off",
    "no-shadow": "off",
    "no-console": ["error", { "allow": ["warn", "error", "info"] }],
    "react/react-in-jsx-scope": "off",
    "react/jsx-props-no-spreading": "off",
    "jsx-a11y/anchor-is-valid": "off",
    "jsx-a11y/alt-text": "off",
    "jsx-a11y/click-events-have-key-events": "off",
    "jsx-a11y/no-static-element-interactions": "off",
    "jsx-a11y/interactive-supports-focus": "off",
    "react/require-default-props": "off",
    "no-param-reassign": "off",
    "import/no-anonymous-default-export": "off",
    "import/no-named-as-default": "off",
    "import/no-named-as-default-member": "off",
    "import/order": [
      "error",
      {
        "newlines-between": "always",
        "groups": [
          ["builtin", "external"],
          "internal",
          "parent",
          "sibling",
          "index"
        ],
        "pathGroups": [
          {
            "pattern": "~/**",
            "group": "parent",
            "position": "before"
          }
        ],
        "pathGroupsExcludedImportTypes": ["react"],
        "alphabetize": { "order": "asc", "caseInsensitive": true }
      }
    ]
  }
}


================================================
FILE: .gitattributes
================================================
/.yarn/**            linguist-vendored
/.yarn/releases/*    binary
/.yarn/plugins/**/*  binary
/.pnp.*              binary linguist-generated


================================================
FILE: .github/workflows/publish.yml
================================================
name: Release & Publish

on:
  push:
    tags:
      - 'v*'

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup .npmrc file to publish to npm
        uses: actions/setup-node@v2
        with:
          node-version: '18.x'
          registry-url: 'https://registry.npmjs.org'
      - name: Setup pnpm
        uses: pnpm/action-setup@v3 # docs https://pnpm.io/continuous-integration#github-actions
        with:
          version: 8 # Optional: specify a pnpm version
      - name: Install modules
        run: pnpm install
      - name: Build
        run: pnpm build
      - name: Publish to npm
        run: npm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}


================================================
FILE: .github/workflows/test.yml
================================================
name: Test

on:
  push:
    branches: [main, staging]
  pull_request:
    branches: [main, staging]

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - windows-latest
        node: [16.x, 18.x]

    steps:
      - uses: actions/checkout@v2
      - name: Set up Node.js
        uses: actions/setup-node@v2
        with:
          node-version: ${{ matrix.node }}
      - name: Setup pnpm
        uses: pnpm/action-setup@v3 # docs https://pnpm.io/continuous-integration#github-actions
        with:
          version: 8 # Optional: specify a pnpm version
      - name: Install modules
        run: pnpm install
      - name: Run tests
        run: pnpm test


================================================
FILE: .gitignore
================================================
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/sdks
!.yarn/versions


# Swap the comments on the following lines if you don't wish to use zero-installs
# Documentation here: https://yarnpkg.com/features/zero-installs
#!.yarn/cache
#.pnp.*

node_modules/
dist/
npm-debug.*
.DS_Store


================================================
FILE: .husky/pre-commit
================================================
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

npx lint-staged


================================================
FILE: .prettierrc
================================================
{
  "printWidth": 80,
  "tabWidth": 2,
  "trailingComma": "all",
  "semi": true,
  "singleQuote": true
}


================================================
FILE: LICENSE
================================================
MIT License

Copyright (c) 2023 David Zhang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
# ✨ ZodGPT

[![test](https://github.com/dzhng/zod-gpt/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/dzhng/zod-gpt/actions/workflows/test.yml)

Get structured, fully typed, and validated JSON outputs from OpenAI and Anthropic models.

Under the hood, `zod-gpt` uses functions to coerce the model to always respond as function calls. Add self-reflection for reliability and zod for parsing & typing.

- [Introduction](#-introduction)
- [Usage](#-usage)
  - [Install](#install)
  - [Request](#request)
  - [Auto Healing](#-auto-healing)
  - [Text Slicing](#-text-slicing)
- [Debugging](#-debugging)
- [API Reference](#-api-reference)

## 👋 Introduction

ZodGPT is a library for

- Receiving structured outputs from models with complete type safety. All responses are fully validated & typed, works with [zod](https://github.com/colinhacks/zod) as a peer dep.
- Schema definition, serialization / parsing, and **automatically asking the model to correct outputs**.
- Handle rate limit and any other API errors as gracefully as possible (e.g. exponential backoff for rate-limit) via [llm-api](https://github.com/dzhng/llm-api).

With `zod-gpt`, you can simply query OpenAI's ChatGPT model like so:

```typescript
import { OpenAIChatApi } from 'llm-api';
import { completion } from 'zod-gpt';

const openai = new OpenAIChatApi({ apiKey: 'YOUR_OPENAI_KEY' });

const response = await completion(openai, 'Generate a startup idea', {
  schema: z.object({
    name: z.string().describe('The name of the startup'),
    description: z.string().describe('What does this startup do?'),
  }),
});

// data will be typed as { name: string; description: string }
console.log(response.data);
```

Anthropic is also supported via `llm-api`:

```typescript
import { AnthropicChatApi } from 'llm-api';
import { completion } from 'zod-gpt';

const client = new AnthropicChatApi({ apiKey: 'YOUR_ANTHROPIC_KEY' });
const response = await completion(client, ...);
```

## 🔨 Usage

### Install

This package is hosted on npm:

```
npm i zod-gpt
```

```
yarn add zod-gpt
```

To set up in your codebase, initialize a new instance with the model you want via the `llm-api` peer dep. Note that `zod-gpt` is designed to work with any model that implements the `CompletionApi` interface, so you can also import your own API wrapper.

```typescript
import { OpenAIChatApi } from 'llm-api';

const openai = new OpenAIChatApi(
  { apiKey: 'YOUR_OPENAI_KEY' },
  { model: 'gpt-4-0613' },
);
```

### Request

To send a standard completion request with a given model, simply call the `completion` method.

```typescript
const response = await completion(openai, 'hello');

// data will be typed as string
console.log(response.data);
```

To add schema parsing and typing, simply add a `schema` key in the options argument. **Make sure to add a description to each key via the `describe` method.** The descriptions will be fed into the model to ensure that it understands exactly what data is requested for each key. Try to err on the side of being overly descriptive so the model understands exactly what is expected.

```typescript
const response = await completion(
  openai,
  'Generate a step by step plan on how to run a hackathon',
  {
    schema: z.object({
      plan: z.array(
        z.object({
          reason: z.string().describe('Name the reasoning for this step'),
          id: z.string().describe('Unique step id'),
          task: z
            .string()
            .describe('What is the task to be done for this step?'),
        }),
      ),
    }),
  },
);

// data will be typed as { plan: { reason: string; id: string; task: string }[] }
console.info('Response:', response.data);
```

NOTE: the `schema` key ONLY takes object type schemas - this is a limitation of the `functions` API. If you need to generate arrays or other types of responses, simply wrap them in an object like the above example.

### 🧑‍⚕️ Auto Healing

By default, `zod-gpt` has logic to automatically detect and heal any schema errors via self-reflection (e.g. if the function api is not being used correctly, if the schema has parse errors.. etc). This means whenever these types of errors happen, `zod-gpt` will send a new message to re-ask the model to correct its own output, together with any error messages it gathered from parsing.

The logic is simple but incredibly powerful, and adds a layer of reliability to model outputs. I suggest leaving this flag set to true (its default setting), unless token usage or response time becomes a real issue.

### 📃 Text Slicing

A common way to handle token limit issues is to split your content. `zod-gpt` provides an `autoSlice` option to automatically split your text when a token overflow error from `llm-api` is detected. It's smart enough to only split your text if it determines that it is above the token limit, and will try to preserve as much of the original text as possible.

```typescript
const openai = new OpenAIChatApi(
  { apiKey: 'YOUR_OPENAI_KEY' },
  // make sure `contextSize` is set to enable throwing TokenErrors
  { model: 'gpt-4-0613', contextSize: 8192 },
);

const response = await completion(
  openai,
  'hello world, testing overflow logic',
  { autoSlice: true },
);
```

## 🤓 Debugging

`zod-gpt` uses the `debug` module for logging & error messages. To run in debug mode, set the `DEBUG` env variable:

`DEBUG=zod-gpt:* yarn playground`

You can also specify different logging types via:

`DEBUG=zod-gpt:error yarn playground`
`DEBUG=zod-gpt:log yarn playground`

## ✅ API Reference

### LLM Provider Support

`zod-gpt` currently uses the [llm-api](https://github.com/dzhng/llm-api) library to support multiple LLM providers. Check the `llm-api` documentation on how to configure model parameters.

#### Completion

To send a completion request to a model:

```typescript
const res: Response = await completion(model, prompt, options: RequestOptions);
```

**options**
You can override the default request options via this parameter. The `RequestOptions` object extends the request options defined in `llm-api`.

```typescript
type RequestOptions = {
  // set a zod schema to enable JSON output
  schema?: T;

  // set to enable automatically slicing the prompt on token overflow. prompt will be sliced starting from the last character
  // default: false
  autoSlice?: boolean;

  // attempt to auto heal the output via reflection
  // default: true
  autoHeal?: boolean;

  // set message history, useful if you want to continue an existing conversation
  messageHistory?: ChatRequestMessage[];

  // the number of time to retry this request due to rate limit or recoverable API errors
  // default: 3
  retries?: number;
  // default: 30s
  retryInterval?: number;
  // default: 60s
  timeout?: number;

  // the minimum amount of tokens to allocate for the response. if the request is predicted to not have enough tokens, it will automatically throw a 'TokenError' without sending the request
  // default: 200
  minimumResponseTokens?: number;
};
```

#### Response

Completion responses extend the model responses from `llm-api`, specifically adding a `data` field for the parsed JSON that's automatically typed according to the input `zod` schema.

```typescript
interface Response<T extends z.ZodType> {
  // parsed and typecasted data from the model
  data: z.infer<T>;

  // raw response from the completion API
  content?: string;
  name?: string;
  arguments?: JsonValue;
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}
```

### Misc

#### Text Splitting

If you need to split long text into multiple chunks before calling the llm, a few text splitters are also exported in `text-splitter.ts`. Try to default to `RecursiveTextSplitter` unless there is a specific reason to use the other text splitters, as it is the most widely used text splitter.


================================================
FILE: jest.config.js
================================================
// Jest configuration: Node environment with TypeScript support via ts-jest.
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
  // automatically reset mock state between every test
  clearMocks: true,
  roots: ['<rootDir>/src'],
  modulePaths: ['<rootDir>/src'],
  // match files ending in .test.ts(x) plus anything inside a __tests__ directory
  testRegex: '(/__tests__/.*|(\\.|/)(test))\\.tsx?$',
  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
  reporters: ['default'],
  // NOTE(review): configuring ts-jest through `globals` is deprecated in
  // ts-jest 29 in favor of the `transform` option — still functional, but
  // worth migrating; confirm against the pinned ts-jest version.
  globals: {
    // we must specify a custom tsconfig for tests because we need the typescript transform
    // to transform jsx into js rather than leaving it jsx such as the next build requires.  you
    // can see this setting in tsconfig.jest.json -> "jsx": "react"
    'ts-jest': {
      tsconfig: 'tsconfig.json',

      // set isolatedModules to fix jest memory leak with ts include directories
      // https://github.com/kulshekhar/ts-jest/issues/1967
      isolatedModules: true,
    },

    // disable types from preventing tests from running
    // https://github.com/kulshekhar/ts-jest/issues/822
    diagnostics: {
      exclude: ['!**/*.(spec|test).ts?(x)'],
    },
  },
};


================================================
FILE: package.json
================================================
{
  "name": "zod-gpt",
  "description": "Get structured, fully typed JSON outputs from OpenAI and Anthropic LLMs",
  "version": "0.16.0",
  "main": "dist/src/index.js",
  "types": "dist/src/index.d.ts",
  "publishConfig": {
    "access": "public"
  },
  "files": [
    "dist"
  ],
  "keywords": [
    "typescript",
    "zod",
    "gpt",
    "chatgpt",
    "llama",
    "llm",
    "ai",
    "ml",
    "prompt",
    "prompt engineering",
    "openai"
  ],
  "author": "David Zhang <david@aomni.com>",
  "license": "MIT",
  "homepage": "https://github.com/dzhng/zod-gpt",
  "repository": {
    "type": "git",
    "url": "git+ssh://git@github.com/dzhng/zod-gpt.git"
  },
  "bugs": {
    "url": "https://github.com/dzhng/zod-gpt/issues"
  },
  "scripts": {
    "setup": "husky install",
    "build": "tsc --build --pretty",
    "lint": "eslint src --ext ts,tsx,js,jsx --ignore-path .gitignore --fix",
    "test": "jest --passWithNoTests",
    "test:update": "jest -u --passWithNoTests",
    "playground": "tsx playground"
  },
  "dependencies": {
    "debug": "^4.3.4",
    "jsonic": "^1.0.1",
    "jsonrepair": "^3.4.0",
    "lodash": "^4.17.21",
    "tsx": "^4.7.1",
    "type-fest": "^4.6.0",
    "zod-to-json-schema": "^3.21.4"
  },
  "peerDependencies": {
    "llm-api": "^1.6.0",
    "zod": "^3.22.4"
  },
  "devDependencies": {
    "@types/debug": "^4.1.10",
    "@types/jest": "^29.5.7",
    "@types/jsonic": "^0.3.2",
    "@types/lodash": "^4.14.200",
    "eslint": "^8.36.0",
    "eslint-config-prettier": "^8.5.0",
    "eslint-config-universe": "^11.1.1",
    "eslint-import-resolver-typescript": "^3.3.0",
    "eslint-plugin-import": "^2.26.0",
    "eslint-plugin-prettier": "^4.2.1",
    "husky": "^8.0.2",
    "jest": "^29.7.0",
    "lint-staged": "^13.2.0",
    "llm-api": "^1.6.0",
    "prettier": "^2.8.0",
    "ts-jest": "^29.1.1",
    "typescript": "^5.2.2",
    "zod": "^3.22.4"
  },
  "lint-staged": {
    "*.{js,jsx,ts,tsx}": [
      "eslint --ext ts,tsx,js,jsx --fix --ignore-path .gitignore ",
      "prettier --write"
    ],
    "*.{json,md,css,scss}": [
      "prettier --write"
    ]
  }
}


================================================
FILE: playground.ts
================================================
import {
  AnthropicChatApi,
  OpenAIChatApi,
  AnthropicBedrockChatApi,
  GroqChatApi,
} from 'llm-api';
import { z } from 'zod';

import { completion } from './src';

/**
 * Manual playground for exercising `completion` against whichever provider
 * has credentials set in the environment. Run with `pnpm playground` (or
 * `DEBUG=zod-gpt:* pnpm playground` for full logging).
 */
(async function go() {
  let client:
    | OpenAIChatApi
    | AnthropicChatApi
    | AnthropicBedrockChatApi
    | GroqChatApi
    | undefined;

  // Pick a client based on which credentials are present, in priority order:
  // OpenAI > Anthropic > AWS Bedrock > Groq. Each branch is guarded on its
  // env var being set, so no placeholder fallback is needed.
  if (process.env.OPENAI_KEY) {
    client = new OpenAIChatApi(
      { apiKey: process.env.OPENAI_KEY },
      { contextSize: 4096 },
    );
  } else if (process.env.ANTHROPIC_KEY) {
    client = new AnthropicChatApi(
      { apiKey: process.env.ANTHROPIC_KEY },
      { stream: true, temperature: 0 },
    );
  } else if (
    process.env.AWS_BEDROCK_ACCESS_KEY &&
    process.env.AWS_BEDROCK_SECRET_KEY
  ) {
    client = new AnthropicBedrockChatApi(
      {
        accessKeyId: process.env.AWS_BEDROCK_ACCESS_KEY,
        secretAccessKey: process.env.AWS_BEDROCK_SECRET_KEY,
      },
      { stream: true, temperature: 0, model: 'anthropic.claude-v2' },
    );
  } else if (process.env.GROQ_KEY) {
    client = new GroqChatApi(
      { apiKey: process.env.GROQ_KEY },
      { stream: false, temperature: 0 },
    );
  }
  if (!client) {
    // mention every provider this playground actually supports
    throw new Error(
      'Please set an OpenAI, Anthropic, AWS Bedrock, or Groq environment variable',
    );
  }

  /*const resSlice = await completion(
    client,
    'Just say hello and ignore the rest of this message\n' +
      Array(500_000).fill('1'),
    { autoSlice: true },
  );
  console.info('Response slice: ', resSlice.data);*/

  // simple object schema
  const resStartup = await completion(client, 'Generate a startup idea', {
    schema: z.object({
      name: z.string().describe('The name of the startup'),
      description: z.string().describe('What does this startup do?'),
    }),
  });
  console.info('Response 1: ', resStartup.data);

  // no schema: data is typed as string
  const resHello = await completion(client, 'Hello');
  console.info('Response 2:', resHello.data);

  // nested array schema with optional fields
  const resComplexSchema = await completion(
    client,
    'Generate a step by step plan to run a hackathon',
    {
      schema: z.object({
        plan: z.array(
          z.object({
            reason: z.string().describe('Name the reasoning for this step'),
            name: z.string().describe('Step name'),
            task: z
              .string()
              .describe('What is the task to be done for this step?')
              .optional(),
          }),
        ),
      }),
    },
  );
  console.info('Response 3:', resComplexSchema.data);

  // schema with array length constraints (min/max)
  const resBulletPoints = await completion(
    client,
    'Generate a list of interesting areas of exploration about the renaissance',
    {
      schema: z.object({
        topics: z
          .array(
            z.object({
              title: z.string().describe('Title of the idea'),
              reason: z.string().describe('Why you choose this idea'),
              peopleInvolved: z
                .string()
                .describe(
                  "If there any known figures that's related to this idea",
                )
                .optional(),
            }),
          )
          .min(10)
          .max(20),
      }),
    },
  );
  console.info('Response 4:', resBulletPoints.data);

  // continue the conversation via the `respond` helper on the response
  const resBulletPoints2 = await resBulletPoints.respond('Generate 10 more');
  console.info('Response 4R:', resBulletPoints2.data);

  // seed the request with prior message history
  const resMessageHistory = await completion(
    client,
    'What did I mention in my first message to you?',
    {
      messageHistory: [
        { role: 'user', content: 'Tell me about large language models' },
        { role: 'assistant', content: 'ok' },
      ],
    },
  );
  console.info('Response 5:', resMessageHistory.data);

  // chained respond() calls: five rounds of "why"
  const meaning = await completion(client, 'What is the meaning of life?')
    .then((res) => res.respond('why'))
    .then((res) => res.respond('why'))
    .then((res) => res.respond('why'))
    .then((res) => res.respond('why'))
    .then((res) => res.respond('why'));

  console.info('The meaning of life after 5 whys is: ', meaning.content);
})();


================================================
FILE: src/__snapshots__/text-splitter.test.ts.snap
================================================
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`RecursiveCharacterTextSplitter Should correctly spilt text by seperators 1`] = `
[
  "Hello world",
  "this is a test of the recursive text splitter",
]
`;

exports[`RecursiveCharacterTextSplitter Should correctly spilt text by seperators 2`] = `
[
  "Hello world, this is a test of the recursive text splitter",
  "If I have a period, it should split along the period.",
]
`;

exports[`RecursiveCharacterTextSplitter Should correctly spilt text by seperators 3`] = `
[
  "Hello world, this is a test of the recursive text splitter",
  "If I have a period, it should split along the period.",
  "Or, if there is a new line, it should prioritize splitting on new lines instead.",
]
`;

exports[`RecursiveCharacterTextSplitter Should correctly spilt text by seperators 4`] = `
[
  58,
  53,
  80,
]
`;


================================================
FILE: src/completion.ts
================================================
import {
  TokenError,
  CompletionApi,
  AnthropicChatApi,
  ChatRequestMessage,
  AnthropicBedrockChatApi,
  GroqChatApi,
} from 'llm-api';
import { defaults, last } from 'lodash';
import { z } from 'zod';

import type { RequestOptions, Response } from './types';
import { debug, parseUnsafeJson, zodToJsonSchema } from './utils';

// Name of the synthetic function the model is coerced into calling so that
// structured output comes back as function-call arguments.
const FunctionName = 'print';
// Description shown to the model for the synthetic function.
const FunctionDescription =
  'Respond by calling this function with the correct parameters.';

// Default request options applied when the caller does not override them.
const Defaults = {
  autoHeal: true,
  autoSlice: false,
};

/**
 * Send a single-prompt completion request. The prompt (or the result of the
 * prompt factory) is appended as a user message after any caller-provided
 * message history, and the request is delegated to `chat`.
 */
export async function completion<T extends z.ZodType = z.ZodString>(
  model: CompletionApi,
  prompt: string | (() => string),
  opt?: Partial<RequestOptions<T>>,
): Promise<Response<T>> {
  // a prompt may be a lazy factory; resolve it to a plain string first
  const content = typeof prompt === 'function' ? prompt() : prompt;
  const history: ChatRequestMessage[] = opt?.messageHistory ?? [];
  const messages: ChatRequestMessage[] = history.concat({
    role: 'user',
    content,
  });
  return chat(model, messages, opt);
}

/**
 * Core chat entry point: sends `messages` to the model and, when a zod schema
 * is supplied, coerces the model into structured output — via the function-call
 * API where available, otherwise via an injected JSON system prompt — then
 * validates the result. Supports self-healing retries (`autoHeal`) and
 * token-overflow prompt slicing (`autoSlice`).
 */
export async function chat<T extends z.ZodType = z.ZodString>(
  model: CompletionApi,
  messages: ChatRequestMessage[],
  _opt?: Partial<RequestOptions<T>>,
): Promise<Response<T>> {
  const jsonSchema = _opt?.schema && zodToJsonSchema(_opt?.schema);
  // merge: computed function-call options > caller options > library Defaults
  const opt = defaults(
    {
      // build function to call if schema is defined
      callFunction: _opt?.schema
        ? _opt.functionName ?? FunctionName
        : undefined,
      functions: _opt?.schema
        ? [
            {
              name: _opt.functionName ?? FunctionName,
              description: _opt.functionDescription ?? FunctionDescription,
              parameters: jsonSchema,
            },
          ]
        : undefined,
    },
    _opt,
    Defaults,
  );

  // the functions API only accepts object-shaped parameter schemas, so reject
  // any top-level schema that is not a ZodObject
  if (
    opt.schema &&
    (opt.schema._def as any).typeName !== z.ZodFirstPartyTypeKind.ZodObject
  ) {
    throw new Error('Schemas can ONLY be an object');
  }
  debug.log('⬆️ sending request:', messages);

  try {
    // these providers have no function-calling support, so we fall back to a
    // prompt-engineered JSON output path for them
    const hasFunctionCall = !(
      model instanceof AnthropicChatApi ||
      model instanceof AnthropicBedrockChatApi ||
      model instanceof GroqChatApi
    );
    const schemaInstructions =
      !hasFunctionCall && _opt?.schema && JSON.stringify(jsonSchema);
    const firstSchemaKey =
      !hasFunctionCall &&
      _opt?.schema &&
      Object.keys(jsonSchema['properties'])[0];
    // seed the assistant's reply with the opening of the expected JSON object
    const responsePrefix = `\`\`\`json\n{ "${firstSchemaKey}":`;
    const stopSequence = '```';

    // Anthropic does not have support for functions, so create a custom system message and inject it as the first system message
    // Use the `responsePrefix` property to steer anthropic to output in the json structure
    let response =
      !hasFunctionCall && _opt?.schema
        ? await model.chatCompletion(messages, {
            ...opt,
            systemMessage:
              `You will respond to ALL human messages in JSON. Make sure the response correctly follow the following JSON schema specifications:\n<json_schema>\n${schemaInstructions}\n</json_schema>\n\n${
                opt.systemMessage
                  ? typeof opt.systemMessage === 'string'
                    ? opt.systemMessage
                    : opt.systemMessage()
                  : ''
              }`.trim(),
            responsePrefix: opt.responsePrefix ?? responsePrefix,
            stop: stopSequence,
          })
        : await model.chatCompletion(messages, opt);
    if (!response) {
      throw new Error('Chat request failed');
    }

    // only send this debug msg when stream is not enabled, or there'll be duplicate log msgs since stream also streams in the logs
    !model.modelConfig.stream && debug.log('⬇️ received response:', response);

    // validate res content, and recursively loop if invalid
    if (opt?.schema) {
      // function-call path: if the model replied with plain text instead of a
      // function call, ask it once to call the function (when autoHeal is on)
      if (hasFunctionCall && !response.arguments) {
        if (opt.autoHeal) {
          debug.log('⚠️ function not called, autohealing...');
          response = await response.respond({
            role: 'user',
            content: `Please respond with a call to the ${FunctionName} function`,
          });

          if (!response.arguments) {
            throw new Error('Response function autoheal failed');
          }
        } else {
          throw new Error('Response function not called');
        }
      }

      // extract candidate JSON: function-call arguments, or best-effort parse
      // of the raw text content on the non-function-call path
      let json = hasFunctionCall
        ? response.arguments
        : parseUnsafeJson(response.content ?? '');
      if (!json) {
        throw new Error('No response received');
      }

      const res = opt.schema.safeParse(json);
      if (res.success) {
        return {
          ...response,
          // `respond` continues the conversation with the full transcript so
          // far; a plain string is wrapped in the provider-appropriate role
          respond: (message: string | ChatRequestMessage, opt) =>
            chat(
              model,
              [
                ...messages,
                response.message,
                typeof message === 'string'
                  ? {
                      role: hasFunctionCall ? 'tool' : 'user',
                      toolCallId: response.toolCallId,
                      content: message,
                    }
                  : message,
              ],
              opt ?? _opt,
            ),
          data: res.data,
        };
      } else {
        debug.error('⚠️ error parsing response', res.error);
        if (opt.autoHeal) {
          debug.log('⚠️ response parsing failed, autohealing...', res.error);
          // fold every zod issue (with its path) into a single correction
          // prompt and re-ask the model once
          const issuesMessage = res.error.issues.reduce(
            (prev, issue) =>
              issue.path && issue.path.length > 0
                ? `${prev}\nThe issue is at path ${issue.path.join('.')}: ${
                    issue.message
                  }.`
                : `\nThe issue is: ${issue.message}.`,
            hasFunctionCall
              ? `There is an issue with that response, please rewrite by calling the ${FunctionName} function with the correct parameters.`
              : `There is an issue with that response, please follow the JSON schema EXACTLY, the output must be valid parsable JSON: ${schemaInstructions}`,
          );
          response = await response.respond(issuesMessage);
        } else {
          throw new Error('Response parsing failed');
        }
      }

      // re-extract JSON from the healed response
      json = hasFunctionCall
        ? response.arguments
        : parseUnsafeJson(response.content ?? '');
      if (!json) {
        throw new Error('Response schema autoheal failed');
      }

      // TODO: there is definitely a cleaner way to implement this to avoid the duplicate parsing
      const data = opt.schema.parse(json);
      return {
        ...response,
        respond: (message: string | ChatRequestMessage, opt) =>
          chat(
            model,
            [
              ...messages,
              response.message,
              typeof message === 'string'
                ? {
                    role: hasFunctionCall ? 'tool' : 'user',
                    toolCallId: response.toolCallId,
                    content: message,
                  }
                : message,
            ],
            opt ?? _opt,
          ),
        data,
      };
    }

    // if no schema is defined, default to string
    return {
      ...response,
      respond: (message: string | ChatRequestMessage, opt) =>
        chat(
          model,
          [
            ...messages,
            response.message,
            typeof message === 'string'
              ? { role: 'user', content: message }
              : message,
          ],
          opt ?? _opt,
        ),
      data: String(response.content),
    };
  } catch (e) {
    // For autoslice, keep looping recursively, chopping off a bit of the message at a time, until it fits
    if (e instanceof TokenError && opt.autoSlice) {
      // break out the last message to auto slice
      const message = last(messages)?.content ?? '';
      // NOTE(review): chunkSize mixes character length with overflow *tokens*;
      // this is a deliberate conservative heuristic (1 token >= 1 char), since
      // each recursion re-checks the limit — confirm against llm-api's
      // TokenError semantics before changing.
      const chunkSize = message.length - e.overflowTokens;
      if (chunkSize < 0) {
        throw e;
      }

      debug.log(
        `⚠️ Request prompt too long, splitting text with chunk size of ${chunkSize}`,
      );
      const newMessage = message.slice(0, chunkSize);
      return chat(
        model,
        [...messages.slice(0, -1), { role: 'user', content: newMessage }],
        opt,
      );
    } else {
      throw e;
    }
  }
}


================================================
FILE: src/config.ts
================================================
// completion request defaults
// Wait this long (ms) before retrying after a rate-limit error.
export const RateLimitRetryIntervalMs = 30_000;
// Default number of retry attempts for a failed completion request.
export const CompletionDefaultRetries = 3;
// Default per-request timeout (ms).
export const CompletionDefaultTimeout = 60_000;
// Minimum token budget to reserve for the model's response.
// NOTE(review): exact consumer semantics live in completion.ts — verify there.
export const MinimumResponseTokens = 200;


================================================
FILE: src/index.ts
================================================
// Package entry point: re-export the public API surface.
// (config.ts and utils.ts are intentionally not re-exported here.)
export * from './completion';
export * from './text-splitter';
export * from './types';


================================================
FILE: src/text-splitter.test.ts
================================================
import { RecursiveCharacterTextSplitter } from './text-splitter';

// NOTE(review): the it() title contains typos ('spilt', 'seperators').
// Fixing the title string would change the snapshot keys in
// __snapshots__/text-splitter.test.ts.snap, so any rename must regenerate
// the snapshots in the same change.
describe('RecursiveCharacterTextSplitter', () => {
  it('Should correctly spilt text by seperators', () => {
    const splitter = new RecursiveCharacterTextSplitter({
      chunkSize: 50,
      chunkOverlap: 10,
    });
    expect(
      splitter.splitText(
        'Hello world, this is a test of the recursive text splitter.',
      ),
    ).toMatchSnapshot();

    // chunkSize is mutated in place between cases; the same splitter
    // instance (and its chunkOverlap of 10) is reused throughout.
    splitter.chunkSize = 100;
    expect(
      splitter.splitText(
        'Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.',
      ),
    ).toMatchSnapshot();

    splitter.chunkSize = 110;
    const res = splitter.splitText(
      'Hello world, this is a test of the recursive text splitter. If I have a period, it should split along the period.\nOr, if there is a new line, it should prioritize splitting on new lines instead.',
    );
    expect(res).toMatchSnapshot();
    // Also snapshot the chunk lengths to make size regressions visible.
    expect(res.map((r) => r.length)).toMatchSnapshot();
  });
});


================================================
FILE: src/text-splitter.ts
================================================
interface TextSplitterParams {
  // target maximum chunk length, measured in characters
  chunkSize: number;

  // number of characters carried over between consecutive chunks;
  // must be strictly smaller than chunkSize
  chunkOverlap: number;
}

abstract class TextSplitter implements TextSplitterParams {
  chunkSize = 1000;
  chunkOverlap = 200;

  /**
   * @param fields optional overrides for chunkSize / chunkOverlap.
   * @throws Error when chunkOverlap >= chunkSize — an overlap that large
   *         could never make forward progress when packing chunks.
   */
  constructor(fields?: Partial<TextSplitterParams>) {
    this.chunkSize = fields?.chunkSize ?? this.chunkSize;
    this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap;
    if (this.chunkOverlap >= this.chunkSize) {
      throw new Error('Cannot have chunkOverlap >= chunkSize');
    }
  }

  // Subclasses define how a single text is broken into chunks.
  abstract splitText(text: string): string[];

  // Split every input text and flatten the resulting chunks into one list.
  createDocuments(texts: string[]): string[] {
    const documents: string[] = [];
    for (const text of texts) {
      documents.push(...this.splitText(text));
    }
    return documents;
  }

  // Alias of createDocuments, kept for API symmetry.
  splitDocuments(documents: string[]): string[] {
    return this.createDocuments(documents);
  }

  // Join pieces with the separator; null signals "nothing to emit".
  private joinDocs(docs: string[], separator: string): string | null {
    const text = docs.join(separator).trim();
    return text === '' ? null : text;
  }

  /**
   * Greedily pack `splits` into chunks of at most ~chunkSize characters,
   * carrying up to chunkOverlap trailing characters into the next chunk.
   * NOTE: lengths are totalled without the separators joinDocs inserts,
   * so an emitted chunk can slightly exceed chunkSize.
   */
  mergeSplits(splits: string[], separator: string): string[] {
    const docs: string[] = [];
    const currentDoc: string[] = [];
    let total = 0;
    for (const d of splits) {
      const _len = d.length;
      if (total + _len >= this.chunkSize) {
        if (total > this.chunkSize) {
          // Fixed: the template literal previously contained a stray `+`
          // and line break (a leftover concatenation artifact) that were
          // printed literally inside the warning text.
          console.warn(
            `Created a chunk of size ${total}, which is longer than the specified ${this.chunkSize}`,
          );
        }
        if (currentDoc.length > 0) {
          const doc = this.joinDocs(currentDoc, separator);
          if (doc !== null) {
            docs.push(doc);
          }
          // Keep on popping if:
          // - we have a larger chunk than in the chunk overlap
          // - or if we still have any chunks and the length is long
          while (
            total > this.chunkOverlap ||
            (total + _len > this.chunkSize && total > 0)
          ) {
            total -= currentDoc[0].length;
            currentDoc.shift();
          }
        }
      }
      currentDoc.push(d);
      total += _len;
    }
    // Flush whatever remains in the window as the final chunk.
    const doc = this.joinDocs(currentDoc, separator);
    if (doc !== null) {
      docs.push(doc);
    }
    return docs;
  }
}

export interface CharacterTextSplitterParams extends TextSplitterParams {
  // the string to split on; an empty string splits into single characters
  separator: string;
}

export class CharacterTextSplitter
  extends TextSplitter
  implements CharacterTextSplitterParams
{
  separator = '\n\n';

  constructor(fields?: Partial<CharacterTextSplitterParams>) {
    super(fields);
    this.separator = fields?.separator ?? this.separator;
  }

  /**
   * Split `text` on the configured separator, then re-pack the pieces
   * into chunks no larger than ~chunkSize via mergeSplits.
   */
  public splitText(text: string): string[] {
    // An empty separator means "split into individual characters".
    const pieces = this.separator
      ? text.split(this.separator)
      : text.split('');
    return this.mergeSplits(pieces, this.separator);
  }
}

export interface RecursiveCharacterTextSplitterParams
  extends TextSplitterParams {
  // candidate separators, tried in order from coarsest to finest;
  // a trailing '' entry falls back to per-character splitting
  separators: string[];
}

export class RecursiveCharacterTextSplitter
  extends TextSplitter
  implements RecursiveCharacterTextSplitterParams
{
  separators: string[] = ['\n\n', '\n', '.', ',', ' ', ''];

  constructor(fields?: Partial<RecursiveCharacterTextSplitterParams>) {
    super(fields);
    this.separators = fields?.separators ?? this.separators;
  }

  /**
   * Split `text` on the coarsest separator present, packing pieces into
   * chunks of ~chunkSize; oversized pieces are recursively re-split
   * (a finer separator gets chosen on recursion because splitting removed
   * the current one from each piece).
   */
  splitText(text: string): string[] {
    // Pick the first separator that occurs in the text. '' always matches
    // (it means character-by-character); fall back to the last entry.
    const separator: string =
      this.separators.find((s) => s === '' || text.includes(s)) ??
      this.separators[this.separators.length - 1];

    const pieces = separator ? text.split(separator) : text.split('');

    const finalChunks: string[] = [];
    let pending: string[] = [];
    // Merge accumulated small pieces into chunks and reset the buffer.
    const flush = () => {
      if (pending.length) {
        finalChunks.push(...this.mergeSplits(pending, separator));
        pending = [];
      }
    };

    for (const piece of pieces) {
      if (piece.length < this.chunkSize) {
        pending.push(piece);
      } else {
        // Oversized piece: emit what we have so far, then recurse on it.
        flush();
        finalChunks.push(...this.splitText(piece));
      }
    }
    flush();
    return finalChunks;
  }
}


================================================
FILE: src/types.ts
================================================
import { ModelRequestOptions, ChatResponse, ChatRequestMessage } from 'llm-api';
import { z } from 'zod';

// don't expose the functions array to the request layer
export type RequestOptions<T extends z.ZodType> = Omit<
  ModelRequestOptions,
  'functions' | 'callFunction'
> & {
  // set a zod schema to enable structured JSON output
  schema?: T;

  // override the default function name and description used to print outputs
  functionName?: string;
  functionDescription?: string;

  // set to enable automatically slicing the prompt on token overflow. prompt will be sliced starting from the last character
  // default: false
  autoSlice?: boolean;

  // attempt to auto heal malformed output by reflecting the schema error back to the model
  // default: true
  autoHeal?: boolean;

  // set message history, useful if you want to continue an existing conversation
  messageHistory?: ChatRequestMessage[];
};

export type Response<T extends z.ZodType> = {
  // override previous respond method to include schema types;
  // continues the conversation with the new message appended to history
  respond: (
    message: string | ChatRequestMessage,
    opt?: ModelRequestOptions,
  ) => Promise<Response<T>>;

  // parsed and typecast data from the model, validated against the schema
  data: z.infer<T>;
} & ChatResponse;


================================================
FILE: src/utils.ts
================================================
import { debug as mDebug } from 'debug';
import jsonic from 'jsonic';
import { jsonrepair } from 'jsonrepair';
import { omit } from 'lodash';
import { z } from 'zod';
import zodToJsonSchemaImpl from 'zod-to-json-schema';

// Namespaced loggers via the `debug` package; enable with e.g.
// DEBUG=zod-gpt:* in the environment.
const error = mDebug('zod-gpt:error');
const log = mDebug('zod-gpt:log');
// route log output to stdout instead of debug's default stderr
// eslint-disable-next-line no-console
log.log = console.log.bind(console);

export const debug = {
  error,
  log,
  // Write raw text (no prefix/newline) when the log namespace is enabled.
  // NOTE(review): this treats DEBUG as a regex via String.match — a value
  // like 'zod-gpt:*' happens to match, but this is not the same wildcard
  // matching the debug package performs; verify for complex DEBUG values.
  write: (t: string) =>
    process.env.DEBUG &&
    'zod-gpt:log'.match(process.env.DEBUG) &&
    process.stdout.write(t),
};

/**
 * Resolve after `delay` milliseconds — an awaitable pause.
 */
export function sleep(delay: number) {
  return new Promise((resolve) => setTimeout(resolve, delay));
}

// Greedily grab the outermost {...} span. `[\s\S]` matches every character
// including \r — the previous `(.|\n)` form missed \r (`.` excludes all
// line terminators), so CRLF-formatted responses failed to extract.
const extractJSONObjectResponse = (res: string): string | undefined =>
  res.match(/\{[\s\S]*\}/g)?.[0];

// Greedily grab the outermost [...] span (same CRLF fix as above).
const extractJSONArrayResponse = (res: string): string | undefined =>
  res.match(/\[[\s\S]*\]/g)?.[0];

// Pull the body of the first ```json fenced block, if any (lazy match so
// only the first fence is taken; same CRLF fix as above).
const extractJSONMarkdownResponse = (res: string): string | undefined => {
  const match = res.match(/```json([\s\S]*?)```/g)?.[0];
  return match ? match.replace(/```json|```/g, '').trim() : undefined;
};

/**
 * Best-effort JSON extraction from raw model output: prefer a ```json
 * fenced block, then take the widest {...} or [...] span, repair it with
 * jsonrepair, and leniently parse with jsonic. Returns undefined when
 * nothing parseable is found or parsing throws.
 */
export function parseUnsafeJson(json: string): any {
  try {
    const fromMarkdown = extractJSONMarkdownResponse(json);
    const candidate = fromMarkdown ?? json;
    const asArray = extractJSONArrayResponse(candidate);
    const asObject = extractJSONObjectResponse(candidate);
    // Prefer the longer span so we keep the outermost container.
    const extracted =
      (asArray?.length ?? 0) > (asObject?.length ?? 0) ? asArray : asObject;
    if (!extracted) {
      return undefined;
    }
    return jsonic(jsonrepair(extracted));
  } catch (e) {
    debug.error('⚠️ error parsing unsafe json: ', json, e);
    return undefined;
  }
}

/**
 * Convert a zod schema to a plain JSON schema, inlining refs and
 * stripping metadata keys the consumer does not need.
 */
export function zodToJsonSchema(schema: z.ZodType): any {
  // keys removed from the generated schema
  const strippedKeys = [
    '$ref',
    '$schema',
    'default',
    'definitions',
    'description',
    'markdownDescription',
  ];
  const generated = zodToJsonSchemaImpl(schema, { $refStrategy: 'none' });
  return omit(generated, strippedKeys);
}

// A value that may or may not be wrapped in a Promise.
export type MaybePromise<T> = Promise<T> | T;


================================================
FILE: tsconfig.json
================================================
{
  "compilerOptions": {
    // language level & module resolution
    "target": "esnext",
    "moduleResolution": "node",
    "lib": ["dom", "esnext"],
    "allowJs": true,
    // NOTE(review): "strict": true below already implies alwaysStrict,
    // noImplicitAny, noImplicitThis and strictNullChecks; the explicit
    // flags are kept for clarity but are redundant.
    "alwaysStrict": true,
    "skipLibCheck": true,
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true,
    "strict": true,
    "forceConsistentCasingInFileNames": true,
    "resolveJsonModule": true,
    "noFallthroughCasesInSwitch": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noImplicitAny": true,
    "noImplicitThis": true,
    "strictNullChecks": true,

    // compile settings
    "module": "commonjs",
    "declaration": true,
    "declarationMap": true,
    "sourceMap": false,
    "removeComments": true,
    "outDir": "dist"
  },
  "include": ["src/**/*", "playground.ts"],
  "exclude": ["dist", "node_modules", "**/__mocks__/*"],
  // ts-node (used for the playground) compiles with commonjs modules
  "ts-node": {
    "compilerOptions": { "module": "commonjs" }
  }
}
Download .txt
gitextract_3ud4kvde/

├── .editorconfig
├── .eslintrc.json
├── .gitattributes
├── .github/
│   └── workflows/
│       ├── publish.yml
│       └── test.yml
├── .gitignore
├── .husky/
│   └── pre-commit
├── .prettierrc
├── LICENSE
├── README.md
├── jest.config.js
├── package.json
├── playground.ts
├── src/
│   ├── __snapshots__/
│   │   └── text-splitter.test.ts.snap
│   ├── completion.ts
│   ├── config.ts
│   ├── index.ts
│   ├── text-splitter.test.ts
│   ├── text-splitter.ts
│   ├── types.ts
│   └── utils.ts
└── tsconfig.json
Download .txt
SYMBOL INDEX (22 symbols across 4 files)

FILE: src/completion.ts
  function completion (line 24) | async function completion<T extends z.ZodType = z.ZodString>(
  function chat (line 38) | async function chat<T extends z.ZodType = z.ZodString>(

FILE: src/text-splitter.ts
  type TextSplitterParams (line 1) | interface TextSplitterParams {
  method constructor (line 11) | constructor(fields?: Partial<TextSplitterParams>) {
  method createDocuments (line 21) | createDocuments(texts: string[]): string[] {
  method splitDocuments (line 32) | splitDocuments(documents: string[]): string[] {
  method joinDocs (line 36) | private joinDocs(docs: string[], separator: string): string | null {
  method mergeSplits (line 41) | mergeSplits(splits: string[], separator: string): string[] {
  type CharacterTextSplitterParams (line 82) | interface CharacterTextSplitterParams extends TextSplitterParams {
  class CharacterTextSplitter (line 86) | class CharacterTextSplitter
    method constructor (line 92) | constructor(fields?: Partial<CharacterTextSplitterParams>) {
    method splitText (line 97) | public splitText(text: string): string[] {
  type RecursiveCharacterTextSplitterParams (line 109) | interface RecursiveCharacterTextSplitterParams
  class RecursiveCharacterTextSplitter (line 114) | class RecursiveCharacterTextSplitter
    method constructor (line 120) | constructor(fields?: Partial<RecursiveCharacterTextSplitterParams>) {
    method splitText (line 125) | splitText(text: string): string[] {

FILE: src/types.ts
  type RequestOptions (line 5) | type RequestOptions<T extends z.ZodType> = Omit<
  type Response (line 28) | type Response<T extends z.ZodType> = {

FILE: src/utils.ts
  function sleep (line 22) | function sleep(delay: number) {
  function parseUnsafeJson (line 39) | function parseUnsafeJson(json: string): any {
  function zodToJsonSchema (line 60) | function zodToJsonSchema(schema: z.ZodType): any {
  type MaybePromise (line 72) | type MaybePromise<T> = Promise<T> | T;
Condensed preview — 22 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (42K chars).
[
  {
    "path": ".editorconfig",
    "chars": 134,
    "preview": "root = true\n\n[*]\nend_of_line = lf\ninsert_final_newline = true\n\n[*.{js,json,yml}]\ncharset = utf-8\nindent_style = space\nin"
  },
  {
    "path": ".eslintrc.json",
    "chars": 1844,
    "preview": "{\n  \"root\": true,\n  \"plugins\": [\"prettier\", \"@typescript-eslint\"],\n  \"extends\": [\n    \"universe/node\",\n    \"plugin:prett"
  },
  {
    "path": ".gitattributes",
    "chars": 142,
    "preview": "/.yarn/**            linguist-vendored\n/.yarn/releases/*    binary\n/.yarn/plugins/**/*  binary\n/.pnp.*              bina"
  },
  {
    "path": ".github/workflows/publish.yml",
    "chars": 872,
    "preview": "name: Release & Publish\n\non:\n  push:\n    tags:\n      - 'v*'\n\njobs:\n  release:\n    runs-on: ubuntu-latest\n    steps:\n    "
  },
  {
    "path": ".github/workflows/test.yml",
    "chars": 727,
    "preview": "name: Test\n\non:\n  push:\n    branches: [main, staging]\n  pull_request:\n    branches: [main, staging]\n\njobs:\n  test:\n    r"
  },
  {
    "path": ".gitignore",
    "chars": 296,
    "preview": ".yarn/*\n!.yarn/patches\n!.yarn/plugins\n!.yarn/releases\n!.yarn/sdks\n!.yarn/versions\n\n\n# Swap the comments on the following"
  },
  {
    "path": ".husky/pre-commit",
    "chars": 58,
    "preview": "#!/bin/sh\n. \"$(dirname \"$0\")/_/husky.sh\"\n\nnpx lint-staged\n"
  },
  {
    "path": ".prettierrc",
    "chars": 105,
    "preview": "{\n  \"printWidth\": 80,\n  \"tabWidth\": 2,\n  \"trailingComma\": \"all\",\n  \"semi\": true,\n  \"singleQuote\": true\n}\n"
  },
  {
    "path": "LICENSE",
    "chars": 1068,
    "preview": "MIT License\n\nCopyright (c) 2023 David Zhang\n\nPermission is hereby granted, free of charge, to any person obtaining a cop"
  },
  {
    "path": "README.md",
    "chars": 7881,
    "preview": "# ✨ ZodGPT\n\n[![test](https://github.com/dzhng/zod-gpt/actions/workflows/test.yml/badge.svg?branch=main&event=push)](http"
  },
  {
    "path": "jest.config.js",
    "chars": 986,
    "preview": "module.exports = {\n  preset: 'ts-jest',\n  testEnvironment: 'node',\n  clearMocks: true,\n  roots: ['<rootDir>/src'],\n  mod"
  },
  {
    "path": "package.json",
    "chars": 2112,
    "preview": "{\n  \"name\": \"zod-gpt\",\n  \"description\": \"Get structured, fully typed JSON outputs from OpenAI and Anthropic LLMs\",\n  \"ve"
  },
  {
    "path": "playground.ts",
    "chars": 4140,
    "preview": "import {\n  AnthropicChatApi,\n  OpenAIChatApi,\n  AnthropicBedrockChatApi,\n  GroqChatApi,\n} from 'llm-api';\nimport { z } f"
  },
  {
    "path": "src/__snapshots__/text-splitter.test.ts.snap",
    "chars": 853,
    "preview": "// Jest Snapshot v1, https://goo.gl/fbAQLP\n\nexports[`RecursiveCharacterTextSplitter Should correctly spilt text by seper"
  },
  {
    "path": "src/completion.ts",
    "chars": 8171,
    "preview": "import {\n  TokenError,\n  CompletionApi,\n  AnthropicChatApi,\n  ChatRequestMessage,\n  AnthropicBedrockChatApi,\n  GroqChatA"
  },
  {
    "path": "src/config.ts",
    "chars": 203,
    "preview": "// completion request\nexport const RateLimitRetryIntervalMs = 30_000;\nexport const CompletionDefaultRetries = 3;\nexport "
  },
  {
    "path": "src/index.ts",
    "chars": 88,
    "preview": "export * from './completion';\nexport * from './text-splitter';\nexport * from './types';\n"
  },
  {
    "path": "src/text-splitter.test.ts",
    "chars": 1038,
    "preview": "import { RecursiveCharacterTextSplitter } from './text-splitter';\n\ndescribe('RecursiveCharacterTextSplitter', () => {\n  "
  },
  {
    "path": "src/text-splitter.ts",
    "chars": 4642,
    "preview": "interface TextSplitterParams {\n  chunkSize: number;\n\n  chunkOverlap: number;\n}\n\nabstract class TextSplitter implements T"
  },
  {
    "path": "src/types.ts",
    "chars": 1171,
    "preview": "import { ModelRequestOptions, ChatResponse, ChatRequestMessage } from 'llm-api';\nimport { z } from 'zod';\n\n// don't expo"
  },
  {
    "path": "src/utils.ts",
    "chars": 2118,
    "preview": "import { debug as mDebug } from 'debug';\nimport jsonic from 'jsonic';\nimport { jsonrepair } from 'jsonrepair';\nimport { "
  },
  {
    "path": "tsconfig.json",
    "chars": 889,
    "preview": "{\n  \"compilerOptions\": {\n    \"target\": \"esnext\",\n    \"moduleResolution\": \"node\",\n    \"lib\": [\"dom\", \"esnext\"],\n    \"allo"
  }
]

About this extraction

This page contains the full source code of the dzhng/zod-gpt GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 22 files (38.6 KB), approximately 10.3k tokens, and a symbol index with 22 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!