Repository: dabit3/openai-functions-god-app
Branch: main
Commit: d99e8422553d
Files: 14
Total size: 10.6 KB
Directory structure:
gitextract_9_o6u2mw/
├── .eslintrc.json
├── .example.env.local
├── .gitignore
├── README.md
├── api.ts
├── app/
│ ├── api/
│ │ └── gpt/
│ │ └── route.ts
│ ├── globals.css
│ ├── layout.tsx
│ └── page.tsx
├── next.config.js
├── package.json
├── postcss.config.js
├── tailwind.config.js
└── tsconfig.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .eslintrc.json
================================================
{
"extends": "next/core-web-vitals"
}
================================================
FILE: .example.env.local
================================================
OPENAI_API_KEY=
REPLICATE_TOKEN=
================================================
FILE: .gitignore
================================================
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts
================================================
FILE: README.md
================================================
## God app
This is an experimental project using OpenAI function calling and Replicate to combine multiple AI APIs into one.
Instead of using separate interfaces for image generation, video generation, audio generation, and general text natural language processing, this app combines all of them together into a single prompt.
APIs currently supported:
- Text to image
- Text to video
- Text to audio
- General natural language processing
Examples of what you might ask:
- Create a 4 day travel itinerary for Mexico City.
- Create an image of someone in their production studio creating beats, futuristic, dim lighting, bronx new york
- Create a track that sounds like it might come from kanye west, hip-hop, soul samples, heavy drums, innovative melodies, experimental sounds, unusual effects, automation, modulating filters, distortion effects
- Create a video of clown fish swimming in a coral reef, beautiful, 8k, perfect, award winning, national geographic
## Prerequisites
To run this app, you must have the following:
1. OpenAI API Key
2. Replicate token
## Running the app
To run this app, follow these steps:
1. Clone the repo
```sh
git clone git@github.com:dabit3/openai-functions-god-app.git
```
2. Change into the directory and install the dependencies:
```sh
cd openai-functions-god-app
npm install
```
3. Set environment variables in a file named `.env.local` (you can copy `.example.env.local`)
```
OPENAI_API_KEY=
REPLICATE_TOKEN=
```
4. Run the app
```sh
npm run dev
```
================================================
FILE: api.ts
================================================
================================================
FILE: app/api/gpt/route.ts
================================================
import { NextRequest, NextResponse } from 'next/server'
import Replicate from 'replicate'
// Replicate client used to run the video / music / image models below.
// REPLICATE_TOKEN comes from .env.local (see .example.env.local).
const replicate = new Replicate({
  auth: process.env.REPLICATE_TOKEN || ''
})

// OpenAI credentials and the Chat Completions endpoint used for function calling.
const KEY = process.env.OPENAI_API_KEY
const base_uri = 'https://api.openai.com/v1/chat/completions'
const headers = {
  'Content-Type': 'application/json',
  'Authorization': `Bearer ${KEY}`
}
// Base request body; per-request messages and function definitions are
// spread on top of this in the POST handler.
const data = {
  'model': 'gpt-4'
}
/**
 * POST /api/gpt
 *
 * Sends the user's query to the OpenAI Chat Completions API along with a set
 * of function definitions. If the model chooses a function, the matching
 * Replicate model is run and its output is returned; otherwise the model's
 * plain-text reply is returned.
 *
 * Request body:  { query: string }
 * Response body: { data, type: 'video' | 'audio' | 'image' | 'text' }
 *                or { error: string } with HTTP 500 on failure.
 *
 * Note: App Router route handlers receive only the request (plus an optional
 * route context); the previous unused `res: NextResponse` parameter was wrong
 * and has been dropped.
 */
export async function POST(req: NextRequest) {
  try {
    const { query } = await req.json()
    const requestData = {
      ...data,
      'messages': [
        { 'role': 'user', 'content': query }
      ],
      functions: [
        {
          name: 'createVideo',
          description: 'generate a video using replicate, an AI LLM',
          parameters: {
            type: 'object',
            properties: {
              prompt: {
                type: 'string',
                description: 'the main prompt that should be passed in to the LLM'
              },
              guidance_scale: {
                type: 'integer',
                description: 'This is the requested guidance scale for the video in a numeric value. Default to 17.5 if none is defined in the prompt.'
              },
              num_frames: {
                type: 'integer',
                description: 'The number of frames if defined in the prompt'
              },
              height: {
                type: 'integer',
                description: 'The height of the video if defined in the prompt. Not affected by resolution.'
              },
              width: {
                type: 'integer',
                description: 'The width of the video if defined in the prompt. Not affected by resolution.'
              }
            },
            'required': ['prompt', 'guidance_scale'],
          }
        },
        {
          name: 'createMusic',
          description: 'generate music using replicate',
          parameters: {
            type: 'object',
            properties: {
              prompt: {
                type: 'string',
                description: 'the exact prompt passed in'
              }
            }
          }
        },
        {
          name: 'createImage',
          description: 'generates an image using replicate',
          parameters: {
            type: 'object',
            properties: {
              prompt: {
                type: 'string',
                description: 'the exact prompt passed in'
              }
            }
          }
        }
      ],
      // Let the model decide whether to call a function or answer directly.
      function_call: 'auto'
    }

    const response = await fetch(base_uri, {
      method: 'POST',
      headers,
      body: JSON.stringify(requestData)
    })
    // Surface OpenAI-side failures (bad key, rate limit, …) instead of
    // crashing below on a missing `choices` array.
    if (!response.ok) {
      throw new Error(`OpenAI request failed with status ${response.status}`)
    }
    const json = await response.json()
    const choice = json.choices[0]
    const { function_call } = choice.message
    console.log('function_call: ', function_call)

    if (function_call) {
      // OpenAI returns function arguments as a JSON string.
      const args = JSON.parse(function_call.arguments)
      if (function_call.name === 'createVideo') {
        const output = await replicate.run(
          'anotherjesse/zeroscope-v2-xl:71996d331e8ede8ef7bd76eba9fae076d31792e4ddf4ad057779b443d6aea62f',
          {
            input: {
              ...args,
            }
          }
        );
        return NextResponse.json({
          data: output,
          type: 'video'
        });
      }
      if (function_call.name === 'createMusic') {
        const output = await replicate.run(
          'joehoover/musicgen:7a76a8258b23fae65c5a22debb8841d1d7e816b75c2f24218cd2bd8573787906',
          {
            input: {
              // 'melody' is the MusicGen variant tuned for melodic output.
              model_version: 'melody',
              ...args
            }
          }
        )
        return NextResponse.json({
          data: output,
          type: 'audio'
        });
      }
      if (function_call.name === 'createImage') {
        const output = await replicate.run(
          'ai-forever/kandinsky-2:601eea49d49003e6ea75a11527209c4f510a93e2112c969d548fbb45b9c4f19f',
          {
            input: {
              ...args
            }
          }
        )
        return NextResponse.json({
          data: output,
          type: 'image'
        });
      }
      // The model named a function we don't handle; fall back to the text
      // content so the handler always returns a response (previously this
      // path returned undefined).
      return NextResponse.json({
        data: choice.message.content ?? '',
        type: 'text',
      });
    }

    console.log('choice: ', choice)
    return NextResponse.json({
      data: choice.message.content,
      type: 'text',
    });
  } catch (err) {
    console.log('error: ', err)
    // Error instances serialize to {} through JSON.stringify, so send the
    // message text instead, with a proper 500 status.
    const message = err instanceof Error ? err.message : String(err)
    return NextResponse.json({ error: message }, { status: 500 });
  }
}
================================================
FILE: app/globals.css
================================================
@tailwind base;
@tailwind components;
@tailwind utilities;

/* Light-mode palette; values are RGB triplets consumed via rgb(var(--…)). */
:root {
  --foreground-rgb: 0, 0, 0;
  --background-start-rgb: 214, 219, 220;
  --background-end-rgb: 255, 255, 255;
}

/* Dark-mode overrides, following the OS color-scheme preference. */
@media (prefers-color-scheme: dark) {
  :root {
    --foreground-rgb: 255, 255, 255;
    --background-start-rgb: 0, 0, 0;
    --background-end-rgb: 0, 0, 0;
  }
}

/* Page background: vertical gradient from the start color into the end color. */
body {
  color: rgb(var(--foreground-rgb));
  background: linear-gradient(
      to bottom,
      transparent,
      rgb(var(--background-end-rgb))
    )
    rgb(var(--background-start-rgb));
}
================================================
FILE: app/layout.tsx
================================================
import './globals.css'
import { Inter } from 'next/font/google'
const inter = Inter({ subsets: ['latin'] })

// App-wide <head> metadata. Replaces the create-next-app boilerplate
// ("Create Next App" / "Generated by create next app") with values that
// describe this project.
export const metadata = {
  title: 'God App',
  description:
    'One prompt for text, image, video, and audio generation using OpenAI function calling and Replicate',
}

/**
 * Root layout shared by every route: loads the global stylesheet and applies
 * the Inter font to the whole document.
 */
export default function RootLayout({
  children,
}: {
  children: React.ReactNode
}) {
  return (
    <html lang="en">
      <body className={inter.className}>{children}</body>
    </html>
  )
}
================================================
FILE: app/page.tsx
================================================
'use client'
import { useState } from 'react'
/**
 * Single-prompt UI: sends the prompt to /api/gpt and renders whichever media
 * type (image, video, audio, or plain text) the API route returns.
 */
export default function Home() {
  const [input, setInput] = useState('')
  const [image, setImage] = useState('')
  const [audio, setAudio] = useState('')
  const [video, setVideo] = useState('')
  const [text, setText] = useState('')

  // Posts the current prompt to the API route and stores the result in the
  // state slot matching the returned `type`.
  async function callApi() {
    try {
      if (!input) return
      // Clear any previous result so only the new response is shown.
      setImage('')
      setAudio('')
      setVideo('')
      setText('')
      const response = await fetch('/api/gpt', {
        method: "POST",
        // Declare the JSON payload explicitly (the body is JSON.stringify-ed).
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          query: input
        })
      })
      const { data, type } = await response.json()
      console.log('data:', data)
      // Replicate returns arrays for image/video outputs; audio and text
      // arrive as plain values.
      if (type === 'image') {
        setImage(data[0])
      }
      if (type === 'video') {
        setVideo(data[0])
      }
      if (type === 'audio') {
        setAudio(data)
      }
      // Was `==`; use strict equality consistently with the checks above.
      if (type === 'text') {
        setText(data)
      }
    } catch (err) {
      console.log('error:', err)
    }
  }

  return (
    <main className="flex flex-col items-center justify-between p-24">
      <input
        className="text-black px-3 py-1 rounded"
        onChange={e => setInput(e.target.value)}
      />
      <button
        onClick={callApi}
        className="rounded-full bg-green-500 text-white py-3 px-14 mt-3 mb-4 cursor-pointer"
      >IMAGINE</button>
      {
        image && <img src={image} alt="generated image" width="500px" />
      }
      {
        video && <video src={video} controls></video>
      }
      {
        text && <p>{text}</p>
      }
      {
        audio && (
          <audio controls>
            <source src={audio} type="audio/wav"></source>
          </audio>
        )
      }
    </main>
  )
}
================================================
FILE: next.config.js
================================================
/** @type {import('next').NextConfig} */
// No custom Next.js configuration is needed; framework defaults apply.
const nextConfig = {}

module.exports = nextConfig
================================================
FILE: package.json
================================================
{
"name": "gpt-functions",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@types/node": "20.3.2",
"@types/react": "18.2.14",
"@types/react-dom": "18.2.6",
"autoprefixer": "10.4.14",
"eslint": "8.43.0",
"eslint-config-next": "13.4.7",
"next": "13.4.7",
"postcss": "8.4.24",
"react": "18.2.0",
"react-dom": "18.2.0",
"replicate": "^0.12.3",
"tailwindcss": "3.3.2",
"typescript": "5.1.6"
}
}
================================================
FILE: postcss.config.js
================================================
// PostCSS pipeline: run Tailwind first, then vendor-prefix with Autoprefixer.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}
================================================
FILE: tailwind.config.js
================================================
/** @type {import('tailwindcss').Config} */
module.exports = {
  // Files scanned for class names so unused styles are purged from the build.
  content: [
    './pages/**/*.{js,ts,jsx,tsx,mdx}',
    './components/**/*.{js,ts,jsx,tsx,mdx}',
    './app/**/*.{js,ts,jsx,tsx,mdx}',
  ],
  theme: {
    extend: {
      // Gradient background utilities from the create-next-app starter.
      backgroundImage: {
        'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))',
        'gradient-conic':
          'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))',
      },
    },
  },
  plugins: [],
}
================================================
FILE: tsconfig.json
================================================
{
"compilerOptions": {
"target": "es5",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"forceConsistentCasingInFileNames": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"noImplicitAny": false,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": ["./*"]
}
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
}
gitextract_9_o6u2mw/ ├── .eslintrc.json ├── .example.env.local ├── .gitignore ├── README.md ├── api.ts ├── app/ │ ├── api/ │ │ └── gpt/ │ │ └── route.ts │ ├── globals.css │ ├── layout.tsx │ └── page.tsx ├── next.config.js ├── package.json ├── postcss.config.js ├── tailwind.config.js └── tsconfig.json
SYMBOL INDEX (4 symbols across 3 files)
FILE: app/api/gpt/route.ts
constant KEY (line 8) | const KEY = process.env.OPENAI_API_KEY
function POST (line 20) | async function POST(req: NextRequest, res: NextResponse) {
FILE: app/layout.tsx
function RootLayout (line 11) | function RootLayout({
FILE: app/page.tsx
function Home (line 4) | function Home() {
Condensed preview — 14 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (12K chars).
[
{
"path": ".eslintrc.json",
"chars": 40,
"preview": "{\n \"extends\": \"next/core-web-vitals\"\n}\n"
},
{
"path": ".example.env.local",
"chars": 32,
"preview": "OPENAI_API_KEY=\nREPLICATE_TOKEN="
},
{
"path": ".gitignore",
"chars": 368,
"preview": "# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n/.pn"
},
{
"path": "README.md",
"chars": 1493,
"preview": "## God app\n\nThis is an experimental project using OpenAI GPT Plugins and Replicate to combine all AI APis into one.\n\nIns"
},
{
"path": "api.ts",
"chars": 0,
"preview": ""
},
{
"path": "app/api/gpt/route.ts",
"chars": 4431,
"preview": "import { NextRequest, NextResponse } from 'next/server'\nimport Replicate from 'replicate'\n\nconst replicate = new Replica"
},
{
"path": "app/globals.css",
"chars": 538,
"preview": "@tailwind base;\n@tailwind components;\n@tailwind utilities;\n\n:root {\n --foreground-rgb: 0, 0, 0;\n --background-start-rg"
},
{
"path": "app/layout.tsx",
"chars": 409,
"preview": "import './globals.css'\nimport { Inter } from 'next/font/google'\n\nconst inter = Inter({ subsets: ['latin'] })\n\nexport con"
},
{
"path": "app/page.tsx",
"chars": 1674,
"preview": "'use client'\nimport { useState } from 'react'\n\nexport default function Home() {\n const [input, setInput] = useState('')"
},
{
"path": "next.config.js",
"chars": 92,
"preview": "/** @type {import('next').NextConfig} */\nconst nextConfig = {}\n\nmodule.exports = nextConfig\n"
},
{
"path": "package.json",
"chars": 581,
"preview": "{\n \"name\": \"gpt-functions\",\n \"version\": \"0.1.0\",\n \"private\": true,\n \"scripts\": {\n \"dev\": \"next dev\",\n \"build\":"
},
{
"path": "postcss.config.js",
"chars": 82,
"preview": "module.exports = {\n plugins: {\n tailwindcss: {},\n autoprefixer: {},\n },\n}\n"
},
{
"path": "tailwind.config.js",
"chars": 468,
"preview": "/** @type {import('tailwindcss').Config} */\nmodule.exports = {\n content: [\n './pages/**/*.{js,ts,jsx,tsx,mdx}',\n "
},
{
"path": "tsconfig.json",
"chars": 666,
"preview": "{\n \"compilerOptions\": {\n \"target\": \"es5\",\n \"lib\": [\"dom\", \"dom.iterable\", \"esnext\"],\n \"allowJs\": true,\n \"sk"
}
]
About this extraction
This page contains the full source code of the dabit3/openai-functions-god-app GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 14 files (10.6 KB), approximately 3.1k tokens, and a symbol index with 4 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.