Repository: bertrandamobi/AI-Workflow-Engine
Branch: main
Commit: 478a9dd05496
Files: 27
Total size: 21.2 KB
Directory structure:
gitextract_415u4r91/
├── .github/
│ └── workflows/
│ └── deploy-pages.yml
├── .gitignore
├── .gitkeep
├── README.md
├── backend/
│ ├── Dockerfile
│ ├── app/
│ │ ├── __init__.py
│ │ ├── api/
│ │ │ └── workflows.py
│ │ ├── core/
│ │ │ ├── config.py
│ │ │ └── logging.py
│ │ ├── graph/
│ │ │ └── workflow.py
│ │ ├── main.py
│ │ ├── schemas/
│ │ │ └── workflow.py
│ │ └── services/
│ │ ├── llm_service.py
│ │ └── vector_store.py
│ ├── pyproject.toml
│ └── tests/
│ └── test_health.py
├── docker-compose.yml
└── frontend/
├── Dockerfile
├── next.config.mjs
├── package.json
├── src/
│ ├── app/
│ │ ├── layout.tsx
│ │ ├── page.tsx
│ │ └── styles.css
│ ├── components/
│ │ └── WorkflowForm.tsx
│ ├── lib/
│ │ └── api.ts
│ └── types/
│ └── workflow.ts
└── tsconfig.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/deploy-pages.yml
================================================
# Build the Next.js static export and publish it to GitHub Pages.
name: Deploy Next.js static site to Pages

on:
  push:
    branches: ["main"]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

# Only one Pages deployment at a time; a newer run cancels an in-flight one.
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: 22
      # `npm install` (not `npm ci`): this repository intentionally ships no
      # lockfile, and setup-node caching is disabled for the same reason.
      - name: Install dependencies
        run: npm install
        working-directory: frontend
      - name: Build static site
        run: npm run build
        working-directory: frontend
      # `output: "export"` in next.config.mjs writes the static site to out/.
      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: frontend/out
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
================================================
FILE: .gitignore
================================================
# Python
__pycache__/
*.py[cod]
*.so
.venv/
.pytest_cache/
.mypy_cache/
.coverage
htmlcov/
# Node
node_modules/
.next/
dist/
coverage/
# Environment
.env
.env.local
# IDE
.vscode/
.idea/
.DS_Store
# Build
build/
*.log
================================================
FILE: .gitkeep
================================================
================================================
FILE: README.md
================================================
# Agentic AI Workflow Engine
A production-style, full-stack orchestration platform for autonomous market analysis workflows using **LangGraph**, **OpenAI**, and **vector retrieval patterns**.
## Architecture Overview
- **Backend**: FastAPI service exposing workflow execution APIs.
- **Orchestration**: LangGraph state-machine graph with explicit node transitions (`research -> analysis -> recommendations`).
- **LLM Layer**: OpenAI chat model abstraction with configurable model selection.
- **Retrieval Layer**: Chroma-based vector service abstraction for extensible RAG workflows.
- **Frontend**: Next.js dashboard to trigger workflows and inspect results.
- **Runtime**: Docker Compose for local full-stack bootstrapping.
## Repository Structure
```text
backend/
app/
api/ # HTTP routers
core/ # config + logging
graph/ # LangGraph workflow definitions
schemas/ # API contracts
services/ # LLM and vector abstractions
tests/
frontend/
src/
app/ # Next.js pages
components/ # UI components
lib/ # API clients
types/ # shared types
infra/ # reserved for IaC modules
scripts/ # tooling scripts
docs/ # architecture and ADRs
```
## Local Development
### 1) Prerequisites
- Python 3.11+
- Node 22+
- Docker + Docker Compose
### 2) Configure environment
Create a `.env` file at the repository root (no `.env.example` is currently tracked in this repo):
```bash
touch .env
```
Populate at least:
- `OPENAI_API_KEY`
- `OPENAI_MODEL`
### 3) Run backend
```bash
cd backend
python -m venv .venv && source .venv/bin/activate
pip install -e .
uvicorn app.main:app --reload --port 8000
```
### 4) Run frontend
```bash
cd frontend
npm install
npm run dev
```
### 5) Run full stack with Docker
```bash
docker compose up --build
```
## API Contract
### POST `/workflows/market-analysis`
Request:
```json
{
"company": "NVIDIA",
"objectives": ["Competitive positioning", "Pricing strategy"],
"context": {}
}
```
Response:
```json
{
"workflow_id": "uuid",
"status": "completed",
"summary": "...",
"artifacts": {
"research_notes": "...",
"recommendations": "..."
}
}
```
## Engineering Characteristics
- Clear separation of concerns (API / orchestration / services / schema boundaries).
- Strong typing across Python and TypeScript.
- Environment-driven configuration with explicit defaults.
- Dockerized runtime and clean onboarding docs.
- Test scaffold for API health and easy extension for integration tests.
## Scalability Roadmap
1. Add task queue + worker runtime (Celery/Arq) for asynchronous workflow runs.
2. Persist workflow state transitions in SQLModel-backed store.
3. Add observability stack (OpenTelemetry traces, structured logs, metrics).
4. Add multi-tenant authn/authz and policy-enforced tool access.
5. Expand graph with conditional branches and evaluator nodes.
## Quality Gates (recommended)
Backend:
```bash
pytest
ruff check .
mypy app
```
Frontend:
```bash
npm run lint
npm run build
```
## GitHub Pages Demo Deployment
This repository includes a ready-to-use workflow at `.github/workflows/deploy-pages.yml`.
1. Push your code to `main`.
2. In GitHub: **Settings → Pages → Source = GitHub Actions**.
3. Run workflow (push or manual dispatch).
The build publishes `frontend/out/` to Pages.
### Why previous workflow failed
If `actions/setup-node` uses npm caching without a lockfile path that exists, it fails with:
`Some specified paths were not resolved, unable to cache dependencies.`
This repository workflow avoids that issue by not requiring `cache-dependency-path`.
================================================
FILE: backend/Dockerfile
================================================
# Backend runtime image for the FastAPI service.
FROM python:3.11-slim
WORKDIR /app
# Copy project metadata first so the dependency layer is cached independently
# of application-code changes.
COPY pyproject.toml ./
# pyproject.toml declares `readme = "README.md"`, but no README exists inside
# the backend build context (the repo README lives at the root). Provide an
# empty stand-in so metadata generation during `pip install .` does not fail.
# TODO(review): add backend/README.md or drop the readme field instead.
RUN touch README.md
RUN pip install --no-cache-dir -U pip && pip install --no-cache-dir .
COPY app ./app
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
================================================
FILE: backend/app/__init__.py
================================================
================================================
FILE: backend/app/api/workflows.py
================================================
from fastapi import APIRouter
from app.graph.workflow import run_workflow
from app.schemas.workflow import WorkflowRequest, WorkflowResponse
router = APIRouter(prefix="/workflows", tags=["workflows"])


@router.post("/market-analysis", response_model=WorkflowResponse)
def execute_market_analysis(payload: WorkflowRequest) -> WorkflowResponse:
    """Run the market-analysis graph and return its typed result.

    Delegates to the LangGraph pipeline in app.graph.workflow and adapts the
    raw dict it returns into the WorkflowResponse contract.
    """
    outcome = run_workflow(payload.company, payload.objectives)
    return WorkflowResponse(**outcome)
================================================
FILE: backend/app/core/config.py
================================================
from functools import lru_cache
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Environment-driven application configuration.

    Values load from process environment variables and an optional `.env`
    file; unrecognised variables are ignored (extra="ignore").
    """

    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")
    # Runtime environment and HTTP bind settings.
    app_env: str = "development"
    app_host: str = "0.0.0.0"
    app_port: int = 8000
    log_level: str = "INFO"
    # OpenAI credentials and model selection; an empty key is the default,
    # so the LLM client fails only when actually invoked without one.
    openai_api_key: str = ""
    openai_model: str = "gpt-4.1"
    # Vector store backend and its on-disk persistence location.
    vector_store_provider: str = "chroma"
    chroma_persist_directory: str = "./data/chroma"
    # NOTE(review): database_url is declared but no code in this dump reads
    # it yet (sqlmodel is a declared dependency) — confirm intended use.
    database_url: str = "sqlite:///./workflow_engine.db"
@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Build Settings once and memoize it for the process lifetime."""
    settings = Settings()
    return settings
================================================
FILE: backend/app/core/logging.py
================================================
import logging
def configure_logging(level: str = "INFO") -> None:
    """Initialise root logging with a timestamped, pipe-delimited format.

    Unknown level names fall back to INFO rather than raising.
    """
    resolved = getattr(logging, level.upper(), logging.INFO)
    fmt = "%(asctime)s | %(levelname)s | %(name)s | %(message)s"
    logging.basicConfig(level=resolved, format=fmt)
================================================
FILE: backend/app/graph/workflow.py
================================================
from typing import TypedDict
from uuid import uuid4
from langgraph.graph import END, StateGraph
from app.services.llm_service import LLMService
class MarketState(TypedDict):
    """Shared state threaded through the market-analysis graph nodes."""

    # Inputs supplied by the caller.
    company: str
    objectives: list[str]
    # Outputs written by successive graph nodes (research -> analysis ->
    # recommendations); initialised to empty strings by run_workflow.
    research_notes: str
    analysis: str
    recommendations: str
# Module-level LLM client shared by all graph nodes.
# NOTE(review): constructed at import time, so importing this module builds a
# ChatOpenAI client immediately — consider lazy initialisation for testability.
llm = LLMService()
def research_node(state: MarketState) -> MarketState:
    """Populate research_notes with an LLM summary of market dynamics."""
    company = state["company"]
    state["research_notes"] = llm.summarize(
        f"Research top market dynamics for {company}."
    )
    return state
def analysis_node(state: MarketState) -> MarketState:
    """Derive a strategic analysis from the previously gathered notes."""
    query = (
        "Use notes to produce strategic analysis for "
        f"{state['company']}: {state['research_notes']}"
    )
    state["analysis"] = llm.summarize(query)
    return state
def recommendations_node(state: MarketState) -> MarketState:
    """Synthesise five executive recommendations from the analysis."""
    company = state["company"]
    analysis = state["analysis"]
    state["recommendations"] = llm.summarize(
        f"Provide 5 executive recommendations for {company} based on {analysis}"
    )
    return state
def build_market_workflow():
    """Compile the linear research -> analysis -> recommendations graph."""
    graph = StateGraph(MarketState)
    # Register the three pipeline stages, then wire them in a strict chain.
    for name, node in (
        ("research", research_node),
        ("analysis", analysis_node),
        ("recommendations", recommendations_node),
    ):
        graph.add_node(name, node)
    graph.set_entry_point("research")
    graph.add_edge("research", "analysis")
    graph.add_edge("analysis", "recommendations")
    graph.add_edge("recommendations", END)
    return graph.compile()
def run_workflow(company: str, objectives: list[str]) -> dict:
    """Execute the market-analysis graph and package its outputs.

    Returns a dict matching the WorkflowResponse contract: workflow_id,
    status, summary (the analysis text), and named artifacts.
    """
    initial: MarketState = {
        "company": company,
        "objectives": objectives,
        "research_notes": "",
        "analysis": "",
        "recommendations": "",
    }
    final = build_market_workflow().invoke(initial)
    return {
        "workflow_id": str(uuid4()),
        "status": "completed",
        "summary": final["analysis"],
        "artifacts": {
            "research_notes": final["research_notes"],
            "recommendations": final["recommendations"],
        },
    }
================================================
FILE: backend/app/main.py
================================================
from fastapi import FastAPI
from app.api.workflows import router as workflow_router
from app.core.config import get_settings
from app.core.logging import configure_logging
# Resolve configuration and initialise logging before the app is constructed.
settings = get_settings()
configure_logging(settings.log_level)

app = FastAPI(
    title="Agentic AI Workflow Engine",
    description="Enterprise orchestration engine powered by LangGraph",
    version="0.1.0",
)
app.include_router(workflow_router)


@app.get("/health")
def health_check() -> dict[str, str]:
    """Liveness probe used by tests and container orchestrators."""
    return {"status": "ok"}
================================================
FILE: backend/app/schemas/workflow.py
================================================
from typing import Any
from pydantic import BaseModel, Field
class WorkflowRequest(BaseModel):
    """Input contract for POST /workflows/market-analysis."""

    company: str = Field(..., description="Company to analyze")
    # Free-form analysis objectives; an empty list is allowed.
    objectives: list[str] = Field(default_factory=list)
    # Extra caller-supplied context; accepted but not read by the graph
    # in this dump — reserved for future use.
    context: dict[str, Any] = Field(default_factory=dict)
class WorkflowResponse(BaseModel):
    """Result contract returned by the market-analysis endpoint."""

    workflow_id: str
    status: str
    summary: str
    # Named intermediate outputs: research_notes and recommendations.
    artifacts: dict[str, Any]
================================================
FILE: backend/app/services/llm_service.py
================================================
from langchain_openai import ChatOpenAI
from app.core.config import get_settings
class LLMService:
    """Thin wrapper over the configured OpenAI chat model."""

    def __init__(self) -> None:
        settings = get_settings()
        self.client = ChatOpenAI(model=settings.openai_model, api_key=settings.openai_api_key)

    def summarize(self, prompt: str) -> str:
        """Send a single prompt and return the model's text content.

        NOTE(review): ChatOpenAI `.content` can be a list for multimodal
        responses; plain-text responses are assumed here — confirm.
        """
        response = self.client.invoke(prompt)
        return response.content
================================================
FILE: backend/app/services/vector_store.py
================================================
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from app.core.config import get_settings
class VectorStoreService:
    """Chroma-backed similarity search using OpenAI embeddings."""

    def __init__(self) -> None:
        settings = get_settings()
        self._embeddings = OpenAIEmbeddings(api_key=settings.openai_api_key)
        # Persistent local collection; directory comes from configuration.
        self._store = Chroma(
            persist_directory=settings.chroma_persist_directory,
            embedding_function=self._embeddings,
        )

    def similarity_search(self, query: str, k: int = 4):
        """Return the k most similar stored documents for the query."""
        return self._store.similarity_search(query, k=k)
================================================
FILE: backend/pyproject.toml
================================================
[project]
name = "agentic-ai-workflow-engine"
version = "0.1.0"
description = "LangGraph-based AI orchestration system for autonomous market analysis"
# NOTE(review): README.md does not exist inside backend/ (it lives at the
# repository root), so metadata builds such as the Dockerfile's
# `pip install .` may fail on the missing readme file — confirm.
readme = "README.md"
requires-python = ">=3.11"
# Runtime stack: web framework, LangGraph/LangChain orchestration,
# vector store, ORM, and HTTP client.
dependencies = [
"fastapi>=0.115.0",
"uvicorn[standard]>=0.30.0",
"pydantic-settings>=2.6.0",
"langgraph>=0.2.0",
"langchain>=0.3.0",
"langchain-openai>=0.2.0",
"langchain-community>=0.3.0",
"chromadb>=0.5.0",
"sqlmodel>=0.0.22",
"httpx>=0.27.0",
]
# Tooling used by the README's quality-gate commands (pytest/ruff/mypy).
[project.optional-dependencies]
dev = ["pytest>=8.3.0", "ruff>=0.7.0", "mypy>=1.12.0"]
[tool.ruff]
line-length = 100
target-version = "py311"
# Make `app` importable when running pytest from backend/.
[tool.pytest.ini_options]
pythonpath = ["."]
================================================
FILE: backend/tests/test_health.py
================================================
from fastapi.testclient import TestClient
from app.main import app
def test_health_check() -> None:
    """The /health endpoint responds 200 with an OK status payload."""
    http = TestClient(app)
    result = http.get("/health")
    assert result.status_code == 200
    assert result.json() == {"status": "ok"}
================================================
FILE: docker-compose.yml
================================================
# Local full-stack runtime: FastAPI backend (api) + Next.js frontend (web).
version: "3.9"

services:
  api:
    build:
      context: ./backend
    env_file:
      - .env
    ports:
      - "8000:8000"
    volumes:
      # Mount source for live development; ./data holds Chroma persistence.
      - ./backend:/app
      - ./data:/app/data

  web:
    build:
      context: ./frontend
    ports:
      - "3000:3000"
    environment:
      # NEXT_PUBLIC_* variables are evaluated in the browser, where the
      # compose service name "api" does not resolve; the host-published
      # port must be used instead. (Was http://api:8000.)
      - NEXT_PUBLIC_API_BASE_URL=http://localhost:8000
    depends_on:
      - api
================================================
FILE: frontend/Dockerfile
================================================
# Development image for the Next.js frontend (runs `next dev`, not a
# production build).
FROM node:22-alpine
WORKDIR /app
# Install dependencies from package.json alone — this repo ships no lockfile,
# which is also why `npm install` (not `npm ci`) is used.
COPY package.json ./
RUN npm install
COPY . .
# Bind to all interfaces so the container port mapping works.
CMD ["npm", "run", "dev", "--", "-H", "0.0.0.0", "-p", "3000"]
================================================
FILE: frontend/next.config.mjs
================================================
// Next.js configuration: static export, with GitHub Pages path prefixes
// applied only when building inside GitHub Actions.
const onGithubPages = process.env.GITHUB_ACTIONS === "true";
const repoName = "AI-Workflow-Engine";
const pagesPrefix = onGithubPages ? `/${repoName}` : "";

const nextConfig = {
  reactStrictMode: true,
  // Emit a fully static site into `out/` (required for Pages hosting).
  output: "export",
  images: {
    // next/image optimisation needs a server; disable it for static export.
    unoptimized: true,
  },
  basePath: pagesPrefix,
  assetPrefix: onGithubPages ? `${pagesPrefix}/` : "",
};

export default nextConfig;
================================================
FILE: frontend/package.json
================================================
{
"name": "workflow-engine-web",
"private": true,
"version": "0.1.0",
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"next": "15.0.0",
"react": "18.2.0",
"react-dom": "18.2.0"
},
"devDependencies": {
"typescript": "5.6.3",
"@types/react": "18.3.12",
"@types/node": "22.8.1",
"eslint": "9.13.0",
"eslint-config-next": "15.0.0"
}
}
================================================
FILE: frontend/src/app/layout.tsx
================================================
import type { Metadata } from "next";
export const metadata: Metadata = {
title: "Agentic AI Workflow Engine Demo",
description: "Senior-level orchestration demo UI for LangGraph-powered workflows",
};
export default function RootLayout({ children }: { children: React.ReactNode }) {
return (
{children}
);
}
================================================
FILE: frontend/src/app/page.tsx
================================================
import WorkflowForm from "../components/WorkflowForm";
import "./styles.css";
export default function HomePage() {
return (
LangGraph • OpenAI • Retrieval • Orchestration
Agentic AI Workflow Engine
Autonomous market analysis with stateful graph execution, retriever-augmented context,
and executive-grade recommendation synthesis.
3
Orchestration Nodes
1 Click
Run Strategic Analysis
Typed
End-to-End Contracts
Workflow Studio
Trigger a complete research → analysis → recommendation cycle. Designed for portfolio
strategy, product intelligence, and competitive planning.
);
}
================================================
FILE: frontend/src/app/styles.css
================================================
/* Global stylesheet for the demo dashboard. Dark theme with layered
   radial-gradient background and glassmorphism panels. */
:root {
  color-scheme: dark;
}
* {
  box-sizing: border-box;
}
body {
  margin: 0;
  min-height: 100vh;
  background: radial-gradient(circle at 20% 20%, #341426, #0f0f13 45%),
    radial-gradient(circle at 80% 70%, #20403c, transparent 30%),
    linear-gradient(140deg, #1e0f2b, #0f1119 70%);
  color: #f8f4f0;
  font-family: Inter, Segoe UI, system-ui, -apple-system, sans-serif;
}
/* Page container: centers content and clips the decorative aurora blobs. */
.page-shell {
  position: relative;
  overflow: hidden;
  padding: 2.5rem 1.25rem 1.5rem;
  max-width: 1180px;
  margin: 0 auto;
}
/* Blurred, slowly drifting background accent shapes (behind all content). */
.aurora {
  position: absolute;
  filter: blur(60px);
  border-radius: 999px;
  opacity: 0.5;
  animation: drift 15s ease-in-out infinite alternate;
  z-index: -2;
}
.aurora-a {
  width: 300px;
  height: 300px;
  background: #f25f4c;
  top: -120px;
  left: -100px;
}
.aurora-b {
  width: 340px;
  height: 340px;
  background: #59c3c3;
  right: -120px;
  bottom: 80px;
  animation-delay: 1.5s;
}
/* Subtle dotted texture over the whole shell. */
.noise-overlay {
  position: absolute;
  inset: 0;
  pointer-events: none;
  z-index: -1;
  opacity: 0.12;
  background-image: radial-gradient(circle, #f6bd6022 1px, transparent 1px);
  background-size: 3px 3px;
}
/* Hero heading and supporting copy. */
.hero h1 {
  font-size: clamp(2rem, 4vw, 3.5rem);
  margin: 0.4rem 0;
}
.eyebrow {
  color: #f6bd60;
  letter-spacing: 0.1em;
  text-transform: uppercase;
  font-size: 0.78rem;
}
.hero-copy {
  max-width: 68ch;
  color: #e6ddd5;
}
/* Metric cards under the hero, responsive auto-fit grid. */
.hero-metrics {
  display: grid;
  grid-template-columns: repeat(auto-fit, minmax(130px, 1fr));
  gap: 1rem;
  margin-top: 1.2rem;
}
.hero-metrics article {
  background: #ffffff0f;
  border: 1px solid #ffffff24;
  border-radius: 14px;
  padding: 0.9rem;
  backdrop-filter: blur(6px);
}
.hero-metrics span {
  font-weight: 700;
  color: #ffdca8;
}
/* Two-column main layout (collapses to one column under 900px). */
.content-grid {
  margin-top: 2rem;
  display: grid;
  grid-template-columns: 1.6fr 1fr;
  gap: 1.2rem;
}
.panel {
  border-radius: 18px;
  padding: 1.25rem;
}
/* Panel variants: solid elevated card vs. translucent glass card. */
.elevated {
  background: linear-gradient(160deg, #31142f, #182227);
  border: 1px solid #ffffff1a;
  box-shadow: 0 30px 80px #0000004f;
}
.glass {
  background: #ffffff0f;
  border: 1px solid #ffffff2b;
  backdrop-filter: blur(8px);
}
.panel-copy {
  color: #eadfd7;
}
ul {
  margin: 0;
  padding-left: 1.2rem;
  display: grid;
  gap: 0.65rem;
}
/* Gently pulsing highlight card. */
.pulse-card {
  margin-top: 1rem;
  padding: 1rem;
  border-radius: 14px;
  background: linear-gradient(120deg, #f25f4c2d, #59c3c32e);
  border: 1px solid #ffffff24;
  animation: pulse 4s ease-in-out infinite;
}
/* Workflow form controls. */
form {
  display: grid;
  gap: 0.8rem;
}
input,
textarea,
button {
  border-radius: 12px;
  border: 1px solid #ffffff2d;
  font: inherit;
  padding: 0.75rem 0.8rem;
}
input,
textarea {
  background: #0e1118a8;
  color: #f5f2ee;
}
button {
  background: linear-gradient(110deg, #f25f4c, #f6bd60);
  color: #141414;
  font-weight: 700;
  cursor: pointer;
  transition: transform 160ms ease, filter 160ms ease;
}
button:hover {
  transform: translateY(-1px);
  filter: brightness(1.07);
}
.site-footer {
  margin-top: 2rem;
  border-top: 1px solid #ffffff2a;
  padding-top: 1rem;
  color: #e0d8d0;
  text-align: center;
}
.site-footer p {
  margin: 0.3rem 0;
}
.site-footer a {
  color: #ffdca8;
}
@media (max-width: 900px) {
  .content-grid {
    grid-template-columns: 1fr;
  }
}
@keyframes pulse {
  0%, 100% { transform: scale(1); }
  50% { transform: scale(1.01); }
}
@keyframes drift {
  from { transform: translateY(0) translateX(0); }
  to { transform: translateY(30px) translateX(18px); }
}
================================================
FILE: frontend/src/components/WorkflowForm.tsx
================================================
"use client";
import { FormEvent, useState } from "react";
import { runMarketAnalysis } from "../lib/api";
import { WorkflowResponse } from "../types/workflow";
export default function WorkflowForm() {
const [company, setCompany] = useState("NVIDIA");
const [objectives, setObjectives] = useState("Competitive positioning, product strategy");
const [result, setResult] = useState(null);
const [loading, setLoading] = useState(false);
const onSubmit = async (event: FormEvent) => {
event.preventDefault();
setLoading(true);
setResult(null);
try {
const response = await runMarketAnalysis({
company,
objectives: objectives.split(",").map((item) => item.trim()),
});
setResult(response);
} finally {
setLoading(false);
}
};
return (
{result && (
Workflow Result
ID: {result.workflow_id}
Status: {result.status}
Summary
{result.summary}
Recommendations
{result.artifacts.recommendations}
)}
);
}
================================================
FILE: frontend/src/lib/api.ts
================================================
import { WorkflowRequest, WorkflowResponse } from "../types/workflow";
const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL ?? "http://localhost:8000";
export async function runMarketAnalysis(payload: WorkflowRequest): Promise {
const response = await fetch(`${API_BASE_URL}/workflows/market-analysis`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(payload),
});
if (!response.ok) {
throw new Error(`Request failed: ${response.status}`);
}
return response.json();
}
================================================
FILE: frontend/src/types/workflow.ts
================================================
/** Payload for POST /workflows/market-analysis (mirrors the backend schema). */
export interface WorkflowRequest {
  company: string;
  objectives: string[];
  /**
   * Optional extra context; the backend schema declares `context` with a
   * default, so omitting it remains valid. Added for parity with the API
   * contract documented in the README.
   */
  context?: Record<string, unknown>;
}
/** Result returned by the market-analysis endpoint. */
export interface WorkflowResponse {
  workflow_id: string;
  status: string;
  summary: string;
  /* Named intermediate outputs produced by the graph nodes. */
  artifacts: {
    research_notes: string;
    recommendations: string;
  };
}
================================================
FILE: frontend/tsconfig.json
================================================
{
"compilerOptions": {
"target": "ES2022",
"lib": ["DOM", "DOM.Iterable", "ES2022"],
"allowJs": false,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"module": "ESNext",
"moduleResolution": "Bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true
},
"include": ["src"]
}