Showing preview only (1,700K chars total). Download the full file or copy to clipboard to get everything.
Repository: DahnM20/ai-flow
Branch: main
Commit: 98ebab6ff3f8
Files: 312
Total size: 1.6 MB
Directory structure:
gitextract_vbwzelad/
├── .github/
│ ├── FUNDING.yml
│ └── workflows/
│ └── main.yml
├── .gitignore
├── LICENSE
├── README.md
├── bin/
│ └── generate_python_classes_from_ts.sh
├── docker/
│ ├── README.md
│ ├── docker-compose.it.yml
│ ├── docker-compose.yml
│ └── healthcheck.sh
├── integration_tests/
│ ├── .gitignore
│ ├── package.json
│ ├── tests/
│ │ ├── nodeProcessingOrder/
│ │ │ ├── nodeErrorTest.ts
│ │ │ ├── nodeParallelExecutionDurationTest.ts
│ │ │ ├── nodeWithChildrenTest.ts
│ │ │ ├── nodeWithMultipleParentsTest.ts
│ │ │ ├── nodesWithoutLinkTest.ts
│ │ │ └── singleNodeTest.ts
│ │ └── socketEvents/
│ │ ├── processFileEventTest.ts
│ │ ├── runNodeEventTest.ts
│ │ └── socketConnectionTest.ts
│ ├── tsconfig.json
│ └── utils/
│ ├── requestDatas.ts
│ └── testHooks.ts
└── packages/
├── backend/
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── app/
│ │ ├── env_config.py
│ │ ├── flask/
│ │ │ ├── app_routes/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── image_routes.py
│ │ │ │ ├── node_routes.py
│ │ │ │ ├── parameters_routes.py
│ │ │ │ ├── static_routes.py
│ │ │ │ └── upload_routes.py
│ │ │ ├── decorators.py
│ │ │ ├── flask_app.py
│ │ │ ├── routes.py
│ │ │ ├── socketio_init.py
│ │ │ ├── sockets.py
│ │ │ └── utils/
│ │ │ └── constants.py
│ │ ├── llms/
│ │ │ └── utils/
│ │ │ └── max_token_for_model.py
│ │ ├── log_config.py
│ │ ├── processors/
│ │ │ ├── components/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── core/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── ai_data_splitter_processor.py
│ │ │ │ │ ├── dall_e_prompt_processor.py
│ │ │ │ │ ├── display_processor.py
│ │ │ │ │ ├── file_processor.py
│ │ │ │ │ ├── gpt_vision_processor.py
│ │ │ │ │ ├── input_image_processor.py
│ │ │ │ │ ├── input_processor.py
│ │ │ │ │ ├── llm_prompt_processor.py
│ │ │ │ │ ├── merge_processor.py
│ │ │ │ │ ├── processor_type_name_utils.py
│ │ │ │ │ ├── replicate_processor.py
│ │ │ │ │ ├── stable_diffusion_stabilityai_prompt_processor.py
│ │ │ │ │ ├── stable_video_diffusion_replicate.py
│ │ │ │ │ ├── transition_processor.py
│ │ │ │ │ ├── url_input_processor.py
│ │ │ │ │ └── youtube_transcript_input_processor.py
│ │ │ │ ├── extension/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── claude_anthropic_processor.py
│ │ │ │ │ ├── deepseek_processor.py
│ │ │ │ │ ├── document_to_text_processor.py
│ │ │ │ │ ├── extension_processor.py
│ │ │ │ │ ├── generate_number_processor.py
│ │ │ │ │ ├── gpt_image_processor.py
│ │ │ │ │ ├── http_get_processor.py
│ │ │ │ │ ├── open_router_processor.py
│ │ │ │ │ ├── openai_reasoning_processor.py
│ │ │ │ │ ├── openai_text_to_speech_processor.py
│ │ │ │ │ ├── replace_text_processor.py
│ │ │ │ │ ├── stabilityai_generic_processor.py
│ │ │ │ │ └── stable_diffusion_three_processor.py
│ │ │ │ ├── model.py
│ │ │ │ ├── node_config_builder.py
│ │ │ │ ├── node_config_utils.py
│ │ │ │ └── processor.py
│ │ │ ├── context/
│ │ │ │ ├── processor_context.py
│ │ │ │ └── processor_context_flask_request.py
│ │ │ ├── exceptions.py
│ │ │ ├── factory/
│ │ │ │ ├── processor_factory.py
│ │ │ │ └── processor_factory_iter_modules.py
│ │ │ ├── launcher/
│ │ │ │ ├── abstract_topological_processor_launcher.py
│ │ │ │ ├── async_processor_launcher.py
│ │ │ │ ├── basic_processor_launcher.py
│ │ │ │ ├── event_type.py
│ │ │ │ ├── processor_event.py
│ │ │ │ ├── processor_launcher.py
│ │ │ │ └── processor_launcher_event.py
│ │ │ ├── observer/
│ │ │ │ ├── observer.py
│ │ │ │ └── socketio_event_emitter.py
│ │ │ └── utils/
│ │ │ └── retry_mixin.py
│ │ ├── root_injector.py
│ │ ├── storage/
│ │ │ ├── local_storage_strategy.py
│ │ │ ├── s3_storage_strategy.py
│ │ │ └── storage_strategy.py
│ │ ├── tasks/
│ │ │ ├── green_pool_task_manager.py
│ │ │ ├── single_thread_tasks/
│ │ │ │ └── browser/
│ │ │ │ ├── async_browser_task.py
│ │ │ │ └── browser_task.py
│ │ │ ├── task_exception.py
│ │ │ ├── task_manager.py
│ │ │ ├── task_utils.py
│ │ │ └── thread_pool_task_manager.py
│ │ └── utils/
│ │ ├── node_extension_utils.py
│ │ ├── openapi_client.py
│ │ ├── openapi_converter.py
│ │ ├── openapi_reader.py
│ │ ├── processor_utils.py
│ │ ├── replicate_utils.py
│ │ └── web_scrapping/
│ │ ├── async_browser_manager.py
│ │ └── browser_manager.py
│ ├── config.yaml
│ ├── hooks/
│ │ └── hook-app.processors.py
│ ├── pyproject.toml
│ ├── requirements_windows.txt
│ ├── resources/
│ │ ├── data/
│ │ │ └── openrouter_models.json
│ │ └── openapi/
│ │ └── stabilityai.json
│ ├── server.py
│ └── tests/
│ ├── unit/
│ │ ├── test_processor_factory.py
│ │ ├── test_processor_launcher.py
│ │ └── test_stable_diffusion_stabilityai_prompt_processor.py
│ └── utils/
│ ├── openai_mock_utils.py
│ ├── processor_context_mock.py
│ └── processor_factory_mock.py
└── ui/
├── .gitignore
├── .prettierignore
├── Dockerfile
├── README.md
├── index.html
├── jest.config.ts
├── nginx.conf
├── package.json
├── postcss.config.cjs
├── postcss.config.js
├── prettier.config.js
├── public/
│ ├── health
│ ├── locales/
│ │ ├── en/
│ │ │ ├── aiActions.json
│ │ │ ├── config.json
│ │ │ ├── dialogs.json
│ │ │ ├── flow.json
│ │ │ ├── nodeHelp.json
│ │ │ ├── tips.json
│ │ │ ├── tour.json
│ │ │ └── version.json
│ │ └── fr/
│ │ ├── aiActions.json
│ │ ├── config.json
│ │ ├── dialogs.json
│ │ ├── flow.json
│ │ ├── nodeHelp.json
│ │ ├── tips.json
│ │ ├── tour.json
│ │ └── version.json
│ ├── robots.txt
│ ├── samples/
│ │ └── intro.json
│ └── site.webmanifest
├── src/
│ ├── App.tsx
│ ├── Main.tsx
│ ├── api/
│ │ ├── cache/
│ │ │ ├── cacheManager.ts
│ │ │ └── withCache.ts
│ │ ├── client.ts
│ │ ├── nodes.ts
│ │ ├── parameters.ts
│ │ ├── replicateModels.ts
│ │ └── uploadFile.ts
│ ├── components/
│ │ ├── Flow.tsx
│ │ ├── LoadingScreen.tsx
│ │ ├── bars/
│ │ │ ├── Sidebar.tsx
│ │ │ └── dnd-sidebar/
│ │ │ ├── DnDSidebar.tsx
│ │ │ ├── DraggableNode.tsx
│ │ │ ├── DraggableNodeWithSubnodes.tsx
│ │ │ ├── GripIcon.tsx
│ │ │ ├── Section.tsx
│ │ │ └── types.ts
│ │ ├── buttons/
│ │ │ ├── ButtonRunAll.tsx
│ │ │ └── ConfigurationButton.tsx
│ │ ├── edges/
│ │ │ └── buttonEdge.tsx
│ │ ├── handles/
│ │ │ └── HandleWrapper.tsx
│ │ ├── inputs/
│ │ │ └── InputWithButton.tsx
│ │ ├── nodes/
│ │ │ ├── AIDataSplitterNode.tsx
│ │ │ ├── DisplayNode.tsx
│ │ │ ├── FileUploadNode.tsx
│ │ │ ├── GenericNode.tsx
│ │ │ ├── Node.styles.ts
│ │ │ ├── NodeHelpPopover.tsx
│ │ │ ├── NodeWrapper.tsx
│ │ │ ├── ReplicateNode.tsx
│ │ │ ├── TransitionNode.tsx
│ │ │ ├── node-button/
│ │ │ │ ├── InputNameBar.tsx
│ │ │ │ └── NodePlayButton.tsx
│ │ │ ├── node-input/
│ │ │ │ ├── FileUploadField.tsx
│ │ │ │ ├── ImageMaskCreator.tsx
│ │ │ │ ├── ImageMaskCreatorField.tsx
│ │ │ │ ├── ImageMaskCreatorFieldFlowAware.tsx
│ │ │ │ ├── KeyValueInputList.tsx
│ │ │ │ ├── NodeField.tsx
│ │ │ │ ├── NodeTextField.tsx
│ │ │ │ ├── NodeTextarea.tsx
│ │ │ │ ├── OutputRenderer.tsx
│ │ │ │ └── TextAreaPopupWrapper.tsx
│ │ │ ├── node-output/
│ │ │ │ ├── AudioUrlOutput.tsx
│ │ │ │ ├── ImageBase64Output.tsx
│ │ │ │ ├── ImageUrlOutput.tsx
│ │ │ │ ├── MarkdownOutput.tsx
│ │ │ │ ├── NodeOutput.tsx
│ │ │ │ ├── OutputDisplay.tsx
│ │ │ │ ├── PdfUrlOutput.tsx
│ │ │ │ ├── ThreeDimensionalUrlOutput.tsx
│ │ │ │ ├── VideoUrlOutput.tsx
│ │ │ │ └── outputUtils.ts
│ │ │ ├── types/
│ │ │ │ └── node.ts
│ │ │ └── utils/
│ │ │ ├── HintComponent.tsx
│ │ │ ├── ImageModal.tsx
│ │ │ ├── ImageZoomable.tsx
│ │ │ ├── NodeHelp.tsx
│ │ │ ├── NodeIcons.tsx
│ │ │ └── TextareaModal.tsx
│ │ ├── players/
│ │ │ └── VideoJS.tsx
│ │ ├── popups/
│ │ │ ├── ConfirmPopup.tsx
│ │ │ ├── DefaultPopup.tsx
│ │ │ ├── HelpPopup.tsx
│ │ │ ├── UserMessagePopup.tsx
│ │ │ ├── config-popup/
│ │ │ │ ├── AppParameters.tsx
│ │ │ │ ├── ConfigPopup.tsx
│ │ │ │ ├── DisplayParameters.tsx
│ │ │ │ ├── ParametersFields.tsx
│ │ │ │ ├── UserParameters.tsx
│ │ │ │ ├── configMetadata.ts
│ │ │ │ └── parameters.ts
│ │ │ ├── select-model-popup/
│ │ │ │ ├── Model.tsx
│ │ │ │ └── SelectModelPopup.tsx
│ │ │ └── shared/
│ │ │ ├── FilterGrid.tsx
│ │ │ ├── Grid.tsx
│ │ │ └── LoadMoreButton.tsx
│ │ ├── selectors/
│ │ │ ├── ActionGroup.tsx
│ │ │ ├── ColorSelector.tsx
│ │ │ ├── ExpandableBloc.tsx
│ │ │ ├── FileDropZone.tsx
│ │ │ ├── OptionSelector.tsx
│ │ │ └── SelectAutocomplete.tsx
│ │ ├── shared/
│ │ │ ├── motions/
│ │ │ │ ├── EaseOut.tsx
│ │ │ │ ├── TapScale.tsx
│ │ │ │ └── types.ts
│ │ │ └── theme.tsx
│ │ ├── side-views/
│ │ │ ├── CurrentNodeView.tsx
│ │ │ └── JSONView.tsx
│ │ ├── tools/
│ │ │ └── Fallback.tsx
│ │ └── tour/
│ │ └── AppTour.tsx
│ ├── config/
│ │ └── config.ts
│ ├── hooks/
│ │ ├── useFlowSocketListeners.tsx
│ │ ├── useFormFields.tsx
│ │ ├── useHandlePositions.tsx
│ │ ├── useHandleShowOutput.tsx
│ │ ├── useIsPlaying.tsx
│ │ ├── useIsTouchDevice.tsx
│ │ ├── useLoading.tsx
│ │ ├── useLocalStorage.tsx
│ │ └── useRefreshOnAppearanceChange.tsx
│ ├── i18n.js
│ ├── index.css
│ ├── index.tsx
│ ├── init.js
│ ├── layout/
│ │ └── main-layout/
│ │ ├── AppLayout.tsx
│ │ ├── header/
│ │ │ ├── Tab.tsx
│ │ │ └── TabHeader.tsx
│ │ └── wrapper/
│ │ ├── FlowErrorBoundary.tsx
│ │ └── FlowWrapper.tsx
│ ├── nodes-configuration/
│ │ ├── dallENode.ts
│ │ ├── gptVisionNode.ts
│ │ ├── inputTextNode.ts
│ │ ├── llmPrompt.ts
│ │ ├── mergerPromptNode.ts
│ │ ├── nodeConfig.ts
│ │ ├── sectionConfig.ts
│ │ ├── stableDiffusionStabilityAiNode.ts
│ │ ├── types.ts
│ │ ├── urlNode.ts
│ │ └── youtubeTranscriptNode.ts
│ ├── providers/
│ │ ├── FlowDataProvider.tsx
│ │ ├── NodeProvider.tsx
│ │ ├── SocketProvider.tsx
│ │ ├── ThemeProvider.tsx
│ │ └── VisibilityProvider.tsx
│ ├── react-app-env.d.ts
│ ├── reportWebVitals.ts
│ ├── services/
│ │ └── tabStorage.ts
│ ├── setupTests.ts
│ ├── sockets/
│ │ ├── flowEventTypes.ts
│ │ └── flowSocket.ts
│ ├── utils/
│ │ ├── evaluateConditions.ts
│ │ ├── flowChecker.ts
│ │ ├── flowUtils.ts
│ │ ├── mappings.tsx
│ │ ├── navigatorUtils.ts
│ │ ├── nodeConfigurationUtils.ts
│ │ ├── nodeUtils.ts
│ │ ├── openAPIUtils.ts
│ │ └── toastUtils.tsx
│ └── vite-env.d.ts
├── tailwind.config.js
├── test/
│ ├── e2e/
│ │ ├── intro-flow.spec.ts
│ │ ├── loading-screen.spec.ts
│ │ ├── main-content.spec.ts
│ │ ├── sidebar-default-nodes.spec.ts
│ │ ├── sidebar-extensions-nodes.spec.ts
│ │ └── tuto-display.spec.ts
│ ├── unit/
│ │ ├── flowChecker.test.ts
│ │ └── flowUtils.test.ts
│ └── utils.ts
├── tsconfig.json
├── vite.config.ts
└── vitest.config.ts
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/FUNDING.yml
================================================
# These are supported funding model platforms
github: [DahnM20]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
================================================
FILE: .github/workflows/main.yml
================================================
# CI pipeline: build the full stack with Docker Compose, wait until the
# backend answers its healthcheck, then run UI unit tests, backend unit
# tests (inside the backend container), and the socket integration tests.
name: Docker Compose Build | Healthcheck | Tests
on:
  push:
    branches:
      - main
      - develop
      - develop-features-0.8.1
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        # NOTE(review): actions/checkout@v2 is deprecated upstream — consider
        # bumping to @v4 once verified against this workflow.
        uses: actions/checkout@v2
      # Uses docker-compose.it.yml, which starts the backend with USE_MOCK=true.
      - name: Move to docker directory and run docker compose
        run: |
          cd docker
          docker compose -f docker-compose.it.yml up -d
      # Poll the backend until it responds (script retries with backoff).
      - name: Run healthcheck script
        run: |
          cd docker
          chmod +x healthcheck.sh
          ./healthcheck.sh http://localhost:5000/healthcheck
      # Dump container logs to help diagnose a failed healthcheck.
      - name: Print Docker logs
        if: failure()
        run: |
          cd docker
          docker compose logs
      - name: Run UI unit tests
        run: |
          cd packages/ui
          npm i
          npm run test
      # Backend tests run inside the already-built backend container.
      - name: Run Python unit tests
        run: |
          docker exec ai-flow-backend python -m unittest discover -s tests/unit -p '*test_*.py'
      # Socket/flow integration tests from the integration_tests package.
      - name: Run integration tests
        run: |
          cd integration_tests
          npm i
          npm run test
      # Second log dump: covers failures in the test steps above.
      - name: Print Docker logs
        if: failure()
        run: |
          cd docker
          docker compose logs
================================================
FILE: .gitignore
================================================
packages/backend/.env
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2023 Dahn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
<p align="center">
<img src="assets/header.png" alt="AI-Flow Logo" />
</p>
<p align="center">
<em>Open-source tool to seamlessly connect multiple AI model APIs into repeatable workflows.</em>
</p>
<p align="center">
<a href="https://docs.ai-flow.net/?ref=github"><img src="https://img.shields.io/badge/lang-English-blue.svg" alt="English"></a>
<a href="https://docs.ai-flow.net/?ref=github"><img src="https://img.shields.io/badge/lang-French-blue.svg" alt="French"></a>
<img src="https://img.shields.io/badge/License-MIT-yellow.svg">
<img src="https://img.shields.io/github/v/release/DahnM20/ai-flow">
<a href="https://twitter.com/DahnM20"><img src="https://img.shields.io/twitter/follow/AI-Flow?style=social" alt="Follow on Twitter"></a>
</p>
<p align="center">
<a href="https://ai-flow.net/?ref=github">🔗 Website</a> •
<a href="https://docs.ai-flow.net/?ref=github">📚 Documentation</a>
</p>
---
<div align="center">
🎉🚀 Latest Release: v0.11.3 🚀🎉
<br>
Nodes Updated : Web search can be enabled on GPT node, Claude 4 available
<br>
UI : Node Search Bar, Shortcut for Popular Replicate Models
<br>
New Models available : Flux Kontext, Veo 3, Lyria 2, Imagen 4 available through the Replicate Node
</div>
---

## Overview
**AI-Flow** is an open-source, user-friendly UI that lets you visually design, manage, and monitor AI-driven workflows by seamlessly connecting multiple AI model APIs (e.g., OpenAI, StabilityAI, Replicate, Claude, Deepseek).
## Features
- **Visual Workflow Builder:** Drag-and-drop interface for crafting AI workflows.
- **Real-Time Monitoring:** Watch your workflow execute and track results.
- **Parallel Processing:** Nodes run in parallel whenever possible.
- **Model Management:** Easily organize and manage diverse AI models.
- **Import/Export:** Share or back up your workflows effortlessly.
## Supported Models
- **Replicate:** All models available through the Replicate API (FLUX.1, FLUX.1 Kontext, Imagen 4, Veo 3, Lyria 2, and many more)
- **OpenAI:** GPT-4o, GPT-4.1, TTS, o1, o3, o4.
- **StabilityAI:** Stable Diffusion 3.5, SDXL, Stable Video Diffusion, plus additional tools.
- **Others:** Claude, Deepseek, OpenRouter.

## Open Source vs. Cloud
**AI-Flow** is fully open source and available under the MIT License, empowering you to build and run your AI workflows on your personal machine.
For those seeking enhanced functionality and a polished experience, **AI-Flow Pro** on our cloud platform ([app.ai-flow.net](https://ai-flow.net/?ref=github)) offers advanced features, including:
- **Subflows & Loops:** Create complex, nested workflows and iterate tasks effortlessly.
- **API-Triggered Flows:** Initiate workflows via API calls for seamless automation.
- **Integrated Services:** Connect with external services such as Google Search, Airtable, Zapier, and Make.
- **Simplified Interface:** Transform workflows into streamlined tools with an intuitive UI.

The cloud version builds upon the foundation of the open-source project, giving you more power and flexibility while still letting you use your own API keys.
## Installation
> **Note:** To unlock full functionality, AI-Flow requires S3-compatible storage (with proper CORS settings) to host resources. Without it, features like File Upload or nodes that rely on external providers (e.g., StabilityAI) may not work as expected. Also, set `REPLICATE_API_KEY` in the App Parameters or in your environment to use the Replicate node.
### Method 1: Using the Executable (Windows Only)
> **Note:** This method is only available for Windows users.
1. Download the latest Windows version of AI-Flow from the official releases page: [AI-Flow Releases](https://ai-flow.net/release/)
2. Once downloaded, run the `.exe` file.
This will start a local server and open AI-Flow in a standalone window, giving you direct access to its user interface without needing to install anything else.
### Method 2: Docker Installation
1. **Prepare Docker Compose:**
- Navigate to the `docker` directory:
```bash
cd docker
```
2. **Launch with Docker Compose:**
```bash
docker-compose up -d
```
3. **Access the Application:**
- Open [http://localhost:80](http://localhost:80) in your browser.
- To stop, run:
```bash
docker-compose stop
```
### Method 3: Local Installation
1. **Clone the Repository:**
```bash
git clone https://github.com/DahnM20/ai-flow.git
cd ai-flow
```
2. **UI Setup:**
```bash
cd packages/ui
npm install
```
3. **Backend Setup:**
```bash
cd ../backend
poetry install
```
- **Windows Users:**
```bash
poetry shell
pip install -r requirements_windows.txt
```
4. **Run the Application:**
- Start the backend:
```bash
poetry run python server.py
```
- In a new terminal, start the UI:
```bash
cd packages/ui
npm start
```
- Open your browser and navigate to [http://localhost:3000](http://localhost:3000).
## Contributing
We welcome contributions! If you encounter issues or have feature ideas, please [open an issue](https://github.com/DahnM20/ai-flow/issues) or submit a pull request.
## License
This project is released under the [MIT License](LICENSE).
================================================
FILE: bin/generate_python_classes_from_ts.sh
================================================
#!/bin/bash
# Generate the backend's Pydantic models (model.py) from the UI's
# TypeScript node-configuration types, via an intermediate JSON schema.
#
# Pipeline: types.ts --(typescript-json-schema)--> schema.json
#           schema.json --(datamodel-codegen)--> model.py
#
# Abort on the first failing step: without this, a failed schema export
# would let datamodel-codegen run against a stale or missing schema.json.
set -e

npm i -g typescript-json-schema
typescript-json-schema "../packages/ui/src/nodes-configuration/types.ts" "*" --out "schema.json"
mv schema.json ../packages/backend/app/processors/components/
cd ../packages/backend/app/processors/components/
poetry run datamodel-codegen --input schema.json --input-file-type jsonschema --output model.py --output-model-type pydantic_v2.BaseModel --enum-field-as-literal all
rm schema.json
echo "model.py generated"
================================================
FILE: docker/README.md
================================================
## 🐳 Docker
### Docker Compose
1. Go to the docker directory: `cd ./docker`
2. Update the .yml if needed for the PORTS
3. Launch `docker-compose up` or `docker-compose up -d`
4. Open your browser and navigate to `http://localhost:80` (the frontend container publishes port 80)
5. Use `docker-compose stop` when you want to stop the app.
================================================
FILE: docker/docker-compose.it.yml
================================================
# Compose stack used by the CI integration tests (see .github/workflows/main.yml).
# Differs from docker-compose.yml mainly in that the backend runs with
# USE_MOCK=true and the frontend is configured via runtime env variables.
services:
  backend:
    container_name: ai-flow-backend
    build:
      context: ../packages/backend/
      dockerfile: Dockerfile
    ports:
      - 5000:5000 # expose the API/socket server to the host so tests can reach it
    environment:
      - HOST=0.0.0.0
      - PORT=5000
      - DEPLOYMENT_ENV=LOCAL
      - LOCAL_STORAGE_FOLDER_NAME=local_storage
      # presumably makes processors return mocked results instead of calling
      # external AI APIs — confirm against the backend's env_config.py
      - USE_MOCK=true
    volumes:
      - ./ai-flow-backend-storage:/app/local_storage # persist local storage across runs
  frontend:
    container_name: ai-flow-frontend
    build:
      context: ../packages/ui/
      dockerfile: Dockerfile
    ports:
      - 80:80
    environment:
      - VITE_APP_WS_HOST=localhost
      - VITE_APP_WS_PORT=5000
================================================
FILE: docker/docker-compose.yml
================================================
# Default Compose stack for running AI-Flow locally.
# The backend listens on 5000 inside the container and is published on
# host port 5001; the frontend is built with matching Vite build args so
# the UI connects to the right ports.
services:
  backend:
    container_name: ai-flow-backend
    build:
      context: ../packages/backend/
      dockerfile: Dockerfile
    ports:
      - 5001:5000 # host 5001 -> container 5000
    environment:
      - HOST=0.0.0.0
      - PORT=5000
      - DEPLOYMENT_ENV=LOCAL
      # placeholder value — replace with a real key to use the Replicate node
      - REPLICATE_API_KEY=sample
      - LOCAL_STORAGE_FOLDER_NAME=local_storage
    volumes:
      - ./ai-flow-backend-storage:/app/local_storage # persist local storage across runs
  frontend:
    container_name: ai-flow-frontend
    build:
      context: ../packages/ui/
      dockerfile: Dockerfile
      # build-time args: baked into the Vite bundle (unlike the env vars
      # used in docker-compose.it.yml)
      args:
        VITE_APP_WS_HOST: localhost
        VITE_APP_WS_PORT: 5001
        VITE_APP_API_REST_PORT: 5001
    ports:
      - 80:80
================================================
FILE: docker/healthcheck.sh
================================================
#!/bin/bash
# Poll a URL until it responds successfully or the attempts run out.
#
# Usage: ./healthcheck.sh <URL>
# Exits 0 as soon as curl gets a successful response, 1 otherwise.

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <URL>"
    exit 1
fi

URL="$1"
INTERVAL=5       # seconds between attempts
MAX_ATTEMPTS=20  # give up after this many probes

attempt=0
while [ "$attempt" -lt "$MAX_ATTEMPTS" ]; do
    attempt=$(( attempt + 1 ))
    # Quote "$URL" so query strings/spaces are not word-split by the shell.
    # --max-time bounds each probe so a hung connection cannot stall the
    # loop indefinitely; --fail makes curl exit non-zero on HTTP errors.
    curl --fail --silent --max-time 10 "$URL" && echo "Service is up!" && exit 0
    echo "Service not ready yet. Waiting for $INTERVAL seconds. Attempt $attempt of $MAX_ATTEMPTS."
    sleep "$INTERVAL"
done

echo "Service did not become ready after $MAX_ATTEMPTS attempts."
exit 1
================================================
FILE: integration_tests/.gitignore
================================================
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# production
/dist
# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*
================================================
FILE: integration_tests/package.json
================================================
{
"name": "integration_tests",
"version": "1.0.0",
"description": "",
"main": "dist/index.js",
"scripts": {
"test": "mocha dist/tests/**/*Test.js",
"build": "tsc",
"pretest": "npm run build"
},
"author": "",
"license": "ISC",
"dependencies": {
"axios": "^1.5.0",
"chai": "^4.3.8",
"mocha": "^10.2.0",
"socket.io-client": "^4.7.2"
},
"devDependencies": {
"@types/chai": "^4.3.6",
"@types/minimist": "^1.2.2",
"@types/mocha": "^10.0.1",
"@types/node": "^20.5.9",
"@types/normalize-package-data": "^2.4.1",
"@types/socket.io-client": "^3.0.0",
"typescript": "^5.2.2"
}
}
================================================
FILE: integration_tests/tests/nodeProcessingOrder/nodeErrorTest.ts
================================================
import { expect } from "chai";
import {
  disconnectSocket,
  getSocket,
  setupSocket,
} from "../../utils/testHooks";
import {
  createRequestData,
  flowWithFourParallelNodeStep,
  flowWithoutLinks,
  sequentialFlow,
} from "../../utils/requestDatas";

/**
 * Integration tests for error propagation during flow processing:
 * an error in one node must stop dependent nodes, but must not prevent
 * independent nodes from completing.
 */
describe("Node errors test", function () {
  this.timeout(30000);

  beforeEach(function (done) {
    setupSocket(done);
  });

  afterEach(function () {
    disconnectSocket();
  });

  it("Error in sequential flow should stop the flow", function (done) {
    const socket = getSocket();
    // Clone the shared fixture and make the second node raise an error,
    // so everything downstream of it should never report progress.
    const flowNodesWithOneError = structuredClone(sequentialFlow);
    flowNodesWithOneError[1] = {
      ...flowNodesWithOneError[1],
      raiseError: true,
    };
    socket.emit("process_file", createRequestData(flowNodesWithOneError));
    let errorReceived = false;
    let progressCount = 0;
    // Only the first node (before the failing one) may emit progress.
    const progressBeforeError = 1;
    socket.on("error", (error) => {
      errorReceived = true;
      expect(progressCount).to.equal(progressBeforeError);
    });
    socket.on("progress", (progress) => {
      if (errorReceived) {
        done(new Error("Received progress after error"));
      }
      progressCount++;
      if (progressCount > progressBeforeError) {
        done(new Error(`Too many nodes sent progress`));
      }
    });
    // Give the backend time to emit the error before concluding.
    setTimeout(() => {
      if (!errorReceived) {
        socket.disconnect();
        done(new Error("No error received within the expected time"));
      } else {
        done();
      }
    }, 10000);
  });

  it("Error in flow without link should run the others nodes", function (done) {
    const socket = getSocket();
    // NOTE(review): despite the test title, this clones sequentialFlow —
    // flowWithoutLinks is imported above but never used. Confirm whether
    // flowWithoutLinks was intended here (the assertions below would need
    // revisiting if the fixture is swapped).
    const flow = structuredClone(sequentialFlow);
    flow[1] = {
      ...flow[1],
      raiseError: true,
    };
    socket.emit("process_file", createRequestData(flow));
    let errorReceived = false;
    let progressCount = 0;
    const maxProgressBeforeError = 2;
    socket.on("error", (error) => {
      errorReceived = true;
      expect(progressCount).to.be.at.most(maxProgressBeforeError);
    });
    socket.on("progress", (progress) => {
      progressCount++;
      if (progressCount > maxProgressBeforeError) {
        done(new Error(`Too many nodes sent progress`));
      }
    });
    // Success requires both an error AND exactly one completed node.
    setTimeout(() => {
      if (errorReceived && progressCount === 1) {
        socket.disconnect();
        done();
      } else {
        done(new Error("No error received within the expected time"));
      }
    }, 2000);
  });

  it("Error in flow with 4 parallel node should return result for all of them except the one in error", function (done) {
    const socket = getSocket();
    const flow = structuredClone(flowWithFourParallelNodeStep);
    // Node 4 fails; the three other parallel nodes must still complete.
    flow[4] = {
      ...flow[4],
      raiseError: true,
    };
    flow[1] = {
      ...flow[1],
      sleepDuration: 2,
    }; //One node with high delay to be sure he is processed during the error raise
    socket.emit("process_file", createRequestData(flow));
    let errorReceived = false;
    let progressCount = 0;
    // All nodes except the failing one are expected to report progress.
    const maxProgressBeforeError = 4;
    socket.on("error", (error) => {
      errorReceived = true;
      expect(progressCount).to.be.at.most(maxProgressBeforeError);
    });
    socket.on("progress", (progress) => {
      progressCount++;
      if (progressCount > maxProgressBeforeError) {
        done(new Error(`Too many nodes sent progress`));
      }
    });
    // run_end fires when the launcher has finished the whole flow.
    socket.on("run_end", (end) => {
      if (progressCount !== maxProgressBeforeError) {
        done(
          new Error(`Not all nodes were processed before end of execution.`)
        );
      } else {
        done();
      }
    });
  });
});
================================================
FILE: integration_tests/tests/nodeProcessingOrder/nodeParallelExecutionDurationTest.ts
================================================
import { expect } from "chai";
import {
  disconnectSocket,
  getSocket,
  setupSocket,
} from "../../utils/testHooks";
import {
  createRequestData,
  flowWithFourParallelNodeStep,
} from "../../utils/requestDatas";

/**
 * Integration test: independent nodes must be processed in parallel.
 * Four nodes each sleep 2 seconds; sequential execution would take at
 * least 8 seconds, so finishing under 5 seconds proves parallelism.
 *
 * (The describe title was previously "Node errors test", copy-pasted
 * from nodeErrorTest.ts — fixed to describe this suite.)
 */
describe("Node parallel execution duration test", function () {
  this.timeout(15000);

  beforeEach(function (done) {
    setupSocket(done);
  });

  afterEach(function () {
    disconnectSocket();
  });

  it("4 parallel node with 2s sleep each should not compound time", function (done) {
    const socket = getSocket();
    const flow = structuredClone(flowWithFourParallelNodeStep);
    // Give each of the four parallel nodes the same artificial 2s delay.
    flow[1] = { ...flow[1], sleepDuration: 2 };
    flow[2] = { ...flow[2], sleepDuration: 2 };
    flow[3] = { ...flow[3], sleepDuration: 2 };
    flow[4] = { ...flow[4], sleepDuration: 2 };

    const maxDurationMsExpected = 5000;
    const timeStart = Date.now();
    socket.emit("process_file", createRequestData(flow));

    let progressCount = 0;
    // Five progress events are expected in total for this fixture.
    const maxProgress = 5;

    socket.on("progress", (progress) => {
      progressCount++;
      if (progressCount > maxProgress) {
        done(new Error(`Too many nodes sent progress`));
      }
    });

    // run_end fires when the whole flow has finished executing.
    socket.on("run_end", (end) => {
      if (progressCount !== maxProgress) {
        done(
          new Error(`Not all nodes were processed before end of execution.`)
        );
      } else {
        const timeEnd = Date.now();
        const duration = timeEnd - timeStart;
        expect(duration).to.be.lessThan(maxDurationMsExpected);
        done();
      }
    });
  });
});
================================================
FILE: integration_tests/tests/nodeProcessingOrder/nodeWithChildrenTest.ts
================================================
import { expect } from "chai";
import {
  disconnectSocket,
  getSocket,
  setupSocket,
} from "../../utils/testHooks";
import { createRequestData } from "../../utils/requestDatas";

/**
 * Integration test: a parent node must be processed before any of the
 * nodes that consume its output.
 */
describe('node with children test', function () {
  this.timeout(15000);

  beforeEach(function (done) {
    setupSocket(done);
  });

  afterEach(function () {
    disconnectSocket();
  });

  // One root node feeding two children, each wired to the root's output.
  const parentChildFlow = [
    {
      inputs: [],
      name: "1#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [{ inputNode: "1#llm-prompt", inputNodeOutputKey: 0 }],
      name: "2#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [{ inputNode: "1#llm-prompt", inputNodeOutputKey: 0 }],
      name: "3#stable-diffusion-stabilityai-prompt",
      processorType: "stable-diffusion-stabilityai-prompt",
    },
  ];

  it('process_file should process the parent first, then its children', function (done) {
    const socket = getSocket();
    socket.emit('process_file', createRequestData(parentChildFlow));

    const completionOrder: string[] = [];

    socket.on('progress', (event) => {
      completionOrder.push(event.instanceName);
      if (completionOrder.length !== parentChildFlow.length) {
        return; // keep collecting until every node has reported
      }
      try {
        // The parent must come first; both children must appear afterwards.
        expect(completionOrder[0]).to.equal(parentChildFlow[0].name);
        expect(completionOrder).to.include(parentChildFlow[1].name);
        expect(completionOrder).to.include(parentChildFlow[2].name);
        done();
      } catch (error) {
        done(error);
      }
    });

    socket.on('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });
});
================================================
FILE: integration_tests/tests/nodeProcessingOrder/nodeWithMultipleParentsTest.ts
================================================
import { expect } from "chai";
import {
  disconnectSocket,
  getSocket,
  setupSocket,
} from "../../utils/testHooks";
import { createRequestData } from "../../utils/requestDatas";

/**
 * Integration test: a node with two parents must only run after both
 * parents have been processed.
 */
describe('node with multiple parent test', function () {
  this.timeout(15000);

  beforeEach(function (done) {
    setupSocket(done);
  });

  afterEach(function () {
    disconnectSocket();
  });

  // Two independent roots whose outputs both feed a single child node.
  const twoParentsFlow = [
    {
      inputs: [],
      name: "1#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [],
      name: "2#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [
        { inputNode: "1#llm-prompt", inputNodeOutputKey: 0 },
        { inputNode: "2#llm-prompt", inputNodeOutputKey: 0 },
      ],
      name: "3#stable-diffusion-stabilityai-prompt",
      processorType: "stable-diffusion-stabilityai-prompt",
    },
  ];

  it('process_file should process both parents before the child', function (done) {
    const socket = getSocket();
    socket.emit('process_file', createRequestData(twoParentsFlow));

    const completionOrder: string[] = [];

    socket.on('progress', (event) => {
      completionOrder.push(event.instanceName);
      if (completionOrder.length !== twoParentsFlow.length) {
        return; // wait until every node has reported progress
      }
      try {
        // Both parents must be present, and the child must come after each.
        const childPosition = completionOrder.indexOf(
          "3#stable-diffusion-stabilityai-prompt"
        );
        expect(completionOrder.includes("1#llm-prompt")).to.be.true;
        expect(completionOrder.includes("2#llm-prompt")).to.be.true;
        expect(childPosition).to.be.greaterThan(
          completionOrder.indexOf("1#llm-prompt")
        );
        expect(childPosition).to.be.greaterThan(
          completionOrder.indexOf("2#llm-prompt")
        );
        done();
      } catch (error) {
        done(error);
      }
    });

    socket.on('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });
});
================================================
FILE: integration_tests/tests/nodeProcessingOrder/nodesWithoutLinkTest.ts
================================================
import { expect } from "chai";
import {
  disconnectSocket,
  getSocket,
  setupSocket,
} from "../../utils/testHooks";
import { createRequestData } from "../../utils/requestDatas";

/**
 * Integration test: nodes with no links between them must all be
 * processed independently.
 */
describe('node without link test', function () {
  this.timeout(15000);

  beforeEach(function (done) {
    setupSocket(done);
  });

  afterEach(function () {
    disconnectSocket();
  });

  // Three completely independent nodes — no inputs wired anywhere.
  const unlinkedFlow = [
    {
      inputs: [],
      name: "1#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [],
      name: "2#llm-prompt",
      processorType: "llm-prompt",
    },
    {
      inputs: [],
      name: "3#stable-diffusion-stabilityai-prompt",
      processorType: "stable-diffusion-stabilityai-prompt",
    },
  ];

  it('process_file should process all the nodes', function (done) {
    const socket = getSocket();
    socket.emit('process_file', createRequestData(unlinkedFlow));

    const completedNodes: string[] = [];

    socket.on('progress', (event) => {
      completedNodes.push(event.instanceName);
      if (completedNodes.length !== unlinkedFlow.length) {
        return; // wait until every node has reported progress
      }
      try {
        // Order does not matter here — every node just has to complete.
        for (const node of unlinkedFlow) {
          expect(completedNodes).to.include(node.name);
        }
        done();
      } catch (error) {
        done(error);
      }
    });

    socket.on('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });
});
================================================
FILE: integration_tests/tests/nodeProcessingOrder/singleNodeTest.ts
================================================
import { expect } from "chai";
import { Socket, io } from "socket.io-client";
import { createRequestData } from "../../utils/requestDatas";
describe('single node test', function () {
  this.timeout(5000);

  let socket: Socket;

  // Open a fresh connection per test so no state leaks between cases.
  beforeEach(function (done) {
    socket = io('http://localhost:5000');
    socket.on('connect', () => done());
    socket.on('connect_error', (error) => done(error));
  });

  afterEach(function () {
    socket.disconnect();
  });

  // Smallest possible flow: one isolated llm-prompt node.
  const flowWithSingleNode = [
    {
      inputs: [],
      name: "1#llm-prompt",
      processorType: "llm-prompt",
    }
  ];

  it('process_file should trigger one progress event', function (done) {
    socket.emit('process_file', createRequestData(flowWithSingleNode));
    socket.once('progress', (data) => {
      expect(data).to.have.property('instanceName').to.equal(flowWithSingleNode[0].name);
      done();
    });
    socket.once('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });
});
================================================
FILE: integration_tests/tests/socketEvents/processFileEventTest.ts
================================================
import { io, Socket } from "socket.io-client";
import { expect } from 'chai';
import { basicJsonFlow, getBasicProcessFileData, getJsonFlowWithMissingInputTextProcessFileData } from '../../utils/requestDatas';
describe('process_file event tests', function () {
  this.timeout(5000);

  let socket: Socket;

  beforeEach(function (done) {
    socket = io('http://localhost:5000');
    socket.on('connect', () => done());
    socket.on('connect_error', (error) => done(error));
  });

  afterEach(function () {
    socket.disconnect();
  });

  // Fails the running test as soon as the backend reports an error.
  const failOnError = (done: (err?: Error) => void) => {
    socket.once('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  };

  it('process_file should trigger run_end event', function (done) {
    socket.emit('process_file', getBasicProcessFileData());
    socket.once('run_end', (data) => {
      expect(data).to.have.property('output');
      done();
    });
    failOnError(done);
  });

  it('process_file should trigger progress event', function (done) {
    socket.emit('process_file', getBasicProcessFileData());
    socket.once('progress', (data) => {
      // The input-text node forwards its inputText as output.
      expect(data).to.have.property('output').to.equal(basicJsonFlow[0].inputText);
      expect(data).to.have.property('instanceName').to.equal(basicJsonFlow[0].name);
      done();
    });
    failOnError(done);
  });

  it('process_file should trigger current_node_running event', function (done) {
    socket.emit('process_file', getBasicProcessFileData());
    socket.once('current_node_running', (data) => {
      expect(data).to.have.property('instanceName').to.equal(basicJsonFlow[0].name);
      done();
    });
    failOnError(done);
  });

  it('process_file with missing input should trigger error event', function (done) {
    socket.emit('process_file', getJsonFlowWithMissingInputTextProcessFileData());
    // Here the error event itself is the expected outcome.
    socket.once('error', () => {
      done();
    });
  });
});
================================================
FILE: integration_tests/tests/socketEvents/runNodeEventTest.ts
================================================
import { io, Socket } from "socket.io-client";
import { expect } from 'chai';
import { basicJsonFlow, getBasicRunNodeData, getJsonFlowWithMissingInputTextProcessFileData } from '../../utils/requestDatas';
describe('run_node event tests', function () {
  this.timeout(5000);

  let socket: Socket;

  // Fresh connection per test; closed again in afterEach.
  beforeEach(function (done) {
    socket = io('http://localhost:5000');
    socket.on('connect', function () {
      done();
    });
    socket.on('connect_error', function (error) {
      done(error);
    });
  });

  afterEach(function () {
    socket.disconnect();
  });

  it('run_node should trigger progress event', function (done) {
    const runNodeData = getBasicRunNodeData();
    socket.emit('run_node', runNodeData);
    socket.once('progress', (data) => {
      // The single input-text node forwards its inputText as output.
      expect(data).to.have.property('output').to.equal(basicJsonFlow[0].inputText);
      expect(data).to.have.property('instanceName').to.equal(basicJsonFlow[0].name);
      done();
    });
    socket.once('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });

  it('run_node should trigger current_node_running event', function (done) {
    const runNodeData = getBasicRunNodeData();
    socket.emit('run_node', runNodeData);
    socket.once('current_node_running', (data) => {
      expect(data).to.have.property('instanceName').to.equal(basicJsonFlow[0].name);
      done();
    });
    socket.once('error', (error) => {
      done(new Error(`Error event received: ${JSON.stringify(error)}`));
    });
  });

  it('run_node with missing input should trigger error event', function (done) {
    const processFileData = getJsonFlowWithMissingInputTextProcessFileData();
    socket.emit('run_node', processFileData);
    // The error event itself is the expected outcome of this test.
    socket.once('error', (error) => {
      done();
    });
  });
});
================================================
FILE: integration_tests/tests/socketEvents/socketConnectionTest.ts
================================================
import { io, Socket } from "socket.io-client";
import { expect } from 'chai';
describe('Socket.IO connection tests', function () {
  let socket: Socket;

  // Establish a connection before each test; surface connection failures.
  beforeEach(function (done: Mocha.Done): void {
    socket = io('http://localhost:5000');
    socket.on('connect', (): void => done());
    socket.on('connect_error', (error: any): void => done(error));
  });

  afterEach(function (): void {
    socket.disconnect();
  });

  it('should be connected to the server', function (done: Mocha.Done): void {
    expect(socket.connected).to.equal(true);
    done();
  });

  it('should disconnect', function (done: Mocha.Done): void {
    socket.disconnect();
    expect(socket.connected).to.equal(false);
    done();
  });
});
================================================
FILE: integration_tests/tsconfig.json
================================================
{
"compilerOptions": {
"target": "ES6",
"module": "commonjs",
"outDir": "./dist",
"rootDir": "./",
"strict": true
}
}
================================================
FILE: integration_tests/utils/requestDatas.ts
================================================
/** Payload shape for the `process_file` socket event. */
type ProcessFileData = {
  jsonFile: string; // JSON-serialized array of Node objects
  parameters: Record<string, string>; // API keys and other runtime parameters
};

/** Payload shape for the `run_node` socket event. */
type RunNodeData = {
  jsonFile: string;
  parameters: Record<string, string>;
  nodeName: string; // name of the single node to execute
};

/** One node of a flow graph as consumed by the backend launcher. */
export type Node = {
  inputs: {
    inputName?: string;
    inputNode: string; // name of the parent node feeding this input
    inputNodeOutputKey: number; // index into the parent node's output list
  }[];
  name: string;
  processorType: string;
  // Processor-specific fields (inputText, model, raiseError, ...) are allowed.
  [key: string]: any;
};
// Minimal valid flow: a single input-text node that outputs "Hello World".
const basicJsonFlow: Node[] = [
  {
    inputs: [],
    name: "kbk1proh1#input-text",
    processorType: "input-text",
    inputText: "Hello World",
    x: 1,
    y: 1,
  },
];

// Same node without the required inputText field — used to assert that the
// backend emits an error event for invalid flows.
const jsonFlowWithMissingInputText: Node[] = [
  {
    inputs: [],
    name: "kbk1proh1#input-text",
    processorType: "input-text",
    x: 1,
    y: 1,
  },
];

// Flow containing a single "non-free" node (stability.ai processor).
export const flowWithOneNonFreeNode: Node[] = [
  {
    inputs: [],
    name: "1#stable-diffusion-stabilityai-prompt",
    processorType: "stable-diffusion-stabilityai-prompt",
  },
];

// Linear chain 1 -> 2 -> 3: each node consumes output 0 of its predecessor.
export const sequentialFlow: Node[] = [
  {
    inputs: [],
    name: "1#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
  },
  {
    inputs: [
      {
        inputNode: "1#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "2#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
  },
  {
    inputs: [
      {
        inputNode: "2#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "3#stable-diffusion-stabilityai-prompt",
    processorType: "stable-diffusion-stabilityai-prompt",
    raiseError: false,
  },
];
// Three unconnected nodes; mirrors the "node without link" test scenario.
export const flowWithoutLinks: Node[] = [
  {
    inputs: [],
    name: "1#llm-prompt",
    processorType: "llm-prompt",
    model: "gpt-4",
    prompt: "hi",
    raiseError: false,
  },
  {
    inputs: [],
    name: "2#llm-prompt",
    processorType: "llm-prompt",
    model: "gpt-4",
    prompt: "hi",
    raiseError: false,
  },
  {
    inputs: [],
    name: "3#stable-diffusion-stabilityai-prompt",
    processorType: "stable-diffusion-stabilityai-prompt",
    raiseError: false,
  },
];

// Three unconnected free (no external API) input-text nodes.
export const flowFreeNodesWithoutLink: Node[] = [
  {
    inputs: [],
    name: "1#input-text",
    processorType: "input-text",
    inputText: "fake",
  },
  {
    inputs: [],
    name: "2#input-text",
    processorType: "input-text",
    inputText: "fake",
  },
  {
    inputs: [],
    name: "3#input-text",
    processorType: "input-text",
    inputText: "fake",
  },
];

// Fan-out: node 1 feeds four children (2-5) that can run in parallel.
// NOTE(review): `sleepDuration: undefined` is dropped by JSON.stringify, so
// the serialized payload is identical to omitting the field — presumably kept
// for symmetry with tests that set an explicit duration; confirm.
export const flowWithFourParallelNodeStep: Node[] = [
  {
    inputs: [],
    name: "1#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
    sleepDuration: undefined,
  },
  {
    inputs: [
      {
        inputNode: "1#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "2#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
    sleepDuration: undefined,
  },
  {
    inputs: [
      {
        inputNode: "1#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "3#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
    sleepDuration: undefined,
  },
  {
    inputs: [
      {
        inputNode: "1#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "4#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
    sleepDuration: undefined,
  },
  {
    inputs: [
      {
        inputNode: "1#llm-prompt",
        inputNodeOutputKey: 0,
      },
    ],
    name: "5#llm-prompt",
    processorType: "llm-prompt",
    raiseError: false,
    sleepDuration: undefined,
  },
];
/** Build a `process_file` payload wrapping the basic one-node flow. */
function getBasicProcessFileData(): ProcessFileData {
  const jsonFile = JSON.stringify(basicJsonFlow);
  return {
    jsonFile,
    parameters: {
      openaiApiKey: "apiKey",
    },
  };
}
/** Build a `run_node` payload targeting the basic flow's single node. */
function getBasicRunNodeData(): RunNodeData {
  const [firstNode] = basicJsonFlow;
  return {
    jsonFile: JSON.stringify(basicJsonFlow),
    nodeName: firstNode.name,
    parameters: {
      openaiApiKey: "apiKey",
    },
  };
}
/** Payload whose flow is missing the required inputText — expected to error server-side. */
function getJsonFlowWithMissingInputTextProcessFileData(): ProcessFileData {
  const payload: ProcessFileData = {
    jsonFile: JSON.stringify(jsonFlowWithMissingInputText),
    parameters: {
      openaiApiKey: "apiKey",
    },
  };
  return payload;
}
/**
 * Wrap an arbitrary flow into a `process_file` request payload.
 *
 * @param flow - the flow graph to serialize; typically a `Node[]` fixture.
 *               Typed `unknown` (instead of `any`) so callers stay flexible
 *               while the body cannot silently misuse the value.
 * @returns payload with the flow JSON-encoded plus a dummy test API key.
 */
function createRequestData(flow: unknown): ProcessFileData {
  return {
    jsonFile: JSON.stringify(flow),
    parameters: {
      openaiApiKey: "apiKey",
    },
  };
}
// Named exports consumed by the integration tests (some fixtures above are
// additionally exported inline at their declaration).
export {
  basicJsonFlow,
  jsonFlowWithMissingInputText,
  getBasicProcessFileData,
  getBasicRunNodeData,
  getJsonFlowWithMissingInputTextProcessFileData,
  createRequestData,
};
================================================
FILE: integration_tests/utils/testHooks.ts
================================================
import { Socket, io } from "socket.io-client";
let socket: Socket;
/**
 * Open the shared Socket.IO connection to the local backend and invoke the
 * mocha completion callback once connected (or on connection failure).
 *
 * @param done - mocha `done` callback; receives the connection error, if any.
 *               Typed explicitly instead of `any` so misuse is caught.
 */
export const setupSocket = (done: (err?: Error) => void) => {
  socket = io('http://localhost:5000');
  socket.on('connect', function () {
    done();
  });
  socket.on('connect_error', function (error) {
    done(error);
  });
};
/**
 * Tear down the shared socket. Safe to call even when `setupSocket` never
 * ran or never completed (socket still undefined) — then it is a no-op,
 * where the previous version threw a TypeError.
 */
export const disconnectSocket = () => {
  socket?.disconnect();
};

/** Accessor for the socket created by `setupSocket`. */
export const getSocket = () => socket;
================================================
FILE: packages/backend/.gitignore
================================================
# Files generated by the development environment
__pycache__/
*.py[cod]
# Files generated by the IDE
.idea/
.vscode/
# Log files
*.log
# Environment files
*.env
# Build artifacts
build
dist
server.spec
# Local storage
local_storage/
================================================
FILE: packages/backend/Dockerfile
================================================
# Backend image: Python 3.9 + Playwright (Chromium) + Poetry-managed dependencies.
FROM python:3.9

# Default values
ENV HOST=0.0.0.0
ENV PORT=5000

WORKDIR /app

# System dependencies: compilers and headers needed to build native wheels
# (libpq for Postgres drivers, libssl/libffi for crypto, libmagic for file typing).
RUN apt-get update && apt-get install -y \
    build-essential \
    libpq-dev \
    python3-dev \
    libssl-dev \
    libffi-dev \
    libmagic-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Playwright: install only Chromium plus its system deps; browser binaries are
# pinned to a fixed path via PLAYWRIGHT_BROWSERS_PATH.
ARG PLAYWRIGHT_VERSION=1.39
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
RUN pip install playwright==$PLAYWRIGHT_VERSION && \
    playwright install chromium && \
    playwright install-deps chromium

# Poetry & Dependencies — installed into the system interpreter (no in-container venv).
# Lock files are copied first so this layer stays cached across source changes.
RUN pip install --upgrade poetry \
    && poetry config virtualenvs.create false
COPY poetry.lock pyproject.toml /app/
RUN poetry install --no-interaction --no-root

# The rest of the app
COPY app /app/app/
COPY resources /app/resources
COPY tests/ /app/tests/
COPY server.py README.md /app/
COPY config.yaml /app/

EXPOSE 5000
CMD ["poetry", "run", "python", "server.py"]
================================================
FILE: packages/backend/README.md
================================================
================================================
FILE: packages/backend/app/env_config.py
================================================
import os
import sys
from typing import List, Optional

# Deployment environments: LOCAL (self-hosted, the default) vs CLOUD.
ENV_LOCAL = "LOCAL"
ENV_CLOUD = "CLOUD"
CURRENT_ENV = os.environ.get("DEPLOYMENT_ENV", ENV_LOCAL)

# Absolute paths anchored on this file so they resolve regardless of the CWD.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
BACKEND_DIR = os.path.dirname(CURRENT_DIR)
# Folder used for locally stored artifacts; name overridable via env var.
LOCAL_STORAGE_DIR = os.path.join(
    BACKEND_DIR, os.getenv("LOCAL_STORAGE_FOLDER_NAME", "local_storage")
)
def get_static_folder() -> str:
    """Return the directory containing the built UI static files.

    When running as a frozen bundle (``sys.frozen`` set, e.g. PyInstaller),
    the build sits under the unpack directory; otherwise it lives in the
    sibling ``ui`` package of the source tree.
    """
    if getattr(sys, "frozen", False):
        return os.path.join(sys._MEIPASS, "build")
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "..", "..", "ui", "build")
def is_cloud_env() -> bool:
    # True when DEPLOYMENT_ENV=CLOUD was set at startup.
    return CURRENT_ENV == ENV_CLOUD


def is_local_environment() -> bool:
    # True for the default environment (DEPLOYMENT_ENV unset or LOCAL).
    return CURRENT_ENV == ENV_LOCAL


def is_mock_env() -> bool:
    # Only the literal string "true" enables mock mode.
    return os.getenv("USE_MOCK") == "true"


def is_server_static_files_enabled() -> bool:
    # When true, the Flask app also serves the built UI (see routes.py).
    return os.getenv("SERVE_STATIC_FILES") == "true"


def get_local_storage_folder_path() -> str:
    # Accessor for the module-level LOCAL_STORAGE_DIR computed at import time.
    return LOCAL_STORAGE_DIR
def get_flask_secret_key() -> Optional[str]:
    # None when FLASK_SECRET_KEY is unset; the app then falls back to a default.
    return os.getenv("FLASK_SECRET_KEY")


def get_replicate_api_key() -> Optional[str]:
    return os.getenv("REPLICATE_API_KEY")


def get_background_task_max_workers() -> int:
    # Defaults to 2; raises ValueError if the env var holds a non-integer.
    return int(os.getenv("BACKGROUND_TASK_MAX_WORKERS", "2"))


def use_async_browser() -> bool:
    return os.getenv("USE_ASYNC_BROWSER") == "true"


def get_browser_tab_max_usage() -> int:
    # NOTE(review): presumably the max reuses of a pooled browser tab — confirm
    # against the browser pool implementation.
    return int(os.getenv("BROWSER_TAB_MAX_USAGE", "100"))


def get_browser_tab_pool_size() -> int:
    return int(os.getenv("BROWSER_TAB_POOL_SIZE", "3"))


def is_set_app_config_on_ui_enabled() -> bool:
    # Enabled by default; gates the "update_app_config" socket event.
    return os.getenv("ENABLE_SET_APP_CONFIG_ON_UI", "true") == "true"


def is_s3_enabled() -> bool:
    # S3 is considered configured as soon as an access key id is present.
    return os.getenv("S3_AWS_ACCESS_KEY_ID") is not None
================================================
FILE: packages/backend/app/flask/app_routes/__init__.py
================================================
================================================
FILE: packages/backend/app/flask/app_routes/image_routes.py
================================================
from app.env_config import (get_local_storage_folder_path)
from flask import Blueprint, send_from_directory

image_blueprint = Blueprint('image_blueprint', __name__)


@image_blueprint.route("/image/<path:filename>")
def serve_image(filename):
    """
    Serve image from local storage.

    send_from_directory rejects paths escaping the storage folder, so the
    <path:filename> capture cannot be used for directory traversal.
    """
    return send_from_directory(get_local_storage_folder_path(), filename)
================================================
FILE: packages/backend/app/flask/app_routes/node_routes.py
================================================
import json
from flask import Blueprint, request
from ...utils.node_extension_utils import get_dynamic_extension_config, get_extensions
# from ...utils.openapi_reader import OpenAPIReader
from ...utils.replicate_utils import (
get_highlighted_models_info,
get_model_openapi_schema,
get_replicate_collection_models,
get_replicate_collections,
get_replicate_models,
)
node_blueprint = Blueprint("node_blueprint", __name__)


@node_blueprint.route("/node/extensions")
def get_node_extensions():
    """List all registered node extensions."""
    extensions = get_extensions()
    return {"extensions": extensions}


@node_blueprint.route("/node/extensions/dynamic", methods=["POST"])
def get_dynamic_extension():
    """
    Build the dynamic configuration for one extension.

    Expects a JSON body with "processorType" and "data"; raises when the
    body is missing entirely.
    """
    request_body = request.json
    if request_body is None:
        raise Exception("Missing data")
    processor_type = request_body.get("processorType")
    data = request_body.get("data")
    config = get_dynamic_extension_config(processor_type, data)
    # .dict() — config looks like a pydantic model; confirm in node_extension_utils.
    return config.dict()


@node_blueprint.route("/node/models")
def get_public_models():
    """Return public Replicate models (paged via ?cursor=) plus highlighted ones."""
    cursor = request.args.get("cursor", None)
    public = get_replicate_models(cursor=cursor)
    highlighted = get_highlighted_models_info()
    return {"public": public, "highlighted": highlighted}


@node_blueprint.route("/node/collections")
def get_collections():
    """List Replicate model collections."""
    return get_replicate_collections()


@node_blueprint.route("/node/collections/<path:collection>")
def get_collection_models(collection):
    """Return the models of one Replicate collection (paged via ?cursor=)."""
    cursor = request.args.get("cursor", None)
    return get_replicate_collection_models(collection, cursor=cursor)


@node_blueprint.route("/node/replicate/config/<path:model>")
def get_config(model):
    """Return the OpenAPI schema describing a Replicate model."""
    return get_model_openapi_schema(model)
# @node_blueprint.route("/node/openapi/<path:api_name>/models")
# def get_openapi_models(api_name):
# api_reader = OpenAPIReader(f"./resources/openapi/{api_name}.json")
# return api_reader.get_all_paths()
# @node_blueprint.route("/node/openapi/<path:api_name>/config/<path:id>")
# def get_openapi_model_config(api_name, id):
# api_reader = OpenAPIReader(f"./resources/openapi/{api_name}.json")
# return api_reader.get_request_schema(id)
================================================
FILE: packages/backend/app/flask/app_routes/parameters_routes.py
================================================
import os
import yaml
from flask import Blueprint
parameters_blueprint = Blueprint("parameters_blueprint", __name__)
def load_config():
    # NOTE(review): the path is relative to the process CWD, not this file —
    # the server must be started from the backend root for this to resolve.
    with open("config.yaml", "r") as file:
        return yaml.safe_load(file)


@parameters_blueprint.route("/parameters", methods=["GET"])
def parameters():
    """Expose the parsed contents of config.yaml to the client."""
    config = load_config()
    return config
================================================
FILE: packages/backend/app/flask/app_routes/static_routes.py
================================================
import os
from flask import Blueprint, send_from_directory
from ...env_config import get_static_folder
static_blueprint = Blueprint('static_blueprint', __name__)
@static_blueprint.route("/", defaults={"path": ""})
@static_blueprint.route("/<path:path>")
def serve(path):
    """
    Serve UI static files from the static folder.

    Unknown or empty paths fall back to index.html so client-side routing works.
    """
    static_folder = get_static_folder()
    has_real_file = path != "" and os.path.exists(os.path.join(static_folder, path))
    target = path if has_real_file else "index.html"
    return send_from_directory(static_folder, target)
================================================
FILE: packages/backend/app/flask/app_routes/upload_routes.py
================================================
import logging
from flask import Blueprint
from ...storage.storage_strategy import StorageStrategy
from ...root_injector import get_root_injector
from flask import request
upload_blueprint = Blueprint("upload_blueprint", __name__)
@upload_blueprint.route("/upload")
def upload_file():
    """
    Return an upload link (plus matching download link) for the file named in
    the ``filename`` query parameter, using the configured storage strategy.

    Response shape: {"upload_data": ..., "download_link": ...}.
    (Previous docstring was a copy-paste from the image route.)
    """
    logging.info("Uploading file")
    storage_strategy = get_root_injector().get(StorageStrategy)
    filename = request.args.get("filename")
    try:
        data = storage_strategy.get_upload_link(filename)
    except Exception as e:
        logging.error(e)
        # Chain the original exception so the root cause stays in the traceback.
        raise Exception(
            "Error uploading file. "
            "Please check your S3 configuration. "
            "If you've not configured S3 please refer to docs.ai-flow.net/docs/file-upload"
        ) from e
    json_link = {
        "upload_data": data[0],
        "download_link": data[1],
    }
    return json_link
================================================
FILE: packages/backend/app/flask/decorators.py
================================================
from functools import wraps
from flask import jsonify, request, g
from flask_socketio import emit
import json
def with_flow_data_validations(*validation_funcs):
    """
    Decorator factory for socket event handlers.

    Parses the "jsonFile" field of the incoming payload (defaulting to "{}")
    and runs each ``validation_func`` against the parsed flow before invoking
    the wrapped handler. Any exception — bad JSON, a failed validation, or an
    error raised by the handler itself — is reported to the client via an
    "error" socket event instead of propagating; the wrapper then returns None.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(data, *args, **kwargs):
            try:
                flow_data = json.loads(data.get("jsonFile", "{}"))
                for validation_func in validation_funcs:
                    validation_func(flow_data)
                return func(data, *args, **kwargs)
            except Exception as e:
                # Note: this also swallows exceptions raised inside func.
                emit("error", {"error": str(e)})
        return wrapper
    return decorator
================================================
FILE: packages/backend/app/flask/flask_app.py
================================================
import logging
from flask import Flask, request, redirect
from flask_cors import CORS
import os
from ..env_config import get_flask_secret_key, get_static_folder
def create_app():
    """
    Build the Flask application: static folder, secret key, CORS, and an
    optional HTTP->HTTPS redirect (enabled with USE_HTTPS=true).

    Returns:
        Flask: the configured application instance.
    """
    app = Flask(__name__, static_folder=get_static_folder())

    # Read the key once instead of calling the accessor twice.
    secret_key = get_flask_secret_key()
    if secret_key is not None:
        logging.info("Flask secret key set")
        app.config["SECRET_KEY"] = secret_key
    else:
        logging.warning("Flask secret key not set")
        app.config["SECRET_KEY"] = "default_secret"

    CORS(app)

    if os.getenv("USE_HTTPS", "false").lower() == "true":

        @app.before_request
        def before_request():
            # 301 keeps clients on HTTPS after the first redirected request.
            if not request.is_secure:
                url = request.url.replace("http://", "https://", 1)
                return redirect(url, code=301)

    logging.info("App created")
    return app
================================================
FILE: packages/backend/app/flask/routes.py
================================================
import logging
from app.env_config import is_server_static_files_enabled, is_local_environment
from app.flask.socketio_init import flask_app
from .utils.constants import HTTP_OK
@flask_app.route("/healthcheck", methods=["GET"])
def healthcheck():
    """Liveness probe: plain "OK" with HTTP 200."""
    return "OK", HTTP_OK


# Blueprints are imported right before registration so the feature-flag
# conditions below stay next to the imports they guard.
from .app_routes.node_routes import node_blueprint

flask_app.register_blueprint(node_blueprint)

from .app_routes.upload_routes import upload_blueprint

flask_app.register_blueprint(upload_blueprint)

from .app_routes.parameters_routes import parameters_blueprint

flask_app.register_blueprint(parameters_blueprint)

# Optionally serve the built UI from this server (SERVE_STATIC_FILES=true).
if is_server_static_files_enabled():
    from .app_routes.static_routes import static_blueprint

    logging.info("Visual interface will be available at http://localhost:5000")
    flask_app.register_blueprint(static_blueprint)

# Local-only route serving generated images from the local storage folder.
if is_local_environment():
    from .app_routes.image_routes import image_blueprint

    logging.info("Environment set to LOCAL")
    flask_app.register_blueprint(image_blueprint)
================================================
FILE: packages/backend/app/flask/socketio_init.py
================================================
import eventlet

# Socket-only monkey patch, applied before flask_socketio is imported.
eventlet.monkey_patch(all=False, socket=True)

from flask_socketio import SocketIO

from .flask_app import create_app

flask_app = create_app()
# Single SocketIO server shared by the whole app; CORS open to any origin.
socketio = SocketIO(flask_app, cors_allowed_origins="*", async_mode="eventlet")
================================================
FILE: packages/backend/app/flask/sockets.py
================================================
import eventlet
from ..env_config import is_set_app_config_on_ui_enabled
eventlet.monkey_patch(all=False, socket=True)
from app.flask.socketio_init import flask_app
from app.flask.socketio_init import socketio
import logging
import json
from flask import g, request, session
from flask_socketio import emit
from ..root_injector import (
get_root_injector,
refresh_root_injector,
)
from .utils.constants import PARAMETERS_FIELD_NAME, ENV_API_KEYS
from ..processors.launcher.processor_launcher import ProcessorLauncher
from ..processors.context.processor_context_flask_request import (
ProcessorContextFlaskRequest,
)
import traceback
import os
def populate_request_global_object(data):
    """
    Populate flask.g with the API keys needed for the current request.

    When USE_ENV_API_KEYS=true, every key listed in ENV_API_KEYS must be
    present in the environment (uppercased name) and is copied onto ``g``
    as ``session_<key>``; a missing value raises. Otherwise the keys are
    taken from the payload's "parameters" dict, raising on any empty value.

    Parameters:
        data (dict): event payload; must contain a "parameters" dict unless
        USE_ENV_API_KEYS is enabled.
    """
    use_env = os.getenv("USE_ENV_API_KEYS", "false").lower()
    logging.debug("use_env: %s", use_env)

    if use_env == "true":
        for key in ENV_API_KEYS:
            env_key = key.upper()
            value = os.getenv(env_key)
            if not value:
                raise Exception(f"Required {env_key} not provided in environment.")
            setattr(g, f"session_{key}", value)
    else:
        if not PARAMETERS_FIELD_NAME in data:
            raise Exception(f"No {PARAMETERS_FIELD_NAME} provided in data.")
        for key, value in data[PARAMETERS_FIELD_NAME].items():
            if value:
                setattr(g, f"session_{key}", value)
            else:
                raise Exception(f"No {key} provided in data.")
@socketio.on("connect")
def handle_connect():
    # Log only; no per-connection state is set up here.
    logging.info("Client connected")
@socketio.on("process_file")
def handle_process_file(data):
    """
    This event handler is activated when a "process_file" event is received via Socket.IO. It allows to run every node in
    the file, even if they have been executed before.

    Emits "run_end" with the launcher output on success, or "error" with a
    message on invalid input or any processing failure.

    Parameters:
        data (dict): A dictionary encompassing the event's payload, which comprises the JSON configuration file
        ("jsonFile").
    """
    try:
        populate_request_global_object(data)
        flow_data = json.loads(data.get("jsonFile"))

        # A fresh launcher per request, bound to this socket session (request.sid).
        launcher = get_root_injector().get(ProcessorLauncher)
        launcher.set_context(ProcessorContextFlaskRequest(g, session, request.sid))

        if flow_data:
            processors = launcher.load_processors(flow_data)
            output = launcher.launch_processors(processors)
            logging.debug("Emitting processing_result event with output: %s", output)
            emit("run_end", {"output": output})
        else:
            logging.warning("Invalid input or missing configuration file")
            emit("error", {"error": "Invalid input or missing configuration file"})
    except Exception as e:
        # Report to the client first, then log the full traceback server-side.
        emit("error", {"error": str(e)})
        traceback.print_exc()
        logging.error(f"An error occurred: {str(e)}")
@socketio.on("run_node")
def handle_run_node(data):
    """
    This event handler is activated when a "run_node" event is received via Socket.IO. It facilitates the processing
    of the specified node in the data payload, launching only the designated node and preceding nodes if they
    haven't been executed earlier.

    Parameters:
        data (dict): A dictionary encompassing the event's payload, which comprises the JSON configuration file
        ("jsonFile") and the name of the node to run ("nodeName").
    """
    # Bug fix: initialized before the try block. If populate_request_global_object
    # (or the JSON parse) raised, the except clause below referenced node_name
    # before assignment and crashed with UnboundLocalError, masking the real error.
    node_name = None
    try:
        populate_request_global_object(data)
        flow_data = json.loads(data.get("jsonFile"))
        node_name = data.get("nodeName")

        # A fresh launcher per request, bound to this socket session (request.sid).
        launcher = get_root_injector().get(ProcessorLauncher)
        launcher.set_context(ProcessorContextFlaskRequest(g, session, request.sid))

        if flow_data and node_name:
            processors = launcher.load_processors_for_node(flow_data, node_name)
            output = launcher.launch_processors_for_node(processors, node_name)
            logging.debug("Emitting processing_result event with output: %s", output)
            emit("run_end", {"output": output})
        else:
            logging.warning("Invalid input or missing parameters")
            emit("error", {"error": "Invalid input or missing parameters"})
    except Exception as e:
        emit(
            "error",
            {"error": str(e), "nodeName": node_name},
        )
        traceback.print_exc()
        logging.error(f"An error occurred: {node_name} - {str(e)}")
@socketio.on("disconnect")
def handle_disconnect():
    # Log only; no per-connection state is torn down here.
    logging.info("Client disconnected")


@socketio.on("update_app_config")
def handle_update_app_config(data):
    """
    Update a whitelisted set of storage/API environment variables from the
    client, then rebuild the root injector so dependent services pick up the
    new configuration. No-op when ENABLE_SET_APP_CONFIG_ON_UI is disabled.
    """
    if not is_set_app_config_on_ui_enabled():
        return
    logging.info("Updating app config")
    # Only these keys may be set from the client; anything else is ignored.
    config_keys = [
        "S3_BUCKET_NAME",
        "S3_AWS_ACCESS_KEY_ID",
        "S3_AWS_SECRET_ACCESS_KEY",
        "S3_AWS_REGION_NAME",
        "S3_ENDPOINT_URL",
        "REPLICATE_API_KEY",
    ]
    for key in config_keys:
        value = data.get(key)
        # Skip missing or blank values so existing settings are not clobbered.
        if value is not None and str(value).strip():
            logging.info(f"Setting {key}")
            os.environ[key] = value
    refresh_root_injector()
================================================
FILE: packages/backend/app/flask/utils/constants.py
================================================
# HTTP status codes used by the route handlers.
HTTP_OK = 200
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_UNAUTHORIZED = 401

# Flask session key for the user id.
SESSION_USER_ID_KEY = "user_id"

# Payload field carrying the per-request API keys on socket events.
PARAMETERS_FIELD_NAME = "parameters"

# Keys loaded onto flask.g per request (see sockets.py); when
# USE_ENV_API_KEYS=true each name is uppercased to form the env var name.
ENV_API_KEYS = [
    "openai_api_key",
    "stabilityai_api_key",
    "replicate_api_key",
    "anthropic_api_key",
    "openrouter_api_key",
]
================================================
FILE: packages/backend/app/llms/utils/max_token_for_model.py
================================================
import tiktoken
# Fallback context size for models not present in the lookup table.
DEFAULT_MAX_TOKEN = 4097

# Context-window sizes per exact model name. Hoisted to module level so the
# table is built once instead of on every call.
_MODEL_TOKEN_LIMITS = {
    # GPT-4.1 models
    "gpt-4.1": 1047576,
    "gpt-4.1-mini": 1047576,
    "gpt-4.1-nano": 1047576,
    # GPT-4 models
    "gpt-4o": 128000,
    "gpt-4o-2024-11-20": 128000,
    "gpt-4o-mini": 128000,
    "gpt-4-turbo": 128000,
    "gpt-4-turbo-preview": 128000,
    "gpt-4-1106-preview": 128000,
    "gpt-4-vision-preview": 128000,
    "gpt-4": 8192,
    "gpt-4-0613": 8192,
    "gpt-4-32k": 32768,
    "gpt-4-32k-0613": 32768,
    "gpt-4-0314": 8192,
    "gpt-4-32k-0314": 32768,
    # GPT-3.5 models
    "gpt-3.5-turbo": 16385,
    "gpt-3.5-turbo-1106": 16385,
    "gpt-3.5-turbo-16k": 16385,
    "gpt-3.5-turbo-instruct": 4097,
    "gpt-3.5-turbo-0613": 4097,
    "gpt-3.5-turbo-16k-0613": 16385,
    "gpt-3.5-turbo-0301": 4097,
    # Other GPT-3.5 models
    "text-davinci-003": 4097,
    "text-davinci-002": 4097,
    "code-davinci-002": 8001,
}


def max_token_for_model(model_name: str) -> int:
    """
    Return the maximum context length (in tokens) for ``model_name``.

    Any name containing "gpt-4o" (including dated or mini variants) maps to
    128000; unknown names fall back to DEFAULT_MAX_TOKEN.
    """
    # Substring check first so every "gpt-4o-*" variant is covered without
    # enumerating each one in the table.
    if "gpt-4o" in model_name:
        return 128000
    return _MODEL_TOKEN_LIMITS.get(model_name, DEFAULT_MAX_TOKEN)
def nb_token_for_input(input: str, model_name: str) -> int:
    """
    Count the tokens in ``input`` using the tokenizer for ``model_name``.

    Falls back to the "gpt-4o" encoding when tiktoken cannot resolve the
    model name (e.g. brand-new or non-OpenAI models).
    """
    try:
        return len(tiktoken.encoding_for_model(model_name).encode(input))
    except Exception as e:
        # Unknown model names make tiktoken raise; fall back to a widely
        # compatible encoding rather than crashing the caller.
        default_model_for_token = "gpt-4o"
        return len(tiktoken.encoding_for_model(default_model_for_token).encode(input))
================================================
FILE: packages/backend/app/log_config.py
================================================
import logging
import colorlog
def setup_logger(name: str):
    """
    Return a logger whose level names are colorized on the stream handler.

    Idempotent: a handler is only attached when the logger has none yet, so
    calling setup_logger twice with the same name no longer duplicates every
    log line.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        formatter = colorlog.ColoredFormatter(
            "%(log_color)s%(levelname)-8s%(reset)s %(message)s",
            datefmt=None,
            reset=True,
            log_colors={
                "DEBUG": "cyan",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red",
            },
        )
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
root_logger = setup_logger("root")
root_logger.setLevel(logging.INFO)
================================================
FILE: packages/backend/app/processors/components/__init__.py
================================================
================================================
FILE: packages/backend/app/processors/components/core/__init__.py
================================================
================================================
FILE: packages/backend/app/processors/components/core/ai_data_splitter_processor.py
================================================
import logging
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
from .processor_type_name_utils import ProcessorType
from openai import OpenAI
def interpret_escape_sequences(separator):
    """Map the literal two-character sequences ``\\n``, ``\\r`` and ``\\t``
    to their real control characters; any other value passes through unchanged."""
    replacements = {"\\n": "\n", "\\r": "\r", "\\t": "\t"}
    return replacements.get(separator, separator)
class AIDataSplitterProcessor(ContextAwareProcessor):
processor_type = ProcessorType.AI_DATA_SPLITTER
DEFAULT_SEPARATOR = ";"
AI_MODE = "ai"
MANUAL_MODE = "manual"
    def __init__(self, config, context: ProcessorContext):
        """Configure the splitter; the OpenAI key is read from the context."""
        super().__init__(config, context)
        # Number of chunks produced by the last process() call.
        self.nb_output = 0
        # Model used when splitting in "ai" mode.
        self.model = "gpt-4o"
        self.api_key = context.get_value("openai_api_key")
    def get_llm_response(self, messages):
        """Send ``messages`` to the OpenAI Responses API and return the text output."""
        client = OpenAI(api_key=self.api_key)
        kwargs = {"model": self.model, "input": messages}
        response = client.responses.create(**kwargs)
        return response.output_text
    def process(self):
        """
        Split the upstream input into a list of strings.

        In "ai" mode an LLM is prompted (via init_context) to separate the
        ideas with semicolons and its answer is split on ";"; in "manual"
        mode the input is split on a user-provided separator, with literal
        escape sequences like "\\n" interpreted first. Returns the resulting
        list, or "" when no input processor is connected.
        """
        if self.get_input_processor() is None:
            return ""
        input_data = self.get_input_processor().get_output(
            self.get_input_node_output_key()
        )

        # Defaults to "ai" when the node has no explicit mode input.
        mode = self.get_input_by_name("mode", self.AI_MODE)

        if mode == self.AI_MODE:
            self.init_context(input_data)
            answer = self.get_llm_response(self.messages)
            # UTF-8 round-trip; effectively a no-op normalization of the answer.
            data_to_split = answer.encode("utf-8").decode("utf8")
            self.set_output(
                data_to_split.split(AIDataSplitterProcessor.DEFAULT_SEPARATOR)
            )
            self.nb_output = len(self._output)
        if mode == self.MANUAL_MODE:
            separator = self.get_input_by_name(
                "separator", AIDataSplitterProcessor.DEFAULT_SEPARATOR
            )
            separator = interpret_escape_sequences(separator)
            self.set_output(input_data.split(separator))
            self.nb_output = len(self._output)
        return self._output
def init_context(self, input_data: str) -> None:
"""
Initialize the context for the OpenAI Chat model with a set of standard messages.
Additional user input data can be provided, which will be added to the messages.
:param input_data: Additional information or text provided by the user that needs processing.
"""
# Define the system message with clear instructions and examples
system_msg = (
"You are an assistant whose task is to separate ideas or concepts from the input text using semicolons (;). "
"Do not include any meta-comments or self-references in your responses. "
"Here are some examples of how to perform the task: "
"\n\n"
"Example 1:\n"
"Input: 'The main idea is that dogs are very popular pets, and many people enjoy walking them in parks. Another important concept is that dogs need a lot of exercise to stay healthy.'\n"
"Output: 'Dogs are very popular pets; many people enjoy walking them in parks; dogs need a lot of exercise to stay healthy.'\n\n"
"Example 2:\n"
"Input: '1) A picture of a woman 2) A video with a bird 3) Air conditioner'\n"
"Output: 'A picture of a woman; A video with a bird; Air conditioner.'\n\n"
"Example 3:\n"
"Input: 'Here are two ideas: - Dogs are better than cats - Birds are beautiful'\n"
"Output: 'Dogs are better than cats; Birds are beautiful.'\n\n"
"Example 4:\n"
"Input: 'Crée une interprétation artistique numérique de la ville de New York la nuit sous la pluie, mettant l'accent sur les reflets lumineux sur les surfaces mouillées. Imagine et dessine un nouveau type de fleur qui n'existe pas encore dans la nature. Assure-toi qu'elle a une allure exotique et utilise des couleurs vives et uniques que l'on ne trouve pas couramment chez les fleurs. Conçois une image représentant une scène du futur, avec des villes futuristes, des technologies avancées et des formes de vie artificielles coexistant avec des formes de vie naturelles.'\n"
"Output: 'Crée une interprétation artistique numérique de la ville de New York la nuit sous la pluie, mettant l'accent sur les reflets lumineux sur les surfaces mouillées; Imagine et dessine un nouveau type de fleur qui n'existe pas encore dans la nature. Assure-toi qu'elle a une allure exotique et utilise des couleurs vives et uniques que l'on ne trouve pas couramment chez les fleurs; Conçois une image représentant une scène du futur, avec des villes futuristes, des technologies avancées et des formes de vie artificielles coexistant avec des formes de vie naturelles.'\n\n"
"After reading the input, output each distinct idea or concept separated by semicolons."
)
user_nb_output = self.get_input_by_name("nb_output", 0)
if user_nb_output > 1:
system_msg += f"\nThe estimated number of outputs for the next message is {user_nb_output}."
self.messages = [
{"role": "system", "content": system_msg},
{"role": "user", "content": input_data},
]
def cancel(self):
pass
================================================
FILE: packages/backend/app/processors/components/core/dall_e_prompt_processor.py
================================================
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
from openai import OpenAI
from .processor_type_name_utils import ProcessorType
class DallEPromptProcessor(ContextAwareProcessor):
    """Generates a single image with DALL-E 3 and returns its URL.

    The prompt comes from the node config; when the config prompt is empty,
    the output of the linked upstream node is used instead.
    """

    processor_type = ProcessorType.DALLE_PROMPT
    DEFAULT_MODEL = "dall-e-3"
    DEFAULT_SIZE = "1024x1024"
    DEFAULT_QUALITY = "standard"

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.prompt = config.get("prompt")
        self.size = config.get("size", DallEPromptProcessor.DEFAULT_SIZE)
        self.quality = config.get("quality", DallEPromptProcessor.DEFAULT_QUALITY)

    def process(self):
        upstream = self.get_input_processor()
        # Fall back to the upstream output only when no prompt was configured.
        if upstream is not None and (self.prompt is None or len(self.prompt) == 0):
            self.prompt = upstream.get_output(self.get_input_node_output_key())
        api_key = self._processor_context.get_value("openai_api_key")
        client = OpenAI(api_key=api_key)
        response = client.images.generate(
            model=DallEPromptProcessor.DEFAULT_MODEL,
            prompt=self.prompt,
            n=1,
            size=self.size,
            quality=self.quality,
        )
        # Return the hosted URL of the first (and only) generated image.
        return response.data[0].url

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/core/display_processor.py
================================================
from .processor_type_name_utils import ProcessorType
from ..processor import BasicProcessor
class DisplayProcessor(BasicProcessor):
    """Pass-through node: returns the upstream output unchanged so the UI can render it.

    Fix: every sibling processor declares `processor_type` via the
    ProcessorType enum; this class used the raw string "display" even though
    ProcessorType (whose DISPLAY member has the same underlying value,
    "display") is imported in this module. Made it consistent.
    """

    processor_type = ProcessorType.DISPLAY

    def __init__(self, config):
        super().__init__(config)

    def process(self):
        # Nothing connected upstream -> nothing to display.
        if self.get_input_processor() is None:
            return ""
        return self.get_input_processor().get_output(
            self.get_input_node_output_key()
        )
================================================
FILE: packages/backend/app/processors/components/core/file_processor.py
================================================
from .processor_type_name_utils import ProcessorType
from ..processor import BasicProcessor
class FileProcessor(BasicProcessor):
    """Emits the URL of an uploaded file, taken from the node config.

    The config key "fileUrl" is required; a missing key raises KeyError at
    construction time rather than during flow execution.
    """

    processor_type = ProcessorType.FILE

    def __init__(self, config):
        super().__init__(config)
        self.url = config["fileUrl"]

    def process(self):
        # The node's output is simply the configured file URL.
        return self.url
================================================
FILE: packages/backend/app/processors/components/core/gpt_vision_processor.py
================================================
import re
from typing import Any, List
from ...launcher.event_type import EventType
from ...launcher.processor_event import ProcessorEvent
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
from .processor_type_name_utils import ProcessorType
from openai import OpenAI
from urllib.parse import urlparse
class GPTVisionProcessor(ContextAwareProcessor):
    """Sends a text prompt plus one or more image URLs to an OpenAI
    vision-capable chat model and streams the answer back through
    processor STREAMING events."""

    processor_type = ProcessorType.GPT_VISION
    DEFAULT_MODEL = "gpt-4o"

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def _gather_image_url_values(self) -> List[Any]:
        """
        Pull the value of the `image_url` field plus every `image_url_<n>`
        child field, preserving the order in which they appear in
        self.fields_names, and dropping fields the user left empty.
        """
        # Match image_url_0, image_url_1, ... whatever the UI generates
        child_pattern = re.compile(r"^image_url_\d+$")
        # Keep the order the fields have in self.fields_names.
        ordered_field_names = [
            fname
            for fname in self.fields_names
            if fname == "image_url" or child_pattern.match(fname)
        ]
        values = [self.get_input_by_name(fname, None) for fname in ordered_field_names]
        # Filter out unset fields.
        return [v for v in values if v is not None]

    def process(self):
        # Collect the prompt; image URLs come from _gather_image_url_values().
        self.vision_inputs = {
            "prompt": self.get_input_by_name("prompt"),
        }
        images_urls = self._gather_image_url_values()
        if (
            self.vision_inputs["prompt"] is None
            or len(self.vision_inputs["prompt"]) == 0
        ):
            raise ValueError("No prompt provided.")
        if len(images_urls) == 0:
            raise ValueError("No image provided.")
        # Reject any value that does not parse as an absolute URL.
        for url in images_urls:
            if not self.is_valid_url(url):
                raise ValueError(f"Invalid URL provided. \n {url}")
        api_key = self._processor_context.get_value("openai_api_key")
        client = OpenAI(
            api_key=api_key,
        )
        # Build the multi-part user message: all images first, then the text.
        content = []
        for image in images_urls:
            content.append(
                {
                    "type": "image_url",
                    "image_url": {"url": image},
                }
            )
        content.append(
            {
                "type": "text",
                "text": self.vision_inputs["prompt"],
            }
        )
        response = client.chat.completions.create(
            model=GPTVisionProcessor.DEFAULT_MODEL,
            messages=[
                {
                    "role": "user",
                    "content": content,
                }
            ],
            max_tokens=4096,
            stream=True,
        )
        # Accumulate streamed deltas, notifying listeners with the running text.
        final_response = ""
        for chunk in response:
            if not chunk.choices[0].delta.content:
                continue
            final_response += chunk.choices[0].delta.content
            event = ProcessorEvent(self, final_response)
            self.notify(EventType.STREAMING, event)
        return final_response

    def is_valid_url(self, url):
        """Return True when `url` parses with both a scheme and a network location."""
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc])
        except Exception:
            return False

    def cancel(self):
        # Streaming loop has no external cancellation handle.
        pass
================================================
FILE: packages/backend/app/processors/components/core/input_image_processor.py
================================================
from .processor_type_name_utils import ProcessorType
from ..processor import BasicProcessor
class InputImageProcessor(BasicProcessor):
    """Entry node that emits a user-supplied value from the "inputText"
    config key (presumably an image URL/reference — confirm against the UI)."""

    processor_type = ProcessorType.INPUT_IMAGE

    def __init__(self, config):
        super().__init__(config)
        self.inputText = config["inputText"]

    def process(self):
        # Output the configured value verbatim.
        return self.inputText
================================================
FILE: packages/backend/app/processors/components/core/input_processor.py
================================================
from .processor_type_name_utils import ProcessorType
from ..processor import BasicProcessor
class InputProcessor(BasicProcessor):
    """Entry node that emits the user-supplied text from the "inputText"
    config key, unchanged."""

    processor_type = ProcessorType.INPUT_TEXT

    def __init__(self, config):
        super().__init__(config)
        self.inputText = config["inputText"]

    def process(self):
        # Output the configured text verbatim.
        return self.inputText
================================================
FILE: packages/backend/app/processors/components/core/llm_prompt_processor.py
================================================
import logging
from app.processors.exceptions import LightException
from ...launcher.processor_event import ProcessorEvent
from ...launcher.event_type import EventType
from ....llms.utils.max_token_for_model import max_token_for_model, nb_token_for_input
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
from openai import OpenAI
from .processor_type_name_utils import ProcessorType
class LLMPromptProcessor(ContextAwareProcessor):
    """Runs a prompt (optionally with upstream context) against an OpenAI
    model via the Responses API, streaming partial output to listeners and
    returning the final text."""

    processor_type = ProcessorType.LLM_PROMPT
    DEFAULT_MODEL = "gpt-4o"
    streaming = True
    # Models that accept the web_search_preview tool.
    models_with_web_search = [
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4.1",
        "gpt-4.1-mini",
    ]

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.model = config.get("model", LLMPromptProcessor.DEFAULT_MODEL)
        self.prompt = config.get("prompt", None)

    def handle_stream_answer(self, awnser):
        # Emits the accumulated answer so far as a STREAMING event.
        # NOTE(review): parameter name 'awnser' is a typo for 'answer';
        # kept as-is to avoid breaking any keyword-argument callers.
        event = ProcessorEvent(self, awnser)
        self.notify(EventType.STREAMING, event)

    def nb_tokens_from_messages(self, messages, model):
        """
        Calculates the total number of tokens in a list of messages using
        nb_token_for_input, adding a fixed per-message overhead plus one
        trailing overhead for the reply.
        """
        total_tokens = 0
        token_overhead = 3
        for message in messages:
            content_tokens = nb_token_for_input(message["content"], model)
            total_tokens += content_tokens + token_overhead
        total_tokens += token_overhead
        return total_tokens

    def check_for_html_tags(self, text):
        """
        Checks if the given text contains HTML tags or attributes.
        Cheap substring probe: only detects full-document markup
        ('<html' / '<body'), not arbitrary tags.
        """
        if "<html" in text or "<body" in text:
            return True
        return False

    def process(self):
        """Resolve inputs, validate the token budget, then stream the completion."""
        api_key = self._processor_context.get_value("openai_api_key")
        search_enabled = False
        if self.model in self.models_with_web_search:
            search_enabled = self.get_input_by_name("web_search", False)
            # NOTE(review): may legitimately be None; it is forwarded to the
            # API unchanged when search is enabled — confirm the API accepts None.
            search_context_size = self.get_input_by_name("search_context_size", None)
        if api_key is None:
            raise Exception("No OpenAI API key found")
        # v2+ nodes read context/prompt from named inputs; v1 nodes take the
        # context from the single linked upstream node.
        af_node_version = self.get_input_by_name("af_node_version", 1)
        context = None
        if af_node_version > 1:
            context = self.get_input_by_name("context", None)
            self.prompt = self.get_input_by_name("prompt", None)
        else:
            if self.get_input_processor() is not None:
                context = self.get_input_processor().get_output(
                    self.get_input_node_output_key()
                )
        if self.prompt is None:
            raise Exception("No prompt provided")
        self.init_context(context)
        # Refuse requests whose messages exceed the model's context window.
        total_tokens = self.nb_tokens_from_messages(self.messages, self.model)
        model_max_tokens = max_token_for_model(self.model)
        if total_tokens > model_max_tokens:
            logging.warning("Messages size: " + str(total_tokens))
            logging.warning("Model capacity: " + str(model_max_tokens))
            message = (
                "The text size exceeds the model's capacity. "
                "Consider using a model with greater context handling capabilities or utilize the 'Find Similar Text' node to create a cohesive, condensed version of the context."
            )
            if (
                context and self.check_for_html_tags(context)
            ) or self.check_for_html_tags(self.prompt):
                message += (
                    "\n\n"
                    "Note: HTML tags or attributes are detected within the data provided. If they are unnecessary for this task, removing them could significantly reduce the context size."
                )
            raise Exception(message)
        client = OpenAI(api_key=api_key)
        kwargs = {"model": self.model, "input": self.messages, "stream": self.streaming}
        if search_enabled:
            kwargs["tools"] = [
                {
                    "type": "web_search_preview",
                    "search_context_size": search_context_size,
                }
            ]
        stream = client.responses.create(**kwargs)
        final_response = ""
        for event in stream:
            # NOTE(review): local 'type' shadows the builtin within this loop.
            type = event.type
            if type == "response.output_text.delta":
                # Accumulate and re-emit the running text on every delta.
                final_response += event.delta
                self.handle_stream_answer(final_response)
            if type == "response.completed":
                # Prefer the server's authoritative final text over our
                # locally-accumulated deltas.
                response_data = event.response
                final_response = response_data.output_text
            if type == "response.failed":
                response_data = event.response
                if not hasattr(response_data, "error"):
                    logging.warning(f"Error from OpenAI with no data: {response_data}")
                    continue
                raise LightException(
                    f"Error from OpenAI : {response_data.error.message}"
                )
            if type == "error":
                raise LightException(f"Error from OpenAI : {event.message}")
        return final_response

    def init_context(self, context: str) -> None:
        """
        Initialise the messages for the LLM call.

        With no context, the prompt is sent as-is under a minimal system
        message; with context, the prompt and context are wrapped in
        '#Request'/'#Context' tags that the system message explains.

        :param context: additional information to be used by the assistant.
        """
        if context is None:
            system_msg = "You are a helpful assistant. "
            user_msg_content = self.prompt
        else:
            system_msg = (
                "You are a helpful assistant. "
                "You will respond to requests indicated by the '#Request' tag, "
                "using the context provided under the '#Context' tag."
                "Your response should feel natural and seamless, as if you've internalized the context "
                "and are answering the request without needing to directly point back to the information provided"
            )
            user_msg_content = f"#Context: {context} \n\n#Request: {self.prompt}"
        self.messages = [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg_content},
        ]

    def cancel(self):
        # No external cancellation handle for the streaming request.
        pass
================================================
FILE: packages/backend/app/processors/components/core/merge_processor.py
================================================
from ..processor import ContextAwareProcessor
from .processor_type_name_utils import ProcessorType, MergeModeEnum
class MergeProcessor(ContextAwareProcessor):
    """Substitutes upstream values into a prompt template.

    Placeholders of the form ``${input-N}`` (1-based) in the configured
    prompt are replaced by the corresponding input values, in order.
    """

    processor_type = ProcessorType.MERGER_PROMPT

    def __init__(self, config, context):
        super().__init__(config, context)
        # mergeMode arrives as a string/number in config; normalise to the enum.
        self.merge_mode = MergeModeEnum(int(config["mergeMode"]))

    def update_prompt(self, inputs):
        """Replace each ${input-N} placeholder in self.prompt with the N-th value."""
        position = 1
        for value in inputs:
            placeholder = f"${{input-{position}}}"
            self.prompt = self.prompt.replace(placeholder, str(value))
            position += 1

    def process(self):
        self.prompt = self.get_input_by_name("prompt", "")
        names = self.get_input_names_from_config()
        values = [self.get_input_by_name(name, "") for name in names]
        self.update_prompt(values)
        return self.prompt

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/core/processor_type_name_utils.py
================================================
from enum import Enum
class MergeModeEnum(Enum):
    """Merge strategies for MergeProcessor; values arrive as ints via the
    node config's "mergeMode" key."""

    MERGE = 1
    MERGE_AND_PROMPT = 2
class ProcessorType(Enum):
    """String identifier for each processor implementation.

    Each processor class sets its `processor_type` to one of these members;
    the string values act as stable wire-format node-type names, so they
    should not be changed once in use.
    """

    INPUT_TEXT = "input-text"
    INPUT_IMAGE = "input-image"
    URL_INPUT = "url_input"
    LLM_PROMPT = "llm-prompt"
    GPT_VISION = "gpt-vision"
    YOUTUBE_TRANSCRIPT_INPUT = "youtube_transcript_input"
    DALLE_PROMPT = "dalle-prompt"
    STABLE_DIFFUSION_STABILITYAI_PROMPT = "stable-diffusion-stabilityai-prompt"
    STABLE_VIDEO_DIFFUSION_REPLICATE = "stable-video-diffusion-replicate"
    REPLICATE = "replicate"
    MERGER_PROMPT = "merger-prompt"
    AI_DATA_SPLITTER = "ai-data-splitter"
    TRANSITION = "transition"
    DISPLAY = "display"
    FILE = "file"
    STABLE_DIFFUSION_THREE = "stabilityai-stable-diffusion-3-processor"
    TEXT_TO_SPEECH = "openai-text-to-speech-processor"
    DOCUMENT_TO_TEXT = "document-to-text-processor"
    STABILITYAI = "stabilityai-generic-processor"
    CLAUDE = "claude-anthropic-processor"
    REPLACE_TEXT = "replace-text"
================================================
FILE: packages/backend/app/processors/components/core/replicate_processor.py
================================================
from datetime import datetime
import logging
from queue import Queue
import time
from urllib.parse import urlparse
from app.env_config import is_s3_enabled
from ...launcher.event_type import EventType
from ...launcher.processor_event import ProcessorEvent
from ....utils.processor_utils import stream_download_file_as_binary
from ...exceptions import LightException
from ....utils.replicate_utils import (
get_input_schema_from_open_API_schema,
get_model_openapi_schema,
get_output_schema_from_open_API_schema,
)
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
import replicate
from .processor_type_name_utils import ProcessorType
from ....tasks.task_exception import TaskAlreadyRegisteredError
from ....tasks.thread_pool_task_manager import add_task, register_task_processor
from ....tasks.task_utils import wait_for_result
class ReplicateProcessor(ContextAwareProcessor):
    """Runs an arbitrary Replicate model ("owner/name:version") and returns
    its output, mirroring any URI outputs into S3-backed storage when enabled.

    Prediction completion is awaited on a shared background task pool so the
    processor thread can block with bounded polling.
    """

    processor_type = ProcessorType.REPLICATE

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.is_processing = False
        self.config = config
        self.model = config.get("model")
        if self.model is None:
            # Fallback: older configs carry the model under config.nodeName.
            self.model = config.get("config").get("nodeName")
        if ":" not in self.model:
            logging.warning(f"Model {self.model} has no version")
            raise Exception(f"Cannot find version for this model : {self.model}.")
        # NOTE(review): attribute name keeps its original misspelling
        # ('withouth'); renaming could break external readers.
        self.model_name_withouth_version = self.model.split(":")[0]

    def get_prediction_result(
        self, prediction, processor, timeout=3600.0, initial_sleep=0.1, max_sleep=5.0
    ):
        """Queue a background wait for `prediction` and block (with backoff
        between initial_sleep and max_sleep) until it resolves or `timeout`
        seconds elapse; raises TimeoutError on expiry."""
        results_queue = Queue()
        add_task("replicate_prediction_wait", (prediction, processor), results_queue)
        try:
            prediction = wait_for_result(
                results_queue, timeout, initial_sleep, max_sleep
            )
        except TimeoutError as e:
            raise TimeoutError("Prediction result timed out")
        return prediction

    @staticmethod
    def wait_for_prediction_task(task_data):
        """Background task body: poll Replicate until the prediction reaches a
        terminal state, flagging the owning processor while it is running."""
        prediction, processor = task_data
        while prediction.status not in ["succeeded", "failed", "canceled"]:
            time.sleep(prediction._client.poll_interval)
            if prediction.status == "processing":
                processor.is_processing = True
            prediction.reload()
        return prediction

    def register_background_task(self):
        """Register the polling task type once; re-registration is a no-op."""
        try:
            register_task_processor(
                "replicate_prediction_wait",
                self.wait_for_prediction_task,
                max_concurrent_tasks=100,
            )
        except TaskAlreadyRegisteredError as e:
            pass

    def process(self):
        """Create the prediction, wait for it, normalise the output per the
        model's OpenAPI output schema, and mirror URI outputs to storage."""
        api_key = self._processor_context.get_value("replicate_api_key")
        self.schema = get_model_openapi_schema(self.model_name_withouth_version)
        input_processors = self.get_input_processors()
        input_output_keys = self.get_input_node_output_keys()
        input_names = self.get_input_names()
        if input_processors:
            for processor, name, key in zip(
                input_processors, input_names, input_output_keys
            ):
                output = processor.get_output(key)
                if output is None:
                    continue
                # Coerce upstream strings to the numeric types declared in
                # the model's input schema.
                input_type = self._get_nested_input_schema_property(name, "type")
                if input_type == "integer":
                    output = int(output)
                if input_type == "number":
                    output = float(output)
                self.config[name] = output
        api = replicate.Client(api_token=api_key)
        output_schema = get_output_schema_from_open_API_schema(self.schema["schema"])
        logging.debug(f"Output schema : {output_schema}")
        output_type = output_schema.get("type")
        output_array_display = output_schema.get("x-cog-array-display")
        output_format = output_schema.get("format")
        # Redundant with the __init__ guard, kept as a defensive re-check.
        if not ":" in self.model:
            logging.warning(f"Model {self.model} has no version")
            raise Exception("Cannot find version for this model")
        rest, version_id = self.model.split(":")
        self.config["disable_safety_checker"] = True
        try:
            self.prediction = api.predictions.create(
                version=version_id, input=self.config
            )
        except Exception as e:
            logging.warning(f"Error while creating prediction : {e}")
            raise LightException(
                "Please review your input to ensure it aligns with the expected format. \n\n"
                "For reference, you can review the examples here: \n"
                f"https://replicate.com/{self.model_name_withouth_version}/examples\n\n"
                f"Error message from Replicate: \n\n {e}"
            )
        self.register_background_task()
        self.prediction = self.get_prediction_result(self.prediction, self)
        if self.prediction.status != "succeeded":
            # Build a user-facing failure message (suppressing guidance when
            # the run was merely canceled).
            replicate_error_message = self.prediction.error
            message_str = f"Your Replicate prediction ended with status : {self.prediction.status} \n\n"
            if replicate_error_message and self.prediction.status != "canceled":
                message_str += (
                    f"There may be an issue with the parameters provided for the model '{self.model_name_withouth_version}'. \n\n"
                    "Please review your input to ensure it aligns with the expected format. \n\n"
                    "For reference, you can review the examples here: \n"
                    f"https://replicate.com/{self.model_name_withouth_version}/examples\n\n"
                    f"Error message from Replicate: {replicate_error_message}"
                )
            exception = Exception(message_str)
            # Flag consumed by the launcher's error handling — confirm upstream.
            exception.rollback_not_needed = True
            raise exception
        output = self.prediction.output
        self.metrics = self.prediction.metrics
        isUriOutput = output_format == "uri"
        if output_type == "array" and output_array_display == "concatenate":
            output = "".join(output)
        elif output_type == "array":
            items_type = output_schema.get("items").get("type")
            items_format = output_schema.get("items").get("format")
            isUriOutput = items_format == "uri"
            # NOTE(review): no-op assignment kept for byte-fidelity.
            output = output
        elif output_type == "string":
            if isinstance(output, list):
                output = "".join(output)
        else:
            # Unknown output type: normalise to a list.
            output = [output]
        event = ProcessorEvent(self, output)
        self.notify(EventType.STREAMING, event)
        if isUriOutput:
            # Mirror every URI into our own storage so links do not expire.
            if isinstance(output, list):
                new_output = []
                for uri in output:
                    new_uri = self.upload_replicate_uri_to_storage(uri)
                    new_output.append(new_uri)
                output = new_output
            else:
                output = self.upload_replicate_uri_to_storage(output)
        return output

    def upload_replicate_uri_to_storage(self, uri):
        """Download `uri` and re-upload it to configured storage; returns the
        new URL, or the original URI when S3 is disabled or the file
        extension cannot be determined."""
        if not is_s3_enabled():
            return uri
        storage = self.get_storage()
        timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S%f")
        extension = None
        try:
            parsed = urlparse(uri)
            path = parsed.path
            if "." in path:
                extension = path.split(".")[-1]
            else:
                logging.warning("No extension found in URI: %s", uri)
        except Exception as e:
            logging.warning("Error extracting extension from URI (%s): %s", uri, str(e))
        if not extension:
            logging.warning("Aborting Upload - No extension found in URI: %s", uri)
            return uri
        filename = f"{self.name}-{timestamp_str}.{extension}"
        file = stream_download_file_as_binary(uri)
        url = storage.save(filename, file)
        return url

    def _get_nested_input_schema_property(self, property_name, nested_key):
        # Safe nested lookup into the model's input schema; returns None when
        # any level is missing.
        return (
            get_input_schema_from_open_API_schema(self.schema.get("schema", {}))
            .get("properties", {})
            .get(property_name, {})
            .get(nested_key)
        )

    def cancel(self):
        # Best-effort remote cancellation of the in-flight prediction.
        api_key = self._processor_context.get_value("replicate_api_key")
        api = replicate.Client(api_token=api_key)
        api.predictions.cancel(id=self.prediction.id)
================================================
FILE: packages/backend/app/processors/components/core/stable_diffusion_stabilityai_prompt_processor.py
================================================
import base64
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
from datetime import datetime
import requests
import os
from .processor_type_name_utils import ProcessorType
class StableDiffusionStabilityAIPromptProcessor(ContextAwareProcessor):
    """Text-to-image via the Stability AI REST API (SDXL 1.0 engine).

    Persists the first generated image to storage and returns its URL.
    """

    processor_type = ProcessorType.STABLE_DIFFUSION_STABILITYAI_PROMPT

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.prompt = config.get("prompt")
        # NOTE(review): the first size component is treated as height —
        # confirm this matches the frontend's "WxH"/"HxW" convention.
        dimensions = config.get("size", "1024x1024").split("x")
        self.height = int(dimensions[0])
        self.width = int(dimensions[1])
        self.style_preset = config.get("style_preset", "")
        self.samples = 1
        self.engine_id = "stable-diffusion-xl-1024-v1-0"
        self.api_host = os.getenv(
            "STABLE_DIFFUSION_STABILITYAI_API_HOST", "https://api.stability.ai"
        )

    def prepare_and_process_response(self, response):
        """Decode the first base64 artifact, persist it as PNG, return its URL."""
        if response.status_code != 200:
            raise Exception("Non-200 response: " + str(response.text))
        payload = response.json()
        image_bytes = base64.b64decode(payload["artifacts"][0]["base64"])
        storage = self.get_storage()
        timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S%f")
        filename = f"{self.name}-{timestamp_str}.png"
        return storage.save(filename, image_bytes)

    def setup_data_to_send(self):
        """Build the request body; fall back to the upstream output as prompt."""
        upstream = self.get_input_processor()
        if upstream is not None and (self.prompt is None or len(self.prompt) == 0):
            self.prompt = upstream.get_output(self.get_input_node_output_key())
        return {
            "text_prompts": [{"text": f"{self.prompt}"}],
            "cfg_scale": 7,
            "height": self.height,
            "width": self.width,
            "samples": self.samples,
            "steps": 30,
        }

    def process(self):
        body = self.setup_data_to_send()
        api_key = self._processor_context.get_value("stabilityai_api_key")
        response = requests.post(
            f"{self.api_host}/v1/generation/{self.engine_id}/text-to-image",
            headers={
                "Content-Type": "application/json",
                "Accept": "application/json",
                "Authorization": f"Bearer {api_key}",
            },
            json=body,
        )
        return self.prepare_and_process_response(response)

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/core/stable_video_diffusion_replicate.py
================================================
import os
from urllib.parse import urlparse
from ...context.processor_context import ProcessorContext
from ..processor import ContextAwareProcessor
import replicate
from .processor_type_name_utils import ProcessorType
class StableVideoDiffusionReplicaterocessor(ContextAwareProcessor):
    """Image-to-video via Replicate's stable-video-diffusion model.

    NOTE(review): the class name is missing the 'P' of 'Processor'; it is
    kept as-is because renaming would break any external references.
    """

    processor_type = ProcessorType.STABLE_VIDEO_DIFFUSION_REPLICATE
    stable_video_diffusion_model = "stability-ai/stable-video-diffusion:3f0457e4619daac51203dedb472816fd4af51f3149fa7a9e0b5ffcf1b8172438"

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.length = config.get("length", "14_frames_with_svd")
        self.frames_per_second = config.get("frames_per_second", "6")

    def process(self):
        upstream = self.get_input_processor()
        source_image = None
        if upstream is not None:
            source_image = upstream.get_output(self.get_input_node_output_key())
        # Existing contract: validation problems are RETURNED as strings,
        # not raised.
        if source_image is None:
            return "No image provided."
        if not self.is_valid_url(source_image):
            return "Invalid URL provided."
        api_key = self._processor_context.get_value("replicate_api_key")
        client = replicate.Client(api_token=api_key)
        return client.run(
            StableVideoDiffusionReplicaterocessor.stable_video_diffusion_model,
            input={
                "cond_aug": 0.02,
                "decoding_t": 7,
                "input_image": source_image,
                "video_length": self.length,
                "sizing_strategy": "maintain_aspect_ratio",
                "motion_bucket_id": 127,
                "frames_per_second": int(self.frames_per_second),
            },
        )

    def is_valid_url(self, url):
        """True when the string parses with both a scheme and a host."""
        try:
            parsed = urlparse(url)
            return all([parsed.scheme, parsed.netloc])
        except Exception:
            return False

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/core/transition_processor.py
================================================
from .processor_type_name_utils import ProcessorType
from ..processor import BasicProcessor
class TransitionProcessor(BasicProcessor):
    """Forwards its single upstream output unchanged (a no-op link node)."""

    processor_type = ProcessorType.TRANSITION

    def __init__(self, config):
        super().__init__(config)

    def process(self):
        upstream = self.get_input_processor()
        # No upstream connection -> empty output.
        if upstream is None:
            return ""
        return upstream.get_output(self.get_input_node_output_key())
================================================
FILE: packages/backend/app/processors/components/core/url_input_processor.py
================================================
import random
from bs4 import BeautifulSoup
import requests
from ....utils.processor_utils import is_valid_url
from ..processor import BasicProcessor
from .processor_type_name_utils import ProcessorType
import logging
from markdownify import markdownify
class URLInputProcessor(BasicProcessor):
    """Fetches a web page and returns its content as markdown or HTML.

    Optional inputs let the caller keep only elements matching CSS selectors,
    remove others, and choose whether HTML tags/attributes are preserved.

    Fix: `get_random_user_agent` was defined with neither `self` nor
    `@staticmethod`, so it only worked because it was always invoked on the
    class; calling it on an instance would have raised a TypeError. It is now
    a proper @staticmethod.
    """

    WAIT_TIMEOUT = 60
    GET_TIMEOUT = 20
    processor_type = ProcessorType.URL_INPUT
    # Rotated per request to reduce the chance of naive bot blocking.
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.1 Safari/605.1.15",
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:97.0) Gecko/20100101 Firefox/97.0",
    ]

    def __init__(self, config):
        super().__init__(config)

    @staticmethod
    def get_random_user_agent():
        """Return one of USER_AGENTS at random."""
        return random.choice(URLInputProcessor.USER_AGENTS)

    def fetch_content_simple(self):
        """
        Fetches the website content using a simple GET request.
        Returns the response body as text, or None on any request failure.
        """
        try:
            headers = {"User-Agent": URLInputProcessor.get_random_user_agent()}
            response = requests.get(self.url, headers=headers, timeout=self.GET_TIMEOUT)
            response.raise_for_status()
            return response.text
        except requests.RequestException as e:
            logging.warning(f"Failed to fetch content using simple GET: {e}")
            return None

    def process(self):
        """Validate the URL input, fetch the page, and post-process it."""
        self.url = self.get_input_by_name("url")
        # NOTE(review): loading_mode/effective_load_mode are recorded but only
        # the simple-GET path is exercised in this method — confirm whether a
        # browser-based loader consumes these elsewhere.
        self.loading_mode = self.get_input_by_name("loading_mode", "browser")
        self.effective_load_mode = self.loading_mode
        # Validate URL input
        if not self.url or not isinstance(self.url, str) or self.url.strip() == "":
            raise Exception("No URL provided.", "noURLProvided")
        self.url = self.url.strip()
        self.original_url = self.url
        if not (self.url.startswith("https://") or self.url.startswith("http://")):
            logging.warning(
                "URL does not start with 'https://' or 'http://' - compensating by prepending 'https://'."
            )
            self.url = "https://" + self.url
        if not is_valid_url(self.url):
            logging.warning(f"Invalid URL: {self.url}")
            raise Exception(
                f"The provided URL '{self.original_url}' is not valid.\n\n"
                "Please ensure the URL follows the correct format, e.g., 'https://www.example.com' or 'https://example.com'."
            )
        # Get additional parameters
        self.selectors = self.get_input_by_name("selectors", [])
        self.selectors_to_remove = self.get_input_by_name("selectors_to_remove", [])
        self.with_html_tags = self.get_input_by_name("with_html_tags", False)
        self.with_html_attributes = self.get_input_by_name(
            "with_html_attributes", False
        )
        response = None
        task_data = {
            "url": self.url,
            "selectors": self.selectors,
            "selectors_to_remove": self.selectors_to_remove,
            "with_html_tags": self.with_html_tags,
            "with_html_attributes": self.with_html_attributes,
        }
        content = self.fetch_content_simple()
        response = self.process_content_with_beautiful_soup(content, task_data)
        return response

    def process_content_with_beautiful_soup(self, content, task_data):
        """
        Process the HTML content using BeautifulSoup while considering the following parameters:
          - selectors: a list of CSS selectors; if provided, only matching elements are kept.
          - selectors_to_remove: a list of CSS selectors for elements that should be removed.
          - with_html_tags: if True, the returned result will include HTML tags; otherwise,
            the HTML is converted to markdown text.
          - with_html_attributes: if True (and with_html_tags is True), HTML attributes will
            be kept; otherwise, they will be stripped.
        """
        if not content:
            return ""
        soup = BeautifulSoup(content, "html.parser")
        # Accept a single selector string as a one-element list.
        selectors = task_data.get("selectors", [])
        if isinstance(selectors, str):
            selectors = [selectors]
        selectors_to_remove = task_data.get("selectors_to_remove", [])
        if isinstance(selectors_to_remove, str):
            selectors_to_remove = [selectors_to_remove]
        # Remove unwanted elements first so they cannot be selected below.
        for selector in selectors_to_remove:
            for element in soup.select(selector):
                element.decompose()
        if selectors:
            selected_elements = soup.select(", ".join(selectors))
            # Fall back to the whole document when nothing matches.
            if not selected_elements:
                selected_elements = [soup]
        else:
            selected_elements = [soup]
        with_html_tags = task_data.get("with_html_tags", False)
        with_html_attributes = task_data.get("with_html_attributes", False)
        if with_html_tags:
            if not with_html_attributes:
                # Strip attributes from the selected elements and all descendants.
                for element in selected_elements:
                    if hasattr(element, "attrs"):
                        element.attrs = {}
                    for tag in element.find_all(True):
                        tag.attrs = {}
            html_output = "".join(str(element) for element in selected_elements)
            return html_output
        else:
            html_output = "".join(str(element) for element in selected_elements)
            text_output = markdownify(html_output)
            return text_output
================================================
FILE: packages/backend/app/processors/components/core/youtube_transcript_input_processor.py
================================================
import logging
from ...utils.retry_mixin import RetryMixin
from ...exceptions import LightException
from ..processor import BasicProcessor
from youtube_transcript_api import (
YouTubeTranscriptApi,
TranscriptsDisabled,
NoTranscriptFound,
VideoUnavailable,
)
from .processor_type_name_utils import ProcessorType
class YoutubeTranscriptInputProcessor(BasicProcessor, RetryMixin):
    """Fetch the transcript of a YouTube video in a requested language,
    falling back to a translated transcript when none exists natively."""

    processor_type = ProcessorType.YOUTUBE_TRANSCRIPT_INPUT

    def __init__(self, config):
        super().__init__(config)
        # RetryMixin settings: two attempts total, no delay between them.
        self.max_retries = 2
        self.retry_delay = 0

    def get_video_id(self):
        """Extract the video id from a ``watch?v=`` or ``youtu.be/`` URL.

        Raises:
            LightException: when the URL matches neither supported shape.
        """
        if "watch?v=" in self.url:
            return self.url.split("watch?v=")[-1].split("&")[0]
        elif "youtu.be/" in self.url:
            return self.url.split("youtu.be/")[-1].split("?")[0]
        else:
            raise LightException(f"Invalid YouTube URL {self.url}")

    def process_with_youtube_transcript_api(self):
        """Fetch the transcript and join its entries into one string.

        Raises:
            Exception: when the transcript is disabled or missing, the video
                is unavailable, or any other retrieval failure occurs.
        """
        video_id = self.get_video_id()
        try:
            transcript_data = self.get_transcript(video_id)
        except (TranscriptsDisabled, NoTranscriptFound) as e:
            logging.warning(
                f"Transcript not available or disabled for video {self.url}"
            )
            logging.debug(e)
            raise Exception(f"No transcription found for {self.url}")
        except VideoUnavailable as e:
            logging.warning(f"Video is unavailable")
            logging.debug(e)
            raise Exception(f"Video is unavailable for {self.url}")
        except Exception as e:
            logging.warning(f"Failed to retrieve transcript")
            logging.debug(e)
            raise Exception(self.create_no_transcript_error_message(e))
        # Each transcript entry is a dict exposing a "text" field.
        content = " ".join([entry["text"] for entry in transcript_data])
        if not content:
            raise Exception(f"No transcription found for {self.url}")
        return content

    def get_transcript(self, video_id):
        """Attempts to get the transcript in the requested language or translate if not available."""
        try:
            # Try to get the transcript in the requested language
            return YouTubeTranscriptApi.get_transcript(
                video_id, languages=[self.language]
            )
        except NoTranscriptFound:
            # If transcript in the requested language is not found, try to find a translatable one
            return self.get_translatable_transcript(video_id)
        except Exception as e:
            logging.debug(f"Failed to retrieve transcript with first proxy")
            logging.debug(e)
            # NOTE(review): despite the log text, no proxy is swapped here —
            # this is simply one more identical attempt for transient errors.
            return YouTubeTranscriptApi.get_transcript(
                video_id, languages=[self.language]
            )

    def get_translatable_transcript(self, video_id):
        """Finds a translatable transcript and translates it to the requested language."""
        try:
            # List all transcripts for the video
            transcripts = YouTubeTranscriptApi.list_transcripts(video_id)
            # Find an auto-generated, translatable transcript
            for transcript in transcripts:
                if transcript.is_translatable:
                    return transcript.translate(self.language).fetch()
            # Raise an exception if no translatable transcript is found
            raise NoTranscriptFound(
                f"No translatable transcript available for video {video_id}"
            )
        except Exception as e:
            logging.warning(f"Failed to find a translatable transcript")
            logging.debug(e)
            raise

    def create_no_transcript_error_message(self, e):
        """Build a user-facing error message from the private fields the
        youtube_transcript_api exceptions carry (best effort via getattr)."""
        requested_languages = getattr(e, "_requested_language_codes", None)
        transcript_data = getattr(e, "_transcript_data", None)
        error_message = f"Failed to retrieve transcript for {self.url} \n\nRequested Language: {requested_languages} \n\n{transcript_data}"
        return error_message

    def retrieve_transcript(self, url, language):
        """Store the target url/language on the instance and fetch the
        transcript with retries.

        Raises:
            Exception: when no URL is provided or no transcript is found.
        """
        self.url = url
        self.language = language
        if not self.url:
            raise Exception("No URL provided")
        content = self.run_with_retry(self.process_with_youtube_transcript_api)
        if not content:
            raise Exception(f"No transcription found for {self.url}")
        logging.info(f"Transcription for {self.url} retrieved successfully")
        return content

    def process(self):
        """Node entry point: read url/language inputs and return the transcript."""
        url = self.get_input_by_name("url")
        language = self.get_input_by_name("language")
        logging.info(language)
        return self.retrieve_transcript(url, language)
================================================
FILE: packages/backend/app/processors/components/extension/__init__.py
================================================
================================================
FILE: packages/backend/app/processors/components/extension/claude_anthropic_processor.py
================================================
import logging
from datetime import datetime
import anthropic
from ...context.processor_context import ProcessorContext
from ..model import Field, NodeConfig, Option, Condition, ConditionGroup
from .extension_processor import ContextAwareExtensionProcessor
from ...launcher.processor_event import ProcessorEvent
from ...launcher.event_type import EventType
class ClaudeAnthropicProcessor(ContextAwareExtensionProcessor):
    """Streams a chat completion from the Anthropic Messages API, with an
    optional extended-thinking mode on the models that support it."""

    processor_type = "claude-anthropic-processor"

    # Per-model output-token ceilings. The "max_tokens_thinking" value is
    # used instead of "max_tokens" when the thinking toggle is on (see
    # process()).
    model_config_map = {
        "claude-3-7-sonnet-latest": {
            "max_tokens": 8192,
            "max_tokens_thinking": 64000,
        },
        "claude-3-5-haiku-latest": {
            "max_tokens": 8192,
            "max_tokens_thinking": 8192,
        },
        "claude-3-5-sonnet-latest": {
            "max_tokens": 8192,
            "max_tokens_thinking": 8192,
        },
        "claude-3-opus-latest": {
            "max_tokens": 4096,
            "max_tokens_thinking": 4096,
        },
        "claude-3-haiku-20240307": {
            "max_tokens": 4096,
            "max_tokens_thinking": 4096,
        },
        "claude-3-5-sonnet-20240620": {
            "max_tokens": 8192,
            "max_tokens_thinking": 8192,
        },
        "claude-opus-4-0": {
            "max_tokens": 32000,
            "max_tokens_thinking": 32000,
        },
        "claude-sonnet-4-0": {
            "max_tokens": 64000,
            "max_tokens_thinking": 64000,
        },
    }

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        # Accumulates "thinking" deltas separately from the visible answer.
        self.reasoning_content = ""

    def get_node_config(self):
        """Describe the node UI: prompt/context inputs, model picker, the
        thinking toggle with its token budget, and temperature."""
        # Conditions
        claude_thinking_condition = Condition(
            field="model",
            operator="in",
            value=["claude-3-7-sonnet-latest", "claude-opus-4-0", "claude-sonnet-4-0"],
        )
        thinking_enabled_condition = Condition(
            field="thinking", operator="equals", value=True
        )
        # Budget slider is shown only when a thinking-capable model is
        # selected AND the thinking toggle is on.
        budget_token_condition = ConditionGroup(
            conditions=[claude_thinking_condition, thinking_enabled_condition],
            logic="AND",
        )

        # Fields
        prompt = Field(
            name="prompt",
            label="prompt",
            type="textarea",
            required=True,
            placeholder="InputTextPlaceholder",
            hasHandle=True,
        )
        prompt_context = Field(
            name="context",
            label="context",
            type="textfield",
            placeholder="InputTextPlaceholder",
            hasHandle=True,
            description="Additional context that will be used to answer your prompt.",
        )
        temperature = Field(
            name="temperature",
            label="temperature",
            type="slider",
            min=0,
            max=1,
            defaultValue=1,
            placeholder="InputTextPlaceholder",
            description="Use temperature closer to 0.0 for analytical tasks, and closer to 1.0 for creative tasks.",
        )
        budget_token = Field(
            name="budget_tokens",
            label="budget_tokens",
            type="slider",
            defaultValue=1024,
            max=63999,
            min=1024,
            condition=budget_token_condition,
            description=(
                "Determines how many tokens Claude can use for its internal reasoning process. "
                "Larger budgets can enable more thorough analysis for complex problems, improving response quality."
            ),
        )
        model_options = [
            Option(
                default=False,
                value="claude-3-7-sonnet-latest",
                label="Claude 3.7 Sonnet",
            ),
            Option(
                default=False,
                value="claude-3-5-haiku-latest",
                label="Claude 3.5 Haiku",
            ),
            Option(
                default=False,
                value="claude-3-5-sonnet-latest",
                label="Claude 3.5 Sonnet",
            ),
            Option(
                default=False,
                value="claude-3-opus-latest",
                label="Claude 3 Opus",
            ),
            Option(
                default=False,
                value="claude-3-haiku-20240307",
                label="Claude 3 Haiku",
            ),
            Option(
                default=False,
                value="claude-opus-4-0",
                label="Claude 4 Opus",
            ),
            Option(
                default=True,
                value="claude-sonnet-4-0",
                label="Claude 4 Sonnet",
            ),
        ]
        model = Field(
            name="model",
            label="model",
            type="select",
            options=model_options,
            required=True,
        )
        thinking = Field(
            name="thinking",
            label="thinking",
            type="boolean",
            condition=claude_thinking_condition,
        )
        fields = [
            prompt,
            prompt_context,
            model,
            thinking,
            budget_token,
            temperature,
        ]
        config = NodeConfig(
            nodeName="ClaudeAnthropic",
            processorType=self.processor_type,
            icon="AnthropicLogo",
            fields=fields,
            outputType="markdown",
            section="models",
            helpMessage="claudeAnthropichHelp",
            showHandlesNames=True,
        )
        return config

    def handle_stream_awnser(self, awnser):
        # Push the partial answer to listeners as a STREAMING event.
        event = ProcessorEvent(self, awnser)
        self.notify(EventType.STREAMING, event)

    def process(self):
        """
        Retrieve max_tokens from a map instead of the node config.
        If 'thinking' is enabled and the model supports it, we choose a different max_tokens.
        """
        prompt = self.get_input_by_name("prompt")
        prompt_context = self.get_input_by_name("context", None)
        model = self.get_input_by_name("model", "claude-3-5-sonnet-20240620")
        temperature = self.get_input_by_name("temperature", 1)
        thinking = self.get_input_by_name("thinking", False)
        # Substring check mirrors claude_thinking_condition above: only the
        # 3.7 Sonnet and 4-0 family ids keep thinking enabled.
        if "3-7" not in model and "4-0" not in model:
            thinking = False
        budget_tokens = None
        if thinking:
            budget_tokens = self.get_input_by_name("budget_tokens", 1024)
        # Unknown model ids fall back to the 3.5 Sonnet (2024-06-20) limits.
        model_config = ClaudeAnthropicProcessor.model_config_map.get(
            model,
            ClaudeAnthropicProcessor.model_config_map["claude-3-5-sonnet-20240620"],
        )
        if thinking:
            max_tokens = model_config["max_tokens_thinking"]
        else:
            max_tokens = model_config["max_tokens"]
        if prompt is None:
            return None
        api_key = self._processor_context.get_value("anthropic_api_key")
        if api_key is None:
            raise Exception("No Anthropic API key found")
        client = anthropic.Anthropic(api_key=api_key)
        awnser = ""
        if prompt_context is not None:
            messages = [
                {
                    "role": "user",
                    "content": f"Context: {prompt_context} \n Prompt: {prompt}",
                }
            ]
        else:
            messages = [{"role": "user", "content": prompt}]
        stream_kwargs = {
            "model": model,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "messages": messages,
        }
        if thinking:
            stream_kwargs["thinking"] = {
                "budget_tokens": budget_tokens,
                "type": "enabled",
            }
        with client.messages.stream(**stream_kwargs) as stream:
            try:
                # NOTE(review): current_block_type is assigned but never read
                # afterwards — candidate for removal.
                current_block_type = None
                for event in stream:
                    if event.type == "content_block_start":
                        current_block_type = event.content_block.type
                    elif event.type == "content_block_delta":
                        # Thinking deltas are collected separately; only text
                        # deltas are streamed back to the client.
                        if event.delta.type == "thinking_delta":
                            self.reasoning_content += event.delta.thinking
                        elif event.delta.type == "text_delta":
                            awnser += event.delta.text
                            self.handle_stream_awnser(awnser)
                    elif event.type == "message_stop":
                        break
            except Exception as e:
                logging.error(f"An error occurred during streaming : {e}")
                raise Exception("An error occurred during streaming")
            finally:
                stream.close()
        return awnser

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/deepseek_processor.py
================================================
from ...launcher.event_type import EventType
from ...launcher.processor_event import ProcessorEvent
from ...context.processor_context import ProcessorContext
from ..model import Field, NodeConfig, Option
from .extension_processor import ContextAwareExtensionProcessor
from openai import OpenAI
class DeepSeekProcessor(ContextAwareExtensionProcessor):
    """Streams chat completions from the DeepSeek API (OpenAI-compatible)."""

    processor_type = "deepseek-processor"
    streaming = True

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        # Accumulates the chain-of-thought emitted by the reasoner model.
        self.reasoning_content = ""

    def get_node_config(self):
        """Describe the node UI: model picker plus context and prompt inputs."""
        model_choices = [
            Option(value="deepseek-chat", label="V3", default=False),
            Option(value="deepseek-reasoner", label="R1", default=True),
        ]
        model = Field(
            name="model",
            type="option",
            options=model_choices,
            required=True,
        )
        context = Field(
            name="context",
            label="context",
            type="textfield",
            placeholder="ContextPlaceholder",
            required=False,
            hasHandle=True,
        )
        prompt = Field(
            name="prompt",
            label="prompt",
            type="textarea",
            placeholder="PromptPlaceholder",
            required=True,
            hasHandle=True,
        )
        return NodeConfig(
            nodeName="DeepSeek",
            processorType=self.processor_type,
            icon="DeepSeekLogo",
            fields=[model, context, prompt],
            outputType="text",
            section="models",
            helpMessage="deepSeekHelp",
            showHandlesNames=True,
        )

    def process(self):
        """Send the prompt (with optional context) to DeepSeek and return
        the answer, streaming partial results as STREAMING events."""
        prompt = self.get_input_by_name("prompt")
        context = self.get_input_by_name("context", "")
        model = self.get_input_by_name("model")
        if prompt is None:
            return None

        api_key = self._processor_context.get_value("deepseek_api_key")
        if api_key is None:
            raise Exception("No DeepSeek API key found")

        client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": f"{context} {prompt}"}],
            stream=self.streaming,
        )
        if not self.streaming:
            return response.choices[0].message.content

        final_response = ""
        for chunk in response:
            delta = chunk.choices[0].delta
            # The reasoner model interleaves reasoning deltas with the answer.
            reasoning = getattr(delta, "reasoning_content", None)
            if reasoning is not None:
                self.reasoning_content += reasoning
            if not delta.content:
                continue
            final_response += delta.content
            self.notify(EventType.STREAMING, ProcessorEvent(self, final_response))
        return final_response

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/document_to_text_processor.py
================================================
import logging
from queue import Queue
import requests
from ....tasks.task_exception import TaskAlreadyRegisteredError
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from ....tasks.thread_pool_task_manager import add_task, register_task_processor
from ....utils.processor_utils import (
create_temp_file_with_bytes_content,
get_max_file_size_in_mb,
is_accepted_url_file_size,
is_s3_file,
is_valid_url,
)
from ....tasks.task_utils import wait_for_result
from ..model import NodeConfig
from .extension_processor import BasicExtensionProcessor
from langchain.document_loaders import (
UnstructuredPDFLoader,
UnstructuredHTMLLoader,
CSVLoader,
JSONLoader,
TextLoader,
PyMuPDFLoader,
)
class DocumentToText(BasicExtensionProcessor):
    """Download a document from a URL and extract its text content.

    Supported formats are mapped to LangChain loaders (PDF, plain text,
    CSV, HTML, JSON). Parsing runs in a background task so a stuck loader
    cannot block the caller past WAIT_TIMEOUT.
    """

    processor_type = "document-to-text-processor"
    # Seconds to wait for the background loader before giving up.
    WAIT_TIMEOUT = 60
    # Seconds before the HTTP download itself is aborted.
    DOWNLOAD_TIMEOUT = 60

    def __init__(self, config):
        super().__init__(config)
        # Maps a Content-Type to the LangChain loader class used to parse it.
        self.loaders = {
            "application/pdf": PyMuPDFLoader,
            "text/plain": TextLoader,
            "text/csv": CSVLoader,
            "text/html": UnstructuredHTMLLoader,
            "application/json": JSONLoader,
        }
        self.accepted_mime_types = self.loaders.keys()

    def get_node_config(self) -> NodeConfig:
        """Describe the node UI: a single required document URL field."""
        urlField = (
            FieldBuilder()
            .set_name("document_url")
            .set_label("document_url")
            .set_type("textfield")
            .set_required(True)
            .set_placeholder("URLPlaceholder")
            .set_has_handle(True)
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("DocumentToText")
            .set_processor_type(self.processor_type)
            .set_icon("FaFile")
            .set_section("input")
            .set_help_message("documentToTextHelp")
            .set_show_handles(True)
            .set_output_type("text")
            .set_default_hide_output(True)
            .add_field(urlField)
            .build()
        )

    def get_loader_for_mime_type(self, mime_type, path):
        """Return an instance of the loader class associated with the given
        mime_type, or None when the mime type is not supported."""
        loader_class = self.loaders.get(mime_type)
        if loader_class:
            return loader_class(file_path=path)
        return None

    def load_document(self, loader):
        """Run the loader in the background task pool and wait for its result.

        Raises:
            TimeoutError: when the loader does not finish in time.
        """
        results_queue = Queue()
        add_task("document_loader", loader, results_queue)
        document = None
        try:
            document = wait_for_result(results_queue)
        except TimeoutError as e:
            # Chain the original timeout for easier debugging.
            raise TimeoutError("Timeout - The document took too long to load") from e
        return document

    @staticmethod
    def document_loader_task(loader):
        # Executed inside the worker thread.
        return loader.load()

    def register_background_task(self):
        """Register the loader task processor; already-registered is fine."""
        try:
            register_task_processor("document_loader", self.document_loader_task)
        except TaskAlreadyRegisteredError:
            pass

    def process(self):
        """Validate the URL, download the file and return its extracted text
        (None when the loader produced no documents).

        Raises:
            ValueError: invalid URL, oversized file, bad status code, or
                unsupported file type.
        """
        url = self.get_input_by_name("document_url")
        if not is_valid_url(url):
            raise ValueError("Invalid URL")
        if not is_s3_file(url) and not is_accepted_url_file_size(url):
            raise ValueError(
                f"File size is too large (Max : {get_max_file_size_in_mb()})"
            )
        # Bound the download so an unresponsive server cannot hang the node.
        r = requests.get(url, timeout=self.DOWNLOAD_TIMEOUT)
        if r.status_code != 200:
            raise ValueError(
                "Check the url of your file; returned status code %s" % r.status_code
            )
        mime_type = r.headers.get("Content-Type")
        if not is_s3_file(url) and mime_type not in self.accepted_mime_types:
            raise ValueError("The file type is not supported.")
        temp_file, temp_dir = create_temp_file_with_bytes_content(r.content)
        file_path = str(temp_file)
        loader = self.get_loader_for_mime_type(mime_type, file_path)
        if loader is None:
            # S3 files skip the mime check above; fail clearly here instead
            # of crashing later on a None loader inside the worker.
            temp_dir.cleanup()
            raise ValueError("The file type is not supported.")
        self.register_background_task()
        try:
            document = self.load_document(loader)
            if len(document) > 0:
                output = ""
                for doc in document:
                    output += doc.page_content
                return output
            else:
                return None
        except Exception as e:
            logging.warning(f"Failed to load document from URL: {e}")
            raise e
        finally:
            temp_dir.cleanup()
================================================
FILE: packages/backend/app/processors/components/extension/extension_processor.py
================================================
from ..model import NodeConfig
from ...context.processor_context import ProcessorContext
from ..processor import BasicProcessor, ContextAwareProcessor
class ExtensionProcessor:
    """Base interface for extension processors."""

    def get_node_config(self) -> NodeConfig:
        # Interface hook: concrete processors return the NodeConfig that
        # describes their UI. This stub intentionally returns None.
        pass
class DynamicExtensionProcessor:
    """Base interface for dynamic extension processors - These nodes config are populated by an API call after a user choice"""

    def get_dynamic_node_config(self, data) -> NodeConfig:
        # Interface hook: build a NodeConfig from the user's choice carried
        # in `data`. This stub intentionally returns None.
        pass
class BasicExtensionProcessor(ExtensionProcessor, BasicProcessor):
    """A basic extension processor that does not depend on user-specific parameters.

    Inherits basic processing capabilities from BasicProcessor and schema
    handling from ExtensionProcessor.

    Args:
        config (dict): Configuration dictionary for processor setup.
    """

    def __init__(self, config):
        super().__init__(config)
class ContextAwareExtensionProcessor(ExtensionProcessor, ContextAwareProcessor):
    """An extension processor that requires context about the user, such as
    user-specific settings or keys.

    This class supports context-aware processing by incorporating user
    context into the processing flow.

    Args:
        config (dict): Configuration dictionary for processor setup.
        context (ProcessorContext, optional): Context object containing
            user-specific parameters. Defaults to None.
    """

    def __init__(self, config, context: ProcessorContext = None):
        super().__init__(config)
        # Stored for subclasses that need per-user values (e.g. API keys).
        self._processor_context = context
================================================
FILE: packages/backend/app/processors/components/extension/generate_number_processor.py
================================================
import random
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from ...context.processor_context import ProcessorContext
from .extension_processor import (
ContextAwareExtensionProcessor,
DynamicExtensionProcessor,
)
class GenerateNumberProcessor(
    ContextAwareExtensionProcessor, DynamicExtensionProcessor
):
    """Generates a random integer within a user-configurable [min, max] range."""

    processor_type = "generate-number-processor"

    # Fallbacks used when the inputs are missing. Referenced by both
    # get_node_config() and process() so the UI defaults and the runtime
    # defaults can never drift apart (they previously disagreed: 1000 vs 500).
    DEFAULT_MIN = 0
    DEFAULT_MAX = 1000

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def get_node_config(self):
        """Describe the node UI: two numeric fields for the range bounds."""
        min_field = (
            FieldBuilder()
            .set_name("min")
            .set_label("Min")
            .set_type("numericfield")
            .set_description("minimumValueForTheRandomNumber")
            .set_default_value(self.DEFAULT_MIN)
            .build()
        )
        max_field = (
            FieldBuilder()
            .set_name("max")
            .set_label("Max")
            .set_type("numericfield")
            .set_description("maximumValueForTheRandomNumber")
            .set_default_value(self.DEFAULT_MAX)
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("Generate Number")
            .set_processor_type(self.processor_type)
            .set_section("tools")
            .set_help_message("generateNumberHelp")
            .set_show_handles(True)
            .add_field(min_field)
            .add_field(max_field)
            .set_output_type("text")
            .set_icon("GiPerspectiveDiceSix")
            .build()
        )

    def process(self):
        """Return a single-element list holding a random integer in the
        inclusive range [min, max].

        Raises:
            ValueError: when an input is not numeric or min exceeds max.
        """
        # Retrieve optional parameters; the node-config defaults are used
        # when they are not provided.
        min_val = self.get_input_by_name("min")
        max_val = self.get_input_by_name("max")
        try:
            min_val = int(min_val) if min_val is not None else self.DEFAULT_MIN
            max_val = int(max_val) if max_val is not None else self.DEFAULT_MAX
        except ValueError:
            raise ValueError("Both 'min' and 'max' should be valid numbers")
        if min_val > max_val:
            raise ValueError("'min' should not be greater than 'max'")
        # randint is inclusive on both ends.
        return [random.randint(min_val, max_val)]

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/gpt_image_processor.py
================================================
import base64
import mimetypes
import os
import re
from datetime import datetime
from io import BytesIO
from urllib.parse import unquote, urlparse
import requests
from openai import OpenAI
from ...context.processor_context import ProcessorContext
from ..model import Field, NodeConfig, Option
from ..node_config_builder import NodeConfigBuilder
from .extension_processor import (
ContextAwareExtensionProcessor,
DynamicExtensionProcessor,
)
class GPTImageProcessor(ContextAwareExtensionProcessor, DynamicExtensionProcessor):
    """Generates or edits images through the OpenAI Images API.

    The node is dynamic: a top-level "method" selector (generate/edit)
    drives which fields are shown via get_dynamic_node_config().
    """

    processor_type = "gpt-image-processor"

    # our two modes
    methods = ["generate", "edit"]

    # Seconds before an image download is aborted (previously unbounded,
    # which could hang the node on an unresponsive host).
    DOWNLOAD_TIMEOUT = 60

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        # The selected mode is fixed at construction from the node config.
        self.method = self.get_input_by_name("method")

    def get_node_config(self):
        """Initial (static) config: only the mode selector is shown."""
        # top-level mode selector
        method_options = [
            Option(default=(m == "generate"), value=m, label=m.title())
            for m in self.methods
        ]
        method_field = Field(
            name="method",
            label="mode",
            type="select",
            options=method_options,
            required=True,
        )
        return (
            NodeConfigBuilder()
            .set_node_name("GPT Image")
            .set_processor_type(self.processor_type)
            .set_icon("OpenAILogo")
            .set_help_message("gptImageHelp")
            .set_section("models")
            .add_field(method_field)
            .set_is_dynamic(True)
            .build()
        )

    # — builders for each mode's fields —
    def build_generate_config(self, builder):
        """Add the fields used by the 'generate' mode to the builder."""
        builder.add_field(
            Field(
                name="model",
                label="Model",
                type="select",
                options=[
                    Option(default=True, value="gpt-image-1", label="gpt-image-1")
                ],
                required=True,
            )
        )
        builder.add_field(
            Field(
                name="prompt",
                label="Prompt",
                type="textarea",
                required=True,
                placeholder="InputTextPlaceholder",
                hasHandle=True,
            )
        )
        builder.add_field(
            Field(
                name="size",
                label="Size",
                type="select",
                options=[
                    Option(default=True, value="auto", label="auto"),
                    Option(default=False, value="1024x1024", label="1024x1024"),
                    Option(default=False, value="1536x1024", label="1536x1024"),
                    Option(default=False, value="1024x1536", label="1024x1536"),
                ],
                required=True,
            )
        )
        builder.add_field(
            Field(
                name="quality",
                label="Quality",
                type="select",
                options=[
                    Option(default=True, value="auto", label="auto"),
                    Option(default=False, value="low", label="low"),
                    Option(default=False, value="medium", label="medium"),
                    Option(default=False, value="high", label="high"),
                ],
                required=True,
            )
        )
        builder.add_field(
            Field(
                name="background",
                label="Background",
                type="select",
                options=[
                    Option(default=True, value="opaque", label="opaque"),
                    Option(default=False, value="transparent", label="transparent"),
                ],
                required=True,
            )
        )
        builder.add_field(
            Field(
                name="moderation",
                label="Moderation",
                type="select",
                options=[
                    Option(default=False, value="auto", label="auto"),
                    Option(default=True, value="low", label="low"),
                ],
                required=True,
            )
        )
        builder.set_output_type("imageUrl")

    def build_edit_config(self, builder):
        """Add the fields used by the 'edit' mode to the builder."""
        builder.add_field(
            Field(
                name="model",
                label="Model",
                type="select",
                options=[
                    Option(default=True, value="gpt-image-1", label="gpt-image-1")
                ],
                required=True,
            )
        )
        builder.add_field(
            Field(
                name="prompt",
                label="Prompt",
                type="textarea",
                required=True,
                placeholder="InputTextPlaceholder",
                hasHandle=True,
            )
        )
        builder.add_field(
            Field(
                name="mask",
                label="Mask",
                type="fileUpload",
                hasHandle=True,
                description="gptImageMaskDescription",
            )
        )
        builder.add_field(
            Field(
                name="image",
                label="Image",
                type="fileUpload",
                hasHandle=True,
                canAddChildrenFields=True,
            )
        )
        builder.set_output_type("imageUrl")

    # Dispatch table of unbound builder functions, indexed by mode.
    method_config_builders = {
        "generate": build_generate_config,
        "edit": build_edit_config,
    }

    def get_dynamic_node_config(self, data) -> NodeConfig:
        """Build the full config for the mode the user selected."""
        method = data["method"]
        builder = (
            NodeConfigBuilder()
            .set_node_name(f"GPT Image – {method.title()}")
            .set_processor_type(self.processor_type)
            .set_icon("OpenAILogo")
            .set_section("models")
            .set_show_handles(True)
        )
        # inject the right fields (unbound function: self passed explicitly)
        self.method_config_builders[method](self, builder)
        return builder.build()

    @staticmethod
    def get_image_file_from_url(url):
        """Download an image and return it as a named BytesIO buffer, with a
        filename (and extension) suitable for the OpenAI upload API."""
        response = requests.get(url, timeout=GPTImageProcessor.DOWNLOAD_TIMEOUT)
        response.raise_for_status()
        parsed = urlparse(url)
        filename = os.path.basename(parsed.path) or "image.png"
        filename = unquote(filename)
        if "." not in filename:
            # Derive an extension from the Content-Type header; default PNG.
            ext = mimetypes.guess_extension(response.headers.get("Content-Type", ""))
            filename += ext or ".png"
        buf = BytesIO(response.content)
        buf.name = filename
        return buf

    def process(self):
        """Run the selected mode and return the stored image's URL/path."""
        prompt = self.get_input_by_name("prompt")
        model = self.get_input_by_name("model")
        api_key = self._processor_context.get_value("openai_api_key")
        if api_key is None:
            raise Exception("No OpenAI API key found")
        client = OpenAI(api_key=api_key)
        if self.method == "edit":
            # gather all image_* fields just like before
            images_fields = [
                f for f in self.fields_names if re.match(r"^image_\d+$", f)
            ]
            images_fields.insert(0, "image")
            urls = [self.get_input_by_name(fld, None) for fld in images_fields]
            urls = [u for u in urls if u]
            files = [GPTImageProcessor.get_image_file_from_url(u) for u in urls]
            mask = self.get_input_by_name("mask", None)
            if mask:
                mask = GPTImageProcessor.get_image_file_from_url(mask)
                result = client.images.edit(
                    model=model,
                    prompt=prompt,
                    image=files,
                    mask=mask,
                )
            else:
                result = client.images.edit(
                    model=model,
                    prompt=prompt,
                    image=files,
                )
        else:
            # generate
            size = self.get_input_by_name("size")
            quality = self.get_input_by_name("quality")
            background = self.get_input_by_name("background")
            moderation = self.get_input_by_name("moderation")
            result = client.images.generate(
                model=model,
                prompt=prompt,
                size=size,
                quality=quality,
                background=background,
                moderation=moderation,
            )
        # Both endpoints return base64 image data; persist it via the
        # configured storage backend and return the stored location.
        img_b64 = result.data[0].b64_json
        img_bytes = base64.b64decode(img_b64)
        storage = self.get_storage()
        fname = f"{self.name}-{datetime.now():%Y%m%d%H%M%S%f}.png"
        return storage.save(fname, img_bytes)

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/http_get_processor.py
================================================
import logging
import requests
import json
from urllib.parse import urlparse
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from ...context.processor_context import ProcessorContext
from .extension_processor import ContextAwareExtensionProcessor
class HttpGetProcessor(ContextAwareExtensionProcessor):
    """Performs a bounded HTTP(S) GET and returns the response body.

    The request is capped both in duration (max_timeout) and in body size
    (max_response_size) to protect the backend from slow or huge responses.
    """

    processor_type = "http-get-processor"
    max_timeout = 5  # Maximum timeout in seconds
    max_response_size_in_mb = 2
    max_response_size = (
        1024 * 1024 * max_response_size_in_mb
    )  # Maximum response size in bytes (2 MB)

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def get_node_config(self):
        """Describe the node UI: a required URL field plus optional headers."""
        url_field = (
            FieldBuilder()
            .set_name("url")
            .set_label("URL")
            .set_type("textfield")
            .set_required(True)
            .set_placeholder("httpGetProcessorURLPlaceholder")
            .set_description("httpGetProcessorURLDescription")
            .set_has_handle(True)
            .build()
        )
        headers_field = (
            FieldBuilder()
            .set_name("headers")
            .set_label("Headers")
            .set_type("dictionnary")
            .set_description("httpGetProcessorHeadersDescription")
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("HTTP Get")
            .set_processor_type(self.processor_type)
            .set_icon("TbHttpGet")
            .set_section("input")
            .set_help_message("httpGetProcessorHelp")
            .set_output_type("text")
            .set_show_handles(True)
            .add_field(url_field)
            .add_field(headers_field)
            .build()
        )

    def convert_headers_array_to_json(self, headers_array):
        """Convert a [{'key': ..., 'value': ...}, ...] list into a JSON string."""
        headers = {}
        for header in headers_array:
            headers[header["key"]] = header["value"]
        return json.dumps(headers)

    def process(self):
        """Run the GET request and return its body.

        Returns a one-element list holding the parsed object for JSON
        responses, or the decoded text otherwise (callers rely on this
        shape, so it is kept as-is).

        Raises:
            ValueError: missing URL or non-http(s) scheme.
            Exception: bad headers, request failure, or oversized response.
        """
        url = self.get_input_by_name("url")
        headers = self.get_input_by_name("headers")
        timeout = self.get_input_by_name("timeout")
        if not url:
            raise ValueError("URL is required.")
        # Validate URL to prevent misuse
        parsed_url = urlparse(url)
        if not parsed_url.scheme.startswith("http"):
            raise ValueError("Invalid URL scheme. Only HTTP and HTTPS are allowed.")
        # Honor a caller-supplied timeout but never exceed the hard cap.
        # (Previously the input was read and then unconditionally discarded.)
        try:
            timeout = (
                min(float(timeout), HttpGetProcessor.max_timeout)
                if timeout
                else HttpGetProcessor.max_timeout
            )
        except (TypeError, ValueError):
            timeout = HttpGetProcessor.max_timeout
        if headers:
            headers = self.convert_headers_array_to_json(headers)
            try:
                headers = json.loads(headers)
            except json.JSONDecodeError:
                raise Exception("Headers must be a valid JSON.")
        else:
            headers = {}
        try:
            response = requests.get(
                url=url,
                headers=headers,
                timeout=timeout,
                allow_redirects=False,
                stream=True,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logging.warning(f"HTTP GET request failed: {str(e)}")
            raise Exception(f"HTTP GET request failed: {str(e)}")
        # Limit the response size by streaming the body in chunks.
        content = bytes()
        total_size = 0
        try:
            for chunk in response.iter_content(chunk_size=8192):
                content += chunk
                total_size += len(chunk)
                if total_size > HttpGetProcessor.max_response_size:
                    logging.warning("Response size exceeds maximum allowed limit.")
                    raise Exception(
                        f"Response size exceeds maximum allowed limit of {HttpGetProcessor.max_response_size_in_mb} MB. If need to load file, consider using the file node in URL mode."
                    )
        finally:
            response.close()
        content_type = response.headers.get("Content-Type", "")
        if "application/json" in content_type:
            try:
                return [json.loads(content.decode(response.encoding or "utf-8"))]
            except ValueError:
                raise Exception("Failed to parse JSON response.")
        else:
            return content.decode(response.encoding or "utf-8", errors="replace")

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/open_router_processor.py
================================================
import logging
from ....env_config import is_local_environment
from ...launcher.event_type import EventType
from ...launcher.processor_event import ProcessorEvent
from ...context.processor_context import ProcessorContext
from ..model import Field, NodeConfig, Option, Condition
from .extension_processor import ContextAwareExtensionProcessor
from openai import OpenAI
import requests
from cachetools import TTLCache, cached
def load_models_from_file():
    """Load the bundled OpenRouter model list shipped in the resources folder.

    Used as an offline fallback when the live API cannot be reached.
    """
    import json
    import os

    base_dir = os.path.dirname(os.path.abspath(__file__))
    models_path = os.path.join(
        base_dir,
        "..",
        "..",
        "..",
        "..",
        "resources",
        "data",
        "openrouter_models.json",
    )
    with open(models_path, "r") as fh:
        payload = json.load(fh)
    return payload.get("data", [])
@cached(TTLCache(maxsize=1, ttl=120000))
def get_models():
    """Fetch the model catalogue from the OpenRouter API.

    Falls back to the bundled file on any failure. The TTL cache avoids
    hitting the API on every node-config build.
    """
    try:
        response = requests.get("https://openrouter.ai/api/v1/models", timeout=10)
        response.raise_for_status()
        return response.json().get("data", [])
    except Exception as e:
        logging.warning(
            f"Failed to fetch OpenRouter models - Loading from file instead: {e}"
        )
        return load_models_from_file()
@cached(TTLCache(maxsize=1, ttl=120000))
def get_text_to_image_model_ids():
    """
    Returns a list of model IDs that support text to image generation.

    Entries missing an "architecture" object are skipped instead of
    raising AttributeError (the previous `.get("architecture").get(...)`
    crashed on such entries).
    """
    available_models = get_models()
    text_image_model_ids = [
        model["id"]
        for model in available_models
        if (model.get("architecture") or {}).get("modality") == "text+image->text"
    ]
    return text_image_model_ids
class OpenRouterProcessor(ContextAwareExtensionProcessor):
    """Runs chat completions through OpenRouter, with an optional image
    input for the models whose modality accepts it."""

    processor_type = "openrouter-processor"
    streaming = True

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def get_node_config(self):
        """Build the node UI from the live OpenRouter model catalogue."""
        available_models = get_models()
        default_model_id = "google/gemma-2-9b-it:free"
        model_field = Field(
            name="model",
            label="model",
            type="select",
            required=True,
            options=[
                Option(
                    value=m["id"],
                    label=m.get("name", m["id"]),
                    default=(m["id"] == default_model_id),
                )
                for m in available_models
            ],
        )
        # Only shown when the selected model accepts image input.
        image_url = Field(
            name="image_url",
            label="Image URL",
            type="textfield",
            placeholder="InputImagePlaceholder",
            hasHandle=True,
            condition=Condition(
                field="model", operator="in", value=get_text_to_image_model_ids()
            ),
        )
        context_field = Field(
            name="context",
            label="context",
            type="textfield",
            required=False,
            placeholder="ContextPlaceholder",
            hasHandle=True,
        )
        prompt_field = Field(
            name="prompt",
            label="prompt",
            type="textarea",
            required=True,
            placeholder="PromptPlaceholder",
            hasHandle=True,
        )
        return NodeConfig(
            nodeName="OpenRouter",
            processorType=self.processor_type,
            icon="OpenRouterLogo",
            fields=[model_field, image_url, context_field, prompt_field],
            outputType="text",
            section="models",
            helpMessage="openRouterHelp",
            showHandlesNames=True,
        )

    def process(self):
        """Send the prompt to the selected model and stream back the answer."""
        prompt = self.get_input_by_name("prompt")
        context = self.get_input_by_name("context", "")
        model = self.get_input_by_name("model")
        image_url = self.get_input_by_name("image_url", None)
        if prompt is None:
            return None

        api_key = self._processor_context.get_value("openrouter_api_key")
        if api_key is None:
            raise Exception("No OpenRouter API key found")

        client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=api_key)
        if image_url is not None and model in get_text_to_image_model_ids():
            # Multimodal payload: the image first, then the prompt text.
            content = [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": prompt},
            ]
        else:
            content = f"{context} {prompt}"
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": content}],
            stream=self.streaming,
        )
        if not self.streaming:
            return response.choices[0].message.content

        final_response = ""
        for chunk in response:
            delta_text = chunk.choices[0].delta.content
            if not delta_text:
                continue
            final_response += delta_text
            self.notify(EventType.STREAMING, ProcessorEvent(self, final_response))
        return final_response

    def cancel(self):
        pass
================================================
FILE: packages/backend/app/processors/components/extension/openai_reasoning_processor.py
================================================
import logging
from app.processors.exceptions import LightException
from ...launcher.event_type import EventType
from ...launcher.processor_event import ProcessorEvent
from ...context.processor_context import ProcessorContext
from ..model import Field, FieldCondition, NodeConfig, Option
from .extension_processor import ContextAwareExtensionProcessor
from openai import OpenAI
class OpenAIReasoningProcessor(ContextAwareExtensionProcessor):
    """Node for OpenAI reasoning ("o-series") models, driven through the
    streaming Responses API."""

    processor_type = "openai-reasoning-processor"
    streaming = True
    # Only these models accept the "reasoning.effort" request parameter.
    models_with_reasoning_effort = ["o3-mini", "o4-mini", "o3"]

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def get_node_config(self):
        """Build the node UI: model selector, context/prompt inputs and a
        reasoning-effort selector shown only for models that support it."""
        context = Field(
            name="context",
            label="context",
            type="textfield",
            required=False,
            placeholder="ContextPlaceholder",
            hasHandle=True,
        )
        text = Field(
            name="prompt",
            label="prompt",
            type="textarea",
            required=True,
            placeholder="PromptPlaceholder",
            hasHandle=True,
        )
        model_options = [
            Option(
                default=True,
                value="o4-mini",
                label="o4-mini",
            ),
            Option(
                default=False,
                value="o3-mini",
                label="o3-mini",
            ),
            Option(
                default=False,
                value="o3",
                label="o3",
            ),
            Option(
                default=False,
                value="o1-pro",
                label="o1-pro",
            ),
            Option(
                default=False,
                value="o1",
                label="o1",
            ),
        ]
        model = Field(
            name="model",
            type="option",
            options=model_options,
            required=True,
        )
        reasoning_effort_options = [
            Option(
                default=False,
                value="low",
                label="low",
            ),
            Option(
                default=True,
                value="medium",
                label="medium",
            ),
            Option(
                default=False,
                value="high",
                label="high",
            ),
        ]
        reasoning_effort = Field(
            name="reasoning_effort",
            label="reasoning_effort",
            type="select",
            options=reasoning_effort_options,
            condition=FieldCondition(
                field="model",
                operator="in",
                value=OpenAIReasoningProcessor.models_with_reasoning_effort,
            ),
        )
        fields = [model, context, text, reasoning_effort]
        config = NodeConfig(
            nodeName="OpenAI o-series",
            processorType=self.processor_type,
            icon="OpenAILogo",
            fields=fields,
            outputType="text",
            section="models",
            helpMessage="openaio1Help",
            showHandlesNames=True,
        )
        return config

    def handle_stream_answer(self, answer):
        """Notify listeners with the accumulated answer so far."""
        event = ProcessorEvent(self, answer)
        self.notify(EventType.STREAMING, event)

    def process(self):
        """Stream a response from the Responses API; returns the final text,
        or None when no prompt is connected. Raises LightException on API
        failure events and Exception when no API key is configured."""
        prompt = self.get_input_by_name("prompt")
        context = self.get_input_by_name("context", "")
        model = self.get_input_by_name("model")
        reasoning_effort = self.get_input_by_name("reasoning_effort", "medium")

        if prompt is None:
            return None

        api_key = self._processor_context.get_value("openai_api_key")
        if api_key is None:
            raise Exception("No OpenAI API key found")

        client = OpenAI(api_key=api_key)

        kwargs = {
            "model": model,
            "input": [{"role": "user", "content": f"{context} {prompt}"}],
            "stream": self.streaming,
        }
        # Only supported models may receive the reasoning-effort parameter.
        if model in OpenAIReasoningProcessor.models_with_reasoning_effort:
            kwargs["reasoning"] = {"effort": reasoning_effort}

        stream = client.responses.create(**kwargs)

        final_response = ""
        for event in stream:
            # Renamed from `type` to avoid shadowing the builtin.
            event_type = event.type
            if event_type == "response.output_text.delta":
                final_response += event.delta
                self.handle_stream_answer(final_response)
            if event_type == "response.completed":
                # The completed event carries the authoritative full text.
                response_data = event.response
                final_response = response_data.output_text
            if event_type == "response.failed":
                response_data = event.response
                if not hasattr(response_data, "error"):
                    logging.warning(f"Error from OpenAI with no data: {response_data}")
                    continue
                raise LightException(
                    f"Error from OpenAI : {response_data.error.message}"
                )
            if event_type == "error":
                raise LightException(f"Error from OpenAI : {event.message}")

        return final_response

    def cancel(self):
        # No cancellable resources are held between calls.
        pass
================================================
FILE: packages/backend/app/processors/components/extension/openai_text_to_speech_processor.py
================================================
import logging
import re
from ...context.processor_context import ProcessorContext
from ..model import Field, NodeConfig, Option, Condition
from .extension_processor import ContextAwareExtensionProcessor
from openai import OpenAI
from datetime import datetime
import io
from pydub import AudioSegment
import eventlet
class OpenAITextToSpeechProcessor(ContextAwareExtensionProcessor):
    """Node that converts text to speech through the OpenAI audio API.

    Long inputs are split into <= 4096-character chunks (the API limit for
    the input field), synthesized concurrently, and merged back into a
    single mp3 that is persisted via the configured storage backend."""

    processor_type = "openai-text-to-speech-processor"

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)

    def get_node_config(self):
        """Build the node UI: text input, model/voice selectors, and an
        instruction field shown only for the gpt-4o-mini-tts model."""
        text = Field(
            name="text",
            label="text",
            type="textfield",
            required=True,
            placeholder="InputTextPlaceholder",
            hasHandle=True,
        )
        # Build the voice options from a flat list instead of ten copy-pasted
        # Option literals; "alloy" stays the default.
        voice_names = [
            "alloy",
            "ash",
            "ballad",
            "coral",
            "echo",
            "fable",
            "onyx",
            "nova",
            "sage",
            "shimmer",
        ]
        voices_options = [
            Option(default=(name == "alloy"), value=name, label=name)
            for name in voice_names
        ]
        voice = Field(
            name="voice",
            label="voice",
            type="select",
            options=voices_options,
            required=True,
        )
        model_options = [
            Option(
                default=True,
                value="gpt-4o-mini-tts",
                label="gpt-4o-mini-tts",
            ),
            Option(
                default=False,
                value="tts-1",
                label="tts-1",
            ),
            Option(
                default=False,
                value="tts-1-hd",
                label="tts-1-hd",
            ),
        ]
        model = Field(
            name="model",
            label="model",
            type="select",
            options=model_options,
            required=True,
        )
        # The free-form instruction field is only supported by gpt-4o-mini-tts.
        instructions_enabled_condition = Condition(
            field="model", operator="equals", value="gpt-4o-mini-tts"
        )
        instructions = Field(
            name="instruction",
            label="instruction",
            type="textfield",
            required=False,
            placeholder="TTSInstructionPlaceholder",
            description="TTSInstructionDescription",
            hasHandle=True,
            condition=instructions_enabled_condition,
        )
        fields = [text, model, voice, instructions]
        config = NodeConfig(
            nodeName="TextToSpeech",
            processorType=self.processor_type,
            icon="OpenAILogo",
            fields=fields,
            outputType="audioUrl",
            section="models",
            helpMessage="textToSpeechHelp",
            showHandlesNames=True,
            keywords=["Audio", "Speech", "OpenAI", "TTS"],
        )
        return config

    @staticmethod
    def split_text_into_chunks(text, max_length=4096):
        """
        Split text into chunks of up to max_length characters by packing as
        many whole sentences as possible. If a single sentence exceeds
        max_length, split it into smaller parts.

        Bug fix: declared as @staticmethod — the function has no `self`
        parameter, so instance-level calls would previously have raised
        TypeError (only class-level calls happened to work).
        """
        # Split text by sentence-ending punctuation followed by whitespace.
        sentences = re.split(r"(?<=[.!?])\s+", text)
        chunks = []
        current_sentences = []
        current_length = 0
        for sentence in sentences:
            sentence_length = len(sentence)
            # Account for the joining space when the chunk is non-empty.
            additional_length = (
                sentence_length if not current_sentences else sentence_length + 1
            )
            if current_length + additional_length <= max_length:
                # Append sentence to the current chunk.
                current_sentences.append(sentence)
                current_length += additional_length
            else:
                # Flush the current chunk if it's not empty.
                if current_sentences:
                    chunks.append(" ".join(current_sentences))
                    current_sentences = []
                    current_length = 0
                # If the sentence itself is too long, split it into parts.
                if sentence_length > max_length:
                    parts = [
                        sentence[i : i + max_length]
                        for i in range(0, sentence_length, max_length)
                    ]
                    # All full parts are separate chunks.
                    chunks.extend(parts[:-1])
                    # The last part might be shorter; it seeds the next chunk.
                    current_sentences = [parts[-1]]
                    current_length = len(parts[-1])
                else:
                    # Start a new chunk with the sentence.
                    current_sentences = [sentence]
                    current_length = sentence_length
        if current_sentences:
            chunks.append(" ".join(current_sentences))
        return chunks

    def process(self):
        """Synthesize the input text and return the URL of the stored mp3,
        or None when no text is connected or synthesis produced nothing."""
        text = self.get_input_by_name("text")
        voice = self.get_input_by_name("voice")
        model = self.get_input_by_name("model")
        instruction = self.get_input_by_name("instruction", None)

        if text is None:
            return None

        api_key = self._processor_context.get_value("openai_api_key")
        if api_key is None:
            raise Exception("No OpenAI API key found")

        client = OpenAI(api_key=api_key)

        # Split text into chunks that are each <= 4096 characters (API limit).
        chunks = OpenAITextToSpeechProcessor.split_text_into_chunks(text, 4096)

        # Limit concurrency to two in-flight synthesis requests.
        pool = eventlet.GreenPool(2)

        def create_audio_segment(chunk):
            kwargs = {
                "model": model,
                "voice": voice,
                "input": chunk,
            }
            if instruction is not None:
                kwargs["instructions"] = instruction
            response = client.audio.speech.create(**kwargs)
            if response is None:
                return None
            # Convert the response content (mp3 bytes) into an AudioSegment.
            return AudioSegment.from_file(io.BytesIO(response.content), format="mp3")

        # Process chunks concurrently; imap preserves the order of chunks.
        audio_segments = list(pool.imap(create_audio_segment, chunks))
        # Filter out any None segments.
        audio_segments = [segment for segment in audio_segments if segment is not None]
        if not audio_segments:
            return None

        # Concatenate the segments in their original order.
        merged_audio = audio_segments[0]
        for seg in audio_segments[1:]:
            merged_audio += seg

        # Export merged audio to a bytes buffer.
        merged_audio_buffer = io.BytesIO()
        merged_audio.export(merged_audio_buffer, format="mp3")
        merged_audio_buffer.seek(0)

        storage = self.get_storage()
        timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S%f")
        filename = f"{self.name}-{timestamp_str}.mp3"
        url = storage.save(filename, merged_audio_buffer.read())

        # Release the (potentially large) audio buffers eagerly.
        merged_audio_buffer.close()
        del merged_audio_buffer
        del merged_audio
        del audio_segments

        return url

    def cancel(self):
        # No cancellable resources are held between calls.
        pass
================================================
FILE: packages/backend/app/processors/components/extension/replace_text_processor.py
================================================
import logging
import re
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from .extension_processor import BasicExtensionProcessor
from ..core.processor_type_name_utils import ProcessorType
class ReplaceTextProcessor(BasicExtensionProcessor):
    """Node that replaces occurrences of a search string (plain text or
    regular expression) inside an input text, with optional
    case-insensitivity and replace-all behaviour."""

    processor_type = ProcessorType.REPLACE_TEXT

    def __init__(self, config):
        super().__init__(config)

    def get_node_config(self):
        """Build the node UI: input/search/replacement text fields plus the
        replace-all, regex and case-sensitivity toggles."""
        input_text_field = (
            FieldBuilder()
            .set_name("input_text")
            .set_label("Input Text")
            .set_type("textarea")
            .set_required(True)
            .set_placeholder("ReplaceTextInputPlaceholder")
            .set_has_handle(True)
            .build()
        )
        search_text_field = (
            FieldBuilder()
            .set_name("search_text")
            .set_label("Search Text")
            .set_type("textfield")
            .set_required(True)
            .set_placeholder("ReplaceTextSearchPlaceholder")
            .set_has_handle(True)
            .build()
        )
        replacement_text_field = (
            FieldBuilder()
            .set_name("replacement_text")
            .set_label("Replacement Text")
            .set_type("textfield")
            .set_required(True)
            .set_placeholder("ReplaceTextReplacePlaceholder")
            .set_has_handle(True)
            .build()
        )
        replace_all_field = (
            FieldBuilder()
            .set_name("replace_all")
            .set_label("Replace All Occurrences")
            .set_type("boolean")
            .set_default_value(True)
            .build()
        )
        use_regex_field = (
            FieldBuilder()
            .set_name("use_regex")
            .set_label("Use Regular Expression")
            .set_type("boolean")
            .set_default_value(False)
            .build()
        )
        case_sensitivity_field = (
            FieldBuilder()
            .set_name("case_sensitivity")
            .set_label("Case Sensitive")
            .set_type("boolean")
            .set_default_value(True)
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("ReplaceText")
            .set_processor_type(self.processor_type.value)
            .set_section("tools")
            .set_help_message("replaceTextNodeHelp")
            .set_show_handles(True)
            .set_output_type("text")
            .set_default_hide_output(False)
            .add_field(input_text_field)
            .add_field(search_text_field)
            .add_field(replacement_text_field)
            .add_field(replace_all_field)
            .add_field(case_sensitivity_field)
            .add_field(use_regex_field)
            .set_icon("MdSwapHoriz")
            .build()
        )

    def process(self):
        """Perform the replacement and return the result as a single-item
        list. Invalid user regexes log a warning and leave the text as-is."""
        input_text = self.get_input_by_name("input_text")
        search_text = self.get_input_by_name("search_text")
        replacement_text = self.get_input_by_name("replacement_text")
        replace_all = self.get_input_by_name("replace_all")
        use_regex = self.get_input_by_name("use_regex")
        case_sensitivity = self.get_input_by_name("case_sensitivity")

        flags = 0
        if not case_sensitivity:
            flags |= re.IGNORECASE

        if use_regex:
            # Regex mode: the replacement keeps normal re.sub semantics, so
            # group references such as \1 are honoured on purpose.
            try:
                pattern = re.compile(search_text, flags)
                count = 0 if replace_all else 1
                result_text = pattern.sub(replacement_text, input_text, count=count)
            except re.error as e:
                logging.warning(f"Invalid regular expression: {e}")
                result_text = input_text
        else:
            if not case_sensitivity:
                # Plain-text but case-insensitive: emulate str.replace via a
                # regex on the escaped needle.
                escaped_search_text = re.escape(search_text)
                pattern = re.compile(escaped_search_text, flags)
                count = 0 if replace_all else 1
                # Bug fix: pass the replacement through a callable so re.sub
                # treats it literally; a plain string repl would interpret
                # backslash escapes (e.g. "\1") as group references.
                result_text = pattern.sub(
                    lambda _match: replacement_text, input_text, count=count
                )
            else:
                if replace_all:
                    result_text = input_text.replace(search_text, replacement_text)
                else:
                    result_text = input_text.replace(search_text, replacement_text, 1)
        return [result_text]
================================================
FILE: packages/backend/app/processors/components/extension/stabilityai_generic_processor.py
================================================
import json
import logging
import os
from ..node_config_utils import get_sub_configuration
from ....utils.openapi_client import Client
from ....utils.processor_utils import (
stream_download_file_as_binary,
)
from ....utils.openapi_converter import OpenAPIConverter
from ....utils.openapi_reader import OpenAPIReader
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from ...context.processor_context import ProcessorContext
from ..model import NodeConfig, Option
from .extension_processor import (
ContextAwareExtensionProcessor,
DynamicExtensionProcessor,
)
from datetime import datetime
import re
class StabilityAIGenericProcessor(
    ContextAwareExtensionProcessor, DynamicExtensionProcessor
):
    """Dynamic node generated from the StabilityAI OpenAPI specification.

    One node configuration is offered per allowed API path; the fields are
    derived from the path's request schema. Endpoints whose results are
    fetched asynchronously are handled through the matching "/result/"
    polling path (spelled "pooling" throughout this codebase)."""

    processor_type = "stabilityai-generic-processor"
    openapi_file_path = "./resources/openapi/stabilityai.json"

    # Paths excluded from the node's path selector.
    paths_denied = [
        re.compile(r"/v1/"),  # Contains '/v1/'
        re.compile(r"/user/"),  # Contains 'user'
        re.compile(r"/engines/"),  # Contains 'engines'
        re.compile(r"/result/"),  # Contains 'result'
        re.compile(r"/v2alpha/"),  # Contains 'v2alpha'
        re.compile(r"/result"),
        # Temporary
        re.compile(r"/chat"),  # api returns 404 for now
    ]

    # Class-level caches, populated lazily by initialize_allowed_paths_cache.
    api_reader = None
    all_paths_cache = None
    pooling_paths_cache = None
    allowed_paths_cache = None

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        if StabilityAIGenericProcessor.allowed_paths_cache is None:
            StabilityAIGenericProcessor.initialize_allowed_paths_cache()
        self.api_host = os.getenv(
            "STABLE_DIFFUSION_STABILITYAI_API_HOST", "https://api.stability.ai"
        )
        self.path = self.get_input_by_name("path")
        self.initialize_api_config()
        self.final_node_config = self.get_dynamic_node_config(dict(path=self.path))

    @classmethod
    def initialize_allowed_paths_cache(cls):
        """Read the OpenAPI spec once and populate the path caches."""
        cls.api_reader = OpenAPIReader(StabilityAIGenericProcessor.openapi_file_path)
        paths_names = cls.api_reader.get_all_paths_names()
        cls.all_paths_cache = paths_names
        cls.pooling_paths_cache = [path for path in paths_names if "/result/" in path]
        cls.allowed_paths_cache = [
            path
            for path in paths_names
            if not cls.is_path_banned(path, cls.paths_denied)
        ]

    @staticmethod
    def is_path_banned(path, denied_patterns):
        """True when any denied pattern matches somewhere in the path."""
        return any(pattern.search(path) for pattern in denied_patterns)

    @staticmethod
    def get_pooling_path(path_selected):
        """Return the "/result/" polling path for a submission path, if any."""
        for path in StabilityAIGenericProcessor.pooling_paths_cache:
            if path.startswith(path_selected):
                return path
        return None

    @staticmethod
    def transform_path_options_labels(options):
        """Turn raw API paths into human-readable, sorted option labels.

        Bug fix: declared as @staticmethod — this function has no `self`
        parameter but is invoked as self.transform_path_options_labels(...),
        which previously raised TypeError (two positional arguments passed
        into a one-argument function).
        """
        transformed_options = []
        for option in options:
            # Remove the first path element and split the rest
            parts = re.sub(r"^/[^/]+/", "", option.label).split("/")
            # Take the last two elements, or one if alone
            if len(parts) > 1:
                label = f"{parts[-2].capitalize()} - {parts[-1].replace('-', ' ').capitalize()}"
            else:
                label = parts[-1].replace("-", " ").capitalize()
            transformed_option = Option(
                default=option.default, value=option.value, label=label
            )
            transformed_options.append(transformed_option)
        transformed_options.sort(key=lambda option: option.label)
        return transformed_options

    def get_node_config(self):
        """Build the static node UI: a single path selector; the rest of the
        fields are produced dynamically from the selected path's schema."""
        if StabilityAIGenericProcessor.allowed_paths_cache is None:
            StabilityAIGenericProcessor.initialize_allowed_paths_cache()
        path_options = [
            Option(default=False, value=name, label=name)
            for i, name in enumerate(StabilityAIGenericProcessor.allowed_paths_cache)
        ]
        path_options = self.transform_path_options_labels(path_options)
        # NOTE(review): assumes at least one allowed path exists in the spec.
        path_options[0].default = True
        path = (
            FieldBuilder()
            .set_name("path")
            .set_label("Path")
            .set_type("select")
            .set_options(path_options)
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("StabilityAI")
            .set_processor_type(self.processor_type)
            .set_icon("StabilityAILogo")
            .set_section("models")
            .set_help_message("stableDiffusionPromptHelp")
            .set_show_handles(True)
            .add_field(path)
            .set_is_dynamic(True)  # Important
            .build()
        )

    def initialize_api_config(self):
        """Resolve accept headers, polling path and response content type for
        the currently selected path."""
        response_content_path = self.path
        response_method = "post"
        self.path_accept = StabilityAIGenericProcessor.api_reader.get_path_accept(
            self.path, "post"
        )
        self.pooling_path = self.get_pooling_path(self.path)
        if self.pooling_path is not None:
            # Async endpoint: the final payload is fetched via GET on the
            # polling path, so content negotiation follows that path instead.
            response_content_path = self.pooling_path
            response_method = "get"
            self.pooling_path_accept = (
                StabilityAIGenericProcessor.api_reader.get_path_accept(
                    self.pooling_path, "get"
                )
            )
        self.response_content_type = (
            StabilityAIGenericProcessor.api_reader.get_response_content_type(
                response_content_path, response_method
            )[0]
        )
        print(f"Response content type {self.response_content_type}")

    @staticmethod
    def determine_output_type(path_accept):
        """Map an Accept header to the node's output type; None if unknown."""
        if path_accept is None:
            return None
        elif path_accept == "video/*":
            return "videoUrl"
        elif "model" in path_accept:
            return "3dUrl"
        else:
            return "imageUrl"

    def get_dynamic_node_config(self, data) -> NodeConfig:
        """Build a full node configuration from the request schema of the
        path in data["path"]."""
        if StabilityAIGenericProcessor.allowed_paths_cache is None:
            StabilityAIGenericProcessor.initialize_allowed_paths_cache()
        selected_api_path = data["path"]
        schema = StabilityAIGenericProcessor.api_reader.get_request_schema_for_path(
            selected_api_path, "post"
        )
        path_accept = StabilityAIGenericProcessor.api_reader.get_path_accept(
            selected_api_path, "post"
        )
        output_type = StabilityAIGenericProcessor.determine_output_type(path_accept)
        pooling_path = self.get_pooling_path(selected_api_path)
        if pooling_path is not None:
            # Async endpoints report their real content type on the polling
            # path, so recompute the output type from there.
            pooling_path_accept = (
                StabilityAIGenericProcessor.api_reader.get_path_accept(
                    pooling_path, "get"
                )
            )
            output_type = StabilityAIGenericProcessor.determine_output_type(
                pooling_path_accept
            )
        builder = OpenAPIConverter().convert_schema_to_node_config(schema)
        # Derive a display name from the last non-empty path component,
        # e.g. ".../generate/sd3" -> "Sd3".
        path_components = selected_api_path.split("/")
        last_component = (
            path_components[-1] if path_components[-1] else path_components[-2]
        )
        node_name = " ".join(word.capitalize() for word in last_component.split("-"))
        (
            builder.set_node_name(f"StabilityAI - {node_name}")
            .set_processor_type(self.processor_type)
            .set_icon("StabilityAILogo")
            .set_section("models")
            .set_help_message("stableDiffusionPromptHelp")
            .set_show_handles(True)
        )
        if output_type is not None:
            builder.set_output_type(output_type)
        return builder.build()

    def perform_pooling(self, client, path):
        """Poll the async result path until the client returns a payload."""
        return client.pooling(path=path, accept=self.pooling_path_accept)

    def prepare_and_process_response(self, response):
        """Persist the raw response bytes and return the stored file's URL.

        The file extension comes from the user's "output_format" input when
        present, otherwise from the negotiated response content type."""
        storage = self.get_storage()
        timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S%f")
        extension = self.get_input_by_name("output_format")
        if extension:
            filename = f"{self.name}-{timestamp_str}.{extension}"
        else:
            if "gltf-binary" in self.response_content_type:
                extension = "glb"
            else:
                extension = self.response_content_type.split("/")[-1]
            filename = f"{self.name}-{timestamp_str}.{extension}"
        url = storage.save(filename, response)
        return url

    def get_fields_from_config(self):
        """Return the active field list, resolving discriminated sub-configs
        (e.g. mode-dependent field sets) via the current input values."""
        if self.final_node_config is None:
            return []
        if isinstance(self.final_node_config, NodeConfig):
            return self.final_node_config.fields
        discriminators_values = []
        for discriminator_name in self.final_node_config.discriminatorFields:
            value = self.get_input_by_name(discriminator_name)
            discriminators_values.append(value)
        corresponding_config = get_sub_configuration(
            discriminators_values, self.final_node_config
        )
        if corresponding_config is None:
            return []
        return corresponding_config.config.fields

    def quick_filter(self, data):
        """Drop request fields that are invalid for the selected mode
        (mutates `data` in place)."""
        if "mode" in data:
            if data["mode"] == "image-to-image":
                if "aspect_ratio" in data:
                    del data["aspect_ratio"]
            if data["mode"] == "text-to-image":
                if "strength" in data:
                    del data["strength"]
                if "image" in data:
                    del data["image"]

    def process(self):
        """Submit the request (binary fields are streamed from their URLs),
        poll for async results when needed, and return the stored file URL."""
        api_key = self._processor_context.get_value("stabilityai_api_key")

        fields = self.get_fields_from_config()
        data = {field.name: self.get_input_by_name(field.name) for field in fields}
        self.quick_filter(data)

        binaryFieldNames = [field.name for field in fields if field.isBinary]
        # multipart/form-data requests need at least one file part, even a
        # dummy one, when no binary field is present.
        files = {} if len(binaryFieldNames) > 0 else {"none": (None, "")}
        for field_name in binaryFieldNames:
            if field_name not in data:
                files[field_name] = None
                continue
            # Move the value out of the form data: binary inputs are sent as
            # file parts, not form fields.
            url = data[field_name]
            del data[field_name]
            if url:
                files[field_name] = stream_download_file_as_binary(url)
            else:
                files[field_name] = None

        client = Client(
            api_token=api_key,
            base_url=self.api_host,
        )

        response = client.post(
            path=self.path, data=data, files=files, accept=self.path_accept
        )

        if self.pooling_path:
            # Async endpoint: the initial response is a JSON envelope holding
            # the generation id; substitute it into the polling path.
            response_str = response.decode("utf-8")
            response_json = json.loads(response_str)
            key_name = "id"
            key_value = response_json[key_name]
            updated_pooling_path = self.pooling_path.replace(
                "{" + key_name + "}", str(key_value)
            )
            response = self.perform_pooling(client, updated_pooling_path)

        return self.prepare_and_process_response(response)

    def cancel(self):
        # No cancellable resources are held between calls.
        pass
================================================
FILE: packages/backend/app/processors/components/extension/stable_diffusion_three_processor.py
================================================
import logging
import os
import requests
from ..node_config_builder import FieldBuilder, NodeConfigBuilder
from ...context.processor_context import ProcessorContext
from ..model import Option
from .extension_processor import ContextAwareExtensionProcessor
from datetime import datetime
class StableDiffusionThreeProcessor(ContextAwareExtensionProcessor):
    """Node for StabilityAI's Stable Diffusion 3 / 3.5 image generation
    endpoint (v2beta)."""

    processor_type = "stabilityai-stable-diffusion-3-processor"

    def __init__(self, config, context: ProcessorContext):
        super().__init__(config, context)
        self.api_host = os.getenv(
            "STABLE_DIFFUSION_STABILITYAI_API_HOST", "https://api.stability.ai"
        )

    def get_node_config(self):
        """Build the node UI: prompt, negative prompt, model and aspect-ratio
        selectors, plus a numeric seed."""
        prompt = (
            FieldBuilder()
            .set_name("prompt")
            .set_label("Prompt")
            .set_type("textfield")
            .set_required(True)
            .set_placeholder("GenericPromptPlaceholder")
            .set_has_handle(True)
            .build()
        )
        negative_prompt = (
            FieldBuilder()
            .set_name("negative_prompt")
            .set_label("Negative Prompt")
            .set_type("textfield")
            .set_placeholder("GenericNegativePromptPlaceholder")
            .set_has_handle(True)
            .build()
        )
        model_options = [
            Option(
                default=True, value="sd3.5-large", label="Stable Diffusion 3.5 Large"
            ),
            Option(
                default=False,
                value="sd3.5-large-turbo",
                label="Stable Diffusion 3.5 Large Turbo",
            ),
            Option(default=False, value="sd3-large", label="Stable Diffusion 3 Large"),
            Option(
                default=False, value="sd3-medium", label="Stable Diffusion 3 Medium"
            ),
            Option(
                default=False,
                value="sd3-large-turbo",
                label="Stable Diffusion 3 Large Turbo",
            ),
        ]
        model = (
            FieldBuilder()
            .set_name("model")
            .set_label("Model")
            .set_type("select")
            .set_options(model_options)
            .build()
        )
        aspect_ratio_options = [
            Option(default=True, value="1:1", label="1:1"),
            Option(default=False, value="16:9", label="16:9"),
            Option(default=False, value="3:2", label="3:2"),
            Option(default=False, value="2:3", label="2:3"),
            Option(default=False, value="4:5", label="4:5"),
            Option(default=False, value="5:4", label="5:4"),
            Option(default=False, value="9:16", label="9:16"),
            Option(default=False, value="9:21", label="9:21"),
            Option(default=False, value="21:9", label="21:9"),
        ]
        aspect_ratio = (
            FieldBuilder()
            .set_name("aspect_ratio")
            .set_label("Aspect Ratio")
            .set_type("select")
            .set_options(aspect_ratio_options)
            .build()
        )
        seed = (
            FieldBuilder()
            .set_name("seed")
            .set_label("Seed")
            .set_type("numericfield")
            .set_placeholder("Enter a numeric seed")
            .set_default_value(0)
            .set_has_handle(True)
            .build()
        )
        return (
            NodeConfigBuilder()
            .set_node_name("Stable Diffusion 3.5")
            .set_processor_type(self.processor_type)
            .set_icon("StabilityAILogo")
            .set_section("models")
            .set_help_message("stableDiffusionPromptHelp")
            .set_output_type("imageUrl")
            .set_show_handles(True)
            .add_field(prompt)
            .add_field(negative_prompt)
            .add_field(model)
            .add_field(aspect_ratio)
            .add_field(seed)
            .build()
        )

    def process(self):
        """Generate one image; returns the stored image URL, or None when no
        prompt is connected."""
        prompt = self.get_input_by_name("prompt")
        model = self.get_input_by_name("model")
        seed = self.get_input_by_name("seed")
        aspect_ratio = self.get_input_by_name("aspect_ratio")
        negative_prompt = self.get_input_by_name("negative_prompt")

        if prompt is None:
            return None

        api_key = self._processor_context.get_value("stabilityai_api_key")
        data_to_send = {
            "prompt": prompt,
            # NOTE(review): "sd3-turbo" is not among the selectable model
            # values (they are "sd3.5-large-turbo" / "sd3-large-turbo"), so
            # this check never suppresses the negative prompt — confirm the
            # intended behaviour for turbo models before changing it.
            "negative_prompt": negative_prompt if model != "sd3-turbo" else None,
            "model": model,
            "seed": seed,
            "aspect_ratio": aspect_ratio,
        }
        response = requests.post(
            f"{self.api_host}/v2beta/stable-image/generate/sd3",
            headers={
                "Accept": "image/*",
                "Authorization": f"Bearer {api_key}",
            },
            files={"none": ""},
            data=data_to_send,
        )
        return self.prepare_and_process_response(response)

    def prepare_and_process_response(self, response):
        """Persist the returned image bytes and return the stored URL; raise
        with the API's error text on any non-200 status."""
        if response.status_code != 200:
            logging.warning(
                f"API call to StabilityAI failed with status {response.status_code}: {response.text}"
            )
            # Bug fix: parenthesize the fallback — previously `or ""` applied
            # to the whole concatenation, so a None prompt raised TypeError
            # before the fallback could take effect.
            logging.warning("User prompt : " + (self.get_input_by_name("prompt") or ""))
            raise Exception(f"Error message from StabilityAI : \n {response.text}")
        storage = self.get_storage()
        timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S%f")
        filename = f"{self.name}-{timestamp_str}.png"
        url = storage.save(filename, response.content)
        return url

    def cancel(self):
        # No cancellable resources are held between calls.
        pass
================================================
FILE: packages/backend/app/processors/components/model.py
================================================
# generated by datamodel-codegen:
# filename: schema.json
# timestamp: 2025-05-26T04:44:24+00:00
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, RootModel
from typing_extensions import Literal
# NOTE: these classes are generated by datamodel-codegen from schema.json
# (see the header at the top of this module). Prefer regenerating over
# hand-editing. Comments below are for orientation only; docstrings are
# deliberately avoided because pydantic would surface them as schema
# descriptions.


# Catch-all root model used where the schema permits any value.
class Model(RootModel[Any]):
    root: Any


# Enumeration of UI field widget types rendered by the frontend.
class FieldType(
    RootModel[
        Literal[
            'boolean',
            'dictionnary',
            'fileUpload',
            'imageMaskCreator',
            'input',
            'inputInt',
            'inputNameBar',
            'json',
            'list',
            'nonRendered',
            'numericfield',
            'option',
            'select',
            'slider',
            'switch',
            'textToDisplay',
            'textarea',
            'textfield',
        ]
    ]
):
    root: Literal[
        'boolean',
        'dictionnary',
        'fileUpload',
        'imageMaskCreator',
        'input',
        'inputInt',
        'inputNameBar',
        'json',
        'list',
        'nonRendered',
        'numericfield',
        'option',
        'select',
        'slider',
        'switch',
        'textToDisplay',
        'textarea',
        'textfield',
    ]


# Comparison operators usable in field-visibility conditions.
class Operator(
    RootModel[
        Literal[
            'equals',
            'exists',
            'greater than',
            'in',
            'less than',
            'not equals',
            'not exists',
            'not in',
        ]
    ]
):
    root: Literal[
        'equals',
        'exists',
        'greater than',
        'in',
        'less than',
        'not equals',
        'not exists',
        'not in',
    ]


# A single choice in a select/option field.
class Option(BaseModel):
    default: Optional[bool] = None
    label: Optional[str] = None
    value: Optional[str] = None


# Kinds of output a node can emit (drives how the result is rendered).
class OutputType(
    RootModel[
        Literal[
            '3dUrl',
            'audioUrl',
            'fileUrl',
            'imageBase64',
            'imageUrl',
            'markdown',
            'pdfUrl',
            'text',
            'videoUrl',
        ]
    ]
):
    root: Literal[
        '3dUrl',
        'audioUrl',
        'fileUrl',
        'imageBase64',
        'imageUrl',
        'markdown',
        'pdfUrl',
        'text',
        'videoUrl',
    ]


# Sidebar section a node is listed under.
class SectionType(RootModel[Literal['image-generation', 'input', 'models', 'tools']]):
    root: Literal['image-generation', 'input', 'models', 'tools']


# A single field/operator/value visibility condition.
class Condition(BaseModel):
    field: Optional[str] = None
    operator: Optional[Operator] = None
    value: Optional[Any] = None


# Several conditions combined with AND/OR logic.
class ConditionGroup(BaseModel):
    conditions: Optional[List[Condition]] = None
    logic: Optional[Literal['AND', 'OR']] = None


# Either a single condition or a group of them.
class FieldCondition(RootModel[Union[Condition, ConditionGroup]]):
    root: Union[Condition, ConditionGroup]


# NodeConfig metadata without fields/outputType (used by variant configs).
class OmitNodeConfigFieldsOutputType(BaseModel):
    defaultHideOutput: Optional[bool] = None
    hasInputHandle: Optional[bool] = None
    helpMessage: Optional[str] = None
    hideFieldsIfParent: Optional[bool] = None
    icon: Optional[str] = None
    inputNames: Optional[List[str]] = None
    isBeta: Optional[bool] = None
    isDynamicallyGenerated: Optional[bool] = None
    nodeName: Optional[str] = None
    processorType: Optional[str] = None
    section: Optional[SectionType] = None
    showHandlesNames: Optional[bool] = None


# Declaration of a single input field shown on a node.
class Field(BaseModel):
    allowDecimal: Optional[bool] = None
    associatedField: Optional[str] = None
    canAddChildrenFields: Optional[bool] = None
    condition: Optional[FieldCondition] = None
    defaultValue: Optional[Any] = None
    description: Optional[str] = None
    hasHandle: Optional[bool] = None
    hidden: Optional[bool] = None
    hideIfParent: Optional[bool] = None
    isBinary: Optional[bool] = None
    isChild: Optional[bool] = None
    isLinked: Optional[bool] = None
    label: Optional[str] = None
    max: Optional[float] = None
    min: Optional[float] = None
    name: Optional[str] = None
    options: Optional[List[Option]] = None
    placeholder: Optional[str] = None
    required: Optional[bool] = None
    step: Optional[float] = None
    type: Optional[FieldType] = None
    withModalEdit: Optional[bool] = None


# Full configuration of a node as sent to the frontend.
class NodeConfig(BaseModel):
    defaultHideOutput: Optional[bool] = None
    fields: Optional[List[Field]] = None
    hasInputHandle: Optional[bool] = None
    helpMessage: Optional[str] = None
    hideFieldsIfParent: Optional[bool] = None
    icon: Optional[str] = None
    inputNames: Optional[List[str]] = None
    isBeta: Optional[bool] = None
    isDynamicallyGenerated: Optional[bool] = None
    nodeName: Optional[str] = None
    outputType: Optional[OutputType] = None
    processorType: Optional[str] = None
    section: Optional[SectionType] = None
    showHandlesNames: Optional[bool] = None


# A NodeConfig selected by matching discriminator values.
class DiscriminatedNodeConfig(BaseModel):
    config: Optional[NodeConfig] = None
    discriminators: Optional[Dict[str, str]] = None


# Container of discriminated sub-configurations keyed by discriminator fields.
class NodeSubConfig(BaseModel):
    discriminatorFields: Optional[List[str]] = None
    subConfigurations: Optional[List[DiscriminatedNodeConfig]] = None


# Sub-config container combined with the shared node metadata.
class NodeConfigVariant(NodeSubConfig, OmitNodeConfigFieldsOutputType):
    pass
================================================
FILE: packages/backend/app/processors/components/node_config_builder.py
================================================
from typing import Dict, List, Optional, Union
from .model import (
DiscriminatedNodeConfig,
Field,
FieldType,
NodeConfig,
NodeConfigVariant,
Option,
OutputType,
SectionType,
)
class BaseNodeConfigBuilder:
    """Fluent builder collecting the attributes shared by every node
    configuration; each setter returns self so calls can be chained."""

    def __init__(self):
        self.nodeName: Optional[str] = None
        self.processorType: Optional[str] = None
        self.icon: Optional[str] = None
        self.outputType: Optional[str] = None
        self.section: Optional[str] = None
        self.helpMessage: Optional[str] = None
        self.showHandlesNames: Optional[bool] = False
        self.isBeta: Optional[bool] = False
        self.defaultHideOutput: Optional[bool] = False

    def set_node_name(self, name: str) -> "BaseNodeConfigBuilder":
        self.nodeName = name
        return self

    def set_processor_type(self, processor_type: str) -> "BaseNodeConfigBuilder":
        self.processorType = processor_type
        return self

    def set_icon(self, icon: str) -> "BaseNodeConfigBuilder":
        self.icon = icon
        return self

    def set_output_type(self, output_type: str) -> "BaseNodeConfigBuilder":
        # Wrapped in the generated RootModel so it serializes like the schema.
        self.outputType = OutputType(root=output_type)
        return self

    def set_section(self, section: str) -> "BaseNodeConfigBuilder":
        # Wrapped in the generated RootModel so it serializes like the schema.
        self.section = SectionType(root=section)
        return self

    def set_help_message(self, help_message: str) -> "BaseNodeConfigBuilder":
        self.helpMessage = help_message
        return self

    def set_show_handles(self, show: bool) -> "BaseNodeConfigBuilder":
        self.showHandlesNames = show
        return self

    # Annotation fix: these two previously declared the subclass
    # ("NodeConfigBuilder") as their return type, inconsistent with the
    # other base-class setters.
    def set_is_beta(self, beta: bool) -> "BaseNodeConfigBuilder":
        self.isBeta = beta
        return self

    def set_default_hide_output(self, hide: bool) -> "BaseNodeConfigBuilder":
        self.defaultHideOutput = hide
        return self
class NodeConfigBuilder(BaseNodeConfigBuilder):
    """Builder for a single NodeConfig, optionally tagged with discriminators.

    When at least one discriminator has been added, build() wraps the config
    in a DiscriminatedNodeConfig so it can join a NodeConfigVariant.
    """

    def __init__(self):
        super().__init__()
        self.fields: List[Field] = []
        self.isDynamicallyGenerated: Optional[bool] = False
        self.discriminators: Optional[Dict[str, str]] = None

    def set_is_dynamic(self, dyna: bool) -> "NodeConfigBuilder":
        self.isDynamicallyGenerated = dyna
        return self

    def set_fields(self, fields: List[Field]) -> "NodeConfigBuilder":
        self.fields = fields
        return self

    def add_field(self, field: Field) -> "NodeConfigBuilder":
        self.fields.append(field)
        return self

    def add_discriminator(self, key: str, value: str) -> "NodeConfigBuilder":
        # Lazily create the mapping so build() can tell "no discriminators"
        # (None) apart from an empty mapping.
        if self.discriminators is None:
            self.discriminators = {}
        self.discriminators[key] = value
        return self

    # Return annotation corrected: a DiscriminatedNodeConfig is returned
    # whenever discriminators were added (old annotation claimed NodeConfig).
    def build(self) -> Union[NodeConfig, DiscriminatedNodeConfig]:
        baseConfig = NodeConfig(
            nodeName=self.nodeName,
            processorType=self.processorType,
            icon=self.icon,
            fields=self.fields,
            outputType=self.outputType,
            section=self.section,
            helpMessage=self.helpMessage,
            showHandlesNames=self.showHandlesNames,
            isDynamicallyGenerated=self.isDynamicallyGenerated,
            isBeta=self.isBeta,
            defaultHideOutput=self.defaultHideOutput,
        )
        if self.discriminators is not None:
            return DiscriminatedNodeConfig(
                config=baseConfig, discriminators=self.discriminators
            )
        return baseConfig
class NodeConfigVariantBuilder(BaseNodeConfigBuilder):
    """Builder for a NodeConfigVariant: several discriminated sub-configs
    sharing the common attributes set on this builder.
    """

    def __init__(self):
        super().__init__()
        # Each entry must be a DiscriminatedNodeConfig: build() dereferences
        # entry.config (the previous List[NodeConfig] annotation was wrong).
        self.subConfigurations: List[DiscriminatedNodeConfig] = []
        self.discriminatorFields: Optional[List[str]] = []

    def add_discriminator_field(self, field: str) -> "NodeConfigVariantBuilder":
        if self.discriminatorFields is None:
            self.discriminatorFields = []
        self.discriminatorFields.append(field)
        return self

    def add_sub_configuration(
        self, sub_configuration: DiscriminatedNodeConfig
    ) -> "NodeConfigVariantBuilder":
        self.subConfigurations.append(sub_configuration)
        return self

    def build(self) -> NodeConfigVariant:
        # Propagate the shared attributes onto every sub-configuration so
        # callers only have to set them once on this builder.
        for subConfig in self.subConfigurations:
            config = subConfig.config
            config.showHandlesNames = self.showHandlesNames
            config.icon = self.icon
            config.nodeName = self.nodeName
            config.outputType = self.outputType
            config.section = self.section
            config.processorType = self.processorType
            config.helpMessage = self.helpMessage
        # NOTE(review): isBeta and defaultHideOutput are not propagated here,
        # unlike the other shared attributes — confirm whether intentional.
        return NodeConfigVariant(
            subConfigurations=self.subConfigurations,
            discriminatorFields=self.discriminatorFields,
        )
class FieldBuilder:
    """Fluent builder that assembles a Field one attribute at a time."""

    def __init__(self):
        self._field = Field()

    def _assign(self, attribute: str, value) -> "FieldBuilder":
        # Shared setter: write one attribute and keep the chain going.
        setattr(self._field, attribute, value)
        return self

    def set_name(self, name: str) -> "FieldBuilder":
        return self._assign("name", name)

    def set_label(self, label: str) -> "FieldBuilder":
        return self._assign("label", label)

    def set_description(self, description: str) -> "FieldBuilder":
        return self._assign("description", description)

    def set_type(self, field_type: str) -> "FieldBuilder":
        # Wrap the raw string in its pydantic root model.
        return self._assign("type", FieldType(root=field_type))

    def set_min(self, min: float) -> "FieldBuilder":
        return self._assign("min", min)

    def set_max(self, max: float) -> "FieldBuilder":
        return self._assign("max", max)

    def set_is_binary(self, binary: bool) -> "FieldBuilder":
        return self._assign("isBinary", binary)

    def set_placeholder(self, placeholder: str) -> "FieldBuilder":
        return self._assign("placeholder", placeholder)

    def set_required(self, required: bool) -> "FieldBuilder":
        return self._assign("required", required)

    def set_options(self, options: List[Option]) -> "FieldBuilder":
        return self._assign("options", options)

    def add_option(self, option: Option) -> "FieldBuilder":
        # Create the list lazily on first use, then append.
        if not self._field.options:
            self._field.options = []
        self._field.options.append(option)
        return self

    def set_default_value(self, default_value: Union[str, float]) -> "FieldBuilder":
        return self._assign("defaultValue", default_value)

    def set_has_handle(self, has_handle: bool) -> "FieldBuilder":
        return self._assign("hasHandle", has_handle)

    def build(self) -> Field:
        """Return the accumulated Field instance."""
        return self._field
================================================
FILE: packages/backend/app/processors/components/node_config_utils.py
================================================
from .model import NodeConfigVariant
def get_sub_configuration(discriminators_values, node_config: NodeConfigVariant):
    """Return the sub-configuration whose discriminator values equal the
    given list (insertion order of the discriminators dict), or None if
    no sub-configuration matches.
    """
    for candidate in node_config.subConfigurations:
        candidate_values = list(candidate.discriminators.values())
        if candidate_values == discriminators_values:
            return candidate
================================================
FILE: packages/backend/app/processors/components/processor.py
================================================
from abc import ABC, abstractmethod
import json
import logging
from typing import Any, List, Optional, TypedDict, Union, Dict
from ..launcher.processor_event import ProcessorEvent
from ..launcher.event_type import EventType
from ..observer.observer import Observer
from .core.processor_type_name_utils import ProcessorType
from ...storage.storage_strategy import StorageStrategy
from ..context.processor_context import ProcessorContext
class BadKeyInputIndex(Exception):
    """Exception raised for index out of bounds in the output list."""

    # Default message grammar fixed ("does not exists" -> "does not exist").
    def __init__(self, message="This input key does not exist"):
        self.message = message
        super().__init__(self.message)
class InputItem(TypedDict, total=False):
    """One wiring entry linking an upstream node's output to this processor."""

    inputName: Optional[str]  # name of the input handle, when handles are named
    inputNode: str  # name of the upstream processor providing the value
    inputNodeOutputKey: int  # index into the upstream processor's output list
class Processor(ABC):
    """Abstract base class for every node processor in a flow."""

    processor_type: Optional["ProcessorType"] = None
    """The type of the processor"""
    # NOTE(review): mutable class-level default; __init__ rebinds it per
    # instance, but instances created without __init__ would share this list.
    observers: List[Observer] = []
    """The observers of the processor"""
    storage_strategy: Optional["StorageStrategy"]
    """The storage strategy used by the processor"""
    _processor_context: Optional["ProcessorContext"]
    """The context data of the processor"""
    name: str
    """The name of the processor"""
    _output: Optional[Any]
    """The output of the processor"""
    inputs: Optional[List[InputItem]]
    """A list of inputs accepted by the processor."""
    input_processors: List["Processor"]
    """The processors set as inputs"""
    is_processing: bool
    """Flag indicating if the processor has started working, useful when using API with cold start"""
    is_finished: bool
    """Flag indicating the processor has produced its output"""
    _has_dynamic_behavior: bool
    """Flag indicating if the processor's behavior and execution time are unpredictable and subject to change at runtime."""
def __init__(self, config: Dict[str, Any]) -> None:
self.name = config["name"]
self.processor_type = config["processorType"]
self.observers = []
self._output = None
self.inputs = None
self._processor_context = None
self.input_processors = []
self.storage_strategy = None
self.is_finished = False
self._has_dynamic_behavior = False
self._config = config
if (
config.get("config") is not None
and config.get("config").get("fields") is not None
and config.get("config").get("fields") != []
):
self.fields = config.get("config").get("fields")
self.fields_names = [field["name"] for field in self.fields]
if config.get("inputs") is not None and config.get("inputs") != []:
self.inputs = config.get("inputs")
def cleanup(self) -> None:
self.input_processors = None
self._processor_context = None
self._output = None
self.storage_strategy = None
def process_and_update(self):
output = self.process()
if output is not None:
self.set_output(output)
return output
    @abstractmethod
    def process(self):
        """Produce this processor's output; implemented by each subclass."""
        pass
    @abstractmethod
    def cancel(self) -> None:
        """Abort any in-flight work; implemented by each subclass."""
        pass
    def add_observer(self, observer):
        """Register an observer to receive this processor's events."""
        self.observers.append(observer)
    def remove_observer(self, observer):
        """Detach an observer and return the remaining observer list.

        NOTE(review): when the last observer is removed, `observers` is reset
        to None rather than left as an empty list; notify() and add_observer()
        would then fail on None — confirm callers never reuse the processor
        after this point.
        """
        self.observers.remove(observer)
        if len(self.observers) == 0:
            self.observers = None
        return self.observers
def notify(self, event: EventType, data: ProcessorEvent):
for observer in self.observers:
observer.notify(event, data)
def get_output(self, input_key=None) -> Optional[str]:
output = getattr(self, "_output", None)
if output is not None and isinstance(output, list) and len(output) > 0:
if input_key is not None:
if input_key < 0 or input_key >= len(output):
logging.warning(
f"Index {input_key} out of bounds for output of size {len(output)}."
)
return None
return output[input_key]
else:
return output
return None
def set_output(self, value: Union[List, str]) -> None:
if isinstance(value, list):
self._output = value
elif isinstance(value, str):
self._output = [value]
else:
raise TypeError("Value should be either a list or a string.")
self.is_finished = True
    def get_inputs(self) -> Optional[List[InputItem]]:
        """Return the raw input wiring entries, or None when the node has none."""
        return self.inputs
def get_input_processor(self) -> Optional["Processor"]:
if self.input_processors is None or len(self.input_processors) == 0:
return None
return self.input_processors[0]
    def get_input_processors(self) -> List["Processor"]:
        """Return all processors wired as inputs to this one."""
        return self.input_processors
def get_input_node_output_key(self) -> Optional[int]:
if self.inputs is None or len(self.inputs) == 0:
return None
if self.inputs[0].get("inputNodeOutputKey") is None:
return 0
return self.inputs[0].get("inputNodeOutputKey")
def get_input_node_output_key_by_node_name(
self, input_node_name: str
) -> Optional[int]:
keys = []
for input in self.inputs:
if input.get("inputNode") == input_node_name:
keys.append(input.get("inputNodeOutputKey"))
return keys
def get_input_node_outpu
gitextract_vbwzelad/
├── .github/
│ ├── FUNDING.yml
│ └── workflows/
│ └── main.yml
├── .gitignore
├── LICENSE
├── README.md
├── bin/
│ └── generate_python_classes_from_ts.sh
├── docker/
│ ├── README.md
│ ├── docker-compose.it.yml
│ ├── docker-compose.yml
│ └── healthcheck.sh
├── integration_tests/
│ ├── .gitignore
│ ├── package.json
│ ├── tests/
│ │ ├── nodeProcessingOrder/
│ │ │ ├── nodeErrorTest.ts
│ │ │ ├── nodeParallelExecutionDurationTest.ts
│ │ │ ├── nodeWithChildrenTest.ts
│ │ │ ├── nodeWithMultipleParentsTest.ts
│ │ │ ├── nodesWithoutLinkTest.ts
│ │ │ └── singleNodeTest.ts
│ │ └── socketEvents/
│ │ ├── processFileEventTest.ts
│ │ ├── runNodeEventTest.ts
│ │ └── socketConnectionTest.ts
│ ├── tsconfig.json
│ └── utils/
│ ├── requestDatas.ts
│ └── testHooks.ts
└── packages/
├── backend/
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── app/
│ │ ├── env_config.py
│ │ ├── flask/
│ │ │ ├── app_routes/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── image_routes.py
│ │ │ │ ├── node_routes.py
│ │ │ │ ├── parameters_routes.py
│ │ │ │ ├── static_routes.py
│ │ │ │ └── upload_routes.py
│ │ │ ├── decorators.py
│ │ │ ├── flask_app.py
│ │ │ ├── routes.py
│ │ │ ├── socketio_init.py
│ │ │ ├── sockets.py
│ │ │ └── utils/
│ │ │ └── constants.py
│ │ ├── llms/
│ │ │ └── utils/
│ │ │ └── max_token_for_model.py
│ │ ├── log_config.py
│ │ ├── processors/
│ │ │ ├── components/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── core/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── ai_data_splitter_processor.py
│ │ │ │ │ ├── dall_e_prompt_processor.py
│ │ │ │ │ ├── display_processor.py
│ │ │ │ │ ├── file_processor.py
│ │ │ │ │ ├── gpt_vision_processor.py
│ │ │ │ │ ├── input_image_processor.py
│ │ │ │ │ ├── input_processor.py
│ │ │ │ │ ├── llm_prompt_processor.py
│ │ │ │ │ ├── merge_processor.py
│ │ │ │ │ ├── processor_type_name_utils.py
│ │ │ │ │ ├── replicate_processor.py
│ │ │ │ │ ├── stable_diffusion_stabilityai_prompt_processor.py
│ │ │ │ │ ├── stable_video_diffusion_replicate.py
│ │ │ │ │ ├── transition_processor.py
│ │ │ │ │ ├── url_input_processor.py
│ │ │ │ │ └── youtube_transcript_input_processor.py
│ │ │ │ ├── extension/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── claude_anthropic_processor.py
│ │ │ │ │ ├── deepseek_processor.py
│ │ │ │ │ ├── document_to_text_processor.py
│ │ │ │ │ ├── extension_processor.py
│ │ │ │ │ ├── generate_number_processor.py
│ │ │ │ │ ├── gpt_image_processor.py
│ │ │ │ │ ├── http_get_processor.py
│ │ │ │ │ ├── open_router_processor.py
│ │ │ │ │ ├── openai_reasoning_processor.py
│ │ │ │ │ ├── openai_text_to_speech_processor.py
│ │ │ │ │ ├── replace_text_processor.py
│ │ │ │ │ ├── stabilityai_generic_processor.py
│ │ │ │ │ └── stable_diffusion_three_processor.py
│ │ │ │ ├── model.py
│ │ │ │ ├── node_config_builder.py
│ │ │ │ ├── node_config_utils.py
│ │ │ │ └── processor.py
│ │ │ ├── context/
│ │ │ │ ├── processor_context.py
│ │ │ │ └── processor_context_flask_request.py
│ │ │ ├── exceptions.py
│ │ │ ├── factory/
│ │ │ │ ├── processor_factory.py
│ │ │ │ └── processor_factory_iter_modules.py
│ │ │ ├── launcher/
│ │ │ │ ├── abstract_topological_processor_launcher.py
│ │ │ │ ├── async_processor_launcher.py
│ │ │ │ ├── basic_processor_launcher.py
│ │ │ │ ├── event_type.py
│ │ │ │ ├── processor_event.py
│ │ │ │ ├── processor_launcher.py
│ │ │ │ └── processor_launcher_event.py
│ │ │ ├── observer/
│ │ │ │ ├── observer.py
│ │ │ │ └── socketio_event_emitter.py
│ │ │ └── utils/
│ │ │ └── retry_mixin.py
│ │ ├── root_injector.py
│ │ ├── storage/
│ │ │ ├── local_storage_strategy.py
│ │ │ ├── s3_storage_strategy.py
│ │ │ └── storage_strategy.py
│ │ ├── tasks/
│ │ │ ├── green_pool_task_manager.py
│ │ │ ├── single_thread_tasks/
│ │ │ │ └── browser/
│ │ │ │ ├── async_browser_task.py
│ │ │ │ └── browser_task.py
│ │ │ ├── task_exception.py
│ │ │ ├── task_manager.py
│ │ │ ├── task_utils.py
│ │ │ └── thread_pool_task_manager.py
│ │ └── utils/
│ │ ├── node_extension_utils.py
│ │ ├── openapi_client.py
│ │ ├── openapi_converter.py
│ │ ├── openapi_reader.py
│ │ ├── processor_utils.py
│ │ ├── replicate_utils.py
│ │ └── web_scrapping/
│ │ ├── async_browser_manager.py
│ │ └── browser_manager.py
│ ├── config.yaml
│ ├── hooks/
│ │ └── hook-app.processors.py
│ ├── pyproject.toml
│ ├── requirements_windows.txt
│ ├── resources/
│ │ ├── data/
│ │ │ └── openrouter_models.json
│ │ └── openapi/
│ │ └── stabilityai.json
│ ├── server.py
│ └── tests/
│ ├── unit/
│ │ ├── test_processor_factory.py
│ │ ├── test_processor_launcher.py
│ │ └── test_stable_diffusion_stabilityai_prompt_processor.py
│ └── utils/
│ ├── openai_mock_utils.py
│ ├── processor_context_mock.py
│ └── processor_factory_mock.py
└── ui/
├── .gitignore
├── .prettierignore
├── Dockerfile
├── README.md
├── index.html
├── jest.config.ts
├── nginx.conf
├── package.json
├── postcss.config.cjs
├── postcss.config.js
├── prettier.config.js
├── public/
│ ├── health
│ ├── locales/
│ │ ├── en/
│ │ │ ├── aiActions.json
│ │ │ ├── config.json
│ │ │ ├── dialogs.json
│ │ │ ├── flow.json
│ │ │ ├── nodeHelp.json
│ │ │ ├── tips.json
│ │ │ ├── tour.json
│ │ │ └── version.json
│ │ └── fr/
│ │ ├── aiActions.json
│ │ ├── config.json
│ │ ├── dialogs.json
│ │ ├── flow.json
│ │ ├── nodeHelp.json
│ │ ├── tips.json
│ │ ├── tour.json
│ │ └── version.json
│ ├── robots.txt
│ ├── samples/
│ │ └── intro.json
│ └── site.webmanifest
├── src/
│ ├── App.tsx
│ ├── Main.tsx
│ ├── api/
│ │ ├── cache/
│ │ │ ├── cacheManager.ts
│ │ │ └── withCache.ts
│ │ ├── client.ts
│ │ ├── nodes.ts
│ │ ├── parameters.ts
│ │ ├── replicateModels.ts
│ │ └── uploadFile.ts
│ ├── components/
│ │ ├── Flow.tsx
│ │ ├── LoadingScreen.tsx
│ │ ├── bars/
│ │ │ ├── Sidebar.tsx
│ │ │ └── dnd-sidebar/
│ │ │ ├── DnDSidebar.tsx
│ │ │ ├── DraggableNode.tsx
│ │ │ ├── DraggableNodeWithSubnodes.tsx
│ │ │ ├── GripIcon.tsx
│ │ │ ├── Section.tsx
│ │ │ └── types.ts
│ │ ├── buttons/
│ │ │ ├── ButtonRunAll.tsx
│ │ │ └── ConfigurationButton.tsx
│ │ ├── edges/
│ │ │ └── buttonEdge.tsx
│ │ ├── handles/
│ │ │ └── HandleWrapper.tsx
│ │ ├── inputs/
│ │ │ └── InputWithButton.tsx
│ │ ├── nodes/
│ │ │ ├── AIDataSplitterNode.tsx
│ │ │ ├── DisplayNode.tsx
│ │ │ ├── FileUploadNode.tsx
│ │ │ ├── GenericNode.tsx
│ │ │ ├── Node.styles.ts
│ │ │ ├── NodeHelpPopover.tsx
│ │ │ ├── NodeWrapper.tsx
│ │ │ ├── ReplicateNode.tsx
│ │ │ ├── TransitionNode.tsx
│ │ │ ├── node-button/
│ │ │ │ ├── InputNameBar.tsx
│ │ │ │ └── NodePlayButton.tsx
│ │ │ ├── node-input/
│ │ │ │ ├── FileUploadField.tsx
│ │ │ │ ├── ImageMaskCreator.tsx
│ │ │ │ ├── ImageMaskCreatorField.tsx
│ │ │ │ ├── ImageMaskCreatorFieldFlowAware.tsx
│ │ │ │ ├── KeyValueInputList.tsx
│ │ │ │ ├── NodeField.tsx
│ │ │ │ ├── NodeTextField.tsx
│ │ │ │ ├── NodeTextarea.tsx
│ │ │ │ ├── OutputRenderer.tsx
│ │ │ │ └── TextAreaPopupWrapper.tsx
│ │ │ ├── node-output/
│ │ │ │ ├── AudioUrlOutput.tsx
│ │ │ │ ├── ImageBase64Output.tsx
│ │ │ │ ├── ImageUrlOutput.tsx
│ │ │ │ ├── MarkdownOutput.tsx
│ │ │ │ ├── NodeOutput.tsx
│ │ │ │ ├── OutputDisplay.tsx
│ │ │ │ ├── PdfUrlOutput.tsx
│ │ │ │ ├── ThreeDimensionalUrlOutput.tsx
│ │ │ │ ├── VideoUrlOutput.tsx
│ │ │ │ └── outputUtils.ts
│ │ │ ├── types/
│ │ │ │ └── node.ts
│ │ │ └── utils/
│ │ │ ├── HintComponent.tsx
│ │ │ ├── ImageModal.tsx
│ │ │ ├── ImageZoomable.tsx
│ │ │ ├── NodeHelp.tsx
│ │ │ ├── NodeIcons.tsx
│ │ │ └── TextareaModal.tsx
│ │ ├── players/
│ │ │ └── VideoJS.tsx
│ │ ├── popups/
│ │ │ ├── ConfirmPopup.tsx
│ │ │ ├── DefaultPopup.tsx
│ │ │ ├── HelpPopup.tsx
│ │ │ ├── UserMessagePopup.tsx
│ │ │ ├── config-popup/
│ │ │ │ ├── AppParameters.tsx
│ │ │ │ ├── ConfigPopup.tsx
│ │ │ │ ├── DisplayParameters.tsx
│ │ │ │ ├── ParametersFields.tsx
│ │ │ │ ├── UserParameters.tsx
│ │ │ │ ├── configMetadata.ts
│ │ │ │ └── parameters.ts
│ │ │ ├── select-model-popup/
│ │ │ │ ├── Model.tsx
│ │ │ │ └── SelectModelPopup.tsx
│ │ │ └── shared/
│ │ │ ├── FilterGrid.tsx
│ │ │ ├── Grid.tsx
│ │ │ └── LoadMoreButton.tsx
│ │ ├── selectors/
│ │ │ ├── ActionGroup.tsx
│ │ │ ├── ColorSelector.tsx
│ │ │ ├── ExpandableBloc.tsx
│ │ │ ├── FileDropZone.tsx
│ │ │ ├── OptionSelector.tsx
│ │ │ └── SelectAutocomplete.tsx
│ │ ├── shared/
│ │ │ ├── motions/
│ │ │ │ ├── EaseOut.tsx
│ │ │ │ ├── TapScale.tsx
│ │ │ │ └── types.ts
│ │ │ └── theme.tsx
│ │ ├── side-views/
│ │ │ ├── CurrentNodeView.tsx
│ │ │ └── JSONView.tsx
│ │ ├── tools/
│ │ │ └── Fallback.tsx
│ │ └── tour/
│ │ └── AppTour.tsx
│ ├── config/
│ │ └── config.ts
│ ├── hooks/
│ │ ├── useFlowSocketListeners.tsx
│ │ ├── useFormFields.tsx
│ │ ├── useHandlePositions.tsx
│ │ ├── useHandleShowOutput.tsx
│ │ ├── useIsPlaying.tsx
│ │ ├── useIsTouchDevice.tsx
│ │ ├── useLoading.tsx
│ │ ├── useLocalStorage.tsx
│ │ └── useRefreshOnAppearanceChange.tsx
│ ├── i18n.js
│ ├── index.css
│ ├── index.tsx
│ ├── init.js
│ ├── layout/
│ │ └── main-layout/
│ │ ├── AppLayout.tsx
│ │ ├── header/
│ │ │ ├── Tab.tsx
│ │ │ └── TabHeader.tsx
│ │ └── wrapper/
│ │ ├── FlowErrorBoundary.tsx
│ │ └── FlowWrapper.tsx
│ ├── nodes-configuration/
│ │ ├── dallENode.ts
│ │ ├── gptVisionNode.ts
│ │ ├── inputTextNode.ts
│ │ ├── llmPrompt.ts
│ │ ├── mergerPromptNode.ts
│ │ ├── nodeConfig.ts
│ │ ├── sectionConfig.ts
│ │ ├── stableDiffusionStabilityAiNode.ts
│ │ ├── types.ts
│ │ ├── urlNode.ts
│ │ └── youtubeTranscriptNode.ts
│ ├── providers/
│ │ ├── FlowDataProvider.tsx
│ │ ├── NodeProvider.tsx
│ │ ├── SocketProvider.tsx
│ │ ├── ThemeProvider.tsx
│ │ └── VisibilityProvider.tsx
│ ├── react-app-env.d.ts
│ ├── reportWebVitals.ts
│ ├── services/
│ │ └── tabStorage.ts
│ ├── setupTests.ts
│ ├── sockets/
│ │ ├── flowEventTypes.ts
│ │ └── flowSocket.ts
│ ├── utils/
│ │ ├── evaluateConditions.ts
│ │ ├── flowChecker.ts
│ │ ├── flowUtils.ts
│ │ ├── mappings.tsx
│ │ ├── navigatorUtils.ts
│ │ ├── nodeConfigurationUtils.ts
│ │ ├── nodeUtils.ts
│ │ ├── openAPIUtils.ts
│ │ └── toastUtils.tsx
│ └── vite-env.d.ts
├── tailwind.config.js
├── test/
│ ├── e2e/
│ │ ├── intro-flow.spec.ts
│ │ ├── loading-screen.spec.ts
│ │ ├── main-content.spec.ts
│ │ ├── sidebar-default-nodes.spec.ts
│ │ ├── sidebar-extensions-nodes.spec.ts
│ │ └── tuto-display.spec.ts
│ ├── unit/
│ │ ├── flowChecker.test.ts
│ │ └── flowUtils.test.ts
│ └── utils.ts
├── tsconfig.json
├── vite.config.ts
└── vitest.config.ts
SYMBOL INDEX (897 symbols across 202 files)
FILE: integration_tests/utils/requestDatas.ts
type ProcessFileData (line 1) | type ProcessFileData = {
type RunNodeData (line 6) | type RunNodeData = {
type Node (line 12) | type Node = {
function getBasicProcessFileData (line 187) | function getBasicProcessFileData(): ProcessFileData {
function getBasicRunNodeData (line 196) | function getBasicRunNodeData(): RunNodeData {
function getJsonFlowWithMissingInputTextProcessFileData (line 206) | function getJsonFlowWithMissingInputTextProcessFileData(): ProcessFileDa...
function createRequestData (line 215) | function createRequestData(flow: any): ProcessFileData {
FILE: packages/backend/app/env_config.py
function get_static_folder (line 17) | def get_static_folder() -> str:
function is_cloud_env (line 27) | def is_cloud_env() -> bool:
function is_local_environment (line 31) | def is_local_environment() -> bool:
function is_mock_env (line 35) | def is_mock_env() -> bool:
function is_server_static_files_enabled (line 39) | def is_server_static_files_enabled() -> bool:
function get_local_storage_folder_path (line 43) | def get_local_storage_folder_path() -> str:
function get_flask_secret_key (line 47) | def get_flask_secret_key() -> Optional[str]:
function get_replicate_api_key (line 51) | def get_replicate_api_key() -> Optional[str]:
function get_background_task_max_workers (line 55) | def get_background_task_max_workers() -> int:
function use_async_browser (line 59) | def use_async_browser() -> bool:
function get_browser_tab_max_usage (line 63) | def get_browser_tab_max_usage() -> int:
function get_browser_tab_pool_size (line 67) | def get_browser_tab_pool_size() -> int:
function is_set_app_config_on_ui_enabled (line 71) | def is_set_app_config_on_ui_enabled() -> bool:
function is_s3_enabled (line 75) | def is_s3_enabled() -> bool:
FILE: packages/backend/app/flask/app_routes/image_routes.py
function serve_image (line 7) | def serve_image(filename):
FILE: packages/backend/app/flask/app_routes/node_routes.py
function get_node_extensions (line 20) | def get_node_extensions():
function get_dynamic_extension (line 26) | def get_dynamic_extension():
function get_public_models (line 41) | def get_public_models():
function get_collections (line 51) | def get_collections():
function get_collection_models (line 56) | def get_collection_models(collection):
function get_config (line 62) | def get_config(model):
FILE: packages/backend/app/flask/app_routes/parameters_routes.py
function load_config (line 8) | def load_config():
function parameters (line 14) | def parameters():
FILE: packages/backend/app/flask/app_routes/static_routes.py
function serve (line 11) | def serve(path):
FILE: packages/backend/app/flask/app_routes/upload_routes.py
function upload_file (line 12) | def upload_file():
FILE: packages/backend/app/flask/decorators.py
function with_flow_data_validations (line 8) | def with_flow_data_validations(*validation_funcs):
FILE: packages/backend/app/flask/flask_app.py
function create_app (line 8) | def create_app():
FILE: packages/backend/app/flask/routes.py
function healthcheck (line 8) | def healthcheck():
FILE: packages/backend/app/flask/sockets.py
function populate_request_global_object (line 27) | def populate_request_global_object(data):
function handle_connect (line 58) | def handle_connect():
function handle_process_file (line 63) | def handle_process_file(data):
function handle_run_node (line 95) | def handle_run_node(data):
function handle_disconnect (line 132) | def handle_disconnect():
function handle_update_app_config (line 137) | def handle_update_app_config(data):
FILE: packages/backend/app/llms/utils/max_token_for_model.py
function max_token_for_model (line 6) | def max_token_for_model(model_name: str) -> int:
function nb_token_for_input (line 44) | def nb_token_for_input(input: str, model_name: str) -> int:
FILE: packages/backend/app/log_config.py
function setup_logger (line 5) | def setup_logger(name: str):
FILE: packages/backend/app/processors/components/core/ai_data_splitter_processor.py
function interpret_escape_sequences (line 10) | def interpret_escape_sequences(separator):
class AIDataSplitterProcessor (line 19) | class AIDataSplitterProcessor(ContextAwareProcessor):
method __init__ (line 25) | def __init__(self, config, context: ProcessorContext):
method get_llm_response (line 32) | def get_llm_response(self, messages):
method process (line 39) | def process(self):
method init_context (line 70) | def init_context(self, input_data: str) -> None:
method cancel (line 107) | def cancel(self):
FILE: packages/backend/app/processors/components/core/dall_e_prompt_processor.py
class DallEPromptProcessor (line 9) | class DallEPromptProcessor(ContextAwareProcessor):
method __init__ (line 16) | def __init__(self, config, context: ProcessorContext):
method process (line 22) | def process(self):
method cancel (line 45) | def cancel(self):
FILE: packages/backend/app/processors/components/core/display_processor.py
class DisplayProcessor (line 5) | class DisplayProcessor(BasicProcessor):
method __init__ (line 8) | def __init__(self, config):
method process (line 11) | def process(self):
FILE: packages/backend/app/processors/components/core/file_processor.py
class FileProcessor (line 5) | class FileProcessor(BasicProcessor):
method __init__ (line 8) | def __init__(self, config):
method process (line 12) | def process(self):
FILE: packages/backend/app/processors/components/core/gpt_vision_processor.py
class GPTVisionProcessor (line 13) | class GPTVisionProcessor(ContextAwareProcessor):
method __init__ (line 17) | def __init__(self, config, context: ProcessorContext):
method _gather_image_url_values (line 20) | def _gather_image_url_values(self) -> List[Any]:
method process (line 38) | def process(self):
method is_valid_url (line 101) | def is_valid_url(self, url):
method cancel (line 108) | def cancel(self):
FILE: packages/backend/app/processors/components/core/input_image_processor.py
class InputImageProcessor (line 5) | class InputImageProcessor(BasicProcessor):
method __init__ (line 8) | def __init__(self, config):
method process (line 12) | def process(self):
FILE: packages/backend/app/processors/components/core/input_processor.py
class InputProcessor (line 5) | class InputProcessor(BasicProcessor):
method __init__ (line 8) | def __init__(self, config):
method process (line 12) | def process(self):
FILE: packages/backend/app/processors/components/core/llm_prompt_processor.py
class LLMPromptProcessor (line 15) | class LLMPromptProcessor(ContextAwareProcessor):
method __init__ (line 26) | def __init__(self, config, context: ProcessorContext):
method handle_stream_answer (line 32) | def handle_stream_answer(self, awnser):
method nb_tokens_from_messages (line 36) | def nb_tokens_from_messages(self, messages, model):
method check_for_html_tags (line 48) | def check_for_html_tags(self, text):
method process (line 56) | def process(self):
method init_context (line 140) | def init_context(self, context: str) -> None:
method cancel (line 165) | def cancel(self):
FILE: packages/backend/app/processors/components/core/merge_processor.py
class MergeProcessor (line 4) | class MergeProcessor(ContextAwareProcessor):
method __init__ (line 7) | def __init__(self, config, context):
method update_prompt (line 12) | def update_prompt(self, inputs):
method process (line 17) | def process(self):
method cancel (line 26) | def cancel(self):
FILE: packages/backend/app/processors/components/core/processor_type_name_utils.py
class MergeModeEnum (line 4) | class MergeModeEnum(Enum):
class ProcessorType (line 9) | class ProcessorType(Enum):
FILE: packages/backend/app/processors/components/core/replicate_processor.py
class ReplicateProcessor (line 31) | class ReplicateProcessor(ContextAwareProcessor):
method __init__ (line 34) | def __init__(self, config, context: ProcessorContext):
method get_prediction_result (line 49) | def get_prediction_result(
method wait_for_prediction_task (line 65) | def wait_for_prediction_task(task_data):
method register_background_task (line 74) | def register_background_task(self):
method process (line 84) | def process(self):
method upload_replicate_uri_to_storage (line 191) | def upload_replicate_uri_to_storage(self, uri):
method _get_nested_input_schema_property (line 220) | def _get_nested_input_schema_property(self, property_name, nested_key):
method cancel (line 228) | def cancel(self):
FILE: packages/backend/app/processors/components/core/stable_diffusion_stabilityai_prompt_processor.py
class StableDiffusionStabilityAIPromptProcessor (line 14) | class StableDiffusionStabilityAIPromptProcessor(ContextAwareProcessor):
method __init__ (line 17) | def __init__(self, config, context: ProcessorContext):
method prepare_and_process_response (line 33) | def prepare_and_process_response(self, response):
method setup_data_to_send (line 48) | def setup_data_to_send(self):
method process (line 68) | def process(self):
method cancel (line 84) | def cancel(self):
FILE: packages/backend/app/processors/components/core/stable_video_diffusion_replicate.py
class StableVideoDiffusionReplicaterocessor (line 10) | class StableVideoDiffusionReplicaterocessor(ContextAwareProcessor):
method __init__ (line 15) | def __init__(self, config, context: ProcessorContext):
method process (line 21) | def process(self):
method is_valid_url (line 52) | def is_valid_url(self, url):
method cancel (line 59) | def cancel(self):
FILE: packages/backend/app/processors/components/core/transition_processor.py
class TransitionProcessor (line 5) | class TransitionProcessor(BasicProcessor):
method __init__ (line 8) | def __init__(self, config):
method process (line 11) | def process(self):
FILE: packages/backend/app/processors/components/core/url_input_processor.py
class URLInputProcessor (line 14) | class URLInputProcessor(BasicProcessor):
method __init__ (line 25) | def __init__(self, config):
method get_random_user_agent (line 28) | def get_random_user_agent():
method fetch_content_simple (line 31) | def fetch_content_simple(self):
method process (line 45) | def process(self):
method process_content_with_beautiful_soup (line 93) | def process_content_with_beautiful_soup(self, content, task_data):
FILE: packages/backend/app/processors/components/core/youtube_transcript_input_processor.py
class YoutubeTranscriptInputProcessor (line 18) | class YoutubeTranscriptInputProcessor(BasicProcessor, RetryMixin):
method __init__ (line 21) | def __init__(self, config):
method get_video_id (line 26) | def get_video_id(self):
method process_with_youtube_transcript_api (line 34) | def process_with_youtube_transcript_api(self):
method get_transcript (line 64) | def get_transcript(self, video_id):
method get_translatable_transcript (line 84) | def get_translatable_transcript(self, video_id):
method create_no_transcript_error_message (line 105) | def create_no_transcript_error_message(self, e):
method retrieve_transcript (line 111) | def retrieve_transcript(self, url, language):
method process (line 126) | def process(self):
FILE: packages/backend/app/processors/components/extension/claude_anthropic_processor.py
class ClaudeAnthropicProcessor (line 13) | class ClaudeAnthropicProcessor(ContextAwareExtensionProcessor):
method __init__ (line 51) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 55) | def get_node_config(self):
method handle_stream_awnser (line 192) | def handle_stream_awnser(self, awnser):
method process (line 196) | def process(self):
method cancel (line 280) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/deepseek_processor.py
class DeepSeekProcessor (line 9) | class DeepSeekProcessor(ContextAwareExtensionProcessor):
method __init__ (line 13) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 17) | def get_node_config(self):
method process (line 71) | def process(self):
method cancel (line 114) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/document_to_text_processor.py
class DocumentToText (line 29) | class DocumentToText(BasicExtensionProcessor):
method __init__ (line 33) | def __init__(self, config):
method get_node_config (line 44) | def get_node_config(self) -> NodeConfig:
method get_loader_for_mime_type (line 70) | def get_loader_for_mime_type(self, mime_type, path):
method load_document (line 78) | def load_document(self, loader):
method document_loader_task (line 92) | def document_loader_task(loader):
method register_background_task (line 95) | def register_background_task(self):
method process (line 101) | def process(self):
FILE: packages/backend/app/processors/components/extension/extension_processor.py
class ExtensionProcessor (line 6) | class ExtensionProcessor:
method get_node_config (line 9) | def get_node_config(self) -> NodeConfig:
class DynamicExtensionProcessor (line 13) | class DynamicExtensionProcessor:
method get_dynamic_node_config (line 16) | def get_dynamic_node_config(self, data) -> NodeConfig:
class BasicExtensionProcessor (line 20) | class BasicExtensionProcessor(ExtensionProcessor, BasicProcessor):
method __init__ (line 29) | def __init__(self, config):
class ContextAwareExtensionProcessor (line 33) | class ContextAwareExtensionProcessor(ExtensionProcessor, ContextAwarePro...
method __init__ (line 43) | def __init__(self, config, context: ProcessorContext = None):
FILE: packages/backend/app/processors/components/extension/generate_number_processor.py
class GenerateNumberProcessor (line 11) | class GenerateNumberProcessor(
method __init__ (line 16) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 19) | def get_node_config(self):
method process (line 52) | def process(self):
method cancel (line 70) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/gpt_image_processor.py
class GPTImageProcessor (line 21) | class GPTImageProcessor(ContextAwareExtensionProcessor, DynamicExtension...
method __init__ (line 27) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 31) | def get_node_config(self):
method build_generate_config (line 59) | def build_generate_config(self, builder):
method build_edit_config (line 138) | def build_edit_config(self, builder):
method get_dynamic_node_config (line 189) | def get_dynamic_node_config(self, data) -> NodeConfig:
method get_image_file_from_url (line 204) | def get_image_file_from_url(url):
method process (line 217) | def process(self):
method cancel (line 272) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/http_get_processor.py
class HttpGetProcessor (line 11) | class HttpGetProcessor(ContextAwareExtensionProcessor):
method __init__ (line 19) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 22) | def get_node_config(self):
method convert_headers_array_to_json (line 58) | def convert_headers_array_to_json(self, headers_array):
method process (line 64) | def process(self):
method cancel (line 128) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/open_router_processor.py
function load_models_from_file (line 15) | def load_models_from_file():
function get_models (line 37) | def get_models():
function get_text_to_image_model_ids (line 56) | def get_text_to_image_model_ids():
class OpenRouterProcessor (line 69) | class OpenRouterProcessor(ContextAwareExtensionProcessor):
method __init__ (line 73) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 76) | def get_node_config(self):
method process (line 146) | def process(self):
method cancel (line 194) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/openai_reasoning_processor.py
class OpenAIReasoningProcessor (line 12) | class OpenAIReasoningProcessor(ContextAwareExtensionProcessor):
method __init__ (line 17) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 20) | def get_node_config(self):
method handle_stream_answer (line 119) | def handle_stream_answer(self, awnser):
method process (line 123) | def process(self):
method cancel (line 172) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/openai_text_to_speech_processor.py
class OpenAITextToSpeechProcessor (line 13) | class OpenAITextToSpeechProcessor(ContextAwareExtensionProcessor):
method __init__ (line 16) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 19) | def get_node_config(self):
method split_text_into_chunks (line 147) | def split_text_into_chunks(text, max_length=4096):
method process (line 196) | def process(self):
method cancel (line 263) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/replace_text_processor.py
class ReplaceTextProcessor (line 9) | class ReplaceTextProcessor(BasicExtensionProcessor):
method __init__ (line 12) | def __init__(self, config):
method get_node_config (line 15) | def get_node_config(self):
method process (line 95) | def process(self):
FILE: packages/backend/app/processors/components/extension/stabilityai_generic_processor.py
class StabilityAIGenericProcessor (line 27) | class StabilityAIGenericProcessor(
method __init__ (line 48) | def __init__(self, config, context: ProcessorContext):
method initialize_allowed_paths_cache (line 62) | def initialize_allowed_paths_cache(cls):
method is_path_banned (line 74) | def is_path_banned(path, denied_patterns):
method get_pooling_path (line 78) | def get_pooling_path(path_selected):
method transform_path_options_labels (line 84) | def transform_path_options_labels(options):
method get_node_config (line 104) | def get_node_config(self):
method initialize_api_config (line 138) | def initialize_api_config(self):
method determine_output_type (line 165) | def determine_output_type(path_accept):
method get_dynamic_node_config (line 175) | def get_dynamic_node_config(self, data) -> NodeConfig:
method perform_pooling (line 222) | def perform_pooling(self, client, path):
method prepare_and_process_response (line 225) | def prepare_and_process_response(self, response):
method get_fields_from_config (line 241) | def get_fields_from_config(self):
method quick_filter (line 261) | def quick_filter(self, data):
method process (line 272) | def process(self):
method cancel (line 318) | def cancel(self):
FILE: packages/backend/app/processors/components/extension/stable_diffusion_three_processor.py
class StableDiffusionThreeProcessor (line 12) | class StableDiffusionThreeProcessor(ContextAwareExtensionProcessor):
method __init__ (line 15) | def __init__(self, config, context: ProcessorContext):
method get_node_config (line 21) | def get_node_config(self):
method process (line 121) | def process(self):
method prepare_and_process_response (line 153) | def prepare_and_process_response(self, response):
method cancel (line 168) | def cancel(self):
FILE: packages/backend/app/processors/components/model.py
class Model (line 13) | class Model(RootModel[Any]):
class FieldType (line 17) | class FieldType(
class Operator (line 63) | class Operator(
class Option (line 89) | class Option(BaseModel):
class OutputType (line 95) | class OutputType(
class SectionType (line 123) | class SectionType(RootModel[Literal['image-generation', 'input', 'models...
class Condition (line 127) | class Condition(BaseModel):
class ConditionGroup (line 133) | class ConditionGroup(BaseModel):
class FieldCondition (line 138) | class FieldCondition(RootModel[Union[Condition, ConditionGroup]]):
class OmitNodeConfigFieldsOutputType (line 142) | class OmitNodeConfigFieldsOutputType(BaseModel):
class Field (line 157) | class Field(BaseModel):
class NodeConfig (line 182) | class NodeConfig(BaseModel):
class DiscriminatedNodeConfig (line 199) | class DiscriminatedNodeConfig(BaseModel):
class NodeSubConfig (line 204) | class NodeSubConfig(BaseModel):
class NodeConfigVariant (line 209) | class NodeConfigVariant(NodeSubConfig, OmitNodeConfigFieldsOutputType):
FILE: packages/backend/app/processors/components/node_config_builder.py
class BaseNodeConfigBuilder (line 14) | class BaseNodeConfigBuilder:
method __init__ (line 15) | def __init__(self):
method set_node_name (line 26) | def set_node_name(self, name: str) -> "BaseNodeConfigBuilder":
method set_processor_type (line 30) | def set_processor_type(self, processor_type: str) -> "BaseNodeConfigBu...
method set_icon (line 34) | def set_icon(self, icon: str) -> "BaseNodeConfigBuilder":
method set_output_type (line 38) | def set_output_type(self, output_type: str) -> "BaseNodeConfigBuilder":
method set_section (line 42) | def set_section(self, section: str) -> "BaseNodeConfigBuilder":
method set_help_message (line 46) | def set_help_message(self, help_message: str) -> "BaseNodeConfigBuilder":
method set_show_handles (line 50) | def set_show_handles(self, show: bool) -> "BaseNodeConfigBuilder":
method set_is_beta (line 54) | def set_is_beta(self, beta: bool) -> "NodeConfigBuilder":
method set_default_hide_output (line 58) | def set_default_hide_output(self, hide: bool) -> "NodeConfigBuilder":
class NodeConfigBuilder (line 63) | class NodeConfigBuilder(BaseNodeConfigBuilder):
method __init__ (line 64) | def __init__(self):
method set_is_dynamic (line 70) | def set_is_dynamic(self, dyna: bool) -> "NodeConfigBuilder":
method set_fields (line 74) | def set_fields(self, fields: List[Field]) -> "NodeConfigBuilder":
method add_field (line 78) | def add_field(self, field: Field) -> "NodeConfigBuilder":
method add_discriminator (line 82) | def add_discriminator(self, key, value) -> "NodeConfigBuilder":
method build (line 88) | def build(self) -> NodeConfig:
class NodeConfigVariantBuilder (line 110) | class NodeConfigVariantBuilder(BaseNodeConfigBuilder):
method __init__ (line 111) | def __init__(self):
method add_discriminator_field (line 116) | def add_discriminator_field(self, field: str) -> "NodeConfigVariantBui...
method add_sub_configuration (line 122) | def add_sub_configuration(
method build (line 128) | def build(self) -> NodeConfigVariant:
class FieldBuilder (line 145) | class FieldBuilder:
method __init__ (line 146) | def __init__(self):
method set_name (line 149) | def set_name(self, name: str) -> "FieldBuilder":
method set_label (line 153) | def set_label(self, label: str) -> "FieldBuilder":
method set_description (line 157) | def set_description(self, description: str) -> "FieldBuilder":
method set_type (line 161) | def set_type(self, field_type: str) -> "FieldBuilder":
method set_min (line 165) | def set_min(self, min: float) -> "FieldBuilder":
method set_max (line 169) | def set_max(self, max: float) -> "FieldBuilder":
method set_is_binary (line 173) | def set_is_binary(self, binary: bool) -> "FieldBuilder":
method set_placeholder (line 177) | def set_placeholder(self, placeholder: str) -> "FieldBuilder":
method set_required (line 181) | def set_required(self, required: bool) -> "FieldBuilder":
method set_options (line 185) | def set_options(self, options: List[Option]) -> "FieldBuilder":
method add_option (line 189) | def add_option(self, option: Option) -> "FieldBuilder":
method set_default_value (line 195) | def set_default_value(self, default_value: Union[str, float]) -> "Fiel...
method set_has_handle (line 199) | def set_has_handle(self, has_handle: bool) -> "FieldBuilder":
method build (line 203) | def build(self) -> Field:
FILE: packages/backend/app/processors/components/node_config_utils.py
function get_sub_configuration (line 4) | def get_sub_configuration(discriminators_values, node_config: NodeConfig...
FILE: packages/backend/app/processors/components/processor.py
class BadKeyInputIndex (line 18) | class BadKeyInputIndex(Exception):
method __init__ (line 21) | def __init__(self, message="This input key does not exists"):
class InputItem (line 26) | class InputItem(TypedDict, total=False):
class Processor (line 32) | class Processor(ABC):
method __init__ (line 66) | def __init__(self, config: Dict[str, Any]) -> None:
method cleanup (line 88) | def cleanup(self) -> None:
method process_and_update (line 94) | def process_and_update(self):
method process (line 101) | def process(self):
method cancel (line 105) | def cancel(self) -> None:
method add_observer (line 108) | def add_observer(self, observer):
method remove_observer (line 111) | def remove_observer(self, observer):
method notify (line 117) | def notify(self, event: EventType, data: ProcessorEvent):
method get_output (line 121) | def get_output(self, input_key=None) -> Optional[str]:
method set_output (line 135) | def set_output(self, value: Union[List, str]) -> None:
method get_inputs (line 144) | def get_inputs(self) -> Optional[List[InputItem]]:
method get_input_processor (line 147) | def get_input_processor(self) -> Optional["Processor"]:
method get_input_processors (line 152) | def get_input_processors(self) -> List["Processor"]:
method get_input_node_output_key (line 155) | def get_input_node_output_key(self) -> Optional[int]:
method get_input_node_output_key_by_node_name (line 162) | def get_input_node_output_key_by_node_name(
method get_input_node_output_keys (line 171) | def get_input_node_output_keys(self) -> Optional[List[int]]:
method get_input_names (line 176) | def get_input_names(self) -> Optional[List[str]]:
method get_input_names_from_config (line 181) | def get_input_names_from_config(self) -> Optional[List[str]]:
method get_input_by_name (line 184) | def get_input_by_name(
method add_input_processor (line 209) | def add_input_processor(self, input_processor: "Processor") -> None:
method set_storage_strategy (line 212) | def set_storage_strategy(self, storage_strategy: "StorageStrategy") ->...
method __str__ (line 215) | def __str__(self) -> str:
method get_context (line 218) | def get_context(self) -> Optional["ProcessorContext"]:
method get_storage (line 221) | def get_storage(self) -> Optional["StorageStrategy"]:
method has_dynamic_behavior (line 224) | def has_dynamic_behavior(self) -> bool:
class BasicProcessor (line 228) | class BasicProcessor(Processor):
method __init__ (line 229) | def __init__(self, config):
method cancel (line 232) | def cancel(self):
class ContextAwareProcessor (line 236) | class ContextAwareProcessor(Processor):
method __init__ (line 237) | def __init__(self, config, context: ProcessorContext = None):
FILE: packages/backend/app/processors/context/processor_context.py
class ProcessorContext (line 6) | class ProcessorContext(ABC):
method get_context (line 8) | def get_context(self) -> "ProcessorContext":
method get_current_user_id (line 12) | def get_current_user_id(self) -> Optional[str]:
method get_session_id (line 16) | def get_session_id(self) -> Optional[str]:
method get_parameter_names (line 20) | def get_parameter_names(self) -> List[str]:
method get_value (line 29) | def get_value(self, name) -> Optional[str]:
FILE: packages/backend/app/processors/context/processor_context_flask_request.py
class ProcessorContextFlaskRequest (line 8) | class ProcessorContextFlaskRequest(ProcessorContext):
method __init__ (line 11) | def __init__(self, g_context=None, session_data=None, session_id=None):
method get_context (line 16) | def get_context(self) -> "ProcessorContext":
method get_current_user_id (line 20) | def get_current_user_id(self) -> str:
method get_session_id (line 24) | def get_session_id(self) -> str:
method get_parameter_names (line 27) | def get_parameter_names(self) -> List[str]:
method get_value (line 34) | def get_value(self, name) -> Optional[str]:
FILE: packages/backend/app/processors/exceptions.py
class LightException (line 1) | class LightException(Exception):
method __init__ (line 2) | def __init__(
FILE: packages/backend/app/processors/factory/processor_factory.py
class ProcessorFactory (line 7) | class ProcessorFactory(ABC):
method create_processor (line 9) | def create_processor(
method load_processors (line 18) | def load_processors(self):
FILE: packages/backend/app/processors/factory/processor_factory_iter_modules.py
class ProcessorFactoryIterModules (line 12) | class ProcessorFactoryIterModules(ProcessorFactory):
method __init__ (line 13) | def __init__(self):
method register_processor (line 16) | def register_processor(self, processor_type, processor_class):
method create_processor (line 19) | def create_processor(self, config, context_data=None, storage_strategy...
method load_processors (line 37) | def load_processors(self):
method _load_recursive (line 40) | def _load_recursive(self, package_name):
FILE: packages/backend/app/processors/launcher/abstract_topological_processor_launcher.py
class AbstractTopologicalProcessorLauncher (line 18) | class AbstractTopologicalProcessorLauncher(ProcessorLauncher):
method __init__ (line 31) | def __init__(
method set_context (line 43) | def set_context(self, context: ProcessorContext):
method add_observer (line 46) | def add_observer(self, observer):
method _load_config_data (line 49) | def _load_config_data(self, fileName):
method _link_processors (line 54) | def _link_processors(self, processors):
method load_processors (line 68) | def load_processors(self, config_data):
method get_node_by_name (line 79) | def get_node_by_name(self, config_data, node_name):
method notify_error (line 95) | def notify_error(self, processor, e):
method notify_streaming (line 106) | def notify_streaming(self, processor, output, isDone=False, duration=0):
method notify_progress (line 119) | def notify_progress(self, processor, output, isDone=False, duration=0):
method notify_current_node_running (line 132) | def notify_current_node_running(self, processor):
method load_required_processors (line 145) | def load_required_processors(self, config_data, node_name):
method get_related_config_data (line 192) | def get_related_config_data(self, config_data, node_name, visited):
method load_processors_for_node (line 215) | def load_processors_for_node(self, config_data, node_name):
method launch_processors (line 222) | def launch_processors(self, processors):
method launch_processors_for_node (line 226) | def launch_processors_for_node(self, processors, node_name=None):
method notify_observers (line 229) | def notify_observers(self, event, data):
FILE: packages/backend/app/processors/launcher/async_processor_launcher.py
class AsyncProcessorLauncher (line 24) | class AsyncProcessorLauncher(AbstractTopologicalProcessorLauncher, Obser...
class NodeState (line 35) | class NodeState(Enum):
class Node (line 41) | class Node:
method __init__ (line 42) | def __init__(self, id: str, parent_ids: List[str], processor: Proces...
method run (line 50) | def run(self):
method get_processor (line 69) | def get_processor(self):
method get_input_processor_names (line 72) | def get_input_processor_names(self, processor: Processor):
method convert_processors_to_node_dict (line 77) | def convert_processors_to_node_dict(self, processors: List[Processor]):
method launch_processors (line 85) | def launch_processors(self, processors: List[Processor]):
method remove_completed_nodes (line 124) | def remove_completed_nodes(self, nodes: List[Node]):
method can_run (line 131) | def can_run(self, node: Node, nodes: List[Node]):
method launch_processors_for_node (line 135) | def launch_processors_for_node(self, processors: List[Processor], node...
method run_processor (line 144) | def run_processor(self, processor: "Processor"):
method run_node (line 158) | def run_node(self, node: Node):
method notify (line 174) | def notify(self, event: EventType, data: ProcessorEvent):
FILE: packages/backend/app/processors/launcher/basic_processor_launcher.py
class BasicProcessorLauncher (line 4) | class BasicProcessorLauncher(AbstractTopologicalProcessorLauncher):
method launch_processors (line 11) | def launch_processors(self, processors):
method launch_processors_for_node (line 22) | def launch_processors_for_node(self, processors, node_name=None):
FILE: packages/backend/app/processors/launcher/event_type.py
class EventType (line 4) | class EventType(Enum):
FILE: packages/backend/app/processors/launcher/processor_event.py
class ProcessorEvent (line 6) | class ProcessorEvent:
FILE: packages/backend/app/processors/launcher/processor_launcher.py
class ProcessorLauncher (line 6) | class ProcessorLauncher(ABC):
method load_processors (line 8) | def load_processors(self, config_data):
method load_processors_for_node (line 12) | def load_processors_for_node(self, config_data, node_name):
method launch_processors (line 16) | def launch_processors(self, processor):
method launch_processors_for_node (line 20) | def launch_processors_for_node(self, processors, node_name):
method set_context (line 24) | def set_context(self, context: ProcessorContext):
FILE: packages/backend/app/processors/launcher/processor_launcher_event.py
class ProcessorLauncherEvent (line 8) | class ProcessorLauncherEvent:
FILE: packages/backend/app/processors/observer/observer.py
class Observer (line 4) | class Observer(ABC):
method notify (line 6) | def notify(self, event, data):
FILE: packages/backend/app/processors/observer/socketio_event_emitter.py
class SocketIOEventEmitter (line 9) | class SocketIOEventEmitter(Observer):
method notify (line 27) | def notify(self, event: EventType, data: ProcessorLauncherEvent):
FILE: packages/backend/app/processors/utils/retry_mixin.py
class RetryMixin (line 5) | class RetryMixin:
method run_with_retry (line 6) | def run_with_retry(self, func, *args, **kwargs):
FILE: packages/backend/app/root_injector.py
class ProcessorFactoryModule (line 20) | class ProcessorFactoryModule(Module):
method configure (line 21) | def configure(self, binder: Binder):
class StorageModule (line 29) | class StorageModule(Module):
method configure (line 30) | def configure(self, binder: Binder):
class ProcessorLauncherModule (line 39) | class ProcessorLauncherModule(Module):
method configure (line 40) | def configure(self, binder: Binder):
function create_application_injector (line 47) | def create_application_injector() -> Injector:
function get_root_injector (line 62) | def get_root_injector() -> Injector:
function refresh_root_injector (line 66) | def refresh_root_injector() -> None:
FILE: packages/backend/app/storage/local_storage_strategy.py
class LocalStorageStrategy (line 12) | class LocalStorageStrategy(StorageStrategy):
method save (line 18) | def save(self, filename: str, data: Any) -> str:
method get_url (line 29) | def get_url(self, filename: str) -> str:
method get_file (line 33) | def get_file(self, filename: str) -> bytes:
FILE: packages/backend/app/storage/s3_storage_strategy.py
class S3StorageStrategy (line 15) | class S3StorageStrategy(CloudStorageStrategy):
method __init__ (line 23) | def __init__(self):
method save (line 45) | def save(self, filename: str, data: Any, bucket_name: str = None) -> str:
method get_upload_link (line 59) | def get_upload_link(self, filename=None) -> str:
method get_url (line 104) | def get_url(self, filename: str, bucket_name: str = None) -> str:
method get_file (line 122) | def get_file(self, filename: str, bucket_name: str = None) -> bytes:
method upload_and_get_link (line 138) | def upload_and_get_link(self, filename: str, bucket_name: str = None) ...
FILE: packages/backend/app/storage/storage_strategy.py
class StorageStrategy (line 5) | class StorageStrategy(ABC):
method save (line 10) | def save(self, filename: str, data: Any) -> Optional[str]:
method get_url (line 14) | def get_url(self, filename: str) -> str:
method get_file (line 18) | def get_file(self, filename: str, *args) -> bytes:
class CloudStorageStrategy (line 22) | class CloudStorageStrategy(StorageStrategy):
method get_upload_link (line 24) | def get_upload_link(self, filename: str) -> str:
FILE: packages/backend/app/tasks/green_pool_task_manager.py
function register_task_processor (line 18) | def register_task_processor(task_name, processor_func, max_concurrent_ta...
function process_task (line 32) | def process_task(task_name, task_data, task_result_queue):
function add_task (line 46) | def add_task(task_name, task_data, result_queue):
FILE: packages/backend/app/tasks/single_thread_tasks/browser/async_browser_task.py
function accept_cookies (line 13) | async def accept_cookies(page, cookies_consent_label, timeout=5000):
function strip_attributes (line 29) | def strip_attributes(html):
function fetch_url_content (line 33) | async def fetch_url_content(
function scrapping_task (line 107) | async def scrapping_task(task_data, browser_manager):
function add_task (line 130) | async def add_task(task_data, result_queue):
function browser_task_worker (line 134) | async def browser_task_worker():
function start_event_loop (line 159) | def start_event_loop():
function stop_event_loop (line 167) | def stop_event_loop():
function add_task_sync (line 173) | def add_task_sync(task_data, result_queue):
FILE: packages/backend/app/tasks/single_thread_tasks/browser/browser_task.py
function accept_cookies (line 12) | def accept_cookies(page, cookies_consent_label, timeout=5000):
function strip_attributes (line 26) | def strip_attributes(html):
function fetch_url_content (line 30) | def fetch_url_content(
function scrapping_task (line 74) | def scrapping_task(task_data, browser_manager):
function add_task_sync (line 94) | def add_task_sync(task_data, result_queue):
function browser_thread_func (line 98) | def browser_thread_func(task_queue):
function stop_browser_thread (line 119) | def stop_browser_thread():
FILE: packages/backend/app/tasks/task_exception.py
class TaskAlreadyRegisteredError (line 1) | class TaskAlreadyRegisteredError(Exception):
method __init__ (line 4) | def __init__(self, task_name):
FILE: packages/backend/app/tasks/task_manager.py
function register_task_processor (line 17) | def register_task_processor(task_name, processor_func, max_concurrent_ta...
function process_task (line 31) | def process_task(task_name, task_data, task_result_queue):
function add_task (line 45) | def add_task(task_name, task_data, result_queue):
FILE: packages/backend/app/tasks/task_utils.py
function wait_for_result (line 6) | def wait_for_result(queue, timeout=120, initial_sleep=0.1, max_sleep=5.0):
FILE: packages/backend/app/tasks/thread_pool_task_manager.py
function register_task_processor (line 18) | def register_task_processor(task_name, processor_func, max_concurrent_ta...
function process_task (line 32) | def process_task(task_name, task_data, task_result_queue):
function add_task (line 46) | def add_task(task_name, task_data, result_queue):
FILE: packages/backend/app/utils/node_extension_utils.py
function _load_dynamic_extension (line 21) | def _load_dynamic_extension(processor_type, data):
function _load_all_extension_schemas (line 42) | def _load_all_extension_schemas():
function filter_extensions (line 68) | def filter_extensions(extensions):
function get_extensions (line 78) | def get_extensions():
function get_dynamic_extension_config (line 88) | def get_dynamic_extension_config(processor_type, data):
FILE: packages/backend/app/utils/openapi_client.py
class Client (line 7) | class Client:
method __init__ (line 9) | def __init__(
method post (line 23) | def post(
method get (line 67) | def get(
method pooling (line 99) | def pooling(
FILE: packages/backend/app/utils/openapi_converter.py
class OpenAPIConverter (line 11) | class OpenAPIConverter:
method __init__ (line 12) | def __init__(self):
method convert_enum_to_options (line 15) | def convert_enum_to_options(self, enum, defaultValue):
method convert_properties_to_fields (line 23) | def convert_properties_to_fields(self, schema):
method convert_schema_to_node_config (line 75) | def convert_schema_to_node_config(self, schema):
FILE: packages/backend/app/utils/openapi_reader.py
class OpenAPIReader (line 6) | class OpenAPIReader:
method __init__ (line 9) | def __init__(self, file_path):
method get_api_key_name (line 14) | def get_api_key_name(self):
method get_servers (line 23) | def get_servers(self):
method get_all_paths (line 27) | def get_all_paths(self):
method get_all_paths_names (line 45) | def get_all_paths_names(self):
method get_params_for_path (line 48) | def get_params_for_path(self, path, method):
method get_path_accept (line 56) | def get_path_accept(self, path, method):
method get_response_content_type (line 64) | def get_response_content_type(self, path, method):
method get_request_schema_for_path (line 71) | def get_request_schema_for_path(self, path, method, content_type=None):
method merge_schemas (line 93) | def merge_schemas(base_schema, additions):
method resolve_schema (line 101) | def resolve_schema(self, schema):
method resolve_ref (line 155) | def resolve_ref(self, ref):
method get_response_schema_for_path (line 164) | def get_response_schema_for_path(self, path, method, content_type=None):
function resolve_references (line 185) | def resolve_references(schema, root):
FILE: packages/backend/app/utils/processor_utils.py
function create_empty_tmp_file (line 8) | def create_empty_tmp_file(prefix="tmp"):
function create_temp_file_with_str_content (line 15) | def create_temp_file_with_str_content(content):
function create_temp_file_with_bytes_content (line 22) | def create_temp_file_with_bytes_content(content):
function get_file_size_from_url (line 29) | def get_file_size_from_url(url):
function get_file_size_from_url_in_mb (line 40) | def get_file_size_from_url_in_mb(url):
function get_max_file_size_in_mb (line 47) | def get_max_file_size_in_mb():
function is_s3_file (line 51) | def is_s3_file(url):
function is_accepted_url_file_size (line 60) | def is_accepted_url_file_size(url):
function is_valid_url (line 67) | def is_valid_url(url):
function file_downloable_check (line 74) | def file_downloable_check(url):
function download_file_as_binary (line 85) | def download_file_as_binary(url):
function stream_download_file_as_binary (line 97) | def stream_download_file_as_binary(url):
FILE: packages/backend/app/utils/replicate_utils.py
function get_replicate_models (line 18) | def get_replicate_models(cursor: str = None):
function get_replicate_collections (line 48) | def get_replicate_collections():
function get_replicate_collection_models (line 67) | def get_replicate_collection_models(collection_slug: str, cursor=None):
function get_highlighted_models_info (line 99) | def get_highlighted_models_info():
function get_model_info (line 122) | def get_model_info(model_id: str):
function get_model_openapi_schema (line 146) | def get_model_openapi_schema(model_id: str):
function get_input_schema_from_open_API_schema (line 173) | def get_input_schema_from_open_API_schema(openapi_schema):
function get_output_schema_from_open_API_schema (line 178) | def get_output_schema_from_open_API_schema(openapi_schema):
FILE: packages/backend/app/utils/web_scrapping/async_browser_manager.py
class AsyncBrowserManager (line 13) | class AsyncBrowserManager:
method __init__ (line 14) | def __init__(self):
method initialize_browser (line 23) | async def initialize_browser(self):
method unzip_extension (line 28) | def unzip_extension(zip_path, extract_to):
method launch_context (line 32) | async def launch_context(self):
method initialize_pool (line 46) | async def initialize_pool(self):
method check_extensions_loaded (line 53) | async def check_extensions_loaded(self, take_extensions_screenshot=Fal...
method close_browser (line 82) | async def close_browser(self):
method _recycle_tab (line 88) | async def _recycle_tab(self, page, context):
method get_tab (line 98) | async def get_tab(self, timeout=10):
method release_tab (line 115) | async def release_tab(self, page, context):
FILE: packages/backend/app/utils/web_scrapping/browser_manager.py
class BrowserManager (line 10) | class BrowserManager:
method __init__ (line 11) | def __init__(self, pool_size=None, max_usage=None):
method initialize_browser (line 20) | def initialize_browser(self):
method initialize_pool (line 26) | def initialize_pool(self):
method close_browser (line 33) | def close_browser(self):
method get_browser (line 39) | def get_browser(self):
method _recycle_tab (line 44) | def _recycle_tab(self, page, context):
method get_tab (line 51) | def get_tab(self, timeout=10):
method release_tab (line 68) | def release_tab(self, page, context):
FILE: packages/backend/tests/unit/test_processor_factory.py
class DummyProcessor (line 8) | class DummyProcessor(BasicProcessor):
method process (line 11) | def process(self):
method cancel (line 14) | def cancel(self):
class APIDummyProcessor (line 18) | class APIDummyProcessor(ContextAwareProcessor):
method __init__ (line 21) | def __init__(self, config, context=None):
method process (line 25) | def process(self):
method cancel (line 28) | def cancel(self):
class TestProcessorFactory (line 32) | class TestProcessorFactory(unittest.TestCase):
method setUp (line 33) | def setUp(self):
method test_register_and_create_simple_processor (line 36) | def test_register_and_create_simple_processor(self):
method test_create_unknown_processor_raises_exception (line 44) | def test_create_unknown_processor_raises_exception(self):
method test_create_processor_with_api_context_data (line 50) | def test_create_processor_with_api_context_data(self):
FILE: packages/backend/tests/unit/test_processor_launcher.py
class NoInputMock (line 20) | class NoInputMock(MagicMock):
method __getattr__ (line 21) | def __getattr__(self, name):
class TestProcessorLauncher (line 29) | class TestProcessorLauncher(unittest.TestCase):
method test_load_config_data_valid_file (line 30) | def test_load_config_data_valid_file(self):
method test_link_processors_valid (line 39) | def test_link_processors_valid(self):
FILE: packages/backend/tests/unit/test_stable_diffusion_stabilityai_prompt_processor.py
class TestStableDiffusionStabilityAIPromptProcessor (line 12) | class TestStableDiffusionStabilityAIPromptProcessor(unittest.TestCase):
method get_default_valid_config (line 14) | def get_default_valid_config():
method test_process_returns_valid_image_url_on_successful_api_response (line 25) | def test_process_returns_valid_image_url_on_successful_api_response(
method test_process_transmit_prompt_to_api (line 61) | def test_process_transmit_prompt_to_api(self, mock_post):
method test_when_linked_to_input_node_transmits_input_node_output_to_the_api (line 91) | def test_when_linked_to_input_node_transmits_input_node_output_to_the_...
FILE: packages/backend/tests/utils/openai_mock_utils.py
function create_mocked_openai_response (line 4) | def create_mocked_openai_response(
FILE: packages/backend/tests/utils/processor_context_mock.py
class ProcessorContextMock (line 6) | class ProcessorContextMock(ProcessorContext):
method __init__ (line 7) | def __init__(self, api_key, user_id=0, session_id=0) -> None:
method get_context (line 13) | def get_context(self):
method get_current_user_id (line 16) | def get_current_user_id(self):
method get_session_id (line 19) | def get_session_id(self):
method get_parameter_names (line 22) | def get_parameter_names(self) -> List[str]:
method get_value (line 25) | def get_value(self, name):
method is_using_personal_keys (line 30) | def is_using_personal_keys(self, source_name):
FILE: packages/backend/tests/utils/processor_factory_mock.py
class ProcessorFactoryMock (line 19) | class ProcessorFactoryMock(ProcessorFactoryIterModules):
method __init__ (line 31) | def __init__(
method create_mock_processor (line 46) | def create_mock_processor(
method create_processor (line 131) | def create_processor(self, config, context=None, storage_strategy=None):
FILE: packages/ui/src/App.tsx
type AppProps (line 19) | interface AppProps {
function loadAppData (line 75) | async function loadAppData() {
FILE: packages/ui/src/api/cache/cacheManager.ts
type CacheItem (line 3) | interface CacheItem<T> {
constant DEFAULT_TTL (line 9) | const DEFAULT_TTL = 3600 * 1000;
constant DEFAULT_NB_ELEMENTS_TO_REMOVE (line 10) | const DEFAULT_NB_ELEMENTS_TO_REMOVE = 5;
constant DISPENSABLE_CACHE_PREFIX (line 11) | const DISPENSABLE_CACHE_PREFIX = "dispensable_cache";
function generateCacheKey (line 13) | function generateCacheKey(functionName: string, ...args: any[]): string {
function setCache (line 18) | function setCache(key: string, data: any, ttl?: number) {
function getCache (line 37) | function getCache<T>(key: string): T | undefined {
function clearOldCacheItems (line 56) | function clearOldCacheItems() {
FILE: packages/ui/src/api/cache/withCache.ts
type AsyncFunction (line 3) | type AsyncFunction<T extends any[], N> = (...args: T) => Promise<N>;
type Params (line 5) | type Params<T> = T extends (...args: infer U) => any ? U : never;
type CacheOptions (line 7) | interface CacheOptions {
function withCache (line 23) | async function withCache<T extends any[], N>(
FILE: packages/ui/src/api/nodes.ts
function getNodeExtensions (line 3) | async function getNodeExtensions() {
function getDynamicConfig (line 14) | async function getDynamicConfig(processorType: string, data: any) {
function getModels (line 29) | async function getModels(providerName: string) {
function getModelConfig (line 40) | async function getModelConfig(providerName: string, id: string) {
FILE: packages/ui/src/api/parameters.ts
function getParameters (line 3) | async function getParameters() {
FILE: packages/ui/src/api/replicateModels.ts
type GetCollectionModelsResponse (line 4) | interface GetCollectionModelsResponse {
function getCollections (line 9) | async function getCollections() {
function getPublicModels (line 20) | async function getPublicModels(cursor?: string) {
function getHighlightedModels (line 42) | async function getHighlightedModels() {
function getCollectionModels (line 53) | async function getCollectionModels(
function getModelConfig (line 77) | async function getModelConfig(model: string, processorType: string) {
FILE: packages/ui/src/api/uploadFile.ts
function getUploadAndDownloadUrl (line 4) | async function getUploadAndDownloadUrl(filename?: string) {
function uploadWithS3Link (line 15) | async function uploadWithS3Link(s3UploadData: any, file: File) {
FILE: packages/ui/src/components/Flow.tsx
type FlowProps (line 44) | interface FlowProps {
function getAllEdgeTypes (line 59) | function getAllEdgeTypes() {
function onProgress (line 139) | function onProgress(data: FlowOnProgressEventData) {
function onError (line 167) | function onError(data: FlowOnErrorEventData) {
function onCurrentNodeRunning (line 180) | function onCurrentNodeRunning(data: FlowOnCurrentNodeRunningEventData) {
function handleChangeFlow (line 291) | function handleChangeFlow(nodes: Node[], edges: Edge[]): void {
FILE: packages/ui/src/components/bars/Sidebar.tsx
type SidebarProps (line 15) | interface SidebarProps {
FILE: packages/ui/src/components/bars/dnd-sidebar/DnDSidebar.tsx
constant HIDE_SIDEBAR_ANIMATION_DURATION (line 23) | const HIDE_SIDEBAR_ANIMATION_DURATION = 300;
type DnDSidebarProps (line 25) | interface DnDSidebarProps {
function nodeMatchesSearch (line 72) | function nodeMatchesSearch(node: DnDNode, query: string): boolean {
function subnodeMatchesSearch (line 78) | function subnodeMatchesSearch(subnode: SubnodeData, query: string): bool...
function filterSubnodes (line 84) | function filterSubnodes(
function filterNode (line 105) | function filterNode(node: DnDNode, searchQuery: string): DnDNode | null {
function renderNodeWithSubnode (line 127) | function renderNodeWithSubnode(nodeIndex: number, node: DnDNode) {
FILE: packages/ui/src/components/bars/dnd-sidebar/DraggableNode.tsx
type DraggableNodeProps (line 13) | interface DraggableNodeProps extends DraggableNodeAdditionnalData {
type NodeBadgeProps (line 18) | interface NodeBadgeProps {
function showDragAndDropHelper (line 49) | function showDragAndDropHelper() {
FILE: packages/ui/src/components/bars/dnd-sidebar/DraggableNodeWithSubnodes.tsx
type DraggableNodeWithSubnodesProps (line 9) | interface DraggableNodeWithSubnodesProps {
FILE: packages/ui/src/components/bars/dnd-sidebar/GripIcon.tsx
function GripIcon (line 3) | function GripIcon(props: ComponentProps<"svg">) {
FILE: packages/ui/src/components/bars/dnd-sidebar/Section.tsx
type SidebarSectionProps (line 11) | interface SidebarSectionProps {
function SidebarSection (line 17) | function SidebarSection({ section, index, children }: SidebarSectionProp...
FILE: packages/ui/src/components/bars/dnd-sidebar/types.ts
type DraggableNodeAdditionnalData (line 1) | interface DraggableNodeAdditionnalData {
FILE: packages/ui/src/components/buttons/ButtonRunAll.tsx
type ButtonRunAllProps (line 7) | interface ButtonRunAllProps {
FILE: packages/ui/src/components/buttons/ConfigurationButton.tsx
type RightButtonProps (line 5) | interface RightButtonProps {
FILE: packages/ui/src/components/edges/buttonEdge.tsx
function ButtonEdge (line 11) | function ButtonEdge({
FILE: packages/ui/src/components/handles/HandleWrapper.tsx
type LinkedHandlePositions (line 8) | type LinkedHandlePositions = {
type HandleWrapperProps (line 12) | type HandleWrapperProps = {
type PopupProps (line 107) | type PopupProps = {
FILE: packages/ui/src/components/inputs/InputWithButton.tsx
type InputWithButtonProps (line 3) | interface InputWithButtonProps {
FILE: packages/ui/src/components/nodes/AIDataSplitterNode.tsx
type AIDataSplitterNodeData (line 17) | interface AIDataSplitterNodeData extends GenericNodeData {
type AIDataSplitterNodeProps (line 28) | interface AIDataSplitterNodeProps extends NodeProps {
FILE: packages/ui/src/components/nodes/DisplayNode.tsx
type DisplayNodeData (line 21) | interface DisplayNodeData extends GenericNodeData {
type DisplayNodeProps (line 33) | interface DisplayNodeProps extends NodeProps {
type Dimensions (line 37) | interface Dimensions {
function ResizeIcon (line 42) | function ResizeIcon() {
FILE: packages/ui/src/components/nodes/FileUploadNode.tsx
type GenericNodeProps (line 32) | interface GenericNodeProps extends NodeProps {
type FileChoice (line 38) | type FileChoice = "url" | "upload";
function uploadFile (line 94) | async function uploadFile(files: File[]) {
function processFiles (line 102) | async function processFiles(files: File[]) {
function handleFileChoiceSelected (line 183) | function handleFileChoiceSelected(choice: FileChoice | null) {
FILE: packages/ui/src/components/nodes/GenericNode.tsx
type GenericNodeProps (line 43) | interface GenericNodeProps extends NodeProps {
function handleNodeDataChange (line 202) | function handleNodeDataChange(data: GenericNodeData) {
function handleNodeFieldChange (line 210) | function handleNodeFieldChange(
function updateConfigWithDiscriminator (line 241) | function updateConfigWithDiscriminator(nodeData: NodeData) {
function setDefaultOptions (line 251) | function setDefaultOptions() {
function handleChangeHandlePosition (line 260) | function handleChangeHandlePosition(
function updateConfig (line 274) | function updateConfig(config: NodeConfig) {
function updateConfigVariant (line 289) | function updateConfigVariant(variantConf: NodeSubConfig) {
function handleGetDynamicConfig (line 312) | async function handleGetDynamicConfig() {
function propsAreEqual (line 417) | function propsAreEqual(
FILE: packages/ui/src/components/nodes/NodeHelpPopover.tsx
type NodeHelpPopoverProps (line 5) | type NodeHelpPopoverProps = {
function NodeHelpPopover (line 12) | function NodeHelpPopover({
FILE: packages/ui/src/components/nodes/NodeWrapper.tsx
type NodeWrapperProps (line 12) | type NodeWrapperProps = {
type NodeActions (line 17) | type NodeActions =
function NodeWrapper (line 27) | function NodeWrapper({ children, nodeId }: NodeWrapperProps) {
FILE: packages/ui/src/components/nodes/ReplicateNode.tsx
type ReplicateNodeData (line 20) | interface ReplicateNodeData extends NodeData {
type DynamicFieldsProps (line 24) | interface DynamicFieldsProps extends NodeProps {
function ReplicateNode (line 28) | function ReplicateNode({
FILE: packages/ui/src/components/nodes/TransitionNode.tsx
type TransitionNodeData (line 11) | interface TransitionNodeData extends GenericNodeData {
type TransitionNodeProps (line 23) | interface TransitionNodeProps extends NodeProps {
FILE: packages/ui/src/components/nodes/node-button/InputNameBar.tsx
type InputNameBarProps (line 6) | interface InputNameBarProps {
function InputNameBar (line 15) | function InputNameBar({
FILE: packages/ui/src/components/nodes/node-button/NodePlayButton.tsx
type NodePlayButtonProps (line 8) | interface NodePlayButtonProps {
function getIconComponent (line 68) | function getIconComponent(
FILE: packages/ui/src/components/nodes/node-input/FileUploadField.tsx
type UploadInfo (line 16) | interface UploadInfo {
type FileUploadFieldProps (line 21) | interface FileUploadFieldProps {
FILE: packages/ui/src/components/nodes/node-input/ImageMaskCreator.tsx
type ImageMaskCreatorProps (line 8) | interface ImageMaskCreatorProps {
FILE: packages/ui/src/components/nodes/node-input/ImageMaskCreatorField.tsx
type ImageMaskCreatorFieldProps (line 14) | interface ImageMaskCreatorFieldProps {
function ImageMaskCreatorField (line 20) | function ImageMaskCreatorField({
FILE: packages/ui/src/components/nodes/node-input/ImageMaskCreatorFieldFlowAware.tsx
type ImageMaskCreatorFieldProps (line 4) | interface ImageMaskCreatorFieldProps {
function ImageMaskCreatorFieldFlowAware (line 19) | function ImageMaskCreatorFieldFlowAware({
FILE: packages/ui/src/components/nodes/node-input/KeyValueInputList.tsx
type KeyValuePair (line 7) | interface KeyValuePair {
type KeyValueInputListProps (line 12) | interface KeyValueInputListProps {
FILE: packages/ui/src/components/nodes/node-input/NodeField.tsx
type NodeFieldProps (line 9) | interface NodeFieldProps<T> {
function NodeField (line 20) | function NodeField<
FILE: packages/ui/src/components/nodes/node-input/NodeTextField.tsx
type NodeTextFieldProps (line 5) | interface NodeTextFieldProps {
function NodeTextField (line 17) | function NodeTextField({
FILE: packages/ui/src/components/nodes/node-input/NodeTextarea.tsx
type NodeTextareaProps (line 10) | interface NodeTextareaProps {
function NodeTextarea (line 21) | function NodeTextarea({
FILE: packages/ui/src/components/nodes/node-input/OutputRenderer.tsx
type OutputRendererProps (line 14) | interface OutputRendererProps {
function OutputRenderer (line 21) | function OutputRenderer({
FILE: packages/ui/src/components/nodes/node-input/TextAreaPopupWrapper.tsx
type TextAreaPopupWrapperProps (line 6) | interface TextAreaPopupWrapperProps {
function TextAreaPopupWrapper (line 13) | function TextAreaPopupWrapper({
FILE: packages/ui/src/components/nodes/node-output/AudioUrlOutput.tsx
type AudioUrlOutputProps (line 8) | interface AudioUrlOutputProps {
FILE: packages/ui/src/components/nodes/node-output/ImageBase64Output.tsx
type ImageBase64OutputProps (line 5) | interface ImageBase64OutputProps {
function arePropsEqual (line 77) | function arePropsEqual(
FILE: packages/ui/src/components/nodes/node-output/ImageUrlOutput.tsx
type ImageUrlOutputProps (line 8) | interface ImageUrlOutputProps {
FILE: packages/ui/src/components/nodes/node-output/MarkdownOutput.tsx
type MarkdownOutputProps (line 16) | interface MarkdownOutputProps {
method code (line 75) | code(props: any) {
FILE: packages/ui/src/components/nodes/node-output/NodeOutput.tsx
type NodeOutputProps (line 12) | interface NodeOutputProps {
function NodeOutput (line 18) | function NodeOutput({
FILE: packages/ui/src/components/nodes/node-output/OutputDisplay.tsx
type OutputDisplayProps (line 15) | interface OutputDisplayProps {
function OutputDisplay (line 23) | function OutputDisplay({
FILE: packages/ui/src/components/nodes/node-output/PdfUrlOutput.tsx
type PdfUrlOutputProps (line 6) | interface PdfUrlOutputProps {
FILE: packages/ui/src/components/nodes/node-output/ThreeDimensionalUrlOutput.tsx
type ThreeDimensionalUrlOutputProps (line 18) | interface ThreeDimensionalUrlOutputProps {
FILE: packages/ui/src/components/nodes/node-output/VideoUrlOutput.tsx
type VideoUrlOutputProps (line 7) | interface VideoUrlOutputProps {
FILE: packages/ui/src/components/nodes/node-output/outputUtils.ts
function getOutputExtension (line 34) | function getOutputExtension(output: string): OutputType {
function getFileTypeFromUrl (line 49) | function getFileTypeFromUrl(url: string) {
FILE: packages/ui/src/components/nodes/types/node.ts
type NodeInput (line 3) | interface NodeInput {
type NodeAppearance (line 9) | interface NodeAppearance {
type NodeData (line 15) | interface NodeData {
type GenericNodeData (line 31) | interface GenericNodeData extends NodeData {
FILE: packages/ui/src/components/nodes/utils/HintComponent.tsx
type HintComponentProps (line 4) | interface HintComponentProps {
FILE: packages/ui/src/components/nodes/utils/ImageModal.tsx
type ImageModalProps (line 4) | interface ImageModalProps {
function ImageModal (line 9) | function ImageModal({ src, alt, onClose }: ImageModalProps) {
FILE: packages/ui/src/components/nodes/utils/ImageZoomable.tsx
type ImageZoomableProps (line 5) | interface ImageZoomableProps {
function ImageZoomable (line 10) | function ImageZoomable({ src, alt }: ImageZoomableProps) {
FILE: packages/ui/src/components/nodes/utils/NodeHelp.tsx
type UrlWithLabel (line 7) | type UrlWithLabel = {
type NodeHelpData (line 11) | type NodeHelpData = {
type NodeHelpProps (line 17) | interface NodeHelpProps {
function NodeHelp (line 22) | function NodeHelp({ data, onClose }: NodeHelpProps) {
FILE: packages/ui/src/components/nodes/utils/NodeIcons.tsx
constant ICON_MAP (line 34) | const ICON_MAP: { [key: string]: FC } = {
FILE: packages/ui/src/components/nodes/utils/TextareaModal.tsx
type TextareaModalProps (line 7) | interface TextareaModalProps {
function TextareaModal (line 13) | function TextareaModal({
FILE: packages/ui/src/components/players/VideoJS.tsx
type VideoJSProps (line 9) | interface VideoJSProps {
FILE: packages/ui/src/components/popups/ConfirmPopup.tsx
type ConfirmPopupProps (line 4) | interface ConfirmPopupProps {
FILE: packages/ui/src/components/popups/DefaultPopup.tsx
type DefaultPopupWrapperProps (line 5) | interface DefaultPopupWrapperProps {
function DefaultPopupWrapper (line 14) | function DefaultPopupWrapper({
FILE: packages/ui/src/components/popups/HelpPopup.tsx
type HelpPopupProps (line 7) | interface HelpPopupProps {
type HelpArticle (line 12) | interface HelpArticle {
function selectArticle (line 29) | function selectArticle(item: HelpArticle) {
function resetSelectedArticle (line 33) | function resetSelectedArticle() {
FILE: packages/ui/src/components/popups/UserMessagePopup.tsx
type MessageType (line 4) | enum MessageType {
type UserMessage (line 10) | interface UserMessage {
type PopupProps (line 16) | interface PopupProps {
function UserMessagePopup (line 23) | function UserMessagePopup(props: PopupProps) {
FILE: packages/ui/src/components/popups/config-popup/AppParameters.tsx
function AppParameters (line 8) | function AppParameters() {
FILE: packages/ui/src/components/popups/config-popup/ConfigPopup.tsx
type ConfigPopupProps (line 10) | interface ConfigPopupProps {
FILE: packages/ui/src/components/popups/config-popup/DisplayParameters.tsx
function DisplayParameters (line 14) | function DisplayParameters() {
FILE: packages/ui/src/components/popups/config-popup/ParametersFields.tsx
type ParameterFieldsProps (line 5) | interface ParameterFieldsProps {
FILE: packages/ui/src/components/popups/config-popup/UserParameters.tsx
function UserParameters (line 16) | function UserParameters() {
FILE: packages/ui/src/components/popups/config-popup/configMetadata.ts
type FieldMetadata (line 2) | interface FieldMetadata {
type ConfigMetadata (line 9) | interface ConfigMetadata {
type AppConfig (line 13) | interface AppConfig {
FILE: packages/ui/src/components/popups/config-popup/parameters.ts
type ParameterDetail (line 5) | interface ParameterDetail {
type Parameters (line 12) | type Parameters = {
constant PARAMETERS_KEY_LOCAL_STORAGE (line 37) | const PARAMETERS_KEY_LOCAL_STORAGE = "parameters";
constant PARAMETER_NODES_HIDDEN_LIST_KEY_LOCAL_STORAGE (line 38) | const PARAMETER_NODES_HIDDEN_LIST_KEY_LOCAL_STORAGE = "nodes_hidden";
function updateParameters (line 40) | async function updateParameters(parameters: Parameters) {
function loadFromLocalStorage (line 48) | function loadFromLocalStorage() {
function loadParameters (line 65) | async function loadParameters() {
function getConfigParameters (line 76) | function getConfigParameters(): Parameters {
function getConfigParametersFlat (line 80) | function getConfigParametersFlat() {
function migrateOldParameters (line 99) | function migrateOldParameters() {
function loadNodesHiddenList (line 133) | function loadNodesHiddenList(): string[] {
function getNodesHiddenList (line 144) | function getNodesHiddenList(): string[] {
function saveNodesHiddenList (line 148) | function saveNodesHiddenList(nodesHiddenList: string[]) {
FILE: packages/ui/src/components/popups/select-model-popup/Model.tsx
type ModelProps (line 3) | interface ModelProps {
function Model (line 8) | function Model({ model, onValidate }: ModelProps) {
FILE: packages/ui/src/components/popups/select-model-popup/SelectModelPopup.tsx
type SelectModelPopupProps (line 20) | interface SelectModelPopupProps {
function SelectModelPopup (line 26) | function SelectModelPopup({
type ModelSectionProps (line 207) | interface ModelSectionProps {
type ModelData (line 213) | interface ModelData {
FILE: packages/ui/src/components/popups/shared/FilterGrid.tsx
type FilterItem (line 1) | type FilterItem = {
type FilterGridProps (line 6) | type FilterGridProps = {
function FilterGrid (line 12) | function FilterGrid({
FILE: packages/ui/src/components/popups/shared/Grid.tsx
type GridProps (line 3) | interface GridProps<T> {
function Grid (line 28) | function Grid<T>({
FILE: packages/ui/src/components/popups/shared/LoadMoreButton.tsx
type LoadMoreButtonProps (line 4) | interface LoadMoreButtonProps {
function LoadMoreButton (line 10) | function LoadMoreButton({
FILE: packages/ui/src/components/selectors/ActionGroup.tsx
type ActionGroupProps (line 3) | interface ActionGroupProps<T> {
type Action (line 8) | interface Action<T> {
function ActionGroup (line 17) | function ActionGroup<T>({
FILE: packages/ui/src/components/selectors/ColorSelector.tsx
type ColorSelectorProps (line 12) | interface ColorSelectorProps {
function ColorSelector (line 16) | function ColorSelector({ onChangeColor }: ColorSelectorProps) {
FILE: packages/ui/src/components/selectors/ExpandableBloc.tsx
type ExpandableBlocProps (line 4) | interface ExpandableBlocProps {
function ExpandableBloc (line 10) | function ExpandableBloc({
FILE: packages/ui/src/components/selectors/FileDropZone.tsx
type FileDropZoneProps (line 4) | interface FileDropZoneProps {
constant DEFAULT_MAX_SIZE (line 14) | const DEFAULT_MAX_SIZE = 314572800;
function FileDropZone (line 16) | function FileDropZone({
FILE: packages/ui/src/components/selectors/OptionSelector.tsx
type OptionSelectorProps (line 3) | interface OptionSelectorProps<T> {
type Option (line 10) | interface Option<T> {
function OptionSelector (line 16) | function OptionSelector<T>({
FILE: packages/ui/src/components/selectors/SelectAutocomplete.tsx
type SelectItem (line 6) | interface SelectItem<T> {
type SelectAutocompleteProps (line 11) | interface SelectAutocompleteProps<T> {
function SelectAutocomplete (line 17) | function SelectAutocomplete<T>({
FILE: packages/ui/src/components/shared/motions/EaseOut.tsx
function EaseOut (line 4) | function EaseOut({ children }: AnimationProps) {
FILE: packages/ui/src/components/shared/motions/TapScale.tsx
type TapScaleProps (line 5) | interface TapScaleProps extends AnimationProps {
function TapScale (line 9) | function TapScale({ children, scale }: TapScaleProps) {
FILE: packages/ui/src/components/shared/motions/types.ts
type AnimationProps (line 4) | interface AnimationProps {
FILE: packages/ui/src/components/side-views/CurrentNodeView.tsx
type CurrentNodeViewProps (line 11) | interface CurrentNodeViewProps {}
function setDefaultOptions (line 28) | function setDefaultOptions() {
FILE: packages/ui/src/components/side-views/JSONView.tsx
type JSONViewProps (line 26) | interface JSONViewProps {
FILE: packages/ui/src/components/tour/AppTour.tsx
type AppTourProps (line 7) | interface AppTourProps {
function preloadImages (line 25) | function preloadImages(urls: string[]) {
function AppTour (line 32) | function AppTour({ run, setRun }: AppTourProps) {
FILE: packages/ui/src/config/config.ts
constant HOST (line 1) | const HOST = import.meta.env.VITE_APP_WS_HOST || "localhost";
constant WS_PORT (line 2) | const WS_PORT = import.meta.env.VITE_APP_WS_PORT || 5000;
constant REST_API_PORT (line 3) | const REST_API_PORT = import.meta.env.VITE_APP_API_REST_PORT || 5000;
constant USE_HTTPS (line 4) | const USE_HTTPS = import.meta.env.VITE_APP_USE_HTTPS || "false";
constant USE_CACHE (line 5) | const USE_CACHE = import.meta.env.VITE_APP_USE_CACHE?.toLowerCase() || "...
constant CURRENT_APP_VERSION (line 6) | const CURRENT_APP_VERSION = import.meta.env.VITE_APP_VERSION;
constant DEFAULT_NODES_HIDDEN_LIST (line 7) | const DEFAULT_NODES_HIDDEN_LIST =
constant LOW_PRIORITY_NODE_PREFIXES_RAW (line 10) | const LOW_PRIORITY_NODE_PREFIXES_RAW =
constant HIGH_PRIORITY_NODE_PREFIXES_RAW (line 12) | const HIGH_PRIORITY_NODE_PREFIXES_RAW =
constant IS_DEV (line 15) | const IS_DEV = import.meta.env.VITE_APP_IS_DEV?.toLowerCase() === "true";
FILE: packages/ui/src/hooks/useFlowSocketListeners.tsx
function defaultOnDisconnect (line 46) | function defaultOnDisconnect(reason: string) {
FILE: packages/ui/src/hooks/useFormFields.tsx
type DisplayParams (line 19) | interface DisplayParams {
function useFormFields (line 26) | function useFormFields(
FILE: packages/ui/src/hooks/useHandleShowOutput.tsx
type UseHandleShowOutputProps (line 3) | interface UseHandleShowOutputProps {
FILE: packages/ui/src/hooks/useLoading.tsx
type AsyncFunction (line 3) | type AsyncFunction<T extends any[], N> = (...args: T) => Promise<N>;
type Params (line 5) | type Params<T> = T extends (...args: infer U) => any ? U : never;
type StartLoadingWith (line 7) | type StartLoadingWith = <T extends any[], N>(
FILE: packages/ui/src/hooks/useLocalStorage.tsx
function useLocalStorage (line 11) | function useLocalStorage<T>(
FILE: packages/ui/src/layout/main-layout/AppLayout.tsx
type FlowTab (line 32) | interface FlowTab {
type FlowMetadata (line 38) | interface FlowMetadata {
type FlowManagerState (line 48) | interface FlowManagerState {
type FlowTabsProps (line 52) | interface FlowTabsProps {
type ApplicationMode (line 56) | type ApplicationMode = "flow";
type ApplicationMenu (line 57) | type ApplicationMenu = "template" | "config" | "help";
FILE: packages/ui/src/layout/main-layout/header/Tab.tsx
type TabProps (line 9) | interface TabProps {
type TabActions (line 18) | type TabActions = "remove" | "name";
function calculatePosition (line 154) | function calculatePosition(element: HTMLDivElement) {
FILE: packages/ui/src/layout/main-layout/header/TabHeader.tsx
type TabHeaderProps (line 7) | interface TabHeaderProps {
FILE: packages/ui/src/layout/main-layout/wrapper/FlowErrorBoundary.tsx
type ErrorBoundaryProps (line 3) | interface ErrorBoundaryProps {
type ErrorBoundaryState (line 7) | interface ErrorBoundaryState {
function ErrorBoundary (line 11) | function ErrorBoundary({ children }: ErrorBoundaryProps) {
FILE: packages/ui/src/layout/main-layout/wrapper/FlowWrapper.tsx
type FlowWrapperProps (line 11) | interface FlowWrapperProps {
type MenuStateType (line 18) | type MenuStateType = {
function FlowWrapper (line 22) | function FlowWrapper({
FILE: packages/ui/src/nodes-configuration/sectionConfig.ts
type NodeSection (line 18) | type NodeSection = {
type DnDNode (line 25) | type DnDNode = {
function transformNodeConfigsToDndNode (line 38) | function transformNodeConfigsToDndNode(configs: {
function getNonGenericNodeConfig (line 52) | function getNonGenericNodeConfig() {
function getAllDndNode (line 333) | function getAllDndNode(): DnDNode[] {
function sortSection (line 383) | function sortSection(
FILE: packages/ui/src/nodes-configuration/types.ts
type SectionType (line 1) | type SectionType = "models" | "image-generation" | "tools" | "input";
type FieldType (line 2) | type FieldType =
type OutputType (line 22) | type OutputType =
type Option (line 33) | interface Option {
type Operator (line 39) | type Operator =
type Condition (line 49) | interface Condition {
type ConditionGroup (line 55) | interface ConditionGroup {
type FieldCondition (line 60) | type FieldCondition = Condition | ConditionGroup;
type Field (line 62) | interface Field {
type SubnodeData (line 87) | interface SubnodeData {
type SubnodeShortcutStyle (line 96) | interface SubnodeShortcutStyle {
type NodeConfig (line 100) | interface NodeConfig {
type DiscriminatedNodeConfig (line 117) | interface DiscriminatedNodeConfig {
type NodeSubConfig (line 122) | interface NodeSubConfig {
type NodeConfigVariant (line 127) | type NodeConfigVariant = NodeSubConfig &
FILE: packages/ui/src/providers/FlowDataProvider.tsx
type FlowDataContextType (line 5) | interface FlowDataContextType {
type FlowDataProviderProps (line 14) | interface FlowDataProviderProps {
function getCurrentTab (line 25) | function getCurrentTab() {
function updateCurrentTabMetadata (line 29) | function updateCurrentTabMetadata(metadata: FlowMetadata) {
FILE: packages/ui/src/providers/NodeProvider.tsx
type NodeDimensions (line 17) | type NodeDimensions = {
type NodeContextType (line 22) | interface NodeContextType {
constant DUPLICATED_NODE_OFFSET (line 57) | const DUPLICATED_NODE_OFFSET = 100;
function clearAllOutput (line 310) | function clearAllOutput() {
FILE: packages/ui/src/providers/SocketProvider.tsx
type FlowEventData (line 21) | interface FlowEventData {
type FlowEvent (line 27) | interface FlowEvent {
type WSConfiguration (line 32) | type WSConfiguration = {
type ISocketContext (line 36) | interface ISocketContext {
type SocketProviderProps (line 45) | interface SocketProviderProps {
function updateSocket (line 80) | function updateSocket(config?: WSConfiguration): void {
function getActiveSocket (line 89) | function getActiveSocket(): FlowSocket | null {
function createNewSocket (line 96) | function createNewSocket(configuration?: WSConfiguration) {
function emitEvent (line 116) | function emitEvent(event: FlowEvent): boolean {
function connect (line 131) | function connect() {
function disconnect (line 135) | function disconnect() {
FILE: packages/ui/src/providers/ThemeProvider.tsx
type ThemeContextType (line 13) | interface ThemeContextType {
type ThemeProviderProps (line 24) | interface ThemeProviderProps {
FILE: packages/ui/src/providers/VisibilityProvider.tsx
type VisibilityElement (line 3) | type VisibilityElement =
type SidepaneTab (line 9) | type SidepaneTab = "json" | "topological" | "current_node";
type ConfigTab (line 10) | type ConfigTab = "user" | "display" | "app";
type VisibilityContextState (line 12) | interface VisibilityContextState {
type VisibilityContextType (line 22) | interface VisibilityContextType {
type VisibilityProviderProps (line 34) | interface VisibilityProviderProps {
constant VISBILITY_PROVIDER_PREFIX (line 38) | const VISBILITY_PROVIDER_PREFIX = "vp-";
function getIsVisibleFromLocalStorage (line 40) | function getIsVisibleFromLocalStorage(key: string): boolean | null {
FILE: packages/ui/src/services/tabStorage.ts
constant LOCAL_STORAGE_TAB_KEY (line 3) | const LOCAL_STORAGE_TAB_KEY = "flowTabs";
constant LOCAL_STORAGE_CURRENT_TAB_KEY (line 4) | const LOCAL_STORAGE_CURRENT_TAB_KEY = "currentTab";
function getCurrentTabIndex (line 6) | function getCurrentTabIndex() {
function saveCurrentTabIndex (line 12) | function saveCurrentTabIndex(index: number) {
function getLocalTabs (line 16) | function getLocalTabs() {
function saveTabsLocally (line 22) | function saveTabsLocally(tabs: FlowTab[]) {
function getAllTabs (line 30) | async function getAllTabs() {
FILE: packages/ui/src/sockets/flowEventTypes.ts
type FlowOnProgressEventData (line 1) | interface FlowOnProgressEventData<T = any> {
type FlowOnErrorEventData (line 7) | interface FlowOnErrorEventData {
type FlowOnCurrentNodeRunningEventData (line 13) | interface FlowOnCurrentNodeRunningEventData {
FILE: packages/ui/src/sockets/flowSocket.ts
type FlowEventIn (line 3) | type FlowEventIn =
type FlowEventOut (line 12) | type FlowEventOut = "run_node" | "process_file" | "update_app_config";
class FlowSocket (line 14) | class FlowSocket {
method constructor (line 17) | constructor(socket: Socket) {
method on (line 21) | public on(event: FlowEventIn, handler: (...args: any[]) => void): void {
method off (line 25) | public off(event: FlowEventIn, handler: (...args: any[]) => void): void {
method emit (line 29) | public emit(event: FlowEventOut, ...args: any[]): void {
method connect (line 33) | public connect(): void {
method disconnect (line 39) | public disconnect(): void {
method close (line 45) | public close(): void {
FILE: packages/ui/src/utils/evaluateConditions.ts
function evaluateSingleCondition (line 14) | function evaluateSingleCondition(
function evaluateCondition (line 53) | function evaluateCondition(
FILE: packages/ui/src/utils/flowChecker.ts
function createErrorMessageForMissingFields (line 50) | function createErrorMessageForMissingFields(
function getNodeInError (line 70) | function getNodeInError(
FILE: packages/ui/src/utils/flowUtils.ts
type BasicNode (line 8) | type BasicNode = Pick<
type BasicEdge (line 13) | type BasicEdge = Pick<
constant CONFIG (line 18) | const CONFIG = {
function getConfig (line 28) | function getConfig() {
function isCompatibleConfigVersion (line 32) | function isCompatibleConfigVersion(fileVersion: string | undefined) {
function nodesTopologicalSort (line 52) | function nodesTopologicalSort(
function findParents (line 78) | function findParents(node: BasicNode, edges: BasicEdge[]) {
function formatFlow (line 84) | function formatFlow(nodes: BasicNode[], edges: BasicEdge[]) {
function clearSelectedNodes (line 117) | function clearSelectedNodes(nodes: Node[]) {
function getConfigEssentials (line 125) | function getConfigEssentials(config: any) {
function convertFlowToJson (line 144) | function convertFlowToJson(
function getInputNamesWithValidCondition (line 173) | function getInputNamesWithValidCondition(node: BasicNode) {
function convertEdgeToNodeInput (line 188) | function convertEdgeToNodeInput(
function getFieldsWithValidCondition (line 225) | function getFieldsWithValidCondition(
function convertJsonToFlow (line 242) | function convertJsonToFlow(json: any): {
function migrateConfig (line 324) | function migrateConfig(oldConfig: FlowTab) {
FILE: packages/ui/src/utils/nodeConfigurationUtils.ts
function getAdequateConfigFromDiscriminators (line 4) | function getAdequateConfigFromDiscriminators(nodeData: NodeData) {
function getDefaultOptions (line 31) | function getDefaultOptions(fields: Field[], data: NodeData) {
function getNbInputs (line 56) | function getNbInputs(data: NodeData, fields?: Field[]) {
function getNbOutputs (line 66) | function getNbOutputs(data: NodeData) {
FILE: packages/ui/src/utils/openAPIUtils.ts
type OpenApiSchema (line 4) | interface OpenApiSchema {
type Config (line 12) | interface Config {
function getSchemaFromConfig (line 17) | function getSchemaFromConfig(config: Config, schemaName: string) {
function resolveReference (line 34) | function resolveReference(ref: string, globalSchema?: OpenApiSchema): any {
function convertOpenAPISchemaToNodeConfig (line 53) | function convertOpenAPISchemaToNodeConfig(schema: any, config?: Config) {
FILE: packages/ui/src/utils/toastUtils.tsx
function toastInfoMessage (line 4) | function toastInfoMessage(message: string, id?: string) {
function toastErrorMessage (line 18) | function toastErrorMessage(message: string) {
function toastFastSuccessMessage (line 34) | function toastFastSuccessMessage(message: string) {
function toastFastInfoMessage (line 47) | function toastFastInfoMessage(message: string) {
function toastCustomIconInfoMessage (line 60) | function toastCustomIconInfoMessage(message: string, icon: IconType) {
FILE: packages/ui/test/unit/flowChecker.test.ts
function createNode (line 103) | function createNode({
FILE: packages/ui/test/unit/flowUtils.test.ts
function createNode (line 14) | function createNode(
function createEdge (line 27) | function createEdge(
FILE: packages/ui/test/utils.ts
function waitForAppInitialRender (line 8) | async function waitForAppInitialRender(page: Page) {
Condensed preview — 312 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,761K chars).
[
{
"path": ".github/FUNDING.yml",
"chars": 735,
"preview": "# These are supported funding model platforms\n\ngithub: [DahnM20]\npatreon: # Replace with a single Patreon username\nopen_"
},
{
"path": ".github/workflows/main.yml",
"chars": 1217,
"preview": "name: Docker Compose Build | Healthcheck | Tests\n\non:\n push:\n branches:\n - main\n - develop\n - develop"
},
{
"path": ".gitignore",
"chars": 22,
"preview": "packages/backend/.env\n"
},
{
"path": "LICENSE",
"chars": 1061,
"preview": "MIT License\n\nCopyright (c) 2023 Dahn\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof th"
},
{
"path": "README.md",
"chars": 5435,
"preview": "<p align=\"center\">\n <img src=\"assets/header.png\" alt=\"AI-Flow Logo\" />\n</p>\n\n<p align=\"center\">\n <em>Open-source tool "
},
{
"path": "bin/generate_python_classes_from_ts.sh",
"chars": 448,
"preview": "npm i -g typescript-json-schema \ntypescript-json-schema \"../packages/ui/src/nodes-configuration/types.ts\" \"*\" --out \"sch"
},
{
"path": "docker/README.md",
"chars": 298,
"preview": "## 🐳 Docker\n\n### Docker Compose\n\n1. Go to the docker directory: `cd ./docker`\n2. Update the .yml if needed for the PORTS"
},
{
"path": "docker/docker-compose.it.yml",
"chars": 611,
"preview": "services:\n backend:\n container_name: ai-flow-backend\n build:\n context: ../packages/backend/\n dockerfile"
},
{
"path": "docker/docker-compose.yml",
"chars": 656,
"preview": "services:\n backend:\n container_name: ai-flow-backend\n build:\n context: ../packages/backend/\n dockerfile"
},
{
"path": "docker/healthcheck.sh",
"chars": 461,
"preview": "#!/bin/bash\n\nif [ \"$#\" -ne 1 ]; then\n echo \"Usage: $0 <URL>\"\n exit 1\nfi\n\nURL=\"$1\"\nINTERVAL=5\nMAX_ATTEMPTS=20 \n\natt"
},
{
"path": "integration_tests/.gitignore",
"chars": 309,
"preview": "# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n/.pn"
},
{
"path": "integration_tests/package.json",
"chars": 647,
"preview": "{\n \"name\": \"integration_tests\",\n \"version\": \"1.0.0\",\n \"description\": \"\",\n \"main\": \"dist/index.js\",\n \"scripts\": {\n "
},
{
"path": "integration_tests/tests/nodeProcessingOrder/nodeErrorTest.ts",
"chars": 3623,
"preview": "import { expect } from \"chai\";\nimport {\n disconnectSocket,\n getSocket,\n setupSocket,\n} from \"../../utils/testHooks\";\n"
},
{
"path": "integration_tests/tests/nodeProcessingOrder/nodeParallelExecutionDurationTest.ts",
"chars": 1603,
"preview": "import { expect } from \"chai\";\nimport {\n disconnectSocket,\n getSocket,\n setupSocket,\n} from \"../../utils/testHooks\";\n"
},
{
"path": "integration_tests/tests/nodeProcessingOrder/nodeWithChildrenTest.ts",
"chars": 2161,
"preview": "import { expect } from \"chai\";\nimport { disconnectSocket, getSocket, setupSocket } from \"../../utils/testHooks\";\nimport "
},
{
"path": "integration_tests/tests/nodeProcessingOrder/nodeWithMultipleParentsTest.ts",
"chars": 2410,
"preview": "import { expect } from \"chai\";\nimport { disconnectSocket, getSocket, setupSocket } from \"../../utils/testHooks\";\nimport "
},
{
"path": "integration_tests/tests/nodeProcessingOrder/nodesWithoutLinkTest.ts",
"chars": 1823,
"preview": "import { expect } from \"chai\";\nimport { disconnectSocket, getSocket, setupSocket } from \"../../utils/testHooks\";\nimport "
},
{
"path": "integration_tests/tests/nodeProcessingOrder/singleNodeTest.ts",
"chars": 1186,
"preview": "import { expect } from \"chai\";\nimport { Socket, io } from \"socket.io-client\";\nimport { createRequestData } from \"../../u"
},
{
"path": "integration_tests/tests/socketEvents/processFileEventTest.ts",
"chars": 2504,
"preview": "import { io, Socket } from \"socket.io-client\";\nimport { expect } from 'chai';\nimport { basicJsonFlow, getBasicProcessFil"
},
{
"path": "integration_tests/tests/socketEvents/runNodeEventTest.ts",
"chars": 1992,
"preview": "import { io, Socket } from \"socket.io-client\";\nimport { expect } from 'chai';\nimport { basicJsonFlow, getBasicRunNodeDat"
},
{
"path": "integration_tests/tests/socketEvents/socketConnectionTest.ts",
"chars": 837,
"preview": "import { io, Socket } from \"socket.io-client\";\nimport { expect } from 'chai';\n\ndescribe('Socket.IO connection tests', fu"
},
{
"path": "integration_tests/tsconfig.json",
"chars": 165,
"preview": "{\n \"compilerOptions\": {\n \"target\": \"ES6\",\n \"module\": \"commonjs\",\n \"outDir\": \"./dist\",\n \"r"
},
{
"path": "integration_tests/utils/requestDatas.ts",
"chars": 4391,
"preview": "type ProcessFileData = {\n jsonFile: string;\n parameters: Record<string, string>;\n};\n\ntype RunNodeData = {\n jsonFile: "
},
{
"path": "integration_tests/utils/testHooks.ts",
"chars": 411,
"preview": "import { Socket, io } from \"socket.io-client\";\n\nlet socket: Socket;\n\nexport const setupSocket = (done: any) => {\n soc"
},
{
"path": "packages/backend/.gitignore",
"chars": 250,
"preview": "# Fichiers générés par l'environnement de développement\n__pycache__/\n*.py[cod]\n\n# Fichiers générés par l'IDE\n.idea/\n.vsc"
},
{
"path": "packages/backend/Dockerfile",
"chars": 928,
"preview": "FROM python:3.9\n\n# Default values\nENV HOST=0.0.0.0\nENV PORT=5000\n\n\nWORKDIR /app\n\n# System dependencies\nRUN apt-get updat"
},
{
"path": "packages/backend/README.md",
"chars": 0,
"preview": ""
},
{
"path": "packages/backend/app/env_config.py",
"chars": 1856,
"preview": "import os\nimport sys\nfrom typing import List, Optional\n\nENV_LOCAL = \"LOCAL\"\nENV_CLOUD = \"CLOUD\"\nCURRENT_ENV = os.environ"
},
{
"path": "packages/backend/app/flask/app_routes/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "packages/backend/app/flask/app_routes/image_routes.py",
"chars": 372,
"preview": "from app.env_config import (get_local_storage_folder_path)\nfrom flask import Blueprint, send_from_directory\n\nimage_bluep"
},
{
"path": "packages/backend/app/flask/app_routes/node_routes.py",
"chars": 2151,
"preview": "import json\n\nfrom flask import Blueprint, request\n\nfrom ...utils.node_extension_utils import get_dynamic_extension_confi"
},
{
"path": "packages/backend/app/flask/app_routes/parameters_routes.py",
"chars": 343,
"preview": "import os\nimport yaml\nfrom flask import Blueprint\n\nparameters_blueprint = Blueprint(\"parameters_blueprint\", __name__)\n\n\n"
},
{
"path": "packages/backend/app/flask/app_routes/static_routes.py",
"chars": 588,
"preview": "import os\nfrom flask import Blueprint, send_from_directory\n\nfrom ...env_config import get_static_folder\n\nstatic_blueprin"
},
{
"path": "packages/backend/app/flask/app_routes/upload_routes.py",
"chars": 933,
"preview": "import logging\nfrom flask import Blueprint\nfrom ...storage.storage_strategy import StorageStrategy\n\nfrom ...root_injecto"
},
{
"path": "packages/backend/app/flask/decorators.py",
"chars": 625,
"preview": "from functools import wraps\n\nfrom flask import jsonify, request, g\nfrom flask_socketio import emit\nimport json\n\n\ndef wit"
},
{
"path": "packages/backend/app/flask/flask_app.py",
"chars": 861,
"preview": "import logging\nfrom flask import Flask, request, redirect\nfrom flask_cors import CORS\nimport os\n\nfrom ..env_config impor"
},
{
"path": "packages/backend/app/flask/routes.py",
"chars": 1003,
"preview": "import logging\nfrom app.env_config import is_server_static_files_enabled, is_local_environment\nfrom app.flask.socketio_i"
},
{
"path": "packages/backend/app/flask/socketio_init.py",
"chars": 239,
"preview": "import eventlet\n\neventlet.monkey_patch(all=False, socket=True)\n\nfrom flask_socketio import SocketIO\nfrom .flask_app impo"
},
{
"path": "packages/backend/app/flask/sockets.py",
"chars": 5464,
"preview": "import eventlet\nfrom ..env_config import is_set_app_config_on_ui_enabled\n\neventlet.monkey_patch(all=False, socket=True)\n"
},
{
"path": "packages/backend/app/flask/utils/constants.py",
"chars": 299,
"preview": "HTTP_OK = 200\nHTTP_BAD_REQUEST = 400\nHTTP_NOT_FOUND = 404\nHTTP_UNAUTHORIZED = 401\n\n\nSESSION_USER_ID_KEY = \"user_id\"\n\nPAR"
},
{
"path": "packages/backend/app/llms/utils/max_token_for_model.py",
"chars": 1503,
"preview": "import tiktoken\n\nDEFAULT_MAX_TOKEN = 4097\n\n\ndef max_token_for_model(model_name: str) -> int:\n if \"gpt-4o\" in model_na"
},
{
"path": "packages/backend/app/log_config.py",
"chars": 630,
"preview": "import logging\nimport colorlog\n\n\ndef setup_logger(name: str):\n formatter = colorlog.ColoredFormatter(\n \"%(log_"
},
{
"path": "packages/backend/app/processors/components/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "packages/backend/app/processors/components/core/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "packages/backend/app/processors/components/core/ai_data_splitter_processor.py",
"chars": 5252,
"preview": "import logging\nfrom ...context.processor_context import ProcessorContext\nfrom ..processor import ContextAwareProcessor\n\n"
},
{
"path": "packages/backend/app/processors/components/core/dall_e_prompt_processor.py",
"chars": 1413,
"preview": "from ...context.processor_context import ProcessorContext\nfrom ..processor import ContextAwareProcessor\n\nfrom openai imp"
},
{
"path": "packages/backend/app/processors/components/core/display_processor.py",
"chars": 493,
"preview": "from .processor_type_name_utils import ProcessorType\nfrom ..processor import BasicProcessor\n\n\nclass DisplayProcessor(Bas"
},
{
"path": "packages/backend/app/processors/components/core/file_processor.py",
"chars": 322,
"preview": "from .processor_type_name_utils import ProcessorType\nfrom ..processor import BasicProcessor\n\n\nclass FileProcessor(BasicP"
},
{
"path": "packages/backend/app/processors/components/core/gpt_vision_processor.py",
"chars": 3263,
"preview": "import re\nfrom typing import Any, List\n\nfrom ...launcher.event_type import EventType\nfrom ...launcher.processor_event im"
},
{
"path": "packages/backend/app/processors/components/core/input_image_processor.py",
"chars": 349,
"preview": "from .processor_type_name_utils import ProcessorType\nfrom ..processor import BasicProcessor\n\n\nclass InputImageProcessor("
},
{
"path": "packages/backend/app/processors/components/core/input_processor.py",
"chars": 343,
"preview": "from .processor_type_name_utils import ProcessorType\nfrom ..processor import BasicProcessor\n\n\nclass InputProcessor(Basic"
},
{
"path": "packages/backend/app/processors/components/core/llm_prompt_processor.py",
"chars": 6333,
"preview": "import logging\n\nfrom app.processors.exceptions import LightException\n\nfrom ...launcher.processor_event import ProcessorE"
},
{
"path": "packages/backend/app/processors/components/core/merge_processor.py",
"chars": 885,
"preview": "from ..processor import ContextAwareProcessor\nfrom .processor_type_name_utils import ProcessorType, MergeModeEnum\n\nclass"
},
{
"path": "packages/backend/app/processors/components/core/processor_type_name_utils.py",
"chars": 998,
"preview": "from enum import Enum\n\n\nclass MergeModeEnum(Enum):\n MERGE = 1\n MERGE_AND_PROMPT = 2\n\n\nclass ProcessorType(Enum):\n "
},
{
"path": "packages/backend/app/processors/components/core/replicate_processor.py",
"chars": 8508,
"preview": "from datetime import datetime\nimport logging\nfrom queue import Queue\nimport time\nfrom urllib.parse import urlparse\n\nfrom"
},
{
"path": "packages/backend/app/processors/components/core/stable_diffusion_stabilityai_prompt_processor.py",
"chars": 2638,
"preview": "import base64\n\nfrom ...context.processor_context import ProcessorContext\nfrom ..processor import ContextAwareProcessor\nf"
},
{
"path": "packages/backend/app/processors/components/core/stable_video_diffusion_replicate.py",
"chars": 1994,
"preview": "import os\nfrom urllib.parse import urlparse\nfrom ...context.processor_context import ProcessorContext\nfrom ..processor i"
},
{
"path": "packages/backend/app/processors/components/core/transition_processor.py",
"chars": 511,
"preview": "from .processor_type_name_utils import ProcessorType\nfrom ..processor import BasicProcessor\n\n\nclass TransitionProcessor("
},
{
"path": "packages/backend/app/processors/components/core/url_input_processor.py",
"chars": 5479,
"preview": "import random\nfrom bs4 import BeautifulSoup\n\nimport requests\n\nfrom ....utils.processor_utils import is_valid_url\nfrom .."
},
{
"path": "packages/backend/app/processors/components/core/youtube_transcript_input_processor.py",
"chars": 4645,
"preview": "import logging\n\nfrom ...utils.retry_mixin import RetryMixin\n\nfrom ...exceptions import LightException\n\nfrom ..processor "
},
{
"path": "packages/backend/app/processors/components/extension/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "packages/backend/app/processors/components/extension/claude_anthropic_processor.py",
"chars": 8732,
"preview": "import logging\nfrom datetime import datetime\n\nimport anthropic\n\nfrom ...context.processor_context import ProcessorContex"
},
{
"path": "packages/backend/app/processors/components/extension/deepseek_processor.py",
"chars": 3286,
"preview": "from ...launcher.event_type import EventType\nfrom ...launcher.processor_event import ProcessorEvent\nfrom ...context.proc"
},
{
"path": "packages/backend/app/processors/components/extension/document_to_text_processor.py",
"chars": 4404,
"preview": "import logging\nfrom queue import Queue\nimport requests\nfrom ....tasks.task_exception import TaskAlreadyRegisteredError\n\n"
},
{
"path": "packages/backend/app/processors/components/extension/extension_processor.py",
"chars": 1584,
"preview": "from ..model import NodeConfig\nfrom ...context.processor_context import ProcessorContext\nfrom ..processor import BasicPr"
},
{
"path": "packages/backend/app/processors/components/extension/generate_number_processor.py",
"chars": 2302,
"preview": "import random\n\nfrom ..node_config_builder import FieldBuilder, NodeConfigBuilder\nfrom ...context.processor_context impor"
},
{
"path": "packages/backend/app/processors/components/extension/gpt_image_processor.py",
"chars": 8662,
"preview": "import base64\nimport mimetypes\nimport os\nimport re\nfrom datetime import datetime\nfrom io import BytesIO\nfrom urllib.pars"
},
{
"path": "packages/backend/app/processors/components/extension/http_get_processor.py",
"chars": 4346,
"preview": "import logging\nimport requests\nimport json\nfrom urllib.parse import urlparse\n\nfrom ..node_config_builder import FieldBui"
},
{
"path": "packages/backend/app/processors/components/extension/open_router_processor.py",
"chars": 5545,
"preview": "import logging\n\nfrom ....env_config import is_local_environment\n\nfrom ...launcher.event_type import EventType\nfrom ...la"
},
{
"path": "packages/backend/app/processors/components/extension/openai_reasoning_processor.py",
"chars": 5163,
"preview": "import logging\n\nfrom app.processors.exceptions import LightException\nfrom ...launcher.event_type import EventType\nfrom ."
},
{
"path": "packages/backend/app/processors/components/extension/openai_text_to_speech_processor.py",
"chars": 8319,
"preview": "import logging\nimport re\nfrom ...context.processor_context import ProcessorContext\nfrom ..model import Field, NodeConfig"
},
{
"path": "packages/backend/app/processors/components/extension/replace_text_processor.py",
"chars": 4255,
"preview": "import logging\nimport re\n\nfrom ..node_config_builder import FieldBuilder, NodeConfigBuilder\nfrom .extension_processor im"
},
{
"path": "packages/backend/app/processors/components/extension/stabilityai_generic_processor.py",
"chars": 10998,
"preview": "import json\nimport logging\nimport os\nfrom ..node_config_utils import get_sub_configuration\n\nfrom ....utils.openapi_clien"
},
{
"path": "packages/backend/app/processors/components/extension/stable_diffusion_three_processor.py",
"chars": 5604,
"preview": "import logging\nimport os\nimport requests\n\nfrom ..node_config_builder import FieldBuilder, NodeConfigBuilder\nfrom ...cont"
},
{
"path": "packages/backend/app/processors/components/model.py",
"chars": 5177,
"preview": "# generated by datamodel-codegen:\n# filename: schema.json\n# timestamp: 2025-05-26T04:44:24+00:00\n\nfrom __future__ i"
},
{
"path": "packages/backend/app/processors/components/node_config_builder.py",
"chars": 6527,
"preview": "from typing import Dict, List, Optional, Union\nfrom .model import (\n DiscriminatedNodeConfig,\n Field,\n FieldTyp"
},
{
"path": "packages/backend/app/processors/components/node_config_utils.py",
"chars": 433,
"preview": "from .model import NodeConfigVariant\n\n\ndef get_sub_configuration(discriminators_values, node_config: NodeConfigVariant):"
},
{
"path": "packages/backend/app/processors/components/processor.py",
"chars": 7967,
"preview": "from abc import ABC, abstractmethod\nimport json\nimport logging\nfrom typing import Any, List, Optional, TypedDict, Union,"
},
{
"path": "packages/backend/app/processors/context/processor_context.py",
"chars": 861,
"preview": "from abc import ABC, abstractmethod\nfrom typing import Optional\nfrom typing import List\n\n\nclass ProcessorContext(ABC):\n "
},
{
"path": "packages/backend/app/processors/context/processor_context_flask_request.py",
"chars": 1323,
"preview": "from typing import List, Optional\nfrom ...flask.utils.constants import SESSION_USER_ID_KEY\nfrom .processor_context impor"
},
{
"path": "packages/backend/app/processors/exceptions.py",
"chars": 342,
"preview": "class LightException(Exception):\n def __init__(\n self,\n message: str,\n langvar_message: str = \"L"
},
{
"path": "packages/backend/app/processors/factory/processor_factory.py",
"chars": 434,
"preview": "from abc import ABC, abstractmethod\n\nfrom ...storage.storage_strategy import StorageStrategy\nfrom ..context.processor_co"
},
{
"path": "packages/backend/app/processors/factory/processor_factory_iter_modules.py",
"chars": 2316,
"preview": "from enum import Enum\nimport importlib\nimport logging\nimport pkgutil\nimport inspect\nfrom ..components.processor import P"
},
{
"path": "packages/backend/app/processors/launcher/abstract_topological_processor_launcher.py",
"chars": 8686,
"preview": "from abc import abstractmethod\nimport json\nimport logging\nfrom typing import List\nfrom injector import inject\nfrom .proc"
},
{
"path": "packages/backend/app/processors/launcher/async_processor_launcher.py",
"chars": 6004,
"preview": "import gc\nimport threading\nimport time\nimport eventlet\nfrom eventlet.semaphore import Semaphore\nimport logging\nimport tr"
},
{
"path": "packages/backend/app/processors/launcher/basic_processor_launcher.py",
"chars": 1299,
"preview": "from .abstract_topological_processor_launcher import AbstractTopologicalProcessorLauncher\n\n\nclass BasicProcessorLauncher"
},
{
"path": "packages/backend/app/processors/launcher/event_type.py",
"chars": 171,
"preview": "from enum import Enum\n\n\nclass EventType(Enum):\n PROGRESS = \"progress\"\n STREAMING = \"streaming\"\n CURRENT_NODE_RU"
},
{
"path": "packages/backend/app/processors/launcher/processor_event.py",
"chars": 212,
"preview": "from dataclasses import dataclass, field\nfrom typing import Any\n\n\n@dataclass\nclass ProcessorEvent:\n source: Any = fie"
},
{
"path": "packages/backend/app/processors/launcher/processor_launcher.py",
"chars": 570,
"preview": "from abc import ABC, abstractmethod\n\nfrom ..context.processor_context import ProcessorContext\n\n\nclass ProcessorLauncher("
},
{
"path": "packages/backend/app/processors/launcher/processor_launcher_event.py",
"chars": 504,
"preview": "from dataclasses import dataclass, field\nfrom typing import Any\n\nfrom ..components.processor import Processor\n\n\n@datacla"
},
{
"path": "packages/backend/app/processors/observer/observer.py",
"chars": 127,
"preview": "from abc import ABC, abstractmethod\n\n\nclass Observer(ABC):\n @abstractmethod\n def notify(self, event, data):\n "
},
{
"path": "packages/backend/app/processors/observer/socketio_event_emitter.py",
"chars": 1739,
"preview": "from ..launcher.event_type import EventType\nfrom ..launcher.processor_launcher_event import ProcessorLauncherEvent\n\nfrom"
},
{
"path": "packages/backend/app/processors/utils/retry_mixin.py",
"chars": 895,
"preview": "import time\nimport logging\n\n\nclass RetryMixin:\n def run_with_retry(self, func, *args, **kwargs):\n \"\"\"\n "
},
{
"path": "packages/backend/app/root_injector.py",
"chars": 2292,
"preview": "from typing import List\nfrom injector import Injector, Binder, Module\nfrom tests.utils.processor_factory_mock import Pro"
},
{
"path": "packages/backend/app/storage/local_storage_strategy.py",
"chars": 1050,
"preview": "from typing import Any\nfrom ..storage.storage_strategy import StorageStrategy\nfrom werkzeug.utils import secure_filename"
},
{
"path": "packages/backend/app/storage/s3_storage_strategy.py",
"chars": 5755,
"preview": "import logging\nfrom typing import Any\nimport uuid\nfrom ..storage.storage_strategy import CloudStorageStrategy\nimport bot"
},
{
"path": "packages/backend/app/storage/storage_strategy.py",
"chars": 687,
"preview": "from abc import ABC, abstractmethod\nfrom typing import Any, Optional\n\n\nclass StorageStrategy(ABC):\n \"\"\"Storage strate"
},
{
"path": "packages/backend/app/tasks/green_pool_task_manager.py",
"chars": 1625,
"preview": "import logging\nfrom queue import Queue\nimport eventlet\nfrom eventlet.green import threading\n\n\nfrom .task_exception impor"
},
{
"path": "packages/backend/app/tasks/single_thread_tasks/browser/async_browser_task.py",
"chars": 5805,
"preview": "import logging\nimport re\nimport asyncio\nimport threading\nfrom ....utils.web_scrapping.async_browser_manager import (\n "
},
{
"path": "packages/backend/app/tasks/single_thread_tasks/browser/browser_task.py",
"chars": 3939,
"preview": "import logging\nimport queue\nimport re\nimport threading\nimport time\nfrom ....utils.web_scrapping.browser_manager import B"
},
{
"path": "packages/backend/app/tasks/task_exception.py",
"chars": 277,
"preview": "class TaskAlreadyRegisteredError(Exception):\n \"\"\"Exception raised when attempting to register a task that is already "
},
{
"path": "packages/backend/app/tasks/task_manager.py",
"chars": 1635,
"preview": "import logging\nfrom queue import Queue\nfrom concurrent.futures import ThreadPoolExecutor\nimport threading\n\nfrom .task_ex"
},
{
"path": "packages/backend/app/tasks/task_utils.py",
"chars": 559,
"preview": "from queue import Empty\nimport time\nimport eventlet\n\n\ndef wait_for_result(queue, timeout=120, initial_sleep=0.1, max_sle"
},
{
"path": "packages/backend/app/tasks/thread_pool_task_manager.py",
"chars": 1656,
"preview": "from concurrent.futures import ThreadPoolExecutor\nimport logging\nfrom queue import Queue\nimport threading\n\n\nfrom .task_e"
},
{
"path": "packages/backend/app/utils/node_extension_utils.py",
"chars": 3146,
"preview": "import importlib\nimport logging\nimport os\nimport pkgutil\nfrom cachetools import TTLCache, cached\n\nfrom ..processors.comp"
},
{
"path": "packages/backend/app/utils/openapi_client.py",
"chars": 4046,
"preview": "import logging\nfrom typing import Optional\nimport requests\nimport eventlet\n\n\nclass Client:\n\n def __init__(\n se"
},
{
"path": "packages/backend/app/utils/openapi_converter.py",
"chars": 3628,
"preview": "import json\n\nfrom ..processors.components.model import Option\nfrom ..processors.components.node_config_builder import (\n"
},
{
"path": "packages/backend/app/utils/openapi_reader.py",
"chars": 8816,
"preview": "import json\nimport hashlib\nfrom openapi_spec_validator.readers import read_from_filename\n\n\nclass OpenAPIReader:\n HTTP"
},
{
"path": "packages/backend/app/utils/processor_utils.py",
"chars": 2907,
"preview": "from datetime import datetime\nimport os\nimport tempfile\nfrom urllib.parse import urlparse\nimport requests\n\n\ndef create_e"
},
{
"path": "packages/backend/app/utils/replicate_utils.py",
"chars": 4891,
"preview": "import os\nimport requests\nfrom ..env_config import get_replicate_api_key\nfrom cachetools import TTLCache, cached\nimport "
},
{
"path": "packages/backend/app/utils/web_scrapping/async_browser_manager.py",
"chars": 4384,
"preview": "import logging\nimport asyncio\nfrom asyncio import Queue\nfrom queue import Empty\nimport tempfile\nimport time\nimport zipfi"
},
{
"path": "packages/backend/app/utils/web_scrapping/browser_manager.py",
"chars": 2549,
"preview": "import logging\nimport os\nfrom queue import Empty, Queue\nimport time\nimport threading\nfrom ...env_config import get_brows"
},
{
"path": "packages/backend/config.yaml",
"chars": 614,
"preview": "core:\n openai_api_key:\n tag: \"core\"\n description: \"API key for accessing OpenAI services.\"\n \n stabilityai_api_k"
},
{
"path": "packages/backend/hooks/hook-app.processors.py",
"chars": 108,
"preview": "from PyInstaller.utils.hooks import collect_submodules\n\nhiddenimports = collect_submodules('app.processors')"
},
{
"path": "packages/backend/pyproject.toml",
"chars": 1112,
"preview": "[tool.poetry]\nname = \"ai-flow-back\"\nversion = \"0.11.3\"\ndescription = \"\"\nauthors = [\"DahnM20 <you@example.com>\"]\nreadme ="
},
{
"path": "packages/backend/requirements_windows.txt",
"chars": 16,
"preview": "python-magic-bin"
},
{
"path": "packages/backend/resources/data/openrouter_models.json",
"chars": 184852,
"preview": "{\"data\":[{\"id\":\"deepseek/deepseek-chat\",\"name\":\"DeepSeek V3\",\"created\":1735241320,\"description\":\"DeepSeek-V3 is the late"
},
{
"path": "packages/backend/resources/openapi/stabilityai.json",
"chars": 676530,
"preview": "{\n \"openapi\": \"3.0.3\",\n \"info\": {\n \"version\": \"v2beta\",\n \"title\": \"StabilityAI REST API\",\n \"description\": \"We"
},
{
"path": "packages/backend/server.py",
"chars": 869,
"preview": "from app.log_config import root_logger\nimport sys\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nfrom app.fla"
},
{
"path": "packages/backend/tests/unit/test_processor_factory.py",
"chars": 1961,
"preview": "import unittest\nfrom app.processors.factory.processor_factory_iter_modules import (\n ProcessorFactoryIterModules,\n)\nf"
},
{
"path": "packages/backend/tests/unit/test_processor_launcher.py",
"chars": 1734,
"preview": "import unittest\nfrom unittest.mock import MagicMock, Mock, patch, mock_open\n\nfrom app.processors.launcher.basic_processo"
},
{
"path": "packages/backend/tests/unit/test_stable_diffusion_stabilityai_prompt_processor.py",
"chars": 4432,
"preview": "import unittest\nfrom unittest.mock import ANY, patch, Mock\nimport re\nfrom app.processors.components.core.stable_diffusio"
},
{
"path": "packages/backend/tests/utils/openai_mock_utils.py",
"chars": 628,
"preview": "from unittest.mock import Mock\n\n\ndef create_mocked_openai_response(\n model=\"gpt-4\", api_key=\"000000000\", response_con"
},
{
"path": "packages/backend/tests/utils/processor_context_mock.py",
"chars": 837,
"preview": "from typing import List\nfrom app.processors.context.processor_context import ProcessorContext\nfrom typing import List\n\n\n"
},
{
"path": "packages/backend/tests/utils/processor_factory_mock.py",
"chars": 5159,
"preview": "import logging\nimport random\nimport eventlet\nimport time\n\nfrom injector import singleton\nfrom unittest.mock import Magic"
},
{
"path": "packages/ui/.gitignore",
"chars": 744,
"preview": "# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n/.pn"
},
{
"path": "packages/ui/.prettierignore",
"chars": 48,
"preview": "node_modules\n\n# Ignore artifacts:\nbuild\ncoverage"
},
{
"path": "packages/ui/Dockerfile",
"chars": 631,
"preview": "FROM node:21 as build\n\nWORKDIR /app\n\nARG VITE_APP_WS_HOST\nARG VITE_APP_WS_PORT\nARG VITE_APP_API_REST_PORT\nARG VITE_APP_U"
},
{
"path": "packages/ui/README.md",
"chars": 2103,
"preview": "# Getting Started with Create React App\n\nThis project was bootstrapped with [Create React App](https://github.com/facebo"
},
{
"path": "packages/ui/index.html",
"chars": 948,
"preview": "<!DOCTYPE html>\n<html lang=\"en\">\n\n<head>\n <meta charset=\"utf-8\" />\n <link rel=\"apple-touch-icon\" sizes=\"180x180\" href="
},
{
"path": "packages/ui/jest.config.ts",
"chars": 237,
"preview": "import type { Config } from \"@jest/types\";\n\nconst config: Config.InitialOptions = {\n verbose: true,\n preset: \"ts-jest\""
},
{
"path": "packages/ui/nginx.conf",
"chars": 253,
"preview": "server {\n listen 80;\n server_name localhost;\n root /usr/share/nginx/html;\n index index.html;\n\n location /"
},
{
"path": "packages/ui/package.json",
"chars": 3167,
"preview": "{\n \"name\": \"ai-flow-front\",\n \"version\": \"0.11.3\",\n \"private\": true,\n \"dependencies\": {\n \"@headlessui/react\": \"^1."
},
{
"path": "packages/ui/postcss.config.cjs",
"chars": 343,
"preview": "module.exports = {\n plugins: {\n \"postcss-preset-mantine\": {},\n \"postcss-simple-vars\": {\n variables: {\n "
},
{
"path": "packages/ui/postcss.config.js",
"chars": 82,
"preview": "module.exports = {\n plugins: {\n tailwindcss: {},\n autoprefixer: {},\n },\n}\n"
},
{
"path": "packages/ui/prettier.config.js",
"chars": 64,
"preview": "module.exports = {\n plugins: ['prettier-plugin-tailwindcss'],\n}"
},
{
"path": "packages/ui/public/health",
"chars": 2,
"preview": "OK"
},
{
"path": "packages/ui/public/locales/en/aiActions.json",
"chars": 365,
"preview": "{\n \"Summary\": \"Summary\",\n \"SpellCheck\": \"Spell Check\",\n \"VisualPrompt\": \"Visual Prompt\",\n \"ConstructiveCriti"
},
{
"path": "packages/ui/public/locales/en/config.json",
"chars": 1422,
"preview": "{\n \"configurationTitle\": \"Configuration\",\n \"apiKeyDisclaimer\": \"We do not use or store your API keys.\",\n \"openSourceD"
},
{
"path": "packages/ui/public/locales/en/dialogs.json",
"chars": 74,
"preview": "{\n \"attachNodeTitle\": \"Attach Node\",\n \"attachNodeAction\": \"Attach\"\n}"
},
{
"path": "packages/ui/public/locales/en/flow.json",
"chars": 10731,
"preview": "{\n \"Flow\": \"Flow\",\n \"AddTab\": \"Add Tab\",\n \"ShowOnlyOutputs\": \"Show only outputs\",\n \"ShowOnlyParams\": \"Show only para"
},
{
"path": "packages/ui/public/locales/en/nodeHelp.json",
"chars": 8379,
"preview": "{\n \"input-text\": {\n \"description\": \"Text Node can be used to transfer text input to other nodes.\",\n \"docUrls\": [\n"
},
{
"path": "packages/ui/public/locales/en/tips.json",
"chars": 3431,
"preview": "{\n \"tips\": [\n {\n \"title\": \"Getting started with AI-Flow\",\n \"description\": \"This guide will help you get st"
},
{
"path": "packages/ui/public/locales/en/tour.json",
"chars": 1435,
"preview": "{\n \"firstTimeHere\": \"First time here?\",\n \"discoverApp\": \"Unlock tips to make the most of our app in just 15 second"
},
{
"path": "packages/ui/public/locales/en/version.json",
"chars": 1186,
"preview": "{\n \"versionInfo\": {\n \"versionNumber\": \"v0.7.3\",\n \"description\": \"Discover the latest features added in version 0."
},
{
"path": "packages/ui/public/locales/fr/aiActions.json",
"chars": 401,
"preview": "{\n \"Summary\": \"Résumé\",\n \"SpellCheck\": \"Vérification Orthographique\",\n \"VisualPrompt\": \"Prompt Visuelle\",\n \""
},
{
"path": "packages/ui/public/locales/fr/config.json",
"chars": 1524,
"preview": "{\n \"configurationTitle\": \"Configuration\",\n \"apiKeyDisclaimer\": \"Nous n'utilisons ni ne stockons vos clés API.\",\n \"ope"
},
{
"path": "packages/ui/public/locales/fr/dialogs.json",
"chars": 82,
"preview": "{\n \"attachNodeTitle\": \"Attacher un noeud\",\n \"attachNodeAction\": \"Attacher\"\n}"
},
{
"path": "packages/ui/public/locales/fr/flow.json",
"chars": 11654,
"preview": "{\n \"Flow\": \"Flow\",\n \"AddTab\": \"Ajouter un Flow\",\n \"ShowOnlyOutputs\": \"Afficher uniquement les résultats\",\n \"ShowOnly"
},
{
"path": "packages/ui/public/locales/fr/nodeHelp.json",
"chars": 8855,
"preview": "{\n \"input-text\": {\n \"description\": \"Le nœud de texte peut être utilisé pour transférer une entrée de texte à d'autre"
},
{
"path": "packages/ui/public/locales/fr/tips.json",
"chars": 3507,
"preview": "{\n \"tips\": [\n {\n \"title\": \"Bien débuter sur AI-Flow\",\n \"description\": \"Ce guide vous montrera l'essentiel "
},
{
"path": "packages/ui/public/locales/fr/tour.json",
"chars": 1623,
"preview": "{\n \"firstTimeHere\": \"Première visite ?\",\n \"discoverApp\": \"Découvrez des astuces pour profiter pleinement de notre "
},
{
"path": "packages/ui/public/locales/fr/version.json",
"chars": 1325,
"preview": "{\n \"versionInfo\": {\n \"versionNumber\": \"v0.7.3\",\n \"description\": \"Voici les nouveautés de la v0.7.3\"\n },\n \"featu"
},
{
"path": "packages/ui/public/robots.txt",
"chars": 67,
"preview": "# https://www.robotstxt.org/robotstxt.html\nUser-agent: *\nDisallow:\n"
},
{
"path": "packages/ui/public/samples/intro.json",
"chars": 179,
"preview": "[\n {\n \"inputs\": [],\n \"name\": \"3jexlwros#llm-prompt\",\n \"processorType\": \"llm-prompt\",\n \"model\": \"gpt-4o\",\n "
},
{
"path": "packages/ui/public/site.webmanifest",
"chars": 263,
"preview": "{\"name\":\"\",\"short_name\":\"\",\"icons\":[{\"src\":\"/android-chrome-192x192.png\",\"sizes\":\"192x192\",\"type\":\"image/png\"},{\"src\":\"/"
},
{
"path": "packages/ui/src/App.tsx",
"chars": 4327,
"preview": "import { useContext, useEffect, useMemo, useState } from \"react\";\nimport FlowTabs, { FlowTab } from \"./layout/main-layou"
},
{
"path": "packages/ui/src/Main.tsx",
"chars": 439,
"preview": "import React, { useState } from \"react\";\nimport App from \"./App\";\nimport LoadingScreen from \"./components/LoadingScreen\""
},
{
"path": "packages/ui/src/api/cache/cacheManager.ts",
"chars": 1717,
"preview": "import { isCacheEnabled } from \"../../config/config\";\n\ninterface CacheItem<T> {\n data: T;\n ttl?: number;\n timestamp: "
},
{
"path": "packages/ui/src/api/cache/withCache.ts",
"chars": 1436,
"preview": "import { generateCacheKey, getCache, setCache } from \"./cacheManager\";\n\ntype AsyncFunction<T extends any[], N> = (...arg"
},
{
"path": "packages/ui/src/api/client.ts",
"chars": 228,
"preview": "import axios from \"axios\";\nimport { getRestApiUrl } from \"../config/config\";\n\nconst apiClient = axios.create({\n baseURL"
},
{
"path": "packages/ui/src/api/nodes.ts",
"chars": 1233,
"preview": "import client from \"./client\";\n\nexport async function getNodeExtensions() {\n let response;\n try {\n response = await"
},
{
"path": "packages/ui/src/api/parameters.ts",
"chars": 270,
"preview": "import client from \"./client\";\n\nexport async function getParameters() {\n let response;\n try {\n response = await cli"
},
{
"path": "packages/ui/src/api/replicateModels.ts",
"chars": 2121,
"preview": "import { Config } from \"../utils/openAPIUtils\";\nimport client from \"./client\";\n\ninterface GetCollectionModelsResponse {\n"
},
{
"path": "packages/ui/src/api/uploadFile.ts",
"chars": 1167,
"preview": "import axios, { AxiosProgressEvent } from \"axios\";\nimport client from \"./client\";\n\nexport async function getUploadAndDow"
},
{
"path": "packages/ui/src/components/Flow.tsx",
"chars": 9618,
"preview": "import {\n useState,\n useCallback,\n useMemo,\n useEffect,\n useRef,\n useImperativeHandle,\n Ref,\n forwardRef,\n} from"
},
{
"path": "packages/ui/src/components/LoadingScreen.tsx",
"chars": 505,
"preview": "import { LoadingScreenSpinner } from \"./nodes/Node.styles\";\n\nconst LoadingScreen = () => {\n return (\n <div\n cla"
},
{
"path": "packages/ui/src/components/bars/Sidebar.tsx",
"chars": 4456,
"preview": "import React, { useContext } from \"react\";\nimport { Edge, Node } from \"reactflow\";\nimport JSONView from \"../side-views/J"
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/DnDSidebar.tsx",
"chars": 7519,
"preview": "import { useTranslation } from \"react-i18next\";\nimport styled from \"styled-components\";\nimport {\n DnDNode,\n getSection"
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/DraggableNode.tsx",
"chars": 3720,
"preview": "import { useDrag } from \"react-dnd\";\nimport { useTranslation } from \"react-i18next\";\nimport { DnDNode } from \"../../../n"
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/DraggableNodeWithSubnodes.tsx",
"chars": 4820,
"preview": "import React, { useState } from \"react\";\nimport { FiChevronDown, FiChevronRight, FiGrid } from \"react-icons/fi\";\nimport "
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/GripIcon.tsx",
"chars": 615,
"preview": "import { ComponentProps } from \"react\";\n\nexport function GripIcon(props: ComponentProps<\"svg\">) {\n return (\n <svg\n "
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/Section.tsx",
"chars": 1407,
"preview": "import { useTranslation } from \"react-i18next\";\nimport { NodeSection } from \"../../../nodes-configuration/sectionConfig\""
},
{
"path": "packages/ui/src/components/bars/dnd-sidebar/types.ts",
"chars": 102,
"preview": "export interface DraggableNodeAdditionnalData {\n additionnalData?: any;\n additionnalConfig?: any;\n}\n"
},
{
"path": "packages/ui/src/components/buttons/ButtonRunAll.tsx",
"chars": 1347,
"preview": "import styled, { keyframes } from \"styled-components\";\nimport { FaPlay, FaSpinner } from \"react-icons/fa\";\nimport { memo"
},
{
"path": "packages/ui/src/components/buttons/ConfigurationButton.tsx",
"chars": 972,
"preview": "import React, { memo } from \"react\";\nimport { FiSettings } from \"react-icons/fi\";\nimport styled from \"styled-components\""
},
{
"path": "packages/ui/src/components/edges/buttonEdge.tsx",
"chars": 2715,
"preview": "import {\n BaseEdge,\n EdgeLabelRenderer,\n EdgeProps,\n getBezierPath,\n getSmoothStepPath,\n getStraightPath,\n useRea"
},
{
"path": "packages/ui/src/components/handles/HandleWrapper.tsx",
"chars": 5605,
"preview": "import ReactDOM from \"react-dom\";\nimport styled, { CSSProperties } from \"styled-components\";\nimport { InputHandle, Outpu"
},
{
"path": "packages/ui/src/components/inputs/InputWithButton.tsx",
"chars": 1141,
"preview": "import NodeTextField from \"../nodes/node-input/NodeTextField\";\n\ninterface InputWithButtonProps {\n buttonText: string;\n "
},
{
"path": "packages/ui/src/components/nodes/AIDataSplitterNode.tsx",
"chars": 7276,
"preview": "import React, { useContext, useEffect } from \"react\";\nimport { Position, NodeProps, useUpdateNodeInternals } from \"react"
},
{
"path": "packages/ui/src/components/nodes/DisplayNode.tsx",
"chars": 5589,
"preview": "import React, { useContext, useEffect, useMemo, useState } from \"react\";\nimport {\n Position,\n NodeProps,\n useUpdateNo"
},
{
"path": "packages/ui/src/components/nodes/FileUploadNode.tsx",
"chars": 7133,
"preview": "import { useContext, useEffect, useState } from \"react\";\nimport { FaFileAlt, FaLink } from \"react-icons/fa\";\nimport { No"
},
{
"path": "packages/ui/src/components/nodes/GenericNode.tsx",
"chars": 12125,
"preview": "import React, { useState, useEffect, useContext, useMemo, FC } from \"react\";\nimport { Position, NodeProps, useUpdateNode"
},
{
"path": "packages/ui/src/components/nodes/Node.styles.ts",
"chars": 7503,
"preview": "import styled, { css, keyframes } from \"styled-components\";\nimport ReactFlow, { MiniMap, Controls, Panel, Handle } from "
},
{
"path": "packages/ui/src/components/nodes/NodeHelpPopover.tsx",
"chars": 916,
"preview": "import React from \"react\";\nimport { Popover } from \"@mantine/core\";\nimport { NodeHelp, NodeHelpData } from \"./utils/Node"
},
{
"path": "packages/ui/src/components/nodes/NodeWrapper.tsx",
"chars": 6597,
"preview": "import React, { useContext, useState } from \"react\";\nimport { NodeContext } from \"../../providers/NodeProvider\";\nimport "
},
{
"path": "packages/ui/src/components/nodes/ReplicateNode.tsx",
"chars": 6998,
"preview": "import { useContext, useEffect, useMemo, useRef, useState } from \"react\";\nimport { useTranslation } from \"react-i18next\""
},
{
"path": "packages/ui/src/components/nodes/TransitionNode.tsx",
"chars": 3351,
"preview": "import React, { useContext, useEffect, useMemo, useState } from \"react\";\nimport { Position, NodeProps, useUpdateNodeInte"
},
{
"path": "packages/ui/src/components/nodes/node-button/InputNameBar.tsx",
"chars": 2568,
"preview": "import { memo } from \"react\";\nimport { Tooltip, ActionIcon } from \"@mantine/core\";\nimport { FaMinus, FaPlus } from \"reac"
},
{
"path": "packages/ui/src/components/nodes/node-button/NodePlayButton.tsx",
"chars": 2572,
"preview": "import React, { useContext, useState } from \"react\";\nimport styled, { css, keyframes } from \"styled-components\";\nimport "
},
{
"path": "packages/ui/src/components/nodes/node-input/FileUploadField.tsx",
"chars": 3984,
"preview": "import React, { useContext, useEffect, useRef, useState } from \"react\";\nimport { useTranslation } from \"react-i18next\";\n"
},
{
"path": "packages/ui/src/components/nodes/node-input/ImageMaskCreator.tsx",
"chars": 11221,
"preview": "import { Button, Slider } from \"@mantine/core\";\nimport React, { useState, useRef, useEffect } from \"react\";\nimport { use"
},
{
"path": "packages/ui/src/components/nodes/node-input/ImageMaskCreatorField.tsx",
"chars": 3936,
"preview": "import { Button, Modal } from \"@mantine/core\";\nimport React, { useContext, useEffect, useState } from \"react\";\nimport { "
},
{
"path": "packages/ui/src/components/nodes/node-input/ImageMaskCreatorFieldFlowAware.tsx",
"chars": 722,
"preview": "import { getOutputExtension } from \"../node-output/outputUtils\";\nimport ImageMaskCreatorField from \"./ImageMaskCreatorFi"
},
{
"path": "packages/ui/src/components/nodes/node-input/KeyValueInputList.tsx",
"chars": 2168,
"preview": "// KeyValueInputList.tsx\nimport React from \"react\";\nimport { Button, Group, TextInput } from \"@mantine/core\";\nimport { M"
},
{
"path": "packages/ui/src/components/nodes/node-input/NodeField.tsx",
"chars": 3687,
"preview": "import { InputHandle, NodeLabel } from \"../Node.styles\";\nimport { Position } from \"reactflow\";\nimport { Field } from \".."
},
{
"path": "packages/ui/src/components/nodes/node-input/NodeTextField.tsx",
"chars": 2315,
"preview": "import styled from \"styled-components\";\nimport { ChangeEvent, RefObject, useState } from \"react\";\nimport TextAreaPopupWr"
},
{
"path": "packages/ui/src/components/nodes/node-input/NodeTextarea.tsx",
"chars": 2701,
"preview": "import { useTranslation } from \"react-i18next\";\nimport { DisplayParams } from \"../../../hooks/useFormFields\";\nimport { F"
},
{
"path": "packages/ui/src/components/nodes/node-input/OutputRenderer.tsx",
"chars": 3113,
"preview": "import { useState } from \"react\";\nimport { FiFile } from \"react-icons/fi\";\nimport AudioUrlOutput from \"../../nodes/node-"
},
{
"path": "packages/ui/src/components/nodes/node-input/TextAreaPopupWrapper.tsx",
"chars": 1227,
"preview": "import { Tooltip } from \"@mantine/core\";\nimport { FiExternalLink } from \"react-icons/fi\";\nimport { TextareaModal } from "
}
]
// ... and 112 more files (download for full content)
About this extraction
This page contains the full source code of the DahnM20/ai-flow GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 312 files (1.6 MB), approximately 383.3k tokens, and a symbol index with 897 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.