Showing preview only (3,418K chars total). Download the full file or copy to clipboard to get everything.
Repository: exo-explore/exo
Branch: main
Commit: 07598a3af1f6
Files: 724
Total size: 3.1 MB
Directory structure:
gitextract_vey6weao/
├── .clauderules
├── .cursorrules
├── .envrc
├── .githooks/
│ ├── post-checkout
│ ├── post-commit
│ ├── post-merge
│ └── pre-push
├── .github/
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── actions/
│ │ ├── conditional-commit/
│ │ │ └── action.yml
│ │ ├── format/
│ │ │ └── action.yml
│ │ ├── lint/
│ │ │ └── action.yml
│ │ ├── lint-check/
│ │ │ └── action.yml
│ │ ├── regenerate-protobufs/
│ │ │ └── action.yml
│ │ ├── setup-python-uv/
│ │ │ └── action.yml
│ │ ├── unit-test/
│ │ │ └── action.yml
│ │ └── verify-clean/
│ │ └── action.yml
│ ├── pull_request_template.md
│ └── workflows/
│ ├── build-app.yml
│ └── pipeline.yml
├── .gitignore
├── .mlx_typings/
│ ├── .gitkeep
│ ├── mflux/
│ │ ├── __init__.pyi
│ │ ├── callbacks/
│ │ │ ├── __init__.pyi
│ │ │ ├── callback.pyi
│ │ │ ├── callback_registry.pyi
│ │ │ └── generation_context.pyi
│ │ ├── cli/
│ │ │ ├── __init__.pyi
│ │ │ └── defaults/
│ │ │ └── defaults.pyi
│ │ ├── models/
│ │ │ ├── __init__.pyi
│ │ │ ├── common/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── config/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── config.pyi
│ │ │ │ │ └── model_config.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── latent_creator.pyi
│ │ │ │ ├── lora/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── layer/
│ │ │ │ │ │ ├── fused_linear_lora_layer.pyi
│ │ │ │ │ │ └── linear_lora_layer.pyi
│ │ │ │ │ └── mapping/
│ │ │ │ │ ├── lora_loader.pyi
│ │ │ │ │ ├── lora_mapping.pyi
│ │ │ │ │ ├── lora_saver.pyi
│ │ │ │ │ └── lora_transforms.pyi
│ │ │ │ ├── resolution/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── actions.pyi
│ │ │ │ │ ├── config_resolution.pyi
│ │ │ │ │ ├── lora_resolution.pyi
│ │ │ │ │ ├── path_resolution.pyi
│ │ │ │ │ └── quantization_resolution.pyi
│ │ │ │ ├── schedulers/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── base_scheduler.pyi
│ │ │ │ │ ├── flow_match_euler_discrete_scheduler.pyi
│ │ │ │ │ ├── linear_scheduler.pyi
│ │ │ │ │ └── seedvr2_euler_scheduler.pyi
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── tokenizer.pyi
│ │ │ │ │ ├── tokenizer_loader.pyi
│ │ │ │ │ └── tokenizer_output.pyi
│ │ │ │ ├── vae/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── tiling_config.pyi
│ │ │ │ │ ├── vae_tiler.pyi
│ │ │ │ │ └── vae_util.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── loading/
│ │ │ │ │ ├── loaded_weights.pyi
│ │ │ │ │ ├── weight_applier.pyi
│ │ │ │ │ ├── weight_definition.pyi
│ │ │ │ │ └── weight_loader.pyi
│ │ │ │ ├── mapping/
│ │ │ │ │ ├── weight_mapper.pyi
│ │ │ │ │ ├── weight_mapping.pyi
│ │ │ │ │ └── weight_transforms.pyi
│ │ │ │ └── saving/
│ │ │ │ └── model_saver.pyi
│ │ │ ├── depth_pro/
│ │ │ │ ├── depth_pro_initializer.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── decoder/
│ │ │ │ │ │ ├── feature_fusion_block_2d.pyi
│ │ │ │ │ │ ├── multires_conv_decoder.pyi
│ │ │ │ │ │ └── residual_block.pyi
│ │ │ │ │ ├── depth_pro.pyi
│ │ │ │ │ ├── depth_pro_model.pyi
│ │ │ │ │ ├── depth_pro_util.pyi
│ │ │ │ │ ├── dino_v2/
│ │ │ │ │ │ ├── attention.pyi
│ │ │ │ │ │ ├── dino_vision_transformer.pyi
│ │ │ │ │ │ ├── layer_scale.pyi
│ │ │ │ │ │ ├── mlp.pyi
│ │ │ │ │ │ ├── patch_embed.pyi
│ │ │ │ │ │ └── transformer_block.pyi
│ │ │ │ │ ├── encoder/
│ │ │ │ │ │ ├── depth_pro_encoder.pyi
│ │ │ │ │ │ └── upsample_block.pyi
│ │ │ │ │ └── head/
│ │ │ │ │ └── fov_head.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── depth_pro_weight_definition.pyi
│ │ │ │ └── depth_pro_weight_mapping.pyi
│ │ │ ├── fibo/
│ │ │ │ ├── latent_creator/
│ │ │ │ │ └── fibo_latent_creator.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── fibo_weight_definition.pyi
│ │ │ │ └── fibo_weight_mapping.pyi
│ │ │ ├── fibo_vlm/
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── qwen2vl_image_processor.pyi
│ │ │ │ │ └── qwen2vl_processor.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── fibo_vlm_weight_definition.pyi
│ │ │ │ └── fibo_vlm_weight_mapping.pyi
│ │ │ ├── flux/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── flux_initializer.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── flux_latent_creator.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── flux_text_encoder/
│ │ │ │ │ │ ├── clip_encoder/
│ │ │ │ │ │ │ ├── clip_embeddings.pyi
│ │ │ │ │ │ │ ├── clip_encoder.pyi
│ │ │ │ │ │ │ ├── clip_encoder_layer.pyi
│ │ │ │ │ │ │ ├── clip_mlp.pyi
│ │ │ │ │ │ │ ├── clip_sdpa_attention.pyi
│ │ │ │ │ │ │ ├── clip_text_model.pyi
│ │ │ │ │ │ │ └── encoder_clip.pyi
│ │ │ │ │ │ ├── prompt_encoder.pyi
│ │ │ │ │ │ └── t5_encoder/
│ │ │ │ │ │ ├── t5_attention.pyi
│ │ │ │ │ │ ├── t5_block.pyi
│ │ │ │ │ │ ├── t5_dense_relu_dense.pyi
│ │ │ │ │ │ ├── t5_encoder.pyi
│ │ │ │ │ │ ├── t5_feed_forward.pyi
│ │ │ │ │ │ ├── t5_layer_norm.pyi
│ │ │ │ │ │ └── t5_self_attention.pyi
│ │ │ │ │ ├── flux_transformer/
│ │ │ │ │ │ ├── ada_layer_norm_continuous.pyi
│ │ │ │ │ │ ├── ada_layer_norm_zero.pyi
│ │ │ │ │ │ ├── ada_layer_norm_zero_single.pyi
│ │ │ │ │ │ ├── common/
│ │ │ │ │ │ │ └── attention_utils.pyi
│ │ │ │ │ │ ├── embed_nd.pyi
│ │ │ │ │ │ ├── feed_forward.pyi
│ │ │ │ │ │ ├── guidance_embedder.pyi
│ │ │ │ │ │ ├── joint_attention.pyi
│ │ │ │ │ │ ├── joint_transformer_block.pyi
│ │ │ │ │ │ ├── single_block_attention.pyi
│ │ │ │ │ │ ├── single_transformer_block.pyi
│ │ │ │ │ │ ├── text_embedder.pyi
│ │ │ │ │ │ ├── time_text_embed.pyi
│ │ │ │ │ │ ├── timestep_embedder.pyi
│ │ │ │ │ │ └── transformer.pyi
│ │ │ │ │ ├── flux_vae/
│ │ │ │ │ │ ├── common/
│ │ │ │ │ │ │ ├── attention.pyi
│ │ │ │ │ │ │ ├── resnet_block_2d.pyi
│ │ │ │ │ │ │ └── unet_mid_block.pyi
│ │ │ │ │ │ ├── decoder/
│ │ │ │ │ │ │ ├── conv_in.pyi
│ │ │ │ │ │ │ ├── conv_norm_out.pyi
│ │ │ │ │ │ │ ├── conv_out.pyi
│ │ │ │ │ │ │ ├── decoder.pyi
│ │ │ │ │ │ │ ├── up_block_1_or_2.pyi
│ │ │ │ │ │ │ ├── up_block_3.pyi
│ │ │ │ │ │ │ ├── up_block_4.pyi
│ │ │ │ │ │ │ └── up_sampler.pyi
│ │ │ │ │ │ ├── encoder/
│ │ │ │ │ │ │ ├── conv_in.pyi
│ │ │ │ │ │ │ ├── conv_norm_out.pyi
│ │ │ │ │ │ │ ├── conv_out.pyi
│ │ │ │ │ │ │ ├── down_block_1.pyi
│ │ │ │ │ │ │ ├── down_block_2.pyi
│ │ │ │ │ │ │ ├── down_block_3.pyi
│ │ │ │ │ │ │ ├── down_block_4.pyi
│ │ │ │ │ │ │ ├── down_sampler.pyi
│ │ │ │ │ │ │ └── encoder.pyi
│ │ │ │ │ │ └── vae.pyi
│ │ │ │ │ ├── redux_encoder/
│ │ │ │ │ │ └── redux_encoder.pyi
│ │ │ │ │ └── siglip_vision_transformer/
│ │ │ │ │ ├── siglip_encoder.pyi
│ │ │ │ │ ├── siglip_encoder_layer.pyi
│ │ │ │ │ ├── siglip_mlp.pyi
│ │ │ │ │ ├── siglip_multi_head_attention_pooling_head.pyi
│ │ │ │ │ ├── siglip_sdpa_attention.pyi
│ │ │ │ │ ├── siglip_vision_embeddings.pyi
│ │ │ │ │ └── siglip_vision_transformer.pyi
│ │ │ │ ├── variants/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── concept_attention/
│ │ │ │ │ │ ├── attention_data.pyi
│ │ │ │ │ │ ├── joint_attention_concept.pyi
│ │ │ │ │ │ ├── joint_transformer_block_concept.pyi
│ │ │ │ │ │ └── transformer_concept.pyi
│ │ │ │ │ ├── controlnet/
│ │ │ │ │ │ └── transformer_controlnet.pyi
│ │ │ │ │ ├── kontext/
│ │ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ │ ├── flux_kontext.pyi
│ │ │ │ │ │ └── kontext_util.pyi
│ │ │ │ │ └── txt2img/
│ │ │ │ │ └── flux.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── flux_lora_mapping.pyi
│ │ │ │ ├── flux_weight_definition.pyi
│ │ │ │ └── flux_weight_mapping.pyi
│ │ │ ├── qwen/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── qwen_latent_creator.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── qwen_text_encoder/
│ │ │ │ │ │ ├── qwen_attention.pyi
│ │ │ │ │ │ ├── qwen_encoder.pyi
│ │ │ │ │ │ ├── qwen_encoder_layer.pyi
│ │ │ │ │ │ ├── qwen_mlp.pyi
│ │ │ │ │ │ ├── qwen_patch_merger.pyi
│ │ │ │ │ │ ├── qwen_prompt_encoder.pyi
│ │ │ │ │ │ ├── qwen_rms_norm.pyi
│ │ │ │ │ │ ├── qwen_rope.pyi
│ │ │ │ │ │ ├── qwen_text_encoder.pyi
│ │ │ │ │ │ ├── qwen_vision_attention.pyi
│ │ │ │ │ │ ├── qwen_vision_block.pyi
│ │ │ │ │ │ ├── qwen_vision_language_encoder.pyi
│ │ │ │ │ │ ├── qwen_vision_mlp.pyi
│ │ │ │ │ │ ├── qwen_vision_patch_embed.pyi
│ │ │ │ │ │ ├── qwen_vision_rotary_embedding.pyi
│ │ │ │ │ │ └── qwen_vision_transformer.pyi
│ │ │ │ │ ├── qwen_transformer/
│ │ │ │ │ │ ├── qwen_attention.pyi
│ │ │ │ │ │ ├── qwen_feed_forward.pyi
│ │ │ │ │ │ ├── qwen_rope.pyi
│ │ │ │ │ │ ├── qwen_time_text_embed.pyi
│ │ │ │ │ │ ├── qwen_timestep_embedding.pyi
│ │ │ │ │ │ ├── qwen_timesteps.pyi
│ │ │ │ │ │ ├── qwen_transformer.pyi
│ │ │ │ │ │ ├── qwen_transformer_block.pyi
│ │ │ │ │ │ └── qwen_transformer_rms_norm.pyi
│ │ │ │ │ └── qwen_vae/
│ │ │ │ │ ├── qwen_image_attention_block_3d.pyi
│ │ │ │ │ ├── qwen_image_causal_conv_3d.pyi
│ │ │ │ │ ├── qwen_image_decoder_3d.pyi
│ │ │ │ │ ├── qwen_image_down_block_3d.pyi
│ │ │ │ │ ├── qwen_image_encoder_3d.pyi
│ │ │ │ │ ├── qwen_image_mid_block_3d.pyi
│ │ │ │ │ ├── qwen_image_res_block_3d.pyi
│ │ │ │ │ ├── qwen_image_resample_3d.pyi
│ │ │ │ │ ├── qwen_image_rms_norm.pyi
│ │ │ │ │ ├── qwen_image_up_block_3d.pyi
│ │ │ │ │ └── qwen_vae.pyi
│ │ │ │ ├── qwen_initializer.pyi
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── qwen_image_processor.pyi
│ │ │ │ │ ├── qwen_vision_language_processor.pyi
│ │ │ │ │ └── qwen_vision_language_tokenizer.pyi
│ │ │ │ ├── variants/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── edit/
│ │ │ │ │ │ ├── qwen_edit_util.pyi
│ │ │ │ │ │ └── qwen_image_edit.pyi
│ │ │ │ │ └── txt2img/
│ │ │ │ │ └── qwen_image.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── qwen_lora_mapping.pyi
│ │ │ │ ├── qwen_weight_definition.pyi
│ │ │ │ └── qwen_weight_mapping.pyi
│ │ │ ├── seedvr2/
│ │ │ │ └── weights/
│ │ │ │ ├── seedvr2_weight_definition.pyi
│ │ │ │ └── seedvr2_weight_mapping.pyi
│ │ │ └── z_image/
│ │ │ ├── latent_creator/
│ │ │ │ └── z_image_latent_creator.pyi
│ │ │ └── weights/
│ │ │ ├── z_image_weight_definition.pyi
│ │ │ └── z_image_weight_mapping.pyi
│ │ ├── release/
│ │ │ └── __init__.pyi
│ │ └── utils/
│ │ ├── __init__.pyi
│ │ ├── box_values.pyi
│ │ ├── exceptions.pyi
│ │ ├── generated_image.pyi
│ │ ├── image_util.pyi
│ │ ├── metadata_builder.pyi
│ │ └── version_util.pyi
│ ├── mlx/
│ │ ├── core/
│ │ │ ├── __init__.pyi
│ │ │ ├── cuda/
│ │ │ │ └── __init__.pyi
│ │ │ ├── distributed/
│ │ │ │ └── __init__.pyi
│ │ │ ├── metal/
│ │ │ │ └── __init__.pyi
│ │ │ └── random/
│ │ │ └── __init__.pyi
│ │ ├── nn/
│ │ │ ├── __init__.pyi
│ │ │ ├── init.pyi
│ │ │ ├── layers/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── activations.pyi
│ │ │ │ ├── base.pyi
│ │ │ │ ├── containers.pyi
│ │ │ │ ├── convolution.pyi
│ │ │ │ ├── convolution_transpose.pyi
│ │ │ │ ├── distributed.pyi
│ │ │ │ ├── dropout.pyi
│ │ │ │ ├── embedding.pyi
│ │ │ │ ├── linear.pyi
│ │ │ │ ├── normalization.pyi
│ │ │ │ ├── pooling.pyi
│ │ │ │ ├── positional_encoding.pyi
│ │ │ │ ├── quantized.pyi
│ │ │ │ ├── recurrent.pyi
│ │ │ │ ├── transformer.pyi
│ │ │ │ └── upsample.pyi
│ │ │ ├── losses.pyi
│ │ │ └── utils.pyi
│ │ └── utils.pyi
│ └── mlx_lm/
│ ├── __init__.pyi
│ ├── convert.pyi
│ ├── generate.pyi
│ ├── models/
│ │ ├── __init__.pyi
│ │ ├── base.pyi
│ │ ├── bitlinear_layers.pyi
│ │ ├── cache.pyi
│ │ ├── deepseek_v3.pyi
│ │ ├── glm4_moe.pyi
│ │ ├── glm_moe_dsa.pyi
│ │ ├── nemotron_h.pyi
│ │ ├── qwen3_5.pyi
│ │ ├── qwen3_5_moe.pyi
│ │ ├── qwen3_next.pyi
│ │ ├── step3p5.pyi
│ │ └── switch_layers.pyi
│ ├── sample_utils.pyi
│ ├── tokenizer_utils.pyi
│ └── utils.pyi
├── .python-version
├── .swift-format
├── .vscode/
│ ├── extensions.json
│ └── settings.json
├── .zed/
│ └── settings.json
├── AGENTS.md
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE
├── MISSED_THINGS.md
├── PLATFORMS.md
├── README.md
├── RULES.md
├── TODO.md
├── app/
│ └── EXO/
│ ├── EXO/
│ │ ├── Assets.xcassets/
│ │ │ ├── AccentColor.colorset/
│ │ │ │ └── Contents.json
│ │ │ ├── AppIcon.appiconset/
│ │ │ │ └── Contents.json
│ │ │ ├── Contents.json
│ │ │ └── menubar-icon.imageset/
│ │ │ └── Contents.json
│ │ ├── ContentView.swift
│ │ ├── EXO.entitlements
│ │ ├── EXOApp.swift
│ │ ├── ExoProcessController.swift
│ │ ├── Info.plist
│ │ ├── Models/
│ │ │ └── ClusterState.swift
│ │ ├── Preview Content/
│ │ │ └── Preview Assets.xcassets/
│ │ │ └── Contents.json
│ │ ├── Services/
│ │ │ ├── BugReportService.swift
│ │ │ ├── ClusterStateService.swift
│ │ │ ├── LocalNetworkChecker.swift
│ │ │ ├── NetworkSetupHelper.swift
│ │ │ ├── NetworkStatusService.swift
│ │ │ ├── ThunderboltBridgeDetector.swift
│ │ │ └── ThunderboltBridgeService.swift
│ │ ├── ViewModels/
│ │ │ ├── InstanceViewModel.swift
│ │ │ └── NodeViewModel.swift
│ │ ├── Views/
│ │ │ ├── FirstLaunchPopout.swift
│ │ │ ├── InstanceRowView.swift
│ │ │ ├── NodeDetailView.swift
│ │ │ ├── NodeRowView.swift
│ │ │ ├── SettingsView.swift
│ │ │ ├── SettingsWindowController.swift
│ │ │ └── TopologyMiniView.swift
│ │ └── main.swift
│ ├── EXO.xcodeproj/
│ │ ├── project.pbxproj
│ │ ├── project.xcworkspace/
│ │ │ ├── contents.xcworkspacedata
│ │ │ └── xcshareddata/
│ │ │ └── swiftpm/
│ │ │ └── Package.resolved
│ │ └── xcshareddata/
│ │ └── xcschemes/
│ │ └── EXO.xcscheme
│ ├── EXOTests/
│ │ └── EXOTests.swift
│ ├── EXOUITests/
│ │ ├── EXOUITests.swift
│ │ └── EXOUITestsLaunchTests.swift
│ └── uninstall-exo.sh
├── bench/
│ ├── bench.toml
│ ├── eval_configs/
│ │ └── models.toml
│ ├── eval_tool_calls.py
│ ├── exo_bench.py
│ ├── exo_eval.py
│ ├── harness.py
│ ├── parallel_requests.py
│ ├── pyproject.toml
│ ├── scenarios.toml
│ ├── single-m3-ultra.toml
│ ├── src/
│ │ └── exo_bench/
│ │ └── __init__.py
│ └── vendor/
│ ├── __init__.py
│ └── lcb_testing_util.py
├── dashboard/
│ ├── dashboard.nix
│ ├── package.json
│ ├── parts.nix
│ ├── src/
│ │ ├── app.css
│ │ ├── app.d.ts
│ │ ├── app.html
│ │ ├── lib/
│ │ │ ├── components/
│ │ │ │ ├── ChatAttachments.svelte
│ │ │ │ ├── ChatForm.svelte
│ │ │ │ ├── ChatMessages.svelte
│ │ │ │ ├── ChatModelSelector.svelte
│ │ │ │ ├── ChatSidebar.svelte
│ │ │ │ ├── ConnectionBanner.svelte
│ │ │ │ ├── DeviceIcon.svelte
│ │ │ │ ├── FamilyLogos.svelte
│ │ │ │ ├── FamilySidebar.svelte
│ │ │ │ ├── HeaderNav.svelte
│ │ │ │ ├── HuggingFaceResultItem.svelte
│ │ │ │ ├── ImageLightbox.svelte
│ │ │ │ ├── ImageParamsPanel.svelte
│ │ │ │ ├── MarkdownContent.svelte
│ │ │ │ ├── ModelCard.svelte
│ │ │ │ ├── ModelFilterPopover.svelte
│ │ │ │ ├── ModelPickerGroup.svelte
│ │ │ │ ├── ModelPickerModal.svelte
│ │ │ │ ├── PrefillProgressBar.svelte
│ │ │ │ ├── ToastContainer.svelte
│ │ │ │ ├── TokenHeatmap.svelte
│ │ │ │ ├── TopologyGraph.svelte
│ │ │ │ └── index.ts
│ │ │ ├── stores/
│ │ │ │ ├── app.svelte.ts
│ │ │ │ ├── favorites.svelte.ts
│ │ │ │ ├── recents.svelte.ts
│ │ │ │ └── toast.svelte.ts
│ │ │ ├── types/
│ │ │ │ └── files.ts
│ │ │ └── utils/
│ │ │ └── downloads.ts
│ │ └── routes/
│ │ ├── +layout.svelte
│ │ ├── +page.svelte
│ │ ├── downloads/
│ │ │ └── +page.svelte
│ │ └── traces/
│ │ ├── +page.svelte
│ │ └── [taskId]/
│ │ └── +page.svelte
│ ├── svelte.config.js
│ ├── tsconfig.json
│ └── vite.config.ts
├── docs/
│ ├── api.md
│ └── architecture.md
├── flake.nix
├── justfile
├── nix/
│ ├── apple-sdk/
│ │ └── metadata/
│ │ └── versions.json
│ ├── apple-sdk-overlay.nix
│ ├── darwin-build-fixes.patch
│ ├── metal-toolchain.nix
│ └── mlx.nix
├── packaging/
│ ├── dmg/
│ │ ├── create-dmg.sh
│ │ └── generate-background.py
│ └── pyinstaller/
│ └── exo.spec
├── pyproject.toml
├── python/
│ └── parts.nix
├── resources/
│ ├── image_model_cards/
│ │ ├── exolabs--FLUX.1-Kontext-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-Kontext-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-Kontext-dev.toml
│ │ ├── exolabs--FLUX.1-Krea-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-Krea-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-Krea-dev.toml
│ │ ├── exolabs--FLUX.1-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-dev.toml
│ │ ├── exolabs--FLUX.1-schnell-4bit.toml
│ │ ├── exolabs--FLUX.1-schnell-8bit.toml
│ │ ├── exolabs--FLUX.1-schnell.toml
│ │ ├── exolabs--Qwen-Image-4bit.toml
│ │ ├── exolabs--Qwen-Image-8bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509-4bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509-8bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509.toml
│ │ └── exolabs--Qwen-Image.toml
│ └── inference_model_cards/
│ ├── mlx-community--DeepSeek-V3.1-4bit.toml
│ ├── mlx-community--DeepSeek-V3.1-8bit.toml
│ ├── mlx-community--GLM-4.5-Air-8bit.toml
│ ├── mlx-community--GLM-4.5-Air-bf16.toml
│ ├── mlx-community--GLM-4.7-4bit.toml
│ ├── mlx-community--GLM-4.7-6bit.toml
│ ├── mlx-community--GLM-4.7-8bit-gs32.toml
│ ├── mlx-community--GLM-4.7-Flash-4bit.toml
│ ├── mlx-community--GLM-4.7-Flash-5bit.toml
│ ├── mlx-community--GLM-4.7-Flash-6bit.toml
│ ├── mlx-community--GLM-4.7-Flash-8bit.toml
│ ├── mlx-community--GLM-5-8bit.toml
│ ├── mlx-community--GLM-5-MXFP4-Q8.toml
│ ├── mlx-community--GLM-5-bf16.toml
│ ├── mlx-community--Kimi-K2-Instruct-4bit.toml
│ ├── mlx-community--Kimi-K2-Thinking.toml
│ ├── mlx-community--Kimi-K2.5.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-4bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-8bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-bf16.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-4bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-8bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-bf16.toml
│ ├── mlx-community--Llama-3.2-1B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.2-3B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.2-3B-Instruct-8bit.toml
│ ├── mlx-community--Llama-3.3-70B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.3-70B-Instruct-8bit.toml
│ ├── mlx-community--Meta-Llama-3.1-70B-Instruct-4bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-4bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-8bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-bf16.toml
│ ├── mlx-community--MiniMax-M2.1-3bit.toml
│ ├── mlx-community--MiniMax-M2.1-8bit.toml
│ ├── mlx-community--MiniMax-M2.5-4bit.toml
│ ├── mlx-community--MiniMax-M2.5-6bit.toml
│ ├── mlx-community--MiniMax-M2.5-8bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-4Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-5Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-6Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-8Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-BF16.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-MXFP4.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4.toml
│ ├── mlx-community--NVIDIA-Nemotron-Nano-9B-v2-4bits.toml
│ ├── mlx-community--NVIDIA-Nemotron-Nano-9B-v2-6bit.toml
│ ├── mlx-community--Qwen3-0.6B-4bit.toml
│ ├── mlx-community--Qwen3-0.6B-8bit.toml
│ ├── mlx-community--Qwen3-235B-A22B-Instruct-2507-4bit.toml
│ ├── mlx-community--Qwen3-235B-A22B-Instruct-2507-8bit.toml
│ ├── mlx-community--Qwen3-30B-A3B-4bit.toml
│ ├── mlx-community--Qwen3-30B-A3B-8bit.toml
│ ├── mlx-community--Qwen3-Coder-480B-A35B-Instruct-4bit.toml
│ ├── mlx-community--Qwen3-Coder-480B-A35B-Instruct-8bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-4bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-5bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-6bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-8bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-bf16.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Instruct-4bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Instruct-8bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Thinking-4bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Thinking-8bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-4bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-6bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-8bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-bf16.toml
│ ├── mlx-community--Qwen3.5-27B-4bit.toml
│ ├── mlx-community--Qwen3.5-27B-8bit.toml
│ ├── mlx-community--Qwen3.5-2B-MLX-8bit.toml
│ ├── mlx-community--Qwen3.5-35B-A3B-4bit.toml
│ ├── mlx-community--Qwen3.5-35B-A3B-8bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-4bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-6bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-8bit.toml
│ ├── mlx-community--Qwen3.5-9B-4bit.toml
│ ├── mlx-community--Qwen3.5-9B-8bit.toml
│ ├── mlx-community--Step-3.5-Flash-4bit.toml
│ ├── mlx-community--Step-3.5-Flash-6bit.toml
│ ├── mlx-community--Step-3.5-Flash-8Bit.toml
│ ├── mlx-community--gpt-oss-120b-MXFP4-Q8.toml
│ ├── mlx-community--gpt-oss-20b-MXFP4-Q8.toml
│ └── mlx-community--llama-3.3-70b-instruct-fp16.toml
├── rust/
│ ├── exo_pyo3_bindings/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── exo_pyo3_bindings.pyi
│ │ ├── pyproject.toml
│ │ ├── src/
│ │ │ ├── allow_threading.rs
│ │ │ ├── bin/
│ │ │ │ └── stub_gen.rs
│ │ │ ├── ident.rs
│ │ │ ├── lib.rs
│ │ │ └── networking.rs
│ │ └── tests/
│ │ ├── dummy.rs
│ │ └── test_python.py
│ ├── networking/
│ │ ├── Cargo.toml
│ │ ├── examples/
│ │ │ └── chatroom.rs
│ │ ├── src/
│ │ │ ├── RESEARCH_NOTES.txt
│ │ │ ├── discovery.rs
│ │ │ ├── lib.rs
│ │ │ └── swarm.rs
│ │ └── tests/
│ │ └── dummy.rs
│ ├── parts.nix
│ └── util/
│ ├── Cargo.toml
│ └── src/
│ ├── lib.rs
│ └── wakerdeque.rs
├── scripts/
│ └── fetch_kv_heads.py
├── src/
│ └── exo/
│ ├── __init__.py
│ ├── __main__.py
│ ├── api/
│ │ ├── __init__.py
│ │ ├── adapters/
│ │ │ ├── __init__.py
│ │ │ ├── chat_completions.py
│ │ │ ├── claude.py
│ │ │ ├── ollama.py
│ │ │ └── responses.py
│ │ ├── main.py
│ │ ├── tests/
│ │ │ ├── test_api_error_handling.py
│ │ │ ├── test_cancel_command.py
│ │ │ ├── test_claude_api.py
│ │ │ ├── test_claude_tool_use.py
│ │ │ └── test_openai_responses_api.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── api.py
│ │ ├── claude_api.py
│ │ ├── ollama_api.py
│ │ └── openai_responses.py
│ ├── download/
│ │ ├── coordinator.py
│ │ ├── download_utils.py
│ │ ├── huggingface_utils.py
│ │ ├── impl_shard_downloader.py
│ │ ├── shard_downloader.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_download_verification.py
│ │ ├── test_offline_mode.py
│ │ └── test_re_download.py
│ ├── main.py
│ ├── master/
│ │ ├── __init__.py
│ │ ├── image_store.py
│ │ ├── main.py
│ │ ├── placement.py
│ │ ├── placement_utils.py
│ │ └── tests/
│ │ ├── conftest.py
│ │ ├── test_master.py
│ │ ├── test_placement.py
│ │ ├── test_placement_utils.py
│ │ └── test_topology.py
│ ├── routing/
│ │ ├── __init__.py
│ │ ├── connection_message.py
│ │ ├── event_router.py
│ │ ├── router.py
│ │ ├── tests/
│ │ │ └── test_event_buffer.py
│ │ └── topics.py
│ ├── shared/
│ │ ├── __init__.py
│ │ ├── apply.py
│ │ ├── constants.py
│ │ ├── election.py
│ │ ├── logging.py
│ │ ├── models/
│ │ │ └── model_cards.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── conftest.py
│ │ │ ├── test_apply/
│ │ │ │ ├── test_apply_node_download.py
│ │ │ │ └── test_apply_runner_deleted.py
│ │ │ ├── test_election.py
│ │ │ ├── test_node_id_persistence.py
│ │ │ ├── test_resolve_reasoning_params.py
│ │ │ ├── test_state_serialization.py
│ │ │ └── test_xdg_paths.py
│ │ ├── topology.py
│ │ ├── tracing.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── chunks.py
│ │ ├── commands.py
│ │ ├── common.py
│ │ ├── events.py
│ │ ├── memory.py
│ │ ├── mlx.py
│ │ ├── multiaddr.py
│ │ ├── profiling.py
│ │ ├── state.py
│ │ ├── tasks.py
│ │ ├── text_generation.py
│ │ ├── thunderbolt.py
│ │ ├── topology.py
│ │ └── worker/
│ │ ├── downloads.py
│ │ ├── instances.py
│ │ ├── runner_response.py
│ │ ├── runners.py
│ │ └── shards.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── banner.py
│ │ ├── channels.py
│ │ ├── dashboard_path.py
│ │ ├── disk_event_log.py
│ │ ├── event_buffer.py
│ │ ├── fs.py
│ │ ├── info_gatherer/
│ │ │ ├── __init__.py
│ │ │ ├── info_gatherer.py
│ │ │ ├── macmon.py
│ │ │ ├── net_profile.py
│ │ │ ├── system_info.py
│ │ │ └── tests/
│ │ │ └── test_tb_parsing.py
│ │ ├── keyed_backoff.py
│ │ ├── phantom.py
│ │ ├── power_sampler.py
│ │ ├── pydantic_ext.py
│ │ ├── reactive.py
│ │ ├── task_group.py
│ │ └── tests/
│ │ ├── test_event_log.py
│ │ ├── test_mp_channel.py
│ │ ├── test_power_sampler.py
│ │ └── test_tagged.py
│ └── worker/
│ ├── __init__.py
│ ├── engines/
│ │ ├── __init__.py
│ │ ├── image/
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── distributed_model.py
│ │ │ ├── generate.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── flux/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── adapter.py
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── kontext_adapter.py
│ │ │ │ │ └── wrappers.py
│ │ │ │ └── qwen/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adapter.py
│ │ │ │ ├── config.py
│ │ │ │ ├── edit_adapter.py
│ │ │ │ └── wrappers.py
│ │ │ └── pipeline/
│ │ │ ├── __init__.py
│ │ │ ├── block_wrapper.py
│ │ │ ├── kv_cache.py
│ │ │ └── runner.py
│ │ └── mlx/
│ │ ├── __init__.py
│ │ ├── auto_parallel.py
│ │ ├── cache.py
│ │ ├── constants.py
│ │ ├── dsml_encoding.py
│ │ ├── generator/
│ │ │ ├── __init__.py
│ │ │ ├── batch_generate.py
│ │ │ └── generate.py
│ │ ├── tests/
│ │ │ └── test_batch_generate.py
│ │ └── utils_mlx.py
│ ├── main.py
│ ├── plan.py
│ ├── runner/
│ │ ├── __init__.py
│ │ ├── bootstrap.py
│ │ ├── image_models/
│ │ │ ├── __init__.py
│ │ │ └── runner.py
│ │ ├── llm_inference/
│ │ │ ├── __init__.py
│ │ │ ├── batch_generator.py
│ │ │ ├── model_output_parsers.py
│ │ │ ├── runner.py
│ │ │ └── tool_parsers.py
│ │ └── runner_supervisor.py
│ └── tests/
│ ├── TODO.tests
│ ├── __init__.py
│ ├── constants.py
│ └── unittests/
│ ├── __init__.py
│ ├── conftest.py
│ ├── test_download/
│ │ └── __init__.py
│ ├── test_mlx/
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_auto_parallel.py
│ │ ├── test_batch_vs_generate.py
│ │ ├── test_distributed_fix.py
│ │ ├── test_kv_prefix_cache.py
│ │ ├── test_pipeline_prefill_callbacks.py
│ │ ├── test_prefix_cache_architectures.py
│ │ └── test_tokenizers.py
│ ├── test_plan/
│ │ ├── __init__.py
│ │ ├── test_download_and_loading.py
│ │ ├── test_runner_lifecycle.py
│ │ ├── test_task_forwarding.py
│ │ └── test_warmup.py
│ └── test_runner/
│ ├── __init__.py
│ ├── test_dsml_e2e.py
│ ├── test_event_ordering.py
│ ├── test_glm_tool_parsing.py
│ ├── test_parse_gpt_oss.py
│ ├── test_parse_tool_calls.py
│ └── test_runner_supervisor.py
├── tests/
│ ├── auto_bench.sh
│ ├── eval_tool_calls.sh
│ ├── get_all_models_on_cluster.py
│ ├── headless_runner.py
│ ├── run_exo_on.sh
│ └── start_distributed_test.py
└── tmp/
├── config_examples/
│ ├── claude_code.sh
│ └── opencode.json
├── gen_card.py
├── prompt.txt
├── quantize_and_upload.py
├── run_llm.py
├── run_llm.sh
├── set_rdma_network_config.sh
└── test_trust_remote_code_attack.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .clauderules
================================================
# Claude Code Rules - Follow Every Rule Exactly
You must prioritize straightforward code semantics, well-named types, clear function signatures, and robust, carefully-chosen abstractions. Think about how your decisions might impact these aspects of code quality before proposing any changes.
You have access to all modern Python features from Python 3.13, 3.12, 3.11...
**When you're done making changes, remove any redundant comments; remaining comments should only apply to complex code segments, adding relevant context.**
## 1. Code Discipline
* Eliminate superfluous `try`/`except` and `if` branches through strict typing and static analysis.
* Use pure functions unless you must mutate fixed state—then wrap that state in a class.
* Every function is **referentially transparent**: same inputs ⇒ same outputs, no hidden state, no unintended I/O.
* Put side-effects in injectable "effect handlers"; keep core logic pure.
## 2. Naming
* Choose descriptive, non-abbreviated names—no 3-letter acronyms or non-standard contractions.
* Anyone reading a function's type signature alone should grasp its purpose without extra context.
## 3. Typing
* Maintain **strict, exhaustive** typing; never bypass the type-checker.
* Default to `Literal[...]` when an enum-like set is needed.
* Prefer built-in types; when two values share structure but differ in meaning, enforce separation:
* Use `typing.NewType` for primitives (zero runtime cost).
* For serializable objects, add a `type: str` field that states the object's identity.
## 4. Pydantic
* Read, respect, and rely on Pydantic documentation.
* Centralize a common `ConfigDict` with `frozen=True` and `strict=True` (or stricter) and reuse it everywhere.
* For hierarchies of `BaseModel` variants, declare a discriminated union with `typing.Annotated[Base, Field(discriminator='variant')]`; publish a single `TypeAdapter[Base]` so all variants share one strict validator.
## 5. IDs & UUIDs
* Subclass Pydantic's `UUID4` for custom ID types.
* Generate fresh IDs with `uuid.uuid4()`.
* Create idempotency keys by hashing *persisted* state plus a **function-specific salt** to avoid collisions after crashes.
## 6. Error Handling
* Catch an exception **only** where you can handle or transform it meaningfully.
* State in the docstring **where** each exception is expected to be handled and **why**.
## 7. Dependencies
* Introduce new external dependencies only after approval.
* Request only libraries common in production environments.
## 8. Use of `@final` & Freezing
* Mark classes, methods, and variables as `@final` or otherwise immutable wherever applicable.
## 9. Repository Workflow
If you spot a rule violation within code that you've not been asked to work on directly, inform the user rather than patching it ad-hoc.
---
### One-Sentence Summary
Write strictly-typed, pure, self-describing Python that uses Pydantic, well-scoped side-effects, immutable state, approved dependencies, and explicit error handling.
================================================
FILE: .cursorrules
================================================
# follow **every** rule exactly; report any violation instead of silently fixing it.
You must prioritize straightforward code semantics, well-named types, clear function signatures, and robust, carefully-chosen abstractions. Think about how your decisions might impact these aspects of code quality before proposing any changes.
You can use the advanced features of `typing`. You have access to all of the new features from Python 3.13, 3.12, 3.11...
**When you're done making your changes, remove any redundant comments that you may have left; the comments that remain should only apply to complex segments of code, adding relevant context.**
## 1. Code Discipline
* Eliminate superfluous `try` / `except` and `if` branches through strict typing and static analysis.
* Use pure functions unless you must mutate fixed state—then wrap that state in a class.
* Every function is **referentially transparent**: same inputs ⇒ same outputs, no hidden state, no unintended I/O.
* Put side-effects in injectable “effect handlers”; keep core logic pure.
## 2. Naming
* Choose descriptive, non-abbreviated names—no 3-letter acronyms or non-standard contractions.
* Anyone reading a function’s type signature alone should grasp its purpose without extra context.
## 3. Typing
* Maintain **strict, exhaustive** typing; never bypass the type-checker.
* Default to `Literal[...]` when an enum-like set is needed.
* Prefer built-in types; when two values share structure but differ in meaning, enforce separation:
* Use `typing.NewType` for primitives (zero runtime cost).
* For serialisable objects, add a `type: str` field that states the object’s identity.
## 4. Pydantic
* Read, respect, and rely on Pydantic docs.
* Centralise a common `ConfigDict` with `frozen=True` and `strict=True` (or stricter) and reuse it everywhere.
* For hierarchies of `BaseModel` variants, declare a discriminated union with `typing.Annotated[Base, Field(discriminator='variant')]`; publish a single `TypeAdapter[Base]` so all variants share one strict validator.
## 5. IDs & UUIDs
* Subclass Pydantic’s `UUID4` for custom ID types.
* Generate fresh IDs with `uuid.uuid4()`.
* Create idempotency keys by hashing *persisted* state plus a **function-specific salt** to avoid collisions after crashes.
## 6. Error Handling
* Catch an exception **only** where you can handle or transform it meaningfully.
* State in the docstring **where** each exception is expected to be handled and **why**.
## 7. Dependencies
* Introduce new external dependencies only after approval.
* Request only libraries common in production environments.
## 8. Use of `@final` & Freezing
* Mark classes, methods, and variables as `@final` or otherwise immutable wherever applicable.
## 9. Repository Workflow
If you spot a rule violation within code that you've not been asked to work on directly, inform the user rather than patching it ad-hoc.
---
### One-Sentence Summary
Write strictly-typed, pure, self-describing Python that uses Pydantic, well-scoped side-effects, immutable state, approved dependencies, and explicit error handling
================================================
FILE: .envrc
================================================
use flake
================================================
FILE: .githooks/post-checkout
================================================
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-checkout "$@"
================================================
FILE: .githooks/post-commit
================================================
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-commit "$@"
================================================
FILE: .githooks/post-merge
================================================
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-merge "$@"
================================================
FILE: .githooks/pre-push
================================================
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs pre-push "$@"
================================================
FILE: .github/CODEOWNERS
================================================
* @ToxicPine
* @AlexCheema
* @GeluVrabie
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug Report
about: Create a report to help us improve
title: '[BUG] '
labels: bug
assignees: ''
---
## Describe the bug
A clear and concise description of what the bug is.
## To Reproduce
Steps to reproduce the behavior:
1.
2.
3.
## Expected behavior
A clear and concise description of what you expected to happen.
## Actual behavior
A clear and concise description of what actually happened.
## Environment
- macOS Version:
- EXO Version:
- Hardware:
- Device 1: (e.g., MacBook Pro M1 Max, 32GB RAM)
- Device 2: (e.g., Mac Mini M2, 16GB RAM)
- Additional devices:
- Interconnection:
- (e.g., Thunderbolt 4 cable between Device 1 and 2)
- (e.g., WiFi 6 for Device 3)
- (e.g., 10GbE Ethernet between all devices)
## Additional context
Add any other context about the problem here.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature Request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
<!-- Please use a clear, descriptive title above -->
Describe what you'd like to see added to EXO.
================================================
FILE: .github/actions/conditional-commit/action.yml
================================================
name: Commit if changed
description: "Create a commit when the working tree is dirty"
inputs:
message:
description: "Commit message"
required: true
runs:
using: composite
steps:
- name: Commit changed files
shell: bash
run: |
git diff --quiet && exit 0
git commit -am "${{ inputs.message }}"
================================================
FILE: .github/actions/format/action.yml
================================================
name: Format Code
description: "Run code formatter"
runs:
using: "composite"
steps:
- name: Format code
run: nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just fmt
shell: bash
================================================
FILE: .github/actions/lint/action.yml
================================================
name: Lint Code
description: "Run code linter"
runs:
using: "composite"
steps:
- name: Lint code
run: nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just lint
shell: bash
================================================
FILE: .github/actions/lint-check/action.yml
================================================
name: Lint Check
description: "Check for lint errors"
runs:
using: "composite"
steps:
- name: Lint check
run: nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just lint-check
shell: bash
================================================
FILE: .github/actions/regenerate-protobufs/action.yml
================================================
name: Regenerate Protobufs
description: "Regenerate protobuf files"
runs:
using: "composite"
steps:
- name: Regenerate protobufs
run: nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just regenerate-protobufs
shell: bash
================================================
FILE: .github/actions/setup-python-uv/action.yml
================================================
name: Setup Python & uv
description: "Regenerate Python environment from uv.lock"
runs:
using: "composite"
steps:
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
cache-dependency-glob: uv.lock
- name: Install Python
run: uv python install
shell: bash
- name: Sync
run: uv sync --locked --all-extras --dev
shell: bash
================================================
FILE: .github/actions/unit-test/action.yml
================================================
name: Unit Test
description: "Run unit tests"
runs:
using: "composite"
steps:
- name: Run unit tests
run: |
nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just sync-clean
nix --extra-experimental-features nix-command --extra-experimental-features flakes develop -c just test-fast
shell: bash
================================================
FILE: .github/actions/verify-clean/action.yml
================================================
name: Verify Clean Working Tree
description: "Fail the job if the previous step left the working tree dirty"
inputs:
step:
description: "The name of the step that just executed"
required: true
runs:
using: composite
steps:
- name: Check git diff
shell: bash
run: |
if ! git diff --quiet; then
echo "Error: ${{ inputs.step }} left working tree dirty." >&2
git --no-pager diff >&2
exit 1
fi
================================================
FILE: .github/pull_request_template.md
================================================
## Motivation
<!-- Why is this change needed? What problem does it solve? -->
<!-- If it fixes an open issue, please link to the issue here -->
## Changes
<!-- Describe what you changed in detail -->
## Why It Works
<!-- Explain why your approach solves the problem -->
## Test Plan
### Manual Testing
<!-- Hardware: (e.g., MacBook Pro M1 Max 32GB, Mac Mini M2 16GB, connected via Thunderbolt 4) -->
<!-- What you did: -->
<!-- - -->
### Automated Testing
<!-- Describe changes to automated tests, or how existing tests cover this change -->
<!-- - -->
================================================
FILE: .github/workflows/build-app.yml
================================================
name: Build EXO macOS DMG
# Release workflow:
# 1. Create a draft GitHub Release with the tag name (e.g. v1.0.0) and write release notes in markdown
# 2. Push the tag: git tag v1.0.0 && git push origin v1.0.0
# 3. This workflow builds, signs, and notarizes the DMG
# 4. Release notes are embedded in appcast.xml for Sparkle (rendered as markdown)
# 5. DMG and appcast.xml are uploaded to S3
# 6. The draft GitHub Release is published with the DMG attached
#
# For alpha releases (e.g. v1.0.0-alpha.1): draft release and notes are optional.
# If no draft exists, a release is auto-created with generated notes.
on:
workflow_dispatch:
push:
tags:
- "v*"
branches:
- "test-app"
jobs:
build-macos-app:
runs-on: "macos-26"
permissions:
contents: write
env:
SPARKLE_VERSION: 2.9.0-beta.1
SPARKLE_DOWNLOAD_PREFIX: ${{ secrets.SPARKLE_DOWNLOAD_PREFIX }}
SPARKLE_FEED_URL: ${{ secrets.SPARKLE_FEED_URL }}
SPARKLE_ED25519_PUBLIC: ${{ secrets.SPARKLE_ED25519_PUBLIC }}
SPARKLE_ED25519_PRIVATE: ${{ secrets.SPARKLE_ED25519_PRIVATE }}
SPARKLE_S3_BUCKET: ${{ secrets.SPARKLE_S3_BUCKET }}
SPARKLE_S3_PREFIX: ${{ secrets.SPARKLE_S3_PREFIX }}
EXO_BUG_REPORT_PRESIGNED_URL_ENDPOINT: ${{ secrets.EXO_BUG_REPORT_PRESIGNED_URL_ENDPOINT }}
AWS_REGION: ${{ secrets.AWS_REGION }}
EXO_BUILD_NUMBER: ${{ github.run_number }}
EXO_LIBP2P_NAMESPACE: ${{ github.ref_name }}
steps:
# ============================================================
# Checkout and tag validation
# ============================================================
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Derive release version from tag
run: |
if [[ "$GITHUB_REF_NAME" == "test-app" || "${{ github.event_name }}" == "workflow_dispatch" ]]; then
VERSION="0.0.0-alpha.0"
echo "IS_ALPHA=true" >> $GITHUB_ENV
else
VERSION="${GITHUB_REF_NAME#v}"
if [[ "$VERSION" == *-alpha* ]]; then
echo "IS_ALPHA=true" >> $GITHUB_ENV
else
echo "IS_ALPHA=false" >> $GITHUB_ENV
fi
fi
echo "RELEASE_VERSION=$VERSION" >> $GITHUB_ENV
- name: Compute build version from semver
run: |
VERSION="$RELEASE_VERSION"
# Extract major.minor.patch (strip prerelease suffix)
BASE_VERSION="${VERSION%%-*}"
MAJOR=$(echo "$BASE_VERSION" | cut -d. -f1)
MINOR=$(echo "$BASE_VERSION" | cut -d. -f2)
PATCH=$(echo "$BASE_VERSION" | cut -d. -f3)
# Extract prerelease number (e.g., "alpha.2" -> 2, or 999 for releases)
if [[ "$VERSION" == *-* ]]; then
PRERELEASE_PART="${VERSION#*-}"
PRERELEASE_NUM="${PRERELEASE_PART##*.}"
# Default to 0 if not a number
if ! [[ "$PRERELEASE_NUM" =~ ^[0-9]+$ ]]; then
PRERELEASE_NUM=0
fi
else
PRERELEASE_NUM=999
fi
# Compute: PRERELEASE + (1000 * PATCH) + (1_000_000 * MINOR) + (1_000_000_000 * MAJOR)
BUILD_VERSION=$((PRERELEASE_NUM + 1000 * PATCH + 1000000 * MINOR + 1000000000 * MAJOR))
echo "EXO_BUILD_VERSION=$BUILD_VERSION" >> $GITHUB_ENV
echo "Computed build version: $BUILD_VERSION from $VERSION"
- name: Ensure tag commit is on main
if: github.ref_type == 'tag'
run: |
git fetch origin main
# Alpha tags can be on any branch, production tags must be on main
if [[ "$IS_ALPHA" == "true" ]]; then
echo "Alpha tag detected, skipping main branch check"
elif ! git merge-base --is-ancestor origin/main HEAD; then
echo "Production tag must point to a commit on main"
exit 1
fi
- name: Fetch and validate release notes
if: github.ref_type == 'tag'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Find draft release by name using gh release list (more reliable with default token)
echo "Looking for draft release named '$GITHUB_REF_NAME'..."
DRAFT_EXISTS=$(gh release list --json name,isDraft --jq ".[] | select(.isDraft == true) | select(.name == \"$GITHUB_REF_NAME\") | .name" 2>/dev/null || echo "")
if [[ -z "$DRAFT_EXISTS" ]]; then
if [[ "$IS_ALPHA" == "true" ]]; then
echo "No draft release found for alpha tag $GITHUB_REF_NAME (optional for alphas)"
echo "HAS_RELEASE_NOTES=false" >> $GITHUB_ENV
exit 0
fi
echo "ERROR: No draft release found for tag $GITHUB_REF_NAME"
echo "Please create a draft release with release notes before pushing the tag."
exit 1
fi
# Fetch full release details via API to get body and ID
echo "Found draft release, fetching details..."
RELEASE_JSON=$(gh api repos/${{ github.repository }}/releases --jq ".[] | select(.draft == true) | select(.name == \"$GITHUB_REF_NAME\")" 2>/dev/null || echo "")
# Extract release notes
NOTES=$(echo "$RELEASE_JSON" | jq -r '.body // ""')
if [[ -z "$NOTES" || "$NOTES" == "null" ]]; then
if [[ "$IS_ALPHA" == "true" ]]; then
echo "Draft release has no notes (optional for alphas)"
echo "HAS_RELEASE_NOTES=false" >> $GITHUB_ENV
exit 0
fi
echo "ERROR: Draft release exists but has no release notes"
echo "Please add release notes to the draft release before pushing the tag."
exit 1
fi
# Save release ID for later publishing
RELEASE_ID=$(echo "$RELEASE_JSON" | jq -r '.id')
echo "DRAFT_RELEASE_ID=$RELEASE_ID" >> $GITHUB_ENV
echo "HAS_RELEASE_NOTES=true" >> $GITHUB_ENV
echo "Found draft release (ID: $RELEASE_ID), saving release notes..."
echo "$NOTES" > /tmp/release_notes.md
echo "RELEASE_NOTES_FILE=/tmp/release_notes.md" >> $GITHUB_ENV
# ============================================================
# Install dependencies
# ============================================================
- name: Select Xcode 26.2
run: |
sudo xcode-select -s /Applications/Xcode_26.2.app
if ! xcrun -f metal >/dev/null 2>&1; then
echo "Metal toolchain is not installed."
exit 1
fi
- name: Install Homebrew packages
run: brew install just awscli macmon
- name: Install UV
uses: astral-sh/setup-uv@v6
with:
enable-cache: true
cache-dependency-glob: uv.lock
- name: Setup Python
run: |
uv python install
uv sync --locked
- name: Install Nix
uses: cachix/install-nix-action@v31
with:
nix_path: nixpkgs=channel:nixos-unstable
- name: Configure Cachix
uses: cachix/cachix-action@v14
with:
name: exo
authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
- name: Build dashboard
run: |
DASHBOARD_OUT=$(nix build .#dashboard --print-build-logs --no-link --print-out-paths)
mkdir -p dashboard/build
cp -r "$DASHBOARD_OUT"/* dashboard/build/
- name: Install Sparkle CLI
run: |
CLI_URL="${SPARKLE_CLI_URL:-https://github.com/sparkle-project/Sparkle/releases/download/${SPARKLE_VERSION}/Sparkle-${SPARKLE_VERSION}.tar.xz}"
echo "Downloading Sparkle CLI from: $CLI_URL"
mkdir -p /tmp/sparkle
curl --fail --location --output /tmp/sparkle.tar.xz "$CLI_URL"
tar -xJf /tmp/sparkle.tar.xz -C /tmp/sparkle --strip-components=1
echo "SPARKLE_BIN=/tmp/sparkle/bin" >> $GITHUB_ENV
- name: Prepare code-signing keychain
env:
MACOS_CERTIFICATE: ${{ secrets.MACOS_CERTIFICATE }}
MACOS_CERTIFICATE_PASSWORD: ${{ secrets.MACOS_CERTIFICATE_PASSWORD }}
PROVISIONING_PROFILE: ${{ secrets.PROVISIONING_PROFILE }}
run: |
KEYCHAIN_PATH="$HOME/Library/Keychains/build.keychain-db"
# Create fresh keychain
security create-keychain -p "$MACOS_CERTIFICATE_PASSWORD" "$KEYCHAIN_PATH"
# Disable auto-lock (no timeout, no lock-on-sleep)
security set-keychain-settings "$KEYCHAIN_PATH"
# Add to search list while preserving existing keychains
security list-keychains -d user -s "$KEYCHAIN_PATH" $(security list-keychains -d user | tr -d '"')
# Set as default and unlock
security default-keychain -s "$KEYCHAIN_PATH"
security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" "$KEYCHAIN_PATH"
# Import certificate with full access for codesign
echo "$MACOS_CERTIFICATE" | base64 --decode > /tmp/cert.p12
security import /tmp/cert.p12 -k "$KEYCHAIN_PATH" -P "$MACOS_CERTIFICATE_PASSWORD" \
-T /usr/bin/codesign -T /usr/bin/security -T /usr/bin/productbuild
rm /tmp/cert.p12
# Allow codesign to access the key without prompting
security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k "$MACOS_CERTIFICATE_PASSWORD" "$KEYCHAIN_PATH"
# Verify keychain is unlocked and identity is available
echo "Verifying signing identity..."
security find-identity -v -p codesigning "$KEYCHAIN_PATH"
# Setup provisioning profile
mkdir -p "$HOME/Library/Developer/Xcode/UserData/Provisioning Profiles"
echo "$PROVISIONING_PROFILE" | base64 --decode > "$HOME/Library/Developer/Xcode/UserData/Provisioning Profiles/EXO.provisionprofile"
# Export keychain path for other steps
echo "BUILD_KEYCHAIN_PATH=$KEYCHAIN_PATH" >> $GITHUB_ENV
# ============================================================
# Build the bundle
# ============================================================
- name: Build PyInstaller bundle
run: uv run pyinstaller packaging/pyinstaller/exo.spec
- name: Build Swift app
env:
MACOS_CERTIFICATE_PASSWORD: ${{ secrets.MACOS_CERTIFICATE_PASSWORD }}
SPARKLE_FEED_URL: ${{ secrets.SPARKLE_FEED_URL }}
SPARKLE_ED25519_PUBLIC: ${{ secrets.SPARKLE_ED25519_PUBLIC }}
run: |
cd app/EXO
security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" "$BUILD_KEYCHAIN_PATH"
SIGNING_IDENTITY=$(security find-identity -v -p codesigning "$BUILD_KEYCHAIN_PATH" | awk -F '"' '{print $2}')
xcodebuild clean build \
-scheme EXO \
-configuration Release \
-derivedDataPath build \
MARKETING_VERSION="$RELEASE_VERSION" \
CURRENT_PROJECT_VERSION="$EXO_BUILD_VERSION" \
EXO_BUILD_TAG="$RELEASE_VERSION" \
EXO_BUILD_COMMIT="$GITHUB_SHA" \
SPARKLE_FEED_URL="$SPARKLE_FEED_URL" \
SPARKLE_ED25519_PUBLIC="$SPARKLE_ED25519_PUBLIC" \
EXO_BUG_REPORT_PRESIGNED_URL_ENDPOINT="$EXO_BUG_REPORT_PRESIGNED_URL_ENDPOINT" \
CODE_SIGNING_IDENTITY="$SIGNING_IDENTITY" \
CODE_SIGN_INJECT_BASE_ENTITLEMENTS=YES
mkdir -p ../../output
cp -R build/Build/Products/Release/EXO.app ../../output/EXO.app
- name: Inject PyInstaller runtime
run: |
rm -rf output/EXO.app/Contents/Resources/exo
mkdir -p output/EXO.app/Contents/Resources
cp -R dist/exo output/EXO.app/Contents/Resources/exo
- name: Codesign PyInstaller runtime
env:
MACOS_CERTIFICATE_PASSWORD: ${{ secrets.MACOS_CERTIFICATE_PASSWORD }}
run: |
cd output
security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" "$BUILD_KEYCHAIN_PATH"
SIGNING_IDENTITY=$(security find-identity -v -p codesigning "$BUILD_KEYCHAIN_PATH" | awk -F '"' '{print $2}')
RUNTIME_DIR="EXO.app/Contents/Resources/exo"
find "$RUNTIME_DIR" -type f \( -perm -111 -o -name "*.dylib" -o -name "*.so" \) -print0 |
while IFS= read -r -d '' file; do
/usr/bin/codesign --force --timestamp --options runtime \
--sign "$SIGNING_IDENTITY" "$file"
done
- name: Sign, notarize, and create DMG
env:
MACOS_CERTIFICATE_PASSWORD: ${{ secrets.MACOS_CERTIFICATE_PASSWORD }}
APPLE_NOTARIZATION_USERNAME: ${{ secrets.APPLE_NOTARIZATION_USERNAME }}
APPLE_NOTARIZATION_PASSWORD: ${{ secrets.APPLE_NOTARIZATION_PASSWORD }}
APPLE_NOTARIZATION_TEAM: ${{ secrets.APPLE_NOTARIZATION_TEAM }}
run: |
cd output
security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" "$BUILD_KEYCHAIN_PATH"
SIGNING_IDENTITY=$(security find-identity -v -p codesigning "$BUILD_KEYCHAIN_PATH" | awk -F '"' '{print $2}')
/usr/bin/codesign --deep --force --timestamp --options runtime \
--sign "$SIGNING_IDENTITY" EXO.app
mkdir -p dmg-root
cp -R EXO.app dmg-root/
ln -s /Applications dmg-root/Applications
DMG_NAME="EXO-${RELEASE_VERSION}.dmg"
hdiutil create -volname "EXO" -srcfolder dmg-root -ov -format UDZO "$DMG_NAME"
/usr/bin/codesign --force --timestamp --options runtime \
--sign "$SIGNING_IDENTITY" "$DMG_NAME"
if [[ -n "$APPLE_NOTARIZATION_USERNAME" ]]; then
SUBMISSION_OUTPUT=$(xcrun notarytool submit "$DMG_NAME" \
--apple-id "$APPLE_NOTARIZATION_USERNAME" \
--password "$APPLE_NOTARIZATION_PASSWORD" \
--team-id "$APPLE_NOTARIZATION_TEAM" \
--wait --timeout 15m 2>&1)
echo "$SUBMISSION_OUTPUT"
SUBMISSION_ID=$(echo "$SUBMISSION_OUTPUT" | awk 'tolower($1)=="id:" && $2 ~ /^[0-9a-fA-F-]+$/ {print $2; exit}')
STATUS=$(echo "$SUBMISSION_OUTPUT" | awk 'tolower($1)=="status:" {print $2; exit}')
if [[ -n "$SUBMISSION_ID" ]]; then
xcrun notarytool log "$SUBMISSION_ID" \
--apple-id "$APPLE_NOTARIZATION_USERNAME" \
--password "$APPLE_NOTARIZATION_PASSWORD" \
--team-id "$APPLE_NOTARIZATION_TEAM" > notarization-log.txt || true
echo "===== Notarization Log ====="
cat notarization-log.txt
echo "============================"
fi
if [[ "$STATUS" != "Accepted" ]]; then
echo "Notarization failed with status: ${STATUS:-Unknown}"
exit 1
fi
xcrun stapler staple "$DMG_NAME"
fi
- name: Generate Sparkle appcast
env:
SPARKLE_DOWNLOAD_PREFIX: ${{ env.SPARKLE_DOWNLOAD_PREFIX }}
SPARKLE_ED25519_PRIVATE: ${{ secrets.SPARKLE_ED25519_PRIVATE }}
IS_ALPHA: ${{ env.IS_ALPHA }}
run: |
set -euo pipefail
cd output
DOWNLOAD_PREFIX="${SPARKLE_DOWNLOAD_PREFIX:-https://assets.exolabs.net}"
echo "$SPARKLE_ED25519_PRIVATE" > sparkle_ed25519.key
chmod 600 sparkle_ed25519.key
CHANNEL_FLAG=""
if [[ "$IS_ALPHA" == "true" ]]; then
CHANNEL_FLAG="--channel alpha"
echo "Generating appcast for alpha channel"
fi
$SPARKLE_BIN/generate_appcast \
--ed-key-file sparkle_ed25519.key \
--download-url-prefix "$DOWNLOAD_PREFIX" \
$CHANNEL_FLAG \
.
- name: Inject release notes into appcast
if: github.ref_type == 'tag' && env.HAS_RELEASE_NOTES == 'true'
env:
RELEASE_VERSION: ${{ env.RELEASE_VERSION }}
run: |
# Inject markdown release notes with sparkle:format="markdown" (Sparkle 2.9+)
export NOTES=$(cat "$RELEASE_NOTES_FILE")
# Insert description after the enclosure tag for this version
awk '
/<enclosure[^>]*>/ && index($0, ENVIRON["RELEASE_VERSION"]) {
print
print " <description sparkle:format=\"markdown\"><![CDATA["
print ENVIRON["NOTES"]
print " ]]></description>"
next
}
{ print }
' output/appcast.xml > output/appcast.xml.tmp && mv output/appcast.xml.tmp output/appcast.xml
echo "Injected markdown release notes for version $RELEASE_VERSION"
# ============================================================
# Upload artifacts
# ============================================================
- name: Upload DMG
uses: actions/upload-artifact@v4
with:
name: EXO-dmg-${{ env.RELEASE_VERSION }}
path: output/EXO-${{ env.RELEASE_VERSION }}.dmg
- name: Upload to S3
if: env.SPARKLE_S3_BUCKET != ''
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_REGION: ${{ env.AWS_REGION }}
SPARKLE_S3_BUCKET: ${{ env.SPARKLE_S3_BUCKET }}
SPARKLE_S3_PREFIX: ${{ env.SPARKLE_S3_PREFIX }}
IS_ALPHA: ${{ env.IS_ALPHA }}
run: |
set -euo pipefail
cd output
PREFIX="${SPARKLE_S3_PREFIX:-}"
if [[ -n "$PREFIX" && "${PREFIX: -1}" != "/" ]]; then
PREFIX="${PREFIX}/"
fi
DMG_NAME="EXO-${RELEASE_VERSION}.dmg"
if [[ "${{ github.ref_type }}" != "tag" ]]; then
aws s3 cp "$DMG_NAME" "s3://${SPARKLE_S3_BUCKET}/${PREFIX}EXO-${GITHUB_SHA}.dmg"
exit 0
fi
aws s3 cp "$DMG_NAME" "s3://${SPARKLE_S3_BUCKET}/${PREFIX}${DMG_NAME}"
if [[ "$IS_ALPHA" != "true" ]]; then
aws s3 cp "$DMG_NAME" "s3://${SPARKLE_S3_BUCKET}/${PREFIX}EXO-latest.dmg"
aws s3 cp appcast.xml "s3://${SPARKLE_S3_BUCKET}/${PREFIX}appcast.xml" --content-type application/xml --cache-control no-cache
fi
- name: Publish GitHub Release
if: github.ref_type == 'tag'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
DMG_PATH="output/EXO-${RELEASE_VERSION}.dmg"
if [[ "$HAS_RELEASE_NOTES" == "true" ]]; then
# Update the draft release with the tag and upload DMG
gh api --method PATCH "repos/${{ github.repository }}/releases/$DRAFT_RELEASE_ID" \
-f tag_name="$GITHUB_REF_NAME" \
-F draft=false
gh release upload "$GITHUB_REF_NAME" "$DMG_PATH" --clobber
echo "Published release $GITHUB_REF_NAME with DMG attached"
else
# Alpha without draft release - create one with auto-generated notes
gh release create "$GITHUB_REF_NAME" "$DMG_PATH" \
--title "$GITHUB_REF_NAME" \
--generate-notes \
--prerelease
echo "Created alpha release $GITHUB_REF_NAME with auto-generated notes"
fi
================================================
FILE: .github/workflows/pipeline.yml
================================================
name: ci-pipeline
on:
push:
pull_request:
branches:
- staging
- main
jobs:
nix:
name: Build and check (${{ matrix.system }})
runs-on: ${{ matrix.runner }}
strategy:
fail-fast: false
matrix:
include:
- runner: macos-26
system: aarch64-darwin
- runner: ubuntu-latest
system: x86_64-linux
- runner: ubuntu-24.04-arm
system: aarch64-linux
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
lfs: false
- uses: cachix/install-nix-action@v31
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v14
name: Configure Cachix
with:
name: exo
authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
- name: Build Metal packages (macOS only)
if: runner.os == 'macOS'
run: |
# Try to build metal-toolchain first (may succeed via cachix cache hit)
if nix build .#metal-toolchain 2>/dev/null; then
echo "metal-toolchain built successfully (likely cache hit)"
else
echo "metal-toolchain build failed, extracting from Xcode..."
NAR_HASH="sha256-ayR5mXN4sZAddwKEG2OszGRF93k9ZFc7H0yi2xbylQw="
NAR_NAME="metal-toolchain-17C48.nar"
# Use RUNNER_TEMP to avoid /tmp symlink issues on macOS
WORK_DIR="${RUNNER_TEMP}/metal-work"
mkdir -p "$WORK_DIR"
# Download the Metal toolchain component
xcodebuild -downloadComponent MetalToolchain
# Find and mount the DMG
DMG_PATH=$(find /System/Library/AssetsV2/com_apple_MobileAsset_MetalToolchain -name '*.dmg' 2>/dev/null | head -1)
if [ -z "$DMG_PATH" ]; then
echo "Error: Could not find Metal toolchain DMG"
exit 1
fi
echo "Found DMG at: $DMG_PATH"
hdiutil attach "$DMG_PATH" -mountpoint "${WORK_DIR}/metal-dmg"
# Copy the toolchain
cp -R "${WORK_DIR}/metal-dmg/Metal.xctoolchain" "${WORK_DIR}/metal-export"
hdiutil detach "${WORK_DIR}/metal-dmg"
# Create NAR and add to store
nix nar pack "${WORK_DIR}/metal-export" > "${WORK_DIR}/${NAR_NAME}"
STORE_PATH=$(nix store add --mode flat "${WORK_DIR}/${NAR_NAME}")
echo "Added NAR to store: $STORE_PATH"
# Verify the hash matches
ACTUAL_HASH=$(nix hash file "${WORK_DIR}/${NAR_NAME}")
if [ "$ACTUAL_HASH" != "$NAR_HASH" ]; then
echo "Warning: NAR hash mismatch!"
echo "Expected: $NAR_HASH"
echo "Actual: $ACTUAL_HASH"
echo "The metal-toolchain.nix may need updating"
fi
# Clean up
rm -rf "$WORK_DIR"
# Retry the build now that NAR is in store
nix build .#metal-toolchain
fi
# Build mlx (depends on metal-toolchain)
nix build .#mlx
- name: Build all Nix outputs
run: |
nix flake show --json | jq -r '
[
(.packages."${{ matrix.system }}" // {} | keys[] | ".#packages.${{ matrix.system }}.\(.)"),
(.devShells."${{ matrix.system }}" // {} | keys[] | ".#devShells.${{ matrix.system }}.\(.)")
] | .[]
' | xargs nix build
- name: Run nix flake check
run: nix flake check
- name: Run pytest (macOS only)
if: runner.os == 'macOS'
run: |
# Build the test environment (requires relaxed sandbox for uv2nix on macOS)
TEST_ENV=$(nix build '.#exo-test-env' --option sandbox relaxed --print-out-paths)
# Run pytest outside sandbox (needs GPU access for MLX)
export HOME="$RUNNER_TEMP"
export EXO_TESTS=1
export EXO_DASHBOARD_DIR="$PWD/dashboard/"
export EXO_RESOURCES_DIR="$PWD/resources"
$TEST_ENV/bin/python -m pytest src -m "not slow" --import-mode=importlib
================================================
FILE: .gitignore
================================================
# gitingest
digest.txt
# python
**/__pycache__
# nix
.direnv/
# IDEA (PyCharm)
.idea
# xcode / macos
*.xcuserstate
*.xcuserdata
*.xcuserdatad/
**/.DS_Store
app/EXO/build/
dist/
# rust
target/
**/*.rs.bk
*.pdb
# svelte
dashboard/build/
dashboard/node_modules/
dashboard/.svelte-kit/
# host config snapshots
hosts_*.json
.swp
# bench files
bench/**/*.json
# tmp
tmp/models
================================================
FILE: .mlx_typings/.gitkeep
================================================
================================================
FILE: .mlx_typings/mflux/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import os
if "TOKENIZERS_PARALLELISM" not in os.environ: ...
================================================
FILE: .mlx_typings/mflux/callbacks/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/callbacks/callback.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import PIL.Image
import tqdm
from typing import Protocol
from mflux.models.common.config.config import Config
class BeforeLoopCallback(Protocol):
def call_before_loop(
self,
seed: int,
prompt: str,
latents: mx.array,
config: Config,
canny_image: PIL.Image.Image | None = ...,
depth_image: PIL.Image.Image | None = ...,
) -> None: ...
class InLoopCallback(Protocol):
def call_in_loop(
self,
t: int,
seed: int,
prompt: str,
latents: mx.array,
config: Config,
time_steps: tqdm,
) -> None: ...
class AfterLoopCallback(Protocol):
def call_after_loop(
self, seed: int, prompt: str, latents: mx.array, config: Config
) -> None: ...
class InterruptCallback(Protocol):
def call_interrupt(
self,
t: int,
seed: int,
prompt: str,
latents: mx.array,
config: Config,
time_steps: tqdm,
) -> None: ...
================================================
FILE: .mlx_typings/mflux/callbacks/callback_registry.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import TYPE_CHECKING
from mflux.callbacks.callback import (
AfterLoopCallback,
BeforeLoopCallback,
InLoopCallback,
InterruptCallback,
)
from mflux.callbacks.generation_context import GenerationContext
from mflux.models.common.config.config import Config
if TYPE_CHECKING: ...
class CallbackRegistry:
def __init__(self) -> None: ...
def register(self, callback) -> None: ...
def start(self, seed: int, prompt: str, config: Config) -> GenerationContext: ...
def before_loop_callbacks(self) -> list[BeforeLoopCallback]: ...
def in_loop_callbacks(self) -> list[InLoopCallback]: ...
def after_loop_callbacks(self) -> list[AfterLoopCallback]: ...
def interrupt_callbacks(self) -> list[InterruptCallback]: ...
================================================
FILE: .mlx_typings/mflux/callbacks/generation_context.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import PIL.Image
import tqdm
from typing import TYPE_CHECKING
from mflux.callbacks.callback_registry import CallbackRegistry
from mflux.models.common.config.config import Config
if TYPE_CHECKING: ...
class GenerationContext:
def __init__(
self, registry: CallbackRegistry, seed: int, prompt: str, config: Config
) -> None: ...
def before_loop(
self,
latents: mx.array,
*,
canny_image: PIL.Image.Image | None = ...,
depth_image: PIL.Image.Image | None = ...,
) -> None: ...
def in_loop(self, t: int, latents: mx.array, time_steps: tqdm = ...) -> None: ...
def after_loop(self, latents: mx.array) -> None: ...
def interruption(
self, t: int, latents: mx.array, time_steps: tqdm = ...
) -> None: ...
================================================
FILE: .mlx_typings/mflux/cli/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/cli/defaults/defaults.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import os
BATTERY_PERCENTAGE_STOP_LIMIT = ...
CONTROLNET_STRENGTH = ...
DEFAULT_DEV_FILL_GUIDANCE = ...
DEFAULT_DEPTH_GUIDANCE = ...
DIMENSION_STEP_PIXELS = ...
GUIDANCE_SCALE = ...
GUIDANCE_SCALE_KONTEXT = ...
IMAGE_STRENGTH = ...
MODEL_CHOICES = ...
MODEL_INFERENCE_STEPS = ...
QUANTIZE_CHOICES = ...
if os.environ.get("MFLUX_CACHE_DIR"):
MFLUX_CACHE_DIR = ...
else:
MFLUX_CACHE_DIR = ...
MFLUX_LORA_CACHE_DIR = ...
================================================
FILE: .mlx_typings/mflux/models/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/common/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/common/cli/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/common/config/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
__all__ = ["Config", "ModelConfig"]
================================================
FILE: .mlx_typings/mflux/models/common/config/config.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from pathlib import Path
from typing import Any
from tqdm import tqdm
from mflux.models.common.config.model_config import ModelConfig
# Module-level logger placeholder (value elided by stub generation).
logger = ...
class Config:
    """Stub of the per-run image-generation configuration.

    Pyright-generated: all defaults are elided as `...`, and parameter meaning
    is inferred from names — confirm against the mflux source. Read-only
    properties below mirror the constructor arguments; only `width` has a
    setter.
    """
    def __init__(
        self,
        model_config: ModelConfig,
        num_inference_steps: int = ...,
        height: int = ...,
        width: int = ...,
        guidance: float = ...,
        image_path: Path | str | None = ...,
        image_strength: float | None = ...,
        depth_image_path: Path | str | None = ...,
        redux_image_paths: list[Path | str] | None = ...,
        redux_image_strengths: list[float] | None = ...,
        masked_image_path: Path | str | None = ...,
        controlnet_strength: float | None = ...,
        scheduler: str = ...,
    ) -> None: ...
    @property
    def height(self) -> int: ...
    @property
    def width(self) -> int: ...
    @width.setter
    def width(self, value): # -> None:
        ...
    @property
    def image_seq_len(self) -> int: ...
    @property
    def guidance(self) -> float: ...
    @property
    def num_inference_steps(self) -> int: ...
    @property
    def precision(self) -> mx.Dtype: ...
    @property
    def num_train_steps(self) -> int: ...
    @property
    def image_path(self) -> Path | None: ...
    @property
    def image_strength(self) -> float | None: ...
    @property
    def depth_image_path(self) -> Path | None: ...
    @property
    def redux_image_paths(self) -> list[Path] | None: ...
    @property
    def redux_image_strengths(self) -> list[float] | None: ...
    @property
    def masked_image_path(self) -> Path | None: ...
    @property
    def init_time_step(self) -> int: ...
    @property
    def time_steps(self) -> tqdm: ...
    @property
    def controlnet_strength(self) -> float | None: ...
    @property
    def scheduler(self) -> Any: ...
================================================
FILE: .mlx_typings/mflux/models/common/config/model_config.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from functools import lru_cache
from typing import Literal
class ModelConfig:
    """Stub describing one supported model variant and its inference settings.

    The `@lru_cache`-decorated static factories each return the (cached)
    config for a named model family. Pyright-generated stub: field values and
    bodies are elided.
    """
    # Numeric precision used for this model's weights (value elided).
    precision: mx.Dtype = ...
    def __init__(
        self,
        priority: int,
        aliases: list[str],
        model_name: str,
        base_model: str | None,
        controlnet_model: str | None,
        custom_transformer_model: str | None,
        num_train_steps: int | None,
        max_sequence_length: int | None,
        supports_guidance: bool | None,
        requires_sigma_shift: bool | None,
        transformer_overrides: dict | None = ...,
    ) -> None: ...
    # --- cached singleton factories, one per supported model variant ---
    @staticmethod
    @lru_cache
    def dev() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def schnell() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_kontext() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_fill() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_redux() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_depth() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_controlnet_canny() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def schnell_controlnet_canny() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_controlnet_upscaler() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def dev_fill_catvton() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def krea_dev() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def flux2_klein_4b() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def flux2_klein_9b() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def qwen_image() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def qwen_image_edit() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def fibo() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def z_image_turbo() -> ModelConfig: ...
    @staticmethod
    @lru_cache
    def seedvr2_3b() -> ModelConfig: ...
    def x_embedder_input_dim(self) -> int: ...
    def is_canny(self) -> bool: ...
    # Resolve a config from a user-supplied model name, optionally pinned to a base model.
    @staticmethod
    def from_name(
        model_name: str, base_model: Literal["dev", "schnell", "krea-dev"] | None = ...
    ) -> ModelConfig: ...
# Registry of all selectable models (contents elided in this stub).
AVAILABLE_MODELS = ...
================================================
FILE: .mlx_typings/mflux/models/common/latent_creator/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/common/latent_creator/latent_creator.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from pathlib import Path
from typing import TYPE_CHECKING, TypeAlias
from mlx import nn
from mflux.models.common.vae.tiling_config import TilingConfig
from mflux.models.fibo.latent_creator.fibo_latent_creator import FiboLatentCreator
from mflux.models.flux.latent_creator.flux_latent_creator import FluxLatentCreator
from mflux.models.qwen.latent_creator.qwen_latent_creator import QwenLatentCreator
from mflux.models.z_image.latent_creator.z_image_latent_creator import (
ZImageLatentCreator,
)
if TYPE_CHECKING:
LatentCreatorType: TypeAlias = type[
FiboLatentCreator | FluxLatentCreator | QwenLatentCreator | ZImageLatentCreator
]
class Img2Img:
    """Stub: bundles the inputs needed to start generation from an existing image
    (VAE, latent-creator class, sigma schedule, starting step, source image path).
    """
    def __init__(
        self,
        vae: nn.Module,
        latent_creator: LatentCreatorType,
        sigmas: mx.array,
        init_time_step: int,
        image_path: str | Path | None,
        tiling_config: TilingConfig | None = ...,
    ) -> None: ...
class LatentCreator:
    """Stub of static helpers that build the initial latent tensor for
    txt2img or img2img runs (bodies elided; pyright-generated).
    """
    @staticmethod
    def create_for_txt2img_or_img2img(
        seed: int, height: int, width: int, img2img: Img2Img
    ) -> mx.array: ...
    # Encode a source image into VAE latent space, optionally tiled.
    @staticmethod
    def encode_image(
        vae: nn.Module,
        image_path: str | Path,
        height: int,
        width: int,
        tiling_config: TilingConfig | None = ...,
    ) -> mx.array: ...
    # Blend clean latents with noise at strength `sigma` (linear interpolation, per name).
    @staticmethod
    def add_noise_by_interpolation(
        clean: mx.array, noise: mx.array, sigma: float
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/common/lora/layer/fused_linear_lora_layer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mlx import nn
from mflux.models.common.lora.layer.linear_lora_layer import LoRALinear
class FusedLoRALinear(nn.Module):
    """Stub: a linear layer combining a base (possibly quantized) linear with
    one or more LoRA adapters applied at call time.
    """
    def __init__(
        self, base_linear: nn.Linear | nn.QuantizedLinear, loras: list[LoRALinear]
    ) -> None: ...
    def __call__(self, x): # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/layer/linear_lora_layer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mlx import nn
class LoRALinear(nn.Module):
    """Stub of a low-rank (LoRA) adapter layer with rank `r` and output scale `scale`."""
    # Wrap an existing linear layer with a LoRA adapter (defaults elided).
    @staticmethod
    def from_linear(
        linear: nn.Linear | nn.QuantizedLinear, r: int = ..., scale: float = ...
    ): # -> LoRALinear:
        ...
    def __init__(
        self,
        input_dims: int,
        output_dims: int,
        r: int = ...,
        scale: float = ...,
        bias: bool = ...,
    ) -> None: ...
    def __call__(self, x): # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_loader.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
from collections.abc import Callable
from dataclasses import dataclass
from mflux.models.common.lora.mapping.lora_mapping import LoRATarget
@dataclass
class PatternMatch:
    """Stub: one resolved mapping from a LoRA-file tensor name pattern to a
    target path/matrix inside the model, with optional transpose/transform.
    """
    source_pattern: str
    target_path: str
    matrix_name: str
    transpose: bool
    transform: Callable[[mx.array], mx.array] | None = ...
class LoRALoader:
    """Stub loader that applies LoRA weight files onto a transformer.

    Returns the (paths, scales) actually applied — presumably after
    resolution/validation; confirm against the mflux source.
    """
    @staticmethod
    def load_and_apply_lora(
        lora_mapping: list[LoRATarget],
        transformer: nn.Module,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> tuple[list[str], list[float]]: ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from collections.abc import Callable
from dataclasses import dataclass
from typing import List, Protocol
@dataclass
class LoRATarget:
    """Stub: a location in the model that LoRA weights can attach to, plus the
    candidate tensor-name patterns (up/down/alpha) that may supply it.
    """
    model_path: str
    possible_up_patterns: List[str]
    possible_down_patterns: List[str]
    possible_alpha_patterns: List[str] = ...
    up_transform: Callable[[mx.array], mx.array] | None = ...
    down_transform: Callable[[mx.array], mx.array] | None = ...
class LoRAMapping(Protocol):
    """Protocol: a model family exposes its LoRA attachment points via `get_mapping()`."""
    @staticmethod
    def get_mapping() -> List[LoRATarget]: ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_saver.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.nn as nn
class LoRASaver:
    """Stub: folds ("bakes") applied LoRA weights into a module and removes the
    adapter layers, returning the plain module (per name; body elided).
    """
    @staticmethod
    def bake_and_strip_lora(module: nn.Module) -> nn.Module: ...
================================================
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_transforms.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
class LoraTransforms:
    """Stub of tensor transforms used when mapping LoRA weights.

    Names suggest each splits a fused QKV/MLP up- or down-projection tensor
    into its per-head component ("single" variants for single transformer
    blocks) — bodies elided; confirm against the mflux source.
    """
    @staticmethod
    def split_q_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_k_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_v_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_q_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_k_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_v_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_q_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_k_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_v_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_mlp_up(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_q_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_k_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_v_down(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def split_single_mlp_down(tensor: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/resolution/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.resolution.config_resolution import ConfigResolution
from mflux.models.common.resolution.lora_resolution import LoraResolution
from mflux.models.common.resolution.path_resolution import PathResolution
from mflux.models.common.resolution.quantization_resolution import (
QuantizationResolution,
)
__all__ = [
"ConfigResolution",
"LoraResolution",
"PathResolution",
"QuantizationResolution",
]
================================================
FILE: .mlx_typings/mflux/models/common/resolution/actions.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from enum import Enum
from typing import NamedTuple
class QuantizationAction(Enum):
    """Stub enum: outcome of quantization resolution (member values elided)."""
    NONE = ...
    STORED = ...
    REQUESTED = ...
class PathAction(Enum):
    """Stub enum: how a model path resolves (local, HF cache, HF remote, or error)."""
    LOCAL = ...
    HUGGINGFACE_CACHED = ...
    HUGGINGFACE = ...
    ERROR = ...
class LoraAction(Enum):
    """Stub enum: how a LoRA reference resolves (local file, registry entry,
    HF collection/repo — cached or remote — or error)."""
    LOCAL = ...
    REGISTRY = ...
    HUGGINGFACE_COLLECTION_CACHED = ...
    HUGGINGFACE_COLLECTION = ...
    HUGGINGFACE_REPO_CACHED = ...
    HUGGINGFACE_REPO = ...
    ERROR = ...
class ConfigAction(Enum):
    """Stub enum: how a model name maps to a config (exact, explicit base,
    substring inference, or error)."""
    EXACT_MATCH = ...
    EXPLICIT_BASE = ...
    INFER_SUBSTRING = ...
    ERROR = ...
class Rule(NamedTuple):
    """Stub: one prioritized resolution rule — a named check string paired with
    the action to take when it matches."""
    priority: int
    name: str
    check: str
    action: QuantizationAction | PathAction | LoraAction | ConfigAction
    ...
================================================
FILE: .mlx_typings/mflux/models/common/resolution/config_resolution.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import TYPE_CHECKING
from mflux.models.common.config.model_config import ModelConfig
if TYPE_CHECKING: ...
logger = ...
class ConfigResolution:
    """Stub: resolves a user-supplied model name (plus optional base model)
    to a ModelConfig via the rule table in RULES (contents elided)."""
    RULES = ...
    @staticmethod
    def resolve(model_name: str, base_model: str | None = ...) -> ModelConfig: ...
================================================
FILE: .mlx_typings/mflux/models/common/resolution/lora_resolution.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from pathlib import Path
logger = ...
class LoraResolution:
    """Stub: resolves LoRA references (paths/registry names) to concrete paths
    and normalizes per-path scale lists."""
    RULES = ...
    # Name -> path registry of known LoRA files (populated elsewhere; elided here).
    _registry: dict[str, Path] = ...
    @staticmethod
    def resolve(path: str) -> str: ...
    @staticmethod
    def resolve_paths(paths: list[str] | None) -> list[str]: ...
    # Normalize `scales` against `num_paths` — presumably padding/defaulting; confirm in source.
    @staticmethod
    def resolve_scales(scales: list[float] | None, num_paths: int) -> list[float]: ...
    @staticmethod
    def get_registry() -> dict[str, Path]: ...
    # Scan library directories for LoRA files, keyed by name.
    @staticmethod
    def discover_files(library_paths: list[Path]) -> dict[str, Path]: ...
================================================
FILE: .mlx_typings/mflux/models/common/resolution/path_resolution.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from pathlib import Path
logger = ...
class PathResolution:
    """Stub: resolves a model path string (local or HF) to a filesystem Path,
    optionally filtered by glob-style `patterns`; returns None when unset."""
    RULES = ...
    @staticmethod
    def resolve(path: str | None, patterns: list[str] | None = ...) -> Path | None: ...
================================================
FILE: .mlx_typings/mflux/models/common/resolution/quantization_resolution.pyi
================================================
"""
This type stub file was generated by pyright.
"""
logger = ...
class QuantizationResolution:
    """Stub: reconciles a stored quantization level with a requested one,
    returning the effective bits and an optional message (per signature)."""
    RULES = ...
    @staticmethod
    def resolve(
        stored: int | None, requested: int | None
    ) -> tuple[int | None, str | None]: ...
================================================
FILE: .mlx_typings/mflux/models/common/schedulers/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from .flow_match_euler_discrete_scheduler import FlowMatchEulerDiscreteScheduler
from .linear_scheduler import LinearScheduler
from .seedvr2_euler_scheduler import SeedVR2EulerScheduler
__all__ = [
"LinearScheduler",
"FlowMatchEulerDiscreteScheduler",
"SeedVR2EulerScheduler",
]
# Error types raised during scheduler lookup/registration (semantics inferred
# from names; bodies elided in this stub).
class SchedulerModuleNotFound(ValueError): ...
class SchedulerClassNotFound(ValueError): ...
class InvalidSchedulerType(TypeError): ...
# Name -> scheduler-class registry (contents elided by stub generation).
SCHEDULER_REGISTRY = ...
# Register an external ("contrib") scheduler, optionally under a custom name.
def register_contrib(scheduler_object, scheduler_name=...): # -> None:
    ...
# Import a scheduler class from a dotted object-path string — presumably
# raising the error types above on failure; confirm against the mflux source.
def try_import_external_scheduler(
    scheduler_object_path: str,
): # -> type[BaseScheduler]:
    ...
================================================
FILE: .mlx_typings/mflux/models/common/schedulers/base_scheduler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from abc import ABC, abstractmethod
class BaseScheduler(ABC):
    """Stub of the abstract diffusion-scheduler interface.

    Concrete schedulers must expose a `sigmas` schedule and a per-timestep
    `step` that advances the latents. `scale_model_input` is a non-abstract
    hook subclasses may override (body elided).
    """
    @property
    @abstractmethod
    def sigmas(self) -> mx.array: ...
    @abstractmethod
    def step(
        self, noise: mx.array, timestep: int, latents: mx.array, **kwargs
    ) -> mx.array: ...
    def scale_model_input(self, latents: mx.array, t: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/schedulers/flow_match_euler_discrete_scheduler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from typing import TYPE_CHECKING
from mflux.models.common.config.config import Config
from mflux.models.common.schedulers.base_scheduler import BaseScheduler
if TYPE_CHECKING: ...
class FlowMatchEulerDiscreteScheduler(BaseScheduler):
    """Stub of the flow-matching Euler discrete scheduler.

    Timesteps/sigmas depend on the image sequence length, which can be set
    after construction via `set_image_seq_len` (bodies elided).
    """
    def __init__(self, config: Config) -> None: ...
    @property
    def sigmas(self) -> mx.array: ...
    @property
    def timesteps(self) -> mx.array: ...
    def set_image_seq_len(self, image_seq_len: int) -> None: ...
    # Pure helper: compute the (timesteps, sigmas) pair for a given sequence length.
    @staticmethod
    def get_timesteps_and_sigmas(
        image_seq_len: int, num_inference_steps: int, num_train_timesteps: int = ...
    ) -> tuple[mx.array, mx.array]: ...
    def step(
        self, noise: mx.array, timestep: int, latents: mx.array, **kwargs
    ) -> mx.array: ...
    def scale_model_input(self, latents: mx.array, t: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/schedulers/linear_scheduler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from typing import TYPE_CHECKING
from mflux.models.common.config.config import Config
from mflux.models.common.schedulers.base_scheduler import BaseScheduler
if TYPE_CHECKING: ...
class LinearScheduler(BaseScheduler):
    """Stub of a scheduler with a linear sigma schedule (per name; bodies elided)."""
    def __init__(self, config: Config) -> None: ...
    @property
    def sigmas(self) -> mx.array: ...
    @property
    def timesteps(self) -> mx.array: ...
    def step(
        self, noise: mx.array, timestep: int, latents: mx.array, **kwargs
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/schedulers/seedvr2_euler_scheduler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from typing import TYPE_CHECKING
from mflux.models.common.config.config import Config
from mflux.models.common.schedulers.base_scheduler import BaseScheduler
if TYPE_CHECKING: ...
class SeedVR2EulerScheduler(BaseScheduler):
    """Stub of the Euler scheduler variant used by the SeedVR2 model (bodies elided)."""
    def __init__(self, config: Config) -> None: ...
    @property
    def timesteps(self) -> mx.array: ...
    @property
    def sigmas(self) -> mx.array: ...
    def step(
        self, noise: mx.array, timestep: int, latents: mx.array, **kwargs
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/tokenizer/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.tokenizer.tokenizer import (
BaseTokenizer,
LanguageTokenizer,
Tokenizer,
VisionLanguageTokenizer,
)
from mflux.models.common.tokenizer.tokenizer_loader import TokenizerLoader
from mflux.models.common.tokenizer.tokenizer_output import TokenizerOutput
"""
This type stub file was generated by pyright.
"""
__all__ = [
"Tokenizer",
"BaseTokenizer",
"LanguageTokenizer",
"VisionLanguageTokenizer",
"TokenizerLoader",
"TokenizerOutput",
]
================================================
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from abc import ABC, abstractmethod
from typing import Protocol, runtime_checkable
from PIL import Image
from transformers import PreTrainedTokenizer
from mflux.models.common.tokenizer.tokenizer_output import TokenizerOutput
"""
This type stub file was generated by pyright.
"""
@runtime_checkable
class Tokenizer(Protocol):
tokenizer: PreTrainedTokenizer
def tokenize(
self,
prompt: str | list[str],
images: list[Image.Image] | None = ...,
max_length: int | None = ...,
**kwargs,
) -> TokenizerOutput: ...
class BaseTokenizer(ABC):
    """Stub ABC wrapping a HF PreTrainedTokenizer; subclasses implement `tokenize`."""
    def __init__(
        self, tokenizer: PreTrainedTokenizer, max_length: int = ...
    ) -> None: ...
    @abstractmethod
    def tokenize(
        self,
        prompt: str | list[str],
        images: list[Image.Image] | None = ...,
        max_length: int | None = ...,
        **kwargs,
    ) -> TokenizerOutput: ...
class LanguageTokenizer(BaseTokenizer):
    """Stub: text-only tokenizer with optional prompt template / chat-template
    support (constructor defaults elided; `images` is accepted for interface
    parity — whether it is used is not visible here)."""
    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        max_length: int = ...,
        padding: str = ...,
        return_attention_mask: bool = ...,
        template: str | None = ...,
        use_chat_template: bool = ...,
        chat_template_kwargs: dict | None = ...,
        add_special_tokens: bool = ...,
    ) -> None: ...
    def tokenize(
        self,
        prompt: str | list[str],
        images: list[Image.Image] | None = ...,
        max_length: int | None = ...,
        **kwargs,
    ) -> TokenizerOutput: ...
class VisionLanguageTokenizer(BaseTokenizer):
    """Stub: tokenizer for vision-language models — pairs a text tokenizer with
    an image `processor` and an `image_token` placeholder string."""
    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        processor,
        max_length: int = ...,
        template: str | None = ...,
        image_token: str = ...,
    ) -> None: ...
    def tokenize(
        self,
        prompt: str | list[str],
        images: list[Image.Image] | None = ...,
        max_length: int | None = ...,
        **kwargs,
    ) -> TokenizerOutput: ...
================================================
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer_loader.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import TYPE_CHECKING
from mflux.models.common.tokenizer.tokenizer import BaseTokenizer
from mflux.models.common.weights.loading.weight_definition import TokenizerDefinition
"""
This type stub file was generated by pyright.
"""
if TYPE_CHECKING: ...
class TokenizerLoader:
    """Stub: constructs BaseTokenizer instances from TokenizerDefinitions
    rooted at `model_path` (bodies elided)."""
    @staticmethod
    def load(definition: TokenizerDefinition, model_path: str) -> BaseTokenizer: ...
    # Load several tokenizers keyed by definition name, with optional per-name
    # max_length overrides.
    @staticmethod
    def load_all(
        definitions: list[TokenizerDefinition],
        model_path: str,
        max_length_overrides: dict[str, int] | None = ...,
    ) -> dict[str, BaseTokenizer]: ...
================================================
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer_output.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from dataclasses import dataclass
"""
This type stub file was generated by pyright.
"""
@dataclass
class TokenizerOutput:
    """Stub: tokenization result — ids and attention mask, plus optional vision
    tensors (pixel values and image grid dimensions) for VLM inputs."""
    input_ids: mx.array
    attention_mask: mx.array
    pixel_values: mx.array | None = ...
    image_grid_thw: mx.array | None = ...
================================================
FILE: .mlx_typings/mflux/models/common/vae/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.vae.tiling_config import TilingConfig
from mflux.models.common.vae.vae_tiler import VAETiler
__all__ = ["TilingConfig", "VAETiler"]
================================================
FILE: .mlx_typings/mflux/models/common/vae/tiling_config.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from dataclasses import dataclass
@dataclass(frozen=True, slots=True)
class TilingConfig:
    """Stub: immutable settings for tiled VAE encode/decode (defaults elided)."""
    vae_decode_tiles_per_dim: int | None = ...
    vae_decode_overlap: int = ...
    vae_encode_tiled: bool = ...
    vae_encode_tile_size: int = ...
    vae_encode_tile_overlap: int = ...
================================================
FILE: .mlx_typings/mflux/models/common/vae/vae_tiler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from typing import Callable
class VAETiler:
    """Stub: tile-wise VAE encode/decode to bound peak memory.

    Both helpers take the per-tile `encode_fn`/`decode_fn` as a callable and
    are keyword-only; `spatial_scale` presumably relates pixel and latent
    resolution — confirm against the mflux source.
    """
    @staticmethod
    def encode_image_tiled(
        *,
        image: mx.array,
        encode_fn: Callable[[mx.array], mx.array],
        latent_channels: int,
        tile_size: tuple[int, int] = ...,
        tile_overlap: tuple[int, int] = ...,
        spatial_scale: int = ...,
    ) -> mx.array: ...
    @staticmethod
    def decode_image_tiled(
        *,
        latent: mx.array,
        decode_fn: Callable[[mx.array], mx.array],
        tile_size: tuple[int, int] = ...,
        tile_overlap: tuple[int, int] = ...,
        spatial_scale: int = ...,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/vae/vae_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from mflux.models.common.vae.tiling_config import TilingConfig
class VAEUtil:
    """Stub: thin encode/decode front-end over a VAE module, dispatching to
    tiled processing when a TilingConfig is supplied (bodies elided)."""
    @staticmethod
    def encode(
        vae: nn.Module, image: mx.array, tiling_config: TilingConfig | None = ...
    ) -> mx.array: ...
    @staticmethod
    def decode(
        vae: nn.Module, latent: mx.array, tiling_config: TilingConfig | None = ...
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.weights.loading.loaded_weights import LoadedWeights, MetaData
from mflux.models.common.weights.loading.weight_applier import WeightApplier
from mflux.models.common.weights.loading.weight_definition import ComponentDefinition
from mflux.models.common.weights.loading.weight_loader import WeightLoader
from mflux.models.common.weights.saving.model_saver import ModelSaver
__all__ = [
"ComponentDefinition",
"LoadedWeights",
"MetaData",
"ModelSaver",
"WeightApplier",
"WeightLoader",
]
================================================
FILE: .mlx_typings/mflux/models/common/weights/loading/loaded_weights.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from dataclasses import dataclass
@dataclass
class MetaData:
    """Stub: metadata stored alongside saved weights (quantization bits and
    the mflux version that wrote them)."""
    quantization_level: int | None = ...
    mflux_version: str | None = ...
@dataclass
class LoadedWeights:
    """Stub: loaded weight tensors grouped by component name, plus metadata.

    `__getattr__` exposes components as attributes (returning None when the
    name is absent, per its return type); the block-count helpers inspect a
    named component (bodies elided).
    """
    components: dict[str, dict]
    meta_data: MetaData
    def __getattr__(self, name: str) -> dict | None: ...
    def num_transformer_blocks(self, component_name: str = ...) -> int: ...
    def num_single_transformer_blocks(self, component_name: str = ...) -> int: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_applier.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.nn as nn
from typing import TYPE_CHECKING
from mflux.models.common.weights.loading.loaded_weights import LoadedWeights
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
WeightDefinitionType,
)
if TYPE_CHECKING: ...
class WeightApplier:
    """Stub: applies loaded weights onto model modules and optionally quantizes
    them; both helpers return the effective quantization bits (or None)."""
    @staticmethod
    def apply_and_quantize_single(
        weights: LoadedWeights,
        model: nn.Module,
        component: ComponentDefinition,
        quantize_arg: int | None,
        quantization_predicate=...,
    ) -> int | None: ...
    # Apply/quantize every component described by a model family's weight definition.
    @staticmethod
    def apply_and_quantize(
        weights: LoadedWeights,
        models: dict[str, nn.Module],
        quantize_arg: int | None,
        weight_definition: WeightDefinitionType,
    ) -> int | None: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from dataclasses import dataclass
from typing import Callable, List, TYPE_CHECKING, TypeAlias
from mflux.models.common.weights.mapping.weight_mapping import WeightTarget
from mflux.models.common.tokenizer.tokenizer import BaseTokenizer
from mflux.models.depth_pro.weights.depth_pro_weight_definition import (
DepthProWeightDefinition,
)
from mflux.models.fibo.weights.fibo_weight_definition import FIBOWeightDefinition
from mflux.models.fibo_vlm.weights.fibo_vlm_weight_definition import (
FIBOVLMWeightDefinition,
)
from mflux.models.flux.weights.flux_weight_definition import FluxWeightDefinition
from mflux.models.qwen.weights.qwen_weight_definition import QwenWeightDefinition
from mflux.models.seedvr2.weights.seedvr2_weight_definition import (
SeedVR2WeightDefinition,
)
from mflux.models.z_image.weights.z_image_weight_definition import (
ZImageWeightDefinition,
)
"""
This type stub file was generated by pyright.
"""
if TYPE_CHECKING:
WeightDefinitionType: TypeAlias = type[
FluxWeightDefinition
| FIBOWeightDefinition
| FIBOVLMWeightDefinition
| QwenWeightDefinition
| ZImageWeightDefinition
| SeedVR2WeightDefinition
| DepthProWeightDefinition
]
@dataclass
class ComponentDefinition:
    """Stub: describes one loadable model component — its HF subdirectory,
    weight-name mapping, target attribute, and loading/quantization options
    (defaults elided; field semantics inferred from names)."""
    name: str
    hf_subdir: str
    mapping_getter: Callable[[], List[WeightTarget]] | None = ...
    model_attr: str | None = ...
    num_blocks: int | None = ...
    num_layers: int | None = ...
    loading_mode: str = ...
    precision: mx.Dtype | None = ...
    skip_quantization: bool = ...
    bulk_transform: Callable[[mx.array], mx.array] | None = ...
    weight_subkey: str | None = ...
    download_url: str | None = ...
    weight_prefix_filters: List[str] | None = ...
    weight_files: List[str] | None = ...
@dataclass
class TokenizerDefinition:
    """Stub: declarative description of a tokenizer to load — HF location,
    wrapper/encoder classes, and templating options (defaults elided)."""
    name: str
    hf_subdir: str
    tokenizer_class: str = ...
    fallback_subdirs: List[str] | None = ...
    download_patterns: List[str] | None = ...
    encoder_class: type[BaseTokenizer] | None = ...
    max_length: int = ...
    padding: str = ...
    template: str | None = ...
    use_chat_template: bool = ...
    chat_template_kwargs: dict | None = ...
    add_special_tokens: bool = ...
    processor_class: type | None = ...
    image_token: str = ...
    chat_template: str | None = ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_loader.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import TYPE_CHECKING
from mflux.models.common.weights.loading.loaded_weights import LoadedWeights
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
WeightDefinitionType,
)
if TYPE_CHECKING: ...
logger = ...
class WeightLoader:
    """Stub: loads weights from disk/HF into LoadedWeights, either one
    component at a time or per a family's whole weight definition."""
    @staticmethod
    def load_single(
        component: ComponentDefinition, repo_id: str, file_pattern: str = ...
    ) -> LoadedWeights: ...
    @staticmethod
    def load(
        weight_definition: WeightDefinitionType, model_path: str | None = ...
    ) -> LoadedWeights: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_mapper.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from typing import Dict, List, Optional
from mflux.models.common.weights.mapping.weight_mapping import WeightTarget
class WeightMapper:
    """Stub: renames/reshapes HuggingFace-format weight dicts into the model's
    internal layout using a list of WeightTarget rules (body elided)."""
    @staticmethod
    def apply_mapping(
        hf_weights: Dict[str, mx.array],
        mapping: List[WeightTarget],
        num_blocks: Optional[int] = ...,
        num_layers: Optional[int] = ...,
    ) -> Dict: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from dataclasses import dataclass
from typing import Callable, List, Optional, Protocol
"""
This type stub file was generated by pyright.
"""
@dataclass
class WeightTarget:
    """Stub: one weight-mapping rule — candidate source patterns, the target
    pattern, and an optional per-tensor transform (defaults elided)."""
    to_pattern: str
    from_pattern: List[str]
    transform: Optional[Callable[[mx.array], mx.array]] = ...
    required: bool = ...
    max_blocks: Optional[int] = ...
class WeightMapping(Protocol):
    """Protocol: a model family exposes its weight-mapping rules via `get_mapping()`."""
    @staticmethod
    def get_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_transforms.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
class WeightTransforms:
    """Stub: reusable tensor transforms applied during weight mapping —
    names indicate reshapes/transposes for norm gammas, patch embeddings,
    and conv weights (bodies elided)."""
    @staticmethod
    def reshape_gamma_to_1d(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def transpose_patch_embed(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def transpose_conv3d_weight(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def transpose_conv2d_weight(tensor: mx.array) -> mx.array: ...
    @staticmethod
    def transpose_conv_transpose2d_weight(tensor: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/common/weights/saving/model_saver.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import Any, TYPE_CHECKING
from mflux.models.common.weights.loading.weight_definition import WeightDefinitionType
if TYPE_CHECKING: ...
class ModelSaver:
    """Stub: persists a (possibly quantized, `bits`) model under `base_path`
    using the family's weight definition for layout (body elided)."""
    @staticmethod
    def save_model(
        model: Any, bits: int, base_path: str, weight_definition: WeightDefinitionType
    ) -> None: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/depth_pro_initializer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.depth_pro.model.depth_pro_model import DepthProModel
class DepthProInitializer:
    """Stub: in-place initialization (and optional quantization) of a
    DepthProModel's weights (body elided)."""
    @staticmethod
    def init(model: DepthProModel, quantize: int | None = ...) -> None: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/feature_fusion_block_2d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class FeatureFusionBlock2d(nn.Module):
    """Stub: DepthPro decoder block fusing a feature map with an optional
    second input; `deconv` toggles a deconvolution path (per name)."""
    def __init__(self, num_features: int, deconv: bool = ...) -> None: ...
    def __call__(self, x0: mx.array, x1: mx.array | None = ...) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/multires_conv_decoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class MultiresConvDecoder(nn.Module):
    """Stub: decoder combining two latents, two feature maps, and global features into one array."""

    def __init__(self) -> None: ...
    def __call__(
        self,
        x0_latent: mx.array,
        x1_latent: mx.array,
        x0_features: mx.array,
        x1_features: mx.array,
        x_global_features: mx.array,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/residual_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ResidualBlock(nn.Module):
    """Stub: array-to-array module parameterized by a feature count."""

    def __init__(self, num_features: int) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from dataclasses import dataclass
from pathlib import Path
from PIL import Image
@dataclass
class DepthResult:
    """Result bundle returned by DepthPro.create_depth_map."""

    # Rendered depth map as a PIL image.
    depth_image: Image.Image
    # Depth values as an MLX array.
    depth_array: mx.array
    min_depth: float
    max_depth: float
    ...
class DepthPro:
    """Stub: depth-estimation front end; create_depth_map maps an image path to a DepthResult."""

    def __init__(self, quantize: int | None = ...) -> None: ...
    def create_depth_map(self, image_path: str | Path) -> DepthResult: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro_model.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class DepthProModel(nn.Module):
    """Stub: core model taking three input arrays and returning a pair of arrays."""

    def __init__(self) -> None: ...
    def __call__(
        self, x0: mx.array, x1: mx.array, x2: mx.array
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class DepthProUtil:
    """Stub: static array helpers (split with overlap, interpolate, conv application)."""

    @staticmethod
    def split(x: mx.array, overlap_ratio: float = ...) -> mx.array: ...
    @staticmethod
    # `size` / `scale_factor` defaults are elided in the stub; see implementation.
    def interpolate(x: mx.array, size=..., scale_factor=...):  # -> array:
        ...
    @staticmethod
    def apply_conv(x: mx.array, conv_module: nn.Module) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class Attention(nn.Module):
    """Stub: DINOv2 attention module, configurable by dim / head_dim / num_heads."""

    def __init__(
        self, dim: int = ..., head_dim: int = ..., num_heads: int = ...
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/dino_vision_transformer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class DinoVisionTransformer(nn.Module):
    """Stub: vision transformer returning a triple of arrays from one input array."""

    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> tuple[mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/layer_scale.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class LayerScale(nn.Module):
    """Stub: per-dimension scaling module with a configurable initial value."""

    def __init__(self, dims: int, init_values: float = ...) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/mlp.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class MLP(nn.Module):
    """Stub: array-to-array MLP module with no constructor parameters."""

    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/patch_embed.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class PatchEmbed(nn.Module):
    """Stub: patch-embedding module, array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/transformer_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class TransformerBlock(nn.Module):
    """Stub: DINOv2 transformer block, array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/encoder/depth_pro_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class DepthProEncoder(nn.Module):
    """Stub: encoder mapping three input arrays to a 5-tuple of arrays."""

    def __init__(self) -> None: ...
    def __call__(
        self, x0: mx.array, x1: mx.array, x2: mx.array
    ) -> tuple[mx.array, mx.array, mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/encoder/upsample_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class UpSampleBlock(nn.Module):
    """Stub: upsampling module with configurable in/intermediate/out dims and layer count."""

    def __init__(
        self,
        dim_in: int = ...,
        dim_int: int = ...,
        dim_out: int = ...,
        upsample_layers: int = ...,
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/model/head/fov_head.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class FOVHead(nn.Module):
    """Stub: field-of-view head module, array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class DepthProWeightDefinition:
    """Stub: static weight-definition surface (components, tokenizers, download patterns, quantization predicate)."""

    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    @staticmethod
    # Decides per-module whether quantization applies; `module` is untyped in the stub.
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class DepthProWeightMapping(WeightMapping):
    """Stub: weight mapping exposing a single static list of WeightTarget entries."""

    @staticmethod
    def get_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/fibo/latent_creator/fibo_latent_creator.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
class FiboLatentCreator:
    """Stub: static latent helpers — seeded noise creation plus pack/unpack by height/width."""

    @staticmethod
    def create_noise(seed: int, height: int, width: int) -> mx.array: ...
    @staticmethod
    def pack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
    @staticmethod
    def unpack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/fibo/weights/fibo_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class FIBOWeightDefinition:
    """Stub: static weight-definition surface for FIBO (parallel to DepthProWeightDefinition)."""

    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    @staticmethod
    # Decides per-module whether quantization applies; `module` is untyped in the stub.
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/fibo/weights/fibo_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class FIBOWeightMapping(WeightMapping):
    """Stub: FIBO weight mappings split by component (transformer / text encoder / VAE)."""

    @staticmethod
    def get_transformer_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_text_encoder_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_vae_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_image_processor.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.qwen.tokenizer.qwen_image_processor import QwenImageProcessor
class Qwen2VLImageProcessor(QwenImageProcessor):
    """Stub: Qwen2-VL image processor; inherits its interface from QwenImageProcessor."""

    def __init__(self) -> None: ...
================================================
FILE: .mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_processor.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import Optional, Union
from PIL import Image
class Qwen2VLProcessor:
    """Stub: processor wrapping a tokenizer; both entry points return a dict."""

    def __init__(self, tokenizer) -> None: ...
    # Formats `messages` via a chat template; return value is a dict per the
    # pyright-inferred comment below.
    def apply_chat_template(
        self,
        messages,
        tokenize: bool = ...,
        add_generation_prompt: bool = ...,
        return_tensors: Optional[str] = ...,
        return_dict: bool = ...,
        **kwargs,
    ):  # -> dict[Any, Any]:
        ...
    # Processes text and/or PIL images into model inputs.
    def __call__(
        self,
        text: Optional[Union[str, list[str]]] = ...,
        images: Optional[Union[Image.Image, list[Image.Image]]] = ...,
        padding: bool = ...,
        return_tensors: Optional[str] = ...,
        **kwargs,
    ):  # -> dict[Any, Any]:
        ...
================================================
FILE: .mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
# Module-level chat template constant; its value is elided in this generated stub.
QWEN2VL_CHAT_TEMPLATE = ...
class FIBOVLMWeightDefinition:
    """Stub: static weight-definition surface for the FIBO VLM (same shape as the other *WeightDefinition stubs)."""

    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    @staticmethod
    # Decides per-module whether quantization applies; `module` is untyped in the stub.
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class FIBOVLMWeightMapping(WeightMapping):
    """Stub: VLM weight mappings for decoder and visual towers, sized by layer/depth counts."""

    @staticmethod
    def get_vlm_decoder_mapping(num_layers: int = ...) -> List[WeightTarget]: ...
    @staticmethod
    def get_vlm_visual_mapping(depth: int = ...) -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/flux/cli/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/flux/flux_initializer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.config import ModelConfig
class FluxInitializer:
    """Stub: static initializers for Flux model variants.

    All variants share the same parameter surface (model, config, quantization,
    optional model path and LoRA paths/scales); `init` additionally accepts a
    custom transformer. `model` is untyped in this generated stub.
    """

    @staticmethod
    def init(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        custom_transformer=...,
    ) -> None: ...
    @staticmethod
    def init_depth(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
    @staticmethod
    def init_redux(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
    @staticmethod
    def init_controlnet(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
    @staticmethod
    def init_concept(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
================================================
FILE: .mlx_typings/mflux/models/flux/latent_creator/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/flux/latent_creator/flux_latent_creator.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
"""
This type stub file was generated by pyright.
"""
class FluxLatentCreator:
    """Stub: static latent helpers for Flux; pack_latents also takes a channel count."""

    @staticmethod
    def create_noise(seed: int, height: int, width: int) -> mx.array: ...
    @staticmethod
    def pack_latents(
        latents: mx.array, height: int, width: int, num_channels_latents: int = ...
    ) -> mx.array: ...
    @staticmethod
    def unpack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_embeddings.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class CLIPEmbeddings(nn.Module):
    """Stub: CLIP embedding module, callable on a token array; width set by `dims`."""

    def __init__(self, dims: int) -> None: ...
    def __call__(self, tokens: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
"""
This type stub file was generated by pyright.
"""
class CLIPEncoder(nn.Module):
    """Stub: CLIP encoder, callable on a token array."""

    def __init__(self) -> None: ...
    def __call__(self, tokens: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder_layer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class CLIPEncoderLayer(nn.Module):
    """Stub: one CLIP encoder layer; takes hidden states plus a causal attention mask."""

    def __init__(self, layer: int) -> None: ...
    def __call__(
        self, hidden_states: mx.array, causal_attention_mask: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_mlp.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class CLIPMLP(nn.Module):
    """Stub: CLIP MLP module; also exposes the static quick_gelu activation."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
    @staticmethod
    def quick_gelu(input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_sdpa_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class CLIPSdpaAttention(nn.Module):
    """Stub: CLIP SDPA attention; class attribute values are elided by the generator."""

    head_dimension = ...
    batch_size = ...
    num_heads = ...
    def __init__(self) -> None: ...
    def __call__(
        self, hidden_states: mx.array, causal_attention_mask: mx.array
    ) -> mx.array: ...
    @staticmethod
    # Parameters are untyped in the stub; pyright infers an array return (comment below).
    def reshape_and_transpose(x, batch_size, num_heads, head_dim):  # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_text_model.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class CLIPTextModel(nn.Module):
    """Stub: CLIP text model returning a pair of arrays; exposes a static causal-mask builder."""

    def __init__(self, dims: int, num_encoder_layers: int) -> None: ...
    def __call__(self, tokens: mx.array) -> tuple[mx.array, mx.array]: ...
    @staticmethod
    def create_causal_attention_mask(input_shape: tuple) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/encoder_clip.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class EncoderCLIP(nn.Module):
    """Stub: stacked CLIP encoder sized by `num_encoder_layers`; takes tokens and a causal mask."""

    def __init__(self, num_encoder_layers: int) -> None: ...
    def __call__(
        self, tokens: mx.array, causal_attention_mask: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/prompt_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mflux.models.common.tokenizer import Tokenizer
from mflux.models.flux.model.flux_text_encoder.clip_encoder.clip_encoder import (
CLIPEncoder,
)
from mflux.models.flux.model.flux_text_encoder.t5_encoder.t5_encoder import T5Encoder
"""
This type stub file was generated by pyright.
"""
class PromptEncoder:
    """Stub: static prompt encoding via T5 + CLIP tokenizers/encoders.

    `prompt_cache` maps a prompt string to a cached (t5, clip) embedding pair,
    matching the method's return type.
    """

    @staticmethod
    def encode_prompt(
        prompt: str,
        prompt_cache: dict[str, tuple[mx.array, mx.array]],
        t5_tokenizer: Tokenizer,
        clip_tokenizer: Tokenizer,
        t5_text_encoder: T5Encoder,
        clip_text_encoder: CLIPEncoder,
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5Attention(nn.Module):
    """Stub: T5 attention module, hidden states in / hidden states out."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5Block(nn.Module):
    """Stub: one T5 block, indexed by `layer`."""

    def __init__(self, layer: int) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_dense_relu_dense.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5DenseReluDense(nn.Module):
    """Stub: T5 dense-activation-dense module; also exposes the static new_gelu activation."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
    @staticmethod
    def new_gelu(input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
"""
This type stub file was generated by pyright.
"""
class T5Encoder(nn.Module):
    """Stub: T5 encoder; the generator left __call__'s return type unannotated."""

    def __init__(self) -> None: ...
    def __call__(self, tokens: mx.array): ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_feed_forward.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5FeedForward(nn.Module):
    """Stub: T5 feed-forward module, hidden states in / hidden states out."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_layer_norm.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5LayerNorm(nn.Module):
    """Stub: T5 layer norm, hidden states in / hidden states out."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_self_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class T5SelfAttention(nn.Module):
    """Stub: T5 self-attention with static shape/un_shape helpers (untyped, array-returning)."""

    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
    @staticmethod
    def shape(states):  # -> array:
        ...
    @staticmethod
    def un_shape(states):  # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_continuous.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class AdaLayerNormContinuous(nn.Module):
    """Stub: adaptive layer norm conditioned on text embeddings."""

    def __init__(self, embedding_dim: int, conditioning_embedding_dim: int) -> None: ...
    def __call__(self, x: mx.array, text_embeddings: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class AdaLayerNormZero(nn.Module):
    """Stub: AdaLN-Zero; returns five arrays (normed states plus modulation terms — confirm order in impl)."""

    def __init__(self) -> None: ...
    def __call__(
        self, hidden_states: mx.array, text_embeddings: mx.array
    ) -> tuple[mx.array, mx.array, mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero_single.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class AdaLayerNormZeroSingle(nn.Module):
    """Stub: single-stream AdaLN-Zero variant; returns a pair of arrays."""

    def __init__(self) -> None: ...
    def __call__(
        self, hidden_states: mx.array, text_embeddings: mx.array
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/common/attention_utils.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class AttentionUtils:
    """Stub: static attention helpers shared by the Flux transformer blocks.

    Covers QKV projection/normalization, masked attention, key-padding-mask
    conversion, and two RoPE application variants.
    """

    @staticmethod
    # Projects hidden states through q/k/v linears and applies RMS norms to q/k.
    def process_qkv(
        hidden_states: mx.array,
        to_q: nn.Linear,
        to_k: nn.Linear,
        to_v: nn.Linear,
        norm_q: nn.RMSNorm,
        norm_k: nn.RMSNorm,
        num_heads: int,
        head_dim: int,
    ) -> tuple[mx.array, mx.array, mx.array]: ...
    @staticmethod
    def compute_attention(
        query: mx.array,
        key: mx.array,
        value: mx.array,
        batch_size: int,
        num_heads: int,
        head_dim: int,
        mask: mx.array | None = ...,
    ) -> mx.array: ...
    @staticmethod
    # Returns None when no mask is given, mirroring the optional input.
    def convert_key_padding_mask_to_additive_mask(
        mask: mx.array | None, joint_seq_len: int, txt_seq_len: int
    ) -> mx.array | None: ...
    @staticmethod
    def apply_rope(
        xq: mx.array, xk: mx.array, freqs_cis: mx.array
    ) -> tuple[mx.array, mx.array]: ...
    @staticmethod
    # Variant taking precomputed cos/sin tables instead of freqs_cis.
    def apply_rope_bshd(
        xq: mx.array, xk: mx.array, cos: mx.array, sin: mx.array
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/embed_nd.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class EmbedND(nn.Module):
    """Stub: positional embedding module over an ids array; exposes a static rope helper."""

    def __init__(self) -> None: ...
    def __call__(self, ids: mx.array) -> mx.array: ...
    @staticmethod
    def rope(pos: mx.array, dim: int, theta: float) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/feed_forward.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class FeedForward(nn.Module):
    """Stub: feed-forward module parameterized by an activation function (untyped in stub)."""

    def __init__(self, activation_function) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/guidance_embedder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class GuidanceEmbedder(nn.Module):
    """Stub: guidance embedder, array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, sample: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/joint_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
class JointAttention(nn.Module):
    """Stub: joint text/image attention; attributes list the projection and norm submodules."""

    num_heads: int
    head_dimension: int
    # Image-stream q/k/v projections and norms.
    to_q: nn.Linear
    to_k: nn.Linear
    to_v: nn.Linear
    norm_q: nn.RMSNorm
    norm_k: nn.RMSNorm
    # Text-stream ("add") q/k/v projections and norms.
    add_q_proj: nn.Linear
    add_k_proj: nn.Linear
    add_v_proj: nn.Linear
    norm_added_q: nn.RMSNorm
    norm_added_k: nn.RMSNorm
    to_out: list[Any]
    to_add_out: nn.Linear
    def __init__(self) -> None: ...
    def __call__(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        image_rotary_emb: mx.array,
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/joint_transformer_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
from mflux.models.flux.model.flux_transformer.joint_attention import JointAttention
from mflux.models.flux.model.flux_transformer.ada_layer_norm_zero import (
AdaLayerNormZero,
)
class JointTransformerBlock(nn.Module):
    """Stub: dual-stream transformer block combining JointAttention with AdaLN-Zero norms."""

    attn: JointAttention
    norm1: AdaLayerNormZero
    norm1_context: AdaLayerNormZero
    norm2: nn.Module
    norm2_context: nn.Module
    ff: nn.Module
    ff_context: nn.Module
    def __init__(self, layer: Any) -> None: ...
    def __call__(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: mx.array,
    ) -> tuple[mx.array, mx.array]: ...
    @staticmethod
    # Applies a norm layer and feed-forward with the given gate/scale/shift arrays.
    def apply_norm_and_feed_forward(
        hidden_states: mx.array,
        attn_output: mx.array,
        gate_mlp: mx.array,
        gate_msa: mx.array,
        scale_mlp: mx.array,
        shift_mlp: mx.array,
        norm_layer: nn.Module,
        ff_layer: nn.Module,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/single_block_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SingleBlockAttention(nn.Module):
    """Stub: single-stream attention with q/k/v projections and q/k RMS norms."""

    num_heads: int
    head_dimension: int
    to_q: nn.Linear
    to_k: nn.Linear
    to_v: nn.Linear
    norm_q: nn.RMSNorm
    norm_k: nn.RMSNorm
    def __init__(self) -> None: ...
    def __call__(
        self, hidden_states: mx.array, image_rotary_emb: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/single_transformer_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
from mflux.models.flux.model.flux_transformer.single_block_attention import (
SingleBlockAttention,
)
from mflux.models.flux.model.flux_transformer.ada_layer_norm_zero_single import (
AdaLayerNormZeroSingle,
)
class SingleTransformerBlock(nn.Module):
    """Stub: single-stream transformer block (SingleBlockAttention + AdaLayerNormZeroSingle)."""

    attn: SingleBlockAttention
    norm: AdaLayerNormZeroSingle
    def __init__(self, layer: Any) -> None: ...
    def __call__(
        self,
        hidden_states: mx.array,
        text_embeddings: mx.array,
        rotary_embeddings: mx.array,
    ) -> tuple[mx.array, mx.array]: ...
    # Private helper exposed in the generated stub.
    def _apply_feed_forward_and_projection(
        self, norm_hidden_states: mx.array, attn_output: mx.array, gate: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/text_embedder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class TextEmbedder(nn.Module):
    """Stub: text embedder, caption array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, caption: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/time_text_embed.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from mflux.models.common.config import ModelConfig
class TimeTextEmbed(nn.Module):
    """Stub: combines time step, pooled projection, and guidance arrays into one embedding."""

    def __init__(self, model_config: ModelConfig) -> None: ...
    def __call__(
        self, time_step: mx.array, pooled_projection: mx.array, guidance: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/timestep_embedder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class TimestepEmbedder(nn.Module):
    """Stub: timestep embedder, array in / array out."""

    def __init__(self) -> None: ...
    def __call__(self, sample: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/transformer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.model.flux_transformer.embed_nd import EmbedND
from mflux.models.flux.model.flux_transformer.time_text_embed import TimeTextEmbed
from mflux.models.flux.model.flux_transformer.joint_transformer_block import (
JointTransformerBlock,
)
from mflux.models.flux.model.flux_transformer.single_transformer_block import (
SingleTransformerBlock,
)
class Transformer(nn.Module):
    """Stub: top-level Flux transformer.

    Holds joint and single transformer block lists plus embedding/projection
    submodules; __call__ optionally consumes controlnet block samples and
    kontext image ids.
    """

    transformer_blocks: list[JointTransformerBlock]
    single_transformer_blocks: list[SingleTransformerBlock]
    x_embedder: nn.Linear
    pos_embed: EmbedND
    time_text_embed: TimeTextEmbed
    norm_out: nn.LayerNorm
    proj_out: nn.Linear
    context_embedder: nn.Linear
    def __init__(
        self,
        model_config: ModelConfig,
        num_transformer_blocks: int = ...,
        num_single_transformer_blocks: int = ...,
    ) -> None: ...
    # `t` is the integer denoising step index — presumably; confirm in implementation.
    def __call__(
        self,
        t: int,
        config: Config,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
        pooled_prompt_embeds: mx.array,
        controlnet_block_samples: list[mx.array] | None = ...,
        controlnet_single_block_samples: list[mx.array] | None = ...,
        kontext_image_ids: mx.array | None = ...,
    ) -> mx.array: ...
    @staticmethod
    def compute_rotary_embeddings(
        prompt_embeds: mx.array,
        pos_embed: EmbedND,
        config: Config,
        kontext_image_ids: mx.array | None = ...,
    ) -> mx.array: ...
    @staticmethod
    def compute_text_embeddings(
        t: int,
        pooled_prompt_embeds: mx.array,
        time_text_embed: TimeTextEmbed,
        config: Config,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class Attention(nn.Module):
    """Stub: attention layer shared by the FLUX VAE blocks (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/resnet_block_2d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class ResnetBlock2D(nn.Module):
    """Stub: 2D ResNet block of the FLUX VAE.

    Constructor parameters name the norm/conv channel sizes; the optional
    conv-shortcut arguments are only meaningful when ``is_conv_shortcut``
    is set (behavior not visible in this stub).
    """
    def __init__(
        self,
        norm1: int,
        conv1_in: int,
        conv1_out: int,
        norm2: int,
        conv2_in: int,
        conv2_out: int,
        conv_shortcut_in: int | None = ...,
        conv_shortcut_out: int | None = ...,
        is_conv_shortcut: bool = ...,
    ) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/unet_mid_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class UnetMidBlock(nn.Module):
    """Stub: mid block of the FLUX VAE UNet (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_in.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvIn(nn.Module):
    """Stub: input convolution of the FLUX VAE decoder (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_norm_out.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvNormOut(nn.Module):
    """Stub: output normalization layer of the FLUX VAE decoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_out.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvOut(nn.Module):
    """Stub: output convolution of the FLUX VAE decoder (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/decoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class Decoder(nn.Module):
    """Stub: FLUX VAE decoder mapping latents to an image array.

    ``enable_tiling``/``split_direction`` presumably control tiled decoding
    for large images — confirm against the mflux implementation.
    """
    def __init__(
        self, enable_tiling: bool = ..., split_direction: str = ...
    ) -> None: ...
    def __call__(self, latents: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_1_or_2.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class UpBlock1Or2(nn.Module):
    """Stub: first/second up-sampling block of the VAE decoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_3.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class UpBlock3(nn.Module):
    """Stub: third up-sampling block of the VAE decoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_4.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class UpBlock4(nn.Module):
    """Stub: fourth up-sampling block of the VAE decoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_sampler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class UpSampler(nn.Module):
    """Stub: VAE decoder up-sampler (conv channel sizes in the constructor)."""
    def __init__(self, conv_in: int, conv_out: int) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
    # Nearest-neighbour up-sampling helper; return annotation lost by the
    # stub generator (pyright noted "-> array").
    @staticmethod
    def up_sample_nearest(x: mx.array, scale: int = ...): # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_in.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvIn(nn.Module):
    """Stub: input convolution of the FLUX VAE encoder (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_norm_out.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvNormOut(nn.Module):
    """Stub: output normalization layer of the FLUX VAE encoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_out.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import mlx.nn as nn
class ConvOut(nn.Module):
    """Stub: output convolution of the FLUX VAE encoder (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_1.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class DownBlock1(nn.Module):
    """Stub: first down-sampling block of the VAE encoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_2.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class DownBlock2(nn.Module):
    """Stub: second down-sampling block of the VAE encoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_3.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class DownBlock3(nn.Module):
    """Stub: third down-sampling block of the VAE encoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_4.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class DownBlock4(nn.Module):
    """Stub: fourth down-sampling block of the VAE encoder."""
    def __init__(self) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_sampler.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class DownSampler(nn.Module):
    """Stub: VAE encoder down-sampler (conv channel sizes in the constructor)."""
    def __init__(self, conv_in: int, conv_out: int) -> None: ...
    def __call__(self, input_array: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class Encoder(nn.Module):
    """Stub: FLUX VAE encoder. NOTE(review): the parameter is named
    ``latents`` although an encoder presumably receives image data — the
    name comes from the generated stub; confirm against the implementation."""
    def __init__(self) -> None: ...
    def __call__(self, latents: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/vae.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VAE(nn.Module):
    """Stub interface of the FLUX variational autoencoder."""
    # Latent scaling constants; actual values live in the implementation.
    scaling_factor: int = ...
    shift_factor: int = ...
    # Untyped in the generated stub — types not recoverable here.
    spatial_scale = ...
    latent_channels = ...
    def __init__(self) -> None: ...
    # latents -> decoded image array.
    def decode(self, latents: mx.array) -> mx.array: ...
    # image -> encoded latents.
    def encode(self, image: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/redux_encoder/redux_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class ReduxEncoder(nn.Module):
    """Stub: FLUX Redux encoder module (array -> array)."""
    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipEncoder(nn.Module):
    """Stub: SigLIP encoder stack taking input embeddings."""
    def __init__(self) -> None: ...
    def __call__(self, inputs_embeds: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder_layer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipEncoderLayer(nn.Module):
    """Stub: single SigLIP encoder layer (hidden states in/out)."""
    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_mlp.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipMLP(nn.Module):
    """Stub: feed-forward MLP of a SigLIP encoder layer."""
    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_multi_head_attention_pooling_head.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipMultiHeadAttentionPoolingHead(nn.Module):
    """Stub: SigLIP attention-pooling head over hidden states."""
    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_sdpa_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipSdpaAttention(nn.Module):
    """Stub: SigLIP scaled-dot-product attention module."""
    # Untyped in the generated stub; values set by the implementation.
    head_dimension = ...
    batch_size = ...
    num_heads = ...
    def __init__(self) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
    # Reshape helper for multi-head attention; parameters untyped in the
    # stub (pyright noted the return as "-> array").
    @staticmethod
    def reshape_and_transpose(x, batch_size, num_heads, head_dim): # -> array:
        ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_embeddings.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipVisionEmbeddings(nn.Module):
    """Stub: SigLIP patch/position embedding for pixel inputs."""
    # Untyped in the generated stub.
    embed_dim = ...
    image_size = ...
    patch_size = ...
    def __init__(self) -> None: ...
    def __call__(self, pixel_values: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_transformer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class SiglipVisionTransformer(nn.Module):
    """Stub: full SigLIP vision transformer (pixel values -> features)."""
    def __init__(self) -> None: ...
    def __call__(self, pixel_values: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/attention_data.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import PIL.Image
from dataclasses import dataclass
from pathlib import Path
from typing import List
from mflux.models.flux.variants.concept_attention.joint_transformer_block_concept import (
LayerAttentionData,
)
@dataclass
class TimestepAttentionData:
    """Stub: attention captures for a single timestep ``t``."""
    t: int
    # One LayerAttentionData per transformer layer.
    attention_information: List[LayerAttentionData]
    # Stack the per-layer image / concept attention arrays.
    def stack_img_attentions(self) -> mx.array: ...
    def stack_concept_attentions(self) -> mx.array: ...
class GenerationAttentionData:
    """Stub: accumulates TimestepAttentionData across a whole generation."""
    def __init__(self) -> None: ...
    # Append one timestep's captures (return annotation lost by the generator).
    def append(self, timestep_attention: TimestepAttentionData): # -> None:
        ...
    def stack_all_img_attentions(self) -> mx.array: ...
    def stack_all_concept_attentions(self) -> mx.array: ...
@dataclass
class ConceptHeatmap:
    """Stub: rendered concept-attention heatmap with its capture settings."""
    concept: str
    image: PIL.Image.Image
    layer_indices: List[int]
    timesteps: List[int]
    height: int
    width: int
    # Persist the heatmap image; optionally export JSON metadata alongside.
    def save(
        self, path: str | Path, export_json_metadata: bool = ..., overwrite: bool = ...
    ) -> None: ...
    def get_metadata(self) -> dict: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/joint_attention_concept.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class JointAttentionConcept(nn.Module):
    """Stub: joint attention variant that also processes concept embeddings.

    Returns a 5-tuple of arrays; the meaning of each element is not visible
    in this stub — see the mflux implementation.
    """
    def __init__(self) -> None: ...
    def __call__(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        encoder_hidden_states_concept: mx.array,
        image_rotary_emb: mx.array,
        image_rotary_emb_concept: mx.array,
    ) -> tuple[mx.array, mx.array, mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/joint_transformer_block_concept.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from dataclasses import dataclass
from mlx import nn
@dataclass
class LayerAttentionData:
    """Stub: per-layer image/concept attention capture."""
    layer: int
    img_attention: mx.array
    concept_attention: mx.array
    ...
class JointTransformerBlockConcept(nn.Module):
    """Stub: joint transformer block that also returns LayerAttentionData."""
    # ``layer`` is untyped in the generated stub.
    def __init__(self, layer) -> None: ...
    def __call__(
        self,
        layer_idx: int,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        encoder_hidden_states_concept: mx.array,
        text_embeddings: mx.array,
        text_embeddings_concept: mx.array,
        rotary_embeddings: mx.array,
        rotary_embeddings_concept: mx.array,
    ) -> tuple[mx.array, mx.array, mx.array, LayerAttentionData]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/transformer_concept.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.variants.concept_attention.attention_data import (
TimestepAttentionData,
)
class TransformerConcept(nn.Module):
    """Stub: concept-attention variant of the FLUX transformer.

    Forward pass returns the output array plus the timestep's attention
    captures.
    """
    def __init__(
        self,
        model_config: ModelConfig,
        num_transformer_blocks: int = ...,
        num_single_transformer_blocks: int = ...,
    ) -> None: ...
    def __call__(
        self,
        t: int,
        config: Config,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
        prompt_embeds_concept: mx.array,
        pooled_prompt_embeds: mx.array,
        pooled_prompt_embeds_concept: mx.array,
    ) -> tuple[mx.array, TimestepAttentionData]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/controlnet/transformer_controlnet.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from mflux.models.common.config.config import Config
from mflux.models.common.config.model_config import ModelConfig
class TransformerControlnet(nn.Module):
    """Stub: controlnet transformer for FLUX.

    Returns two lists of residual arrays — presumably consumed by
    Transformer.__call__ as ``controlnet_block_samples`` /
    ``controlnet_single_block_samples`` (confirm against the implementation).
    """
    def __init__(
        self,
        model_config: ModelConfig,
        num_transformer_blocks: int = ...,
        num_single_transformer_blocks: int = ...,
    ) -> None: ...
    def __call__(
        self,
        t: int,
        config: Config,
        hidden_states: mx.array,
        prompt_embeds: mx.array,
        pooled_prompt_embeds: mx.array,
        controlnet_condition: mx.array,
    ) -> tuple[list[mx.array], list[mx.array]]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/kontext/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.flux.variants.kontext.flux_kontext import Flux1Kontext
__all__ = ["Flux1Kontext"]
================================================
FILE: .mlx_typings/mflux/models/flux/variants/kontext/flux_kontext.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from pathlib import Path
from typing import Any
from mlx import nn
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.model.flux_text_encoder.clip_encoder.clip_encoder import (
CLIPEncoder,
)
from mflux.models.flux.model.flux_text_encoder.t5_encoder.t5_encoder import T5Encoder
from mflux.models.flux.model.flux_transformer.transformer import Transformer
from mflux.models.flux.model.flux_vae.vae import VAE
from mflux.utils.generated_image import GeneratedImage
class Flux1Kontext(nn.Module):
    """Stub interface for the FLUX.1 Kontext model wrapper."""
    # Sub-models composed by the wrapper.
    vae: VAE
    transformer: Transformer
    t5_text_encoder: T5Encoder
    clip_text_encoder: CLIPEncoder
    # Quantization bit width, if quantized.
    bits: int | None
    # Optional LoRA weights and their scales.
    lora_paths: list[str] | None
    lora_scales: list[float] | None
    # Caches keyed by prompt / tokenizer name; value types opaque here.
    prompt_cache: dict[str, Any]
    tokenizers: dict[str, Any]
    def __init__(
        self,
        quantize: int | None = ...,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        model_config: ModelConfig = ...,
    ) -> None: ...
    # End-to-end image generation; image_path/image_strength condition the
    # output on an existing image.
    def generate_image(
        self,
        seed: int,
        prompt: str,
        num_inference_steps: int = ...,
        height: int = ...,
        width: int = ...,
        guidance: float = ...,
        image_path: Path | str | None = ...,
        image_strength: float | None = ...,
        scheduler: str = ...,
    ) -> GeneratedImage: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/kontext/kontext_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mflux.models.flux.model.flux_vae.vae import VAE
class KontextUtil:
    """Stub: helpers for Kontext image conditioning."""
    # Encode the image at ``image_path`` through the VAE into conditioning
    # latents; the meaning of the two returned arrays is not visible here.
    @staticmethod
    def create_image_conditioning_latents(
        vae: VAE,
        height: int,
        width: int,
        image_path: str,
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/variants/txt2img/flux.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from pathlib import Path
from mlx import nn
from typing import Any
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.flux.model.flux_text_encoder.clip_encoder.clip_encoder import (
CLIPEncoder,
)
from mflux.models.flux.model.flux_text_encoder.t5_encoder.t5_encoder import T5Encoder
from mflux.models.flux.model.flux_transformer.transformer import Transformer
from mflux.models.flux.model.flux_vae.vae import VAE
from mflux.utils.generated_image import GeneratedImage
class Flux1(nn.Module):
    """Stub interface for the FLUX.1 txt2img model wrapper."""
    # Sub-models composed by the wrapper.
    vae: VAE
    transformer: Transformer
    t5_text_encoder: T5Encoder
    clip_text_encoder: CLIPEncoder
    # Quantization bit width, if quantized.
    bits: int | None
    # Optional LoRA weights and their scales.
    lora_paths: list[str] | None
    lora_scales: list[float] | None
    # Caches keyed by prompt / tokenizer name; value types opaque here.
    prompt_cache: dict[str, Any]
    tokenizers: dict[str, Any]
    def __init__(
        self,
        quantize: int | None = ...,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        model_config: ModelConfig = ...,
    ) -> None: ...
    # End-to-end image generation; unlike Flux1Kontext this also accepts a
    # negative prompt.
    def generate_image(
        self,
        seed: int,
        prompt: str,
        num_inference_steps: int = ...,
        height: int = ...,
        width: int = ...,
        guidance: float = ...,
        image_path: Path | str | None = ...,
        image_strength: float | None = ...,
        scheduler: str = ...,
        negative_prompt: str | None = ...,
    ) -> GeneratedImage: ...
    # Factory by registered model name.
    @staticmethod
    def from_name(model_name: str, quantize: int | None = ...) -> Flux1: ...
    def save_model(self, base_path: str) -> None: ...
    # Return annotation lost by the stub generator (pyright noted "-> None").
    def freeze(self, **kwargs): # -> None:
        ...
================================================
FILE: .mlx_typings/mflux/models/flux/weights/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.flux.weights.flux_weight_definition import FluxWeightDefinition
from mflux.models.flux.weights.flux_weight_mapping import FluxWeightMapping
__all__ = ["FluxWeightDefinition", "FluxWeightMapping"]
================================================
FILE: .mlx_typings/mflux/models/flux/weights/flux_lora_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.lora.mapping.lora_mapping import LoRAMapping, LoRATarget
class FluxLoRAMapping(LoRAMapping):
    """Stub: LoRA target mapping for FLUX weights."""
    @staticmethod
    def get_mapping() -> list[LoRATarget]: ...
================================================
FILE: .mlx_typings/mflux/models/flux/weights/flux_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class FluxWeightDefinition:
    """Stub: component/tokenizer/download declarations for FLUX weights."""
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    # Glob-like patterns selecting which files to download (strings only).
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    # ``module`` is untyped in the generated stub.
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
class FluxControlnetWeightDefinition:
    """Stub: weight declarations for the FLUX controlnet variant.

    Mirrors FluxWeightDefinition and adds the controlnet component.
    """
    @staticmethod
    def get_controlnet_component() -> ComponentDefinition: ...
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    # ``module`` is untyped in the generated stub.
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
class FluxReduxWeightDefinition:
    """Stub: weight declarations for the FLUX Redux variant."""
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    # ``module`` is untyped in the generated stub.
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/flux/weights/flux_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class FluxWeightMapping(WeightMapping):
    """Stub: weight-name mappings for each FLUX sub-model."""
    @staticmethod
    def get_transformer_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_controlnet_transformer_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_vae_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_t5_encoder_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_clip_encoder_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/cli/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/latent_creator/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/latent_creator/qwen_latent_creator.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
"""
This type stub file was generated by pyright.
"""
class QwenLatentCreator:
    """Stub: latent construction helpers for the Qwen image model."""
    # Seeded noise latents for a height x width generation.
    @staticmethod
    def create_noise(seed: int, height: int, width: int) -> mx.array: ...
    # Pack/unpack between the spatial latent layout and the packed layout;
    # exact shapes are not visible in this stub.
    @staticmethod
    def pack_latents(
        latents: mx.array, height: int, width: int, num_channels_latents: int = ...
    ) -> mx.array: ...
    @staticmethod
    def unpack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenAttention(nn.Module):
    """Stub: attention layer of the Qwen text encoder.

    Supports grouped key/value heads and RoPE configuration via the
    constructor; behavior is defined in the mflux implementation.
    """
    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        num_key_value_heads: int = ...,
        max_position_embeddings: int = ...,
        rope_theta: float = ...,
        rope_scaling: dict = ...,
    ) -> None: ...
    # position_embeddings is a (cos, sin) pair when provided.
    def __call__(
        self,
        hidden_states: mx.array,
        attention_mask: mx.array | None = ...,
        position_embeddings: tuple[mx.array, mx.array] | None = ...,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenEncoder(nn.Module):
    """Stub: Qwen text encoder with optional image-feature input."""
    def __init__(
        self,
        vocab_size: int = ...,
        hidden_size: int = ...,
        num_hidden_layers: int = ...,
        max_position_embeddings: int = ...,
        rope_theta: float = ...,
    ) -> None: ...
    # Visual features from pixel values; image_grid_thw presumably encodes
    # the (t, h, w) patch grid — confirm against the implementation.
    def get_image_features(
        self, pixel_values: mx.array, image_grid_thw: mx.array
    ) -> mx.array: ...
    def __call__(
        self,
        input_ids: mx.array,
        attention_mask: mx.array,
        pixel_values: mx.array | None = ...,
        image_grid_thw: mx.array | None = ...,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder_layer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenEncoderLayer(nn.Module):
    """Stub: one layer of the Qwen text encoder (attention + MLP config)."""
    def __init__(
        self,
        hidden_size: int = ...,
        num_attention_heads: int = ...,
        num_key_value_heads: int = ...,
        intermediate_size: int = ...,
        rms_norm_eps: float = ...,
        max_position_embeddings: int = ...,
        rope_theta: float = ...,
    ) -> None: ...
    # position_embeddings is a (cos, sin) pair when provided.
    def __call__(
        self,
        hidden_states: mx.array,
        attention_mask: mx.array | None = ...,
        position_embeddings: tuple[mx.array, mx.array] | None = ...,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_mlp.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenMLP(nn.Module):
    """Stub: feed-forward MLP of a Qwen encoder layer."""
    def __init__(self, hidden_size: int, intermediate_size: int) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_patch_merger.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class PatchMerger(nn.Module):
    """Stub: merges spatial vision patches into the text hidden size."""
    def __init__(
        self, context_dim: int, hidden_size: int, spatial_merge_size: int = ...
    ) -> None: ...
    # grid_thw presumably encodes the (t, h, w) patch grid — confirm.
    def __call__(self, x: mx.array, grid_thw: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_prompt_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mflux.models.common.tokenizer import Tokenizer
from mflux.models.qwen.model.qwen_text_encoder.qwen_text_encoder import QwenTextEncoder
"""
This type stub file was generated by pyright.
"""
class QwenPromptEncoder:
    """Stub: prompt encoding helper for the Qwen text encoder."""
    # Encodes prompt and negative prompt; ``prompt_cache`` memoizes the
    # 4-tuple of arrays per prompt string.
    @staticmethod
    def encode_prompt(
        prompt: str,
        negative_prompt: str,
        prompt_cache: dict[str, tuple[mx.array, mx.array, mx.array, mx.array]],
        qwen_tokenizer: Tokenizer,
        qwen_text_encoder: QwenTextEncoder,
    ) -> tuple[mx.array, mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rms_norm.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenRMSNorm(nn.Module):
    """Stub: RMS normalization layer (size and epsilon in the constructor)."""
    def __init__(self, hidden_size: int, eps: float = ...) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rope.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenRotaryEmbedding(nn.Module):
    """Stub: rotary position embedding for Qwen attention.

    Returns a (cos, sin) pair for the given positions; ``config`` is
    untyped in the generated stub.
    """
    def __init__(
        self,
        dim: int,
        max_position_embeddings: int = ...,
        base: float = ...,
        device: str = ...,
        scaling_factor: float = ...,
        rope_type: str = ...,
        config=...,
    ) -> None: ...
    def __call__(
        self, x: mx.array, position_ids: mx.array
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_text_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
"""
This type stub file was generated by pyright.
"""
class QwenTextEncoder(nn.Module):
    """Stub: top-level Qwen text encoder (token ids + mask -> two arrays)."""
    def __init__(self) -> None: ...
    def __call__(
        self, input_ids: mx.array, attention_mask: mx.array
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionAttention(nn.Module):
    """Stub: attention layer of the Qwen vision tower.

    ``position_embeddings`` and ``cu_seqlens`` are untyped in the
    generated stub.
    """
    def __init__(self, embed_dim: int = ..., num_heads: int = ...) -> None: ...
    def __call__(
        self, x: mx.array, position_embeddings=..., cu_seqlens=...
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionBlock(nn.Module):
    """Stub: transformer block of the Qwen vision tower.

    ``position_embeddings`` and ``cu_seqlens`` are untyped in the
    generated stub.
    """
    def __init__(
        self, embed_dim: int = ..., num_heads: int = ..., mlp_ratio: float = ...
    ) -> None: ...
    def __call__(
        self, x: mx.array, position_embeddings=..., cu_seqlens=...
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_language_encoder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenVisionLanguageEncoder(nn.Module):
    """Stub: combined vision-language encoder; ``encoder`` is untyped in
    the generated stub."""
    def __init__(self, encoder=...) -> None: ...
    def __call__(
        self,
        input_ids: mx.array,
        attention_mask: mx.array | None = ...,
        pixel_values: mx.array | None = ...,
        image_grid_thw: mx.array | None = ...,
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_mlp.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionMLP(nn.Module):
    """Stub: feed-forward MLP of the vision tower (dim -> hidden_dim -> dim,
    presumably — confirm in source)."""
    def __init__(self, dim: int, hidden_dim: int) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_patch_embed.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionPatchEmbed(nn.Module):
    """Stub: patch embedding for the vision tower (spatial + temporal patching)."""
    def __init__(
        self,
        patch_size: int = ...,
        temporal_patch_size: int = ...,
        in_channels: int = ...,
        embed_dim: int = ...,
    ) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_rotary_embedding.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionRotaryEmbedding(nn.Module):
    """Stub: rotary position-embedding table for the vision tower; called with
    the maximum grid size."""
    def __init__(self, dim: int, theta: float = ...) -> None: ...
    def __call__(self, max_grid_size: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_transformer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class VisionTransformer(nn.Module):
    """Stub: full Qwen vision transformer (patch embed, windowed attention,
    rotary positions). Parameter defaults are elided by the stub generator."""
    def __init__(
        self,
        patch_size: int = ...,
        temporal_patch_size: int = ...,
        in_channels: int = ...,
        embed_dim: int = ...,
        depth: int = ...,
        num_heads: int = ...,
        mlp_ratio: float = ...,
        hidden_size: int = ...,
        spatial_merge_size: int = ...,
        window_size: int = ...,
        fullatt_block_indexes: list = ...,
    ) -> None: ...
    # Return annotation was not declared in source; pyright inferred
    # tuple[array, array] (see trailing generator comment).
    def get_window_index(self, grid_thw: mx.array): # -> tuple[array, array]:
        ...
    def rot_pos_emb(self, grid_thw: mx.array) -> mx.array: ...
    def __call__(self, pixel_values: mx.array, grid_thw: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_attention.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
class QwenAttention(nn.Module):
    """Stub: joint image/text attention for the Qwen diffusion transformer.

    NOTE(review): the grouping comments below are read off attribute names
    and signatures only — confirm against the mflux implementation.
    """
    # Head configuration (private fields mirror the public ones).
    _num_heads: int
    _head_dim: int
    num_heads: int
    head_dim: int
    # Q/K/V projections for the image (hidden-states) stream.
    to_q: nn.Linear
    to_k: nn.Linear
    to_v: nn.Linear
    # Q/K/V projections for the added (encoder/text) stream.
    add_q_proj: nn.Linear
    add_k_proj: nn.Linear
    add_v_proj: nn.Linear
    # Per-stream query/key RMS norms.
    norm_q: nn.RMSNorm
    norm_k: nn.RMSNorm
    norm_added_q: nn.RMSNorm
    norm_added_k: nn.RMSNorm
    # Output projections (attn_to_out is a list — layout elided by the stub).
    attn_to_out: list[Any]
    to_add_out: nn.Linear
    def __init__(
        self, dim: int = ..., num_heads: int = ..., head_dim: int = ...
    ) -> None: ...
    # Returns one array per stream (image, text).
    def __call__(
        self,
        img_modulated: mx.array,
        txt_modulated: mx.array,
        encoder_hidden_states_mask: mx.array | None,
        image_rotary_emb: tuple[mx.array, mx.array],
        block_idx: int | None = ...,
    ) -> tuple[mx.array, mx.array]: ...
    def _compute_attention_qwen(
        self,
        query: mx.array,
        key: mx.array,
        value: mx.array,
        mask: mx.array | None,
        block_idx: int | None,
    ) -> mx.array: ...
    @staticmethod
    def _convert_mask_for_qwen(
        mask: mx.array | None, joint_seq_len: int, txt_seq_len: int
    ) -> mx.array | None: ...
    # Applies rotary embedding given precomputed cos/sin tables.
    @staticmethod
    def _apply_rope_qwen(
        x: mx.array, cos_vals: mx.array, sin_vals: mx.array
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_feed_forward.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenFeedForward(nn.Module):
    """Stub: feed-forward sublayer of the Qwen transformer block."""
    def __init__(self, dim: int = ...) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_rope.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenEmbedRopeMLX(nn.Module):
    """Stub: rotary position embedder; returns (cos, sin) pairs for the image
    (frame/height/width grid) and text sequences respectively."""
    def __init__(
        self, theta: int, axes_dim: list[int], scale_rope: bool = ...
    ) -> None: ...
    def __call__(
        self,
        video_fhw: tuple[int, int, int] | list[tuple[int, int, int]],
        txt_seq_lens: list[int],
    ) -> tuple[tuple[mx.array, mx.array], tuple[mx.array, mx.array]]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_time_text_embed.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenTimeTextEmbed(nn.Module):
    """Stub: combines a timestep with hidden states into a conditioning array."""
    def __init__(self, timestep_proj_dim: int = ..., inner_dim: int = ...) -> None: ...
    def __call__(self, timestep: mx.array, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_timestep_embedding.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenTimestepEmbedding(nn.Module):
    """Stub: projects a timestep encoding from proj_dim to inner_dim."""
    def __init__(self, proj_dim: int, inner_dim: int) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_timesteps.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenTimesteps(nn.Module):
    """Stub: encodes raw timesteps (sinusoidal, presumably — confirm in source)."""
    def __init__(self, proj_dim: int = ..., scale: float = ...) -> None: ...
    def __call__(self, timesteps: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
from mflux.models.common.config.config import Config
from mflux.models.qwen.model.qwen_transformer.qwen_transformer_block import (
QwenTransformerBlock,
)
class QwenTransformer(nn.Module):
    """Stub: top-level Qwen diffusion transformer.

    Owns input/output projections for the image (img_in/proj_out) and text
    (txt_in/txt_norm) paths, the timestep/text embedder, positional embedder,
    and the stack of QwenTransformerBlock layers.
    """
    transformer_blocks: list[QwenTransformerBlock]
    inner_dim: int
    img_in: nn.Linear
    txt_in: nn.Linear
    txt_norm: nn.RMSNorm
    time_text_embed: Any
    pos_embed: Any
    norm_out: nn.Module
    proj_out: nn.Linear
    def __init__(
        self,
        in_channels: int = ...,
        out_channels: int = ...,
        num_layers: int = ...,
        attention_head_dim: int = ...,
        num_attention_heads: int = ...,
        joint_attention_dim: int = ...,
        patch_size: int = ...,
    ) -> None: ...
    # Denoising step `t` plus the run Config drive the forward pass.
    def __call__(
        self,
        t: int,
        config: Config,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        encoder_hidden_states_mask: mx.array,
        qwen_image_ids: mx.array | None = ...,
        cond_image_grid: tuple[int, int, int] | None = ...,
    ) -> mx.array: ...
    @staticmethod
    def _compute_timestep(t: int | float, config: Config) -> mx.array: ...
    @staticmethod
    def _compute_rotary_embeddings(
        encoder_hidden_states_mask: mx.array,
        pos_embed: Any,
        config: Config,
        cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] | None = ...,
    ) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer_block.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
from typing import Any
from mflux.models.qwen.model.qwen_transformer.qwen_attention import QwenAttention
class QwenTransformerBlock(nn.Module):
    """Stub: one dual-path transformer block — parallel img_*/txt_* modulation,
    norm and feed-forward modules around a shared QwenAttention."""
    attn: QwenAttention
    # Modulation (SiLU + Linear) per path; consumed by _modulate, presumably.
    img_mod_linear: nn.Linear
    img_mod_silu: nn.SiLU
    txt_mod_linear: nn.Linear
    txt_mod_silu: nn.SiLU
    img_norm1: nn.RMSNorm
    txt_norm1: nn.RMSNorm
    img_norm2: nn.RMSNorm
    txt_norm2: nn.RMSNorm
    img_ff: Any
    txt_ff: Any
    def __init__(
        self, dim: int = ..., num_heads: int = ..., head_dim: int = ...
    ) -> None: ...
    # Returns updated (hidden_states, encoder_hidden_states).
    def __call__(
        self,
        hidden_states: mx.array,
        encoder_hidden_states: mx.array,
        encoder_hidden_states_mask: mx.array | None,
        text_embeddings: mx.array,
        image_rotary_emb: tuple[mx.array, mx.array],
        block_idx: int | None = ...,
    ) -> tuple[mx.array, mx.array]: ...
    @staticmethod
    def _modulate(x: mx.array, mod_params: mx.array) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer_rms_norm.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenTransformerRMSNorm(nn.Module):
    """Stub: RMS normalization used inside the Qwen transformer."""
    def __init__(self, dim: int, eps: float = ...) -> None: ...
    def __call__(self, hidden_states: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_attention_block_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageAttentionBlock3D(nn.Module):
    """Stub: 3D attention block of the Qwen VAE."""
    def __init__(self, dim: int) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_causal_conv_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageCausalConv3D(nn.Module):
    """Stub: causal 3D convolution of the Qwen VAE (kernel/stride/padding as ints)."""
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = ...,
        stride: int = ...,
        padding: int = ...,
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_decoder_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageDecoder3D(nn.Module):
    """Stub: VAE decoder (latents -> image space); takes no constructor args."""
    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_down_block_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageDownBlock3D(nn.Module):
    """Stub: encoder down-sampling stage (residual blocks + a downsample mode)."""
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_res_blocks: int = ...,
        downsample_mode: str = ...,
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_encoder_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageEncoder3D(nn.Module):
    """Stub: VAE encoder (image -> latent space); takes no constructor args."""
    def __init__(self) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_mid_block_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageMidBlock3D(nn.Module):
    """Stub: middle block of the VAE with a configurable layer count."""
    def __init__(self, dim: int, num_layers: int = ...) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_res_block_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageResBlock3D(nn.Module):
    """Stub: residual 3D block mapping in_channels -> out_channels."""
    def __init__(self, in_channels: int, out_channels: int) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_resample_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageResample3D(nn.Module):
    """Stub: resampling layer; `mode` (str) selects the resample behavior."""
    def __init__(self, dim: int, mode: str) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_rms_norm.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageRMSNorm(nn.Module):
    """Stub: RMS norm for the VAE; `images` flag presumably toggles an
    image-specific layout — confirm in source."""
    def __init__(
        self, num_channels: int, eps: float = ..., images: bool = ...
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_up_block_3d.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenImageUpBlock3D(nn.Module):
    """Stub: decoder up-sampling stage (residual blocks + an upsample mode)."""
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_res_blocks: int = ...,
        upsample_mode: str = ...,
    ) -> None: ...
    def __call__(self, x: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_vae.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mlx import nn
class QwenVAE(nn.Module):
    """Stub: Qwen image VAE with latent normalization constants (values elided)."""
    LATENTS_MEAN = ...
    LATENTS_STD = ...
    spatial_scale = ...
    latent_channels = ...
    def __init__(self) -> None: ...
    def decode(self, latents: mx.array) -> mx.array: ...
    # NOTE(review): encode() takes a parameter named `latents` even though an
    # encoder's input is presumably pixel data — likely a copy/paste from
    # decode(); confirm against the implementation.
    def encode(self, latents: mx.array) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/qwen_initializer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.config import ModelConfig
class QwenImageInitializer:
    """Stub: in-place initializer for Qwen image models (weights, optional
    quantization, optional LoRA). `model` is untyped in the stub."""
    @staticmethod
    def init(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
    # Same signature as init(); used for the edit variant.
    @staticmethod
    def init_edit(
        model,
        model_config: ModelConfig,
        quantize: int | None,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
    ) -> None: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/tokenizer/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_image_processor.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import numpy as np
from typing import Optional, Union
from PIL import Image
# CLIP normalization constants (values elided by the stub generator).
OPENAI_CLIP_MEAN = ...
OPENAI_CLIP_STD = ...
# Returns a (height, width) pair; the name and parameters suggest rounding to
# a multiple of `factor` within the [min_pixels, max_pixels] budget — confirm
# against the implementation.
def smart_resize(
    height: int,
    width: int,
    factor: int = ...,
    min_pixels: int = ...,
    max_pixels: int = ...,
) -> tuple[int, int]: ...
class QwenImageProcessor:
    """Stub: image preprocessor for the Qwen vision tower.

    preprocess() accepts one PIL image or a list and returns two numpy arrays
    (pixel data and grid info, presumably — confirm in source).
    """
    def __init__(
        self,
        min_pixels: int = ...,
        max_pixels: int = ...,
        patch_size: int = ...,
        temporal_patch_size: int = ...,
        merge_size: int = ...,
        image_mean: Optional[list[float]] = ...,
        image_std: Optional[list[float]] = ...,
    ) -> None: ...
    def preprocess(
        self, images: Union[Image.Image, list[Image.Image]]
    ) -> tuple[np.ndarray, np.ndarray]: ...
    # Pixel bounds default to the instance settings when omitted, presumably.
    def get_number_of_image_patches(
        self,
        height: int,
        width: int,
        min_pixels: Optional[int] = ...,
        max_pixels: Optional[int] = ...,
    ) -> int: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_vision_language_processor.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import Optional, Union
from PIL import Image
from mflux.models.qwen.tokenizer.qwen_image_processor import QwenImageProcessor
class QwenVisionLanguageProcessor:
    """Stub: HF-processor-style wrapper combining a tokenizer (untyped) with a
    QwenImageProcessor; __call__ returns a dict of model inputs."""
    def __init__(
        self,
        tokenizer,
        image_processor: Optional[QwenImageProcessor] = ...,
        image_token: str = ...,
        video_token: str = ...,
    ) -> None: ...
    def __call__(
        self,
        images: Optional[Union[Image.Image, list[Image.Image]]] = ...,
        text: Optional[Union[str, list[str]]] = ...,
        padding: bool = ...,
        return_tensors: Optional[str] = ...,
    ) -> dict: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_vision_language_tokenizer.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import numpy as np
from typing import Union
from PIL import Image
from mflux.models.qwen.tokenizer.qwen_vision_language_processor import (
QwenVisionLanguageProcessor,
)
class QwenVisionLanguageTokenizer:
    """Stub: tokenizer facade over QwenVisionLanguageProcessor.

    tokenize_with_image() returns four mx arrays; tokenize_text_only() returns
    two (ids and mask, presumably — confirm in source).
    """
    def __init__(
        self,
        processor: QwenVisionLanguageProcessor,
        max_length: int = ...,
        use_picture_prefix: bool = ...,
    ) -> None: ...
    def tokenize_with_image(
        self,
        prompt: str,
        image: Union[Image.Image, np.ndarray, str, list],
        vl_width: int | None = ...,
        vl_height: int | None = ...,
    ) -> tuple[mx.array, mx.array, mx.array, mx.array]: ...
    def tokenize_text_only(self, prompt: str) -> tuple[mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/variants/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/models/qwen/variants/edit/qwen_edit_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from mflux.models.common.vae.tiling_config import TilingConfig
class QwenEditUtil:
    """Stub: helper for Qwen image editing; builds conditioning latents from
    reference image(s) via the given VAE (untyped), returning two arrays and
    three ints."""
    @staticmethod
    def create_image_conditioning_latents(
        vae,
        height: int,
        width: int,
        image_paths: list[str] | str,
        vl_width: int | None = ...,
        vl_height: int | None = ...,
        tiling_config: TilingConfig | None = ...,
    ) -> tuple[mx.array, mx.array, int, int, int]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/variants/edit/qwen_image_edit.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from pathlib import Path
from mlx import nn
from typing import Any
from mflux.models.common.config import Config
from mflux.models.common.config.model_config import ModelConfig
from mflux.models.qwen.model.qwen_text_encoder.qwen_text_encoder import QwenTextEncoder
from mflux.models.qwen.model.qwen_transformer.qwen_transformer import QwenTransformer
from mflux.models.qwen.model.qwen_vae.qwen_vae import QwenVAE
from mflux.utils.generated_image import GeneratedImage
class QwenImageEdit(nn.Module):
    """Stub: end-to-end Qwen image-editing pipeline (VAE + transformer + text
    encoder, with optional quantization and LoRA state)."""
    vae: QwenVAE
    transformer: QwenTransformer
    text_encoder: QwenTextEncoder
    # Quantization bit width, if quantized.
    bits: int | None
    lora_paths: list[str] | None
    lora_scales: list[float] | None
    # Caches keyed by string (prompt text / tokenizer name, presumably).
    prompt_cache: dict[str, Any]
    tokenizers: dict[str, Any]
    def __init__(
        self,
        quantize: int | None = ...,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        model_config: ModelConfig = ...,
    ) -> None: ...
    # Edits the images at `image_paths` according to `prompt`.
    def generate_image(
        self,
        seed: int,
        prompt: str,
        image_paths: list[str],
        num_inference_steps: int = ...,
        height: int | None = ...,
        width: int | None = ...,
        guidance: float = ...,
        image_path: Path | str | None = ...,
        scheduler: str = ...,
        negative_prompt: str | None = ...,
    ) -> GeneratedImage: ...
    # Returns four arrays (positive/negative embeddings and masks, presumably).
    def _encode_prompts_with_images(
        self,
        prompt: str,
        negative_prompt: str,
        image_paths: list[str],
        config: Config,
        vl_width: int | None,
        vl_height: int | None,
    ) -> tuple[mx.array, mx.array, mx.array, mx.array]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/variants/txt2img/qwen_image.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
from pathlib import Path
from mlx import nn
from typing import Any
from mflux.models.common.config import ModelConfig
from mflux.models.qwen.model.qwen_text_encoder.qwen_text_encoder import QwenTextEncoder
from mflux.models.qwen.model.qwen_transformer.qwen_transformer import QwenTransformer
from mflux.models.qwen.model.qwen_vae.qwen_vae import QwenVAE
from mflux.utils.generated_image import GeneratedImage
class QwenImage(nn.Module):
    """Stub: Qwen text-to-image pipeline (VAE + transformer + text encoder);
    mirrors QwenImageEdit's attribute layout."""
    vae: QwenVAE
    transformer: QwenTransformer
    text_encoder: QwenTextEncoder
    # Quantization bit width, if quantized.
    bits: int | None
    lora_paths: list[str] | None
    lora_scales: list[float] | None
    prompt_cache: dict[str, Any]
    tokenizers: dict[str, Any]
    def __init__(
        self,
        quantize: int | None = ...,
        model_path: str | None = ...,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        model_config: ModelConfig = ...,
    ) -> None: ...
    def generate_image(
        self,
        seed: int,
        prompt: str,
        num_inference_steps: int = ...,
        height: int = ...,
        width: int = ...,
        guidance: float = ...,
        image_path: Path | str | None = ...,
        image_strength: float | None = ...,
        scheduler: str = ...,
        negative_prompt: str | None = ...,
    ) -> GeneratedImage: ...
    def save_model(self, base_path: str) -> None: ...
    # Classifier-free guidance combination of conditional/unconditional noise.
    @staticmethod
    def compute_guided_noise(
        noise: mx.array, noise_negative: mx.array, guidance: float
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/weights/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.qwen.weights.qwen_weight_definition import QwenWeightDefinition
from mflux.models.qwen.weights.qwen_weight_mapping import QwenWeightMapping
__all__ = ["QwenWeightDefinition", "QwenWeightMapping"]
================================================
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_lora_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.lora.mapping.lora_mapping import LoRAMapping, LoRATarget
class QwenLoRAMapping(LoRAMapping):
    """Stub: Qwen-specific LoRA target mapping."""
    @staticmethod
    def get_mapping() -> List[LoRATarget]: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class QwenWeightDefinition:
    """Stub: declares Qwen model components, tokenizers, download patterns, and
    which modules to quantize."""
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    # True when the module at `path` should be quantized (module is untyped).
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class QwenWeightMapping(WeightMapping):
    """Stub: weight-name mappings per Qwen component (transformer, VAE,
    text encoder) plus the combined mapping."""
    @staticmethod
    def get_transformer_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_vae_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_text_encoder_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/seedvr2/weights/seedvr2_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class SeedVR2WeightDefinition:
    """Stub: SeedVR2 analogue of QwenWeightDefinition (components, tokenizers,
    download patterns, quantization predicate)."""
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/seedvr2/weights/seedvr2_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class SeedVR2WeightMapping(WeightMapping):
    """Stub: weight-name mappings for SeedVR2 (transformer and VAE only)."""
    @staticmethod
    def get_transformer_mapping() -> List[WeightTarget]: ...
    @staticmethod
    def get_vae_mapping() -> List[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/models/z_image/latent_creator/z_image_latent_creator.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
class ZImageLatentCreator:
    """Stub: latent helpers for Z-Image — seeded noise creation plus pack/unpack
    between image and latent layouts."""
    @staticmethod
    def create_noise(seed: int, height: int, width: int) -> mx.array: ...
    @staticmethod
    def pack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
    @staticmethod
    def unpack_latents(latents: mx.array, height: int, width: int) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/models/z_image/weights/z_image_weight_definition.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from typing import List
from mflux.models.common.weights.loading.weight_definition import (
ComponentDefinition,
TokenizerDefinition,
)
"""
This type stub file was generated by pyright.
"""
class ZImageWeightDefinition:
    """Stub: Z-Image analogue of QwenWeightDefinition (components, tokenizers,
    download patterns, quantization predicate)."""
    @staticmethod
    def get_components() -> List[ComponentDefinition]: ...
    @staticmethod
    def get_tokenizers() -> List[TokenizerDefinition]: ...
    @staticmethod
    def get_download_patterns() -> List[str]: ...
    @staticmethod
    def quantization_predicate(path: str, module) -> bool: ...
================================================
FILE: .mlx_typings/mflux/models/z_image/weights/z_image_weight_mapping.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from mflux.models.common.weights.mapping.weight_mapping import (
WeightMapping,
WeightTarget,
)
class ZImageWeightMapping(WeightMapping):
    """Stub: weight-name mappings for Z-Image (text encoder, VAE, transformer).
    Note: uses lowercase `list[...]` unlike the sibling mappings' `List[...]`."""
    @staticmethod
    def get_text_encoder_mapping() -> list[WeightTarget]: ...
    @staticmethod
    def get_vae_mapping() -> list[WeightTarget]: ...
    @staticmethod
    def get_transformer_mapping() -> list[WeightTarget]: ...
================================================
FILE: .mlx_typings/mflux/release/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/utils/__init__.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
================================================
FILE: .mlx_typings/mflux/utils/box_values.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from dataclasses import dataclass
@dataclass
class AbsoluteBoxValues:
    """Stub: box edges resolved to absolute pixel ints."""
    top: int
    right: int
    bottom: int
    left: int
    ...
# Raised on invalid box-value input, presumably — confirm in source.
class BoxValueError(ValueError): ...
@dataclass
class BoxValues:
    """Stub: box edges as ints or strings (e.g. percentages — confirm);
    resolvable to AbsoluteBoxValues for given dimensions."""
    top: int | str
    right: int | str
    bottom: int | str
    left: int | str
    def normalize_to_dimensions(self, width, height) -> AbsoluteBoxValues: ...
    # Parses a delimited string into a BoxValues instance.
    @staticmethod
    def parse(value, delimiter=...) -> BoxValues: ...
================================================
FILE: .mlx_typings/mflux/utils/exceptions.pyi
================================================
"""
This type stub file was generated by pyright.
"""
"""
This type stub file was generated by pyright.
"""
# Root of the mflux exception hierarchy.
class MFluxException(Exception): ...
class ImageSavingException(MFluxException): ...
class MetadataEmbedException(MFluxException): ...
# User-facing errors (input/config problems rather than internal failures).
class MFluxUserException(MFluxException): ...
class PromptFileReadError(MFluxUserException): ...
class StopImageGenerationException(MFluxUserException): ...
class StopTrainingException(MFluxUserException): ...
# Wraps a failed subprocess invocation with its command line and output.
class CommandExecutionError(MFluxException):
    def __init__(
        self, cmd: list[str], return_code: int, stdout: str | None, stderr: str | None
    ) -> None: ...
# Note: the following derive from builtins, not MFluxException.
class ReferenceVsOutputImageError(AssertionError): ...
class ModelConfigError(ValueError): ...
class InvalidBaseModel(ModelConfigError): ...
================================================
FILE: .mlx_typings/mflux/utils/generated_image.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import PIL.Image
from pathlib import Path
from mflux.models.common.config import ModelConfig
from mflux.models.flux.variants.concept_attention.attention_data import ConceptHeatmap
log = ...  # module logger (value elided by the stub generator)
class GeneratedImage:
    """Stub: a rendered PIL image bundled with the metadata of the run that
    produced it; supports saving with optional JSON metadata export."""
    image: PIL.Image.Image
    def __init__(
        self,
        image: PIL.Image.Image,
        model_config: ModelConfig,
        seed: int,
        prompt: str,
        steps: int,
        guidance: float | None,
        precision: mx.Dtype,
        quantization: int,
        generation_time: float,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        height: int | None = ...,
        width: int | None = ...,
        controlnet_image_path: str | Path | None = ...,
        controlnet_strength: float | None = ...,
        image_path: str | Path | None = ...,
        image_paths: list[str] | list[Path] | None = ...,
        image_strength: float | None = ...,
        masked_image_path: str | Path | None = ...,
        depth_image_path: str | Path | None = ...,
        redux_image_paths: list[str] | list[Path] | None = ...,
        redux_image_strengths: list[float] | None = ...,
        concept_heatmap: ConceptHeatmap | None = ...,
        negative_prompt: str | None = ...,
        init_metadata: dict | None = ...,
    ) -> None: ...
    # Returns a new GeneratedImage cropped to the right half, presumably.
    def get_right_half(self) -> GeneratedImage: ...
    def save(
        self, path: str | Path, export_json_metadata: bool = ..., overwrite: bool = ...
    ) -> None: ...
    def save_with_heatmap(
        self, path: str | Path, export_json_metadata: bool = ..., overwrite: bool = ...
    ) -> None: ...
    def save_concept_heatmap(
        self, path: str | Path, export_json_metadata: bool = ..., overwrite: bool = ...
    ) -> None: ...
================================================
FILE: .mlx_typings/mflux/utils/image_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
import mlx.core as mx
import PIL.Image
from pathlib import Path
from typing import Any
from PIL._typing import StrOrBytesPath
from mflux.models.common.config.config import Config
from mflux.models.flux.variants.concept_attention.attention_data import ConceptHeatmap
from mflux.utils.box_values import AbsoluteBoxValues
from mflux.utils.generated_image import GeneratedImage
log = ...  # module logger (value elided by the stub generator)
class ImageUtil:
    """Stub: static image utilities — latent-to-image conversion, loading,
    resizing, border/outpaint mask construction, saving, and model-specific
    preprocessing."""
    # Wraps decoded latents plus run metadata into a GeneratedImage.
    @staticmethod
    def to_image(
        decoded_latents: mx.array,
        config: Config,
        seed: int,
        prompt: str,
        quantization: int,
        generation_time: float,
        lora_paths: list[str] | None = ...,
        lora_scales: list[float] | None = ...,
        controlnet_image_path: str | Path | None = ...,
        image_path: str | Path | None = ...,
        image_paths: list[str] | list[Path] | None = ...,
        redux_image_paths: list[str] | list[Path] | None = ...,
        redux_image_strengths: list[float] | None = ...,
        image_strength: float | None = ...,
        masked_image_path: str | Path | None = ...,
        depth_image_path: str | Path | None = ...,
        concept_heatmap: ConceptHeatmap | None = ...,
        negative_prompt: str | None = ...,
        init_metadata: dict[str, Any] | None = ...,
    ) -> GeneratedImage: ...
    @staticmethod
    def to_composite_image(
        generated_images: list[GeneratedImage],
    ) -> PIL.Image.Image: ...
    @staticmethod
    def to_array(image: PIL.Image.Image, is_mask: bool = ...) -> mx.array: ...
    # Accepts either an already-open PIL image or a path.
    @staticmethod
    def load_image(
        image_or_path: PIL.Image.Image | StrOrBytesPath,
    ) -> PIL.Image.Image: ...
    # Edge amounts accept int | str (absolute or relative — confirm in source).
    @staticmethod
    def expand_image(
        image: PIL.Image.Image,
        box_values: AbsoluteBoxValues | None = ...,
        top: int | str = ...,
        right: int | str = ...,
        bottom: int | str = ...,
        left: int | str = ...,
        fill_color: tuple = ...,
    ) -> PIL.Image.Image: ...
    # Return annotation was omitted in source; pyright inferred Image.
    @staticmethod
    def create_outpaint_mask_image(
        orig_width: int, orig_height: int, **create_bordered_image_kwargs
    ): # -> Image:
    ...
    @staticmethod
    def create_bordered_image(
        orig_width: int,
        orig_height: int,
        border_color: tuple,
        content_color: tuple,
        box_values: AbsoluteBoxValues | None = ...,
        top: int | str = ...,
        right: int | str = ...,
        bottom: int | str = ...,
        left: int | str = ...,
    ) -> PIL.Image.Image: ...
    @staticmethod
    def scale_to_dimensions(
        image: PIL.Image.Image, target_width: int, target_height: int
    ) -> PIL.Image.Image: ...
    @staticmethod
    def save_image(
        image: PIL.Image.Image,
        path: str | Path,
        metadata: dict | None = ...,
        export_json_metadata: bool = ...,
        overwrite: bool = ...,
    ) -> None: ...
    # Resize + normalize to an mx.array for a generic model input.
    @staticmethod
    def preprocess_for_model(
        image: PIL.Image.Image,
        target_size: tuple = ...,
        mean: list = ...,
        std: list = ...,
        resample: int = ...,
    ) -> mx.array: ...
    # Same shape of preprocessing, specialized for the Depth Pro model.
    @staticmethod
    def preprocess_for_depth_pro(
        image: PIL.Image.Image,
        target_size: tuple = ...,
        mean: list = ...,
        std: list = ...,
        resample: int = ...,
    ) -> mx.array: ...
================================================
FILE: .mlx_typings/mflux/utils/metadata_builder.pyi
================================================
"""
This type stub file was generated by pyright.
"""
from pathlib import Path
# Module-level logger (value elided by the stub generator).
log = ...
class MetadataBuilder:
    """Stub interface for embedding generation metadata into image files as
    XMP and IPTC payloads."""
    # Byte cap applied to the prompt field in IPTC records
    # (value elided in this generated stub).
    _IPTC_PROMPT_MAX_BYTES = ...
    @staticmethod
    def embed_metadata(metadata: dict, path: str | Path) -> None:
        """Write ``metadata`` into the image file at ``path``."""
        ...
    @staticmethod
    def build_xmp_packet(metadata: dict) -> str:
        """Render ``metadata`` as an XMP packet string."""
        ...
    @staticmethod
    def build_iptc_binary(metadata: dict) -> bytes:
        """Render ``metadata`` as binary IPTC record data."""
        ...
================================================
FILE: .mlx_typings/mflux/utils/version_util.pyi
================================================
"""
This type stub file was generated by pyright.
"""
# NOTE: the stub generator emitted the module docstring twice here; the
# redundant second copy (a no-op string expression) has been removed.
class VersionUtil:
    """Stub interface for querying the installed mflux package version."""
    @staticmethod
    def get_mflux_version() -> str:
        """Return the installed mflux version string."""
        ...
================================================
FILE: .mlx_typings/mlx/core/__init__.pyi
================================================
import enum
import pathlib
import types
from typing import (
Annotated,
Callable,
Literal,
Mapping,
Sequence,
TypeAlias,
overload,
)
import numpy
from mlx.nn.layers import Module
from numpy.typing import ArrayLike as _ArrayLike
from . import cuda as cuda
from . import distributed as distributed
from . import metal as metal
from . import random as random
class ArrayAt:
    """A helper object to apply updates at specific indices.

    Obtained via ``array.at[...]``; each update method below returns a NEW
    array with the operation applied at the selected indices. Accepted value
    types: Python scalars, ``array``, C-ordered read-only CPU buffers,
    ``complex``, or any ``ArrayLike``.
    """
    def __getitem__(self, indices: object | None) -> ArrayAt: ...
    def add(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
    def subtract(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
    def multiply(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
    def divide(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
    def maximum(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
    def minimum(
        self,
        value: bool
        | int
        | float
        | array
        | Annotated[_ArrayLike, dict(order="C", device="cpu", writable=False)]
        | complex
        | ArrayLike,
    ) -> array: ...
class ArrayIterator:
    """A helper object to iterate over the 1st dimension of an array."""
    def __next__(self) -> array:
        """Return the next slice along axis 0."""
    def __iter__(self) -> ArrayIterator:
        """Return self (iterator protocol)."""
class ArrayLike:
    """
    Any Python object which has an ``__mlx__array__`` method that
    returns an :obj:`array`.
    """
    # Positional-only constructor accepting the wrapped object.
    def __init__(self, arg: object, /) -> None: ...
class Device:
    """A device to run operations on."""
    # NOTE: the parameter name ``type`` shadows the builtin; kept to match
    # the compiled extension's signature.
    def __init__(self, type: DeviceType, index: int = ...) -> None: ...
    @property
    def type(self) -> DeviceType:
        """The device kind (cpu or gpu)."""
    def __repr__(self) -> str: ...
    def __eq__(self, arg: object, /) -> bool: ...
class DeviceType(enum.Enum):
    """Enumeration of device kinds (see :class:`Device`)."""
    cpu = ...  # type: ignore
    gpu = ...  # type: ignore
    def __eq__(self, arg: object, /) -> bool: ...
class Dtype:
    """
    An object to hold the type of a :class:`array`.

    See the :ref:`list of types <data_types>` for more details
    on available data types.
    """
    @property
    def size(self) -> int:
        """Size of the type in bytes."""
    def __repr__(self) -> str: ...
    def __eq__(self, arg: object, /) -> bool: ...
    # Hashable so dtypes can be used as dict keys / set members.
    def __hash__(self) -> int: ...
class DtypeCategory(enum.Enum):
    """
    Type to hold categories of :class:`dtypes <Dtype>`.

    * :attr:`~mlx.core.generic`

      * :ref:`bool_ <data_types>`
      * :attr:`~mlx.core.number`

        * :attr:`~mlx.core.integer`

          * :attr:`~mlx.core.unsignedinteger`

            * :ref:`uint8 <data_types>`
            * :ref:`uint16 <data_types>`
            * :ref:`uint32 <data_types>`
            * :ref:`uint64 <data_types>`

          * :attr:`~mlx.core.signedinteger`

            * :ref:`int8 <data_types>`
            * :ref:`int32 <data_types>`
            * :ref:`int64 <data_types>`

        * :attr:`~mlx.core.inexact`

          * :attr:`~mlx.core.floating`

            * :ref:`float16 <data_types>`
            * :ref:`bfloat16 <data_types>`
            * :ref:`float32 <data_types>`
            * :ref:`float64 <data_types>`

          * :attr:`~mlx.core.complexfloating`

            * :ref:`complex64 <data_types>`

    See also :func:`~mlx.core.issubdtype`.
    """
    # NOTE(review): the listing above omits ``int16`` among the signed
    # integers — likely a stub-generation gap; confirm against the mlx
    # data-types documentation.
    complexfloating = ...
    floating = ...
    inexact = ...
    signedinteger = ...
    unsignedinteger = ...
    integer = ...
    number = ...
    generic = ...
class FunctionExporter:
    """
    A context managing class for exporting multiple traces of the same
    function to a file.

    Make an instance of this class by calling :func:`mx.exporter`.
    """
    def close(self) -> None:
        """Finish writing and close the export file."""
    def __enter__(self) -> FunctionExporter: ...
    def __exit__(
        self,
        exc_type: object | None = ...,
        exc_value: object | None = ...,
        traceback: object | None = ...,
    ) -> None: ...
    # Calling the exporter records one trace of the wrapped function
    # with the given arguments.
    def __call__(self, *args, **kwargs) -> None: ...
class Stream:
    """A stream for running operations on a given device."""
    @property
    def device(self) -> Device:
        """The device this stream runs on."""
    def __repr__(self) -> str: ...
    def __eq__(self, arg: object, /) -> bool: ...
class StreamContext:
    """
    A context manager for setting the current device and stream.

    See :func:`stream` for usage.

    Args:
        s: The stream or device to set as the default.
    """
    def __init__(self, s: Stream | Device) -> None: ...
    def __enter__(self) -> None: ...
    # NOTE: unlike FunctionExporter.__exit__, exc_type here is annotated
    # ``type | None`` — generator inconsistency, behaviorally equivalent.
    def __exit__(
        self,
        exc_type: type | None = ...,
        exc_value: object | None = ...,
        traceback: object | None = ...,
    ) -> None: ...
def device_info() -> dict[str, str | int]:
    """
    Get information about the GPU device and system settings.

    Currently returns:

    * ``architecture``
    * ``max_buffer_size``
    * ``max_recommended_working_set_size``
    * ``memory_size``
    * ``resource_limit``

    Returns:
        dict: A dictionary with string keys and string or integer values.
    """
# NOTE: intentionally shadows the builtin ``abs`` within this module's API.
def abs(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise absolute value.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on;
            defaults to the current default stream.

    Returns:
        array: The absolute value of ``a``.
    """
# NOTE(review): unlike ``abs``/``addmm``, ``stream`` here is not keyword-only
# and there is no positional-only marker — confirm against the compiled
# extension's actual signature.
def add(
    a: scalar | array,
    b: scalar | array,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Element-wise addition.

    Add two arrays with numpy-style broadcasting semantics. Either or both input arrays
    can also be scalars.

    Args:
        a (array): Input array or scalar.
        b (array): Input array or scalar.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The sum of ``a`` and ``b``.
    """
def addmm(
    c: array,
    a: array,
    b: array,
    /,
    alpha: float = ...,
    beta: float = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Matrix multiplication with addition and optional scaling.

    Perform the (possibly batched) matrix multiplication of two arrays and add to the result
    with optional scaling factors.

    Args:
        c (array): Input array or scalar.
        a (array): Input array or scalar.
        b (array): Input array or scalar.
        alpha (float, optional): Scaling factor for the
            matrix product of ``a`` and ``b`` (default: ``1``)
        beta (float, optional): Scaling factor for ``c`` (default: ``1``)

    Returns:
        array: ``alpha * (a @ b) + beta * c``
    """
# NOTE: intentionally shadows the builtin ``all`` within this module's API.
def all(
    a: array,
    /,
    axis: int | Sequence[int] | None = ...,
    keepdims: bool = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    An `and` reduction over the given axes.

    Args:
        a (array): Input array.
        axis (int or list(int), optional): Optional axis or
            axes to reduce over. If unspecified this defaults
            to reducing over the entire array.
        keepdims (bool, optional): Keep reduced axes as
            singleton dimensions, defaults to `False`.

    Returns:
        array: The output array with the corresponding axes reduced.
    """
def allclose(
    a: array,
    b: array,
    /,
    rtol: float = ...,
    atol: float = ...,
    *,
    equal_nan: bool = ...,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Approximate comparison of two arrays.

    Infinite values are considered equal if they have the same sign, NaN values are not equal unless ``equal_nan`` is ``True``.

    The arrays are considered equal if:

    .. code-block::

        all(abs(a - b) <= (atol + rtol * abs(b)))

    Note unlike :func:`array_equal`, this function supports numpy-style
    broadcasting.

    Args:
        a (array): Input array.
        b (array): Input array.
        rtol (float): Relative tolerance.
        atol (float): Absolute tolerance.
        equal_nan (bool): If ``True``, NaNs are considered equal.
            Defaults to ``False``.

    Returns:
        array: The boolean output scalar indicating if the arrays are close.
    """
# NOTE: intentionally shadows the builtin ``any`` within this module's API.
def any(
    a: array,
    /,
    axis: int | Sequence[int] | None = ...,
    keepdims: bool = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    An `or` reduction over the given axes.

    Args:
        a (array): Input array.
        axis (int or list(int), optional): Optional axis or
            axes to reduce over. If unspecified this defaults
            to reducing over the entire array.
        keepdims (bool, optional): Keep reduced axes as
            singleton dimensions, defaults to `False`.

    Returns:
        array: The output array with the corresponding axes reduced.
    """
@overload
def arange(
    start: int | float,
    stop: int | float,
    # NOTE(review): ``step`` is positionally required here although the
    # docstring calls it optional — possibly deliberate, to disambiguate
    # this overload from ``arange(stop, step)``; confirm against mlx docs.
    step: int | float | None,
    dtype: Dtype | None = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Generates ranges of numbers.

    Generate numbers in the half-open interval ``[start, stop)`` in
    increments of ``step``.

    Args:
        start (float or int, optional): Starting value which defaults to ``0``.
        stop (float or int): Stopping value.
        step (float or int, optional): Increment which defaults to ``1``.
        dtype (Dtype, optional): Specifies the data type of the output. If unspecified will default to ``float32`` if any of ``start``, ``stop``, or ``step`` are ``float``. Otherwise will default to ``int32``.

    Returns:
        array: The range of values.

    Note:
        Following the Numpy convention the actual increment used to
        generate numbers is ``dtype(start + step) - dtype(start)``.
        This can lead to unexpected results for example if `start + step`
        is a fractional value and the `dtype` is integral.
    """
# Overload: single-argument form ``arange(stop)`` (start defaults to 0),
# with optional step and dtype.
@overload
def arange(
    stop: int | float,
    step: int | float | None = ...,
    dtype: Dtype | None = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array: ...
def arccos(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse cosine.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse cosine of ``a``.
    """
def arccosh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse hyperbolic cosine.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse hyperbolic cosine of ``a``.
    """
def arcsin(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse sine.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse sine of ``a``.
    """
def arcsinh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse hyperbolic sine.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse hyperbolic sine of ``a``.
    """
def arctan(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse tangent.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse tangent of ``a``.
    """
def arctan2(a: array, b: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse tangent of the ratio of two arrays.

    Args:
        a (array): Input array.
        b (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse tangent of the ratio of ``a`` and ``b``.
    """
def arctanh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
    """
    Element-wise inverse hyperbolic tangent.

    Args:
        a (array): Input array.
        stream (Stream or Device, optional): Stream or device to run on.

    Returns:
        array: The inverse hyperbolic tangent of ``a``.
    """
def argmax(
    a: array,
    /,
    axis: int | None = ...,
    keepdims: bool = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Indices of the maximum values along the axis.

    Args:
        a (array): Input array.
        axis (int, optional): Optional axis to reduce over. If unspecified
            this defaults to reducing over the entire array.
        keepdims (bool, optional): Keep reduced axes as
            singleton dimensions, defaults to `False`.

    Returns:
        array: The ``uint32`` array with the indices of the maximum values.
    """
def argmin(
    a: array,
    /,
    axis: int | None = ...,
    keepdims: bool = ...,
    *,
    stream: Stream | Device | None = ...,
) -> array:
    """
    Indices of the minimum values along the axis.

    Args:
        a (array): Input array.
        axis (int, optional): Optional axis to reduce over. If unspecified
            this defaults to reducing over the entire array.
        keepdims (bool, optional): Keep reduced axes as
            singleton dimensions, defaults to `False`.

    Returns:
        array: The ``uint32`` array with the indices of the minimum values.
    """
def argpartition(
a: array,
/,
kth: int,
axis: int | None = ...,
*,
stream: Stream | Device | None = ...,
) -> array:
"""
Returns the indices that partition the array.
The ordering of the elements within a partition in given by the indices
is undefined.
Args:
a (array): Input array.
kth (int): Element index at the ``kth`` position in the output will
give the sorted position. All indices before the ``kth`` position
will be of elements less or equal to the element at the ``kth``
index and all indices after will be of elements greater or equal
to the element at the ``kth`` index.
axis (int or None, optional): Optional axis to partition over.
If ``None``, this partitions over the flattened array.
If unspecified, it defaults to ``-1``.
Returns:
array
gitextract_vey6weao/
├── .clauderules
├── .cursorrules
├── .envrc
├── .githooks/
│ ├── post-checkout
│ ├── post-commit
│ ├── post-merge
│ └── pre-push
├── .github/
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── actions/
│ │ ├── conditional-commit/
│ │ │ └── action.yml
│ │ ├── format/
│ │ │ └── action.yml
│ │ ├── lint/
│ │ │ └── action.yml
│ │ ├── lint-check/
│ │ │ └── action.yml
│ │ ├── regenerate-protobufs/
│ │ │ └── action.yml
│ │ ├── setup-python-uv/
│ │ │ └── action.yml
│ │ ├── unit-test/
│ │ │ └── action.yml
│ │ └── verify-clean/
│ │ └── action.yml
│ ├── pull_request_template.md
│ └── workflows/
│ ├── build-app.yml
│ └── pipeline.yml
├── .gitignore
├── .mlx_typings/
│ ├── .gitkeep
│ ├── mflux/
│ │ ├── __init__.pyi
│ │ ├── callbacks/
│ │ │ ├── __init__.pyi
│ │ │ ├── callback.pyi
│ │ │ ├── callback_registry.pyi
│ │ │ └── generation_context.pyi
│ │ ├── cli/
│ │ │ ├── __init__.pyi
│ │ │ └── defaults/
│ │ │ └── defaults.pyi
│ │ ├── models/
│ │ │ ├── __init__.pyi
│ │ │ ├── common/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── config/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── config.pyi
│ │ │ │ │ └── model_config.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── latent_creator.pyi
│ │ │ │ ├── lora/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── layer/
│ │ │ │ │ │ ├── fused_linear_lora_layer.pyi
│ │ │ │ │ │ └── linear_lora_layer.pyi
│ │ │ │ │ └── mapping/
│ │ │ │ │ ├── lora_loader.pyi
│ │ │ │ │ ├── lora_mapping.pyi
│ │ │ │ │ ├── lora_saver.pyi
│ │ │ │ │ └── lora_transforms.pyi
│ │ │ │ ├── resolution/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── actions.pyi
│ │ │ │ │ ├── config_resolution.pyi
│ │ │ │ │ ├── lora_resolution.pyi
│ │ │ │ │ ├── path_resolution.pyi
│ │ │ │ │ └── quantization_resolution.pyi
│ │ │ │ ├── schedulers/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── base_scheduler.pyi
│ │ │ │ │ ├── flow_match_euler_discrete_scheduler.pyi
│ │ │ │ │ ├── linear_scheduler.pyi
│ │ │ │ │ └── seedvr2_euler_scheduler.pyi
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── tokenizer.pyi
│ │ │ │ │ ├── tokenizer_loader.pyi
│ │ │ │ │ └── tokenizer_output.pyi
│ │ │ │ ├── vae/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── tiling_config.pyi
│ │ │ │ │ ├── vae_tiler.pyi
│ │ │ │ │ └── vae_util.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── loading/
│ │ │ │ │ ├── loaded_weights.pyi
│ │ │ │ │ ├── weight_applier.pyi
│ │ │ │ │ ├── weight_definition.pyi
│ │ │ │ │ └── weight_loader.pyi
│ │ │ │ ├── mapping/
│ │ │ │ │ ├── weight_mapper.pyi
│ │ │ │ │ ├── weight_mapping.pyi
│ │ │ │ │ └── weight_transforms.pyi
│ │ │ │ └── saving/
│ │ │ │ └── model_saver.pyi
│ │ │ ├── depth_pro/
│ │ │ │ ├── depth_pro_initializer.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── decoder/
│ │ │ │ │ │ ├── feature_fusion_block_2d.pyi
│ │ │ │ │ │ ├── multires_conv_decoder.pyi
│ │ │ │ │ │ └── residual_block.pyi
│ │ │ │ │ ├── depth_pro.pyi
│ │ │ │ │ ├── depth_pro_model.pyi
│ │ │ │ │ ├── depth_pro_util.pyi
│ │ │ │ │ ├── dino_v2/
│ │ │ │ │ │ ├── attention.pyi
│ │ │ │ │ │ ├── dino_vision_transformer.pyi
│ │ │ │ │ │ ├── layer_scale.pyi
│ │ │ │ │ │ ├── mlp.pyi
│ │ │ │ │ │ ├── patch_embed.pyi
│ │ │ │ │ │ └── transformer_block.pyi
│ │ │ │ │ ├── encoder/
│ │ │ │ │ │ ├── depth_pro_encoder.pyi
│ │ │ │ │ │ └── upsample_block.pyi
│ │ │ │ │ └── head/
│ │ │ │ │ └── fov_head.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── depth_pro_weight_definition.pyi
│ │ │ │ └── depth_pro_weight_mapping.pyi
│ │ │ ├── fibo/
│ │ │ │ ├── latent_creator/
│ │ │ │ │ └── fibo_latent_creator.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── fibo_weight_definition.pyi
│ │ │ │ └── fibo_weight_mapping.pyi
│ │ │ ├── fibo_vlm/
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── qwen2vl_image_processor.pyi
│ │ │ │ │ └── qwen2vl_processor.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── fibo_vlm_weight_definition.pyi
│ │ │ │ └── fibo_vlm_weight_mapping.pyi
│ │ │ ├── flux/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── flux_initializer.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── flux_latent_creator.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── flux_text_encoder/
│ │ │ │ │ │ ├── clip_encoder/
│ │ │ │ │ │ │ ├── clip_embeddings.pyi
│ │ │ │ │ │ │ ├── clip_encoder.pyi
│ │ │ │ │ │ │ ├── clip_encoder_layer.pyi
│ │ │ │ │ │ │ ├── clip_mlp.pyi
│ │ │ │ │ │ │ ├── clip_sdpa_attention.pyi
│ │ │ │ │ │ │ ├── clip_text_model.pyi
│ │ │ │ │ │ │ └── encoder_clip.pyi
│ │ │ │ │ │ ├── prompt_encoder.pyi
│ │ │ │ │ │ └── t5_encoder/
│ │ │ │ │ │ ├── t5_attention.pyi
│ │ │ │ │ │ ├── t5_block.pyi
│ │ │ │ │ │ ├── t5_dense_relu_dense.pyi
│ │ │ │ │ │ ├── t5_encoder.pyi
│ │ │ │ │ │ ├── t5_feed_forward.pyi
│ │ │ │ │ │ ├── t5_layer_norm.pyi
│ │ │ │ │ │ └── t5_self_attention.pyi
│ │ │ │ │ ├── flux_transformer/
│ │ │ │ │ │ ├── ada_layer_norm_continuous.pyi
│ │ │ │ │ │ ├── ada_layer_norm_zero.pyi
│ │ │ │ │ │ ├── ada_layer_norm_zero_single.pyi
│ │ │ │ │ │ ├── common/
│ │ │ │ │ │ │ └── attention_utils.pyi
│ │ │ │ │ │ ├── embed_nd.pyi
│ │ │ │ │ │ ├── feed_forward.pyi
│ │ │ │ │ │ ├── guidance_embedder.pyi
│ │ │ │ │ │ ├── joint_attention.pyi
│ │ │ │ │ │ ├── joint_transformer_block.pyi
│ │ │ │ │ │ ├── single_block_attention.pyi
│ │ │ │ │ │ ├── single_transformer_block.pyi
│ │ │ │ │ │ ├── text_embedder.pyi
│ │ │ │ │ │ ├── time_text_embed.pyi
│ │ │ │ │ │ ├── timestep_embedder.pyi
│ │ │ │ │ │ └── transformer.pyi
│ │ │ │ │ ├── flux_vae/
│ │ │ │ │ │ ├── common/
│ │ │ │ │ │ │ ├── attention.pyi
│ │ │ │ │ │ │ ├── resnet_block_2d.pyi
│ │ │ │ │ │ │ └── unet_mid_block.pyi
│ │ │ │ │ │ ├── decoder/
│ │ │ │ │ │ │ ├── conv_in.pyi
│ │ │ │ │ │ │ ├── conv_norm_out.pyi
│ │ │ │ │ │ │ ├── conv_out.pyi
│ │ │ │ │ │ │ ├── decoder.pyi
│ │ │ │ │ │ │ ├── up_block_1_or_2.pyi
│ │ │ │ │ │ │ ├── up_block_3.pyi
│ │ │ │ │ │ │ ├── up_block_4.pyi
│ │ │ │ │ │ │ └── up_sampler.pyi
│ │ │ │ │ │ ├── encoder/
│ │ │ │ │ │ │ ├── conv_in.pyi
│ │ │ │ │ │ │ ├── conv_norm_out.pyi
│ │ │ │ │ │ │ ├── conv_out.pyi
│ │ │ │ │ │ │ ├── down_block_1.pyi
│ │ │ │ │ │ │ ├── down_block_2.pyi
│ │ │ │ │ │ │ ├── down_block_3.pyi
│ │ │ │ │ │ │ ├── down_block_4.pyi
│ │ │ │ │ │ │ ├── down_sampler.pyi
│ │ │ │ │ │ │ └── encoder.pyi
│ │ │ │ │ │ └── vae.pyi
│ │ │ │ │ ├── redux_encoder/
│ │ │ │ │ │ └── redux_encoder.pyi
│ │ │ │ │ └── siglip_vision_transformer/
│ │ │ │ │ ├── siglip_encoder.pyi
│ │ │ │ │ ├── siglip_encoder_layer.pyi
│ │ │ │ │ ├── siglip_mlp.pyi
│ │ │ │ │ ├── siglip_multi_head_attention_pooling_head.pyi
│ │ │ │ │ ├── siglip_sdpa_attention.pyi
│ │ │ │ │ ├── siglip_vision_embeddings.pyi
│ │ │ │ │ └── siglip_vision_transformer.pyi
│ │ │ │ ├── variants/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── concept_attention/
│ │ │ │ │ │ ├── attention_data.pyi
│ │ │ │ │ │ ├── joint_attention_concept.pyi
│ │ │ │ │ │ ├── joint_transformer_block_concept.pyi
│ │ │ │ │ │ └── transformer_concept.pyi
│ │ │ │ │ ├── controlnet/
│ │ │ │ │ │ └── transformer_controlnet.pyi
│ │ │ │ │ ├── kontext/
│ │ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ │ ├── flux_kontext.pyi
│ │ │ │ │ │ └── kontext_util.pyi
│ │ │ │ │ └── txt2img/
│ │ │ │ │ └── flux.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── flux_lora_mapping.pyi
│ │ │ │ ├── flux_weight_definition.pyi
│ │ │ │ └── flux_weight_mapping.pyi
│ │ │ ├── qwen/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── cli/
│ │ │ │ │ └── __init__.pyi
│ │ │ │ ├── latent_creator/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ └── qwen_latent_creator.pyi
│ │ │ │ ├── model/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── qwen_text_encoder/
│ │ │ │ │ │ ├── qwen_attention.pyi
│ │ │ │ │ │ ├── qwen_encoder.pyi
│ │ │ │ │ │ ├── qwen_encoder_layer.pyi
│ │ │ │ │ │ ├── qwen_mlp.pyi
│ │ │ │ │ │ ├── qwen_patch_merger.pyi
│ │ │ │ │ │ ├── qwen_prompt_encoder.pyi
│ │ │ │ │ │ ├── qwen_rms_norm.pyi
│ │ │ │ │ │ ├── qwen_rope.pyi
│ │ │ │ │ │ ├── qwen_text_encoder.pyi
│ │ │ │ │ │ ├── qwen_vision_attention.pyi
│ │ │ │ │ │ ├── qwen_vision_block.pyi
│ │ │ │ │ │ ├── qwen_vision_language_encoder.pyi
│ │ │ │ │ │ ├── qwen_vision_mlp.pyi
│ │ │ │ │ │ ├── qwen_vision_patch_embed.pyi
│ │ │ │ │ │ ├── qwen_vision_rotary_embedding.pyi
│ │ │ │ │ │ └── qwen_vision_transformer.pyi
│ │ │ │ │ ├── qwen_transformer/
│ │ │ │ │ │ ├── qwen_attention.pyi
│ │ │ │ │ │ ├── qwen_feed_forward.pyi
│ │ │ │ │ │ ├── qwen_rope.pyi
│ │ │ │ │ │ ├── qwen_time_text_embed.pyi
│ │ │ │ │ │ ├── qwen_timestep_embedding.pyi
│ │ │ │ │ │ ├── qwen_timesteps.pyi
│ │ │ │ │ │ ├── qwen_transformer.pyi
│ │ │ │ │ │ ├── qwen_transformer_block.pyi
│ │ │ │ │ │ └── qwen_transformer_rms_norm.pyi
│ │ │ │ │ └── qwen_vae/
│ │ │ │ │ ├── qwen_image_attention_block_3d.pyi
│ │ │ │ │ ├── qwen_image_causal_conv_3d.pyi
│ │ │ │ │ ├── qwen_image_decoder_3d.pyi
│ │ │ │ │ ├── qwen_image_down_block_3d.pyi
│ │ │ │ │ ├── qwen_image_encoder_3d.pyi
│ │ │ │ │ ├── qwen_image_mid_block_3d.pyi
│ │ │ │ │ ├── qwen_image_res_block_3d.pyi
│ │ │ │ │ ├── qwen_image_resample_3d.pyi
│ │ │ │ │ ├── qwen_image_rms_norm.pyi
│ │ │ │ │ ├── qwen_image_up_block_3d.pyi
│ │ │ │ │ └── qwen_vae.pyi
│ │ │ │ ├── qwen_initializer.pyi
│ │ │ │ ├── tokenizer/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── qwen_image_processor.pyi
│ │ │ │ │ ├── qwen_vision_language_processor.pyi
│ │ │ │ │ └── qwen_vision_language_tokenizer.pyi
│ │ │ │ ├── variants/
│ │ │ │ │ ├── __init__.pyi
│ │ │ │ │ ├── edit/
│ │ │ │ │ │ ├── qwen_edit_util.pyi
│ │ │ │ │ │ └── qwen_image_edit.pyi
│ │ │ │ │ └── txt2img/
│ │ │ │ │ └── qwen_image.pyi
│ │ │ │ └── weights/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── qwen_lora_mapping.pyi
│ │ │ │ ├── qwen_weight_definition.pyi
│ │ │ │ └── qwen_weight_mapping.pyi
│ │ │ ├── seedvr2/
│ │ │ │ └── weights/
│ │ │ │ ├── seedvr2_weight_definition.pyi
│ │ │ │ └── seedvr2_weight_mapping.pyi
│ │ │ └── z_image/
│ │ │ ├── latent_creator/
│ │ │ │ └── z_image_latent_creator.pyi
│ │ │ └── weights/
│ │ │ ├── z_image_weight_definition.pyi
│ │ │ └── z_image_weight_mapping.pyi
│ │ ├── release/
│ │ │ └── __init__.pyi
│ │ └── utils/
│ │ ├── __init__.pyi
│ │ ├── box_values.pyi
│ │ ├── exceptions.pyi
│ │ ├── generated_image.pyi
│ │ ├── image_util.pyi
│ │ ├── metadata_builder.pyi
│ │ └── version_util.pyi
│ ├── mlx/
│ │ ├── core/
│ │ │ ├── __init__.pyi
│ │ │ ├── cuda/
│ │ │ │ └── __init__.pyi
│ │ │ ├── distributed/
│ │ │ │ └── __init__.pyi
│ │ │ ├── metal/
│ │ │ │ └── __init__.pyi
│ │ │ └── random/
│ │ │ └── __init__.pyi
│ │ ├── nn/
│ │ │ ├── __init__.pyi
│ │ │ ├── init.pyi
│ │ │ ├── layers/
│ │ │ │ ├── __init__.pyi
│ │ │ │ ├── activations.pyi
│ │ │ │ ├── base.pyi
│ │ │ │ ├── containers.pyi
│ │ │ │ ├── convolution.pyi
│ │ │ │ ├── convolution_transpose.pyi
│ │ │ │ ├── distributed.pyi
│ │ │ │ ├── dropout.pyi
│ │ │ │ ├── embedding.pyi
│ │ │ │ ├── linear.pyi
│ │ │ │ ├── normalization.pyi
│ │ │ │ ├── pooling.pyi
│ │ │ │ ├── positional_encoding.pyi
│ │ │ │ ├── quantized.pyi
│ │ │ │ ├── recurrent.pyi
│ │ │ │ ├── transformer.pyi
│ │ │ │ └── upsample.pyi
│ │ │ ├── losses.pyi
│ │ │ └── utils.pyi
│ │ └── utils.pyi
│ └── mlx_lm/
│ ├── __init__.pyi
│ ├── convert.pyi
│ ├── generate.pyi
│ ├── models/
│ │ ├── __init__.pyi
│ │ ├── base.pyi
│ │ ├── bitlinear_layers.pyi
│ │ ├── cache.pyi
│ │ ├── deepseek_v3.pyi
│ │ ├── glm4_moe.pyi
│ │ ├── glm_moe_dsa.pyi
│ │ ├── nemotron_h.pyi
│ │ ├── qwen3_5.pyi
│ │ ├── qwen3_5_moe.pyi
│ │ ├── qwen3_next.pyi
│ │ ├── step3p5.pyi
│ │ └── switch_layers.pyi
│ ├── sample_utils.pyi
│ ├── tokenizer_utils.pyi
│ └── utils.pyi
├── .python-version
├── .swift-format
├── .vscode/
│ ├── extensions.json
│ └── settings.json
├── .zed/
│ └── settings.json
├── AGENTS.md
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE
├── MISSED_THINGS.md
├── PLATFORMS.md
├── README.md
├── RULES.md
├── TODO.md
├── app/
│ └── EXO/
│ ├── EXO/
│ │ ├── Assets.xcassets/
│ │ │ ├── AccentColor.colorset/
│ │ │ │ └── Contents.json
│ │ │ ├── AppIcon.appiconset/
│ │ │ │ └── Contents.json
│ │ │ ├── Contents.json
│ │ │ └── menubar-icon.imageset/
│ │ │ └── Contents.json
│ │ ├── ContentView.swift
│ │ ├── EXO.entitlements
│ │ ├── EXOApp.swift
│ │ ├── ExoProcessController.swift
│ │ ├── Info.plist
│ │ ├── Models/
│ │ │ └── ClusterState.swift
│ │ ├── Preview Content/
│ │ │ └── Preview Assets.xcassets/
│ │ │ └── Contents.json
│ │ ├── Services/
│ │ │ ├── BugReportService.swift
│ │ │ ├── ClusterStateService.swift
│ │ │ ├── LocalNetworkChecker.swift
│ │ │ ├── NetworkSetupHelper.swift
│ │ │ ├── NetworkStatusService.swift
│ │ │ ├── ThunderboltBridgeDetector.swift
│ │ │ └── ThunderboltBridgeService.swift
│ │ ├── ViewModels/
│ │ │ ├── InstanceViewModel.swift
│ │ │ └── NodeViewModel.swift
│ │ ├── Views/
│ │ │ ├── FirstLaunchPopout.swift
│ │ │ ├── InstanceRowView.swift
│ │ │ ├── NodeDetailView.swift
│ │ │ ├── NodeRowView.swift
│ │ │ ├── SettingsView.swift
│ │ │ ├── SettingsWindowController.swift
│ │ │ └── TopologyMiniView.swift
│ │ └── main.swift
│ ├── EXO.xcodeproj/
│ │ ├── project.pbxproj
│ │ ├── project.xcworkspace/
│ │ │ ├── contents.xcworkspacedata
│ │ │ └── xcshareddata/
│ │ │ └── swiftpm/
│ │ │ └── Package.resolved
│ │ └── xcshareddata/
│ │ └── xcschemes/
│ │ └── EXO.xcscheme
│ ├── EXOTests/
│ │ └── EXOTests.swift
│ ├── EXOUITests/
│ │ ├── EXOUITests.swift
│ │ └── EXOUITestsLaunchTests.swift
│ └── uninstall-exo.sh
├── bench/
│ ├── bench.toml
│ ├── eval_configs/
│ │ └── models.toml
│ ├── eval_tool_calls.py
│ ├── exo_bench.py
│ ├── exo_eval.py
│ ├── harness.py
│ ├── parallel_requests.py
│ ├── pyproject.toml
│ ├── scenarios.toml
│ ├── single-m3-ultra.toml
│ ├── src/
│ │ └── exo_bench/
│ │ └── __init__.py
│ └── vendor/
│ ├── __init__.py
│ └── lcb_testing_util.py
├── dashboard/
│ ├── dashboard.nix
│ ├── package.json
│ ├── parts.nix
│ ├── src/
│ │ ├── app.css
│ │ ├── app.d.ts
│ │ ├── app.html
│ │ ├── lib/
│ │ │ ├── components/
│ │ │ │ ├── ChatAttachments.svelte
│ │ │ │ ├── ChatForm.svelte
│ │ │ │ ├── ChatMessages.svelte
│ │ │ │ ├── ChatModelSelector.svelte
│ │ │ │ ├── ChatSidebar.svelte
│ │ │ │ ├── ConnectionBanner.svelte
│ │ │ │ ├── DeviceIcon.svelte
│ │ │ │ ├── FamilyLogos.svelte
│ │ │ │ ├── FamilySidebar.svelte
│ │ │ │ ├── HeaderNav.svelte
│ │ │ │ ├── HuggingFaceResultItem.svelte
│ │ │ │ ├── ImageLightbox.svelte
│ │ │ │ ├── ImageParamsPanel.svelte
│ │ │ │ ├── MarkdownContent.svelte
│ │ │ │ ├── ModelCard.svelte
│ │ │ │ ├── ModelFilterPopover.svelte
│ │ │ │ ├── ModelPickerGroup.svelte
│ │ │ │ ├── ModelPickerModal.svelte
│ │ │ │ ├── PrefillProgressBar.svelte
│ │ │ │ ├── ToastContainer.svelte
│ │ │ │ ├── TokenHeatmap.svelte
│ │ │ │ ├── TopologyGraph.svelte
│ │ │ │ └── index.ts
│ │ │ ├── stores/
│ │ │ │ ├── app.svelte.ts
│ │ │ │ ├── favorites.svelte.ts
│ │ │ │ ├── recents.svelte.ts
│ │ │ │ └── toast.svelte.ts
│ │ │ ├── types/
│ │ │ │ └── files.ts
│ │ │ └── utils/
│ │ │ └── downloads.ts
│ │ └── routes/
│ │ ├── +layout.svelte
│ │ ├── +page.svelte
│ │ ├── downloads/
│ │ │ └── +page.svelte
│ │ └── traces/
│ │ ├── +page.svelte
│ │ └── [taskId]/
│ │ └── +page.svelte
│ ├── svelte.config.js
│ ├── tsconfig.json
│ └── vite.config.ts
├── docs/
│ ├── api.md
│ └── architecture.md
├── flake.nix
├── justfile
├── nix/
│ ├── apple-sdk/
│ │ └── metadata/
│ │ └── versions.json
│ ├── apple-sdk-overlay.nix
│ ├── darwin-build-fixes.patch
│ ├── metal-toolchain.nix
│ └── mlx.nix
├── packaging/
│ ├── dmg/
│ │ ├── create-dmg.sh
│ │ └── generate-background.py
│ └── pyinstaller/
│ └── exo.spec
├── pyproject.toml
├── python/
│ └── parts.nix
├── resources/
│ ├── image_model_cards/
│ │ ├── exolabs--FLUX.1-Kontext-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-Kontext-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-Kontext-dev.toml
│ │ ├── exolabs--FLUX.1-Krea-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-Krea-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-Krea-dev.toml
│ │ ├── exolabs--FLUX.1-dev-4bit.toml
│ │ ├── exolabs--FLUX.1-dev-8bit.toml
│ │ ├── exolabs--FLUX.1-dev.toml
│ │ ├── exolabs--FLUX.1-schnell-4bit.toml
│ │ ├── exolabs--FLUX.1-schnell-8bit.toml
│ │ ├── exolabs--FLUX.1-schnell.toml
│ │ ├── exolabs--Qwen-Image-4bit.toml
│ │ ├── exolabs--Qwen-Image-8bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509-4bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509-8bit.toml
│ │ ├── exolabs--Qwen-Image-Edit-2509.toml
│ │ └── exolabs--Qwen-Image.toml
│ └── inference_model_cards/
│ ├── mlx-community--DeepSeek-V3.1-4bit.toml
│ ├── mlx-community--DeepSeek-V3.1-8bit.toml
│ ├── mlx-community--GLM-4.5-Air-8bit.toml
│ ├── mlx-community--GLM-4.5-Air-bf16.toml
│ ├── mlx-community--GLM-4.7-4bit.toml
│ ├── mlx-community--GLM-4.7-6bit.toml
│ ├── mlx-community--GLM-4.7-8bit-gs32.toml
│ ├── mlx-community--GLM-4.7-Flash-4bit.toml
│ ├── mlx-community--GLM-4.7-Flash-5bit.toml
│ ├── mlx-community--GLM-4.7-Flash-6bit.toml
│ ├── mlx-community--GLM-4.7-Flash-8bit.toml
│ ├── mlx-community--GLM-5-8bit.toml
│ ├── mlx-community--GLM-5-MXFP4-Q8.toml
│ ├── mlx-community--GLM-5-bf16.toml
│ ├── mlx-community--Kimi-K2-Instruct-4bit.toml
│ ├── mlx-community--Kimi-K2-Thinking.toml
│ ├── mlx-community--Kimi-K2.5.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-4bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-8bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-70B-Instruct-HF-bf16.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-4bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-8bit.toml
│ ├── mlx-community--Llama-3.1-Nemotron-Nano-4B-v1.1-bf16.toml
│ ├── mlx-community--Llama-3.2-1B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.2-3B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.2-3B-Instruct-8bit.toml
│ ├── mlx-community--Llama-3.3-70B-Instruct-4bit.toml
│ ├── mlx-community--Llama-3.3-70B-Instruct-8bit.toml
│ ├── mlx-community--Meta-Llama-3.1-70B-Instruct-4bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-4bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-8bit.toml
│ ├── mlx-community--Meta-Llama-3.1-8B-Instruct-bf16.toml
│ ├── mlx-community--MiniMax-M2.1-3bit.toml
│ ├── mlx-community--MiniMax-M2.1-8bit.toml
│ ├── mlx-community--MiniMax-M2.5-4bit.toml
│ ├── mlx-community--MiniMax-M2.5-6bit.toml
│ ├── mlx-community--MiniMax-M2.5-8bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-4Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-5Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-6Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-8Bit.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-BF16.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-MLX-MXFP4.toml
│ ├── mlx-community--NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4.toml
│ ├── mlx-community--NVIDIA-Nemotron-Nano-9B-v2-4bits.toml
│ ├── mlx-community--NVIDIA-Nemotron-Nano-9B-v2-6bit.toml
│ ├── mlx-community--Qwen3-0.6B-4bit.toml
│ ├── mlx-community--Qwen3-0.6B-8bit.toml
│ ├── mlx-community--Qwen3-235B-A22B-Instruct-2507-4bit.toml
│ ├── mlx-community--Qwen3-235B-A22B-Instruct-2507-8bit.toml
│ ├── mlx-community--Qwen3-30B-A3B-4bit.toml
│ ├── mlx-community--Qwen3-30B-A3B-8bit.toml
│ ├── mlx-community--Qwen3-Coder-480B-A35B-Instruct-4bit.toml
│ ├── mlx-community--Qwen3-Coder-480B-A35B-Instruct-8bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-4bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-5bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-6bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-8bit.toml
│ ├── mlx-community--Qwen3-Coder-Next-bf16.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Instruct-4bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Instruct-8bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Thinking-4bit.toml
│ ├── mlx-community--Qwen3-Next-80B-A3B-Thinking-8bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-4bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-6bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-8bit.toml
│ ├── mlx-community--Qwen3.5-122B-A10B-bf16.toml
│ ├── mlx-community--Qwen3.5-27B-4bit.toml
│ ├── mlx-community--Qwen3.5-27B-8bit.toml
│ ├── mlx-community--Qwen3.5-2B-MLX-8bit.toml
│ ├── mlx-community--Qwen3.5-35B-A3B-4bit.toml
│ ├── mlx-community--Qwen3.5-35B-A3B-8bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-4bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-6bit.toml
│ ├── mlx-community--Qwen3.5-397B-A17B-8bit.toml
│ ├── mlx-community--Qwen3.5-9B-4bit.toml
│ ├── mlx-community--Qwen3.5-9B-8bit.toml
│ ├── mlx-community--Step-3.5-Flash-4bit.toml
│ ├── mlx-community--Step-3.5-Flash-6bit.toml
│ ├── mlx-community--Step-3.5-Flash-8Bit.toml
│ ├── mlx-community--gpt-oss-120b-MXFP4-Q8.toml
│ ├── mlx-community--gpt-oss-20b-MXFP4-Q8.toml
│ └── mlx-community--llama-3.3-70b-instruct-fp16.toml
├── rust/
│ ├── exo_pyo3_bindings/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── exo_pyo3_bindings.pyi
│ │ ├── pyproject.toml
│ │ ├── src/
│ │ │ ├── allow_threading.rs
│ │ │ ├── bin/
│ │ │ │ └── stub_gen.rs
│ │ │ ├── ident.rs
│ │ │ ├── lib.rs
│ │ │ └── networking.rs
│ │ └── tests/
│ │ ├── dummy.rs
│ │ └── test_python.py
│ ├── networking/
│ │ ├── Cargo.toml
│ │ ├── examples/
│ │ │ └── chatroom.rs
│ │ ├── src/
│ │ │ ├── RESEARCH_NOTES.txt
│ │ │ ├── discovery.rs
│ │ │ ├── lib.rs
│ │ │ └── swarm.rs
│ │ └── tests/
│ │ └── dummy.rs
│ ├── parts.nix
│ └── util/
│ ├── Cargo.toml
│ └── src/
│ ├── lib.rs
│ └── wakerdeque.rs
├── scripts/
│ └── fetch_kv_heads.py
├── src/
│ └── exo/
│ ├── __init__.py
│ ├── __main__.py
│ ├── api/
│ │ ├── __init__.py
│ │ ├── adapters/
│ │ │ ├── __init__.py
│ │ │ ├── chat_completions.py
│ │ │ ├── claude.py
│ │ │ ├── ollama.py
│ │ │ └── responses.py
│ │ ├── main.py
│ │ ├── tests/
│ │ │ ├── test_api_error_handling.py
│ │ │ ├── test_cancel_command.py
│ │ │ ├── test_claude_api.py
│ │ │ ├── test_claude_tool_use.py
│ │ │ └── test_openai_responses_api.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── api.py
│ │ ├── claude_api.py
│ │ ├── ollama_api.py
│ │ └── openai_responses.py
│ ├── download/
│ │ ├── coordinator.py
│ │ ├── download_utils.py
│ │ ├── huggingface_utils.py
│ │ ├── impl_shard_downloader.py
│ │ ├── shard_downloader.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_download_verification.py
│ │ ├── test_offline_mode.py
│ │ └── test_re_download.py
│ ├── main.py
│ ├── master/
│ │ ├── __init__.py
│ │ ├── image_store.py
│ │ ├── main.py
│ │ ├── placement.py
│ │ ├── placement_utils.py
│ │ └── tests/
│ │ ├── conftest.py
│ │ ├── test_master.py
│ │ ├── test_placement.py
│ │ ├── test_placement_utils.py
│ │ └── test_topology.py
│ ├── routing/
│ │ ├── __init__.py
│ │ ├── connection_message.py
│ │ ├── event_router.py
│ │ ├── router.py
│ │ ├── tests/
│ │ │ └── test_event_buffer.py
│ │ └── topics.py
│ ├── shared/
│ │ ├── __init__.py
│ │ ├── apply.py
│ │ ├── constants.py
│ │ ├── election.py
│ │ ├── logging.py
│ │ ├── models/
│ │ │ └── model_cards.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── conftest.py
│ │ │ ├── test_apply/
│ │ │ │ ├── test_apply_node_download.py
│ │ │ │ └── test_apply_runner_deleted.py
│ │ │ ├── test_election.py
│ │ │ ├── test_node_id_persistence.py
│ │ │ ├── test_resolve_reasoning_params.py
│ │ │ ├── test_state_serialization.py
│ │ │ └── test_xdg_paths.py
│ │ ├── topology.py
│ │ ├── tracing.py
│ │ └── types/
│ │ ├── __init__.py
│ │ ├── chunks.py
│ │ ├── commands.py
│ │ ├── common.py
│ │ ├── events.py
│ │ ├── memory.py
│ │ ├── mlx.py
│ │ ├── multiaddr.py
│ │ ├── profiling.py
│ │ ├── state.py
│ │ ├── tasks.py
│ │ ├── text_generation.py
│ │ ├── thunderbolt.py
│ │ ├── topology.py
│ │ └── worker/
│ │ ├── downloads.py
│ │ ├── instances.py
│ │ ├── runner_response.py
│ │ ├── runners.py
│ │ └── shards.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── banner.py
│ │ ├── channels.py
│ │ ├── dashboard_path.py
│ │ ├── disk_event_log.py
│ │ ├── event_buffer.py
│ │ ├── fs.py
│ │ ├── info_gatherer/
│ │ │ ├── __init__.py
│ │ │ ├── info_gatherer.py
│ │ │ ├── macmon.py
│ │ │ ├── net_profile.py
│ │ │ ├── system_info.py
│ │ │ └── tests/
│ │ │ └── test_tb_parsing.py
│ │ ├── keyed_backoff.py
│ │ ├── phantom.py
│ │ ├── power_sampler.py
│ │ ├── pydantic_ext.py
│ │ ├── reactive.py
│ │ ├── task_group.py
│ │ └── tests/
│ │ ├── test_event_log.py
│ │ ├── test_mp_channel.py
│ │ ├── test_power_sampler.py
│ │ └── test_tagged.py
│ └── worker/
│ ├── __init__.py
│ ├── engines/
│ │ ├── __init__.py
│ │ ├── image/
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── distributed_model.py
│ │ │ ├── generate.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── flux/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── adapter.py
│ │ │ │ │ ├── config.py
│ │ │ │ │ ├── kontext_adapter.py
│ │ │ │ │ └── wrappers.py
│ │ │ │ └── qwen/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adapter.py
│ │ │ │ ├── config.py
│ │ │ │ ├── edit_adapter.py
│ │ │ │ └── wrappers.py
│ │ │ └── pipeline/
│ │ │ ├── __init__.py
│ │ │ ├── block_wrapper.py
│ │ │ ├── kv_cache.py
│ │ │ └── runner.py
│ │ └── mlx/
│ │ ├── __init__.py
│ │ ├── auto_parallel.py
│ │ ├── cache.py
│ │ ├── constants.py
│ │ ├── dsml_encoding.py
│ │ ├── generator/
│ │ │ ├── __init__.py
│ │ │ ├── batch_generate.py
│ │ │ └── generate.py
│ │ ├── tests/
│ │ │ └── test_batch_generate.py
│ │ └── utils_mlx.py
│ ├── main.py
│ ├── plan.py
│ ├── runner/
│ │ ├── __init__.py
│ │ ├── bootstrap.py
│ │ ├── image_models/
│ │ │ ├── __init__.py
│ │ │ └── runner.py
│ │ ├── llm_inference/
│ │ │ ├── __init__.py
│ │ │ ├── batch_generator.py
│ │ │ ├── model_output_parsers.py
│ │ │ ├── runner.py
│ │ │ └── tool_parsers.py
│ │ └── runner_supervisor.py
│ └── tests/
│ ├── TODO.tests
│ ├── __init__.py
│ ├── constants.py
│ └── unittests/
│ ├── __init__.py
│ ├── conftest.py
│ ├── test_download/
│ │ └── __init__.py
│ ├── test_mlx/
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_auto_parallel.py
│ │ ├── test_batch_vs_generate.py
│ │ ├── test_distributed_fix.py
│ │ ├── test_kv_prefix_cache.py
│ │ ├── test_pipeline_prefill_callbacks.py
│ │ ├── test_prefix_cache_architectures.py
│ │ └── test_tokenizers.py
│ ├── test_plan/
│ │ ├── __init__.py
│ │ ├── test_download_and_loading.py
│ │ ├── test_runner_lifecycle.py
│ │ ├── test_task_forwarding.py
│ │ └── test_warmup.py
│ └── test_runner/
│ ├── __init__.py
│ ├── test_dsml_e2e.py
│ ├── test_event_ordering.py
│ ├── test_glm_tool_parsing.py
│ ├── test_parse_gpt_oss.py
│ ├── test_parse_tool_calls.py
│ └── test_runner_supervisor.py
├── tests/
│ ├── auto_bench.sh
│ ├── eval_tool_calls.sh
│ ├── get_all_models_on_cluster.py
│ ├── headless_runner.py
│ ├── run_exo_on.sh
│ └── start_distributed_test.py
└── tmp/
├── config_examples/
│ ├── claude_code.sh
│ └── opencode.json
├── gen_card.py
├── prompt.txt
├── quantize_and_upload.py
├── run_llm.py
├── run_llm.sh
├── set_rdma_network_config.sh
└── test_trust_remote_code_attack.sh
Showing preview only (344K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (4125 symbols across 409 files)
FILE: .mlx_typings/mflux/callbacks/callback.pyi
class BeforeLoopCallback (line 11) | class BeforeLoopCallback(Protocol):
method call_before_loop (line 12) | def call_before_loop(
class InLoopCallback (line 22) | class InLoopCallback(Protocol):
method call_in_loop (line 23) | def call_in_loop(
class AfterLoopCallback (line 33) | class AfterLoopCallback(Protocol):
method call_after_loop (line 34) | def call_after_loop(
class InterruptCallback (line 38) | class InterruptCallback(Protocol):
method call_interrupt (line 39) | def call_interrupt(
FILE: .mlx_typings/mflux/callbacks/callback_registry.pyi
class CallbackRegistry (line 17) | class CallbackRegistry:
method __init__ (line 18) | def __init__(self) -> None: ...
method register (line 19) | def register(self, callback) -> None: ...
method start (line 20) | def start(self, seed: int, prompt: str, config: Config) -> GenerationC...
method before_loop_callbacks (line 21) | def before_loop_callbacks(self) -> list[BeforeLoopCallback]: ...
method in_loop_callbacks (line 22) | def in_loop_callbacks(self) -> list[InLoopCallback]: ...
method after_loop_callbacks (line 23) | def after_loop_callbacks(self) -> list[AfterLoopCallback]: ...
method interrupt_callbacks (line 24) | def interrupt_callbacks(self) -> list[InterruptCallback]: ...
FILE: .mlx_typings/mflux/callbacks/generation_context.pyi
class GenerationContext (line 14) | class GenerationContext:
method __init__ (line 15) | def __init__(
method before_loop (line 18) | def before_loop(
method in_loop (line 25) | def in_loop(self, t: int, latents: mx.array, time_steps: tqdm = ...) -...
method after_loop (line 26) | def after_loop(self, latents: mx.array) -> None: ...
method interruption (line 27) | def interruption(
FILE: .mlx_typings/mflux/models/common/config/config.pyi
class Config (line 13) | class Config:
method __init__ (line 14) | def __init__(
method height (line 31) | def height(self) -> int: ...
method width (line 33) | def width(self) -> int: ...
method width (line 35) | def width(self, value): # -> None:
method image_seq_len (line 38) | def image_seq_len(self) -> int: ...
method guidance (line 40) | def guidance(self) -> float: ...
method num_inference_steps (line 42) | def num_inference_steps(self) -> int: ...
method precision (line 44) | def precision(self) -> mx.Dtype: ...
method num_train_steps (line 46) | def num_train_steps(self) -> int: ...
method image_path (line 48) | def image_path(self) -> Path | None: ...
method image_strength (line 50) | def image_strength(self) -> float | None: ...
method depth_image_path (line 52) | def depth_image_path(self) -> Path | None: ...
method redux_image_paths (line 54) | def redux_image_paths(self) -> list[Path] | None: ...
method redux_image_strengths (line 56) | def redux_image_strengths(self) -> list[float] | None: ...
method masked_image_path (line 58) | def masked_image_path(self) -> Path | None: ...
method init_time_step (line 60) | def init_time_step(self) -> int: ...
method time_steps (line 62) | def time_steps(self) -> tqdm: ...
method controlnet_strength (line 64) | def controlnet_strength(self) -> float | None: ...
method scheduler (line 66) | def scheduler(self) -> Any: ...
FILE: .mlx_typings/mflux/models/common/config/model_config.pyi
class ModelConfig (line 9) | class ModelConfig:
method __init__ (line 11) | def __init__(
method dev (line 27) | def dev() -> ModelConfig: ...
method schnell (line 30) | def schnell() -> ModelConfig: ...
method dev_kontext (line 33) | def dev_kontext() -> ModelConfig: ...
method dev_fill (line 36) | def dev_fill() -> ModelConfig: ...
method dev_redux (line 39) | def dev_redux() -> ModelConfig: ...
method dev_depth (line 42) | def dev_depth() -> ModelConfig: ...
method dev_controlnet_canny (line 45) | def dev_controlnet_canny() -> ModelConfig: ...
method schnell_controlnet_canny (line 48) | def schnell_controlnet_canny() -> ModelConfig: ...
method dev_controlnet_upscaler (line 51) | def dev_controlnet_upscaler() -> ModelConfig: ...
method dev_fill_catvton (line 54) | def dev_fill_catvton() -> ModelConfig: ...
method krea_dev (line 57) | def krea_dev() -> ModelConfig: ...
method flux2_klein_4b (line 60) | def flux2_klein_4b() -> ModelConfig: ...
method flux2_klein_9b (line 63) | def flux2_klein_9b() -> ModelConfig: ...
method qwen_image (line 66) | def qwen_image() -> ModelConfig: ...
method qwen_image_edit (line 69) | def qwen_image_edit() -> ModelConfig: ...
method fibo (line 72) | def fibo() -> ModelConfig: ...
method z_image_turbo (line 75) | def z_image_turbo() -> ModelConfig: ...
method seedvr2_3b (line 78) | def seedvr2_3b() -> ModelConfig: ...
method x_embedder_input_dim (line 79) | def x_embedder_input_dim(self) -> int: ...
method is_canny (line 80) | def is_canny(self) -> bool: ...
method from_name (line 82) | def from_name(
FILE: .mlx_typings/mflux/models/common/latent_creator/latent_creator.pyi
class Img2Img (line 22) | class Img2Img:
method __init__ (line 23) | def __init__(
class LatentCreator (line 33) | class LatentCreator:
method create_for_txt2img_or_img2img (line 35) | def create_for_txt2img_or_img2img(
method encode_image (line 39) | def encode_image(
method add_noise_by_interpolation (line 47) | def add_noise_by_interpolation(
FILE: .mlx_typings/mflux/models/common/lora/layer/fused_linear_lora_layer.pyi
class FusedLoRALinear (line 8) | class FusedLoRALinear(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(self, x): # -> array:
FILE: .mlx_typings/mflux/models/common/lora/layer/linear_lora_layer.pyi
class LoRALinear (line 7) | class LoRALinear(nn.Module):
method from_linear (line 9) | def from_linear(
method __init__ (line 13) | def __init__(
method __call__ (line 21) | def __call__(self, x): # -> array:
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_loader.pyi
class PatternMatch (line 12) | class PatternMatch:
class LoRALoader (line 19) | class LoRALoader:
method load_and_apply_lora (line 21) | def load_and_apply_lora(
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_mapping.pyi
class LoRATarget (line 11) | class LoRATarget:
class LoRAMapping (line 19) | class LoRAMapping(Protocol):
method get_mapping (line 21) | def get_mapping() -> List[LoRATarget]: ...
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_saver.pyi
class LoRASaver (line 7) | class LoRASaver:
method bake_and_strip_lora (line 9) | def bake_and_strip_lora(module: nn.Module) -> nn.Module: ...
FILE: .mlx_typings/mflux/models/common/lora/mapping/lora_transforms.pyi
class LoraTransforms (line 7) | class LoraTransforms:
method split_q_up (line 9) | def split_q_up(tensor: mx.array) -> mx.array: ...
method split_k_up (line 11) | def split_k_up(tensor: mx.array) -> mx.array: ...
method split_v_up (line 13) | def split_v_up(tensor: mx.array) -> mx.array: ...
method split_q_down (line 15) | def split_q_down(tensor: mx.array) -> mx.array: ...
method split_k_down (line 17) | def split_k_down(tensor: mx.array) -> mx.array: ...
method split_v_down (line 19) | def split_v_down(tensor: mx.array) -> mx.array: ...
method split_single_q_up (line 21) | def split_single_q_up(tensor: mx.array) -> mx.array: ...
method split_single_k_up (line 23) | def split_single_k_up(tensor: mx.array) -> mx.array: ...
method split_single_v_up (line 25) | def split_single_v_up(tensor: mx.array) -> mx.array: ...
method split_single_mlp_up (line 27) | def split_single_mlp_up(tensor: mx.array) -> mx.array: ...
method split_single_q_down (line 29) | def split_single_q_down(tensor: mx.array) -> mx.array: ...
method split_single_k_down (line 31) | def split_single_k_down(tensor: mx.array) -> mx.array: ...
method split_single_v_down (line 33) | def split_single_v_down(tensor: mx.array) -> mx.array: ...
method split_single_mlp_down (line 35) | def split_single_mlp_down(tensor: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/common/resolution/actions.pyi
class QuantizationAction (line 8) | class QuantizationAction(Enum):
class PathAction (line 13) | class PathAction(Enum):
class LoraAction (line 19) | class LoraAction(Enum):
class ConfigAction (line 28) | class ConfigAction(Enum):
class Rule (line 34) | class Rule(NamedTuple):
FILE: .mlx_typings/mflux/models/common/resolution/config_resolution.pyi
class ConfigResolution (line 11) | class ConfigResolution:
method resolve (line 14) | def resolve(model_name: str, base_model: str | None = ...) -> ModelCon...
FILE: .mlx_typings/mflux/models/common/resolution/lora_resolution.pyi
class LoraResolution (line 9) | class LoraResolution:
method resolve (line 13) | def resolve(path: str) -> str: ...
method resolve_paths (line 15) | def resolve_paths(paths: list[str] | None) -> list[str]: ...
method resolve_scales (line 17) | def resolve_scales(scales: list[float] | None, num_paths: int) -> list...
method get_registry (line 19) | def get_registry() -> dict[str, Path]: ...
method discover_files (line 21) | def discover_files(library_paths: list[Path]) -> dict[str, Path]: ...
FILE: .mlx_typings/mflux/models/common/resolution/path_resolution.pyi
class PathResolution (line 9) | class PathResolution:
method resolve (line 12) | def resolve(path: str | None, patterns: list[str] | None = ...) -> Pat...
FILE: .mlx_typings/mflux/models/common/resolution/quantization_resolution.pyi
class QuantizationResolution (line 7) | class QuantizationResolution:
method resolve (line 10) | def resolve(
FILE: .mlx_typings/mflux/models/common/schedulers/__init__.pyi
class SchedulerModuleNotFound (line 15) | class SchedulerModuleNotFound(ValueError): ...
class SchedulerClassNotFound (line 16) | class SchedulerClassNotFound(ValueError): ...
class InvalidSchedulerType (line 17) | class InvalidSchedulerType(TypeError): ...
function register_contrib (line 21) | def register_contrib(scheduler_object, scheduler_name=...): # -> None:
function try_import_external_scheduler (line 23) | def try_import_external_scheduler(
FILE: .mlx_typings/mflux/models/common/schedulers/base_scheduler.pyi
class BaseScheduler (line 8) | class BaseScheduler(ABC):
method sigmas (line 11) | def sigmas(self) -> mx.array: ...
method step (line 13) | def step(
method scale_model_input (line 16) | def scale_model_input(self, latents: mx.array, t: int) -> mx.array: ...
FILE: .mlx_typings/mflux/models/common/schedulers/flow_match_euler_discrete_scheduler.pyi
class FlowMatchEulerDiscreteScheduler (line 12) | class FlowMatchEulerDiscreteScheduler(BaseScheduler):
method __init__ (line 13) | def __init__(self, config: Config) -> None: ...
method sigmas (line 15) | def sigmas(self) -> mx.array: ...
method timesteps (line 17) | def timesteps(self) -> mx.array: ...
method set_image_seq_len (line 18) | def set_image_seq_len(self, image_seq_len: int) -> None: ...
method get_timesteps_and_sigmas (line 20) | def get_timesteps_and_sigmas(
method step (line 23) | def step(
method scale_model_input (line 26) | def scale_model_input(self, latents: mx.array, t: int) -> mx.array: ...
FILE: .mlx_typings/mflux/models/common/schedulers/linear_scheduler.pyi
class LinearScheduler (line 12) | class LinearScheduler(BaseScheduler):
method __init__ (line 13) | def __init__(self, config: Config) -> None: ...
method sigmas (line 15) | def sigmas(self) -> mx.array: ...
method timesteps (line 17) | def timesteps(self) -> mx.array: ...
method step (line 18) | def step(
FILE: .mlx_typings/mflux/models/common/schedulers/seedvr2_euler_scheduler.pyi
class SeedVR2EulerScheduler (line 12) | class SeedVR2EulerScheduler(BaseScheduler):
method __init__ (line 13) | def __init__(self, config: Config) -> None: ...
method timesteps (line 15) | def timesteps(self) -> mx.array: ...
method sigmas (line 17) | def sigmas(self) -> mx.array: ...
method step (line 18) | def step(
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer.pyi
class Tokenizer (line 16) | class Tokenizer(Protocol):
method tokenize (line 18) | def tokenize(
class BaseTokenizer (line 26) | class BaseTokenizer(ABC):
method __init__ (line 27) | def __init__(
method tokenize (line 31) | def tokenize(
class LanguageTokenizer (line 39) | class LanguageTokenizer(BaseTokenizer):
method __init__ (line 40) | def __init__(
method tokenize (line 51) | def tokenize(
class VisionLanguageTokenizer (line 59) | class VisionLanguageTokenizer(BaseTokenizer):
method __init__ (line 60) | def __init__(
method tokenize (line 68) | def tokenize(
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer_loader.pyi
class TokenizerLoader (line 14) | class TokenizerLoader:
method load (line 16) | def load(definition: TokenizerDefinition, model_path: str) -> BaseToke...
method load_all (line 18) | def load_all(
FILE: .mlx_typings/mflux/models/common/tokenizer/tokenizer_output.pyi
class TokenizerOutput (line 13) | class TokenizerOutput:
FILE: .mlx_typings/mflux/models/common/vae/tiling_config.pyi
class TilingConfig (line 8) | class TilingConfig:
FILE: .mlx_typings/mflux/models/common/vae/vae_tiler.pyi
class VAETiler (line 8) | class VAETiler:
method encode_image_tiled (line 10) | def encode_image_tiled(
method decode_image_tiled (line 20) | def decode_image_tiled(
FILE: .mlx_typings/mflux/models/common/vae/vae_util.pyi
class VAEUtil (line 9) | class VAEUtil:
method encode (line 11) | def encode(
method decode (line 15) | def decode(
FILE: .mlx_typings/mflux/models/common/weights/loading/loaded_weights.pyi
class MetaData (line 8) | class MetaData:
class LoadedWeights (line 13) | class LoadedWeights:
method __getattr__ (line 16) | def __getattr__(self, name: str) -> dict | None: ...
method num_transformer_blocks (line 17) | def num_transformer_blocks(self, component_name: str = ...) -> int: ...
method num_single_transformer_blocks (line 18) | def num_single_transformer_blocks(self, component_name: str = ...) -> ...
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_applier.pyi
class WeightApplier (line 15) | class WeightApplier:
method apply_and_quantize_single (line 17) | def apply_and_quantize_single(
method apply_and_quantize (line 25) | def apply_and_quantize(
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_definition.pyi
class ComponentDefinition (line 41) | class ComponentDefinition:
class TokenizerDefinition (line 58) | class TokenizerDefinition:
FILE: .mlx_typings/mflux/models/common/weights/loading/weight_loader.pyi
class WeightLoader (line 15) | class WeightLoader:
method load_single (line 17) | def load_single(
method load (line 21) | def load(
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_mapper.pyi
class WeightMapper (line 9) | class WeightMapper:
method apply_mapping (line 11) | def apply_mapping(
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_mapping.pyi
class WeightTarget (line 14) | class WeightTarget:
class WeightMapping (line 21) | class WeightMapping(Protocol):
method get_mapping (line 23) | def get_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/common/weights/mapping/weight_transforms.pyi
class WeightTransforms (line 7) | class WeightTransforms:
method reshape_gamma_to_1d (line 9) | def reshape_gamma_to_1d(tensor: mx.array) -> mx.array: ...
method transpose_patch_embed (line 11) | def transpose_patch_embed(tensor: mx.array) -> mx.array: ...
method transpose_conv3d_weight (line 13) | def transpose_conv3d_weight(tensor: mx.array) -> mx.array: ...
method transpose_conv2d_weight (line 15) | def transpose_conv2d_weight(tensor: mx.array) -> mx.array: ...
method transpose_conv_transpose2d_weight (line 17) | def transpose_conv_transpose2d_weight(tensor: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/common/weights/saving/model_saver.pyi
class ModelSaver (line 10) | class ModelSaver:
method save_model (line 12) | def save_model(
FILE: .mlx_typings/mflux/models/depth_pro/depth_pro_initializer.pyi
class DepthProInitializer (line 7) | class DepthProInitializer:
method init (line 9) | def init(model: DepthProModel, quantize: int | None = ...) -> None: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/feature_fusion_block_2d.pyi
class FeatureFusionBlock2d (line 8) | class FeatureFusionBlock2d(nn.Module):
method __init__ (line 9) | def __init__(self, num_features: int, deconv: bool = ...) -> None: ...
method __call__ (line 10) | def __call__(self, x0: mx.array, x1: mx.array | None = ...) -> mx.arra...
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/multires_conv_decoder.pyi
class MultiresConvDecoder (line 8) | class MultiresConvDecoder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/depth_pro/model/decoder/residual_block.pyi
class ResidualBlock (line 8) | class ResidualBlock(nn.Module):
method __init__ (line 9) | def __init__(self, num_features: int) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro.pyi
class DepthResult (line 11) | class DepthResult:
class DepthPro (line 18) | class DepthPro:
method __init__ (line 19) | def __init__(self, quantize: int | None = ...) -> None: ...
method create_depth_map (line 20) | def create_depth_map(self, image_path: str | Path) -> DepthResult: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro_model.pyi
class DepthProModel (line 8) | class DepthProModel(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/depth_pro/model/depth_pro_util.pyi
class DepthProUtil (line 8) | class DepthProUtil:
method split (line 10) | def split(x: mx.array, overlap_ratio: float = ...) -> mx.array: ...
method interpolate (line 12) | def interpolate(x: mx.array, size=..., scale_factor=...): # -> array:
method apply_conv (line 15) | def apply_conv(x: mx.array, conv_module: nn.Module) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/attention.pyi
class Attention (line 8) | class Attention(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/dino_vision_transformer.pyi
class DinoVisionTransformer (line 8) | class DinoVisionTransformer(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> tuple[mx.array, mx.array, mx.array]...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/layer_scale.pyi
class LayerScale (line 8) | class LayerScale(nn.Module):
method __init__ (line 9) | def __init__(self, dims: int, init_values: float = ...) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/mlp.pyi
class MLP (line 8) | class MLP(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/patch_embed.pyi
class PatchEmbed (line 8) | class PatchEmbed(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/dino_v2/transformer_block.pyi
class TransformerBlock (line 8) | class TransformerBlock(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/encoder/depth_pro_encoder.pyi
class DepthProEncoder (line 8) | class DepthProEncoder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/depth_pro/model/encoder/upsample_block.pyi
class UpSampleBlock (line 8) | class UpSampleBlock(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 16) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/model/head/fov_head.pyi
class FOVHead (line 8) | class FOVHead(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_definition.pyi
class DepthProWeightDefinition (line 15) | class DepthProWeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_mapping.pyi
class DepthProWeightMapping (line 11) | class DepthProWeightMapping(WeightMapping):
method get_mapping (line 13) | def get_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/fibo/latent_creator/fibo_latent_creator.pyi
class FiboLatentCreator (line 7) | class FiboLatentCreator:
method create_noise (line 9) | def create_noise(seed: int, height: int, width: int) -> mx.array: ...
method pack_latents (line 11) | def pack_latents(latents: mx.array, height: int, width: int) -> mx.arr...
method unpack_latents (line 13) | def unpack_latents(latents: mx.array, height: int, width: int) -> mx.a...
FILE: .mlx_typings/mflux/models/fibo/weights/fibo_weight_definition.pyi
class FIBOWeightDefinition (line 15) | class FIBOWeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/fibo/weights/fibo_weight_mapping.pyi
class FIBOWeightMapping (line 11) | class FIBOWeightMapping(WeightMapping):
method get_transformer_mapping (line 13) | def get_transformer_mapping() -> List[WeightTarget]: ...
method get_text_encoder_mapping (line 15) | def get_text_encoder_mapping() -> List[WeightTarget]: ...
method get_vae_mapping (line 17) | def get_vae_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_image_processor.pyi
class Qwen2VLImageProcessor (line 7) | class Qwen2VLImageProcessor(QwenImageProcessor):
method __init__ (line 8) | def __init__(self) -> None: ...
FILE: .mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_processor.pyi
class Qwen2VLProcessor (line 8) | class Qwen2VLProcessor:
method __init__ (line 9) | def __init__(self, tokenizer) -> None: ...
method apply_chat_template (line 10) | def apply_chat_template(
method __call__ (line 20) | def __call__(
FILE: .mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_definition.pyi
class FIBOVLMWeightDefinition (line 16) | class FIBOVLMWeightDefinition:
method get_components (line 18) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 20) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 22) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 24) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_mapping.pyi
class FIBOVLMWeightMapping (line 11) | class FIBOVLMWeightMapping(WeightMapping):
method get_vlm_decoder_mapping (line 13) | def get_vlm_decoder_mapping(num_layers: int = ...) -> List[WeightTarge...
method get_vlm_visual_mapping (line 15) | def get_vlm_visual_mapping(depth: int = ...) -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/flux/flux_initializer.pyi
class FluxInitializer (line 7) | class FluxInitializer:
method init (line 9) | def init(
method init_depth (line 19) | def init_depth(
method init_redux (line 28) | def init_redux(
method init_controlnet (line 37) | def init_controlnet(
method init_concept (line 46) | def init_concept(
FILE: .mlx_typings/mflux/models/flux/latent_creator/flux_latent_creator.pyi
class FluxLatentCreator (line 11) | class FluxLatentCreator:
method create_noise (line 13) | def create_noise(seed: int, height: int, width: int) -> mx.array: ...
method pack_latents (line 15) | def pack_latents(
method unpack_latents (line 19) | def unpack_latents(latents: mx.array, height: int, width: int) -> mx.a...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_embeddings.pyi
class CLIPEmbeddings (line 8) | class CLIPEmbeddings(nn.Module):
method __init__ (line 9) | def __init__(self, dims: int) -> None: ...
method __call__ (line 10) | def __call__(self, tokens: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder.pyi
class CLIPEncoder (line 12) | class CLIPEncoder(nn.Module):
method __init__ (line 13) | def __init__(self) -> None: ...
method __call__ (line 14) | def __call__(self, tokens: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder_layer.pyi
class CLIPEncoderLayer (line 8) | class CLIPEncoderLayer(nn.Module):
method __init__ (line 9) | def __init__(self, layer: int) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_mlp.pyi
class CLIPMLP (line 8) | class CLIPMLP(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
method quick_gelu (line 12) | def quick_gelu(input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_sdpa_attention.pyi
class CLIPSdpaAttention (line 8) | class CLIPSdpaAttention(nn.Module):
method __init__ (line 12) | def __init__(self) -> None: ...
method __call__ (line 13) | def __call__(
method reshape_and_transpose (line 17) | def reshape_and_transpose(x, batch_size, num_heads, head_dim): # -> a...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_text_model.pyi
class CLIPTextModel (line 8) | class CLIPTextModel(nn.Module):
method __init__ (line 9) | def __init__(self, dims: int, num_encoder_layers: int) -> None: ...
method __call__ (line 10) | def __call__(self, tokens: mx.array) -> tuple[mx.array, mx.array]: ...
method create_causal_attention_mask (line 12) | def create_causal_attention_mask(input_shape: tuple) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/encoder_clip.pyi
class EncoderCLIP (line 8) | class EncoderCLIP(nn.Module):
method __init__ (line 9) | def __init__(self, num_encoder_layers: int) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/prompt_encoder.pyi
class PromptEncoder (line 16) | class PromptEncoder:
method encode_prompt (line 18) | def encode_prompt(
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_attention.pyi
class T5Attention (line 8) | class T5Attention(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_block.pyi
class T5Block (line 8) | class T5Block(nn.Module):
method __init__ (line 9) | def __init__(self, layer: int) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_dense_relu_dense.pyi
class T5DenseReluDense (line 8) | class T5DenseReluDense(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
method new_gelu (line 12) | def new_gelu(input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_encoder.pyi
class T5Encoder (line 12) | class T5Encoder(nn.Module):
method __init__ (line 13) | def __init__(self) -> None: ...
method __call__ (line 14) | def __call__(self, tokens: mx.array): ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_feed_forward.pyi
class T5FeedForward (line 8) | class T5FeedForward(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_layer_norm.pyi
class T5LayerNorm (line 8) | class T5LayerNorm(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_self_attention.pyi
class T5SelfAttention (line 8) | class T5SelfAttention(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
method shape (line 12) | def shape(states): # -> array:
method un_shape (line 15) | def un_shape(states): # -> array:
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_continuous.pyi
class AdaLayerNormContinuous (line 8) | class AdaLayerNormContinuous(nn.Module):
method __init__ (line 9) | def __init__(self, embedding_dim: int, conditioning_embedding_dim: int...
method __call__ (line 10) | def __call__(self, x: mx.array, text_embeddings: mx.array) -> mx.array...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero.pyi
class AdaLayerNormZero (line 8) | class AdaLayerNormZero(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero_single.pyi
class AdaLayerNormZeroSingle (line 8) | class AdaLayerNormZeroSingle(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/common/attention_utils.pyi
class AttentionUtils (line 8) | class AttentionUtils:
method process_qkv (line 10) | def process_qkv(
method compute_attention (line 21) | def compute_attention(
method convert_key_padding_mask_to_additive_mask (line 31) | def convert_key_padding_mask_to_additive_mask(
method apply_rope (line 35) | def apply_rope(
method apply_rope_bshd (line 39) | def apply_rope_bshd(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/embed_nd.pyi
class EmbedND (line 8) | class EmbedND(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, ids: mx.array) -> mx.array: ...
method rope (line 12) | def rope(pos: mx.array, dim: int, theta: float) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/feed_forward.pyi
class FeedForward (line 8) | class FeedForward(nn.Module):
method __init__ (line 9) | def __init__(self, activation_function) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/guidance_embedder.pyi
class GuidanceEmbedder (line 8) | class GuidanceEmbedder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, sample: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/joint_attention.pyi
class JointAttention (line 9) | class JointAttention(nn.Module):
method __init__ (line 25) | def __init__(self) -> None: ...
method __call__ (line 26) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/joint_transformer_block.pyi
class JointTransformerBlock (line 13) | class JointTransformerBlock(nn.Module):
method __init__ (line 22) | def __init__(self, layer: Any) -> None: ...
method __call__ (line 23) | def __call__(
method apply_norm_and_feed_forward (line 31) | def apply_norm_and_feed_forward(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/single_block_attention.pyi
class SingleBlockAttention (line 8) | class SingleBlockAttention(nn.Module):
method __init__ (line 17) | def __init__(self) -> None: ...
method __call__ (line 18) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/single_transformer_block.pyi
class SingleTransformerBlock (line 15) | class SingleTransformerBlock(nn.Module):
method __init__ (line 19) | def __init__(self, layer: Any) -> None: ...
method __call__ (line 20) | def __call__(
method _apply_feed_forward_and_projection (line 26) | def _apply_feed_forward_and_projection(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/text_embedder.pyi
class TextEmbedder (line 8) | class TextEmbedder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, caption: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/time_text_embed.pyi
class TimeTextEmbed (line 9) | class TimeTextEmbed(nn.Module):
method __init__ (line 10) | def __init__(self, model_config: ModelConfig) -> None: ...
method __call__ (line 11) | def __call__(
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/timestep_embedder.pyi
class TimestepEmbedder (line 8) | class TimestepEmbedder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, sample: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_transformer/transformer.pyi
class Transformer (line 18) | class Transformer(nn.Module):
method __init__ (line 28) | def __init__(
method __call__ (line 34) | def __call__(
method compute_rotary_embeddings (line 46) | def compute_rotary_embeddings(
method compute_text_embeddings (line 53) | def compute_text_embeddings(
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/attention.pyi
class Attention (line 8) | class Attention(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/resnet_block_2d.pyi
class ResnetBlock2D (line 8) | class ResnetBlock2D(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 21) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/common/unet_mid_block.pyi
class UnetMidBlock (line 8) | class UnetMidBlock(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_in.pyi
class ConvIn (line 8) | class ConvIn(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_norm_out.pyi
class ConvNormOut (line 8) | class ConvNormOut(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_out.pyi
class ConvOut (line 8) | class ConvOut(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/decoder.pyi
class Decoder (line 8) | class Decoder(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(self, latents: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_1_or_2.pyi
class UpBlock1Or2 (line 8) | class UpBlock1Or2(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_3.pyi
class UpBlock3 (line 8) | class UpBlock3(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_4.pyi
class UpBlock4 (line 8) | class UpBlock4(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_sampler.pyi
class UpSampler (line 8) | class UpSampler(nn.Module):
method __init__ (line 9) | def __init__(self, conv_in: int, conv_out: int) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
method up_sample_nearest (line 12) | def up_sample_nearest(x: mx.array, scale: int = ...): # -> array:
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_in.pyi
class ConvIn (line 8) | class ConvIn(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_norm_out.pyi
class ConvNormOut (line 8) | class ConvNormOut(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_out.pyi
class ConvOut (line 8) | class ConvOut(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_1.pyi
class DownBlock1 (line 8) | class DownBlock1(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_2.pyi
class DownBlock2 (line 8) | class DownBlock2(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_3.pyi
class DownBlock3 (line 8) | class DownBlock3(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_4.pyi
class DownBlock4 (line 8) | class DownBlock4(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_sampler.pyi
class DownSampler (line 8) | class DownSampler(nn.Module):
method __init__ (line 9) | def __init__(self, conv_in: int, conv_out: int) -> None: ...
method __call__ (line 10) | def __call__(self, input_array: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/encoder/encoder.pyi
class Encoder (line 8) | class Encoder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, latents: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/flux_vae/vae.pyi
class VAE (line 8) | class VAE(nn.Module):
method __init__ (line 13) | def __init__(self) -> None: ...
method decode (line 14) | def decode(self, latents: mx.array) -> mx.array: ...
method encode (line 15) | def encode(self, image: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/redux_encoder/redux_encoder.pyi
class ReduxEncoder (line 8) | class ReduxEncoder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder.pyi
class SiglipEncoder (line 8) | class SiglipEncoder(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, inputs_embeds: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder_layer.pyi
class SiglipEncoderLayer (line 8) | class SiglipEncoderLayer(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_mlp.pyi
class SiglipMLP (line 8) | class SiglipMLP(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_multi_head_attention_pooling_head.pyi
class SiglipMultiHeadAttentionPoolingHead (line 8) | class SiglipMultiHeadAttentionPoolingHead(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_sdpa_attention.pyi
class SiglipSdpaAttention (line 8) | class SiglipSdpaAttention(nn.Module):
method __init__ (line 12) | def __init__(self) -> None: ...
method __call__ (line 13) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
method reshape_and_transpose (line 15) | def reshape_and_transpose(x, batch_size, num_heads, head_dim): # -> a...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_embeddings.pyi
class SiglipVisionEmbeddings (line 8) | class SiglipVisionEmbeddings(nn.Module):
method __init__ (line 12) | def __init__(self) -> None: ...
method __call__ (line 13) | def __call__(self, pixel_values: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_transformer.pyi
class SiglipVisionTransformer (line 8) | class SiglipVisionTransformer(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, pixel_values: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/attention_data.pyi
class TimestepAttentionData (line 15) | class TimestepAttentionData:
method stack_img_attentions (line 18) | def stack_img_attentions(self) -> mx.array: ...
method stack_concept_attentions (line 19) | def stack_concept_attentions(self) -> mx.array: ...
class GenerationAttentionData (line 21) | class GenerationAttentionData:
method __init__ (line 22) | def __init__(self) -> None: ...
method append (line 23) | def append(self, timestep_attention: TimestepAttentionData): # -> None:
method stack_all_img_attentions (line 25) | def stack_all_img_attentions(self) -> mx.array: ...
method stack_all_concept_attentions (line 26) | def stack_all_concept_attentions(self) -> mx.array: ...
class ConceptHeatmap (line 29) | class ConceptHeatmap:
method save (line 36) | def save(
method get_metadata (line 39) | def get_metadata(self) -> dict: ...
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/joint_attention_concept.pyi
class JointAttentionConcept (line 8) | class JointAttentionConcept(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/joint_transformer_block_concept.pyi
class LayerAttentionData (line 10) | class LayerAttentionData:
class JointTransformerBlockConcept (line 16) | class JointTransformerBlockConcept(nn.Module):
method __init__ (line 17) | def __init__(self, layer) -> None: ...
method __call__ (line 18) | def __call__(
FILE: .mlx_typings/mflux/models/flux/variants/concept_attention/transformer_concept.pyi
class TransformerConcept (line 13) | class TransformerConcept(nn.Module):
method __init__ (line 14) | def __init__(
method __call__ (line 20) | def __call__(
FILE: .mlx_typings/mflux/models/flux/variants/controlnet/transformer_controlnet.pyi
class TransformerControlnet (line 10) | class TransformerControlnet(nn.Module):
method __init__ (line 11) | def __init__(
method __call__ (line 17) | def __call__(
FILE: .mlx_typings/mflux/models/flux/variants/kontext/flux_kontext.pyi
class Flux1Kontext (line 19) | class Flux1Kontext(nn.Module):
method __init__ (line 30) | def __init__(
method generate_image (line 38) | def generate_image(
FILE: .mlx_typings/mflux/models/flux/variants/kontext/kontext_util.pyi
class KontextUtil (line 9) | class KontextUtil:
method create_image_conditioning_latents (line 11) | def create_image_conditioning_latents(
FILE: .mlx_typings/mflux/models/flux/variants/txt2img/flux.pyi
class Flux1 (line 17) | class Flux1(nn.Module):
method __init__ (line 28) | def __init__(
method generate_image (line 36) | def generate_image(
method from_name (line 50) | def from_name(model_name: str, quantize: int | None = ...) -> Flux1: ...
method save_model (line 51) | def save_model(self, base_path: str) -> None: ...
method freeze (line 52) | def freeze(self, **kwargs): # -> None:
FILE: .mlx_typings/mflux/models/flux/weights/flux_lora_mapping.pyi
class FluxLoRAMapping (line 7) | class FluxLoRAMapping(LoRAMapping):
method get_mapping (line 9) | def get_mapping() -> list[LoRATarget]: ...
FILE: .mlx_typings/mflux/models/flux/weights/flux_weight_definition.pyi
class FluxWeightDefinition (line 15) | class FluxWeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
class FluxControlnetWeightDefinition (line 25) | class FluxControlnetWeightDefinition:
method get_controlnet_component (line 27) | def get_controlnet_component() -> ComponentDefinition: ...
method get_components (line 29) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 31) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 33) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 35) | def quantization_predicate(path: str, module) -> bool: ...
class FluxReduxWeightDefinition (line 37) | class FluxReduxWeightDefinition:
method get_components (line 39) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 41) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 43) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 45) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/flux/weights/flux_weight_mapping.pyi
class FluxWeightMapping (line 11) | class FluxWeightMapping(WeightMapping):
method get_transformer_mapping (line 13) | def get_transformer_mapping() -> List[WeightTarget]: ...
method get_controlnet_transformer_mapping (line 15) | def get_controlnet_transformer_mapping() -> List[WeightTarget]: ...
method get_vae_mapping (line 17) | def get_vae_mapping() -> List[WeightTarget]: ...
method get_t5_encoder_mapping (line 19) | def get_t5_encoder_mapping() -> List[WeightTarget]: ...
method get_clip_encoder_mapping (line 21) | def get_clip_encoder_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/qwen/latent_creator/qwen_latent_creator.pyi
class QwenLatentCreator (line 11) | class QwenLatentCreator:
method create_noise (line 13) | def create_noise(seed: int, height: int, width: int) -> mx.array: ...
method pack_latents (line 15) | def pack_latents(
method unpack_latents (line 19) | def unpack_latents(latents: mx.array, height: int, width: int) -> mx.a...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_attention.pyi
class QwenAttention (line 8) | class QwenAttention(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 18) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder.pyi
class QwenEncoder (line 8) | class QwenEncoder(nn.Module):
method __init__ (line 9) | def __init__(
method get_image_features (line 17) | def get_image_features(
method __call__ (line 20) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder_layer.pyi
class QwenEncoderLayer (line 8) | class QwenEncoderLayer(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 19) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_mlp.pyi
class QwenMLP (line 8) | class QwenMLP(nn.Module):
method __init__ (line 9) | def __init__(self, hidden_size: int, intermediate_size: int) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_patch_merger.pyi
class PatchMerger (line 8) | class PatchMerger(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(self, x: mx.array, grid_thw: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_prompt_encoder.pyi
class QwenPromptEncoder (line 13) | class QwenPromptEncoder:
method encode_prompt (line 15) | def encode_prompt(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rms_norm.pyi
class QwenRMSNorm (line 8) | class QwenRMSNorm(nn.Module):
method __init__ (line 9) | def __init__(self, hidden_size: int, eps: float = ...) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rope.pyi
class QwenRotaryEmbedding (line 8) | class QwenRotaryEmbedding(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 19) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_text_encoder.pyi
class QwenTextEncoder (line 12) | class QwenTextEncoder(nn.Module):
method __init__ (line 13) | def __init__(self) -> None: ...
method __call__ (line 14) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_attention.pyi
class VisionAttention (line 8) | class VisionAttention(nn.Module):
method __init__ (line 9) | def __init__(self, embed_dim: int = ..., num_heads: int = ...) -> None...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_block.pyi
class VisionBlock (line 8) | class VisionBlock(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_language_encoder.pyi
class QwenVisionLanguageEncoder (line 8) | class QwenVisionLanguageEncoder(nn.Module):
method __init__ (line 9) | def __init__(self, encoder=...) -> None: ...
method __call__ (line 10) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_mlp.pyi
class VisionMLP (line 8) | class VisionMLP(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int, hidden_dim: int) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_patch_embed.pyi
class VisionPatchEmbed (line 8) | class VisionPatchEmbed(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 16) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_rotary_embedding.pyi
class VisionRotaryEmbedding (line 8) | class VisionRotaryEmbedding(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int, theta: float = ...) -> None: ...
method __call__ (line 10) | def __call__(self, max_grid_size: int) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_transformer.pyi
class VisionTransformer (line 8) | class VisionTransformer(nn.Module):
method __init__ (line 9) | def __init__(
method get_window_index (line 23) | def get_window_index(self, grid_thw: mx.array): # -> tuple[array, arr...
method rot_pos_emb (line 25) | def rot_pos_emb(self, grid_thw: mx.array) -> mx.array: ...
method __call__ (line 26) | def __call__(self, pixel_values: mx.array, grid_thw: mx.array) -> mx.a...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_attention.pyi
class QwenAttention (line 9) | class QwenAttention(nn.Module):
method __init__ (line 27) | def __init__(
method __call__ (line 30) | def __call__(
method _compute_attention_qwen (line 38) | def _compute_attention_qwen(
method _convert_mask_for_qwen (line 47) | def _convert_mask_for_qwen(
method _apply_rope_qwen (line 51) | def _apply_rope_qwen(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_feed_forward.pyi
class QwenFeedForward (line 8) | class QwenFeedForward(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int = ...) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_rope.pyi
class QwenEmbedRopeMLX (line 8) | class QwenEmbedRopeMLX(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_time_text_embed.pyi
class QwenTimeTextEmbed (line 8) | class QwenTimeTextEmbed(nn.Module):
method __init__ (line 9) | def __init__(self, timestep_proj_dim: int = ..., inner_dim: int = ...)...
method __call__ (line 10) | def __call__(self, timestep: mx.array, hidden_states: mx.array) -> mx....
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_timestep_embedding.pyi
class QwenTimestepEmbedding (line 8) | class QwenTimestepEmbedding(nn.Module):
method __init__ (line 9) | def __init__(self, proj_dim: int, inner_dim: int) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_timesteps.pyi
class QwenTimesteps (line 8) | class QwenTimesteps(nn.Module):
method __init__ (line 9) | def __init__(self, proj_dim: int = ..., scale: float = ...) -> None: ...
method __call__ (line 10) | def __call__(self, timesteps: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer.pyi
class QwenTransformer (line 13) | class QwenTransformer(nn.Module):
method __init__ (line 24) | def __init__(
method __call__ (line 34) | def __call__(
method _compute_timestep (line 45) | def _compute_timestep(t: int | float, config: Config) -> mx.array: ...
method _compute_rotary_embeddings (line 47) | def _compute_rotary_embeddings(
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer_block.pyi
class QwenTransformerBlock (line 10) | class QwenTransformerBlock(nn.Module):
method __init__ (line 23) | def __init__(
method __call__ (line 26) | def __call__(
method _modulate (line 36) | def _modulate(x: mx.array, mod_params: mx.array) -> tuple[mx.array, mx...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_transformer_rms_norm.pyi
class QwenTransformerRMSNorm (line 8) | class QwenTransformerRMSNorm(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int, eps: float = ...) -> None: ...
method __call__ (line 10) | def __call__(self, hidden_states: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_attention_block_3d.pyi
class QwenImageAttentionBlock3D (line 8) | class QwenImageAttentionBlock3D(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_causal_conv_3d.pyi
class QwenImageCausalConv3D (line 8) | class QwenImageCausalConv3D(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 17) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_decoder_3d.pyi
class QwenImageDecoder3D (line 8) | class QwenImageDecoder3D(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_down_block_3d.pyi
class QwenImageDownBlock3D (line 8) | class QwenImageDownBlock3D(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 16) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_encoder_3d.pyi
class QwenImageEncoder3D (line 8) | class QwenImageEncoder3D(nn.Module):
method __init__ (line 9) | def __init__(self) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_mid_block_3d.pyi
class QwenImageMidBlock3D (line 8) | class QwenImageMidBlock3D(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int, num_layers: int = ...) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_res_block_3d.pyi
class QwenImageResBlock3D (line 8) | class QwenImageResBlock3D(nn.Module):
method __init__ (line 9) | def __init__(self, in_channels: int, out_channels: int) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_resample_3d.pyi
class QwenImageResample3D (line 8) | class QwenImageResample3D(nn.Module):
method __init__ (line 9) | def __init__(self, dim: int, mode: str) -> None: ...
method __call__ (line 10) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_rms_norm.pyi
class QwenImageRMSNorm (line 8) | class QwenImageRMSNorm(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 12) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_image_up_block_3d.pyi
class QwenImageUpBlock3D (line 8) | class QwenImageUpBlock3D(nn.Module):
method __init__ (line 9) | def __init__(
method __call__ (line 16) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/model/qwen_vae/qwen_vae.pyi
class QwenVAE (line 8) | class QwenVAE(nn.Module):
method __init__ (line 13) | def __init__(self) -> None: ...
method decode (line 14) | def decode(self, latents: mx.array) -> mx.array: ...
method encode (line 15) | def encode(self, latents: mx.array) -> mx.array: ...
FILE: .mlx_typings/mflux/models/qwen/qwen_initializer.pyi
class QwenImageInitializer (line 7) | class QwenImageInitializer:
method init (line 9) | def init(
method init_edit (line 18) | def init_edit(
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_image_processor.pyi
function smart_resize (line 12) | def smart_resize(
class QwenImageProcessor (line 20) | class QwenImageProcessor:
method __init__ (line 21) | def __init__(
method preprocess (line 31) | def preprocess(
method get_number_of_image_patches (line 34) | def get_number_of_image_patches(
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_vision_language_processor.pyi
class QwenVisionLanguageProcessor (line 9) | class QwenVisionLanguageProcessor:
method __init__ (line 10) | def __init__(
method __call__ (line 17) | def __call__(
FILE: .mlx_typings/mflux/models/qwen/tokenizer/qwen_vision_language_tokenizer.pyi
class QwenVisionLanguageTokenizer (line 13) | class QwenVisionLanguageTokenizer:
method __init__ (line 14) | def __init__(
method tokenize_with_image (line 20) | def tokenize_with_image(
method tokenize_text_only (line 27) | def tokenize_text_only(self, prompt: str) -> tuple[mx.array, mx.array]...
FILE: .mlx_typings/mflux/models/qwen/variants/edit/qwen_edit_util.pyi
class QwenEditUtil (line 8) | class QwenEditUtil:
method create_image_conditioning_latents (line 10) | def create_image_conditioning_latents(
FILE: .mlx_typings/mflux/models/qwen/variants/edit/qwen_image_edit.pyi
class QwenImageEdit (line 16) | class QwenImageEdit(nn.Module):
method __init__ (line 26) | def __init__(
method generate_image (line 34) | def generate_image(
method _encode_prompts_with_images (line 47) | def _encode_prompts_with_images(
FILE: .mlx_typings/mflux/models/qwen/variants/txt2img/qwen_image.pyi
class QwenImage (line 15) | class QwenImage(nn.Module):
method __init__ (line 25) | def __init__(
method generate_image (line 33) | def generate_image(
method save_model (line 46) | def save_model(self, base_path: str) -> None: ...
method compute_guided_noise (line 48) | def compute_guided_noise(
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_lora_mapping.pyi
class QwenLoRAMapping (line 8) | class QwenLoRAMapping(LoRAMapping):
method get_mapping (line 10) | def get_mapping() -> List[LoRATarget]: ...
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_weight_definition.pyi
class QwenWeightDefinition (line 15) | class QwenWeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/qwen/weights/qwen_weight_mapping.pyi
class QwenWeightMapping (line 11) | class QwenWeightMapping(WeightMapping):
method get_transformer_mapping (line 13) | def get_transformer_mapping() -> List[WeightTarget]: ...
method get_vae_mapping (line 15) | def get_vae_mapping() -> List[WeightTarget]: ...
method get_text_encoder_mapping (line 17) | def get_text_encoder_mapping() -> List[WeightTarget]: ...
method get_mapping (line 19) | def get_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/seedvr2/weights/seedvr2_weight_definition.pyi
class SeedVR2WeightDefinition (line 15) | class SeedVR2WeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/seedvr2/weights/seedvr2_weight_mapping.pyi
class SeedVR2WeightMapping (line 11) | class SeedVR2WeightMapping(WeightMapping):
method get_transformer_mapping (line 13) | def get_transformer_mapping() -> List[WeightTarget]: ...
method get_vae_mapping (line 15) | def get_vae_mapping() -> List[WeightTarget]: ...
FILE: .mlx_typings/mflux/models/z_image/latent_creator/z_image_latent_creator.pyi
class ZImageLatentCreator (line 7) | class ZImageLatentCreator:
method create_noise (line 9) | def create_noise(seed: int, height: int, width: int) -> mx.array: ...
method pack_latents (line 11) | def pack_latents(latents: mx.array, height: int, width: int) -> mx.arr...
method unpack_latents (line 13) | def unpack_latents(latents: mx.array, height: int, width: int) -> mx.a...
FILE: .mlx_typings/mflux/models/z_image/weights/z_image_weight_definition.pyi
class ZImageWeightDefinition (line 15) | class ZImageWeightDefinition:
method get_components (line 17) | def get_components() -> List[ComponentDefinition]: ...
method get_tokenizers (line 19) | def get_tokenizers() -> List[TokenizerDefinition]: ...
method get_download_patterns (line 21) | def get_download_patterns() -> List[str]: ...
method quantization_predicate (line 23) | def quantization_predicate(path: str, module) -> bool: ...
FILE: .mlx_typings/mflux/models/z_image/weights/z_image_weight_mapping.pyi
class ZImageWeightMapping (line 10) | class ZImageWeightMapping(WeightMapping):
method get_text_encoder_mapping (line 12) | def get_text_encoder_mapping() -> list[WeightTarget]: ...
method get_vae_mapping (line 14) | def get_vae_mapping() -> list[WeightTarget]: ...
method get_transformer_mapping (line 16) | def get_transformer_mapping() -> list[WeightTarget]: ...
FILE: .mlx_typings/mflux/utils/box_values.pyi
class AbsoluteBoxValues (line 8) | class AbsoluteBoxValues:
class BoxValueError (line 15) | class BoxValueError(ValueError): ...
class BoxValues (line 18) | class BoxValues:
method normalize_to_dimensions (line 23) | def normalize_to_dimensions(self, width, height) -> AbsoluteBoxValues:...
method parse (line 25) | def parse(value, delimiter=...) -> BoxValues: ...
FILE: .mlx_typings/mflux/utils/exceptions.pyi
class MFluxException (line 9) | class MFluxException(Exception): ...
class ImageSavingException (line 10) | class ImageSavingException(MFluxException): ...
class MetadataEmbedException (line 11) | class MetadataEmbedException(MFluxException): ...
class MFluxUserException (line 12) | class MFluxUserException(MFluxException): ...
class PromptFileReadError (line 13) | class PromptFileReadError(MFluxUserException): ...
class StopImageGenerationException (line 14) | class StopImageGenerationException(MFluxUserException): ...
class StopTrainingException (line 15) | class StopTrainingException(MFluxUserException): ...
class CommandExecutionError (line 17) | class CommandExecutionError(MFluxException):
method __init__ (line 18) | def __init__(
class ReferenceVsOutputImageError (line 22) | class ReferenceVsOutputImageError(AssertionError): ...
class ModelConfigError (line 23) | class ModelConfigError(ValueError): ...
class InvalidBaseModel (line 24) | class InvalidBaseModel(ModelConfigError): ...
FILE: .mlx_typings/mflux/utils/generated_image.pyi
class GeneratedImage (line 13) | class GeneratedImage:
method __init__ (line 16) | def __init__(
method get_right_half (line 44) | def get_right_half(self) -> GeneratedImage: ...
method save (line 45) | def save(
method save_with_heatmap (line 48) | def save_with_heatmap(
method save_concept_heatmap (line 51) | def save_concept_heatmap(
FILE: .mlx_typings/mflux/utils/image_util.pyi
class ImageUtil (line 17) | class ImageUtil:
method to_image (line 19) | def to_image(
method to_composite_image (line 41) | def to_composite_image(
method to_array (line 45) | def to_array(image: PIL.Image.Image, is_mask: bool = ...) -> mx.array:...
method load_image (line 47) | def load_image(
method expand_image (line 51) | def expand_image(
method create_outpaint_mask_image (line 61) | def create_outpaint_mask_image(
method create_bordered_image (line 66) | def create_bordered_image(
method scale_to_dimensions (line 78) | def scale_to_dimensions(
method save_image (line 82) | def save_image(
method preprocess_for_model (line 90) | def preprocess_for_model(
method preprocess_for_depth_pro (line 98) | def preprocess_for_depth_pro(
FILE: .mlx_typings/mflux/utils/metadata_builder.pyi
class MetadataBuilder (line 9) | class MetadataBuilder:
method embed_metadata (line 12) | def embed_metadata(metadata: dict, path: str | Path) -> None: ...
method build_xmp_packet (line 14) | def build_xmp_packet(metadata: dict) -> str: ...
method build_iptc_binary (line 16) | def build_iptc_binary(metadata: dict) -> bytes: ...
FILE: .mlx_typings/mflux/utils/version_util.pyi
class VersionUtil (line 9) | class VersionUtil:
method get_mflux_version (line 11) | def get_mflux_version() -> str: ...
FILE: .mlx_typings/mlx/core/__init__.pyi
class ArrayAt (line 23) | class ArrayAt:
method __getitem__ (line 25) | def __getitem__(self, indices: object | None) -> ArrayAt: ...
method add (line 26) | def add(
method subtract (line 36) | def subtract(
method multiply (line 46) | def multiply(
method divide (line 56) | def divide(
method maximum (line 66) | def maximum(
method minimum (line 76) | def minimum(
class ArrayIterator (line 87) | class ArrayIterator:
method __next__ (line 89) | def __next__(self) -> array: ...
method __iter__ (line 90) | def __iter__(self) -> ArrayIterator: ...
class ArrayLike (line 92) | class ArrayLike:
method __init__ (line 97) | def __init__(self, arg: object, /) -> None: ...
class Device (line 99) | class Device:
method __init__ (line 101) | def __init__(self, type: DeviceType, index: int = ...) -> None: ...
method type (line 103) | def type(self) -> DeviceType: ...
method __repr__ (line 104) | def __repr__(self) -> str: ...
method __eq__ (line 105) | def __eq__(self, arg: object, /) -> bool: ...
class DeviceType (line 107) | class DeviceType(enum.Enum):
method __eq__ (line 110) | def __eq__(self, arg: object, /) -> bool: ...
class Dtype (line 112) | class Dtype:
method size (line 120) | def size(self) -> int:
method __repr__ (line 123) | def __repr__(self) -> str: ...
method __eq__ (line 124) | def __eq__(self, arg: object, /) -> bool: ...
method __hash__ (line 125) | def __hash__(self) -> int: ...
class DtypeCategory (line 127) | class DtypeCategory(enum.Enum):
class FunctionExporter (line 176) | class FunctionExporter:
method close (line 183) | def close(self) -> None: ...
method __enter__ (line 184) | def __enter__(self) -> FunctionExporter: ...
method __exit__ (line 185) | def __exit__(
method __call__ (line 191) | def __call__(self, *args, **kwargs) -> None: ...
class Stream (line 193) | class Stream:
method device (line 196) | def device(self) -> Device: ...
method __repr__ (line 197) | def __repr__(self) -> str: ...
method __eq__ (line 198) | def __eq__(self, arg: object, /) -> bool: ...
class StreamContext (line 200) | class StreamContext:
method __init__ (line 209) | def __init__(self, s: Stream | Device) -> None: ...
method __enter__ (line 210) | def __enter__(self) -> None: ...
method __exit__ (line 211) | def __exit__(
function device_info (line 218) | def device_info() -> dict[str, str | int]:
function abs (line 234) | def abs(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function add (line 245) | def add(
function addmm (line 264) | def addmm(
function all (line 292) | def all(
function allclose (line 315) | def allclose(
function any (line 351) | def any(
function arange (line 375) | def arange(
function arange (line 406) | def arange(
function arccos (line 413) | def arccos(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function arccosh (line 424) | def arccosh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function arcsin (line 435) | def arcsin(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function arcsinh (line 446) | def arcsinh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function arctan (line 457) | def arctan(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function arctan2 (line 468) | def arctan2(a: array, b: array, /, *, stream: Stream | Device | None = ....
function arctanh (line 480) | def arctanh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function argmax (line 491) | def argmax(
function argmin (line 513) | def argmin(
function argpartition (line 535) | def argpartition(
function argsort (line 564) | def argsort(
class array (line 584) | class array:
method __init__ (line 586) | def __init__(
method __buffer__ (line 591) | def __buffer__(self, flags, /):
method __release_buffer__ (line 596) | def __release_buffer__(self, buffer, /):
method size (line 602) | def size(self) -> int:
method ndim (line 606) | def ndim(self) -> int:
method itemsize (line 610) | def itemsize(self) -> int:
method nbytes (line 614) | def nbytes(self) -> int:
method shape (line 618) | def shape(self) -> tuple[int, ...]:
method dtype (line 627) | def dtype(self) -> Dtype:
method real (line 631) | def real(self) -> array:
method imag (line 635) | def imag(self) -> array:
method item (line 638) | def item(self) -> scalar:
method tolist (line 646) | def tolist(self) -> list_or_scalar:
method astype (line 662) | def astype(self, dtype: Dtype, stream: Stream | Device | None = ...) -...
method __array_namespace__ (line 674) | def __array_namespace__(self, api_version: str | None = ...) -> types....
method __getitem__ (line 689) | def __getitem__(self, arg: object | None) -> array: ...
method __setitem__ (line 690) | def __setitem__(
method at (line 702) | def at(self) -> ArrayAt:
method __len__ (line 743) | def __len__(self) -> int: ...
method __iter__ (line 744) | def __iter__(self) -> ArrayIterator: ...
method __getstate__ (line 745) | def __getstate__(self) -> tuple: ...
method __setstate__ (line 746) | def __setstate__(self, arg: tuple, /) -> None: ...
method __dlpack__ (line 747) | def __dlpack__(self) -> _ArrayLike: ...
method __dlpack_device__ (line 748) | def __dlpack_device__(self) -> tuple: ...
method __copy__ (line 749) | def __copy__(self) -> array: ...
method __deepcopy__ (line 750) | def __deepcopy__(self, memo: dict) -> array: ...
method __add__ (line 751) | def __add__(
method __iadd__ (line 761) | def __iadd__(
method __radd__ (line 771) | def __radd__(
method __sub__ (line 781) | def __sub__(
method __isub__ (line 791) | def __isub__(
method __rsub__ (line 801) | def __rsub__(
method __mul__ (line 811) | def __mul__(
method __imul__ (line 821) | def __imul__(
method __rmul__ (line 831) | def __rmul__(
method __truediv__ (line 841) | def __truediv__(
method __itruediv__ (line 851) | def __itruediv__(
method __rtruediv__ (line 861) | def __rtruediv__(
method __div__ (line 871) | def __div__(
method __rdiv__ (line 881) | def __rdiv__(
method __floordiv__ (line 891) | def __floordiv__(
method __ifloordiv__ (line 901) | def __ifloordiv__(
method __rfloordiv__ (line 911) | def __rfloordiv__(
method __mod__ (line 921) | def __mod__(
method __imod__ (line 931) | def __imod__(
method __rmod__ (line 941) | def __rmod__(
method __eq__ (line 951) | def __eq__(
method __lt__ (line 961) | def __lt__(
method __le__ (line 971) | def __le__(
method __gt__ (line 981) | def __gt__(
method __ge__ (line 991) | def __ge__(
method __ne__ (line 1001) | def __ne__(
method __neg__ (line 1011) | def __neg__(self) -> array: ...
method __bool__ (line 1012) | def __bool__(self) -> bool: ...
method __repr__ (line 1013) | def __repr__(self) -> str: ...
method __matmul__ (line 1014) | def __matmul__(self, other: array) -> array: ...
method __imatmul__ (line 1015) | def __imatmul__(self, other: array) -> array: ...
method __pow__ (line 1016) | def __pow__(
method __rpow__ (line 1026) | def __rpow__(
method __ipow__ (line 1036) | def __ipow__(
method __invert__ (line 1046) | def __invert__(self) -> array: ...
method __and__ (line 1047) | def __and__(
method __iand__ (line 1057) | def __iand__(
method __or__ (line 1067) | def __or__(
method __ior__ (line 1077) | def __ior__(
method __lshift__ (line 1087) | def __lshift__(
method __ilshift__ (line 1097) | def __ilshift__(
method __rshift__ (line 1107) | def __rshift__(
method __irshift__ (line 1117) | def __irshift__(
method __xor__ (line 1127) | def __xor__(
method __ixor__ (line 1137) | def __ixor__(
method __int__ (line 1147) | def __int__(self) -> int: ...
method __float__ (line 1148) | def __float__(self) -> float: ...
method flatten (line 1149) | def flatten(
method reshape (line 1158) | def reshape(self, *shape: int, stream: Stream | Device | None = ...) -...
method squeeze (line 1166) | def squeeze(
method abs (line 1174) | def abs(self, *, stream: Stream | Device | None = ...) -> array:
method __abs__ (line 1177) | def __abs__(self) -> array:
method square (line 1180) | def square(self, *, stream: Stream | Device | None = ...) -> array:
method sqrt (line 1183) | def sqrt(self, *, stream: Stream | Device | None = ...) -> array:
method rsqrt (line 1186) | def rsqrt(self, *, stream: Stream | Device | None = ...) -> array:
method reciprocal (line 1189) | def reciprocal(self, *, stream: Stream | Device | None = ...) -> array:
method exp (line 1192) | def exp(self, *, stream: Stream | Device | None = ...) -> array:
method log (line 1195) | def log(self, *, stream: Stream | Device | None = ...) -> array:
method log2 (line 1198) | def log2(self, *, stream: Stream | Device | None = ...) -> array:
method log10 (line 1201) | def log10(self, *, stream: Stream | Device | None = ...) -> array:
method sin (line 1204) | def sin(self, *, stream: Stream | Device | None = ...) -> array:
method cos (line 1207) | def cos(self, *, stream: Stream | Device | None = ...) -> array:
method log1p (line 1210) | def log1p(self, *, stream: Stream | Device | None = ...) -> array:
method all (line 1213) | def all(
method any (line 1222) | def any(
method moveaxis (line 1231) | def moveaxis(
method swapaxes (line 1236) | def swapaxes(
method transpose (line 1241) | def transpose(self, *axes: int, stream: Stream | Device | None = ...) ...
method T (line 1250) | def T(self) -> array:
method sum (line 1253) | def sum(
method prod (line 1262) | def prod(
method min (line 1271) | def min(
method max (line 1280) | def max(
method logcumsumexp (line 1289) | def logcumsumexp(
method logsumexp (line 1299) | def logsumexp(
method mean (line 1308) | def mean(
method std (line 1317) | def std(
method var (line 1327) | def var(
method split (line 1337) | def split(
method argmin (line 1346) | def argmin(
method argmax (line 1355) | def argmax(
method cumsum (line 1364) | def cumsum(
method cumprod (line 1374) | def cumprod(
method cummax (line 1384) | def cummax(
method cummin (line 1394) | def cummin(
method round (line 1404) | def round(
method diagonal (line 1409) | def diagonal(
method diag (line 1418) | def diag(self, k: int = ..., *, stream: Stream | Device | None = ...) ...
method conj (line 1421) | def conj(self, *, stream: Stream | Device | None = ...) -> array:
method view (line 1424) | def view(self, dtype: Dtype, *, stream: Stream | Device | None = ...) ...
function array_equal (line 1427) | def array_equal(
function as_strided (line 1450) | def as_strided(
function async_eval (line 1486) | def async_eval(*args: MX_ARRAY_TREE) -> None:
function atleast_1d (line 1513) | def atleast_1d(
function atleast_2d (line 1527) | def atleast_2d(
function atleast_3d (line 1541) | def atleast_3d(
function bitwise_and (line 1557) | def bitwise_and(
function bitwise_invert (line 1576) | def bitwise_invert(a: scalar | array, stream: Stream | Device | None = ....
function bitwise_or (line 1589) | def bitwise_or(
function bitwise_xor (line 1608) | def bitwise_xor(
function block_masked_mm (line 1628) | def block_masked_mm(
function broadcast_arrays (line 1667) | def broadcast_arrays(
function broadcast_shapes (line 1682) | def broadcast_shapes(*shapes: Sequence[int]) -> tuple[int]:
function broadcast_to (line 1707) | def broadcast_to(
function ceil (line 1727) | def ceil(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function checkpoint (line 1738) | def checkpoint(fun: Callable) -> Callable: ...
function clear_cache (line 1739) | def clear_cache() -> None:
function clip (line 1746) | def clip(
function compile (line 1770) | def compile(
function concat (line 1808) | def concat(
function concatenate (line 1816) | def concatenate(
function conj (line 1834) | def conj(a: array, *, stream: Stream | Device | None = ...) -> array:
function conjugate (line 1846) | def conjugate(a: array, *, stream: Stream | Device | None = ...) -> array:
function contiguous (line 1858) | def contiguous(
function conv1d (line 1876) | def conv1d(
function conv2d (line 1902) | def conv2d(
function conv3d (line 1934) | def conv3d(
function conv_general (line 1968) | def conv_general(
function conv_transpose1d (line 2009) | def conv_transpose1d(
function conv_transpose2d (line 2037) | def conv_transpose2d(
function conv_transpose3d (line 2075) | def conv_transpose3d(
function convolve (line 2113) | def convolve(
function cos (line 2131) | def cos(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function cosh (line 2142) | def cosh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function cummax (line 2155) | def cummax(
function cummin (line 2180) | def cummin(
function cumprod (line 2205) | def cumprod(
function cumsum (line 2230) | def cumsum(
class custom_function (line 2255) | class custom_function:
method __init__ (line 2324) | def __init__(self, f: Callable) -> None: ...
method __call__ (line 2325) | def __call__(self, *args, **kwargs) -> object: ...
method vjp (line 2326) | def vjp(self, f: Callable):
method jvp (line 2345) | def jvp(self, f: Callable):
method vmap (line 2362) | def vmap(self, f: Callable):
function default_device (line 2382) | def default_device() -> Device:
function default_stream (line 2385) | def default_stream(device: Device | DeviceType) -> Stream:
function degrees (line 2388) | def degrees(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function depends (line 2399) | def depends(inputs: array | Sequence[array], dependencies: array | Seque...
function dequantize (line 2413) | def dequantize(
function diag (line 2453) | def diag(a: array, /, k: int = ..., *, stream: Stream | Device | None = ...
function diagonal (line 2469) | def diagonal(
function disable_compile (line 2500) | def disable_compile() -> None:
function divide (line 2506) | def divide(
function divmod (line 2525) | def divmod(
function einsum (line 2547) | def einsum(subscripts: str, *operands, stream: Stream | Device | None = ...
function einsum_path (line 2559) | def einsum_path(subscripts: str, *operands):
function enable_compile (line 2573) | def enable_compile() -> None:
function equal (line 2579) | def equal(
function erf (line 2598) | def erf(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function erfinv (line 2612) | def erfinv(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function eval (line 2633) | def eval(*args: MX_ARRAY_TREE | None) -> None:
function exp (line 2644) | def exp(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function expand_dims (line 2655) | def expand_dims(
function expm1 (line 2673) | def expm1(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function export_function (line 2686) | def export_function(
function export_to_dot (line 2725) | def export_to_dot(file: object, *args, **kwargs) -> None:
function exporter (line 2747) | def exporter(file: str, fun: Callable, *, shapeless: bool = ...) -> Func...
function eye (line 2775) | def eye(
class finfo (line 2797) | class finfo:
method __init__ (line 2799) | def __init__(self, arg: Dtype, /) -> None: ...
method min (line 2801) | def min(self) -> float:
method max (line 2805) | def max(self) -> float:
method eps (line 2809) | def eps(self) -> float:
method dtype (line 2816) | def dtype(self) -> Dtype:
method __repr__ (line 2819) | def __repr__(self) -> str: ...
function flatten (line 2821) | def flatten(
function floor (line 2861) | def floor(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function floor_divide (line 2872) | def floor_divide(
function full (line 2891) | def full(
function gather_mm (line 2914) | def gather_mm(
function gather_qmm (line 2957) | def gather_qmm(
function get_active_memory (line 3010) | def get_active_memory() -> int:
function get_cache_memory (line 3018) | def get_cache_memory() -> int:
function get_peak_memory (line 3026) | def get_peak_memory() -> int:
function grad (line 3036) | def grad(
function greater (line 3062) | def greater(
function greater_equal (line 3081) | def greater_equal(
function hadamard_transform (line 3100) | def hadamard_transform(
function identity (line 3126) | def identity(
class iinfo (line 3141) | class iinfo:
method __init__ (line 3143) | def __init__(self, arg: Dtype, /) -> None: ...
method min (line 3145) | def min(self) -> int:
method max (line 3149) | def max(self) -> int:
method dtype (line 3153) | def dtype(self) -> Dtype:
method __repr__ (line 3156) | def __repr__(self) -> str: ...
function imag (line 3158) | def imag(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function import_function (line 3169) | def import_function(file: str) -> Callable:
function inner (line 3200) | def inner(a: array, b: array, /, *, stream: Stream | Device | None = ......
function is_available (line 3218) | def is_available(device: Device) -> bool:
function isclose (line 3221) | def isclose(
function isfinite (line 3258) | def isfinite(a: array, stream: Stream | Device | None = ...) -> array:
function isinf (line 3271) | def isinf(a: array, stream: Stream | Device | None = ...) -> array:
function isnan (line 3282) | def isnan(a: array, stream: Stream | Device | None = ...) -> array:
function isneginf (line 3293) | def isneginf(a: array, stream: Stream | Device | None = ...) -> array:
function isposinf (line 3305) | def isposinf(a: array, stream: Stream | Device | None = ...) -> array:
function issubdtype (line 3317) | def issubdtype(arg1: Dtype | DtypeCategory, arg2: Dtype | DtypeCategory)...
function jvp (line 3367) | def jvp(
function kron (line 3390) | def kron(a: array, b: array, *, stream: Stream | Device | None = ...) ->...
function left_shift (line 3414) | def left_shift(
function less (line 3434) | def less(
function less_equal (line 3453) | def less_equal(
function linspace (line 3472) | def linspace(
function load (line 3493) | def load(
function log (line 3528) | def log(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function log10 (line 3539) | def log10(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function log1p (line 3550) | def log1p(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function log2 (line 3561) | def log2(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function logaddexp (line 3572) | def logaddexp(
function logcumsumexp (line 3595) | def logcumsumexp(
function logical_and (line 3620) | def logical_and(
function logical_not (line 3634) | def logical_not(a: array, /, *, stream: Stream | Device | None = ...) ->...
function logical_or (line 3645) | def logical_or(a: array, b: array, /, *, stream: Stream | Device | None ...
function logsumexp (line 3657) | def logsumexp(
function matmul (line 3686) | def matmul(a: array, b: array, /, *, stream: Stream | Device | None = .....
function max (line 3711) | def max(
function maximum (line 3734) | def maximum(
function mean (line 3755) | def mean(
function meshgrid (line 3778) | def meshgrid(
function min (line 3799) | def min(
function minimum (line 3822) | def minimum(
function moveaxis (line 3843) | def moveaxis(
function multiply (line 3863) | def multiply(
function nan_to_num (line 3884) | def nan_to_num(
function negative (line 3909) | def negative(a: array, /, *, stream: Stream | Device | None = ...) -> ar...
function new_stream (line 3920) | def new_stream(device: Device) -> Stream:
function not_equal (line 3925) | def not_equal(
function ones (line 3946) | def ones(
function ones_like (line 3964) | def ones_like(a: array, /, *, stream: Stream | Device | None = ...) -> a...
function outer (line 3975) | def outer(a: array, b: array, /, *, stream: Stream | Device | None = ......
function pad (line 3987) | def pad(
function partition (line 4016) | def partition(
function permute_dims (line 4044) | def permute_dims(
function power (line 4055) | def power(
function prod (line 4076) | def prod(
function put_along_axis (line 4099) | def put_along_axis(
function quantize (line 4126) | def quantize(
function quantized_matmul (line 4198) | def quantized_matmul(
function radians (line 4236) | def radians(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function real (line 4247) | def real(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function reciprocal (line 4258) | def reciprocal(a: array, /, *, stream: Stream | Device | None = ...) -> ...
function remainder (line 4269) | def remainder(
function repeat (line 4289) | def repeat(
function reset_peak_memory (line 4311) | def reset_peak_memory() -> None:
function reshape (line 4314) | def reshape(
function right_shift (line 4330) | def right_shift(
function roll (line 4350) | def roll(
function round (line 4377) | def round(
function rsqrt (line 4399) | def rsqrt(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function save (line 4410) | def save(file: str | pathlib.Path, arr: array) -> None:
function save_gguf (line 4419) | def save_gguf(
function save_safetensors (line 4440) | def save_safetensors(
function savez (line 4460) | def savez(file: str | pathlib.Path, *args, **kwargs):
function savez_compressed (line 4486) | def savez_compressed(file: str | pathlib.Path, *args, **kwargs):
function segmented_mm (line 4497) | def segmented_mm(
function set_cache_limit (line 4513) | def set_cache_limit(limit: int) -> int:
function set_default_device (line 4531) | def set_default_device(device: Device | DeviceType) -> None:
function set_default_stream (line 4534) | def set_default_stream(stream: Stream) -> None:
function set_memory_limit (line 4545) | def set_memory_limit(limit: int) -> int:
function set_wired_limit (line 4564) | def set_wired_limit(limit: int) -> int:
function sigmoid (line 4594) | def sigmoid(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function sign (line 4610) | def sign(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function sin (line 4623) | def sin(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function sinh (line 4634) | def sinh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function slice (line 4645) | def slice(
function slice_update (line 4676) | def slice_update(
function softmax (line 4705) | def softmax(
function sort (line 4731) | def sort(
function split (line 4751) | def split(
function sqrt (line 4775) | def sqrt(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function square (line 4786) | def square(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function squeeze (line 4797) | def squeeze(
function stack (line 4816) | def stack(
function std (line 4835) | def std(
function stop_gradient (line 4861) | def stop_gradient(a: array, /, *, stream: Stream | Device | None = ...) ...
function stream (line 4877) | def stream(s: Stream | Device) -> StreamContext:
function subtract (line 4899) | def subtract(
function sum (line 4918) | def sum(
function swapaxes (line 4941) | def swapaxes(
function synchronize (line 4956) | def synchronize(stream: Stream | None = ...) -> None:
function take (line 4966) | def take(
function take_along_axis (line 4993) | def take_along_axis(
function tan (line 5016) | def tan(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function tanh (line 5027) | def tanh(a: array, /, *, stream: Stream | Device | None = ...) -> array:
function tensordot (line 5038) | def tensordot(
function tile (line 5062) | def tile(
function topk (line 5080) | def topk(
function trace (line 5104) | def trace(
function transpose (line 5132) | def transpose(
function tri (line 5151) | def tri(
function tril (line 5173) | def tril(x: array, k: int, *, stream: Stream | Device | None = ...) -> a...
function triu (line 5186) | def triu(x: array, k: int, *, stream: Stream | Device | None = ...) -> a...
function unflatten (line 5204) | def unflatten(
function value_and_grad (line 5235) | def value_and_grad(
function var (line 5290) | def var(
function view (line 5316) | def view(
function vjp (line 5337) | def vjp(
function vmap (line 5360) | def vmap(fun: Callable, in_axes: object = ..., out_axes: object = ...) -...
function where (line 5381) | def where(
function zeros (line 5405) | def zeros(
function zeros_like (line 5423) | def zeros_like(a: array, /, *, stream: Stream | Device | None = ...) -> ...
FILE: .mlx_typings/mlx/core/cuda/__init__.pyi
function is_available (line 1) | def is_available() -> bool:
FILE: .mlx_typings/mlx/core/distributed/__init__.pyi
class Group (line 5) | class Group:
method rank (line 10) | def rank(self) -> int:
method size (line 13) | def size(self) -> int:
method split (line 16) | def split(self, color: int, key: int = ...) -> Group:
function all_gather (line 31) | def all_gather(
function all_max (line 52) | def all_max(
function all_min (line 72) | def all_min(
function all_sum (line 92) | def all_sum(
function init (line 112) | def init(strict: bool = ..., backend: str = ...) -> Group:
function is_available (line 138) | def is_available() -> bool:
function recv (line 141) | def recv(
function recv_like (line 167) | def recv_like(
function send (line 194) | def send(
FILE: .mlx_typings/mlx/core/metal/__init__.pyi
function clear_cache (line 1) | def clear_cache() -> None: ...
function device_info (line 2) | def device_info() -> dict[str, str | int]:
function get_active_memory (line 18) | def get_active_memory() -> int: ...
function get_cache_memory (line 19) | def get_cache_memory() -> int: ...
function get_peak_memory (line 20) | def get_peak_memory() -> int: ...
function is_available (line 21) | def is_available() -> bool:
function reset_peak_memory (line 24) | def reset_peak_memory() -> None: ...
function set_cache_limit (line 25) | def set_cache_limit(limit: int) -> int: ...
function set_memory_limit (line 26) | def set_memory_limit(limit: int) -> int: ...
function set_wired_limit (line 27) | def set_wired_limit(limit: int) -> int: ...
function start_capture (line 28) | def start_capture(path: str) -> None:
function stop_capture (line 37) | def stop_capture() -> None:
FILE: .mlx_typings/mlx/core/random/__init__.pyi
function bernoulli (line 6) | def bernoulli(
function categorical (line 30) | def categorical(
function gumbel (line 62) | def gumbel(
function key (line 86) | def key(seed: int) -> array:
function laplace (line 97) | def laplace(
function multivariate_normal (line 120) | def multivariate_normal(
function normal (line 151) | def normal(
function permutation (line 180) | def permutation(
function randint (line 201) | def randint(
function seed (line 227) | def seed(seed: int) -> None:
function split (line 235) | def split(key: array, num: int = ..., stream: Stream | Device | None = ....
function truncated_normal (line 247) | def truncated_normal(
function uniform (line 275) | def uniform(
FILE: .mlx_typings/mlx/nn/init.pyi
function constant (line 9) | def constant(value: float, dtype: mx.Dtype = ...) -> Callable[[mx.array]...
function normal (line 29) | def normal(
function uniform (line 54) | def uniform(
function identity (line 79) | def identity(dtype: mx.Dtype = ...) -> Callable[[mx.array], mx.array]:
function glorot_normal (line 98) | def glorot_normal(dtype: mx.Dtype = ...) -> Callable[[mx.array, float], ...
function glorot_uniform (line 131) | def glorot_uniform(dtype: mx.Dtype = ...) -> Callable[[mx.array, float],...
function he_normal (line 164) | def he_normal(
function he_uniform (line 203) | def he_uniform(
function sparse (line 244) | def sparse(
function orthogonal (line 271) | def orthogonal(
FILE: .mlx_typings/mlx/nn/layers/activations.pyi
function sigmoid (line 12) | def sigmoid(x: mx.array) -> mx.array:
function relu (line 20) | def relu(x: mx.array) -> mx.array:
function relu2 (line 27) | def relu2(x: mx.array) -> mx.array:
function relu6 (line 34) | def relu6(x: mx.array) -> mx.array:
function leaky_relu (line 41) | def leaky_relu(x: mx.array, negative_slope=...) -> mx.array:
function log_softmax (line 48) | def log_softmax(x: mx.array, axis=...):
function elu (line 55) | def elu(x: mx.array, alpha=...) -> mx.array:
function softmax (line 62) | def softmax(x: mx.array, axis=...) -> mx.array:
function softplus (line 69) | def softplus(x: mx.array) -> mx.array:
function softsign (line 76) | def softsign(x: mx.array) -> mx.array:
function softshrink (line 83) | def softshrink(x: mx.array, lambd: float = ...) -> mx.array:
function celu (line 95) | def celu(x: mx.array, alpha=...) -> mx.array:
function silu (line 103) | def silu(x: mx.array) -> mx.array:
function log_sigmoid (line 111) | def log_sigmoid(x: mx.array) -> mx.array:
function gelu (line 118) | def gelu(x: mx.array) -> mx.array:
function gelu_approx (line 131) | def gelu_approx(x: mx.array) -> mx.array:
function gelu_fast_approx (line 146) | def gelu_fast_approx(x: mx.array) -> mx.array:
function glu (line 165) | def glu(x: mx.array, axis: int = ...) -> mx.array:
function step (line 179) | def step(x: mx.array, threshold: float = ...) -> mx.array:
function selu (line 196) | def selu(x: mx.array) -> mx.array:
function prelu (line 211) | def prelu(x: mx.array, alpha: mx.array) -> mx.array:
function mish (line 221) | def mish(x: mx.array) -> mx.array:
function hardswish (line 234) | def hardswish(x: mx.array) -> mx.array:
function hard_tanh (line 242) | def hard_tanh(x: mx.array, min_val=..., max_val=...) -> mx.array:
function hard_shrink (line 249) | def hard_shrink(x: mx.array, lambd=...) -> mx.array:
function softmin (line 261) | def softmin(x: mx.array, axis=...) -> mx.array:
function tanh (line 267) | def tanh(x: mx.array) -> mx.array:
class GLU (line 273) | class GLU(Module):
method __init__ (line 285) | def __init__(self, axis: int = ...) -> None: ...
method __call__ (line 286) | def __call__(self, x) -> Any: ...
class Sigmoid (line 289) | class Sigmoid(Module):
class Mish (line 297) | class Mish(Module):
class ReLU (line 308) | class ReLU(Module):
class ReLU2 (line 316) | class ReLU2(Module):
class ReLU6 (line 323) | class ReLU6(Module):
class LeakyReLU (line 329) | class LeakyReLU(Module):
method __init__ (line 337) | def __init__(self, negative_slope=...) -> None: ...
method __call__ (line 338) | def __call__(self, x): ...
class ELU (line 340) | class ELU(Module):
method __init__ (line 349) | def __init__(self, alpha=...) -> None: ...
method __call__ (line 350) | def __call__(self, x): ...
class Softmax (line 353) | class Softmax(Module):
class Softplus (line 360) | class Softplus(Module):
class Softsign (line 367) | class Softsign(Module):
class Softshrink (line 373) | class Softshrink(Module):
method __init__ (line 381) | def __init__(self, lambd=...) -> None: ...
method __call__ (line 382) | def __call__(self, x): ...
class CELU (line 384) | class CELU(Module):
method __init__ (line 394) | def __init__(self, alpha=...) -> None: ...
method __call__ (line 395) | def __call__(self, x): ...
class SiLU (line 398) | class SiLU(Module):
class LogSoftmax (line 405) | class LogSoftmax(Module):
class LogSigmoid (line 412) | class LogSigmoid(Module):
class PReLU (line 418) | class PReLU(Module):
method __init__ (line 429) | def __init__(self, num_parameters=..., init=...) -> None: ...
method __call__ (line 430) | def __call__(self, x: mx.array): ...
class GELU (line 432) | class GELU(Module):
method __init__ (line 459) | def __init__(self, approx=...) -> None: ...
method __call__ (line 460) | def __call__(self, x): ...
class Tanh (line 463) | class Tanh(Module):
class Hardswish (line 470) | class Hardswish(Module):
class Step (line 476) | class Step(Module):
method __init__ (line 491) | def __init__(self, threshold: float = ...) -> None: ...
method __call__ (line 492) | def __call__(self, x: mx.array): ...
class SELU (line 495) | class SELU(Module):
class HardTanh (line 502) | class HardTanh(Module):
class HardShrink (line 509) | class HardShrink(Module):
class Softmin (line 519) | class Softmin(Module):
FILE: .mlx_typings/mlx/nn/layers/base.pyi
class Module (line 9) | class Module(dict):
method __init__ (line 57) | def __init__(self) -> None:
method training (line 61) | def training(self): # -> bool:
method state (line 65) | def state(self): # -> Self:
method __repr__ (line 76) | def __repr__(self): # -> str:
method __getattr__ (line 78) | def __getattr__(self, key: str): # -> None:
method __setattr__ (line 80) | def __setattr__(self, key: str, val: Any): # -> None:
method __delattr__ (line 82) | def __delattr__(self, name): # -> None:
method load_weights (line 84) | def load_weights(
method save_weights (line 137) | def save_weights(self, file: str): # -> None:
method is_module (line 145) | def is_module(value): # -> bool:
method valid_child_filter (line 148) | def valid_child_filter(module, key, value): # -> bool:
method valid_parameter_filter (line 151) | def valid_parameter_filter(module, key, value): # -> bool:
method trainable_parameter_filter (line 154) | def trainable_parameter_filter(module, key, value): # -> bool:
method filter_and_map (line 156) | def filter_and_map(
method parameters (line 181) | def parameters(
method trainable_parameters (line 187) | def trainable_parameters(
method children (line 193) | def children(
method leaf_modules (line 198) | def leaf_modules(
method update (line 203) | def update(self, parameters: dict[str, Any], strict: bool = ...) -> Mo...
method apply (line 224) | def apply(
method update_modules (line 244) | def update_modules(self, modules: dict, strict: bool = ...) -> Module:
method apply_to_modules (line 265) | def apply_to_modules(self, apply_fn: Callable[[str, Module], Any]) -> ...
method modules (line 276) | def modules(self): # -> list[Any]:
method named_modules (line 283) | def named_modules(self): # -> list[Any]:
method freeze (line 291) | def freeze(
method unfreeze (line 326) | def unfreeze(
method train (line 362) | def train(self, mode: bool = ...) -> Module:
method eval (line 376) | def eval(self) -> Module:
method set_dtype (line 382) | def set_dtype(
FILE: .mlx_typings/mlx/nn/layers/containers.pyi
class Sequential (line 10) | class Sequential(Module):
method __init__ (line 20) | def __init__(self, *modules: Module | Callable[[mx.array], mx.array]) ...
method __call__ (line 21) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/layers/convolution.pyi
class Conv1d (line 10) | class Conv1d(Module):
method __init__ (line 37) | def __init__(
method __call__ (line 48) | def __call__(self, x: mx.array) -> mx.array: ...
class Conv2d (line 50) | class Conv2d(Module):
method __init__ (line 74) | def __init__(
method __call__ (line 85) | def __call__(self, x) -> mx.array: ...
class Conv3d (line 87) | class Conv3d(Module):
method __init__ (line 110) | def __init__(
method __call__ (line 120) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/layers/convolution_transpose.pyi
class ConvTranspose1d (line 10) | class ConvTranspose1d(Module):
method __init__ (line 33) | def __init__(
method __call__ (line 44) | def __call__(self, x: mx.array) -> mx.array: ...
class ConvTranspose2d (line 46) | class ConvTranspose2d(Module):
method __init__ (line 70) | def __init__(
method __call__ (line 81) | def __call__(self, x: mx.array) -> mx.array: ...
class ConvTranspose3d (line 83) | class ConvTranspose3d(Module):
method __init__ (line 108) | def __init__(
method __call__ (line 119) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/layers/distributed.pyi
function sum_gradients (line 13) | def sum_gradients(
function shard_inplace (line 17) | def shard_inplace(
function shard_linear (line 48) | def shard_linear(
class AllToShardedLinear (line 72) | class AllToShardedLinear(Module):
method __init__ (line 87) | def __init__(
method __call__ (line 94) | def __call__(self, x: mx.array) -> mx.array: ...
method from_linear (line 96) | def from_linear(
class ShardedToAllLinear (line 104) | class ShardedToAllLinear(Module):
method __init__ (line 122) | def __init__(
method __call__ (line 129) | def __call__(self, x: mx.array) -> mx.array: ...
method from_linear (line 131) | def from_linear(
class QuantizedAllToShardedLinear (line 139) | class QuantizedAllToShardedLinear(Module):
method __init__ (line 160) | def __init__(
method unfreeze (line 169) | def unfreeze(self, *args, **kwargs) -> None:
method __call__ (line 173) | def __call__(self, x: mx.array) -> mx.array: ...
method from_quantized_linear (line 175) | def from_quantized_linear(
class QuantizedShardedToAllLinear (line 183) | class QuantizedShardedToAllLinear(Module):
method __init__ (line 206) | def __init__(
method unfreeze (line 215) | def unfreeze(self, *args, **kwargs): # -> None:
method __call__ (line 219) | def __call__(self, x: mx.array) -> mx.array: ...
method from_quantized_linear (line 221) | def from_quantized_linear(
FILE: .mlx_typings/mlx/nn/layers/dropout.pyi
class Dropout (line 8) | class Dropout(Module):
method __init__ (line 18) | def __init__(self, p: float = ...) -> None: ...
method __call__ (line 19) | def __call__(self, x: mx.array) -> mx.array: ...
class Dropout2d (line 21) | class Dropout2d(Module):
method __init__ (line 43) | def __init__(self, p: float = ...) -> None: ...
method __call__ (line 44) | def __call__(self, x: mx.array) -> mx.array: ...
class Dropout3d (line 46) | class Dropout3d(Module):
method __init__ (line 64) | def __init__(self, p: float = ...) -> None: ...
method __call__ (line 65) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/layers/embedding.pyi
class Embedding (line 10) | class Embedding(Module):
method __init__ (line 21) | def __init__(self, num_embeddings: int, dims: int) -> None: ...
method __call__ (line 22) | def __call__(self, x: mx.array) -> mx.array: ...
method as_linear (line 23) | def as_linear(self, x: mx.array) -> mx.array:
method to_quantized (line 31) | def to_quantized(
FILE: .mlx_typings/mlx/nn/layers/linear.pyi
class Identity (line 12) | class Identity(Module):
method __init__ (line 19) | def __init__(self, *args: Any, **kwargs: Any) -> None: ...
method __call__ (line 20) | def __call__(self, x: mx.array) -> mx.array: ...
class Linear (line 22) | class Linear(Module):
method __init__ (line 47) | def __init__(self, input_dims: int, output_dims: int, bias: bool = ......
method __call__ (line 48) | def __call__(self, x: mx.array) -> mx.array: ...
method to_quantized (line 49) | def to_quantized(
class Bilinear (line 54) | class Bilinear(Module):
method __init__ (line 77) | def __init__(
method __call__ (line 80) | def __call__(self, x1: mx.array, x2: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/layers/normalization.pyi
class InstanceNorm (line 8) | class InstanceNorm(Module):
method __init__ (line 40) | def __init__(self, dims: int, eps: float = ..., affine: bool = ...) ->...
method __call__ (line 41) | def __call__(self, x: mx.array) -> mx.array: ...
class LayerNorm (line 43) | class LayerNorm(Module):
method __init__ (line 66) | def __init__(
method __call__ (line 69) | def __call__(self, x) -> mx.array: ...
class RMSNorm (line 71) | class RMSNorm(Module):
method __init__ (line 94) | def __init__(self, dims: int, eps: float = ...) -> None: ...
method __call__ (line 95) | def __call__(self, x) -> mx.array: ...
class GroupNorm (line 97) | class GroupNorm(Module):
method __init__ (line 126) | def __init__(
method __call__ (line 134) | def __call__(self, x) -> mx.array: ...
class BatchNorm (line 136) | class BatchNorm(Module):
method __init__ (line 176) | def __init__(
method unfreeze (line 184) | def unfreeze(self, *args, **kwargs): # -> None:
method __call__ (line 188) | def __call__(self, x: mx.array) -> mx.array:
FILE: .mlx_typings/mlx/nn/layers/pooling.pyi
class _Pool (line 10) | class _Pool(Module):
method __init__ (line 11) | def __init__(
method __call__ (line 14) | def __call__(self, x: mx.array) -> mx.array: ...
class _Pool1d (line 16) | class _Pool1d(_Pool):
method __init__ (line 17) | def __init__(
class _Pool2d (line 26) | class _Pool2d(_Pool):
method __init__ (line 27) | def __init__(
class _Pool3d (line 36) | class _Pool3d(_Pool):
method __init__ (line 37) | def __init__(
class MaxPool1d (line 46) | class MaxPool1d(_Pool1d):
method __init__ (line 67) | def __init__(
class AvgPool1d (line 74) | class AvgPool1d(_Pool1d):
method __init__ (line 95) | def __init__(
class MaxPool2d (line 102) | class MaxPool2d(_Pool2d):
method __init__ (line 130) | def __init__(
class AvgPool2d (line 137) | class AvgPool2d(_Pool2d):
method __init__ (line 165) | def __init__(
class MaxPool3d (line 172) | class MaxPool3d(_Pool3d):
method __init__ (line 201) | def __init__(
class AvgPool3d (line 208) | class AvgPool3d(_Pool3d):
method __init__ (line 237) | def __init__(
FILE: .mlx_typings/mlx/nn/layers/positional_encoding.pyi
class RoPE (line 10) | class RoPE(Module):
method __init__ (line 29) | def __init__(
method __call__ (line 32) | def __call__(self, x, offset: int = ...) -> mx.array: ...
class SinusoidalPositionalEncoding (line 34) | class SinusoidalPositionalEncoding(Module):
method __init__ (line 53) | def __init__(
method __call__ (line 62) | def __call__(self, x: mx.array) -> mx.array: ...
class ALiBi (line 64) | class ALiBi(Module):
method create_alibi_matrix (line 68) | def create_alibi_matrix(
method create_alibi_slope (line 77) | def create_alibi_slope(num_heads: int) -> mx.array: ...
method __call__ (line 78) | def __call__(
FILE: .mlx_typings/mlx/nn/layers/quantized.pyi
function quantize (line 10) | def quantize(
class QuantizedEmbedding (line 40) | class QuantizedEmbedding(Module):
method __init__ (line 58) | def __init__(
method __call__ (line 66) | def __call__(self, x: mx.array) -> mx.array: ...
method as_linear (line 67) | def as_linear(self, x: mx.array) -> mx.array:
method from_embedding (line 76) | def from_embedding(
class QuantizedLinear (line 85) | class QuantizedLinear(Module):
method __init__ (line 107) | def __init__(
method __call__ (line 116) | def __call__(self, x: mx.array) -> mx.array: ...
method from_linear (line 118) | def from_linear(
FILE: .mlx_typings/mlx/nn/layers/recurrent.pyi
class RNN (line 10) | class RNN(Module):
method __init__ (line 37) | def __init__(
method __call__ (line 44) | def __call__(self, x: mx.array, hidden=...) -> mx.array: ...
class GRU (line 46) | class GRU(Module):
method __init__ (line 75) | def __init__(self, input_size: int, hidden_size: int, bias: bool = ......
method __call__ (line 76) | def __call__(self, x: mx.array, hidden=...) -> mx.array: ...
class LSTM (line 78) | class LSTM(Module):
method __init__ (line 110) | def __init__(self, input_size: int, hidden_size: int, bias: bool = ......
method __call__ (line 111) | def __call__(
FILE: .mlx_typings/mlx/nn/layers/transformer.pyi
class MultiHeadAttention (line 10) | class MultiHeadAttention(Module):
method __init__ (line 42) | def __init__(
method __call__ (line 53) | def __call__(
method create_additive_causal_mask (line 57) | def create_additive_causal_mask(N: int, dtype: mx.Dtype = ...) -> mx.a...
class TransformerEncoderLayer (line 59) | class TransformerEncoderLayer(Module):
method __init__ (line 60) | def __init__(
method __call__ (line 69) | def __call__(self, x: mx.array, mask: mx.array) -> mx.array: ...
class TransformerEncoder (line 71) | class TransformerEncoder(Module):
method __init__ (line 72) | def __init__(
method __call__ (line 83) | def __call__(self, x: mx.array, mask: mx.array) -> mx.array: ...
class TransformerDecoderLayer (line 85) | class TransformerDecoderLayer(Module):
method __init__ (line 86) | def __init__(
method __call__ (line 95) | def __call__(self, x: mx.array, memory, x_mask, memory_mask) -> mx.arr...
class TransformerDecoder (line 97) | class TransformerDecoder(Module):
method __init__ (line 98) | def __init__(
method __call__ (line 109) | def __call__(self, x: mx.array, memory, x_mask, memory_mask) -> mx.arr...
class Transformer (line 111) | class Transformer(Module):
method __init__ (line 151) | def __init__(
method __call__ (line 165) | def __call__(
FILE: .mlx_typings/mlx/nn/layers/upsample.pyi
function upsample_nearest (line 10) | def upsample_nearest(x: mx.array, scale_factor: Tuple) -> mx.array: ...
function upsample_linear (line 11) | def upsample_linear(
function upsample_cubic (line 15) | def upsample_cubic(
class Upsample (line 20) | class Upsample(Module):
method __init__ (line 81) | def __init__(
method __call__ (line 87) | def __call__(self, x: mx.array) -> mx.array: ...
FILE: .mlx_typings/mlx/nn/losses.pyi
function cross_entropy (line 11) | def cross_entropy(
function binary_cross_entropy (line 56) | def binary_cross_entropy(
function l1_loss (line 99) | def l1_loss(
function mse_loss (line 115) | def mse_loss(
function nll_loss (line 131) | def nll_loss(
function gaussian_nll_loss (line 148) | def gaussian_nll_loss(
function kl_div_loss (line 184) | def kl_div_loss(
function smooth_l1_loss (line 207) | def smooth_l1_loss(
function triplet_loss (line 241) | def triplet_loss(
function hinge_loss (line 275) | def hinge_loss(
function huber_loss (line 296) | def huber_loss(
function log_cosh_loss (line 322) | def log_cosh_loss(
function cosine_similarity_loss (line 349) | def cosine_similarity_loss(
function margin_ranking_loss (line 378) | def margin_ranking_loss(
FILE: .mlx_typings/mlx/nn/utils.pyi
function value_and_grad (line 11) | def value_and_grad(
function checkpoint (line 28) | def checkpoint(
function average_gradients (line 46) | def average_gradients(
FILE: .mlx_typings/mlx/utils.pyi
function tree_map (line 9) | def tree_map(
function tree_map_with_path (line 49) | def tree_map_with_path(
function tree_flatten (line 83) | def tree_flatten(
function tree_unflatten (line 124) | def tree_unflatten(tree: list[tuple[str, Any]] | dict[str, Any]) -> Any:
function tree_reduce (line 147) | def tree_reduce(
function tree_merge (line 178) | def tree_merge(
FILE: .mlx_typings/mlx_lm/convert.pyi
function mixed_quant_predicate_builder (line 10) | def mixed_quant_predicate_builder(
function convert (line 17) | def convert(
function configure_parser (line 34) | def configure_parser() -> argparse.ArgumentParser:
function main (line 42) | def main(): # -> None:
FILE: .mlx_typings/mlx_lm/generate.pyi
function str2bool (line 28) | def str2bool(string): # -> bool:
function setup_arg_parser (line 30) | def setup_arg_parser(): # -> ArgumentParser:
function wired_limit (line 36) | def wired_limit(
class GenerationResponse (line 47) | class GenerationResponse:
function maybe_quantize_kv_cache (line 75) | def maybe_quantize_kv_cache(
function generate_step (line 81) | def generate_step(
function speculative_generate_step (line 129) | def speculative_generate_step(
function stream_generate (line 174) | def stream_generate(
function generate (line 203) | def generate(
class BatchStats (line 223) | class BatchStats:
class BatchResponse (line 246) | class BatchResponse:
function _left_pad_prompts (line 259) | def _left_pad_prompts(prompts: Any, max_length: Optional[int] = ...) -> ...
function _right_pad_prompts (line 260) | def _right_pad_prompts(prompts: Any, max_length: Optional[int] = ...) ->...
function _make_cache (line 261) | def _make_cache(
function _merge_caches (line 264) | def _merge_caches(caches: Any) -> List[Any]: ...
class Batch (line 266) | class Batch:
method __len__ (line 276) | def __len__(self) -> int: ...
method filter (line 277) | def filter(self, keep_idx: List[int]) -> None: ...
method extend (line 278) | def extend(self, other: "Batch") -> None: ...
method extract_cache (line 279) | def extract_cache(self, idx: int) -> List[Any]: ...
class BatchGenerator (line 281) | class BatchGenerator:
class Response (line 291) | class Response:
method __init__ (line 298) | def __init__(
method close (line 315) | def close(self) -> None: ...
method insert (line 316) | def insert(
method remove (line 324) | def remove(
method stats (line 327) | def stats(self) -> BatchStats: ...
method next (line 328) | def next(self) -> List[Response]: ...
method _process_prompts (line 329) | def _process_prompts(self, prompts: List[Any]) -> Batch: ...
method _step (line 330) | def _step(
function batch_generate (line 339) | def batch_generate(
function main (line 362) | def main(): # -> None:
FILE: .mlx_typings/mlx_lm/models/base.pyi
class BaseModelArgs (line 11) | class BaseModelArgs:
method from_dict (line 13) | def from_dict(cls, params): # -> Self:
function create_causal_mask (line 16) | def create_causal_mask(
function create_attention_mask (line 24) | def create_attention_mask(
function create_ssm_mask (line 28) | def create_ssm_mask(h, cache=...): # -> None:
function quantized_scaled_dot_product_attention (line 30) | def quantized_scaled_dot_product_attention(
function scaled_dot_product_attention (line 39) | def scaled_dot_product_attention(
FILE: .mlx_typings/mlx_lm/models/bitlinear_layers.pyi
function bitnet_quantize (line 7) | def bitnet_quantize(model, quantization_config: dict): ...
function make_bitlinear_kernel (line 8) | def make_bitlinear_kernel():
class BitLinear (line 17) | class BitLinear(nn.Module):
method __init__ (line 21) | def __init__(
method execute_matmul_kernel (line 24) | def execute_matmul_kernel(self, x, packed_weights): ...
method __call__ (line 25) | def __call__(self, x): # -> array:
FILE: .mlx_typings/mlx_lm/models/cache.pyi
class Cache (line 11) | class Cache(Protocol):
method update_and_fetch (line 15) | def update_and_fetch(
method state (line 19) | def state(self) -> tuple[mx.array | None, mx.array | None]: ...
method state (line 21) | def state(self, v) -> None: ...
function make_prompt_cache (line 23) | def make_prompt_cache(
function save_prompt_cache (line 39) | def save_prompt_cache(
function load_prompt_cache (line 52) | def load_prompt_cache(file_name: str, return_metadata=...) -> array:
function can_trim_prompt_cache (line 66) | def can_trim_prompt_cache(cache: List[Cache]) -> bool:
function trim_prompt_cache (line 71) | def trim_prompt_cache(cache: List[Cache], num_tokens: int) -> List[Cache]:
function create_attention_mask (line 86) | def create_attention_mask(
class _BaseCache (line 90) | class _BaseCache(Cache):
method state (line 95) | def state(self) -> tuple[mx.array | None, mx.array | None]: ...
method state (line 97) | def state(self, v) -> None: ...
method meta_state (line 99) | def meta_state(self) -> Literal[""]: ...
method meta_state (line 101) | def meta_state(self, v) -> None: ...
method trim (line 102) | def trim(self, n: int) -> int: ...
method is_trimmable (line 103) | def is_trimmable(self) -> Literal[False]: ...
method from_state (line 105) | def from_state(cls, state, meta_state) -> Self: ...
class ConcatenateKVCache (line 107) | class ConcatenateKVCache(_BaseCache):
method __init__ (line 114) | def __init__(self) -> None: ...
method update_and_fetch (line 115) | def update_and_fetch(self, keys, values): # -> tuple[Any | array, Any...
method state (line 118) | def state(self) -> tuple[mx.array | None, mx.array | None]: ...
method state (line 120) | def state(self, v): # -> None:
method is_trimmable (line 122) | def is_trimmable(self): # -> Literal[True]:
method trim (line 124) | def trim(self, n: int) -> int: ...
method make_mask (line 125) | def make_mask(self, *args, **kwargs): # -> array | Literal['causal'] ...
class QuantizedKVCache (line 128) | class QuantizedKVCache(_BaseCache):
method __init__ (line 130) | def __init__(self, group_size: int = ..., bits: int = ...) -> None: ...
method update_and_fetch (line 131) | def update_and_fetch(self, keys, values): # -> Any:
method state (line 134) | def state(self) -> tuple[mx.array | None, mx.array | None]: ...
method state (line 136) | def state(self, v): # -> None:
method meta_state (line 139) | def meta_state(self): # -> tuple[str, ...]:
method meta_state (line 142) | def meta_state(self, v): # -> None:
method is_trimmable (line 144) | def is_trimmable(self): # -> Literal[True]:
method trim (line 146) | def trim(self, n: int) -> int: ...
method make_mask (line 147) | def make_mask(self, *args, **kwargs): # -> array | Literal['causal'] ...
class KVCache (line 150) | class KVCache(_BaseCache):
method __init__ (line 152) | def __init__(self) -> None: ...
method update_and_fetch (line 153) | def update_and_fetch(self, keys, values): # -> tuple[array | Any, arr...
method state (line 156) | def state(
method state (line 160) | def state(self, v) -> None: ...
method is_trimmable (line 161) | def is_trimmable(self): # -> Literal[True]:
method trim (line 163) | def trim(self, n: int) -> int: ...
method to_quantized (line 164) | def to_quantized(
method make_mask (line 167) | def make_mask(
class RotatingKVCache (line 171) | class RotatingKVCache(_BaseCache):
method __init__ (line 178) | def __init__(self, max_size, keep=...) -> None: ...
method _trim (line 179) | def _trim(
method update_and_fetch (line 182) | def update_and_fetch(
method state (line 187) | def state(
method state (line 191) | def state(self, v): # -> None:
method meta_state (line 194) | def meta_state(self): # -> tuple[str, ...]:
method meta_state (line 197) | def meta_state(self, v): # -> None:
method is_trimmable (line 199) | def is_trimmable(self): # -> bool:
method trim (line 201) | def trim(self, n: int) -> int: ...
method to_quantized (line 202) | def to_quantized(
method make_mask (line 205) | def make_mask(
class ArraysCache (line 210) | class ArraysCache(_BaseCache):
method __init__ (line 211) | def __init__(self, size, left_padding: Optional[List[int]] = ...) -> N...
method __setitem__ (line 212) | def __setitem__(self, idx, value): # -> None:
method __getitem__ (line 214) | def __getitem__(self, idx): ...
method state (line 216) | def state(self) -> tuple[mx.array | None, mx.array | None]: ...
method state (line 218) | def state(self, v): # -> None:
method filter (line 220) | def filter(self, batch_indices): # -> None:
method extend (line 225) | def extend(self, other): # -> None:
method make_mask (line 230) | def make_mask(self, N: int) -> mx.array | None: ...
class MambaCache (line 232) | class MambaCache(ArraysCache):
method __init__ (line 233) | def __init__(self, left_padding: Optional[List[int]] = ...) -> None: ...
class ChunkedKVCache (line 235) | class ChunkedKVCache(KVCache):
method __init__ (line 236) | def __init__(self, chunk_size) -> None: ...
method maybe_trim_front (line 237) | def maybe_trim_front(self): # -> None:
method update_and_fetch (line 239) | def update_and_fetch(self, keys, values): # -> tuple[array, array]:
method trim (line 241) | def trim(self, n: int) -> int: ...
method meta_state (line 243) | def meta_state(self): # -> tuple[str, ...]:
method meta_state (line 246) | def meta_state(self, v): # -> None:
class CacheList (line 249) | class CacheList(_BaseCache):
method __init__ (line 250) | def __init__(self, *caches) -> None: ...
method __getitem__ (line 251) | def __getitem__(self, idx): ...
method is_trimmable (line 252) | def is_trimmable(self): # -> bool:
method trim (line 254) | def trim(self, n: int) -> int: ...
method state (line 256) | def state(self) -> list[tuple[mx.array | None, mx.array | None]]: ...
method state (line 258) | def state(self, v): # -> None:
method filter (line 260) | def filter(self, batch_indices): # -> None:
method extend (line 265) | def extend(self, other): # -> None:
class BatchKVCache (line 270) | class BatchKVCache(_BaseCache):
method __init__ (line 272) | def __init__(self, left_padding: List[int]) -> None:
method update_and_fetch (line 292) | def update_and_fetch(self, keys, values): # -> tuple[array | Any, arr...
method state (line 295) | def state(
method state (line 300) | def state(self, v): # -> None:
method is_trimmable (line 302) | def is_trimmable(self): # -> Literal[True]:
method trim (line 304) | def trim(self, n): # -> int | float:
method make_mask (line 306) | def make_mask(self, N: int, return_array: bool = ..., **kwargs): # ->...
method filter (line 308) | def filter(self, batch_indices): # -> None:
method extend (line 313) | def extend(self, other): # -> None:
class BatchRotatingKVCache (line 318) | class BatchRotatingKVCache(_BaseCache):
method __init__ (line 320) | def __init__(self, max_size, left_padding: List[int]) -> None: ...
method update_and_fetch (line 321) | def update_and_fetch(
method state (line 326) | def state(
method state (line 331) | def state(self, v): # -> None:
method meta_state (line 334) | def meta_state(self): # -> tuple[str, ...]:
method meta_state (line 337) | def meta_state(self, v): # -> None:
method is_trimmable (line 339) | def is_trimmable(self): # -> bool:
method trim (line 341) | def trim(self, n): # -> int:
method to_quantized (line 343) | def to_quantized(
method make_mask (line 346) | def make_mask(
method filter (line 350) | def filter(self, batch_indices): # -> None:
method extend (line 355) | def extend(self, other): # -> None:
FILE: .mlx_typings/mlx_lm/models/deepseek_v3.pyi
class ModelArgs (line 14) | class ModelArgs(BaseModelArgs):
class DeepseekV3Attention (line 45) | class DeepseekV3Attention(nn.Module):
method __init__ (line 71) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 72) | def __call__(
class DeepseekV3MLP (line 79) | class DeepseekV3MLP(nn.Module):
method __init__ (line 87) | def __init__(
method __call__ (line 93) | def __call__(self, x: mx.array) -> mx.array: ...
class MoEGate (line 95) | class MoEGate(nn.Module):
method __init__ (line 106) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 107) | def __call__(self, x: mx.array) -> tuple[mx.array, mx.array]: ...
class DeepseekV3MoE (line 109) | class DeepseekV3MoE(nn.Module):
method __init__ (line 117) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 118) | def __call__(self, x: mx.array) -> mx.array: ...
class DeepseekV3DecoderLayer (line 120) | class DeepseekV3DecoderLayer(nn.Module):
method __init__ (line 126) | def __init__(self, config: ModelArgs, layer_idx: int) -> None: ...
method __call__ (line 127) | def __call__(
class DeepseekV3Model (line 134) | class DeepseekV3Model(nn.Module):
method __init__ (line 140) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 141) | def __call__(
class Model (line 147) | class Model(nn.Module):
method __init__ (line 152) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 153) | def __call__(
method sanitize (line 158) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
method layers (line 160) | def layers(self) -> list[DeepseekV3DecoderLayer]: ...
FILE: .mlx_typings/mlx_lm/models/glm4_moe.pyi
class ModelArgs (line 11) | class ModelArgs(BaseModelArgs):
class Attention (line 40) | class Attention(nn.Module):
method __init__ (line 53) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 54) | def __call__(
class MLP (line 61) | class MLP(nn.Module):
method __init__ (line 69) | def __init__(
method __call__ (line 75) | def __call__(self, x: mx.array) -> mx.array: ...
class MoEGate (line 77) | class MoEGate(nn.Module):
method __init__ (line 88) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 89) | def __call__(self, x: mx.array) -> tuple[mx.array, mx.array]: ...
class MoE (line 91) | class MoE(nn.Module):
method __init__ (line 99) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 100) | def __call__(self, x: mx.array) -> mx.array: ...
class DecoderLayer (line 102) | class DecoderLayer(nn.Module):
method __init__ (line 108) | def __init__(self, config: ModelArgs, layer_idx: int) -> None: ...
method __call__ (line 109) | def __call__(
class LanguageModel (line 116) | class LanguageModel(nn.Module):
method __init__ (line 127) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 128) | def __call__(
method pipeline_layers (line 134) | def pipeline_layers(self) -> list[DecoderLayer]: ...
class Model (line 136) | class Model(nn.Module):
method __init__ (line 142) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 143) | def __call__(
method sanitize (line 148) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
method shard (line 149) | def shard(self, group: Optional[mx.distributed.Group] = None) -> None:...
method layers (line 151) | def layers(self) -> list[DecoderLayer]: ...
method cast_predicate (line 153) | def cast_predicate(self) -> Any: ...
FILE: .mlx_typings/mlx_lm/models/glm_moe_dsa.pyi
class ModelArgs (line 10) | class ModelArgs(BaseModelArgs):
class Model (line 45) | class Model(DSV32Model):
method __init__ (line 46) | def __init__(self, config: ModelArgs) -> None: ...
FILE: .mlx_typings/mlx_lm/models/nemotron_h.pyi
class ModelArgs (line 11) | class ModelArgs:
method from_dict (line 47) | def from_dict(cls, params: dict[str, Any]) -> ModelArgs: ...
method __post_init__ (line 48) | def __post_init__(self) -> None: ...
class NemotronHMamba2Mixer (line 50) | class NemotronHMamba2Mixer(nn.Module):
method __init__ (line 68) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 69) | def __call__(
class NemotronHAttention (line 76) | class NemotronHAttention(nn.Module):
method __init__ (line 87) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 88) | def __call__(
class NemotronHMLP (line 95) | class NemotronHMLP(nn.Module):
method __init__ (line 99) | def __init__(
method __call__ (line 102) | def __call__(self, x: mx.array) -> mx.array: ...
class NemotronHMoE (line 104) | class NemotronHMoE(nn.Module):
method __init__ (line 109) | def __init__(self, config: ModelArgs) -> None: ...
method __call__ (line 110) | def __call__(self, x: mx.array) -> mx.array: ...
class NemotronHBlock (line 112) | class NemotronHBlock(nn.Module):
method __init__ (line 117) | def __init__(self, args: ModelArgs, block_type: str) -> None: ...
method __call__ (line 118) | def __call__(
class NemotronHModel (line 125) | class NemotronHModel(nn.Module):
method __init__ (line 132) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 133) | def __call__(
class Model (line 139) | class Model(nn.Module):
method __init__ (line 145) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 146) | def __call__(
method layers (line 152) | def layers(self) -> list[NemotronHBlock]: ...
method make_cache (line 153) | def make_cache(self) -> list[ArraysCache | KVCache]: ...
method sanitize (line 154) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
FILE: .mlx_typings/mlx_lm/models/qwen3_5.pyi
class TextModelArgs (line 19) | class TextModelArgs:
method from_dict (line 50) | def from_dict(cls, params: dict[str, Any]) -> TextModelArgs: ...
method __post_init__ (line 51) | def __post_init__(self) -> None: ...
class GatedDeltaNet (line 53) | class GatedDeltaNet(nn.Module):
method __init__ (line 73) | def __init__(self, config: TextModelArgs) -> None: ...
method __call__ (line 74) | def __call__(
class DecoderLayer (line 81) | class DecoderLayer(nn.Module):
method __init__ (line 89) | def __init__(self, args: TextModelArgs, layer_idx: int) -> None: ...
method __call__ (line 90) | def __call__(
class Qwen3_5TextModel (line 97) | class Qwen3_5TextModel(nn.Module):
method __init__ (line 104) | def __init__(self, args: TextModelArgs) -> None: ...
method __call__ (line 105) | def __call__(
class TextModel (line 112) | class TextModel(nn.Module):
method __init__ (line 118) | def __init__(self, args: TextModelArgs) -> None: ...
method __call__ (line 119) | def __call__(
method layers (line 126) | def layers(self) -> list[DecoderLayer]: ...
method make_cache (line 127) | def make_cache(self) -> list[ArraysCache | KVCache]: ...
method sanitize (line 128) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
class ModelArgs (line 131) | class ModelArgs:
method from_dict (line 136) | def from_dict(cls, params: dict[str, Any]) -> ModelArgs: ...
class Model (line 138) | class Model(nn.Module):
method __init__ (line 143) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 144) | def __call__(
method sanitize (line 150) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
method layers (line 152) | def layers(self) -> list[DecoderLayer]: ...
method make_cache (line 153) | def make_cache(self) -> list[ArraysCache | KVCache]: ...
FILE: .mlx_typings/mlx_lm/models/qwen3_5_moe.pyi
class ModelArgs (line 11) | class ModelArgs:
method from_dict (line 16) | def from_dict(cls, params: dict[str, Any]) -> ModelArgs: ...
class Model (line 18) | class Model(Qwen3_5Model):
method sanitize (line 19) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
FILE: .mlx_typings/mlx_lm/models/qwen3_next.pyi
class Qwen3NextRMSNormGated (line 11) | class Qwen3NextRMSNormGated(nn.Module):
method __init__ (line 15) | def __init__(self, hidden_size: int, eps: float = ...) -> None: ...
method __call__ (line 16) | def __call__(
class Qwen3NextMLP (line 20) | class Qwen3NextMLP(nn.Module):
method __init__ (line 25) | def __init__(self, dim: int, hidden_dim: int) -> None: ...
method __call__ (line 26) | def __call__(self, x: mx.array) -> mx.array: ...
class Qwen3NextGatedDeltaNet (line 28) | class Qwen3NextGatedDeltaNet(nn.Module):
method __init__ (line 45) | def __init__(self, config: Any) -> None: ...
method __call__ (line 46) | def __call__(
class Qwen3NextAttention (line 53) | class Qwen3NextAttention(nn.Module):
method __init__ (line 63) | def __init__(self, args: Any) -> None: ...
method __call__ (line 64) | def __call__(
class Qwen3NextSparseMoeBlock (line 71) | class Qwen3NextSparseMoeBlock(nn.Module):
method __init__ (line 80) | def __init__(self, args: Any) -> None: ...
method __call__ (line 81) | def __call__(self, x: mx.array) -> mx.array: ...
class Qwen3NextDecoderLayer (line 83) | class Qwen3NextDecoderLayer(nn.Module):
method __init__ (line 91) | def __init__(self, args: Any, layer_idx: int) -> None: ...
method __call__ (line 92) | def __call__(
class Qwen3NextModel (line 99) | class Qwen3NextModel(nn.Module):
method __init__ (line 106) | def __init__(self, args: Any) -> None: ...
method __call__ (line 107) | def __call__(
class Model (line 113) | class Model(nn.Module):
method __init__ (line 118) | def __init__(self, args: Any) -> None: ...
method __call__ (line 119) | def __call__(
method sanitize (line 124) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
method layers (line 126) | def layers(self) -> list[Qwen3NextDecoderLayer]: ...
method make_cache (line 127) | def make_cache(self) -> list[ArraysCache | KVCache]: ...
FILE: .mlx_typings/mlx_lm/models/step3p5.pyi
class ModelArgs (line 11) | class ModelArgs(BaseModelArgs):
class Step3p5MLP (line 41) | class Step3p5MLP(nn.Module):
method __init__ (line 49) | def __init__(
method __call__ (line 52) | def __call__(self, x: mx.array) -> mx.array: ...
class Step3p5MoEGate (line 54) | class Step3p5MoEGate(nn.Module):
method __init__ (line 62) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 63) | def __call__(self, x: mx.array) -> tuple[mx.array, mx.array]: ...
class Step3p5MoE (line 65) | class Step3p5MoE(nn.Module):
method __init__ (line 71) | def __init__(self, args: ModelArgs, layer_idx: int) -> None: ...
method __call__ (line 72) | def __call__(self, x: mx.array) -> mx.array: ...
class Step3p5Attention (line 74) | class Step3p5Attention(nn.Module):
method __init__ (line 90) | def __init__(self, args: ModelArgs, layer_idx: int) -> None: ...
method __call__ (line 91) | def __call__(
class Step3p5DecoderLayer (line 98) | class Step3p5DecoderLayer(nn.Module):
method __init__ (line 106) | def __init__(self, args: ModelArgs, layer_idx: int) -> None: ...
method __call__ (line 107) | def __call__(
class Step3p5Model (line 114) | class Step3p5Model(nn.Module):
method __init__ (line 124) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 125) | def __call__(
class Model (line 131) | class Model(nn.Module):
method __init__ (line 137) | def __init__(self, args: ModelArgs) -> None: ...
method __call__ (line 138) | def __call__(
method sanitize (line 143) | def sanitize(self, weights: dict[str, Any]) -> dict[str, Any]: ...
method shard (line 144) | def shard(self, group: Optional[mx.distributed.Group] = None) -> None:...
method layers (line 146) | def layers(self) -> list[Step3p5DecoderLayer]: ...
method make_cache (line 147) | def make_cache(self) -> list[Any]: ...
method cast_predicate (line 149) | def cast_predicate(self) -> Any: ...
method quant_predicate (line 151) | def quant_predicate(self) -> Any: ...
FILE: .mlx_typings/mlx_lm/models/switch_layers.pyi
class QuantizedSwitchLinear (line 10) | class QuantizedSwitchLinear(nn.Module):
method __init__ (line 11) | def __init__(
method input_dims (line 22) | def input_dims(self): # -> int:
method output_dims (line 25) | def output_dims(self): # -> int:
method num_experts (line 28) | def num_experts(self): # -> int:
method __call__ (line 30) | def __call__(self, x, indices, sorted_indices=...): # -> array:
class SwitchLinear (line 33) | class SwitchLinear(nn.Module):
method __init__ (line 34) | def __init__(
method input_dims (line 38) | def input_dims(self): # -> int:
method output_dims (line 41) | def output_dims(self): # -> int:
method num_experts (line 44) | def num_experts(self): # -> int:
method __call__ (line 46) | def __call__(self, x, indices, sorted_indices=...): ...
method to_quantized (line 47) | def to_quantized(
function swiglu (line 53) | def swiglu(x, gate): ...
class SwiGLU (line 55) | class SwiGLU(nn.Module):
method __init__ (line 56) | def __init__(self) -> None: ...
method __call__ (line 57) | def __call__(self, x, gate): ...
class SwitchGLU (line 59) | class SwitchGLU(nn.Module):
method __init__ (line 65) | def __init__(
method __call__ (line 73) | def __call__(self, x, indices) -> mx.array: ...
class SwitchMLP (line 75) | class SwitchMLP(nn.Module):
method __init__ (line 79) | def __init__(
method __call__ (line 87) | def __call__(self, x, indices) -> mx.array: ...
FILE: .mlx_typings/mlx_lm/sample_utils.pyi
function make_sampler (line 10) | def make_sampler(
function make_logits_processors (line 47) | def make_logits_processors(
function apply_top_k (line 70) | def apply_top_k(logprobs: mx.array, top_k: int) -> mx.array:
function apply_min_p (line 80) | def apply_min_p(
function apply_top_p (line 101) | def apply_top_p(logprobs: mx.array, top_p: float) -> mx.array:
function apply_xtc (line 113) | def apply_xtc(
function categorical_sampling (line 130) | def categorical_sampling(logits, temp): # -> array:
function make_repetition_penalty (line 132) | def make_repetition_penalty(
FILE: .mlx_typings/mlx_lm/tokenizer_utils.pyi
class StreamingDetokenizer (line 11) | class StreamingDetokenizer:
method reset (line 42) | def reset(self) -> None: ...
method add_token (line 43) | def add_token(self, token: int) -> None: ...
method finalize (line 44) | def finalize(self) -> None: ...
method last_segment (line 46) | def last_segment(self) -> str:
class NaiveStreamingDetokenizer (line 49) | class NaiveStreamingDetokenizer(StreamingDetokenizer):
method __init__ (line 56) | def __init__(self, tokenizer) -> None: ...
method reset (line 57) | def reset(self): # -> None:
method add_token (line 59) | def add_token(self, token): # -> None:
method finalize (line 61) | def finalize(self): # -> None:
method text (line 64) | def text(self): # -> str:
class SPMStreamingDetokenizer (line 67) | class SPMStreamingDetokenizer(StreamingDetokenizer):
method __init__ (line 73) | def __init__(self, tokenizer, trim_space=...) -> None: ...
method reset (line 74) | def reset(self): # -> None:
method add_token (line 76) | def add_token(self, token): # -> None:
method finalize (line 78) | def finalize(self): # -> None:
class BPEStreamingDetokenizer (line 81) | class BPEStreamingDetokenizer(StreamingDetokenizer):
method __init__ (line 90) | def __init__(self, tokenizer) -> None: ...
method reset (line 91) | def reset(self): # -> None:
method add_token (line 93) | def add_token(self, token): # -> None:
method finalize (line 95) | def finalize(self): # -> None:
method make_byte_decoder (line 98) | def make_byte_decoder(cls): # -> None:
class TokenizerWrapper (line 101) | class TokenizerWrapper:
method __init__ (line 121) | def __init__(
method encode (line 131) | def encode(self, text: str, **kwargs: Any) -> list[int]: ...
method decode (line 132) | def decode(self, token_ids: list[int], **kwargs: Any) -> str: ...
method apply_chat_template (line 133) | def apply_chat_template(
method get_vocab (line 141) | def get_vocab(self) -> dict[str, int]: ...
method add_eos_token (line 142) | def add_eos_token(self, token: str) -> None: ...
method has_thinking (line 144) | def has_thinking(self) -> bool: ...
method think_start (line 146) | def think_start(self) -> str | None: ...
method think_end (line 148) | def think_end(self) -> str | None: ...
method has_tool_calling (line 150) | def has_tool_calling(self) -> bool: ...
method tool_call_start (line 152) | def tool_call_start(self) -> str | None: ...
method tool_call_end (line 154) | def tool_call_end(self) -> str | None: ...
method detokenizer (line 156) | def detokenizer(self) -> NaiveStreamingDetokenizer:
method __getattr__ (line 159) | def __getattr__(self, attr: str) -> Any: ...
method __setattr__ (line 160) | def __setattr__(self, attr: str, value: Any) -> None: ...
class NewlineTokenizer (line 162) | class NewlineTokenizer(PreTrainedTokenizerFast):
method __init__ (line 164) | def __init__(self, *args, **kwargs) -> None: ...
method encode (line 165) | def encode(self, text, **kwargs): # -> list[int]:
method encode_batch (line 167) | def encode_batch(self, texts, **kwargs): ...
method decode (line 168) | def decode(self, *args, **kwargs): # -> str:
method batch_decode (line 170) | def batch_decode(self, *args, **kwargs): # -> list[str]:
function load (line 173) | def load(
function no_bos_or_eos (line 188) | def no_bos_or_eos(sequence: list[int], bos: int, eos: int) -> list[int]:...
FILE: .mlx_typings/mlx_lm/utils.pyi
function compute_bits_per_weight (line 19) | def compute_bits_per_weight(model): ...
function hf_repo_to_path (line 20) | def hf_repo_to_path(hf_repo): # -> Path:
function load_config (line 22) | def load_config(model_path: Path) -> dict: ...
function load_model (line 23) | def load_model(
function load (line 56) | def load(
function make_shards (line 93) | def make_shards(weights: dict, max_file_size_gb: int = ...) -> list:
function create_model_card (line 105) | def create_model_card(
function upload_to_hub (line 116) | def upload_to_hub(path: str, upload_repo: str): # -> None:
function save_model (line 125) | def save_model(
function quantize_model (line 130) | def quantize_model(
function save_config (line 156) | def save_config(config: dict, config_path: Union[str, Path]) -> None:
function save (line 166) | def save(
function common_prefix_len (line 175) | def common_prefix_len(list1, list2): # -> int:
function does_model_support_input_embeddings (line 188) | def does_model_support_input_embeddings(model: nn.Module) -> bool:
FILE: bench/eval_tool_calls.py
class Scenario (line 33) | class Scenario:
function load_scenarios (line 46) | def load_scenarios(path: Path) -> list[Scenario]:
class ParsedResponse (line 123) | class ParsedResponse:
class ScenarioResult (line 131) | class ScenarioResult:
function validate_args (line 141) | def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool...
function validate_nested_args (line 155) | def validate_nested_args(
function call_api (line 184) | def call_api(
function _openai_build_request (line 201) | def _openai_build_request(
function _openai_parse_response (line 217) | def _openai_parse_response(data: dict[str, Any]) -> ParsedResponse:
function _openai_build_followup (line 244) | def _openai_build_followup(
function _claude_translate_tools (line 284) | def _claude_translate_tools(tools: list[dict[str, Any]]) -> list[dict[st...
function _claude_translate_messages (line 299) | def _claude_translate_messages(messages: list[dict[str, Any]]) -> list[d...
function _claude_build_request (line 362) | def _claude_build_request(
function _claude_parse_response (line 390) | def _claude_parse_response(data: dict[str, Any]) -> ParsedResponse:
function _claude_build_followup (line 434) | def _claude_build_followup(
function _responses_translate_input (line 500) | def _responses_translate_input(messages: list[dict[str, Any]]) -> list[d...
function _responses_build_request (line 549) | def _responses_build_request(
function _responses_parse_response (line 567) | def _responses_parse_response(data: dict[str, Any]) -> ParsedResponse:
function _responses_build_followup (line 612) | def _responses_build_followup(
function run_scenario (line 671) | def run_scenario(
function result_to_dict (line 854) | def result_to_dict(result: ScenarioResult) -> dict[str, Any]:
function _placement_sort_key (line 876) | def _placement_sort_key(p: dict[str, Any]) -> tuple[int, int]:
function main (line 890) | def main() -> None:
FILE: bench/exo_bench.py
function load_tokenizer_for_bench (line 60) | def load_tokenizer_for_bench(model_id: str) -> Any:
function format_peak_memory (line 126) | def format_peak_memory(b: float) -> str:
function parse_int_list (line 134) | def parse_int_list(values: list[str]) -> list[int]:
function run_one_completion (line 144) | def run_one_completion(
class PromptSizer (line 174) | class PromptSizer:
method __init__ (line 175) | def __init__(self, tokenizer: Any, atom: str = "a "):
method _make_counter (line 182) | def _make_counter(tokenizer: Any) -> Callable[[str], int]:
method build (line 195) | def build(self, target_prompt_tokens: int) -> tuple[str, int]:
function main (line 235) | def main() -> int:
FILE: bench/exo_eval.py
function load_model_config (line 94) | def load_model_config(model_id: str) -> dict[str, Any] | None:
function extract_mc_answer (line 113) | def extract_mc_answer(text: str, valid_letters: str = "ABCD") -> str | N...
function extract_boxed_answer (line 127) | def extract_boxed_answer(text: str) -> str | None:
function extract_code_block (line 151) | def extract_code_block(text: str, preserve_indent: bool = False) -> str ...
function check_aime_answer (line 169) | def check_aime_answer(extracted: str, gold: int) -> bool:
function _lcb_worker (line 192) | def _lcb_worker(
function run_livecodebench_test (line 211) | def run_livecodebench_test(
function run_humaneval_test (line 254) | def run_humaneval_test(
class QuestionResult (line 272) | class QuestionResult:
class BenchmarkConfig (line 287) | class BenchmarkConfig:
function format_gpqa_question (line 402) | def format_gpqa_question(doc: dict, idx: int) -> tuple[str, str]:
function format_mmlu_pro_question (line 419) | def format_mmlu_pro_question(doc: dict) -> tuple[str, str]:
function format_aime_question (line 429) | def format_aime_question(doc: dict) -> tuple[str, int]:
function format_humaneval_question (line 434) | def format_humaneval_question(doc: dict) -> tuple[str, dict]:
function format_livecodebench_question (line 450) | def format_livecodebench_question(doc: dict) -> tuple[str, str | None, d...
class ApiResult (line 514) | class ApiResult:
function _call_api (line 521) | async def _call_api(
function call_with_retries (line 569) | async def call_with_retries(
function evaluate_benchmark (line 612) | async def evaluate_benchmark(
function print_results (line 852) | def print_results(
function print_comparison (line 901) | def print_comparison(
function pick_tasks_interactive (line 954) | def pick_tasks_interactive() -> list[str]:
function save_results (line 1023) | def save_results(
function parse_int_list (line 1069) | def parse_int_list(values: list[str]) -> list[int]:
function main (line 1078) | def main() -> int:
FILE: bench/harness.py
class ExoHttpError (line 19) | class ExoHttpError(RuntimeError):
method __init__ (line 20) | def __init__(self, status: int, reason: str, body_preview: str):
class ExoClient (line 25) | class ExoClient:
method __init__ (line 26) | def __init__(self, host: str, port: int, timeout_s: float = 7200.0):
method request_json (line 31) | def request_json(
method post_bench_chat_completions (line 69) | def post_bench_chat_completions(self, payload: dict[str, Any]) -> dict...
function unwrap_instance (line 73) | def unwrap_instance(instance: dict[str, Any]) -> dict[str, Any]:
function instance_id_from_instance (line 84) | def instance_id_from_instance(instance: dict[str, Any]) -> str:
function nodes_used_in_instance (line 89) | def nodes_used_in_instance(instance: dict[str, Any]) -> int:
function runner_ids_from_instance (line 94) | def runner_ids_from_instance(instance: dict[str, Any]) -> list[str]:
function runner_ready (line 100) | def runner_ready(runner: dict[str, Any]) -> bool:
function runner_failed (line 104) | def runner_failed(runner: dict[str, Any]) -> bool:
function get_runner_failed_message (line 108) | def get_runner_failed_message(runner: dict[str, Any]) -> str | None:
function wait_for_instance_ready (line 114) | def wait_for_instance_ready(
function wait_for_instance_gone (line 152) | def wait_for_instance_gone(
function resolve_model_short_id (line 168) | def resolve_model_short_id(
function placement_filter (line 199) | def placement_filter(instance_meta: str, wanted: str) -> bool:
function sharding_filter (line 206) | def sharding_filter(sharding: str, wanted: str) -> bool:
function fetch_and_filter_placements (line 213) | def fetch_and_filter_placements(
function settle_and_fetch_placements (line 273) | def settle_and_fetch_placements(
function run_planning_phase (line 297) | def run_planning_phase(
function add_common_instance_args (line 454) | def add_common_instance_args(ap: argparse.ArgumentParser) -> None:
FILE: bench/parallel_requests.py
function write (line 28) | def write(s: str) -> None:
function fetch_models (line 37) | def fetch_models() -> list[str]:
function pick_model (line 53) | def pick_model() -> str | None:
function render_progress (line 116) | def render_progress(first: bool = False) -> None:
function send_request (line 145) | async def send_request(
function run_requests (line 185) | async def run_requests(print_stdout: bool = False) -> None:
function main (line 233) | def main() -> None:
FILE: bench/vendor/lcb_testing_util.py
function truncatefn (line 32) | def truncatefn(s, length=300):
class CODE_TYPE (line 43) | class CODE_TYPE(Enum):
class TimeoutException (line 49) | class TimeoutException(Exception):
function timeout_handler (line 53) | def timeout_handler(signum, frame):
class Capturing (line 61) | class Capturing(list):
method __enter__ (line 62) | def __enter__(self):
method __exit__ (line 69) | def __exit__(self, *args):
class MockStdinWithBuffer (line 76) | class MockStdinWithBuffer:
method __init__ (line 77) | def __init__(self, inputs: str):
method read (line 82) | def read(self, *args):
method readline (line 85) | def readline(self, *args):
method readlines (line 88) | def readlines(self, *args):
method __getattr__ (line 91) | def __getattr__(self, name):
class MockBuffer (line 96) | class MockBuffer:
method __init__ (line 97) | def __init__(self, inputs: str):
method read (line 100) | def read(self, *args):
method readline (line 104) | def readline(self, *args):
function clean_if_name (line 108) | def clean_if_name(code: str) -> str:
function make_function (line 124) | def make_function(code: str) -> str:
function call_method (line 156) | def call_method(method, inputs):
function get_function (line 185) | def get_function(compiled_sol, fn_name: str): # type: ignore
function compile_code (line 193) | def compile_code(code: str, timeout: int):
function convert_line_to_decimals (line 215) | def convert_line_to_decimals(line: str) -> tuple[bool, list[Decimal]]:
function get_stripped_lines (line 223) | def get_stripped_lines(val: str):
function grade_call_based (line 230) | def grade_call_based(
function grade_stdio (line 311) | def grade_stdio(
function run_test (line 429) | def run_test(sample, test=None, debug=False, timeout=6):
function reliability_guard (line 510) | def reliability_guard(maximum_memory_bytes=None):
FILE: dashboard/src/lib/stores/app.svelte.ts
function generateUUID (line 13) | function generateUUID(): string {
type NodeInfo (line 28) | interface NodeInfo {
type TopologyEdge (line 55) | interface TopologyEdge {
type TopologyData (line 64) | interface TopologyData {
type Instance (line 69) | interface Instance {
type RawNodeIdentity (line 78) | interface RawNodeIdentity {
type RawMemoryUsage (line 86) | interface RawMemoryUsage {
type RawSystemPerformanceProfile (line 93) | interface RawSystemPerformanceProfile {
type RawNetworkInterfaceInfo (line 101) | interface RawNetworkInterfaceInfo {
type RawNodeNetworkInfo (line 111) | interface RawNodeNetworkInfo {
type RawSocketConnection (line 115) | interface RawSocketConnection {
type RawRDMAConnection (line 124) | interface RawRDMAConnection {
type RawConnectionEdge (line 129) | type RawConnectionEdge = RawSocketConnection | RawRDMAConnection;
type RawConnectionsMap (line 132) | type RawConnectionsMap = Record<string, Record<string, RawConnectionEdge...
type RawTopology (line 134) | interface RawTopology {
type DownloadProgress (line 139) | interface DownloadProgress {
type ModelDownloadStatus (line 157) | interface ModelDownloadStatus {
type PlacementPreview (line 168) | interface PlacementPreview {
type PlacementPreviewResponse (line 177) | interface PlacementPreviewResponse {
type ImageApiResponse (line 181) | interface ImageApiResponse {
type TraceCategoryStats (line 187) | interface TraceCategoryStats {
type TraceRankStats (line 195) | interface TraceRankStats {
type TraceStatsResponse (line 199) | interface TraceStatsResponse {
type TraceListItem (line 206) | interface TraceListItem {
type TraceListResponse (line 212) | interface TraceListResponse {
type RawStateResponse (line 216) | interface RawStateResponse {
type MessageAttachment (line 259) | interface MessageAttachment {
type TopLogprob (line 267) | interface TopLogprob {
type TokenData (line 273) | interface TokenData {
type PrefillProgress (line 280) | interface PrefillProgress {
type Message (line 287) | interface Message {
type Conversation (line 301) | interface Conversation {
constant STORAGE_KEY (line 313) | const STORAGE_KEY = "exo-conversations";
constant IMAGE_PARAMS_STORAGE_KEY (line 314) | const IMAGE_PARAMS_STORAGE_KEY = "exo-image-generation-params";
type ImageGenerationParams (line 317) | interface ImageGenerationParams {
type EditingImage (line 345) | interface EditingImage {
constant DEFAULT_IMAGE_PARAMS (line 350) | const DEFAULT_IMAGE_PARAMS: ImageGenerationParams = {
type GranularNodeState (line 365) | interface GranularNodeState {
function transformNetworkInterface (line 372) | function transformNetworkInterface(iface: RawNetworkInterfaceInfo): {
function transformTopology (line 406) | function transformTopology(
function extractIpFromMultiaddr (line 507) | function extractIpFromMultiaddr(ma?: string): string | undefined {
class AppStore (line 519) | class AppStore {
method constructor (line 605) | constructor() {
method loadConversationsFromStorage (line 619) | private loadConversationsFromStorage() {
method saveConversationsToStorage (line 644) | private saveConversationsToStorage() {
method loadDebugModeFromStorage (line 663) | private loadDebugModeFromStorage() {
method saveDebugModeToStorage (line 674) | private saveDebugModeToStorage() {
method loadTopologyOnlyModeFromStorage (line 682) | private loadTopologyOnlyModeFromStorage() {
method saveTopologyOnlyModeToStorage (line 693) | private saveTopologyOnlyModeToStorage() {
method loadChatSidebarVisibleFromStorage (line 704) | private loadChatSidebarVisibleFromStorage() {
method saveChatSidebarVisibleToStorage (line 715) | private saveChatSidebarVisibleToStorage() {
method loadImageGenerationParamsFromStorage (line 726) | private loadImageGenerationParamsFromStorage() {
method saveImageGenerationParamsToStorage (line 741) | private saveImageGenerationParamsToStorage() {
method getImageGenerationParams (line 752) | getImageGenerationParams(): ImageGenerationParams {
method setImageGenerationParams (line 756) | setImageGenerationParams(params: Partial<ImageGenerationParams>) {
method resetImageGenerationParams (line 764) | resetImageGenerationParams() {
method setEditingImage (line 769) | setEditingImage(imageDataUrl: string, sourceMessage: Message) {
method clearEditingImage (line 773) | clearEditingImage() {
method createConversation (line 780) | createConversation(name?: string): string {
method loadConversation (line 840) | loadConversation(id: string): boolean {
method deleteConversation (line 862) | deleteConversation(id: string) {
method deleteAllConversations (line 878) | deleteAllConversations() {
method renameConversation (line 890) | renameConversation(id: string, newName: string) {
method getTaggedValue (line 899) | private getTaggedValue(obj: unknown): [string | null, unknown] {
method extractInstanceModelId (line 908) | private extractInstanceModelId(instanceWrapped: unknown): string | null {
method describeInstance (line 915) | private describeInstance(instanceWrapped: unknown): {
method buildConversationModelInfo (line 945) | private buildConversationModelInfo(modelId: string): {
method applyConversationModelInfo (line 966) | private applyConversationModelInfo(info: {
method getModelTail (line 986) | private getModelTail(modelId: string): string {
method isBetterModelId (line 991) | private isBetterModelId(
method refreshConversationModelFromInstances (line 1005) | private refreshConversationModelFromInstances() {
method getDebugMode (line 1056) | getDebugMode(): boolean {
method updateActiveConversation (line 1063) | private updateActiveConversation() {
method persistActiveConversation (line 1096) | private persistActiveConversation(throttleMs = 400) {
method updateConversationMessage (line 1107) | private updateConversationMessage(
method syncActiveMessagesIfNeeded (line 1127) | private syncActiveMessagesIfNeeded(conversationId: string): void {
method conversationExists (line 1141) | private conversationExists(conversationId: string): boolean {
method persistConversation (line 1148) | private persistConversation(conversationId: string, throttleMs = 400):...
method addMessageToConversation (line 1185) | private addMessageToConversation(
method toggleSidebar (line 1208) | toggleSidebar() {
method setDebugMode (line 1212) | setDebugMode(enabled: boolean) {
method toggleDebugMode (line 1217) | toggleDebugMode() {
method getTopologyOnlyMode (line 1222) | getTopologyOnlyMode(): boolean {
method setTopologyOnlyMode (line 1226) | setTopologyOnlyMode(enabled: boolean) {
method toggleTopologyOnlyMode (line 1231) | toggleTopologyOnlyMode() {
method getChatSidebarVisible (line 1236) | getChatSidebarVisible(): boolean {
method setChatSidebarVisible (line 1240) | setChatSidebarVisible(visible: boolean) {
method toggleChatSidebarVisible (line 1245) | toggleChatSidebarVisible() {
method getMobileChatSidebarOpen (line 1250) | getMobileChatSidebarOpen(): boolean {
method setMobileChatSidebarOpen (line 1254) | setMobileChatSidebarOpen(open: boolean) {
method toggleMobileChatSidebar (line 1258) | toggleMobileChatSidebar() {
method getMobileRightSidebarOpen (line 1262) | getMobileRightSidebarOpen(): boolean {
method setMobileRightSidebarOpen (line 1266) | setMobileRightSidebarOpen(open: boolean) {
method toggleMobileRightSidebar (line 1270) | toggleMobileRightSidebar() {
method startPolling (line 1274) | startPolling() {
method stopPolling (line 1279) | stopPolling() {
method fetchState (line 1287) | async fetchState() {
method fetchPlacementPreviews (line 1346) | async fetchPlacementPreviews(modelId: string, showLoading = true) {
method startPreviewsPolling (line 1380) | startPreviewsPolling(modelId: string) {
method stopPreviewsPolling (line 1395) | stopPreviewsPolling() {
method selectPreviewModel (line 1402) | selectPreviewModel(modelId: string | null) {
method togglePreviewNodeFilter (line 1415) | togglePreviewNodeFilter(nodeId: string) {
method clearPreviewNodeFilter (line 1432) | clearPreviewNodeFilter() {
method handleTopologyChange (line 1443) | private handleTopologyChange() {
method startChat (line 1481) | startChat() {
method addMessage (line 1497) | addMessage(role: "user" | "assistant", content: string) {
method deleteMessage (line 1511) | deleteMessage(messageId: string) {
method editMessage (line 1523) | editMessage(messageId: string, newContent: string) {
method editAndRegenerate (line 1535) | async editAndRegenerate(
method regenerateLastResponse (line 1559) | async regenerateLastResponse(): Promise<void> {
method regenerateFromToken (line 1614) | async regenerateFromToken(
method regenerateChatCompletion (line 1834) | private async regenerateChatCompletion(): Promise<void> {
method setSelectedModel (line 2041) | setSelectedModel(modelId: string) {
method stripThinkingTags (line 2052) | private stripThinkingTags(content: string): {
method parseSSEStream (line 2092) | private async parseSSEStream<T>(
method handleStreamingError (line 2171) | private handleStreamingError(
method getModelForRequest (line 2197) | private getModelForRequest(modelId?: string): string | null {
method sendMessage (line 2221) | async sendMessage(
method stopGeneration (line 2575) | stopGeneration(): void {
method generateImage (line 2583) | async generateImage(prompt: string, modelId?: string): Promise<void> {
method editImage (line 2839) | async editImage(
method clearChat (line 3071) | clearChat() {
method getActiveConversation (line 3085) | getActiveConversation(): Conversation | null {
method setConversationThinking (line 3095) | setConversationThinking(enabled: boolean) {
method startDownload (line 3107) | async startDownload(nodeId: string, shardMetadata: object): Promise<vo...
method deleteDownload (line 3132) | async deleteDownload(nodeId: string, modelId: string): Promise<void> {
method listTraces (line 3155) | async listTraces(): Promise<TraceListResponse> {
method checkTraceExists (line 3166) | async checkTraceExists(taskId: string): Promise<boolean> {
method fetchTraceStats (line 3178) | async fetchTraceStats(taskId: string): Promise<TraceStatsResponse> {
method deleteTraces (line 3191) | async deleteTraces(
method getTraceRawUrl (line 3208) | getTraceRawUrl(taskId: string): string {
FILE: dashboard/src/lib/stores/favorites.svelte.ts
constant FAVORITES_KEY (line 7) | const FAVORITES_KEY = "exo-favorite-models";
class FavoritesStore (line 9) | class FavoritesStore {
method constructor (line 12) | constructor() {
method loadFromStorage (line 18) | private loadFromStorage() {
method saveToStorage (line 30) | private saveToStorage() {
method add (line 39) | add(baseModelId: string) {
method remove (line 46) | remove(baseModelId: string) {
method toggle (line 53) | toggle(baseModelId: string) {
method isFavorite (line 61) | isFavorite(baseModelId: string): boolean {
method getAll (line 65) | getAll(): string[] {
method getSet (line 69) | getSet(): Set<string> {
method hasAny (line 73) | hasAny(): boolean {
method clearAll (line 77) | clearAll() {
FILE: dashboard/src/lib/stores/recents.svelte.ts
constant RECENTS_KEY (line 7) | const RECENTS_KEY = "exo-recent-models";
constant MAX_RECENT_MODELS (line 8) | const MAX_RECENT_MODELS = 20;
type RecentEntry (line 10) | interface RecentEntry {
class RecentsStore (line 15) | class RecentsStore {
method constructor (line 18) | constructor() {
method loadFromStorage (line 24) | private loadFromStorage() {
method saveToStorage (line 36) | private saveToStorage() {
method recordLaunch (line 44) | recordLaunch(modelId: string) {
method getRecentModelIds (line 54) | getRecentModelIds(): string[] {
method hasAny (line 58) | hasAny(): boolean {
method clearAll (line 62) | clearAll() {
FILE: dashboard/src/lib/stores/toast.svelte.ts
type ToastType (line 10) | type ToastType = "success" | "error" | "warning" | "info";
type Toast (line 12) | interface Toast {
type ToastInput (line 21) | interface ToastInput {
constant DEFAULT_DURATIONS (line 30) | const DEFAULT_DURATIONS: Record<ToastType, number> = {
function generateId (line 40) | function generateId(): string {
function addToast (line 44) | function addToast(input: ToastInput): string {
function dismissToast (line 68) | function dismissToast(id: string): void {
function dismissByMessage (line 78) | function dismissByMessage(message: string): void {
function toasts (line 85) | function toasts(): Toast[] {
FILE: dashboard/src/lib/types/files.ts
type ChatUploadedFile (line 5) | interface ChatUploadedFile {
type ChatAttachment (line 15) | interface ChatAttachment {
type FileCategory (line 23) | type FileCategory = "image" | "text" | "pdf" | "audio" | "unknown";
constant IMAGE_EXTENSIONS (line 25) | const IMAGE_EXTENSIONS = [
constant IMAGE_MIME_TYPES (line 33) | const IMAGE_MIME_TYPES = [
constant TEXT_EXTENSIONS (line 41) | const TEXT_EXTENSIONS = [
constant TEXT_MIME_TYPES (line 77) | const TEXT_MIME_TYPES = [
constant PDF_EXTENSIONS (line 91) | const PDF_EXTENSIONS = [".pdf"];
constant PDF_MIME_TYPES (line 92) | const PDF_MIME_TYPES = ["application/pdf"];
constant AUDIO_EXTENSIONS (line 94) | const AUDIO_EXTENSIONS = [".mp3", ".wav", ".ogg", ".m4a"];
constant AUDIO_MIME_TYPES (line 95) | const AUDIO_MIME_TYPES = [
function getFileCategory (line 105) | function getFileCategory(
function getAcceptString (line 139) | function getAcceptString(categories: FileCategory[]): string {
function formatFileSize (line 165) | function formatFileSize(bytes: number): string {
function readFileAsDataURL (line 176) | function readFileAsDataURL(file: File): Promise<string> {
function readFileAsText (line 188) | function readFileAsText(file: File): Promise<string> {
function processUploadedFiles (line 200) | async function processUploadedFiles(
FILE: dashboard/src/lib/utils/downloads.ts
function unwrapTagged (line 12) | function unwrapTagged(
function extractModelIdFromDownload (line 24) | function extractModelIdFromDownload(
function extractShardMetadata (line 43) | function extractShardMetadata(
function getDownloadTag (line 53) | function getDownloadTag(
function isModelDownloadedOnNode (line 77) | function isModelDownloadedOnNode(
function getNodesWithModelDownloaded (line 92) | function getNodesWithModelDownloaded(
function getShardMetadataForModel (line 109) | function getShardMetadataForModel(
function getModelDownloadStatus (line 136) | function getModelDownloadStatus(
FILE: packaging/dmg/generate-background.py
function draw_arrow (line 39) | def draw_arrow(draw: ImageDraw.ImageDraw) -> None:
function generate_background (line 79) | def generate_background(output_path: str) -> None:
FILE: rust/exo_pyo3_bindings/exo_pyo3_bindings.pyi
class AllQueuesFullError (line 8) | class AllQueuesFullError(builtins.Exception):
method __new__ (line 9) | def __new__(cls, *args: typing.Any) -> AllQueuesFullError: ...
method __repr__ (line 10) | def __repr__(self) -> builtins.str: ...
method __str__ (line 11) | def __str__(self) -> builtins.str: ...
class Keypair (line 14) | class Keypair:
method generate (line 19) | def generate() -> Keypair:
method from_bytes (line 24) | def from_bytes(bytes: bytes) -> Keypair:
method to_bytes (line 28) | def to_bytes(self) -> bytes:
method to_node_id (line 32) | def to_node_id(self) -> builtins.str:
class MessageTooLargeError (line 38) | class MessageTooLargeError(builtins.Exception):
method __new__ (line 39) | def __new__(cls, *args: typing.Any) -> MessageTooLargeError: ...
method __repr__ (line 40) | def __repr__(self) -> builtins.str: ...
method __str__ (line 41) | def __str__(self) -> builtins.str: ...
class NetworkingHandle (line 44) | class NetworkingHandle:
method __new__ (line 45) | def __new__(cls, identity: Keypair) -> NetworkingHandle: ...
method gossipsub_subscribe (line 46) | async def gossipsub_subscribe(self, topic: builtins.str) -> builtins.b...
method gossipsub_unsubscribe (line 52) | async def gossipsub_unsubscribe(self, topic: builtins.str) -> builtins...
method gossipsub_publish (line 58) | async def gossipsub_publish(self, topic: builtins.str, data: bytes) ->...
method recv (line 64) | async def recv(self) -> PyFromSwarm: ...
class NoPeersSubscribedToTopicError (line 67) | class NoPeersSubscribedToTopicError(builtins.Exception):
method __new__ (line 68) | def __new__(cls, *args: typing.Any) -> NoPeersSubscribedToTopicError: ...
method __repr__ (line 69) | def __repr__(self) -> builtins.str: ...
method __str__ (line 70) | def __str__(self) -> builtins.str: ...
class PyFromSwarm (line 72) | class PyFromSwarm:
class Connection (line 74) | class Connection(PyFromSwarm):
method peer_id (line 77) | def peer_id(self) -> builtins.str: ...
method connected (line 79) | def connected(self) -> builtins.bool: ...
method __new__ (line 80) | def __new__(cls, peer_id: builtins.str, connected: builtins.bool) ->...
class Message (line 83) | class Message(PyFromSwarm):
method origin (line 86) | def origin(self) -> builtins.str: ...
method topic (line 88) | def topic(self) -> builtins.str: ...
method data (line 90) | def data(self) -> bytes: ...
method __new__ (line 91) | def __new__(cls, origin: builtins.str, topic: builtins.str, data: by...
FILE: rust/exo_pyo3_bindings/src/allow_threading.rs
type AllowThreads (line 15) | pub(crate) struct AllowThreads<F>(#[pin] F);
function new (line 21) | pub fn new(f: F) -> Self {
type Output (line 31) | type Output = F::Output;
method poll (line 33) | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
FILE: rust/exo_pyo3_bindings/src/bin/stub_gen.rs
function main (line 3) | fn main() -> Result<()> {
FILE: rust/exo_pyo3_bindings/src/ident.rs
type PyKeypair (line 11) | pub struct PyKeypair(pub Keypair);
method generate (line 19) | fn generate() -> Self {
method from_bytes (line 25) | fn from_bytes(bytes: Bound<'_, PyBytes>) -> PyResult<Self> {
method to_bytes (line 31) | fn to_bytes<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyByte...
method to_node_id (line 44) | fn to_node_id(&self) -> String {
FILE: rust/exo_pyo3_bindings/src/lib.rs
constant MPSC_CHANNEL_SIZE (line 20) | pub const MPSC_CHANNEL_SIZE: usize = 1024;
function pybytes (line 37) | fn pybytes(&self) -> Py<PyBytes> {
function pyerr (line 47) | fn pyerr(self) -> PyResult<T> {
type FutureExt (line 52) | pub trait FutureExt: Future + Sized {
method allow_threads_py (line 54) | fn allow_threads_py(self) -> AllowThreads<Self>
method receiver_channel_closed (line 66) | fn receiver_channel_closed() -> Self {
function write_unraisable (line 73) | fn write_unraisable(self) -> Option<T> {
function write_unraisable_with (line 77) | fn write_unraisable_with(self, py: Python<'_>) -> Option<T> {
method spawn_with_scope (line 91) | fn spawn_with_scope<F>(&self, py: Python<'_>, future: F) -> PyResult<Joi...
function send_py (line 108) | async fn send_py(&self, value: T) -> PyResult<()> {
function recv_py (line 118) | async fn recv_py(&mut self) -> PyResult<T> {
function recv_many_py (line 127) | async fn recv_many_py(&mut self, limit: usize) -> PyResult<Vec<T>> {
function try_recv_py (line 141) | fn try_recv_py(&mut self) -> PyResult<Option<T>> {
function main_module (line 155) | fn main_module(m: &Bound<'_, PyModule>) -> PyResult<()> {
FILE: rust/exo_pyo3_bindings/src/networking.rs
type PyNoPeersSubscribedToTopicError (line 31) | pub struct PyNoPeersSubscribedToTopicError {}
constant MSG (line 34) | const MSG: &'static str = "\
method new_err (line 41) | pub(crate) fn new_err() -> PyErr {
method new (line 52) | pub(crate) fn new(args: &Bound<'_, PyTuple>) -> Self {
method __repr__ (line 56) | fn __repr__(&self) -> String {
method __str__ (line 60) | fn __str__(&self) -> String {
type PyAllQueuesFullError (line 67) | pub struct PyAllQueuesFullError {}
constant MSG (line 70) | const MSG: &'static str =
method new_err (line 76) | pub(crate) fn new_err() -> PyErr {
method new (line 87) | pub(crate) fn new(args: &Bound<'_, PyTuple>) -> Self {
method __repr__ (line 91) | fn __repr__(&self) -> String {
method __str__ (line 95) | fn __str__(&self) -> String {
type PyMessageTooLargeError (line 102) | pub struct PyMessageTooLargeError {}
constant MSG (line 105) | const MSG: &'static str = "Gossipsub message exceeds max_transmit_size...
method new_err (line 107) | pub(crate) fn new_err() -> PyErr {
method new (line 118) | pub(crate) fn new(args: &Bound<'_, PyTuple>) -> Self {
method __repr__ (line 122) | fn __repr__(&self) -> String {
method __str__ (line 126) | fn __str__(&self) -> String {
type PyNetworkingHandle (line 134) | struct PyNetworkingHandle {
method py_new (line 183) | fn py_new(identity: Bound<'_, PyKeypair>) -> PyResult<Self> {
method recv (line 201) | fn recv<'py>(&'py self, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
method gossipsub_subscribe (line 219) | async fn gossipsub_subscribe(&self, topic: String) -> PyResult<bool> {
method gossipsub_unsubscribe (line 241) | async fn gossipsub_unsubscribe(&self, topic: String) -> PyResult<bool> {
method gossipsub_publish (line 262) | async fn gossipsub_publish(&self, topic: String, data: Py<PyBytes>) ->...
type PyFromSwarm (line 142) | enum PyFromSwarm {
method from (line 154) | fn from(value: FromSwarm) -> Self {
function networking_submodule (line 302) | pub fn networking_submodule(m: &Bound<'_, PyModule>) -> PyResult<()> {
FILE: rust/exo_pyo3_bindings/tests/dummy.rs
function test_drop_channel (line 10) | async fn test_drop_channel() {
FILE: rust/exo_pyo3_bindings/tests/test_python.py
function test_sleep_on_multiple_items (line 13) | async def test_sleep_on_multiple_items() -> None:
function _await_recv (line 29) | async def _await_recv(h: NetworkingHandle):
FILE: rust/networking/examples/chatroom.rs
function main (line 11) | async fn main() {
FILE: rust/networking/src/discovery.rs
constant RETRY_CONNECT_INTERVAL (line 24) | const RETRY_CONNECT_INTERVAL: Duration = Duration::from_secs(5);
constant MDNS_RECORD_TTL (line 32) | const MDNS_RECORD_TTL: Duration = Duration::from_secs(2_500);
constant MDNS_QUERY_INTERVAL (line 33) | const MDNS_QUERY_INTERVAL: Duration = Duration::from_secs(1_500);
constant PING_TIMEOUT (line 34) | const PING_TIMEOUT: Duration = Duration::from_millis(2_500);
constant PING_INTERVAL (line 35) | const PING_INTERVAL: Duration = Duration::from_millis(2_500);
type Behaviour (line 38) | pub struct Behaviour {
method new (line 44) | pub fn new(keypair: &identity::Keypair) -> io::Result<Self> {
method new (line 115) | pub fn new(keypair: &identity::Keypair) -> io::Result<Self> {
method dial (line 124) | fn dial(&mut self, peer_id: PeerId, addr: Multiaddr) {
method close_connection (line 130) | fn close_connection(&mut self, peer_id: PeerId, connection: Connection...
method handle_mdns_discovered (line 138) | fn handle_mdns_discovered(&mut self, peers: Vec<(PeerId, Multiaddr)>) {
method handle_mdns_expired (line 154) | fn handle_mdns_expired(&mut self, peers: Vec<(PeerId, Multiaddr)>) {
method on_connection_established (line 173) | fn on_connection_established(
method on_connection_closed (line 190) | fn on_connection_closed(
function mdns_behaviour (line 52) | fn mdns_behaviour(keypair: &identity::Keypair) -> io::Result<mdns::tokio...
function ping_behaviour (line 68) | fn ping_behaviour() -> ping::Behaviour {
type Event (line 79) | pub enum Event {
type Behaviour (line 103) | pub struct Behaviour {
method new (line 44) | pub fn new(keypair: &identity::Keypair) -> io::Result<Self> {
method new (line 115) | pub fn new(keypair: &identity::Keypair) -> io::Result<Self> {
method dial (line 124) | fn dial(&mut self, peer_id: PeerId, addr: Multiaddr) {
method close_connection (line 130) | fn close_connection(&mut self, peer_id: PeerId, connection: Connection...
method handle_mdns_discovered (line 138) | fn handle_mdns_discovered(&mut self, peers: Vec<(PeerId, Multiaddr)>) {
method handle_mdns_expired (line 154) | fn handle_mdns_expired(&mut self, peers: Vec<(PeerId, Multiaddr)>) {
method on_connection_established (line 173) | fn on_connection_established(
method on_connection_closed (line 190) | fn on_connection_closed(
type ConnectionHandler (line 209) | type ConnectionHandler =
type ToSwarm (line 211) | type ToSwarm = Event;
method handle_established_inbound_connection (line 222) | fn handle_established_inbound_connection(
method handle_established_outbound_connection (line 241) | fn handle_established_outbound_connection(
method on_connection_handler_event (line 261) | fn on_connection_handler_event(
method on_swarm_event (line 278) | fn on_swarm_event(&mut self, event: FromSwarm) {
method poll (line 325) | fn poll(&mut self, cx: &mut Context) -> Poll<ToSwarm<Self::ToSwarm, THan...
FILE: rust/networking/src/lib.rs
type AnyError (line 13) | pub type AnyError = Box<dyn Error + Send + Sync + 'static>;
type AnyResult (line 14) | pub type AnyResult<T> = Result<T, AnyError>;
method try_to_tcp_addr (line 27) | fn try_to_tcp_addr(&self) -> Option<(IpAddr, u16)> {
FILE: rust/networking/src/swarm.rs
constant NETWORK_VERSION (line 17) | pub const NETWORK_VERSION: &[u8] = b"v0.0.1";
constant OVERRIDE_VERSION_ENV_VAR (line 18) | pub const OVERRIDE_VERSION_ENV_VAR: &str = "EXO_LIBP2P_NAMESPACE";
type ToSwarm (line 22) | pub enum ToSwarm {
type FromSwarm (line 37) | pub enum FromSwarm {
type Swarm (line 51) | pub struct Swarm {
method into_stream (line 57) | pub fn into_stream(self) -> Pin<Box<dyn Stream<Item = FromSwarm> + Sen...
function on_message (line 82) | fn on_message(swarm: &mut libp2p::Swarm<Behaviour>, message: ToSwarm) {
function filter_swarm_event (line 118) | fn filter_swarm_event(event: SwarmEvent<BehaviourEvent>) -> Option<FromS...
function create_swarm (line 146) | pub fn create_swarm(
function pnet_upgrade (line 189) | async fn pnet_upgrade<TSocket>(
function tcp_transport (line 203) | pub fn tcp_transport(
type Behaviour (line 243) | pub struct Behaviour {
method new (line 249) | pub fn new(keypair: &identity::Keypair) -> alias::AnyResult<Self> {
function gossipsub_behaviour (line 257) | fn gossipsub_behaviour(keypair: &identity::Keypair) -> gossipsub::Behavi...
FILE: rust/networking/tests/dummy.rs
function does_nothing (line 6) | fn does_nothing() {}
FILE: rust/util/src/wakerdeque.rs
type WakerDeque (line 7) | pub struct WakerDeque<T> {
method fmt (line 13) | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
function new (line 19) | pub fn new() -> Self {
function update (line 26) | fn update(&mut self, cx: &mut Context<'_>) {
function wake (line 30) | fn wake(&mut self) {
function pop_front (line 36) | pub fn pop_front(&mut self, cx: &mut Context<'_>) -> Option<T> {
function pop_back (line 41) | pub fn pop_back(&mut self, cx: &mut Context<'_>) -> Option<T> {
function push_front (line 46) | pub fn push_front(&mut self, value: T) {
function push_back (line 51) | pub fn push_back(&mut self, value: T) {
FILE: scripts/fetch_kv_heads.py
function fetch_kv_heads (line 29) | def fetch_kv_heads(model_id: str) -> int | None:
function update_toml (line 46) | def update_toml(path: Path, kv_heads: int) -> bool:
function process_card (line 69) | def process_card(path: Path) -> tuple[str, str]:
function main (line 85) | def main():
FILE: src/exo/__main__.py
function _maybe_run_inline_code (line 13) | def _maybe_run_inline_code(argv: Sequence[str]) -> bool:
FILE: src/exo/api/adapters/chat_completions.py
function chat_request_to_text_generation (line 36) | def chat_request_to_text_generation(
function chunk_to_response (line 113) | def chunk_to_response(
function generate_chat_stream (line 150) | async def generate_chat_stream(
function collect_chat_response (line 222) | async def collect_chat_response(
FILE: src/exo/api/adapters/claude.py
function finish_reason_to_claude_stop_reason (line 42) | def finish_reason_to_claude_stop_reason(
function _extract_tool_result_text (line 58) | def _extract_tool_result_text(block: ClaudeToolResultBlock) -> str:
function _strip_volatile_headers (line 72) | def _strip_volatile_headers(text: str) -> str:
function claude_request_to_text_generation (line 83) | def claude_request_to_text_generation(
function collect_claude_response (line 203) | async def collect_claude_response(
function generate_claude_stream (line 285) | async def generate_claude_stream(
FILE: src/exo/api/adapters/ollama.py
function _map_done_reason (line 27) | def _map_done_reason(
function _try_parse_json (line 43) | def _try_parse_json(value: str) -> dict[str, Any] | str:
function _build_tool_calls (line 50) | def _build_tool_calls(chunk: ToolCallChunk) -> list[OllamaToolCall]:
function _get_usage (line 67) | def _get_usage(
function ollama_request_to_text_generation (line 78) | def ollama_request_to_text_generation(
function generate_ollama_chat_stream (line 158) | async def generate_ollama_chat_stream(
function collect_ollama_chat_response (line 241) | async def collect_ollama_chat_response(
function ollama_generate_request_to_text_generation (line 309) | def ollama_generate_request_to_text_generation(
function generate_ollama_generate_stream (line 337) | async def generate_ollama_generate_stream(
function collect_ollama_generate_response (line 410) | async def collect_ollama_generate_response(
FILE: src/exo/api/adapters/responses.py
function _format_sse (line 52) | def _format_sse(event: ResponsesStreamEvent) -> str:
function _extract_content (line 57) | def _extract_content(content: str | list[ResponseContentPart]) -> str:
function responses_request_to_text_generation (line 64) | def responses_request_to_text_generation(
function collect_responses_response (line 171) | async def collect_responses_response(
function generate_responses_stream (line 258) | async def generate_responses_stream(
FILE: src/exo/api/main.py
function _format_to_content_type (line 184) | def _format_to_content_type(image_format: Literal["png", "jpeg", "webp"]...
function _ensure_seed (line 188) | def _ensure_seed(params: AdvancedImageParams | None) -> AdvancedImagePar...
class API (line 197) | class API:
method __init__ (line 198) | def __init__(
method reset (line 256) | def reset(self, result_clock: int, event_receiver: Receiver[IndexedEve...
method unpause (line 269) | def unpause(self, result_clock: int):
method _setup_exception_handlers (line 276) | def _setup_exception_handlers(self) -> None:
method http_exception_handler (line 279) | async def http_exception_handler(
method _setup_cors (line 291) | def _setup_cors(self) -> None:
method _setup_routes (line 300) | def _setup_routes(self) -> None:
method place_instance (line 355) | async def place_instance(self, payload: PlaceInstanceParams):
method create_instance (line 370) | async def create_instance(
method get_placement (line 395) | async def get_placement(
method get_placement_previews (line 432) | async def get_placement_previews(
method get_instance (line 554) | def get_instance(self, instance_id: InstanceId) -> Instance:
method delete_instance (line 559) | async def delete_instance(self, instance_id: InstanceId) -> DeleteInst...
method cancel_command (line 573) | async def cancel_command(self, command_id: CommandId) -> CancelCommand...
method _token_chunk_stream (line 592) | async def _token_chunk_stream(
method _collect_text_generation_with_stats (line 626) | async def _collect_text_generation_with_stats(
method _trigger_notify_user_to_download_model (line 695) | async def _trigger_notify_user_to_download_model(self, model_id: Model...
method chat_completions (line 700) | async def chat_completions(
method bench_chat_completions (line 735) | async def bench_chat_completions(
method _resolve_and_validate_text_model (line 751) | async def _resolve_and_validate_text_model(self, model_id: ModelId) ->...
method _validate_image_model (line 767) | async def _validate_image_model(self, model: ModelId) -> ModelId:
method stream_events (line 784) | def stream_events(self) -> StreamingResponse:
method get_image (line 800) | async def get_image(self, image_id: str) -> FileResponse:
method list_images (line 806) | async def list_images(self, request: Request) -> ImageListResponse:
method _build_image_url (line 821) | def _build_image_url(self, request: Request, image_id: Id) -> str:
method image_generations (line 826) | async def image_generations(
method _generate_image_stream (line 866) | async def _generate_image_stream(
method _collect_image_chunks (line 980) | async def _collect_image_chunks(
method _collect_image_generation (line 1066) | async def _collect_image_generation(
method _collect_image_generation_with_stats (line 1079) | async def _collect_image_generation_with_stats(
method bench_image_generations (line 1099) | async def bench_image_generations(
method _send_image_edits_command (line 1123) | async def _send_image_edits_command(
method image_edits (line 1192) | async def image_edits(
method bench_image_edits (line 1254) | async def bench_image_edits(
method claude_messages (line 1299) | async def claude_messages(
method openai_responses (line 1336) | async def openai_responses(
method _ollama_root (line 1372) | async def _ollama_root(self) -> JSONResponse:
method ollama_chat (line 1376) | async def ollama_chat(
method ollama_generate (line 1413) | async def ollama_generate(
method ollama_tags (line 1450) | async def ollama_tags(self) -> OllamaTagsResponse:
method ollama_show (line 1484) | async def ollama_show(self, request: Request) -> OllamaShowResponse:
method ollama_ps (line 1507) | async def ollama_ps(self) -> OllamaPsResponse:
method ollama_version (line 1525) | async def ollama_version(self) -> dict[str, str]:
method _calculate_total_available_memory (line 1529) | def _calculate_total_available_memory(self) -> Memory:
method get_models (line 1538) | async def get_models(self, status: str | None = Query(default=None)) -...
method add_custom_model (line 1571) | async def add_custom_model(self, payload: AddCustomModelParams) -> Mod...
method delete_custom_model (line 1592) | async def delete_custom_model(self, model_id: ModelId) -> JSONResponse:
method search_models (line 1601) | async def search_models(
method run (line 1641) | async def run(self):
method run_api (line 1662) | async def run_api(self, ev: anyio.Event):
method _apply_state (line 1676) | async def _apply_state(self):
method _save_merged_trace (line 1703) | def _save_merged_trace(self, event: TracesMerged) -> None:
method _pause_on_new_election (line 1718) | async def _pause_on_new_election(self):
method _cleanup_expired_images (line 1724) | async def _cleanup_expired_images(self):
method _send (line 1733) | async def _send(self, command: Command):
method _send_download (line 1740) | async def _send_download(self, command: DownloadCommand):
method start_download (line 1745) | async def start_download(
method delete_download (line 1755) | async def delete_download(
method _get_trace_path (line 1766) | def _get_trace_path(task_id: str) -> Path:
method list_traces (line 1772) | async def list_traces(self) -> TraceListResponse:
method get_trace (line 1796) | async def get_trace(self, task_id: str) -> TraceResponse:
method get_trace_stats (line 1818) | async def get_trace_stats(self, task_id: str) -> TraceStatsResponse:
method get_trace_raw (line 1857) | async def get_trace_raw(self, task_id: str) -> FileResponse:
method delete_traces (line 1869) | async def delete_traces(self, request: DeleteTracesRequest) -> DeleteT...
method get_onboarding (line 1881) | async def get_onboarding(self) -> JSONResponse:
method complete_onboarding (line 1884) | async def complete_onboarding(self) -> JSONResponse:
FILE: src/exo/api/tests/test_api_error_handling.py
function test_http_exception_handler_formats_openai_style (line 10) | def test_http_exception_handler_formats_openai_style() -> None:
FILE: src/exo/api/tests/test_cancel_command.py
function _make_api (line 12) | def _make_api() -> Any:
function test_cancel_nonexistent_command_returns_404 (line 26) | def test_cancel_nonexistent_command_returns_404() -> None:
function test_cancel_active_text_generation (line 40) | def test_cancel_active_text_generation() -> None:
function test_cancel_active_image_generation (line 60) | def test_cancel_active_image_generation() -> None:
FILE: src/exo/api/tests/test_claude_api.py
class TestFinishReasonToClaudeStopReason (line 18) | class TestFinishReasonToClaudeStopReason:
method test_stop_maps_to_end_turn (line 21) | def test_stop_maps_to_end_turn(self):
method test_length_maps_to_max_tokens (line 24) | def test_length_maps_to_max_tokens(self):
method test_tool_calls_maps_to_tool_use (line 27) | def test_tool_calls_maps_to_tool_use(self):
method test_function_call_maps_to_tool_use (line 30) | def test_function_call_maps_to_tool_use(self):
method test_content_filter_maps_to_end_turn (line 33) | def test_content_filter_maps_to_end_turn(self):
method test_none_returns_none (line 36) | def test_none_returns_none(self):
class TestClaudeRequestToInternal (line 40) | class TestClaudeRequestToInternal:
method test_basic_request_conversion (line 43) | def test_basic_request_conversion(self):
method test_request_with_system_string (line 61) | def test_request_with_system_string(self):
method test_request_with_system_text_blocks (line 78) | def test_request_with_system_text_blocks(self):
method test_request_with_content_blocks (line 96) | def test_request_with_content_blocks(self):
method test_request_with_multi_turn_conversation (line 116) | def test_request_with_multi_turn_conversation(self):
method test_request_with_optional_parameters (line 134) | def test_request_with_optional_parameters(self):
class TestClaudeMessagesRequestValidation (line 154) | class TestClaudeMessagesRequestValidation:
method test_request_requires_model (line 157) | def test_request_requires_model(self):
method test_request_requires_max_tokens (line 166) | def test_request_requires_max_tokens(self):
method test_request_requires_messages (line 175) | def test_request_requires_messages(self):
FILE: src/exo/api/tests/test_claude_tool_use.py
function _chunks_to_stream (line 17) | async def _chunks_to_stream(
function _collect_response (line 24) | async def _collect_response(
function _parse_sse_events (line 40) | def _parse_sse_events(events: list[str]) -> list[dict[str, Any]]:
class TestCollectClaudeResponseToolUse (line 50) | class TestCollectClaudeResponseToolUse:
method test_tool_call_chunk_produces_tool_use_blocks (line 53) | async def test_tool_call_chunk_produces_tool_use_blocks(self):
method test_multiple_tool_calls (line 79) | async def test_multiple_tool_calls(self):
method test_mixed_text_and_tool_use (line 106) | async def test_mixed_text_and_tool_use(self):
method test_no_content_produces_empty_text_block (line 133) | async def test_no_content_produces_empty_text_block(self):
class TestGenerateClaudeStreamToolUse (line 142) | class TestGenerateClaudeStreamToolUse:
method test_tool_call_emits_tool_use_events (line 145) | async def test_tool_call_emits_tool_use_events(self):
method test_streaming_mixed_text_and_tool_use (line 197) | async def test_streaming_mixed_text_and_tool_use(self):
method test_streaming_tool_block_stop_events (line 245) | async def test_streaming_tool_block_stop_events(self):
FILE: src/exo/api/tests/test_openai_responses_api.py
class TestResponsesRequestValidation (line 17) | class TestResponsesRequestValidation:
method test_request_requires_model (line 20) | def test_request_requires_model(self):
method test_request_requires_input (line 28) | def test_request_requires_input(self):
method test_request_accepts_string_input (line 36) | def test_request_accepts_string_i
Condensed preview — 724 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (3,469K chars).
[
{
"path": ".clauderules",
"chars": 2998,
"preview": "# Claude Code Rules - Follow Every Rule Exactly\n\nYou must prioritize straightforward code semantics, well-named types, c"
},
{
"path": ".cursorrules",
"chars": 3110,
"preview": "# follow **every** rule exactly; report any violation instead of silently fixing it.\n\nYou must prioritize straightforwar"
},
{
"path": ".envrc",
"chars": 10,
"preview": "use flake\n"
},
{
"path": ".githooks/post-checkout",
"chars": 360,
"preview": "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but '"
},
{
"path": ".githooks/post-commit",
"chars": 356,
"preview": "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but '"
},
{
"path": ".githooks/post-merge",
"chars": 354,
"preview": "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but '"
},
{
"path": ".githooks/pre-push",
"chars": 350,
"preview": "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but '"
},
{
"path": ".github/CODEOWNERS",
"chars": 47,
"preview": "* @ToxicPine\n* @AlexCheema\n* @GeluVrabie\n"
},
{
"path": ".github/ISSUE_TEMPLATE/bug_report.md",
"chars": 813,
"preview": "---\nname: Bug Report\nabout: Create a report to help us improve\ntitle: '[BUG] '\nlabels: bug\nassignees: ''\n---\n\n## Describ"
},
{
"path": ".github/ISSUE_TEMPLATE/feature_request.md",
"chars": 215,
"preview": "---\nname: Feature Request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: enhancement\nassignees: ''\n---\n\n<!-- "
},
{
"path": ".github/actions/conditional-commit/action.yml",
"chars": 340,
"preview": "name: Commit if changed\ndescription: \"Create a commit when the working tree is dirty\"\n\ninputs:\n message:\n descriptio"
},
{
"path": ".github/actions/format/action.yml",
"chars": 246,
"preview": "name: Format Code\n\ndescription: \"Run code formatter\"\n\nruns:\n using: \"composite\"\n steps:\n - name: Format code\n "
},
{
"path": ".github/actions/lint/action.yml",
"chars": 240,
"preview": "name: Lint Code\n\ndescription: \"Run code linter\"\n\nruns:\n using: \"composite\"\n steps:\n - name: Lint code\n run: ni"
},
{
"path": ".github/actions/lint-check/action.yml",
"chars": 254,
"preview": "name: Lint Check\n\ndescription: \"Check for lint errors\"\n\nruns:\n using: \"composite\"\n steps:\n - name: Lint check\n "
},
{
"path": ".github/actions/regenerate-protobufs/action.yml",
"chars": 288,
"preview": "name: Regenerate Protobufs\n\ndescription: \"Regenerate protobuf files\"\n\nruns:\n using: \"composite\"\n steps:\n - name: Re"
},
{
"path": ".github/actions/setup-python-uv/action.yml",
"chars": 413,
"preview": "name: Setup Python & uv\n\ndescription: \"Regenerate Python environment from uv.lock\"\n\nruns:\n using: \"composite\"\n steps:\n"
},
{
"path": ".github/actions/unit-test/action.yml",
"chars": 377,
"preview": "name: Unit Test\n\ndescription: \"Run unit tests\"\n\nruns:\n using: \"composite\"\n steps:\n - name: Run unit tests\n run"
},
{
"path": ".github/actions/verify-clean/action.yml",
"chars": 468,
"preview": "name: Verify Clean Working Tree\n\ndescription: \"Fail the job if the previous step left the working tree dirty\"\n\ninputs:\n "
},
{
"path": ".github/pull_request_template.md",
"chars": 561,
"preview": "## Motivation\n\n<!-- Why is this change needed? What problem does it solve? -->\n<!-- If it fixes an open issue, please li"
},
{
"path": ".github/workflows/build-app.yml",
"chars": 19255,
"preview": "name: Build EXO macOS DMG\n\n# Release workflow:\n# 1. Create a draft GitHub Release with the tag name (e.g. v1.0.0) and wr"
},
{
"path": ".github/workflows/pipeline.yml",
"chars": 4114,
"preview": "name: ci-pipeline\n\non:\n push:\n pull_request:\n branches:\n - staging\n - main\n\njobs:\n nix:\n name: Build "
},
{
"path": ".gitignore",
"chars": 381,
"preview": "# gitingest\ndigest.txt\n\n# python\n**/__pycache__\n\n# nix\n.direnv/\n\n# IDEA (PyCharm)\n.idea\n\n# xcode / macos\n*.xcuserstate\n*"
},
{
"path": ".mlx_typings/.gitkeep",
"chars": 0,
"preview": ""
},
{
"path": ".mlx_typings/mflux/__init__.pyi",
"chars": 117,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport os\n\nif \"TOKENIZERS_PARALLELISM\" not in os.environ: ...\n"
},
{
"path": ".mlx_typings/mflux/callbacks/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/callbacks/callback.pyi",
"chars": 1075,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport PIL.Image\nimport tqdm\nfrom typing im"
},
{
"path": ".mlx_typings/mflux/callbacks/callback_registry.pyi",
"chars": 816,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import TYPE_CHECKING\nfrom mflux.callbacks.callback im"
},
{
"path": ".mlx_typings/mflux/callbacks/generation_context.pyi",
"chars": 864,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport PIL.Image\nimport tqdm\nfrom typing im"
},
{
"path": ".mlx_typings/mflux/cli/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/cli/defaults/defaults.pyi",
"chars": 482,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport os\n\nBATTERY_PERCENTAGE_STOP_LIMIT = ...\nCONTROLNET_STRENGT"
},
{
"path": ".mlx_typings/mflux/models/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/common/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/common/cli/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/common/config/__init__.pyi",
"chars": 209,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.config.config import Config\nfrom mflux.m"
},
{
"path": ".mlx_typings/mflux/models/common/config/config.pyi",
"chars": 1904,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom pathlib import Path\nfrom typing import"
},
{
"path": ".mlx_typings/mflux/models/common/config/model_config.pyi",
"chars": 2268,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom functools import lru_cache\nfrom typing"
},
{
"path": ".mlx_typings/mflux/models/common/latent_creator/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/common/latent_creator/latent_creator.pyi",
"chars": 1514,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom pathlib import Path\nfrom typing import"
},
{
"path": ".mlx_typings/mflux/models/common/lora/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/common/lora/layer/fused_linear_lora_layer.pyi",
"chars": 353,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mlx import nn\nfrom mflux.models.common.lora.layer.linear_lor"
},
{
"path": ".mlx_typings/mflux/models/common/lora/layer/linear_lora_layer.pyi",
"chars": 491,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mlx import nn\n\nclass LoRALinear(nn.Module):\n @staticmetho"
},
{
"path": ".mlx_typings/mflux/models/common/lora/mapping/lora_loader.pyi",
"chars": 687,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\nfrom collections.abc im"
},
{
"path": ".mlx_typings/mflux/models/common/lora/mapping/lora_mapping.pyi",
"chars": 572,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom collections.abc import Callable\nfrom d"
},
{
"path": ".mlx_typings/mflux/models/common/lora/mapping/lora_saver.pyi",
"chars": 176,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.nn as nn\n\nclass LoRASaver:\n @staticmethod\n def b"
},
{
"path": ".mlx_typings/mflux/models/common/lora/mapping/lora_transforms.pyi",
"chars": 1182,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\nclass LoraTransforms:\n @staticmethod\n "
},
{
"path": ".mlx_typings/mflux/models/common/resolution/__init__.pyi",
"chars": 493,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.resolution.config_resolution import Conf"
},
{
"path": ".mlx_typings/mflux/models/common/resolution/actions.pyi",
"chars": 777,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom enum import Enum\nfrom typing import NamedTuple\n\nclass Quanti"
},
{
"path": ".mlx_typings/mflux/models/common/resolution/config_resolution.pyi",
"chars": 330,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import TYPE_CHECKING\nfrom mflux.models.common.config."
},
{
"path": ".mlx_typings/mflux/models/common/resolution/lora_resolution.pyi",
"chars": 572,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom pathlib import Path\n\nlogger = ...\n\nclass LoraResolution:\n "
},
{
"path": ".mlx_typings/mflux/models/common/resolution/path_resolution.pyi",
"chars": 239,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom pathlib import Path\n\nlogger = ...\n\nclass PathResolution:\n "
},
{
"path": ".mlx_typings/mflux/models/common/resolution/quantization_resolution.pyi",
"chars": 244,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nlogger = ...\n\nclass QuantizationResolution:\n RULES = ...\n @"
},
{
"path": ".mlx_typings/mflux/models/common/schedulers/__init__.pyi",
"chars": 696,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom .flow_match_euler_discrete_scheduler import FlowMatchEulerDi"
},
{
"path": ".mlx_typings/mflux/models/common/schedulers/base_scheduler.pyi",
"chars": 419,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom abc import ABC, abstractmethod\n\nclass "
},
{
"path": ".mlx_typings/mflux/models/common/schedulers/flow_match_euler_discrete_scheduler.pyi",
"chars": 901,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom typing import TYPE_CHECKING\nfrom mflux"
},
{
"path": ".mlx_typings/mflux/models/common/schedulers/linear_scheduler.pyi",
"chars": 567,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom typing import TYPE_CHECKING\nfrom mflux"
},
{
"path": ".mlx_typings/mflux/models/common/schedulers/seedvr2_euler_scheduler.pyi",
"chars": 573,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom typing import TYPE_CHECKING\nfrom mflux"
},
{
"path": ".mlx_typings/mflux/models/common/tokenizer/__init__.pyi",
"chars": 556,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.tokenizer.tokenizer import (\n BaseTok"
},
{
"path": ".mlx_typings/mflux/models/common/tokenizer/tokenizer.pyi",
"chars": 2019,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Protocol, "
},
{
"path": ".mlx_typings/mflux/models/common/tokenizer/tokenizer_loader.pyi",
"chars": 651,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import TYPE_CHECKING\nfrom mflux.models.common.tokeniz"
},
{
"path": ".mlx_typings/mflux/models/common/tokenizer/tokenizer_output.pyi",
"chars": 336,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom dataclasses import dataclass\n\n\"\"\"\nThis"
},
{
"path": ".mlx_typings/mflux/models/common/vae/__init__.pyi",
"chars": 213,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.vae.tiling_config import TilingConfig\nfr"
},
{
"path": ".mlx_typings/mflux/models/common/vae/tiling_config.pyi",
"chars": 335,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom dataclasses import dataclass\n\n@dataclass(frozen=True, slots="
},
{
"path": ".mlx_typings/mflux/models/common/vae/vae_tiler.pyi",
"chars": 707,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom typing import Callable\n\nclass VAETiler"
},
{
"path": ".mlx_typings/mflux/models/common/vae/vae_util.pyi",
"chars": 454,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom mflux.models.common"
},
{
"path": ".mlx_typings/mflux/models/common/weights/__init__.pyi",
"chars": 588,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.weights.loading.loaded_weights import Lo"
},
{
"path": ".mlx_typings/mflux/models/common/weights/loading/loaded_weights.pyi",
"chars": 499,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass MetaData:\n "
},
{
"path": ".mlx_typings/mflux/models/common/weights/loading/weight_applier.pyi",
"chars": 821,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.nn as nn\nfrom typing import TYPE_CHECKING\nfrom mflux.m"
},
{
"path": ".mlx_typings/mflux/models/common/weights/loading/weight_definition.pyi",
"chars": 2418,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom dataclasses import dataclass\nfrom typi"
},
{
"path": ".mlx_typings/mflux/models/common/weights/loading/weight_loader.pyi",
"chars": 626,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import TYPE_CHECKING\nfrom mflux.models.common.weights"
},
{
"path": ".mlx_typings/mflux/models/common/weights/mapping/weight_mapper.pyi",
"chars": 434,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom typing import Dict, List, Optional\nfro"
},
{
"path": ".mlx_typings/mflux/models/common/weights/mapping/weight_mapping.pyi",
"chars": 522,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom dataclasses import dataclass\nfrom typi"
},
{
"path": ".mlx_typings/mflux/models/common/weights/mapping/weight_transforms.pyi",
"chars": 531,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\nclass WeightTransforms:\n @staticmethod\n"
},
{
"path": ".mlx_typings/mflux/models/common/weights/saving/model_saver.pyi",
"chars": 366,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import Any, TYPE_CHECKING\nfrom mflux.models.common.we"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/depth_pro_initializer.pyi",
"chars": 248,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.depth_pro.model.depth_pro_model import DepthPro"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/decoder/feature_fusion_block_2d.pyi",
"chars": 293,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass FeatureFusionBlo"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/decoder/multires_conv_decoder.pyi",
"chars": 384,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass MultiresConvDeco"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/decoder/residual_block.pyi",
"chars": 238,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ResidualBlock(nn"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/depth_pro.pyi",
"chars": 450,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom dataclasses import dataclass\nfrom path"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/depth_pro_model.pyi",
"chars": 279,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass DepthProModel(nn"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/depth_pro_util.pyi",
"chars": 404,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass DepthProUtil:\n "
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/attention.pyi",
"chars": 287,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass Attention(nn.Modu"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/dino_vision_transformer.pyi",
"chars": 254,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass DinoVisionTransf"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/layer_scale.pyi",
"chars": 253,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass LayerScale(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/mlp.pyi",
"chars": 209,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass MLP(nn.Module):\n"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/patch_embed.pyi",
"chars": 216,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass PatchEmbed(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/dino_v2/transformer_block.pyi",
"chars": 222,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass TransformerBlock"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/encoder/depth_pro_encoder.pyi",
"chars": 311,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass DepthProEncoder("
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/encoder/upsample_block.pyi",
"chars": 353,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass UpSampleBlock(nn"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/model/head/fov_head.pyi",
"chars": 213,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass FOVHead(nn.Modul"
},
{
"path": ".mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_definition.pyi",
"chars": 590,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.loading."
},
{
"path": ".mlx_typings/mflux/models/depth_pro/weights/depth_pro_weight_mapping.pyi",
"chars": 295,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.mapping."
},
{
"path": ".mlx_typings/mflux/models/fibo/latent_creator/fibo_latent_creator.pyi",
"chars": 397,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\nclass FiboLatentCreator:\n @staticmethod"
},
{
"path": ".mlx_typings/mflux/models/fibo/weights/fibo_weight_definition.pyi",
"chars": 586,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.loading."
},
{
"path": ".mlx_typings/mflux/models/fibo/weights/fibo_weight_mapping.pyi",
"chars": 454,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.mapping."
},
{
"path": ".mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_image_processor.pyi",
"chars": 221,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.qwen.tokenizer.qwen_image_processor import Qwen"
},
{
"path": ".mlx_typings/mflux/models/fibo_vlm/tokenizer/qwen2vl_processor.pyi",
"chars": 744,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import Optional, Union\nfrom PIL import Image\n\nclass Q"
},
{
"path": ".mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_definition.pyi",
"chars": 617,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.loading."
},
{
"path": ".mlx_typings/mflux/models/fibo_vlm/weights/fibo_vlm_weight_mapping.pyi",
"chars": 421,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.mapping."
},
{
"path": ".mlx_typings/mflux/models/flux/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/flux/cli/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/flux/flux_initializer.pyi",
"chars": 1493,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.config import ModelConfig\n\nclass FluxIni"
},
{
"path": ".mlx_typings/mflux/models/flux/latent_creator/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/flux/latent_creator/flux_latent_creator.pyi",
"chars": 499,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\n\"\"\"\nThis type stub file was generated by p"
},
{
"path": ".mlx_typings/mflux/models/flux/model/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_embeddings.pyi",
"chars": 235,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass CLIPEmbeddings(nn"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder.pyi",
"chars": 276,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\n\"\"\"\nThis type stub file"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_encoder_layer.pyi",
"chars": 292,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass CLIPEncoderLayer("
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_mlp.pyi",
"chars": 301,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass CLIPMLP(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_sdpa_attention.pyi",
"chars": 457,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass CLIPSdpaAttention"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/clip_text_model.pyi",
"chars": 368,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass CLIPTextModel(nn."
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/clip_encoder/encoder_clip.pyi",
"chars": 293,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass EncoderCLIP(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/prompt_encoder.pyi",
"chars": 699,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mflux.models.common.tokenizer import T"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_attention.pyi",
"chars": 228,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5Attention(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_block.pyi",
"chars": 236,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5Block(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_dense_relu_dense.pyi",
"chars": 308,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5DenseReluDense("
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_encoder.pyi",
"chars": 262,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\n\"\"\"\nThis type stub file"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_feed_forward.pyi",
"chars": 230,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5FeedForward(nn."
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_layer_norm.pyi",
"chars": 228,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5LayerNorm(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_text_encoder/t5_encoder/t5_self_attention.pyi",
"chars": 367,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass T5SelfAttention(n"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_continuous.pyi",
"chars": 307,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass AdaLayerNormConti"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero.pyi",
"chars": 321,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass AdaLayerNormZero("
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/ada_layer_norm_zero_single.pyi",
"chars": 297,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass AdaLayerNormZeroS"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/common/attention_utils.pyi",
"chars": 1118,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass AttentionUtils:\n "
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/embed_nd.pyi",
"chars": 301,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass EmbedND(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/feed_forward.pyi",
"chars": 249,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass FeedForward(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/guidance_embedder.pyi",
"chars": 226,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass GuidanceEmbedder("
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/joint_attention.pyi",
"chars": 705,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom typing import Any\n\n"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/joint_transformer_block.pyi",
"chars": 1109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom typing import Any\nf"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/single_block_attention.pyi",
"chars": 429,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SingleBlockAttent"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/single_transformer_block.pyi",
"chars": 818,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom typing import Any\nf"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/text_embedder.pyi",
"chars": 223,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass TextEmbedder(nn.M"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/time_text_embed.pyi",
"chars": 367,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom mflux.models.common"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/timestep_embedder.pyi",
"chars": 226,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass TimestepEmbedder("
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_transformer/transformer.pyi",
"chars": 1863,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom mflux.models.common"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/common/attention.pyi",
"chars": 224,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass Attention(nn.Modu"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/common/resnet_block_2d.pyi",
"chars": 504,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass ResnetBlock2D(nn."
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/common/unet_mid_block.pyi",
"chars": 227,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass UnetMidBlock(nn.M"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_in.pyi",
"chars": 222,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvIn(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_norm_out.pyi",
"chars": 227,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvNormOut(nn.M"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/conv_out.pyi",
"chars": 223,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvOut(nn.Modul"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/decoder.pyi",
"chars": 287,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass Decoder(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_1_or_2.pyi",
"chars": 226,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass UpBlock1Or2(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_3.pyi",
"chars": 223,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass UpBlock3(nn.Modul"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_block_4.pyi",
"chars": 223,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass UpBlock4(nn.Modul"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/decoder/up_sampler.pyi",
"chars": 354,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass UpSampler(nn.Modu"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_in.pyi",
"chars": 222,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvIn(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_norm_out.pyi",
"chars": 227,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvNormOut(nn.M"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/conv_out.pyi",
"chars": 223,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport mlx.nn as nn\n\nclass ConvOut(nn.Modul"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_1.pyi",
"chars": 225,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass DownBlock1(nn.Mod"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_2.pyi",
"chars": 225,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass DownBlock2(nn.Mod"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_3.pyi",
"chars": 225,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass DownBlock3(nn.Mod"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_block_4.pyi",
"chars": 225,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass DownBlock4(nn.Mod"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/down_sampler.pyi",
"chars": 255,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass DownSampler(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/encoder/encoder.pyi",
"chars": 218,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass Encoder(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/flux/model/flux_vae/vae.pyi",
"chars": 375,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VAE(nn.Module):\n "
},
{
"path": ".mlx_typings/mflux/models/flux/model/redux_encoder/redux_encoder.pyi",
"chars": 217,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass ReduxEncoder(nn.M"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder.pyi",
"chars": 230,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipEncoder(nn."
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_encoder_layer.pyi",
"chars": 235,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipEncoderLaye"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_mlp.pyi",
"chars": 226,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipMLP(nn.Modu"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_multi_head_attention_pooling_head.pyi",
"chars": 252,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipMultiHeadAt"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_sdpa_attention.pyi",
"chars": 412,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipSdpaAttenti"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_embeddings.pyi",
"chars": 300,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipVisionEmbed"
},
{
"path": ".mlx_typings/mflux/models/flux/model/siglip_vision_transformer/siglip_vision_transformer.pyi",
"chars": 239,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass SiglipVisionTrans"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/concept_attention/attention_data.pyi",
"chars": 1103,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nimport PIL.Image\nfrom dataclasses import da"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/concept_attention/joint_attention_concept.pyi",
"chars": 478,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass JointAttentionCon"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/concept_attention/joint_transformer_block_concept.pyi",
"chars": 751,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom dataclasses import dataclass\nfrom mlx "
},
{
"path": ".mlx_typings/mflux/models/flux/variants/concept_attention/transformer_concept.pyi",
"chars": 853,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom mflux.models.common"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/controlnet/transformer_controlnet.pyi",
"chars": 703,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom mflux.models.common"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/kontext/__init__.pyi",
"chars": 156,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.flux.variants.kontext.flux_kontext import Flux1"
},
{
"path": ".mlx_typings/mflux/models/flux/variants/kontext/flux_kontext.pyi",
"chars": 1462,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any\n\nfrom mlx import "
},
{
"path": ".mlx_typings/mflux/models/flux/variants/kontext/kontext_util.pyi",
"chars": 336,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\nfrom mflux.models.flux.model.flux_vae.vae "
},
{
"path": ".mlx_typings/mflux/models/flux/variants/txt2img/flux.pyi",
"chars": 1701,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom pathlib import Path\nfrom mlx import nn\nfrom typing import An"
},
{
"path": ".mlx_typings/mflux/models/flux/weights/__init__.pyi",
"chars": 270,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.flux.weights.flux_weight_definition import Flux"
},
{
"path": ".mlx_typings/mflux/models/flux/weights/flux_lora_mapping.pyi",
"chars": 239,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom mflux.models.common.lora.mapping.lora_mapping import LoRAMap"
},
{
"path": ".mlx_typings/mflux/models/flux/weights/flux_weight_definition.pyi",
"chars": 1346,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.loading."
},
{
"path": ".mlx_typings/mflux/models/flux/weights/flux_weight_mapping.pyi",
"chars": 622,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nfrom typing import List\nfrom mflux.models.common.weights.mapping."
},
{
"path": ".mlx_typings/mflux/models/qwen/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/qwen/cli/__init__.pyi",
"chars": 54,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/qwen/latent_creator/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/qwen/latent_creator/qwen_latent_creator.pyi",
"chars": 499,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\n\n\"\"\"\nThis type stub file was generated by p"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/__init__.pyi",
"chars": 109,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\n\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_attention.pyi",
"chars": 595,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenAttention(nn."
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder.pyi",
"chars": 680,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenEncoder(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_encoder_layer.pyi",
"chars": 649,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenEncoderLayer("
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_mlp.pyi",
"chars": 266,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenMLP(nn.Module"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_patch_merger.pyi",
"chars": 317,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass PatchMerger(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_prompt_encoder.pyi",
"chars": 609,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mflux.models.common.tokenizer import T"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rms_norm.pyi",
"chars": 264,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenRMSNorm(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_rope.pyi",
"chars": 497,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenRotaryEmbeddi"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_text_encoder.pyi",
"chars": 340,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\n\"\"\"\nThis type stub file"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_attention.pyi",
"chars": 319,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionAttention(n"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_block.pyi",
"chars": 353,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionBlock(nn.Mo"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_language_encoder.pyi",
"chars": 430,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenVisionLanguag"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_mlp.pyi",
"chars": 241,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionMLP(nn.Modu"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_patch_embed.pyi",
"chars": 381,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionPatchEmbed("
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_rotary_embedding.pyi",
"chars": 263,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionRotaryEmbed"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_text_encoder/qwen_vision_transformer.pyi",
"chars": 789,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass VisionTransformer"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_attention.pyi",
"chars": 1394,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\nfrom typing import Any\n\n"
},
{
"path": ".mlx_typings/mflux/models/qwen/model/qwen_transformer/qwen_feed_forward.pyi",
"chars": 248,
"preview": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport mlx.core as mx\nfrom mlx import nn\n\nclass QwenFeedForward(n"
}
]
// ... and 524 more files (download for full content)
About this extraction
This page contains the full source code of the exo-explore/exo GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 724 files (3.1 MB), approximately 859.9k tokens, and a symbol index with 4125 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.