Showing preview only (5,208K chars total). Download the full file or copy to clipboard to get everything.
Repository: sherwinbahmani/tc4d
Branch: main
Commit: d1af1822a5fc
Files: 182
Total size: 4.9 MB
Directory structure:
gitextract_4duiir5e/
├── .gitignore
├── LICENSE
├── README.md
├── configs/
│ ├── tc4d_stage_1.yaml
│ ├── tc4d_stage_2.yaml
│ ├── tc4d_stage_3.yaml
│ ├── tc4d_stage_3_24_gb.yaml
│ ├── tc4d_stage_3_40_gb.yaml
│ └── tc4d_stage_3_eval.yaml
├── configs_comp/
│ ├── comp0.yaml
│ ├── comp1.yaml
│ ├── comp2.yaml
│ └── comp3.yaml
├── configs_prompts/
│ ├── a_bear_walking.yaml
│ ├── a_camel_walking.yaml
│ ├── a_carp_swimming.yaml
│ ├── a_cat_walking.yaml
│ ├── a_chihuahua_running.yaml
│ ├── a_clown_fish_swimming.yaml
│ ├── a_corgi_running.yaml
│ ├── a_deer_walking.yaml
│ ├── a_dog_riding_a_skateboard.yaml
│ ├── a_fox_walking.yaml
│ ├── a_german_shepherd_running.yaml
│ ├── a_giraffe_walking.yaml
│ ├── a_girl_is_riding_a_bicycle.yaml
│ ├── a_goat_walking.yaml
│ ├── a_hippo_walking.yaml
│ ├── a_labrador_running.yaml
│ ├── a_lion_walking.yaml
│ ├── a_pigeon_flying.yaml
│ ├── a_rhinoceros_walking.yaml
│ ├── a_seagull_flying.yaml
│ ├── a_shark_swimming.yaml
│ ├── a_sheep_running.yaml
│ ├── a_tiger_walking.yaml
│ ├── a_turtle_swimming.yaml
│ ├── a_unicorn_running.yaml
│ ├── a_wolf_running.yaml
│ ├── an_astronaut_riding_a_horse.yaml
│ ├── an_eagle_flying.yaml
│ ├── an_elephant_walking.yaml
│ ├── an_octopus_swimming.yaml
│ ├── assassin_riding_a_cow.yaml
│ ├── batman_riding_a_camel.yaml
│ ├── deadpool_riding_a_cow.yaml
│ ├── son_goku_riding_an_elephant.yaml
│ └── spiderman_riding_a_donkey.yaml
├── configs_prompts_static/
│ ├── a_firepit.yaml
│ ├── a_lamppost.yaml
│ └── water_spraying_out_of_a_firehydrant.yaml
├── launch.py
├── load/
│ ├── make_prompt_library.py
│ ├── prompt_library.json
│ ├── shapes/
│ │ ├── README.md
│ │ ├── animal.obj
│ │ ├── blub.obj
│ │ ├── cabin.obj
│ │ ├── env_sphere.obj
│ │ ├── hand_prismatic.obj
│ │ ├── human.obj
│ │ ├── nascar.obj
│ │ ├── potion.obj
│ │ └── teddy.obj
│ ├── tets/
│ │ ├── 128_tets.npz
│ │ ├── 32_tets.npz
│ │ ├── 64_tets.npz
│ │ └── generate_tets.py
│ └── zero123/
│ ├── download.sh
│ └── sd-objaverse-finetune-c_concat-256.yaml
├── requirements-dev.txt
├── requirements.txt
├── threestudio/
│ ├── __init__.py
│ ├── data/
│ │ ├── __init__.py
│ │ ├── co3d.py
│ │ ├── image.py
│ │ ├── multiview.py
│ │ ├── random_multiview.py
│ │ ├── single_multiview_combined.py
│ │ └── uncond.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── background/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── neural_environment_map_background.py
│ │ │ ├── solid_color_background.py
│ │ │ └── textured_background.py
│ │ ├── estimators.py
│ │ ├── exporters/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ └── mesh_exporter.py
│ │ ├── geometry/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── implicit_sdf.py
│ │ │ ├── implicit_volume.py
│ │ │ ├── tetrahedra_sdf_grid.py
│ │ │ └── volume_grid.py
│ │ ├── guidance/
│ │ │ ├── __init__.py
│ │ │ ├── deep_floyd_guidance.py
│ │ │ ├── deep_floyd_vsd_guidance.py
│ │ │ ├── multiview_diffusion_guidance.py
│ │ │ ├── stable_diffusion_guidance.py
│ │ │ ├── stable_diffusion_vsd_guidance.py
│ │ │ ├── svd_guidance.py
│ │ │ ├── video_stable_diffusion_guidance.py
│ │ │ ├── video_stable_diffusion_vsd_guidance.py
│ │ │ ├── videocrafter/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── License
│ │ │ │ ├── README.md
│ │ │ │ ├── cog.yaml
│ │ │ │ ├── configs/
│ │ │ │ │ ├── inference_i2v_512_v1.0.yaml
│ │ │ │ │ ├── inference_t2v_1024_v1.0.yaml
│ │ │ │ │ ├── inference_t2v_512_v1.0.yaml
│ │ │ │ │ └── inference_t2v_512_v2.0.yaml
│ │ │ │ ├── gradio_app.py
│ │ │ │ ├── lvdm/
│ │ │ │ │ ├── basics.py
│ │ │ │ │ ├── common.py
│ │ │ │ │ ├── distributions.py
│ │ │ │ │ ├── ema.py
│ │ │ │ │ ├── models/
│ │ │ │ │ │ ├── autoencoder.py
│ │ │ │ │ │ ├── ddpm3d.py
│ │ │ │ │ │ ├── samplers/
│ │ │ │ │ │ │ └── ddim.py
│ │ │ │ │ │ └── utils_diffusion.py
│ │ │ │ │ └── modules/
│ │ │ │ │ ├── attention.py
│ │ │ │ │ ├── encoders/
│ │ │ │ │ │ ├── condition.py
│ │ │ │ │ │ └── ip_resampler.py
│ │ │ │ │ ├── networks/
│ │ │ │ │ │ ├── ae_modules.py
│ │ │ │ │ │ └── openaimodel3d.py
│ │ │ │ │ └── x_transformer.py
│ │ │ │ ├── predict.py
│ │ │ │ ├── prompts/
│ │ │ │ │ └── test_prompts.txt
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── scripts/
│ │ │ │ │ ├── evaluation/
│ │ │ │ │ │ ├── ddp_wrapper.py
│ │ │ │ │ │ ├── funcs.py
│ │ │ │ │ │ └── inference.py
│ │ │ │ │ ├── gradio/
│ │ │ │ │ │ ├── i2v_test.py
│ │ │ │ │ │ └── t2v_test.py
│ │ │ │ │ ├── run_image2video.sh
│ │ │ │ │ └── run_text2video.sh
│ │ │ │ └── utils/
│ │ │ │ └── utils.py
│ │ │ ├── videocrafter_guidance.py
│ │ │ ├── zero123_guidance.py
│ │ │ └── zeroscope_guidance.py
│ │ ├── isosurface.py
│ │ ├── materials/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── diffuse_with_point_light_material.py
│ │ │ ├── neural_radiance_material.py
│ │ │ ├── no_material.py
│ │ │ └── sd_latent_adapter_material.py
│ │ ├── mesh.py
│ │ ├── networks.py
│ │ ├── prompt_processors/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── deepfloyd_prompt_processor.py
│ │ │ ├── stable_diffusion_prompt_processor.py
│ │ │ ├── videocrafter_prompt_processor.py
│ │ │ ├── zero123_prompt_processor.py
│ │ │ └── zeroscope_diffusion_prompt_processor.py
│ │ └── renderers/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── mask_nerf_renderer.py
│ │ ├── mask_nerf_renderer_multi.py
│ │ ├── stable_nerf_renderer.py
│ │ └── stable_nerf_renderer_multi.py
│ ├── systems/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── optimizers.py
│ │ ├── tc4d.py
│ │ └── utils.py
│ └── utils/
│ ├── __init__.py
│ ├── base.py
│ ├── bounding_boxes.py
│ ├── callbacks.py
│ ├── config.py
│ ├── config_scene.py
│ ├── misc.py
│ ├── object_trajectory.py
│ ├── ops.py
│ ├── rasterize.py
│ ├── saving.py
│ └── typing.py
└── train.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
outputs
experiments
.threestudio_cache
*.pyc
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# TC4D: Trajectory-Conditioned Text-to-4D Generation
<img src="./assets/tc4d.png" width="512">
| [Project Page](https://sherwinbahmani.github.io/tc4d/) | [Paper](https://arxiv.org/abs/2403.17920) | [User Study Template](https://github.com/victor-rong/video-generation-study) |
- **This code is forked from [threestudio](https://github.com/threestudio-project/threestudio).**
## Installation
### Install threestudio
**This part is the same as original threestudio. Skip it if you already have installed the environment.**
- You must have an NVIDIA graphics card with at least 24 GB VRAM and have [CUDA](https://developer.nvidia.com/cuda-downloads) installed.
- Install `Python >= 3.8`.
- (Optional, Recommended) Create a virtual environment:
```sh
python3 -m virtualenv venv
. venv/bin/activate
# Newer pip versions, e.g. pip-23.x, can be much faster than old versions, e.g. pip-20.x.
# For instance, it caches the wheels of git packages to avoid unnecessarily rebuilding them later.
python3 -m pip install --upgrade pip
```
- Install `PyTorch >= 1.12`. We have tested on `torch1.12.1+cu113` and `torch2.0.0+cu118`, but other versions should also work fine.
```sh
# torch1.12.1+cu113
pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113
# or torch2.0.0+cu118
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118
```
- (Optional, Recommended) Install ninja to speed up the compilation of CUDA extensions:
```sh
pip install ninja
```
- Install dependencies:
```sh
pip install -r requirements.txt
```
### Install MVDream
MVDream multi-view diffusion model is provided in a different codebase. Install it by:
```sh
git clone https://github.com/bytedance/MVDream extern/MVDream
pip install -e extern/MVDream
```
## Quickstart
Our model is trained in 3 stages, with a separate config file for each stage. After finishing a stage, training is resumed from that stage's checkpoint using the next stage's config.
```sh
seed=0
gpu=0
exp_root_dir=/path/to
# Trajectory-conditioned generation
scene_setup_path=configs_prompts/a_deer_walking.yaml
# Stage 1
python launch.py --config configs/tc4d_stage_1.yaml --train --gpu $gpu exp_root_dir=$exp_root_dir seed=$seed system.prompt_processor.prompt="a deer walking" system.scene_setup_path=$scene_setup_path
# Stage 2
ckpt=/path/to/tc4d_stage_1/a_deer_walking@timestamp/ckpts/last.ckpt
python launch.py --config configs/tc4d_stage_2.yaml --train --gpu $gpu exp_root_dir=$exp_root_dir seed=$seed system.prompt_processor.prompt="a deer walking" system.scene_setup_path=$scene_setup_path system.weights=$ckpt
# Stage 3
ckpt=/path/to/tc4d_stage_2/a_deer_walking@timestamp/ckpts/last.ckpt
python launch.py --config configs/tc4d_stage_3.yaml --train --gpu $gpu exp_root_dir=$exp_root_dir seed=$seed system.prompt_processor.prompt="a deer walking" system.scene_setup_path=$scene_setup_path system.weights=$ckpt
# Compositional 4D Scene after training multiple stage 3 trajectory-conditioned prompts
# Add ckpts in the compositional config and define the trajectory list, see configs_comp for examples used in the paper
scene_setup_path=configs_comp/comp0.yaml
ckpt=/path/to/tc4d_stage_2/a_deer_walking@timestamp/ckpts/last.ckpt # Just a dummy input, overwritten by ckpts specified in the comp0.yaml
python launch.py --config configs/tc4d_stage_3.yaml --test --gpu $gpu exp_root_dir=$exp_root_dir seed=$seed system.prompt_processor.prompt="a deer walking" system.scene_setup_path=$scene_setup_path system.weights=$ckpt
# Render high resolution videos used in the paper and project page after training
ckpt=/path/to/tc4d_stage_3/a_deer_walking@timestamp/ckpts/last.ckpt
python launch.py --config configs/tc4d_stage_3_eval.yaml --test --gpu $gpu exp_root_dir=$exp_root_dir seed=$seed system.prompt_processor.prompt="a deer walking" system.scene_setup_path=$scene_setup_path system.weights=$ckpt
```
## Memory Usage
We tested our code on a A100 80 GB, but the memory can be reduced to 24 GB by adjusting following:
- Reduce system.renderer.base_renderer.train_max_nums to fit into the given memory. This reduces the number of sampled points that are part of the backpropagation.
- Set system.guidance_video.low_ram_vae to a number between 1 and 16. This saves memory by only backpropagating through low_ram_vae number of frames instead of all 16.
Note that convergence can be slower when reducing these parameters; however, the quality should not degrade.
We provide 24 GB and 40 GB configs for stage 3: replace `--config configs/tc4d_stage_3.yaml` in the training command with `--config configs/tc4d_stage_3_24_gb.yaml` or `--config configs/tc4d_stage_3_40_gb.yaml`.
## Credits
This code is built on the [threestudio-project](https://github.com/threestudio-project/threestudio), [MVDream-threestudio](https://github.com/bytedance/MVDream-threestudio), [4D-fy-threestudio](https://github.com/sherwinbahmani/4dfy), and [VideoCrafter](https://github.com/AILab-CVC/VideoCrafter). Thanks to the maintainers for their contribution to the community!
## Citing
If you find TC4D helpful, please consider citing:
```
@article{bah2024tc4d,
title={TC4D: Trajectory-Conditioned Text-to-4D Generation},
author={Bahmani, Sherwin and Liu, Xian and Yifan, Wang and Skorokhodov, Ivan and Rong, Victor and Liu, Ziwei and Liu, Xihui and Park, Jeong Joon and Tulyakov, Sergey and Wetzstein, Gordon and Tagliasacchi, Andrea and Lindell, David B.},
journal={arXiv},
year={2024}
}
```
================================================
FILE: configs/tc4d_stage_1.yaml
================================================
name: "tc4d_stage_1"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 1.0
multi_rate: null
single_view:
batch_size: [1,1]
width: [64, 256]
height: [64, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: true
eval_height: 256
eval_width: 256
num_frames: 16
multi_view:
batch_size: [8,4] # must be divisible by n_view
n_view: 4
# 0-4999: 64x64, >=5000: 256x256
width: [64, 256]
height: [64, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
system_type: "tc4d-system"
system:
multi_rate_perc: ${data.multi_rate_perc}
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_elastic_loss: false
compute_rigidity_loss: false
rigidity_loss_time: false
compute_divergence_loss: false
div_type: "l1"
rigidity_loss_std: 0.0
elastic_loss_alpha: 1.0
time_encoding_config:
otype: ProgressiveBandHashGrid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "neural-environment-map-background"
background:
color_activation: sigmoid
random_aug: false
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
train_max_nums: 2000000
block_nums: [3,3]
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.98
max_step_percent_annealed: 0.5
anneal_start_step: 8000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.98, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.98, 0.50, 8000]
recon_loss: true
recon_std_rescale: 0.5
# prompt_processor_type_video: "zeroscope-prompt-processor"
# prompt_processor_video:
# pretrained_model_name_or_path: "cerspense/zeroscope_v2_576w"
# prompt: ???
# guidance_type_video: "zeroscope-guidance"
# guidance_video:
# pretrained_model_name_or_path: "cerspense/zeroscope_v2_576w"
# guidance_scale: 100.
# weighting_strategy: sds
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 0.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 100.
lambda_opaque: 0.
lambda_z_variance: 0.
lambda_tv: 0.
lambda_deformation: 0.
lambda_elastic: 0.0
lambda_divergence: 0.0
lambda_rigidity: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.encoding:
lr: 0.01
renderer.renderers.0.geometry.density_network:
lr: 0.001
renderer.renderers.0.geometry.feature_network:
lr: 0.001
background:
lr: 0.001
background_scene:
lr: 0.001
guidance_single_view:
lr: 0.0001
trainer:
max_steps: 10000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs/tc4d_stage_2.yaml
================================================
name: "tc4d_stage_2"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 0.5
multi_rate: null
single_view:
batch_size: [1,1]
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: true
eval_height: 256
eval_width: 256
num_frames: 16
multi_view:
batch_size: [4,4] # must be divisible by n_view
n_view: 4
# resolution is fixed at 256x256 for this stage (both milestone entries below are identical)
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
system_type: "tc4d-system"
system:
multi_rate_perc: ${data.multi_rate_perc}
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_elastic_loss: false
compute_rigidity_loss: false
rigidity_loss_time: false
compute_divergence_loss: false
div_type: "l1"
rigidity_loss_std: 0.0
elastic_loss_alpha: 1.0
time_encoding_config:
otype: ProgressiveBandHashGrid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378 # max resolution 233
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "neural-environment-map-background"
background:
color_activation: sigmoid
random_aug: false
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
train_max_nums: 2000000
block_nums: [3,3]
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.98
max_step_percent_annealed: 0.5
anneal_start_step: 8000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.02, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.50, 0.50, 8000]
recon_loss: true
recon_std_rescale: 0.5
# prompt_processor_type_video: "zeroscope-prompt-processor"
# prompt_processor_video:
# pretrained_model_name_or_path: "cerspense/zeroscope_v2_576w"
# prompt: ???
# guidance_type_video: "zeroscope-guidance"
# guidance_video:
# pretrained_model_name_or_path: "cerspense/zeroscope_v2_576w"
# guidance_scale: 100.
# weighting_strategy: sds
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 0.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 100.
lambda_z_variance: 0.
lambda_opaque: 0.
lambda_tv: 0.
lambda_deformation: 0.
lambda_elastic: 0.0
lambda_divergence: 0.0
lambda_rigidity: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.encoding:
lr: 0.01
renderer.renderers.0.geometry.density_network:
lr: 0.001
renderer.renderers.0.geometry.feature_network:
lr: 0.001
background:
lr: 0.001
background_scene:
lr: 0.001
guidance_single_view:
lr: 0.0001
trainer:
max_steps: 20000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs/tc4d_stage_3.yaml
================================================
name: "tc4d_stage_3"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 0.0
multi_rate: null
single_view:
batch_size: [1,1]
# resolution schedule follows width/height + resolution_milestones (both stages use 256x256 here)
# keeping early-training resolution low drastically reduces VRAM usage as empty space is pruned in early training
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: false
num_frames: 16
simultan: true
simultan_vid_mod_perc: 1.0
simultan_vid_mod: null
width_vid: 128
height_vid: 80
sample_rand_frames: t0_sub
num_frames_factor: 4
# eval_height: 1024
# eval_width: 1024
eval_height: 512
eval_width: 512
frame_range: 1.0
scene_single_obj: true
scene_single_obj_static: true
multi_obj: true
num_objs: 1
scene_iters: 9999999999 # currently: always render the object in a non-canonical pose, as determined by the trajectory
scene_iters_freq: 1
multi_view:
batch_size: [4,4] # must be divisible by n_view
n_view: 4
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
num_frames: ${data.single_view.num_frames}
sample_rand_frames: ${data.single_view.sample_rand_frames}
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
multi_obj: ${data.single_view.multi_obj}
num_objs: ${data.single_view.num_objs}
scene_iters: ${data.single_view.scene_iters}
scene_iters_freq: ${data.single_view.scene_iters_freq}
frame_range: ${data.single_view.frame_range}
system_type: "tc4d-system"
system:
weights_ignore_modules: ["renderer.base_renderer.estimators", "renderer.base_renderer.obj_trajs", "geometry.0.encoding.encoding.time_network", "geometry.0.encoding.encoding.encoding_time"]
multi_obj: ${data.single_view.multi_obj}
multi_rate_perc: ${data.multi_rate_perc}
simultan_vid_mod_perc: ${data.single_view.simultan_vid_mod_perc}
use_traj_length_frame_range: true
traj_length_frame_range: 0.3
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_divergence_loss: false
div_type: 'l2'
compute_elastic_loss: false
elastic_loss_alpha: 1.0
compute_rigidity_loss: false
rigidity_loss_time: true
rigidity_loss_std: 0.001
time_encoding_config:
otype: Grid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
anneal_density_blob_std_config:
min_anneal_step: 0
max_anneal_step: 5000
start_val: ${system.geometry.density_blob_std}
end_val: 0.5
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "neural-environment-map-background"
background:
color_activation: sigmoid
random_aug: false
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
occ_frame_updates: true
occ_n: 16
occ_ema_decay: 0.9
occ_ema_decay_init_zero: true
occ_thre: 0.5
occ_thre_post_init: 0.5
# A100 (80GB)
train_max_nums: 700000
block_nums: [3,3]
simultan: ${data.single_view.simultan}
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.5
max_step_percent_annealed: 0.5
anneal_start_step: 5000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.02, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.5, 0.5, 8000]
recon_loss: true
recon_std_rescale: 0.5
prompt_processor_type_video: "videocrafter-prompt-processor"
prompt_processor_video:
config: threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v2.0.yaml
# Checkpoint https://huggingface.co/VideoCrafter/VideoCrafter2/blob/main/model.ckpt
pretrained_model_name_or_path: "VideoCrafter/VideoCrafter2"
negative_prompt: "low motion, static statue, not moving, no motion"
prompt: ???
guidance_type_video: "videocrafter-guidance"
guidance_video:
config: ${system.prompt_processor_video.config}
pretrained_model_name_or_path: ${system.prompt_processor_video.pretrained_model_name_or_path}
guidance_scale: 100.
weighting_strategy: sds
use_hifa: false
width_vid: ${data.single_view.width_vid}
height_vid: ${data.single_view.height_vid}
motion_amp_scale: 1.
half_precision_weights: false
fps: 8
min_step_percent: [0, 0.02, 0.02, 5000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.98, 0.5, 5000]
# Set a number between 1 and 16, this saves memory by only backpropagating through low_ram_vae number of frames instead of all 16
# low_ram_vae: 16
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 1.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 0.
lambda_opaque: 0.
lambda_z_variance: 0.
lambda_tv: 0.
lambda_deformation: 100.0
lambda_elastic: 0.0
lambda_rigidity: 0.0
lambda_divergence: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.density_network:
lr: 0.0
renderer.renderers.0.geometry.feature_network:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding_time:
lr: 0.001
renderer.renderers.0.geometry.encoding.encoding.time_network:
lr: 0.0001
background:
lr: 0.0
background_scene:
lr: 0.0
trainer:
max_steps: 30000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs/tc4d_stage_3_24_gb.yaml
================================================
name: "tc4d_stage_3"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 0.0
multi_rate: null
single_view:
batch_size: [1,1]
# resolution schedule follows width/height + resolution_milestones (both stages use 256x256 here)
# keeping early-training resolution low drastically reduces VRAM usage as empty space is pruned in early training
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: false
num_frames: 16
simultan: true
simultan_vid_mod_perc: 1.0
simultan_vid_mod: null
width_vid: 128
height_vid: 80
sample_rand_frames: t0_sub
num_frames_factor: 4
# eval_height: 1024
# eval_width: 1024
eval_height: 512
eval_width: 512
frame_range: 1.0
scene_single_obj: true
scene_single_obj_static: true
multi_obj: true
num_objs: 1
scene_iters: 9999999999 # currently: always render the object in a non-canonical pose, as determined by the trajectory
scene_iters_freq: 1
multi_view:
batch_size: [4,4] # must be divisible by n_view
n_view: 4
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
num_frames: ${data.single_view.num_frames}
sample_rand_frames: ${data.single_view.sample_rand_frames}
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
multi_obj: ${data.single_view.multi_obj}
num_objs: ${data.single_view.num_objs}
scene_iters: ${data.single_view.scene_iters}
scene_iters_freq: ${data.single_view.scene_iters_freq}
frame_range: ${data.single_view.frame_range}
system_type: "tc4d-system"
system:
weights_ignore_modules: ["renderer.base_renderer.estimators", "renderer.base_renderer.obj_trajs", "geometry.0.encoding.encoding.time_network", "geometry.0.encoding.encoding.encoding_time"]
multi_obj: ${data.single_view.multi_obj}
multi_rate_perc: ${data.multi_rate_perc}
simultan_vid_mod_perc: ${data.single_view.simultan_vid_mod_perc}
use_traj_length_frame_range: true
traj_length_frame_range: 0.3
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_divergence_loss: false
div_type: 'l2'
compute_elastic_loss: false
elastic_loss_alpha: 1.0
compute_rigidity_loss: false
rigidity_loss_time: true
rigidity_loss_std: 0.001
time_encoding_config:
otype: Grid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
anneal_density_blob_std_config:
min_anneal_step: 0
max_anneal_step: 5000
start_val: ${system.geometry.density_blob_std}
end_val: 0.5
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "neural-environment-map-background"
background:
color_activation: sigmoid
random_aug: false
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
occ_frame_updates: true
occ_n: 16
occ_ema_decay: 0.9
occ_ema_decay_init_zero: true
occ_thre: 0.5
occ_thre_post_init: 0.5
# 24 GB GPU (train_max_nums reduced accordingly)
train_max_nums: 60000
block_nums: [3,3]
simultan: ${data.single_view.simultan}
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.5
max_step_percent_annealed: 0.5
anneal_start_step: 5000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.02, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.5, 0.5, 8000]
recon_loss: true
recon_std_rescale: 0.5
prompt_processor_type_video: "videocrafter-prompt-processor"
prompt_processor_video:
config: threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v2.0.yaml
# Checkpoint https://huggingface.co/VideoCrafter/VideoCrafter2/blob/main/model.ckpt
pretrained_model_name_or_path: "VideoCrafter/VideoCrafter2"
negative_prompt: "low motion, static statue, not moving, no motion"
prompt: ???
guidance_type_video: "videocrafter-guidance"
guidance_video:
config: ${system.prompt_processor_video.config}
pretrained_model_name_or_path: ${system.prompt_processor_video.pretrained_model_name_or_path}
guidance_scale: 100.
weighting_strategy: sds
use_hifa: false
width_vid: ${data.single_view.width_vid}
height_vid: ${data.single_view.height_vid}
motion_amp_scale: 1.
half_precision_weights: false
fps: 8
min_step_percent: [0, 0.02, 0.02, 5000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.98, 0.5, 5000]
# Set a number between 1 and 16, this saves memory by only backpropagating through low_ram_vae number of frames instead of all 16
low_ram_vae: 3
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 1.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 0.
lambda_opaque: 0.
lambda_z_variance: 0.
lambda_tv: 0.
lambda_deformation: 100.0
lambda_elastic: 0.0
lambda_rigidity: 0.0
lambda_divergence: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.density_network:
lr: 0.0
renderer.renderers.0.geometry.feature_network:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding_time:
lr: 0.001
renderer.renderers.0.geometry.encoding.encoding.time_network:
lr: 0.0001
background:
lr: 0.0
background_scene:
lr: 0.0
trainer:
max_steps: 30000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs/tc4d_stage_3_40_gb.yaml
================================================
name: "tc4d_stage_3"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 0.0
multi_rate: null
single_view:
batch_size: [1,1]
# resolution schedule follows width/height + resolution_milestones (both stages use 256x256 here)
# keeping early-training resolution low drastically reduces VRAM usage as empty space is pruned in early training
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: false
num_frames: 16
simultan: true
simultan_vid_mod_perc: 1.0
simultan_vid_mod: null
width_vid: 128
height_vid: 80
sample_rand_frames: t0_sub
num_frames_factor: 4
# eval_height: 1024
# eval_width: 1024
eval_height: 512
eval_width: 512
frame_range: 1.0
scene_single_obj: true
scene_single_obj_static: true
multi_obj: true
num_objs: 1
scene_iters: 9999999999 # currently: always render the object in a non-canonical pose, as determined by the trajectory
scene_iters_freq: 1
multi_view:
batch_size: [4,4] # must be divisible by n_view
n_view: 4
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
num_frames: ${data.single_view.num_frames}
sample_rand_frames: ${data.single_view.sample_rand_frames}
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
multi_obj: ${data.single_view.multi_obj}
num_objs: ${data.single_view.num_objs}
scene_iters: ${data.single_view.scene_iters}
scene_iters_freq: ${data.single_view.scene_iters_freq}
frame_range: ${data.single_view.frame_range}
system_type: "tc4d-system"
system:
weights_ignore_modules: ["renderer.base_renderer.estimators", "renderer.base_renderer.obj_trajs", "geometry.0.encoding.encoding.time_network", "geometry.0.encoding.encoding.encoding_time"]
multi_obj: ${data.single_view.multi_obj}
multi_rate_perc: ${data.multi_rate_perc}
simultan_vid_mod_perc: ${data.single_view.simultan_vid_mod_perc}
use_traj_length_frame_range: true
traj_length_frame_range: 0.3
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_divergence_loss: false
div_type: 'l2'
compute_elastic_loss: false
elastic_loss_alpha: 1.0
compute_rigidity_loss: false
rigidity_loss_time: true
rigidity_loss_std: 0.001
time_encoding_config:
otype: Grid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
anneal_density_blob_std_config:
min_anneal_step: 0
max_anneal_step: 5000
start_val: ${system.geometry.density_blob_std}
end_val: 0.5
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "neural-environment-map-background"
background:
color_activation: sigmoid
random_aug: false
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
occ_frame_updates: true
occ_n: 16
occ_ema_decay: 0.9
occ_ema_decay_init_zero: true
occ_thre: 0.5
occ_thre_post_init: 0.5
# 40 GB GPU (train_max_nums reduced accordingly)
train_max_nums: 600000
block_nums: [3,3]
simultan: ${data.single_view.simultan}
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.5
max_step_percent_annealed: 0.5
anneal_start_step: 5000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.02, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.5, 0.5, 8000]
recon_loss: true
recon_std_rescale: 0.5
prompt_processor_type_video: "videocrafter-prompt-processor"
prompt_processor_video:
config: threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v2.0.yaml
# Checkpoint https://huggingface.co/VideoCrafter/VideoCrafter2/blob/main/model.ckpt
pretrained_model_name_or_path: "VideoCrafter/VideoCrafter2"
negative_prompt: "low motion, static statue, not moving, no motion"
prompt: ???
guidance_type_video: "videocrafter-guidance"
guidance_video:
config: ${system.prompt_processor_video.config}
pretrained_model_name_or_path: ${system.prompt_processor_video.pretrained_model_name_or_path}
guidance_scale: 100.
weighting_strategy: sds
use_hifa: false
width_vid: ${data.single_view.width_vid}
height_vid: ${data.single_view.height_vid}
motion_amp_scale: 1.
half_precision_weights: false
fps: 8
min_step_percent: [0, 0.02, 0.02, 5000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.98, 0.5, 5000]
# Set a number between 1 and 16, this saves memory by only backpropagating through low_ram_vae number of frames instead of all 16
low_ram_vae: 3
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 1.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 0.
lambda_opaque: 0.
lambda_z_variance: 0.
lambda_tv: 0.
lambda_deformation: 100.0
lambda_elastic: 0.0
lambda_rigidity: 0.0
lambda_divergence: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.density_network:
lr: 0.0
renderer.renderers.0.geometry.feature_network:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding_time:
lr: 0.001
renderer.renderers.0.geometry.encoding.encoding.time_network:
lr: 0.0001
background:
lr: 0.0
background_scene:
lr: 0.0
trainer:
max_steps: 30000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs/tc4d_stage_3_eval.yaml
================================================
name: "tc4d_stage_3_eval"
tag: "${rmspace:${system.prompt_processor.prompt},_}"
exp_root_dir: "outputs"
seed: 0
data_type: "single-multiview-combined-camera-datamodule"
data:
multi_rate_perc: 0.0
multi_rate: null
single_view:
batch_size: [1,1]
# resolution schedule follows width/height + resolution_milestones (both stages use 256x256 here)
# keeping early-training resolution low drastically reduces VRAM usage as empty space is pruned in early training
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
eval_camera_distance: 2.0
eval_fovy_deg: 40.
static: false
num_frames: 16
simultan: true
simultan_vid_mod_perc: 1.0
simultan_vid_mod: null
width_vid: 128
height_vid: 80
sample_rand_frames: t0_sub
num_frames_factor: 8
eval_height: 1024
eval_width: 1024
frame_range: 1.0
scene_single_obj: true
scene_single_obj_static: true
multi_obj: true
num_objs: 1
scene_iters: 9999999999 # currently: always render the object in a non-canonical pose, as determined by the trajectory
scene_iters_freq: 1
multi_view:
batch_size: [4,4] # must be divisible by n_view
n_view: 4
width: [256, 256]
height: [256, 256]
resolution_milestones: [5000]
camera_distance_range: [1.5, 2.0]
fovy_range: [15, 60]
elevation_range: [0, 30]
camera_perturb: 0.
center_perturb: 0.
up_perturb: 0.
n_val_views: 4
eval_camera_distance: 2.0
eval_fovy_deg: 40.
relative_radius: false
num_frames: ${data.single_view.num_frames}
sample_rand_frames: ${data.single_view.sample_rand_frames}
eval_height: ${data.single_view.eval_height}
eval_width: ${data.single_view.eval_width}
multi_obj: ${data.single_view.multi_obj}
num_objs: ${data.single_view.num_objs}
scene_iters: ${data.single_view.scene_iters}
scene_iters_freq: ${data.single_view.scene_iters_freq}
frame_range: ${data.single_view.frame_range}
system_type: "tc4d-system"
system:
multi_obj: ${data.single_view.multi_obj}
multi_rate_perc: ${data.multi_rate_perc}
simultan_vid_mod_perc: ${data.single_view.simultan_vid_mod_perc}
use_traj_length_frame_range: true
traj_length_frame_range: 0.3
stage: coarse
geometry_type: "implicit-volume"
geometry:
radius: 1.0
normal_type: "finite_difference"
density_bias: "blob_magic3d"
density_activation: softplus
density_blob_scale: 10.
density_blob_std: 0.5
pos_encoding_config:
otype: HashGridSpatialTimeDeform
n_levels: 16
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 16
per_level_scale: 1.447269237440378 # max resolution 4096
static: ${data.single_view.static}
num_frames: ${data.single_view.num_frames}
compute_divergence_loss: false
div_type: 'l2'
compute_elastic_loss: false
elastic_loss_alpha: 1.0
compute_rigidity_loss: false
rigidity_loss_time: true
rigidity_loss_std: 0.001
time_encoding_config:
otype: Grid
type: 'Hash'
n_levels: 8
n_features_per_level: 2
log2_hashmap_size: 19
base_resolution: 4
per_level_scale: 1.447269237440378
start_level: 4
start_step: 0
update_steps: 500
time_network_config:
otype: "VanillaMLP"
n_neurons: 64
n_hidden_layers: 2
anneal_density_blob_std_config:
min_anneal_step: 0
max_anneal_step: 5000
start_val: ${system.geometry.density_blob_std}
end_val: 0.5
material_type: "no-material"
material:
n_output_dims: 3
color_activation: sigmoid
background_type: "solid-color-background"
background:
color: [0.5, 0.5, 0.5]
renderer_type: "stable-nerf-volume-renderer-multi"
renderer:
base_renderer_type: "mask-nerf-volume-renderer-multi"
base_renderer:
radius: ${system.geometry.radius}
num_samples_per_ray: 512
occ_frame_updates: true
occ_n: 16
occ_ema_decay: 0.9
occ_ema_decay_init_zero: true
occ_thre: 0.5
occ_thre_post_init: 0.5
# A100 (80GB)
train_max_nums: 700000
block_nums: [3,3]
simultan: ${data.single_view.simultan}
prompt_processor_type: "stable-diffusion-prompt-processor"
prompt_processor:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
front_threshold: 30.
back_threshold: 30.
prompt_processor_type_multi_view: "stable-diffusion-prompt-processor"
prompt_processor_multi_view:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
prompt: ???
negative_prompt: "ugly, bad anatomy, blurry, pixelated obscure, unnatural colors, poor lighting, dull, and unclear, cropped, lowres, low quality, artifacts, duplicate, morbid, mutilated, poorly drawn face, deformed, dehydrated, bad proportions"
front_threshold: 30.
back_threshold: 30.
guidance_type: "stable-diffusion-vsd-guidance"
guidance:
pretrained_model_name_or_path: "stabilityai/stable-diffusion-2-1-base"
pretrained_model_name_or_path_lora: "stabilityai/stable-diffusion-2-1"
guidance_scale: 7.5
min_step_percent: 0.02
max_step_percent: 0.5
max_step_percent_annealed: 0.5
anneal_start_step: 5000
guidance_type_multi_view: "multiview-diffusion-guidance"
guidance_multi_view:
model_name: "sd-v2.1-base-4view"
ckpt_path: null # path to a pre-downloaded checkpoint file (null for loading from URL)
guidance_scale: 50.0
min_step_percent: [0, 0.02, 0.02, 8000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.5, 0.5, 8000]
recon_loss: true
recon_std_rescale: 0.5
prompt_processor_type_video: "videocrafter-prompt-processor"
prompt_processor_video:
config: threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v2.0.yaml
# Checkpoint https://huggingface.co/VideoCrafter/VideoCrafter2/blob/main/model.ckpt
pretrained_model_name_or_path: "VideoCrafter/VideoCrafter2"
negative_prompt: "low motion, static statue, not moving, no motion"
prompt: ???
guidance_type_video: "videocrafter-guidance"
guidance_video:
config: ${system.prompt_processor_video.config}
pretrained_model_name_or_path: ${system.prompt_processor_video.pretrained_model_name_or_path}
guidance_scale: 100.
weighting_strategy: sds
use_hifa: false
width_vid: ${data.single_view.width_vid}
height_vid: ${data.single_view.height_vid}
motion_amp_scale: 1.
half_precision_weights: false
fps: 8
min_step_percent: [0, 0.02, 0.02, 5000] # (start_iter, start_val, end_val, end_iter)
max_step_percent: [0, 0.98, 0.5, 5000]
loggers:
wandb:
enable: false
project: "threestudio"
loss:
lambda_sds: 1.
lambda_sds_video: 1.0
lambda_vsd: 1.
lambda_lora: 1.
lambda_orient: 0.
lambda_sparsity: 0.
lambda_opaque: 0.
lambda_z_variance: 0.
lambda_tv: 0.
lambda_deformation: 100.0
lambda_elastic: 0.0
lambda_rigidity: 0.0
lambda_divergence: 0.0
optimizer:
name: AdamW
args:
betas: [0.9, 0.99]
eps: 1.e-15
params:
renderer.renderers.0.geometry.density_network:
lr: 0.0
renderer.renderers.0.geometry.feature_network:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding:
lr: 0.0
renderer.renderers.0.geometry.encoding.encoding.encoding_time:
lr: 0.001
renderer.renderers.0.geometry.encoding.encoding.time_network:
lr: 0.0001
background:
lr: 0.0
background_scene:
lr: 0.0
trainer:
max_steps: 30000
log_every_n_steps: 1
num_sanity_val_steps: 0
val_check_interval: 1000
enable_progress_bar: true
precision: 16-mixed
checkpoint:
save_last: true
save_top_k: 0
every_n_train_steps: ${trainer.max_steps}
================================================
FILE: configs_comp/comp0.yaml
================================================
name: "comp0"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.4, 0.4, 0.4]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
time_offset: 0.
# Object 1
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
time_offset: 0.33
# Object 2
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
time_offset: 0.66
# Object 3
-
proxy_size: [0.4, 0.4, 0.4]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.5]
angle_offset: 90.
prompt_processor:
prompt: ["a bear walking", "an astronaut riding a horse", "deadpool riding a cow", "a firepit"]
# First train each object independently and use this to render different trained models together
checkpoints: [
/path/to/a_bear_walking@x/ckpts/last.ckpt,
/path/to/an_astronaut_riding_a_horse@x/ckpts/last.ckpt,
/path/to/deadpool_riding_a_cow@x/ckpts/last.ckpt,
/path/to/a_firepit@x/ckpts/last.ckpt
]
================================================
FILE: configs_comp/comp1.yaml
================================================
name: "comp1"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.25, 0.5], [0.35, 0.6, 0.5], [0.5, 0.75, 0.5]]
angle_offset: 0.
# Object 1
-
proxy_size: [0.5, 0.5, 0.6]
trajs:
- traj_type: spline-trajectory
coords: [[0.75, 0.75, 0.5], [0.6, 0.35, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["an elephant walking", "a giraffe walking"]
# First train each object independently and use this to render different trained models together
checkpoints: [
/path/to/an_elephant_walking@x/ckpts/last.ckpt,
/path/to/a_giraffe_walking@x/ckpts/last.ckpt,
]
================================================
FILE: configs_comp/comp2.yaml
================================================
name: "comp2"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.30, 0.30, 0.5], [0.30, 0.70, 0.5]]
angle_offset: 0.
# Object 1
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.70, 0.70, 0.5], [0.70, 0.30, 0.5]]
angle_offset: 0.
# Object 2
-
proxy_size: [0.6, 0.6, 0.6]
trajs:
- traj_type: spline-trajectory
coords: [[0.30, 0.70, 0.5], [0.70, 0.70, 0.5]]
angle_offset: 0.
# Object 3
-
proxy_size: [0.3, 0.3, 0.3]
trajs:
- traj_type: spline-trajectory
coords: [[0.70, 0.30, 0.5], [0.30, 0.30, 0.5]]
angle_offset: 0.
# Object 4
-
proxy_size: [0.27, 0.27, 0.27]
# proxy_size: [0.2, 0.2, 0.2]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.52]
angle_offset: 90.
prompt_processor:
prompt: ["a tiger walking", "a rhinoceros walking", "a firehydrant", "assassin riding a cow", "a deer walking"]
# First train each object independently and use this to render different trained models together
# NOTE(review): assumes checkpoints correspond to prompts by index (as in comp0/comp1/comp3),
# so the list below follows the prompt order above — verify against the loading code.
checkpoints: [
/path/to/a_tiger_walking@x/ckpts/last.ckpt,
/path/to/a_rhinoceros_walking@x/ckpts/last.ckpt,
/path/to/water_spraying_out_of_a_firehydrant@x/ckpts/last.ckpt,
/path/to/assassin_riding_a_cow@x/ckpts/last.ckpt,
/path/to/a_deer_walking@x/ckpts/last.ckpt,
]
================================================
FILE: configs_comp/comp3.yaml
================================================
name: "comp3"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.75, 0.65, 0.4], [0.75, 0.35, 0.4]]
angle_offset: 0.
# Object 1
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.4], [0.3, 0.5, 0.4], [0.5, 0.7, 0.4]]
angle_offset: 0.
# Object 2
-
proxy_size: [0.4, 0.4, 0.4]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.5, 0.7], [0.5, 0.25, 0.7], [0.75, 0.5, 0.7], [0.5, 0.75, 0.7],[0.25, 0.5, 0.7]]
angle_offset: 0.
# Object 3
-
proxy_size: [0.7, 0.7, 0.7]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.55]
angle_offset: 90.
prompt_processor:
prompt: ["a sheep running", "a goat walking", "a seagull flying", "a lamppost"]
# First train each object independently and use this to render different trained models together
checkpoints: [
/path/to/a_sheep_running@x/ckpts/last.ckpt,
/path/to/a_goat_walking@x/ckpts/last.ckpt,
/path/to/a_seagull_flying_with_fluttering_wings@x/ckpts/last.ckpt,
/path/to/a_lamppost@x/ckpts/last.ckpt,
]
================================================
FILE: configs_prompts/a_bear_walking.yaml
================================================
name: "a_bear_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
# coords: [[0.25, 0.25, 0.5], [0.35, 0.6, 0.5], [0.5, 0.75, 0.5]]
# coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a bear walking"]
================================================
FILE: configs_prompts/a_camel_walking.yaml
================================================
name: "a_camel_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a camel walking"]
================================================
FILE: configs_prompts/a_carp_swimming.yaml
================================================
name: "a_carp_swimming"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.3, 0.65, 0.5], [0.25, 0.52, 0.5], [0.25, 0.4, 0.5], [0.25, 0.3, 0.5], [0.45, 0.3, 0.5], [0.45, 0.7, 0.5], [0.4, 0.7, 0.5], [0.3, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a carp swimming"]
================================================
FILE: configs_prompts/a_cat_walking.yaml
================================================
name: "a_cat_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.65, 0.65, 0.65]
trajs:
- traj_type: spline-trajectory
coords: [[0.34, 0.6, 0.5], [0.4, 0.65, 0.5], [0.6, 0.6, 0.5], [0.7, 0.3, 0.5], [0.33, 0.51, 0.5], [0.34, 0.6, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a cat walking"]
================================================
FILE: configs_prompts/a_chihuahua_running.yaml
================================================
name: "a_chihuahua_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.5], [0.28, 0.55, 0.5], [0.4, 0.75, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a chihuahua running"]
================================================
FILE: configs_prompts/a_clown_fish_swimming.yaml
================================================
name: "a_clown_fish_swimming"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.3, 0.45, 0.5], [0.5, 0.75, 0.5], [0.7, 0.5, 0.5], [0.5, 0.25, 0.5], [0.3, 0.45, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a clown fish swimming"]
================================================
FILE: configs_prompts/a_corgi_running.yaml
================================================
name: "a_corgi_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.5], [0.6, 0.5, 0.5], [0.5, 0.7, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a corgi running"]
================================================
FILE: configs_prompts/a_deer_walking.yaml
================================================
name: "a_deer_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
# coords: [[0.25, 0.5, 0.5], [0.5, 0.25, 0.5], [0.75, 0.5, 0.5], [0.5, 0.75, 0.5], [0.25, 0.5, 0.5]]
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a deer walking"]
================================================
FILE: configs_prompts/a_dog_riding_a_skateboard.yaml
================================================
name: "a_dog_riding_a_skateboard"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.3, 0.45, 0.5], [0.35, 0.30, 0.5], [0.4, 0.3, 0.5], [0.55, 0.7, 0.5], [0.75, 0.45, 0.5], [0.55, 0.45, 0.5], [0.3, 0.45, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a dog riding a skateboard"]
================================================
FILE: configs_prompts/a_fox_walking.yaml
================================================
name: "a_fox_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.65, 0.25, 0.5], [0.25, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a fox walking"]
================================================
FILE: configs_prompts/a_german_shepherd_running.yaml
================================================
name: "a_german_shepherd_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.5], [0.3, 0.5, 0.5], [0.5, 0.7, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a german shepherd running"]
================================================
FILE: configs_prompts/a_giraffe_walking.yaml
================================================
name: "a_giraffe_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
# For compositional scene
# coords: [[0.75, 0.75, 0.5], [0.6, 0.35, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
prompt_processor:
# prompt: ["a giraffe"] # used in static stage
prompt: ["a giraffe walking"]
================================================
FILE: configs_prompts/a_girl_is_riding_a_bicycle.yaml
================================================
name: "a_girl_is_riding_a_bicycle"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.25, 0.5, 0.5], [0.5, 0.25, 0.5], [0.75, 0.5, 0.5], [0.5, 0.75, 0.5], [0.25, 0.5, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a girl is riding a bicycle"]
================================================
FILE: configs_prompts/a_goat_walking.yaml
================================================
name: "a_goat_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.5], [0.3, 0.5, 0.5], [0.5, 0.7, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a goat walking"]
================================================
FILE: configs_prompts/a_hippo_walking.yaml
================================================
name: "a_hippo_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.3, 0.5], [0.25, 0.5, 0.5], [0.5, 0.7, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a hippo walking"]
================================================
FILE: configs_prompts/a_labrador_running.yaml
================================================
name: "a_labrador_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a labrador running"]
================================================
FILE: configs_prompts/a_lion_walking.yaml
================================================
name: "a_lion_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.3, 0.3, 0.5], [0.3, 0.5, 0.5], [0.5, 0.6, 0.5], [0.7, 0.7, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a lion walking"]
================================================
FILE: configs_prompts/a_pigeon_flying.yaml
================================================
name: "a_pigeon_flying"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a pigeon flying"]
================================================
FILE: configs_prompts/a_rhinoceros_walking.yaml
================================================
name: "a_rhinoceros_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
angle_offset: 0.
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
prompt_processor:
prompt: ["a rhinoceros walking"]
================================================
FILE: configs_prompts/a_seagull_flying.yaml
================================================
name: "a_seagull_flying"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.45, 0.25, 0.5], [0.30, 0.33, 0.5], [0.3, 0.4, 0.5], [0.75, 0.65, 0.5], [0.51, 0.75, 0.5], [0.45, 0.6, 0.5], [0.45, 0.5, 0.5], [0.45, 0.25, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a seagull flying"]
================================================
FILE: configs_prompts/a_shark_swimming.yaml
================================================
name: "a_shark_swimming"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.45, 0.25, 0.5], [0.30, 0.33, 0.5], [0.3, 0.4, 0.5], [0.75, 0.65, 0.5], [0.51, 0.75, 0.5], [0.45, 0.6, 0.5], [0.45, 0.5, 0.5], [0.45, 0.25, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a shark swimming"]
================================================
FILE: configs_prompts/a_sheep_running.yaml
================================================
name: "a_sheep_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a sheep running"]
================================================
FILE: configs_prompts/a_tiger_walking.yaml
================================================
name: "a_tiger_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a tiger walking"]
================================================
FILE: configs_prompts/a_turtle_swimming.yaml
================================================
name: "a_turtle_swimming"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.3, 0.55, 0.5], [0.7, 0.7, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.25, 0.5], [0.3, 0.55, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a turtle swimming"]
================================================
FILE: configs_prompts/a_unicorn_running.yaml
================================================
name: "a_unicorn_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.7, 0.5], [0.5, 0.3, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a unicorn running"]
================================================
FILE: configs_prompts/a_wolf_running.yaml
================================================
name: "a_wolf_running"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["a wolf running"]
================================================
FILE: configs_prompts/an_astronaut_riding_a_horse.yaml
================================================
name: "an_astronaut_riding_a_horse"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
# Compositional scene
# coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
# Single
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["an astronaut riding a horse"]
================================================
FILE: configs_prompts/an_eagle_flying.yaml
================================================
name: "an_eagle_flying"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.45, 0.25, 0.5], [0.30, 0.33, 0.5], [0.3, 0.4, 0.5], [0.75, 0.65, 0.5], [0.51, 0.75, 0.5], [0.45, 0.6, 0.5], [0.45, 0.5, 0.5], [0.45, 0.25, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["an eagle flying"]
================================================
FILE: configs_prompts/an_elephant_walking.yaml
================================================
name: "an_elephant_walking"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
# Single
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
# Compositional
# coords: [[0.25, 0.25, 0.5], [0.35, 0.6, 0.5], [0.5, 0.75, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["an elephant walking"]
================================================
FILE: configs_prompts/an_octopus_swimming.yaml
================================================
name: "an_octopus_swimming"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.65, 0.65, 0.65]
trajs:
# Object 0
- traj_type: spline-trajectory
coords: [[0.5, 0.5, 0.4], [0.5, 0.5, 0.6]]
angle_offset: 180.
prompt_processor:
prompt: ["an octopus swimming"]
================================================
FILE: configs_prompts/assassin_riding_a_cow.yaml
================================================
name: "assassin_riding_a_cow"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.6, 0.6, 0.6]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["assassin riding a cow"]
================================================
FILE: configs_prompts/batman_riding_a_camel.yaml
================================================
name: "batman_riding_a_camel"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.7, 0.7, 0.7]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["batman riding a camel"]
================================================
FILE: configs_prompts/deadpool_riding_a_cow.yaml
================================================
name: "deadpool_riding_a_cow"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
# Compositional
# coords: [[0.25, 0.5, 0.5], [0.5, 0.75, 0.5], [0.75, 0.5, 0.5], [0.5, 0.25, 0.5], [0.25, 0.5, 0.5]]
# Single
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["deadpool riding a cow"]
================================================
FILE: configs_prompts/son_goku_riding_an_elephant.yaml
================================================
name: "son_goku_riding_an_elephant"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.65, 0.65, 0.65]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.4, 0.5, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["son goku riding an elephant"]
================================================
FILE: configs_prompts/spiderman_riding_a_donkey.yaml
================================================
name: "spiderman_riding_a_donkey"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: spline-trajectory
coords: [[0.5, 0.35, 0.5], [0.5, 0.65, 0.5]]
angle_offset: 0.
prompt_processor:
prompt: ["spiderman riding a donkey, donkey running"]
================================================
FILE: configs_prompts_static/a_firepit.yaml
================================================
name: "a_firepit"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.5]
angle_offset: 90.
prompt_processor:
prompt: ["a firepit with large flame"]
================================================
FILE: configs_prompts_static/a_lamppost.yaml
================================================
name: "a_lamppost"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.5]
angle_offset: 90.
prompt_processor:
prompt: ["a lamppost"]
# Only train stage 1 and 2
================================================
FILE: configs_prompts_static/water_spraying_out_of_a_firehydrant.yaml
================================================
name: "water_spraying_out_of_a_firehydrant"
config_scene:
traj_kwargs:
# Object 0
-
proxy_size: [0.5, 0.5, 0.5]
trajs:
- traj_type: static-trajectory
pos_start: [0.5, 0.5, 0.5]
angle_offset: 90.
prompt_processor:
prompt: ["water spraying out of a firehydrant"]
================================================
FILE: launch.py
================================================
import argparse
import logging
import os
import shutil
import sys
class ColoredFilter(logging.Filter):
    """
    A logging filter to add color to certain log levels.

    Prepends an ANSI color code to ``record.levelname`` and appends a reset
    code to ``record.msg`` so colored output terminates cleanly when rendered
    by a formatter such as ``"%(levelname)s %(message)s"``.
    """

    # Fix: the original defined RESET twice ("\033[0m" and "\x1b[0m" are the
    # same escape sequence); keep a single definition. The redundant no-op
    # __init__ is also removed — logging.Filter's default suffices.
    RESET = "\033[0m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    BLUE = "\033[34m"
    MAGENTA = "\033[35m"
    CYAN = "\033[36m"

    COLORS = {
        "WARNING": YELLOW,
        "INFO": GREEN,
        "DEBUG": BLUE,
        "CRITICAL": MAGENTA,
        "ERROR": RED,
    }

    def filter(self, record: logging.LogRecord) -> bool:
        """Colorize recognized levels in place; always keep the record.

        NOTE: mutates the record, so the colored levelname/msg is seen by
        every downstream handler, not just the one holding this filter.
        """
        if record.levelname in self.COLORS:
            color_start = self.COLORS[record.levelname]
            record.levelname = f"{color_start}[{record.levelname}]"
            record.msg = f"{record.msg}{self.RESET}"
        return True
def main() -> None:
    """Entry point: parse CLI arguments, build the datamodule and system from
    the YAML config, then run one of train / validate / test / export."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True, help="path to config file")
    parser.add_argument("--gpu", default="0", help="GPU(s) to be used")
    # Exactly one run mode must be chosen on the command line.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--train", action="store_true")
    group.add_argument("--validate", action="store_true")
    group.add_argument("--test", action="store_true")
    group.add_argument("--export", action="store_true")
    parser.add_argument(
        "--verbose", action="store_true", help="if true, set logging level to DEBUG"
    )
    parser.add_argument(
        "--typecheck",
        action="store_true",
        help="whether to enable dynamic type checking",
    )
    # Unrecognized args ("extras") are forwarded to load_config as CLI overrides.
    args, extras = parser.parse_known_args()
    # set CUDA_VISIBLE_DEVICES then import pytorch-lightning
    # (the env vars are read at import time, which is why these imports are
    # deliberately deferred into the function body instead of at file top)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    n_gpus = len(args.gpu.split(","))
    import pytorch_lightning as pl
    import torch
    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
    from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
    from pytorch_lightning.utilities.rank_zero import rank_zero_only

    if args.typecheck:
        # Optional runtime type checking; the import hook must be installed
        # before threestudio itself is imported below.
        from jaxtyping import install_import_hook

        install_import_hook("threestudio", "typeguard.typechecked")

    import threestudio
    from threestudio.systems.base import BaseSystem
    from threestudio.utils.callbacks import (
        CodeSnapshotCallback,
        ConfigSnapshotCallback,
        CustomProgressBar,
        # ModelCheckpointCustom,
    )
    from threestudio.utils.config import ExperimentConfig, load_config
    from threestudio.utils.typing import Optional

    # Colorize pytorch-lightning's stderr handler output.
    logger = logging.getLogger("pytorch_lightning")
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    for handler in logger.handlers:
        if handler.stream == sys.stderr:  # type: ignore
            handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
            handler.addFilter(ColoredFilter())
    # parse YAML config to OmegaConf
    cfg: ExperimentConfig
    cfg = load_config(args.config, cli_args=extras, n_gpus=n_gpus)
    pl.seed_everything(cfg.seed)
    # Instantiate datamodule and system classes registered under the
    # configured type names.
    dm = threestudio.find(cfg.data_type)(cfg.data)
    system: BaseSystem = threestudio.find(cfg.system_type)(
        cfg.system, resumed=cfg.resume is not None
    )
    system.set_save_dir(os.path.join(cfg.trial_dir, "save"))
    callbacks = []
    if args.train:
        callbacks += [
            ModelCheckpoint(
                dirpath=os.path.join(cfg.trial_dir, "ckpts"), **cfg.checkpoint
            ),
            LearningRateMonitor(logging_interval="step"),
            CustomProgressBar(refresh_rate=1),
            # Snapshot the code and resolved config next to the run outputs
            # for reproducibility.
            CodeSnapshotCallback(
                os.path.join(cfg.trial_dir, "code"), use_version=False
            ),
            ConfigSnapshotCallback(
                args.config,
                cfg,
                os.path.join(cfg.trial_dir, "configs"),
                use_version=False,
            ),
        ]

    def write_to_text(file, lines):
        # Write each string in `lines` to `file`, one per line.
        with open(file, "w") as f:
            for line in lines:
                f.write(line + "\n")

    loggers = []
    if args.train:
        # make tensorboard logging dir to suppress warning
        rank_zero_only(
            lambda: os.makedirs(os.path.join(cfg.trial_dir, "tb_logs"), exist_ok=True)
        )()
        loggers += [
            TensorBoardLogger(cfg.trial_dir, name="tb_logs"),
            CSVLogger(cfg.trial_dir, name="csv_logs"),
        ] + system.get_loggers()
        # Record the exact launch command and parsed args for this run
        # (rank-zero only, to avoid concurrent writes in multi-GPU runs).
        rank_zero_only(
            lambda: write_to_text(
                os.path.join(cfg.trial_dir, "log.txt"),
                ["python " + " ".join(sys.argv), str(args)],
            )
        )()
    trainer = Trainer(
        callbacks=callbacks, logger=loggers, inference_mode=False, **cfg.trainer
    )

    def set_system_status(system: BaseSystem, ckpt_path: Optional[str]):
        # Restore epoch/global_step from a checkpoint; no-op when not resuming.
        if ckpt_path is None:
            return
        ckpt = torch.load(ckpt_path, map_location="cpu")
        system.set_resume_status(ckpt["epoch"], ckpt["global_step"])

    if args.train:
        trainer.fit(system, datamodule=dm, ckpt_path=cfg.resume)
        # Run a test pass right after training finishes.
        trainer.test(system, datamodule=dm)
    elif args.validate:
        # manually set epoch and global_step as they cannot be automatically resumed
        set_system_status(system, cfg.resume)
        trainer.validate(system, datamodule=dm, ckpt_path=cfg.resume)
    elif args.test:
        # manually set epoch and global_step as they cannot be automatically resumed
        set_system_status(system, cfg.resume)
        trainer.test(system, datamodule=dm, ckpt_path=cfg.resume)
    elif args.export:
        set_system_status(system, cfg.resume)
        trainer.predict(system, datamodule=dm, ckpt_path=cfg.resume)


if __name__ == "__main__":
    main()
================================================
FILE: load/make_prompt_library.py
================================================
import json
# Filenames of the videos in the public DreamFusion results gallery.
# Each entry is an underscore-separated prompt with an ".mp4" suffix;
# main() strips the suffix and underscores to recover the prompt text.
dreamfusion_gallery_video_names = [
"a_20-sided_die_made_out_of_glass.mp4",
"a_bald_eagle_carved_out_of_wood.mp4",
"a_banana_peeling_itself.mp4",
"a_beagle_in_a_detective's_outfit.mp4",
"a_beautiful_dress_made_out_of_fruit,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_beautiful_dress_made_out_of_garbage_bags,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_beautiful_rainbow_fish.mp4",
"a_bichon_frise_wearing_academic_regalia.mp4",
"a_blue_motorcycle.mp4",
"a_blue_poison-dart_frog_sitting_on_a_water_lily.mp4",
"a_brightly_colored_mushroom_growing_on_a_log.mp4",
"a_bumblebee_sitting_on_a_pink_flower.mp4",
"a_bunch_of_colorful_marbles_spilling_out_of_a_red_velvet_bag.mp4",
"a_capybara_wearing_a_top_hat,_low_poly.mp4",
"a_cat_with_a_mullet.mp4",
"a_ceramic_lion.mp4",
"a_ceramic_upside_down_yellow_octopus_holding_a_blue_green_ceramic_cup.mp4",
"a_chihuahua_wearing_a_tutu.mp4",
"a_chimpanzee_holding_a_peeled_banana.mp4",
"a_chimpanzee_looking_through_a_telescope.mp4",
"a_chimpanzee_stirring_a_bubbling_purple_potion_in_a_cauldron.mp4",
"a_chimpanzee_with_a_big_grin.mp4",
"a_completely_destroyed_car.mp4",
"a_confused_beagle_sitting_at_a_desk_working_on_homework.mp4",
"a_corgi_taking_a_selfie.mp4",
"a_crab,_low_poly.mp4",
"a_crocodile_playing_a_drum_set.mp4",
"a_cute_steampunk_elephant.mp4",
"a_dachsund_dressed_up_in_a_hotdog_costume.mp4",
"a_delicious_hamburger.mp4",
"a_dragon-cat_hybrid.mp4",
"a_DSLR_photo_of_a_baby_dragon_drinking_boba.mp4",
"a_DSLR_photo_of_a_baby_dragon_hatching_out_of_a_stone_egg.mp4",
"a_DSLR_photo_of_a_baby_grand_piano_viewed_from_far_away.mp4",
"a_DSLR_photo_of_a_bagel_filled_with_cream_cheese_and_lox.mp4",
"a_DSLR_photo_of_a_bald_eagle.mp4",
"a_DSLR_photo_of_a_barbecue_grill_cooking_sausages_and_burger_patties.mp4",
"a_DSLR_photo_of_a_basil_plant.mp4",
"a_DSLR_photo_of_a_bear_dancing_ballet.mp4",
"a_DSLR_photo_of_a_bear_dressed_as_a_lumberjack.mp4",
"a_DSLR_photo_of_a_bear_dressed_in_medieval_armor.mp4",
"a_DSLR_photo_of_a_beautiful_violin_sitting_flat_on_a_table.mp4",
"a_DSLR_photo_of_a_blue_jay_standing_on_a_large_basket_of_rainbow_macarons.mp4",
"a_DSLR_photo_of_a_bulldozer_clearing_away_a_pile_of_snow.mp4",
"a_DSLR_photo_of_a_bulldozer.mp4",
"a_DSLR_photo_of_a_cake_covered_in_colorful_frosting_with_a_slice_being_taken_out,_high_resolution.mp4",
"a_DSLR_photo_of_a_candelabra_with_many_candles_on_a_red_velvet_tablecloth.mp4",
"a_DSLR_photo_of_a_car_made_out_of_cheese.mp4",
"a_DSLR_photo_of_A_car_made_out_of_sushi.mp4",
"a_DSLR_photo_of_a_car_made_out_pizza.mp4",
"a_DSLR_photo_of_a_cat_lying_on_its_side_batting_at_a_ball_of_yarn.mp4",
"a_DSLR_photo_of_a_cat_magician_making_a_white_dove_appear.mp4",
"a_DSLR_photo_of_a_cat_wearing_a_bee_costume.mp4",
"a_DSLR_photo_of_a_cat_wearing_a_lion_costume.mp4",
"a_DSLR_photo_of_a_cauldron_full_of_gold_coins.mp4",
"a_DSLR_photo_of_a_chimpanzee_dressed_like_Henry_VIII_king_of_England.mp4",
"a_DSLR_photo_of_a_chimpanzee_dressed_like_Napoleon_Bonaparte.mp4",
"a_DSLR_photo_of_a_chow_chow_puppy.mp4",
"a_DSLR_photo_of_a_Christmas_tree_with_donuts_as_decorations.mp4",
"a_DSLR_photo_of_a_chrome-plated_duck_with_a_golden_beak_arguing_with_an_angry_turtle_in_a_forest.mp4",
"a_DSLR_photo_of_a_classic_Packard_car.mp4",
"a_DSLR_photo_of_a_cocker_spaniel_wearing_a_crown.mp4",
"a_DSLR_photo_of_a_corgi_lying_on_its_back_with_its_tongue_lolling_out.mp4",
"a_DSLR_photo_of_a_corgi_puppy.mp4",
"a_DSLR_photo_of_a_corgi_sneezing.mp4",
"a_DSLR_photo_of_a_corgi_standing_up_drinking_boba.mp4",
"a_DSLR_photo_of_a_corgi_taking_a_selfie.mp4",
"a_DSLR_photo_of_a_corgi_wearing_a_beret_and_holding_a_baguette,_standing_up_on_two_hind_legs.mp4",
"a_DSLR_photo_of_a_covered_wagon.mp4",
"a_DSLR_photo_of_a_cracked_egg_with_the_yolk_spilling_out_on_a_wooden_table.mp4",
"a_DSLR_photo_of_a_cup_full_of_pens_and_pencils.mp4",
"a_DSLR_photo_of_a_dalmation_wearing_a_fireman's_hat.mp4",
"a_DSLR_photo_of_a_delicious_chocolate_brownie_dessert_with_ice_cream_on_the_side.mp4",
"a_DSLR_photo_of_a_delicious_croissant.mp4",
"a_DSLR_photo_of_A_DMC_Delorean_car.mp4",
"a_DSLR_photo_of_a_dog_made_out_of_salad.mp4",
"a_DSLR_photo_of_a_drum_set_made_of_cheese.mp4",
"a_DSLR_photo_of_a_drying_rack_covered_in_clothes.mp4",
"a_DSLR_photo_of_aerial_view_of_a_ruined_castle.mp4",
"a_DSLR_photo_of_a_football_helmet.mp4",
"a_DSLR_photo_of_a_fox_holding_a_videogame_controller.mp4",
"a_DSLR_photo_of_a_fox_taking_a_photograph_using_a_DSLR.mp4",
"a_DSLR_photo_of_a_frazer_nash_super_sport_car.mp4",
"a_DSLR_photo_of_a_frog_wearing_a_sweater.mp4",
"a_DSLR_photo_of_a_ghost_eating_a_hamburger.mp4",
"a_DSLR_photo_of_a_giant_worm_emerging_from_the_sand_in_the_middle_of_the_desert.mp4",
"a_DSLR_photo_of_a_goose_made_out_of_gold.mp4",
"a_DSLR_photo_of_a_green_monster_truck.mp4",
"a_DSLR_photo_of_a_group_of_dogs_eating_pizza.mp4",
"a_DSLR_photo_of_a_group_of_dogs_playing_poker.mp4",
"a_DSLR_photo_of_a_gummy_bear_playing_the_saxophone.mp4",
"a_DSLR_photo_of_a_hippo_wearing_a_sweater.mp4",
"a_DSLR_photo_of_a_humanoid_robot_holding_a_human_brain.mp4",
"a_DSLR_photo_of_a_humanoid_robot_playing_solitaire.mp4",
"a_DSLR_photo_of_a_humanoid_robot_playing_the_cello.mp4",
"a_DSLR_photo_of_a_humanoid_robot_using_a_laptop.mp4",
"a_DSLR_photo_of_a_humanoid_robot_using_a_rolling_pin_to_roll_out_dough.mp4",
"a_DSLR_photo_of_a_human_skull.mp4",
"a_DSLR_photo_of_a_kitten_standing_on_top_of_a_giant_tortoise.mp4",
"a_DSLR_photo_of_a_knight_chopping_wood.mp4",
"a_DSLR_photo_of_a_knight_holding_a_lance_and_sitting_on_an_armored_horse.mp4",
"a_DSLR_photo_of_a_koala_wearing_a_party_hat_and_blowing_out_birthday_candles_on_a_cake.mp4",
"a_DSLR_photo_of_a_lemur_taking_notes_in_a_journal.mp4",
"a_DSLR_photo_of_a_lion_reading_the_newspaper.mp4",
"a_DSLR_photo_of_a_mandarin_duck_swimming_in_a_pond.mp4",
"a_DSLR_photo_of_a_model_of_the_eiffel_tower_made_out_of_toothpicks.mp4",
"a_DSLR_photo_of_a_mouse_playing_the_tuba.mp4",
"a_DSLR_photo_of_a_mug_of_hot_chocolate_with_whipped_cream_and_marshmallows.mp4",
"a_DSLR_photo_of_an_adorable_piglet_in_a_field.mp4",
"a_DSLR_photo_of_an_airplane_taking_off_from_the_runway.mp4",
"a_DSLR_photo_of_an_astronaut_standing_on_the_surface_of_mars.mp4",
"a_DSLR_photo_of_an_eggshell_broken_in_two_with_an_adorable_chick_standing_next_to_it.mp4",
"a_DSLR_photo_of_an_elephant_skull.mp4",
"a_DSLR_photo_of_an_exercise_bike_in_a_well_lit_room.mp4",
"a_DSLR_photo_of_an_extravagant_mansion,_aerial_view.mp4",
"a_DSLR_photo_of_an_ice_cream_sundae.mp4",
"a_DSLR_photo_of_an_iguana_holding_a_balloon.mp4",
"a_DSLR_photo_of_an_intricate_and_complex_dish_from_a_michelin_star_restaurant.mp4",
"a_DSLR_photo_of_An_iridescent_steampunk_patterned_millipede_with_bison_horns.mp4",
"a_DSLR_photo_of_an_octopus_playing_the_piano.mp4",
"a_DSLR_photo_of_an_old_car_overgrown_by_vines_and_weeds.mp4",
"a_DSLR_photo_of_an_old_vintage_car.mp4",
"a_DSLR_photo_of_an_orangutan_making_a_clay_bowl_on_a_throwing_wheel.mp4",
"a_DSLR_photo_of_an_orc_forging_a_hammer_on_an_anvil.mp4",
"a_DSLR_photo_of_an_origami_motorcycle.mp4",
"a_DSLR_photo_of_an_ornate_silver_gravy_boat_sitting_on_a_patterned_tablecloth.mp4",
"a_DSLR_photo_of_an_overstuffed_pastrami_sandwich.mp4",
"a_DSLR_photo_of_an_unstable_rock_cairn_in_the_middle_of_a_stream.mp4",
"a_DSLR_photo_of_a_pair_of_headphones_sitting_on_a_desk.mp4",
"a_DSLR_photo_of_a_pair_of_tan_cowboy_boots,_studio_lighting,_product_photography.mp4",
"a_DSLR_photo_of_a_peacock_on_a_surfboard.mp4",
"a_DSLR_photo_of_a_pigeon_reading_a_book.mp4",
"a_DSLR_photo_of_a_piglet_sitting_in_a_teacup.mp4",
"a_DSLR_photo_of_a_pig_playing_a_drum_set.mp4",
"a_DSLR_photo_of_a_pile_of_dice_on_a_green_tabletop_next_to_some_playing_cards.mp4",
"a_DSLR_photo_of_a_pirate_collie_dog,_high_resolution.mp4",
"a_DSLR_photo_of_a_plate_of_fried_chicken_and_waffles_with_maple_syrup_on_them.mp4",
"a_DSLR_photo_of_a_plate_piled_high_with_chocolate_chip_cookies.mp4",
"a_DSLR_photo_of_a_plush_t-rex_dinosaur_toy,_studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_plush_triceratops_toy,_studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_pomeranian_dog.mp4",
"a_DSLR_photo_of_a_porcelain_dragon.mp4",
"a_DSLR_photo_of_a_praying_mantis_wearing_roller_skates.mp4",
"a_DSLR_photo_of_a_puffin_standing_on_a_rock.mp4",
"a_DSLR_photo_of_a_pug_made_out_of_metal.mp4",
"a_DSLR_photo_of_a_pug_wearing_a_bee_costume.mp4",
"a_DSLR_photo_of_a_quill_and_ink_sitting_on_a_desk.mp4",
"a_DSLR_photo_of_a_raccoon_stealing_a_pie.mp4",
"a_DSLR_photo_of_a_red_cardinal_bird_singing.mp4",
"a_DSLR_photo_of_a_red_convertible_car_with_the_top_down.mp4",
"a_DSLR_photo_of_a_red-eyed_tree_frog.mp4",
"a_DSLR_photo_of_a_red_pickup_truck_driving_across_a_stream.mp4",
"a_DSLR_photo_of_a_red_wheelbarrow_with_a_shovel_in_it.mp4",
"a_DSLR_photo_of_a_roast_turkey_on_a_platter.mp4",
"a_DSLR_photo_of_a_robot_and_dinosaur_playing_chess,_high_resolution.mp4",
"a_DSLR_photo_of_a_robot_arm_picking_up_a_colorful_block_from_a_table.mp4",
"a_DSLR_photo_of_a_robot_cat_knocking_over_a_chess_piece_on_a_board.mp4",
"a_DSLR_photo_of_a_robot_dinosaur.mp4",
"a_DSLR_photo_of_a_robot_made_out_of_vegetables.mp4",
"a_DSLR_photo_of_a_robot_stegosaurus.mp4",
"a_DSLR_photo_of_a_robot_tiger.mp4",
"a_DSLR_photo_of_a_rolling_pin_on_top_of_bread_dough.mp4",
"a_DSLR_photo_of_a_sheepdog_running.mp4",
"a_DSLR_photo_of_a_shiba_inu_playing_golf_wearing_tartan_golf_clothes_and_hat.mp4",
"a_DSLR_photo_of_a_shiny_silver_robot_cat.mp4",
"a_DSLR_photo_of_a_silverback_gorilla_holding_a_golden_trophy.mp4",
"a_DSLR_photo_of_a_silver_humanoid_robot_flipping_a_coin.mp4",
"a_DSLR_photo_of_a_small_cherry_tomato_plant_in_a_pot_with_a_few_red_tomatoes_growing_on_it.mp4",
"a_DSLR_photo_of_a_small_saguaro_cactus_planted_in_a_clay_pot.mp4",
"a_DSLR_photo_of_a_Space_Shuttle.mp4",
"a_DSLR_photo_of_a_squirrel_dressed_like_a_clown.mp4",
"a_DSLR_photo_of_a_squirrel_flying_a_biplane.mp4",
"a_DSLR_photo_of_a_squirrel_giving_a_lecture_writing_on_a_chalkboard.mp4",
"a_DSLR_photo_of_a_squirrel_holding_a_bowling_ball.mp4",
"a_DSLR_photo_of_a_squirrel-lizard_hybrid.mp4",
"a_DSLR_photo_of_a_squirrel_made_out_of_fruit.mp4",
"a_DSLR_photo_of_a_squirrel-octopus_hybrid.mp4",
"a_DSLR_photo_of_a_stack_of_pancakes_covered_in_maple_syrup.mp4",
"a_DSLR_photo_of_a_steam_engine_train,_high_resolution.mp4",
"a_DSLR_photo_of_a_steaming_basket_full_of_dumplings.mp4",
"a_DSLR_photo_of_a_steaming_hot_plate_piled_high_with_spaghetti_and_meatballs.mp4",
"a_DSLR_photo_of_a_steampunk_space_ship_designed_in_the_18th_century.mp4",
"a_DSLR_photo_of_a_straw_basket_with_a_cobra_coming_out_of_it.mp4",
"a_DSLR_photo_of_a_swan_and_its_cygnets_swimming_in_a_pond.mp4",
"a_DSLR_photo_of_a_tarantula,_highly_detailed.mp4",
"a_DSLR_photo_of_a_teal_moped.mp4",
"a_DSLR_photo_of_a_teapot_shaped_like_an_elephant_head_where_its_snout_acts_as_the_spout.mp4",
"a_DSLR_photo_of_a_teddy_bear_taking_a_selfie.mp4",
"a_DSLR_photo_of_a_terracotta_bunny.mp4",
"a_DSLR_photo_of_a_tiger_dressed_as_a_doctor.mp4",
"a_DSLR_photo_of_a_tiger_made_out_of_yarn.mp4",
"a_DSLR_photo_of_a_toilet_made_out_of_gold.mp4",
"a_DSLR_photo_of_a_toy_robot.mp4",
"a_DSLR_photo_of_a_train_engine_made_out_of_clay.mp4",
"a_DSLR_photo_of_a_tray_of_Sushi_containing_pugs.mp4",
"a_DSLR_photo_of_a_tree_stump_with_an_axe_buried_in_it.mp4",
"a_DSLR_photo_of_a_turtle_standing_on_its_hind_legs,_wearing_a_top_hat_and_holding_a_cane.mp4",
"a_DSLR_photo_of_a_very_beautiful_small_organic_sculpture_made_of_fine_clockwork_and_gears_with_tiny_ruby_bearings,_very_intricate,_caved,_curved._Studio_lighting,_High_resolution,_white_background.mp4",
"a_DSLR_photo_of_A_very_beautiful_tiny_human_heart_organic_sculpture_made_of_copper_wire_and_threaded_pipes,_very_intricate,_curved,_Studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_very_cool_and_trendy_pair_of_sneakers,_studio_lighting.mp4",
"a_DSLR_photo_of_a_vintage_record_player.mp4",
"a_DSLR_photo_of_a_wine_bottle_and_full_wine_glass_on_a_chessboard.mp4",
"a_DSLR_photo_of_a_wooden_desk_and_chair_from_an_elementary_school.mp4",
"a_DSLR_photo_of_a_yorkie_dog_eating_a_donut.mp4",
"a_DSLR_photo_of_a_yorkie_dog_wearing_extremely_cool_sneakers.mp4",
"a_DSLR_photo_of_baby_elephant_jumping_on_a_trampoline.mp4",
"a_DSLR_photo_of_cat_wearing_virtual_reality_headset_in_renaissance_oil_painting_high_detail_caravaggio.mp4",
"a_DSLR_photo_of_edible_typewriter_made_out_of_vegetables.mp4",
"a_DSLR_photo_of_Mont_Saint-Michel,_France,_aerial_view.mp4",
"a_DSLR_photo_of_Mount_Fuji,_aerial_view.mp4",
"a_DSLR_photo_of_Neuschwanstein_Castle,_aerial_view.mp4",
"A_DSLR_photo_of___pyramid_shaped_burrito_with_a_slice_cut_out_of_it.mp4",
"a_DSLR_photo_of_the_Imperial_State_Crown_of_England.mp4",
"a_DSLR_photo_of_the_leaning_tower_of_Pisa,_aerial_view.mp4",
"a_DSLR_photo_of_the_Statue_of_Liberty,_aerial_view.mp4",
"a_DSLR_photo_of_Two_locomotives_playing_tug_of_war.mp4",
"a_DSLR_photo_of_two_macaw_parrots_sharing_a_milkshake_with_two_straws.mp4",
"a_DSLR_photo_of_Westminster_Abbey,_aerial_view.mp4",
"a_ficus_planted_in_a_pot.mp4",
"a_flower_made_out_of_metal.mp4",
"a_fluffy_cat_lying_on_its_back_in_a_patch_of_sunlight.mp4",
"a_fox_and_a_hare_tangoing_together.mp4",
"a_fox_holding_a_videogame_controller.mp4",
"a_fox_playing_the_cello.mp4",
"a_frazer_nash_super_sport_car.mp4",
"a_freshly_baked_loaf_of_sourdough_bread_on_a_cutting_board.mp4",
"a_goat_drinking_beer.mp4",
"a_golden_goblet,_low_poly.mp4",
"a_green_dragon_breathing_fire.mp4",
"a_green_tractor_farming_corn_fields.mp4",
"a_highland_cow.mp4",
"a_hotdog_in_a_tutu_skirt.mp4",
"a_humanoid_robot_laying_on_the_couch_while_on_a_laptop.mp4",
"a_humanoid_robot_playing_the_violin.mp4",
"a_humanoid_robot_sitting_looking_at_a_Go_board_with_some_pieces_on_it.mp4",
"a_human_skeleton_drinking_a_glass_of_red_wine.mp4",
"a_human_skull_with_a_vine_growing_through_one_of_the_eye_sockets.mp4",
"a_kitten_looking_at_a_goldfish_in_a_bowl.mp4",
"a_lemur_drinking_boba.mp4",
"a_lemur_taking_notes_in_a_journal.mp4",
"a_lionfish.mp4",
"a_llama_wearing_a_suit.mp4",
"a_marble_bust_of_a_mouse.mp4",
"a_metal_sculpture_of_a_lion's_head,_highly_detailed.mp4",
"a_mojito_in_a_beach_chair.mp4",
"a_monkey-rabbit_hybrid.mp4",
"an_airplane_made_out_of_wood.mp4",
"an_amigurumi_bulldozer.mp4",
"An_anthropomorphic_tomato_eating_another_tomato.mp4",
"an_astronaut_playing_the_violin.mp4",
"an_astronaut_riding_a_kangaroo.mp4",
"an_English_castle,_aerial_view.mp4",
"an_erupting_volcano,_aerial_view.mp4",
"a_nest_with_a_few_white_eggs_and_one_golden_egg.mp4",
"an_exercise_bike.mp4",
"an_iridescent_metal_scorpion.mp4",
"An_octopus_and_a_giraffe_having_cheesecake.mp4",
"an_octopus_playing_the_harp.mp4",
"an_old_vintage_car.mp4",
"an_opulent_couch_from_the_palace_of_Versailles.mp4",
"an_orange_road_bike.mp4",
"an_orangutan_holding_a_paint_palette_in_one_hand_and_a_paintbrush_in_the_other.mp4",
"an_orangutan_playing_accordion_with_its_hands_spread_wide.mp4",
"an_orangutan_using_chopsticks_to_eat_ramen.mp4",
"an_orchid_flower_planted_in_a_clay_pot.mp4",
"a_palm_tree,_low_poly_3d_model.mp4",
"a_panda_rowing_a_boat_in_a_pond.mp4",
"a_panda_wearing_a_necktie_and_sitting_in_an_office_chair.mp4",
"A_Panther_De_Ville_car.mp4",
"a_pig_wearing_a_backpack.mp4",
"a_plate_of_delicious_tacos.mp4",
"a_plush_dragon_toy.mp4",
"a_plush_toy_of_a_corgi_nurse.mp4",
"a_rabbit,_animated_movie_character,_high_detail_3d_model.mp4",
"a_rabbit_cutting_grass_with_a_lawnmower.mp4",
"a_red_eyed_tree_frog,_low_poly.mp4",
"a_red_panda.mp4",
"a_ripe_strawberry.mp4",
"a_roulette_wheel.mp4",
"a_shiny_red_stand_mixer.mp4",
"a_silver_platter_piled_high_with_fruits.mp4",
"a_sliced_loaf_of_fresh_bread.mp4",
"a_snail_on_a_leaf.mp4",
"a_spanish_galleon_sailing_on_the_open_sea.mp4",
"a_squirrel_dressed_like_Henry_VIII_king_of_England.mp4",
"a_squirrel_gesturing_in_front_of_an_easel_showing_colorful_pie_charts.mp4",
"a_squirrel_wearing_a_tuxedo_and_holding_a_conductor's_baton.mp4",
"a_team_of_butterflies_playing_soccer_on_a_field.mp4",
"a_teddy_bear_pushing_a_shopping_cart_full_of_fruits_and_vegetables.mp4",
"a_tiger_dressed_as_a_military_general.mp4",
"a_tiger_karate_master.mp4",
"a_tiger_playing_the_violin.mp4",
"a_tiger_waiter_at_a_fancy_restaurant.mp4",
"a_tiger_wearing_a_tuxedo.mp4",
"a_t-rex_roaring_up_into_the_air.mp4",
"a_turtle_standing_on_its_hind_legs,_wearing_a_top_hat_and_holding_a_cane.mp4",
"a_typewriter.mp4",
"a_walrus_smoking_a_pipe.mp4",
"a_wedge_of_cheese_on_a_silver_platter.mp4",
"a_wide_angle_DSLR_photo_of_a_colorful_rooster.mp4",
"a_wide_angle_DSLR_photo_of_a_humanoid_banana_sitting_at_a_desk_doing_homework.mp4",
"a_wide_angle_DSLR_photo_of_a_mythical_troll_stirring_a_cauldron.mp4",
"a_wide_angle_DSLR_photo_of_a_squirrel_in_samurai_armor_wielding_a_katana.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_A_red_dragon_dressed_in_a_tuxedo_and_playing_chess._The_chess_pieces_are_fashioned_after_robots.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_a_skiing_penguin_wearing_a_puffy_jacket.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_zoomed_out_view_of_Tower_Bridge_made_out_of_gingerbread_and_candy.mp4",
"a_woolly_mammoth_standing_on_ice.mp4",
"a_yellow_schoolbus.mp4",
"a_zoomed_out_DSLR_photo_of_a_3d_model_of_an_adorable_cottage_with_a_thatched_roof.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_bunny_sitting_on_top_of_a_stack_of_pancakes.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_dragon.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_monkey_riding_on_a_pig.mp4",
"a_zoomed_out_DSLR_photo_of_a_badger_wearing_a_party_hat_and_blowing_out_birthday_candles_on_a_cake.mp4",
"a_zoomed_out_DSLR_photo_of_a_beagle_eating_a_donut.mp4",
"a_zoomed_out_DSLR_photo_of_a_bear_playing_electric_bass.mp4",
"a_zoomed_out_DSLR_photo_of_a_beautifully_carved_wooden_knight_chess_piece.mp4",
"a_zoomed_out_DSLR_photo_of_a_beautiful_suit_made_out_of_moss,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_zoomed_out_DSLR_photo_of_a_blue_lobster.mp4",
"a_zoomed_out_DSLR_photo_of_a_blue_tulip.mp4",
"a_zoomed_out_DSLR_photo_of_a_bowl_of_cereal_and_milk_with_a_spoon_in_it.mp4",
"a_zoomed_out_DSLR_photo_of_a_brain_in_a_jar.mp4",
"a_zoomed_out_DSLR_photo_of_a_bulldozer_made_out_of_toy_bricks.mp4",
"a_zoomed_out_DSLR_photo_of_a_cake_in_the_shape_of_a_train.mp4",
"a_zoomed_out_DSLR_photo_of_a_chihuahua_lying_in_a_pool_ring.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_dressed_as_a_football_player.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_holding_a_cup_of_hot_coffee.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_wearing_headphones.mp4",
"a_zoomed_out_DSLR_photo_of_a_colorful_camping_tent_in_a_patch_of_grass.mp4",
"a_zoomed_out_DSLR_photo_of_a_complex_movement_from_an_expensive_watch_with_many_shiny_gears,_sitting_on_a_table.mp4",
"a_zoomed_out_DSLR_photo_of_a_construction_excavator.mp4",
"a_zoomed_out_DSLR_photo_of_a_corgi_wearing_a_top_hat.mp4",
"a_zoomed_out_DSLR_photo_of_a_corn_cob_and_a_banana_playing_poker.mp4",
"a_zoomed_out_DSLR_photo_of_a_dachsund_riding_a_unicycle.mp4",
"a_zoomed_out_DSLR_photo_of_a_dachsund_wearing_a_boater_hat.mp4",
"a_zoomed_out_DSLR_photo_of_a_few_pool_balls_sitting_on_a_pool_table.mp4",
"a_zoomed_out_DSLR_photo_of_a_fox_working_on_a_jigsaw_puzzle.mp4",
"a_zoomed_out_DSLR_photo_of_a_fresh_cinnamon_roll_covered_in_glaze.mp4",
"a_zoomed_out_DSLR_photo_of_a_green_tractor.mp4",
"a_zoomed_out_DSLR_photo_of_a_greyhound_dog_racing_down_the_track.mp4",
"a_zoomed_out_DSLR_photo_of_a_group_of_squirrels_rowing_crew.mp4",
"a_zoomed_out_DSLR_photo_of_a_gummy_bear_driving_a_convertible.mp4",
"a_zoomed_out_DSLR_photo_of_a_hermit_crab_with_a_colorful_shell.mp4",
"a_zoomed_out_DSLR_photo_of_a_hippo_biting_through_a_watermelon.mp4",
"a_zoomed_out_DSLR_photo_of_a_hippo_made_out_of_chocolate.mp4",
"a_zoomed_out_DSLR_photo_of_a_humanoid_robot_lying_on_a_couch_using_a_laptop.mp4",
"a_zoomed_out_DSLR_photo_of_a_humanoid_robot_sitting_on_a_chair_drinking_a_cup_of_coffee.mp4",
"a_zoomed_out_DSLR_photo_of_a_human_skeleton_relaxing_in_a_lounge_chair.mp4",
"a_zoomed_out_DSLR_photo_of_a_kangaroo_sitting_on_a_bench_playing_the_accordion.mp4",
"a_zoomed_out_DSLR_photo_of_a_kingfisher_bird.mp4",
"a_zoomed_out_DSLR_photo_of_a_ladybug.mp4",
"a_zoomed_out_DSLR_photo_of_a_lion's_mane_jellyfish.mp4",
"a_zoomed_out_DSLR_photo_of_a_lobster_playing_the_saxophone.mp4",
"a_zoomed_out_DSLR_photo_of_a_majestic_sailboat.mp4",
"a_zoomed_out_DSLR_photo_of_a_marble_bust_of_a_cat,_a_real_mouse_is_sitting_on_its_head.mp4",
"a_zoomed_out_DSLR_photo_of_a_marble_bust_of_a_fox_head.mp4",
"a_zoomed_out_DSLR_photo_of_a_model_of_a_house_in_Tudor_style.mp4",
"a_zoomed_out_DSLR_photo_of_a_monkey-rabbit_hybrid.mp4",
"a_zoomed_out_DSLR_photo_of_a_monkey_riding_a_bike.mp4",
"a_zoomed_out_DSLR_photo_of_a_mountain_goat_standing_on_a_boulder.mp4",
"a_zoomed_out_DSLR_photo_of_a_mouse_holding_a_candlestick.mp4",
"a_zoomed_out_DSLR_photo_of_an_adorable_kitten_lying_next_to_a_flower.mp4",
"a_zoomed_out_DSLR_photo_of_an_all-utility_vehicle_driving_across_a_stream.mp4",
"a_zoomed_out_DSLR_photo_of_an_amigurumi_motorcycle.mp4",
"a_zoomed_out_DSLR_photo_of_an_astronaut_chopping_vegetables_in_a_sunlit_kitchen.mp4",
"a_zoomed_out_DSLR_photo_of_an_egg_cracked_open_with_a_newborn_chick_hatching_out_of_it.mp4",
"a_zoomed_out_DSLR_photo_of_an_expensive_office_chair.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_bulldozer_sitting_on_the_ground.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_crane.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_hippo_in_a_river.mp4",
"a_zoomed_out_DSLR_photo_of_an_otter_lying_on_its_back_in_the_water_holding_a_flower.mp4",
"a_zoomed_out_DSLR_photo_of_a_pair_of_floating_chopsticks_picking_up_noodles_out_of_a_bowl_of_ramen.mp4",
"a_zoomed_out_DSLR_photo_of_a_panda_throwing_wads_of_cash_into_the_air.mp4",
"a_zoomed_out_DSLR_photo_of_a_panda_wearing_a_chef's_hat_and_kneading_bread_dough_on_a_countertop.mp4",
"a_zoomed_out_DSLR_photo_of_a_pigeon_standing_on_a_manhole_cover.mp4",
"a_zoomed_out_DSLR_photo_of_a_pig_playing_the_saxophone.mp4",
"a_zoomed_out_DSLR_photo_of_a_pile_of_dice_on_a_green_tabletop.mp4",
"a_zoomed_out_DSLR_photo_of_a_pita_bread_full_of_hummus_and_falafel_and_vegetables.mp4",
"a_zoomed_out_DSLR_photo_of_a_pug_made_out_of_modeling_clay.mp4",
"a_zoomed_out_DSLR_photo_of_A_punk_rock_squirrel_in_a_studded_leather_jacket_shouting_into_a_microphone_while_standing_on_a_stump_and_holding_a_beer.mp4",
"a_zoomed_out_DSLR_photo_of_a_rabbit_cutting_grass_with_a_lawnmower.mp4",
"a_zoomed_out_DSLR_photo_of_a_rabbit_digging_a_hole_with_a_shovel.mp4",
"a_zoomed_out_DSLR_photo_of_a_raccoon_astronaut_holding_his_helmet.mp4",
"a_zoomed_out_DSLR_photo_of_a_rainforest_bird_mating_ritual_dance.mp4",
"a_zoomed_out_DSLR_photo_of_a_recliner_chair.mp4",
"a_zoomed_out_DSLR_photo_of_a_red_rotary_telephone.mp4",
"a_zoomed_out_DSLR_photo_of_a_robot_couple_fine_dining.mp4",
"a_zoomed_out_DSLR_photo_of_a_rotary_telephone_carved_out_of_wood.mp4",
"a_zoomed_out_DSLR_photo_of_a_shiny_beetle.mp4",
"a_zoomed_out_DSLR_photo_of_a_silver_candelabra_sitting_on_a_red_velvet_tablecloth,_only_one_candle_is_lit.mp4",
"a_zoomed_out_DSLR_photo_of_a_squirrel_DJing.mp4",
"a_zoomed_out_DSLR_photo_of_a_squirrel_dressed_up_like_a_Victorian_woman.mp4",
"a_zoomed_out_DSLR_photo_of_a_table_with_dim_sum_on_it.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_dressed_as_a_maid.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_dressed_as_a_military_general.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_eating_an_ice_cream_cone.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_wearing_sunglasses_and_a_leather_jacket,_riding_a_motorcycle.mp4",
"a_zoomed_out_DSLR_photo_of_a_toad_catching_a_fly_with_its_tongue.mp4",
"a_zoomed_out_DSLR_photo_of_a_wizard_raccoon_casting_a_spell.mp4",
"a_zoomed_out_DSLR_photo_of_a_yorkie_dog_dressed_as_a_maid.mp4",
"a_zoomed_out_DSLR_photo_of_cats_wearing_eyeglasses.mp4",
"a_zoomed_out_DSLR_photo_of_miniature_schnauzer_wooden_sculpture,_high_quality_studio_photo.mp4",
"A_zoomed_out_DSLR_photo_of___phoenix_made_of_splashing_water_.mp4",
"a_zoomed_out_DSLR_photo_of_Sydney_opera_house,_aerial_view.mp4",
"a_zoomed_out_DSLR_photo_of_two_foxes_tango_dancing.mp4",
"a_zoomed_out_DSLR_photo_of_two_raccoons_playing_poker.mp4",
"Chichen_Itza,_aerial_view.mp4",
"__Coffee_cup_with_many_holes.mp4",
"fries_and_a_hamburger.mp4",
"__Luminescent_wild_horses.mp4",
"Michelangelo_style_statue_of_an_astronaut.mp4",
"Michelangelo_style_statue_of_dog_reading_news_on_a_cellphone.mp4",
"the_titanic,_aerial_view.mp4",
"two_gummy_bears_playing_dominoes.mp4",
"two_macaw_parrots_playing_chess.mp4",
"Wedding_dress_made_of_tentacles.mp4",
]
def main():
    """Regenerate load/prompt_library.json from the DreamFusion gallery video names.

    Each entry in ``dreamfusion_gallery_video_names`` is a video filename; the
    original text prompt is recovered by dropping the ``.mp4`` suffix and
    mapping underscores back to spaces.
    """
    prompts = [
        name.replace(".mp4", "").replace("_", " ")
        for name in dreamfusion_gallery_video_names
    ]
    library = {"dreamfusion": prompts}
    with open("load/prompt_library.json", "w") as f:
        json.dump(library, f, indent=2)


if __name__ == "__main__":
    main()
================================================
FILE: load/prompt_library.json
================================================
{
"dreamfusion": [
"a 20-sided die made out of glass",
"a bald eagle carved out of wood",
"a banana peeling itself",
"a beagle in a detective's outfit",
"a beautiful dress made out of fruit, on a mannequin. Studio lighting, high quality, high resolution",
"a beautiful dress made out of garbage bags, on a mannequin. Studio lighting, high quality, high resolution",
"a beautiful rainbow fish",
"a bichon frise wearing academic regalia",
"a blue motorcycle",
"a blue poison-dart frog sitting on a water lily",
"a brightly colored mushroom growing on a log",
"a bumblebee sitting on a pink flower",
"a bunch of colorful marbles spilling out of a red velvet bag",
"a capybara wearing a top hat, low poly",
"a cat with a mullet",
"a ceramic lion",
"a ceramic upside down yellow octopus holding a blue green ceramic cup",
"a chihuahua wearing a tutu",
"a chimpanzee holding a peeled banana",
"a chimpanzee looking through a telescope",
"a chimpanzee stirring a bubbling purple potion in a cauldron",
"a chimpanzee with a big grin",
"a completely destroyed car",
"a confused beagle sitting at a desk working on homework",
"a corgi taking a selfie",
"a crab, low poly",
"a crocodile playing a drum set",
"a cute steampunk elephant",
"a dachsund dressed up in a hotdog costume",
"a delicious hamburger",
"a dragon-cat hybrid",
"a DSLR photo of a baby dragon drinking boba",
"a DSLR photo of a baby dragon hatching out of a stone egg",
"a DSLR photo of a baby grand piano viewed from far away",
"a DSLR photo of a bagel filled with cream cheese and lox",
"a DSLR photo of a bald eagle",
"a DSLR photo of a barbecue grill cooking sausages and burger patties",
"a DSLR photo of a basil plant",
"a DSLR photo of a bear dancing ballet",
"a DSLR photo of a bear dressed as a lumberjack",
"a DSLR photo of a bear dressed in medieval armor",
"a DSLR photo of a beautiful violin sitting flat on a table",
"a DSLR photo of a blue jay standing on a large basket of rainbow macarons",
"a DSLR photo of a bulldozer clearing away a pile of snow",
"a DSLR photo of a bulldozer",
"a DSLR photo of a cake covered in colorful frosting with a slice being taken out, high resolution",
"a DSLR photo of a candelabra with many candles on a red velvet tablecloth",
"a DSLR photo of a car made out of cheese",
"a DSLR photo of A car made out of sushi",
"a DSLR photo of a car made out pizza",
"a DSLR photo of a cat lying on its side batting at a ball of yarn",
"a DSLR photo of a cat magician making a white dove appear",
"a DSLR photo of a cat wearing a bee costume",
"a DSLR photo of a cat wearing a lion costume",
"a DSLR photo of a cauldron full of gold coins",
"a DSLR photo of a chimpanzee dressed like Henry VIII king of England",
"a DSLR photo of a chimpanzee dressed like Napoleon Bonaparte",
"a DSLR photo of a chow chow puppy",
"a DSLR photo of a Christmas tree with donuts as decorations",
"a DSLR photo of a chrome-plated duck with a golden beak arguing with an angry turtle in a forest",
"a DSLR photo of a classic Packard car",
"a DSLR photo of a cocker spaniel wearing a crown",
"a DSLR photo of a corgi lying on its back with its tongue lolling out",
"a DSLR photo of a corgi puppy",
"a DSLR photo of a corgi sneezing",
"a DSLR photo of a corgi standing up drinking boba",
"a DSLR photo of a corgi taking a selfie",
"a DSLR photo of a corgi wearing a beret and holding a baguette, standing up on two hind legs",
"a DSLR photo of a covered wagon",
"a DSLR photo of a cracked egg with the yolk spilling out on a wooden table",
"a DSLR photo of a cup full of pens and pencils",
"a DSLR photo of a dalmation wearing a fireman's hat",
"a DSLR photo of a delicious chocolate brownie dessert with ice cream on the side",
"a DSLR photo of a delicious croissant",
"a DSLR photo of A DMC Delorean car",
"a DSLR photo of a dog made out of salad",
"a DSLR photo of a drum set made of cheese",
"a DSLR photo of a drying rack covered in clothes",
"a DSLR photo of aerial view of a ruined castle",
"a DSLR photo of a football helmet",
"a DSLR photo of a fox holding a videogame controller",
"a DSLR photo of a fox taking a photograph using a DSLR",
"a DSLR photo of a frazer nash super sport car",
"a DSLR photo of a frog wearing a sweater",
"a DSLR photo of a ghost eating a hamburger",
"a DSLR photo of a giant worm emerging from the sand in the middle of the desert",
"a DSLR photo of a goose made out of gold",
"a DSLR photo of a green monster truck",
"a DSLR photo of a group of dogs eating pizza",
"a DSLR photo of a group of dogs playing poker",
"a DSLR photo of a gummy bear playing the saxophone",
"a DSLR photo of a hippo wearing a sweater",
"a DSLR photo of a humanoid robot holding a human brain",
"a DSLR photo of a humanoid robot playing solitaire",
"a DSLR photo of a humanoid robot playing the cello",
"a DSLR photo of a humanoid robot using a laptop",
"a DSLR photo of a humanoid robot using a rolling pin to roll out dough",
"a DSLR photo of a human skull",
"a DSLR photo of a kitten standing on top of a giant tortoise",
"a DSLR photo of a knight chopping wood",
"a DSLR photo of a knight holding a lance and sitting on an armored horse",
"a DSLR photo of a koala wearing a party hat and blowing out birthday candles on a cake",
"a DSLR photo of a lemur taking notes in a journal",
"a DSLR photo of a lion reading the newspaper",
"a DSLR photo of a mandarin duck swimming in a pond",
"a DSLR photo of a model of the eiffel tower made out of toothpicks",
"a DSLR photo of a mouse playing the tuba",
"a DSLR photo of a mug of hot chocolate with whipped cream and marshmallows",
"a DSLR photo of an adorable piglet in a field",
"a DSLR photo of an airplane taking off from the runway",
"a DSLR photo of an astronaut standing on the surface of mars",
"a DSLR photo of an eggshell broken in two with an adorable chick standing next to it",
"a DSLR photo of an elephant skull",
"a DSLR photo of an exercise bike in a well lit room",
"a DSLR photo of an extravagant mansion, aerial view",
"a DSLR photo of an ice cream sundae",
"a DSLR photo of an iguana holding a balloon",
"a DSLR photo of an intricate and complex dish from a michelin star restaurant",
"a DSLR photo of An iridescent steampunk patterned millipede with bison horns",
"a DSLR photo of an octopus playing the piano",
"a DSLR photo of an old car overgrown by vines and weeds",
"a DSLR photo of an old vintage car",
"a DSLR photo of an orangutan making a clay bowl on a throwing wheel",
"a DSLR photo of an orc forging a hammer on an anvil",
"a DSLR photo of an origami motorcycle",
"a DSLR photo of an ornate silver gravy boat sitting on a patterned tablecloth",
"a DSLR photo of an overstuffed pastrami sandwich",
"a DSLR photo of an unstable rock cairn in the middle of a stream",
"a DSLR photo of a pair of headphones sitting on a desk",
"a DSLR photo of a pair of tan cowboy boots, studio lighting, product photography",
"a DSLR photo of a peacock on a surfboard",
"a DSLR photo of a pigeon reading a book",
"a DSLR photo of a piglet sitting in a teacup",
"a DSLR photo of a pig playing a drum set",
"a DSLR photo of a pile of dice on a green tabletop next to some playing cards",
"a DSLR photo of a pirate collie dog, high resolution",
"a DSLR photo of a plate of fried chicken and waffles with maple syrup on them",
"a DSLR photo of a plate piled high with chocolate chip cookies",
"a DSLR photo of a plush t-rex dinosaur toy, studio lighting, high resolution",
"a DSLR photo of a plush triceratops toy, studio lighting, high resolution",
"a DSLR photo of a pomeranian dog",
"a DSLR photo of a porcelain dragon",
"a DSLR photo of a praying mantis wearing roller skates",
"a DSLR photo of a puffin standing on a rock",
"a DSLR photo of a pug made out of metal",
"a DSLR photo of a pug wearing a bee costume",
"a DSLR photo of a quill and ink sitting on a desk",
"a DSLR photo of a raccoon stealing a pie",
"a DSLR photo of a red cardinal bird singing",
"a DSLR photo of a red convertible car with the top down",
"a DSLR photo of a red-eyed tree frog",
"a DSLR photo of a red pickup truck driving across a stream",
"a DSLR photo of a red wheelbarrow with a shovel in it",
"a DSLR photo of a roast turkey on a platter",
"a DSLR photo of a robot and dinosaur playing chess, high resolution",
"a DSLR photo of a robot arm picking up a colorful block from a table",
"a DSLR photo of a robot cat knocking over a chess piece on a board",
"a DSLR photo of a robot dinosaur",
"a DSLR photo of a robot made out of vegetables",
"a DSLR photo of a robot stegosaurus",
"a DSLR photo of a robot tiger",
"a DSLR photo of a rolling pin on top of bread dough",
"a DSLR photo of a sheepdog running",
"a DSLR photo of a shiba inu playing golf wearing tartan golf clothes and hat",
"a DSLR photo of a shiny silver robot cat",
"a DSLR photo of a silverback gorilla holding a golden trophy",
"a DSLR photo of a silver humanoid robot flipping a coin",
"a DSLR photo of a small cherry tomato plant in a pot with a few red tomatoes growing on it",
"a DSLR photo of a small saguaro cactus planted in a clay pot",
"a DSLR photo of a Space Shuttle",
"a DSLR photo of a squirrel dressed like a clown",
"a DSLR photo of a squirrel flying a biplane",
"a DSLR photo of a squirrel giving a lecture writing on a chalkboard",
"a DSLR photo of a squirrel holding a bowling ball",
"a DSLR photo of a squirrel-lizard hybrid",
"a DSLR photo of a squirrel made out of fruit",
"a DSLR photo of a squirrel-octopus hybrid",
"a DSLR photo of a stack of pancakes covered in maple syrup",
"a DSLR photo of a steam engine train, high resolution",
"a DSLR photo of a steaming basket full of dumplings",
"a DSLR photo of a steaming hot plate piled high with spaghetti and meatballs",
"a DSLR photo of a steampunk space ship designed in the 18th century",
"a DSLR photo of a straw basket with a cobra coming out of it",
"a DSLR photo of a swan and its cygnets swimming in a pond",
"a DSLR photo of a tarantula, highly detailed",
"a DSLR photo of a teal moped",
"a DSLR photo of a teapot shaped like an elephant head where its snout acts as the spout",
"a DSLR photo of a teddy bear taking a selfie",
"a DSLR photo of a terracotta bunny",
"a DSLR photo of a tiger dressed as a doctor",
"a DSLR photo of a tiger made out of yarn",
"a DSLR photo of a toilet made out of gold",
"a DSLR photo of a toy robot",
"a DSLR photo of a train engine made out of clay",
"a DSLR photo of a tray of Sushi containing pugs",
"a DSLR photo of a tree stump with an axe buried in it",
"a DSLR photo of a turtle standing on its hind legs, wearing a top hat and holding a cane",
"a DSLR photo of a very beautiful small organic sculpture made of fine clockwork and gears with tiny ruby bearings, very intricate, caved, curved. Studio lighting, High resolution, white background",
"a DSLR photo of A very beautiful tiny human heart organic sculpture made of copper wire and threaded pipes, very intricate, curved, Studio lighting, high resolution",
"a DSLR photo of a very cool and trendy pair of sneakers, studio lighting",
"a DSLR photo of a vintage record player",
"a DSLR photo of a wine bottle and full wine glass on a chessboard",
"a DSLR photo of a wooden desk and chair from an elementary school",
"a DSLR photo of a yorkie dog eating a donut",
"a DSLR photo of a yorkie dog wearing extremely cool sneakers",
"a DSLR photo of baby elephant jumping on a trampoline",
"a DSLR photo of cat wearing virtual reality headset in renaissance oil painting high detail caravaggio",
"a DSLR photo of edible typewriter made out of vegetables",
"a DSLR photo of Mont Saint-Michel, France, aerial view",
"a DSLR photo of Mount Fuji, aerial view",
"a DSLR photo of Neuschwanstein Castle, aerial view",
"A DSLR photo of pyramid shaped burrito with a slice cut out of it",
"a DSLR photo of the Imperial State Crown of England",
"a DSLR photo of the leaning tower of Pisa, aerial view",
"a DSLR photo of the Statue of Liberty, aerial view",
"a DSLR photo of Two locomotives playing tug of war",
"a DSLR photo of two macaw parrots sharing a milkshake with two straws",
"a DSLR photo of Westminster Abbey, aerial view",
"a ficus planted in a pot",
"a flower made out of metal",
"a fluffy cat lying on its back in a patch of sunlight",
"a fox and a hare tangoing together",
"a fox holding a videogame controller",
"a fox playing the cello",
"a frazer nash super sport car",
"a freshly baked loaf of sourdough bread on a cutting board",
"a goat drinking beer",
"a golden goblet, low poly",
"a green dragon breathing fire",
"a green tractor farming corn fields",
"a highland cow",
"a hotdog in a tutu skirt",
"a humanoid robot laying on the couch while on a laptop",
"a humanoid robot playing the violin",
"a humanoid robot sitting looking at a Go board with some pieces on it",
"a human skeleton drinking a glass of red wine",
"a human skull with a vine growing through one of the eye sockets",
"a kitten looking at a goldfish in a bowl",
"a lemur drinking boba",
"a lemur taking notes in a journal",
"a lionfish",
"a llama wearing a suit",
"a marble bust of a mouse",
"a metal sculpture of a lion's head, highly detailed",
"a mojito in a beach chair",
"a monkey-rabbit hybrid",
"an airplane made out of wood",
"an amigurumi bulldozer",
"An anthropomorphic tomato eating another tomato",
"an astronaut playing the violin",
"an astronaut riding a kangaroo",
"an English castle, aerial view",
"an erupting volcano, aerial view",
"a nest with a few white eggs and one golden egg",
"an exercise bike",
"an iridescent metal scorpion",
"An octopus and a giraffe having cheesecake",
"an octopus playing the harp",
"an old vintage car",
"an opulent couch from the palace of Versailles",
"an orange road bike",
"an orangutan holding a paint palette in one hand and a paintbrush in the other",
"an orangutan playing accordion with its hands spread wide",
"an orangutan using chopsticks to eat ramen",
"an orchid flower planted in a clay pot",
"a palm tree, low poly 3d model",
"a panda rowing a boat in a pond",
"a panda wearing a necktie and sitting in an office chair",
"A Panther De Ville car",
"a pig wearing a backpack",
"a plate of delicious tacos",
"a plush dragon toy",
"a plush toy of a corgi nurse",
"a rabbit, animated movie character, high detail 3d model",
"a rabbit cutting grass with a lawnmower",
"a red eyed tree frog, low poly",
"a red panda",
"a ripe strawberry",
"a roulette wheel",
"a shiny red stand mixer",
"a silver platter piled high with fruits",
"a sliced loaf of fresh bread",
"a snail on a leaf",
"a spanish galleon sailing on the open sea",
"a squirrel dressed like Henry VIII king of England",
"a squirrel gesturing in front of an easel showing colorful pie charts",
"a squirrel wearing a tuxedo and holding a conductor's baton",
"a team of butterflies playing soccer on a field",
"a teddy bear pushing a shopping cart full of fruits and vegetables",
"a tiger dressed as a military general",
"a tiger karate master",
"a tiger playing the violin",
"a tiger waiter at a fancy restaurant",
"a tiger wearing a tuxedo",
"a t-rex roaring up into the air",
"a turtle standing on its hind legs, wearing a top hat and holding a cane",
"a typewriter",
"a walrus smoking a pipe",
"a wedge of cheese on a silver platter",
"a wide angle DSLR photo of a colorful rooster",
"a wide angle DSLR photo of a humanoid banana sitting at a desk doing homework",
"a wide angle DSLR photo of a mythical troll stirring a cauldron",
"a wide angle DSLR photo of a squirrel in samurai armor wielding a katana",
"a wide angle zoomed out DSLR photo of A red dragon dressed in a tuxedo and playing chess. The chess pieces are fashioned after robots",
"a wide angle zoomed out DSLR photo of a skiing penguin wearing a puffy jacket",
"a wide angle zoomed out DSLR photo of zoomed out view of Tower Bridge made out of gingerbread and candy",
"a woolly mammoth standing on ice",
"a yellow schoolbus",
"a zoomed out DSLR photo of a 3d model of an adorable cottage with a thatched roof",
"a zoomed out DSLR photo of a baby bunny sitting on top of a stack of pancakes",
"a zoomed out DSLR photo of a baby dragon",
"a zoomed out DSLR photo of a baby monkey riding on a pig",
"a zoomed out DSLR photo of a badger wearing a party hat and blowing out birthday candles on a cake",
"a zoomed out DSLR photo of a beagle eating a donut",
"a zoomed out DSLR photo of a bear playing electric bass",
"a zoomed out DSLR photo of a beautifully carved wooden knight chess piece",
"a zoomed out DSLR photo of a beautiful suit made out of moss, on a mannequin. Studio lighting, high quality, high resolution",
"a zoomed out DSLR photo of a blue lobster",
"a zoomed out DSLR photo of a blue tulip",
"a zoomed out DSLR photo of a bowl of cereal and milk with a spoon in it",
"a zoomed out DSLR photo of a brain in a jar",
"a zoomed out DSLR photo of a bulldozer made out of toy bricks",
"a zoomed out DSLR photo of a cake in the shape of a train",
"a zoomed out DSLR photo of a chihuahua lying in a pool ring",
"a zoomed out DSLR photo of a chimpanzee dressed as a football player",
"a zoomed out DSLR photo of a chimpanzee holding a cup of hot coffee",
"a zoomed out DSLR photo of a chimpanzee wearing headphones",
"a zoomed out DSLR photo of a colorful camping tent in a patch of grass",
"a zoomed out DSLR photo of a complex movement from an expensive watch with many shiny gears, sitting on a table",
"a zoomed out DSLR photo of a construction excavator",
"a zoomed out DSLR photo of a corgi wearing a top hat",
"a zoomed out DSLR photo of a corn cob and a banana playing poker",
"a zoomed out DSLR photo of a dachsund riding a unicycle",
"a zoomed out DSLR photo of a dachsund wearing a boater hat",
"a zoomed out DSLR photo of a few pool balls sitting on a pool table",
"a zoomed out DSLR photo of a fox working on a jigsaw puzzle",
"a zoomed out DSLR photo of a fresh cinnamon roll covered in glaze",
"a zoomed out DSLR photo of a green tractor",
"a zoomed out DSLR photo of a greyhound dog racing down the track",
"a zoomed out DSLR photo of a group of squirrels rowing crew",
"a zoomed out DSLR photo of a gummy bear driving a convertible",
"a zoomed out DSLR photo of a hermit crab with a colorful shell",
"a zoomed out DSLR photo of a hippo biting through a watermelon",
"a zoomed out DSLR photo of a hippo made out of chocolate",
"a zoomed out DSLR photo of a humanoid robot lying on a couch using a laptop",
"a zoomed out DSLR photo of a humanoid robot sitting on a chair drinking a cup of coffee",
"a zoomed out DSLR photo of a human skeleton relaxing in a lounge chair",
"a zoomed out DSLR photo of a kangaroo sitting on a bench playing the accordion",
"a zoomed out DSLR photo of a kingfisher bird",
"a zoomed out DSLR photo of a ladybug",
"a zoomed out DSLR photo of a lion's mane jellyfish",
"a zoomed out DSLR photo of a lobster playing the saxophone",
"a zoomed out DSLR photo of a majestic sailboat",
"a zoomed out DSLR photo of a marble bust of a cat, a real mouse is sitting on its head",
"a zoomed out DSLR photo of a marble bust of a fox head",
"a zoomed out DSLR photo of a model of a house in Tudor style",
"a zoomed out DSLR photo of a monkey-rabbit hybrid",
"a zoomed out DSLR photo of a monkey riding a bike",
"a zoomed out DSLR photo of a mountain goat standing on a boulder",
"a zoomed out DSLR photo of a mouse holding a candlestick",
"a zoomed out DSLR photo of an adorable kitten lying next to a flower",
"a zoomed out DSLR photo of an all-utility vehicle driving across a stream",
"a zoomed out DSLR photo of an amigurumi motorcycle",
"a zoomed out DSLR photo of an astronaut chopping vegetables in a sunlit kitchen",
"a zoomed out DSLR photo of an egg cracked open with a newborn chick hatching out of it",
"a zoomed out DSLR photo of an expensive office chair",
"a zoomed out DSLR photo of an origami bulldozer sitting on the ground",
"a zoomed out DSLR photo of an origami crane",
"a zoomed out DSLR photo of an origami hippo in a river",
"a zoomed out DSLR photo of an otter lying on its back in the water holding a flower",
"a zoomed out DSLR photo of a pair of floating chopsticks picking up noodles out of a bowl of ramen",
"a zoomed out DSLR photo of a panda throwing wads of cash into the air",
"a zoomed out DSLR photo of a panda wearing a chef's hat and kneading bread dough on a countertop",
"a zoomed out DSLR photo of a pigeon standing on a manhole cover",
"a zoomed out DSLR photo of a pig playing the saxophone",
"a zoomed out DSLR photo of a pile of dice on a green tabletop",
"a zoomed out DSLR photo of a pita bread full of hummus and falafel and vegetables",
"a zoomed out DSLR photo of a pug made out of modeling clay",
"a zoomed out DSLR photo of A punk rock squirrel in a studded leather jacket shouting into a microphone while standing on a stump and holding a beer",
"a zoomed out DSLR photo of a rabbit cutting grass with a lawnmower",
"a zoomed out DSLR photo of a rabbit digging a hole with a shovel",
"a zoomed out DSLR photo of a raccoon astronaut holding his helmet",
"a zoomed out DSLR photo of a rainforest bird mating ritual dance",
"a zoomed out DSLR photo of a recliner chair",
"a zoomed out DSLR photo of a red rotary telephone",
"a zoomed out DSLR photo of a robot couple fine dining",
"a zoomed out DSLR photo of a rotary telephone carved out of wood",
"a zoomed out DSLR photo of a shiny beetle",
"a zoomed out DSLR photo of a silver candelabra sitting on a red velvet tablecloth, only one candle is lit",
"a zoomed out DSLR photo of a squirrel DJing",
"a zoomed out DSLR photo of a squirrel dressed up like a Victorian woman",
"a zoomed out DSLR photo of a table with dim sum on it",
"a zoomed out DSLR photo of a tiger dressed as a maid",
"a zoomed out DSLR photo of a tiger dressed as a military general",
"a zoomed out DSLR photo of a tiger eating an ice cream cone",
"a zoomed out DSLR photo of a tiger wearing sunglasses and a leather jacket, riding a motorcycle",
"a zoomed out DSLR photo of a toad catching a fly with its tongue",
"a zoomed out DSLR photo of a wizard raccoon casting a spell",
"a zoomed out DSLR photo of a yorkie dog dressed as a maid",
"a zoomed out DSLR photo of cats wearing eyeglasses",
"a zoomed out DSLR photo of miniature schnauzer wooden sculpture, high quality studio photo",
"A zoomed out DSLR photo of phoenix made of splashing water ",
"a zoomed out DSLR photo of Sydney opera house, aerial view",
"a zoomed out DSLR photo of two foxes tango dancing",
"a zoomed out DSLR photo of two raccoons playing poker",
"Chichen Itza, aerial view",
" Coffee cup with many holes",
"fries and a hamburger",
" Luminescent wild horses",
"Michelangelo style statue of an astronaut",
"Michelangelo style statue of dog reading news on a cellphone",
"the titanic, aerial view",
"two gummy bears playing dominoes",
"two macaw parrots playing chess",
"Wedding dress made of tentacles"
]
}
================================================
FILE: load/shapes/README.md
================================================
# Shape Credits
- `animal.obj` - Ido Richardson
- `hand_prismatic.obj` - Ido Richardson
- `potion.obj` - Ido Richardson
- `blub.obj` - [Keenan's 3D Model Repository](https://www.cs.cmu.edu/~kmcrane/Projects/ModelRepository/)
- `nascar.obj` - [Princeton ModelNet](https://modelnet.cs.princeton.edu/)
- `cabin.obj` - [Princeton ModelNet](https://modelnet.cs.princeton.edu/)
- `teddy.obj` - [Gal Metzer](https://galmetzer.github.io/)
- `human.obj` - [TurboSquid](https://www.turbosquid.com/3d-models/3d-model-character-base/524860)
================================================
FILE: load/shapes/animal.obj
================================================
####
#
# OBJ File Generated by Meshlab
#
####
# Object animal_legs_head.obj
#
# Vertices: 1536
# Faces: 3068
#
####
mtllib ./animal_legs_head.obj.mtl
vn -0.037566 0.880458 -0.472633
v 9.999994 49.041206 -42.944695
vn -0.715259 0.532976 -0.452042
v 9.999994 48.646877 -43.444782
vn -0.716542 0.495637 -0.490828
v 9.999994 48.261967 -43.867054
vn -0.717102 0.451769 -0.530726
v 9.999994 47.874546 -44.227222
vn -0.717793 0.401355 -0.568935
v 9.999994 47.469975 -44.542099
vn -0.718191 0.344641 -0.604504
v 9.999994 47.026360 -44.824703
vn -0.718058 0.283944 -0.635428
v 9.999994 46.535217 -45.073555
vn -0.717495 0.222382 -0.660112
v 9.999994 45.989674 -45.286037
vn -0.716685 0.162663 -0.678162
v 9.999994 45.382099 -45.459721
vn -0.715773 0.106663 -0.690139
v 9.999994 44.699299 -45.592724
vn -0.714822 0.055588 -0.697094
v 9.999994 43.931980 -45.680847
vn -0.713905 0.010049 -0.700171
v 9.999994 43.072033 -45.719482
vn -0.713071 -0.029961 -0.700451
v 9.999994 42.110180 -45.704025
vn -0.712545 -0.065448 -0.698567
v 9.999994 41.020355 -45.628292
vn -0.710879 -0.093246 -0.697106
v 9.999994 39.780952 -45.482559
vn -0.709369 -0.111445 -0.695972
v 9.999994 39.099632 -45.380280
vn -0.709357 -0.125023 -0.693673
v 9.999994 38.374283 -45.256695
vn -0.709238 -0.136940 -0.691541
v 9.999994 37.602699 -45.110538
vn -0.041933 -0.362528 -0.931029
v 9.999994 36.782665 -44.940544
vn -0.688592 -0.196727 -0.697954
v -0.000006 36.782665 -44.940544
vn -0.700884 -0.113013 -0.704265
v -0.000006 38.939480 -45.353718
vn -0.700280 -0.068149 -0.710608
v -0.000006 41.153679 -45.639793
vn -0.700821 -0.024999 -0.712899
v -0.000006 42.237087 -45.709007
vn -0.699595 0.020340 -0.714250
v -0.000006 43.272068 -45.715389
vn -0.698467 0.073765 -0.711830
v -0.000006 44.196259 -45.656879
vn -0.697365 0.134359 -0.704010
v -0.000006 45.009594 -45.539471
vn -0.696395 0.200742 -0.689011
v -0.000006 45.723118 -45.369526
vn -0.695654 0.270555 -0.665482
v -0.000006 46.348988 -45.152943
vn -0.695302 0.340466 -0.632960
v -0.000006 46.907177 -44.890789
vn -0.695553 0.406157 -0.592657
v -0.000006 47.408821 -44.584728
vn -0.696443 0.463501 -0.547845
v -0.000006 47.862633 -44.237129
vn -0.694298 0.521175 -0.496313
v -0.000006 48.278328 -43.849361
vn -0.678963 0.619032 -0.394726
v -0.000006 49.041206 -42.944695
vn 0.041933 -0.362528 -0.931029
v 19.999994 36.782665 -44.940544
vn 0.709042 -0.137202 -0.691689
v 19.999994 37.602699 -45.110538
vn 0.709176 -0.124825 -0.693894
v 19.999994 38.374283 -45.256695
vn 0.709320 -0.111526 -0.696008
v 19.999994 39.099632 -45.380280
vn 0.710620 -0.093428 -0.697346
v 19.999994 39.780952 -45.482559
vn 0.712347 -0.065292 -0.698784
v 19.999994 41.020355 -45.628292
vn 0.713004 -0.029975 -0.700519
v 19.999994 42.110180 -45.704025
vn 0.713850 0.010081 -0.700226
v 19.999994 43.072033 -45.719482
vn 0.714809 0.055616 -0.697105
v 19.999994 43.931980 -45.680847
vn 0.715790 0.106674 -0.690120
v 19.999994 44.699299 -45.592724
vn 0.716713 0.162656 -0.678134
v 19.999994 45.382099 -45.459721
vn 0.717516 0.222366 -0.660094
v 19.999994 45.989674 -45.286037
vn 0.718056 0.283925 -0.635439
v 19.999994 46.535217 -45.073555
vn 0.718146 0.344633 -0.604561
v 19.999994 47.026360 -44.824703
vn 0.717688 0.401379 -0.569050
v 19.999994 47.469975 -44.542099
vn 0.717159 0.451908 -0.530531
v 19.999994 47.874546 -44.227222
vn 0.716601 0.495449 -0.490931
v 19.999994 48.261967 -43.867054
vn 0.715201 0.533063 -0.452030
v 19.999994 48.646877 -43.444782
vn 0.037604 0.880551 -0.472458
v 19.999994 49.041206 -42.944695
vn 0.679540 0.619772 -0.392565
v 29.999994 49.041206 -42.944695
vn 0.691521 0.519586 -0.501826
v 29.999994 48.348183 -43.777119
vn 0.692694 0.442713 -0.569369
v 29.999994 47.585117 -44.458385
vn 0.696519 0.372233 -0.613436
v 29.999994 47.154171 -44.749405
vn 0.696522 0.310967 -0.646650
v 29.999994 46.680916 -45.006001
vn 0.696925 0.247832 -0.672960
v 29.999994 46.159019 -45.226433
vn 0.697542 0.185639 -0.692080
v 29.999994 45.576836 -45.410397
vn 0.698322 0.126453 -0.704526
v 29.999994 44.917595 -45.556469
vn 0.699230 0.071952 -0.711267
v 29.999994 44.172188 -45.659336
vn 0.700100 0.022956 -0.713676
v 29.999994 43.331646 -45.713631
vn 0.700841 -0.020794 -0.713014
v 29.999994 42.369610 -45.713448
vn 0.702878 -0.055212 -0.709164
v 29.999994 41.250793 -45.648365
vn 0.704392 -0.077449 -0.705574
v 29.999994 40.627243 -45.587639
vn 0.704565 -0.093769 -0.703417
v 29.999994 39.958080 -45.506065
vn 0.704790 -0.109268 -0.700951
v 29.999994 39.241177 -45.402111
vn 0.704926 -0.123640 -0.698421
v 29.999994 38.474396 -45.274231
vn 0.705050 -0.137029 -0.695792
v 29.999994 37.655605 -45.120888
vn 0.689849 -0.201262 -0.695415
v 29.999994 36.782665 -44.940544
vn 0.038032 0.995301 0.089050
v 22.000000 58.057266 5.561405
vn 0.716134 0.665935 0.209002
v 22.000000 57.816811 6.449402
vn 0.717298 0.644796 0.264048
v 22.000000 57.499245 7.336279
vn 0.717869 0.617043 0.322371
v 22.000000 57.090443 8.219446
vn 0.717830 0.582554 0.381249
v 22.000000 56.576279 9.096297
vn 0.717115 0.544598 0.434925
v 22.000000 55.971092 9.929281
vn 0.715991 0.505834 0.481132
v 22.000000 55.324966 10.666539
vn 0.714924 0.468517 0.519014
v 22.000000 54.658588 11.312586
vn 0.713804 0.434041 0.549630
v 22.000000 53.992245 11.872548
vn 0.712735 0.403171 0.573988
v 22.000000 53.346222 12.351551
vn 0.714478 0.367454 0.595398
v 22.000000 52.728939 12.761940
vn 0.716305 0.319466 0.620361
v 22.000000 51.433067 13.500792
vn 0.715694 0.269044 0.644514
v 22.000000 50.047176 14.143022
vn 0.714922 0.220027 0.663682
v 22.000000 48.586147 14.686825
vn 0.714485 0.172507 0.678050
v 22.000000 47.064857 15.130394
vn 0.714404 0.125711 0.688348
v 22.000000 45.496616 15.472218
vn 0.714699 0.078589 0.695003
v 22.000000 43.869396 15.713192
vn 0.715077 0.030594 0.698376
v 22.000000 42.186924 15.846107
vn 0.715166 -0.018820 0.698701
v 22.000000 40.462143 15.861362
vn 0.715132 -0.070797 0.695395
v 22.000000 38.707996 15.749357
vn 0.713294 -0.118010 0.690858
v 22.000000 36.941570 15.501235
vn 0.711347 -0.151245 0.686374
v 22.000000 36.095028 15.331611
vn 0.043304 -0.416571 0.908071
v 22.000000 35.292015 15.138749
vn 0.678614 -0.256243 0.688348
v 30.000000 35.292015 15.138749
vn 0.700700 -0.136245 0.700326
v 30.000000 36.673241 15.451460
vn 0.701227 -0.095819 0.706469
v 30.000000 37.960979 15.661963
vn 0.701735 -0.059108 0.709982
v 30.000000 39.141129 15.789329
vn 0.700080 -0.020412 0.713772
v 30.000000 40.200653 15.852660
vn 0.698427 0.031940 0.714968
v 30.000000 42.208824 15.844935
vn 0.698699 0.089560 0.709788
v 30.000000 44.180241 15.675954
vn 0.698819 0.146569 0.700121
v 30.000000 46.093781 15.355386
vn 0.698572 0.203777 0.685910
v 30.000000 47.928318 14.892900
vn 0.697736 0.263030 0.666318
v 30.000000 49.671326 14.294765
vn 0.699082 0.316934 0.640965
v 30.000000 51.356880 13.539785
vn 0.701073 0.356583 0.617531
v 30.000000 52.169632 13.099705
vn 0.700752 0.391502 0.596383
v 30.000000 52.955074 12.616725
vn 0.700290 0.427802 0.571471
v 30.000000 53.707741 12.089963
vn 0.699893 0.464779 0.542338
v 30.000000 54.422157 11.518541
vn 0.699747 0.500869 0.509395
v 30.000000 55.073273 10.921085
vn 0.699770 0.534565 0.473880
v 30.000000 55.630398 10.334198
vn 0.699728 0.565176 0.436987
v 30.000000 56.101540 9.765021
vn 0.700102 0.591244 0.400359
v 30.000000 56.495308 9.220168
vn 0.700821 0.613454 0.364039
v 30.000000 56.820316 8.706254
vn 0.699038 0.637148 0.324635
v 30.000000 57.085175 8.229894
vn 0.697218 0.662948 0.272741
v 30.000000 57.491562 7.355235
vn 0.698208 0.682761 0.215275
v 30.000000 57.810844 6.468513
vn 0.682319 0.722736 0.109967
v 30.000000 58.057266 5.561404
vn -0.044067 -0.417806 0.907467
v 8.000003 35.292015 15.138752
vn -0.711558 -0.150968 0.686218
v 8.000003 36.095028 15.331614
vn -0.713664 -0.117898 0.690496
v 8.000003 36.941570 15.501238
vn -0.715423 -0.070994 0.695075
v 8.000003 38.707996 15.749360
vn -0.714957 -0.019279 0.698902
v 8.000003 40.462143 15.861364
vn -0.714579 0.030615 0.698884
v 8.000003 42.186924 15.846110
vn -0.714346 0.078863 0.695335
v 8.000003 43.869396 15.713195
vn -0.714317 0.125909 0.688403
v 8.000003 45.496616 15.472220
vn -0.714589 0.172594 0.677919
v 8.000003 47.064857 15.130397
vn -0.715156 0.220028 0.663430
v 8.000003 48.586147 14.686828
vn -0.715966 0.268916 0.644265
v 8.000003 50.047176 14.143024
vn -0.716671 0.319362 0.619992
v 8.000003 51.433067 13.500794
vn -0.714742 0.367067 0.595320
v 8.000003 52.728939 12.761943
vn -0.712756 0.403185 0.573952
v 8.000003 53.346222 12.351554
vn -0.713830 0.434006 0.549623
v 8.000003 53.992245 11.872551
vn -0.714913 0.468505 0.519041
v 8.000003 54.658588 11.312589
vn -0.715982 0.505859 0.481121
v 8.000000 55.324966 10.666542
vn -0.717162 0.544587 0.434860
v 8.000000 55.971092 9.929284
vn -0.717890 0.582485 0.381241
v 8.000000 56.576279 9.096300
vn -0.717728 0.617066 0.322639
v 8.000000 57.090443 8.219449
vn -0.717009 0.645058 0.264194
v 8.000000 57.499245 7.336280
vn -0.715980 0.666159 0.208819
v 8.000000 57.816811 6.449404
vn -0.038001 0.995297 0.089106
v 8.000000 58.057266 5.561407
vn -0.681128 0.723082 0.114964
v 0.000001 58.057266 5.561408
vn -0.695063 0.678491 0.237776
v 0.000001 57.702892 6.795747
vn -0.696959 0.647652 0.307887
v 0.000001 57.195858 8.011716
vn -0.699770 0.618154 0.358061
v 0.000001 56.874542 8.612262
vn -0.699531 0.592666 0.399253
v 0.000001 56.502613 9.207300
vn -0.699537 0.564627 0.438001
v 0.000001 56.078911 9.792962
vn -0.699855 0.533483 0.474972
v 0.000001 55.601017 10.367282
vn -0.700068 0.500661 0.509159
v 0.000001 55.065716 10.929310
vn -0.700307 0.466715 0.540136
v 0.000002 54.469799 11.478095
vn -0.700615 0.432258 0.567707
v 0.000002 53.810055 12.012688
vn -0.698248 0.391599 0.599249
v 0.000002 53.095020 12.523724
vn -0.696470 0.334878 0.634654
v 0.000002 51.573853 13.427677
vn -0.697598 0.273375 0.662286
v 0.000002 49.962353 14.177936
vn -0.698598 0.215929 0.682155
v 0.000002 48.296322 14.780287
vn -0.699105 0.161125 0.696628
v 0.000002 46.570908 15.250257
vn -0.699250 0.106912 0.706837
v 0.000002 44.764725 15.594750
vn -0.699174 0.052044 0.713055
v 0.000002 42.895981 15.804317
vn -0.698896 -0.004207 0.715211
v 0.000002 40.982910 15.869497
vn -0.698514 -0.062099 0.712897
v 0.000002 39.044220 15.780875
vn -0.698372 -0.120773 0.705472
v 0.000002 37.137413 15.535860
vn -0.677706 -0.253089 0.690406
v 0.000002 35.292015 15.138752
vn -0.237122 -0.530354 0.813940
v 25.000000 24.999994 9.572199
vn 0.236928 -0.533617 0.811861
v 5.000001 24.999994 9.572202
vn 0.283495 -0.417438 -0.863352
v 5.000000 24.999994 -1.984161
vn -0.284452 -0.406707 -0.868146
v 25.000000 24.999994 -1.984163
vn 0.189460 -0.663173 -0.724090
v 4.999994 24.999994 -40.456905
vn -0.189265 -0.663429 -0.723907
v 24.999994 24.999994 -40.456905
vn -0.186518 -0.719727 0.668733
v 24.999994 24.999994 -25.000006
vn 0.183869 -0.714753 0.674774
v 4.999997 24.999994 -25.000000
vn 0.709998 0.070113 -0.700705
v 4.999996 11.761100 -38.115604
vn 0.712254 0.044853 -0.700487
v 4.999996 13.613945 -37.954124
vn 0.714201 0.003865 -0.699930
v 4.999996 15.423278 -37.882206
vn 0.716319 -0.052113 -0.695824
v 4.999996 17.199190 -37.934822
vn 0.715392 -0.112301 -0.689639
v 4.999996 18.951782 -38.146946
vn 0.713306 -0.159062 -0.682564
v 4.999994 19.822487 -38.323750
vn 0.713170 -0.197512 -0.672590
v 4.999994 20.666368 -38.546070
vn 0.712694 -0.233548 -0.661455
v 4.999994 21.461468 -38.803925
vn 0.713906 -0.270327 -0.645958
v 4.999994 22.216190 -39.092468
vn 0.712765 -0.309131 -0.629606
v 4.999994 23.638245 -39.742302
vn 0.730445 -0.262956 0.630320
v 4.999997 24.632648 -25.200006
vn 0.725258 -0.145497 0.672927
v 4.999997 24.230774 -25.321075
vn 0.719111 -0.056971 0.692556
v 4.999997 23.788733 -25.380795
vn 0.713978 0.001659 0.700166
v 4.999997 23.298027 -25.394985
vn 0.710378 0.033704 0.703013
v 4.999997 22.752501 -25.376860
vn 0.707640 0.045503 0.705106
v 4.999997 22.147999 -25.339149
vn 0.705601 0.042308 0.707346
v 4.999997 21.480364 -25.294580
vn 0.702679 0.022334 0.711157
v 4.999997 20.745438 -25.255880
vn 0.700518 -0.017152 0.713429
v 4.999997 19.190577 -25.242184
vn 0.700846 -0.061712 0.710638
v 4.999997 17.576881 -25.333029
vn 0.702913 -0.097913 0.704504
v 4.999997 15.851218 -25.536446
vn 0.704434 -0.119734 0.699597
v 4.999997 14.929784 -25.682877
vn 0.704807 -0.134899 0.696455
v 4.999997 13.960424 -25.860464
vn 0.705230 -0.148616 0.693228
v 4.999997 12.936492 -26.070208
vn 0.705488 -0.160265 0.690363
v 4.999997 11.851344 -26.313114
vn 0.705781 -0.169973 0.687737
v 4.999997 10.698337 -26.590187
vn 0.705680 -0.178995 0.685548
v 4.999997 9.470823 -26.902428
vn 0.705793 -0.184985 0.683839
v 4.999997 8.162160 -27.250841
vn 0.706471 -0.190472 0.681630
v 4.999996 6.765705 -27.636431
vn 0.706776 -0.193078 0.680579
v 4.999996 6.032477 -27.843481
vn 0.706941 -0.194454 0.680017
v 4.999996 5.656652 -27.950651
vn 0.706878 -0.195636 0.679743
v 4.999996 5.234808 -28.071617
vn 0.706733 -0.197321 0.679406
v 4.999996 4.217318 -28.365477
vn 0.706564 -0.199840 0.678845
v 4.999996 3.103128 -28.690798
vn 0.706432 -0.203538 0.677884
v 4.999996 2.054284 -29.002253
vn 0.706294 -0.208034 0.676661
v 4.999996 1.605008 -29.138615
vn 0.705711 -0.214888 0.675126
v 4.999996 1.230790 -29.255152
vn 0.704851 -0.226223 0.672315
v 4.999996 0.924141 -29.354879
vn 0.701081 -0.252672 0.666815
v 4.999996 0.675273 -29.441452
vn 0.694814 -0.310568 0.648676
v 4.999996 0.323635 -29.586000
vn 0.690759 -0.394848 0.605762
v 4.999996 0.200512 -29.653708
vn 0.687710 -0.490412 0.535304
v 4.999996 0.103629 -29.727749
vn 0.690920 -0.568296 0.446843
v 4.999996 0.068456 -29.766258
vn 0.689956 -0.630221 0.356065
v 4.999996 0.042038 -29.805500
vn 0.690656 -0.676520 0.255570
v 4.999996 0.023054 -29.846239
vn 0.687430 -0.712290 0.141714
v 4.999996 0.010375 -29.889297
vn 0.691348 -0.722090 0.024972
v 4.999996 0.000143 -29.985144
vn 0.701798 -0.711046 -0.043512
v 4.999996 0.004464 -30.101032
vn 0.716012 -0.697396 -0.031079
v 4.999996 0.032311 -30.428709
vn 0.714255 -0.699547 0.021774
v 4.999996 0.034864 -31.342150
vn 0.709051 -0.703343 0.050537
v 4.999996 0.002947 -31.845482
vn 0.706459 -0.705650 0.054536
v 4.999996 -0.041295 -32.395313
vn 0.700890 -0.712581 0.031319
v 4.999996 -0.130382 -33.606750
vn 0.697333 -0.716380 -0.022954
v 4.999996 -0.147117 -34.816341
vn 0.696670 -0.711894 -0.088640
v 4.999996 -0.101083 -35.414734
vn 0.691829 -0.701173 -0.172419
v 4.999996 0.000000 -36.000000
vn 0.685997 -0.666786 -0.291212
v 4.999996 0.211554 -36.655914
vn 0.682836 -0.590279 -0.430472
v 4.999996 0.549616 -37.256489
vn 0.683345 -0.474220 -0.555117
v 4.999996 1.012095 -37.754852
vn 0.687070 -0.344863 -0.639535
v 4.999996 1.584753 -38.139488
vn 0.691796 -0.230844 -0.684200
v 4.999994 2.223330 -38.408249
vn 0.695646 -0.141918 -0.704227
v 4.999994 2.903082 -38.583950
vn 0.698496 -0.074554 -0.711720
v 4.999994 3.635547 -38.690884
vn 0.700744 -0.024106 -0.713005
v 4.999994 4.412277 -38.740578
vn 0.702409 0.013187 -0.711652
v 4.999994 5.224669 -38.743664
vn 0.702593 0.044424 -0.710204
v 4.999994 6.076509 -38.708942
vn 0.704078 0.070773 -0.706588
v 4.999994 7.883051 -38.555347
vn 0.707098 0.079539 -0.702628
v 4.999994 9.791864 -38.338131
vn -0.697939 -0.715701 -0.025567
v 24.999994 -0.147117 -34.816341
vn -0.700609 -0.713101 0.025181
v 24.999994 -0.141284 -33.815369
vn -0.705257 -0.707059 0.051768
v 24.999994 -0.076723 -32.819256
vn -0.708931 -0.703398 0.051463
v 24.999994 0.000716 -31.867691
vn -0.710678 -0.702659 0.034753
v 24.999994 0.029495 -31.426409
vn -0.713652 -0.700497 0.002186
v 24.999994 0.041841 -31.056400
vn -0.710051 -0.703459 -0.031205
v 24.999994 0.022757 -30.387638
vn -0.693548 -0.720407 0.002142
v 24.999994 0.000017 -29.999195
vn -0.687220 -0.717685 0.112500
v 24.999994 0.005308 -29.917118
vn -0.680865 -0.683165 0.264024
v 24.999994 0.023388 -29.845369
vn -0.680819 -0.599094 0.421393
v 24.999994 0.058681 -29.779324
vn -0.679320 -0.475921 0.558590
v 24.999994 0.114171 -29.717966
vn -0.688791 -0.341114 0.639694
v 24.999994 0.297914 -29.598825
vn -0.699394 -0.257796 0.666625
v 24.999994 0.600749 -29.469084
vn -0.704387 -0.223769 0.673622
v 24.999994 1.061505 -29.309494
vn -0.706068 -0.211661 0.675772
v 24.999994 1.363070 -29.213469
vn -0.706492 -0.206443 0.676942
v 24.999994 1.718205 -29.104074
vn -0.706752 -0.203423 0.677584
v 24.999994 2.129736 -28.979822
vn -0.706683 -0.201230 0.678311
v 24.999994 2.590803 -28.842157
vn -0.706450 -0.198187 0.679448
v 24.999994 3.628388 -28.536322
vn -0.706426 -0.194085 0.680656
v 24.999994 5.936840 -27.870344
vn -0.706480 -0.190137 0.681714
v 24.999994 7.077121 -27.548904
vn -0.706045 -0.185649 0.683400
v 24.999994 8.137035 -27.256487
vn -0.705823 -0.181125 0.684841
v 24.999994 9.140247 -26.987217
vn -0.706026 -0.175035 0.686214
v 24.999994 10.093677 -26.739492
vn -0.705841 -0.168121 0.688131
v 24.999994 11.000375 -26.512781
vn -0.704849 -0.158470 0.691430
v 24.999994 11.863399 -26.306551
vn -0.703429 -0.142166 0.696403
v 24.999994 13.470623 -25.953403
vn -0.702709 -0.119713 0.701334
v 24.999994 14.939770 -25.675781
vn -0.702170 -0.092607 0.705961
v 24.999994 16.295258 -25.469425
vn -0.701834 -0.061798 0.709655
v 24.999994 17.561510 -25.330070
vn -0.701817 -0.028378 0.711792
v 24.999994 18.762945 -25.253456
vn -0.702246 0.004932 0.711917
v 24.999994 19.923985 -25.235315
vn -0.703727 0.032014 0.709748
v 24.999994 21.014936 -25.267462
vn -0.706577 0.044205 0.706254
v 24.999994 21.955469 -25.324291
vn -0.711298 0.031825 0.702170
v 24.999994 22.763422 -25.376421
vn -0.717934 -0.015630 0.695936
v 24.999994 23.459042 -25.394526
vn -0.726162 -0.106934 0.679157
v 24.999994 24.055696 -25.351854
vn -0.733946 -0.239744 0.635489
v 24.999994 24.565554 -25.226120
vn -0.712234 -0.311815 -0.628883
v 24.999994 23.714045 -39.779976
vn -0.715620 -0.268657 -0.644758
v 24.999994 22.382565 -39.160889
vn -0.715062 -0.216693 -0.664628
v 24.999994 20.946503 -38.630741
vn -0.713167 -0.174578 -0.678908
v 24.999994 20.170801 -38.408737
vn -0.713180 -0.137154 -0.687432
v 24.999994 19.346807 -38.220631
vn -0.713030 -0.099295 -0.694067
v 24.999994 18.467142 -38.070305
vn -0.714739 -0.055244 -0.697206
v 24.999994 17.527441 -37.961914
vn -0.714894 -0.001845 -0.699230
v 24.999994 15.606934 -37.882214
vn -0.712630 0.042984 -0.700222
v 24.999994 13.680856 -37.949921
vn -0.710100 0.069829 -0.700629
v 24.999994 11.761100 -38.115604
vn -0.707078 0.079516 -0.702651
v 24.999994 9.782825 -38.339272
vn -0.703998 0.070338 -0.706710
v 24.999994 7.852611 -38.558609
vn -0.702455 0.043039 -0.710426
v 24.999994 6.022571 -38.712059
vn -0.702240 0.010696 -0.711860
v 24.999994 5.160625 -38.744881
vn -0.700593 -0.027831 -0.713018
v 24.999994 4.342332 -38.738194
vn -0.698502 -0.079038 -0.711230
v 24.999994 3.573898 -38.684319
vn -0.695722 -0.146389 -0.703236
v 24.999994 2.861524 -38.575584
vn -0.691825 -0.235168 -0.682696
v 24.999994 2.195804 -38.399067
vn -0.687101 -0.348972 -0.637268
v 24.999994 1.563588 -38.128181
vn -0.683474 -0.477314 -0.552300
v 24.999994 0.999137 -37.743797
vn -0.683031 -0.591775 -0.428101
v 24.999994 0.542971 -37.247295
vn -0.686164 -0.667115 -0.290064
v 24.999994 0.209633 -36.651432
vn -0.691927 -0.701097 -0.172334
v 24.999994 0.000000 -36.000000
vn -0.696643 -0.711885 -0.088928
v 24.999994 -0.100531 -35.419548
vn 0.712495 -0.158892 0.683450
v 30.000000 24.169331 9.355659
vn 0.711609 -0.126911 0.691018
v 30.000000 23.013584 9.117564
vn 0.710383 -0.102329 0.696336
v 30.000000 21.703257 8.905037
vn 0.709327 -0.084974 0.699739
v 30.000000 20.245955 8.713100
vn 0.708586 -0.073718 0.701763
v 30.000000 18.658257 8.536223
vn 0.708014 -0.067372 0.702977
v 30.000000 16.963675 8.368478
vn 0.707306 -0.065990 0.703820
v 30.000000 15.191051 8.203665
vn 0.705956 -0.071752 0.704612
v 30.000000 11.547770 7.857165
vn 0.705524 -0.082482 0.703869
v 30.000000 9.751814 7.662535
vn 0.704474 -0.097597 0.702988
v 30.000000 8.024162 7.445222
vn 0.703396 -0.118949 0.700775
v 30.000000 6.401947 7.199317
vn 0.701972 -0.149915 0.696248
v 30.000000 4.919433 6.919466
vn 0.701698 -0.186314 0.687683
v 30.000000 3.606273 6.601035
vn 0.702383 -0.218687 0.677374
v 30.000000 3.020778 6.426138
vn 0.701402 -0.251761 0.666822
v 30.000000 2.485797 6.240269
vn 0.700174 -0.291110 0.651928
v 30.000000 2.003040 6.043133
vn 0.698664 -0.337584 0.630798
v 30.000000 1.573659 5.834519
vn 0.696871 -0.391564 0.600873
v 30.000000 1.198197 5.614307
vn 0.694894 -0.452315 0.559046
v 30.000000 0.876506 5.382463
vn 0.693004 -0.517070 0.502379
v 30.000000 0.607663 5.139047
vn 0.691719 -0.580438 0.429669
v 30.000000 0.389889 4.884210
vn 0.691422 -0.635696 0.343259
v 30.000000 0.220493 4.618203
vn 0.686139 -0.689465 0.232060
v 30.000000 0.095884 4.341384
vn 0.686520 -0.721271 0.091965
v 30.000000 -0.037692 3.757416
vn 0.696506 -0.717419 -0.013748
v 30.000000 -0.056256 3.138010
vn 0.715573 -0.698238 -0.020458
v 30.000000 0.032658 1.827342
vn 0.707918 -0.706156 0.013998
v 30.000000 0.016877 0.504303
vn 0.690189 -0.722561 -0.039314
v 30.000000 0.001481 -0.115066
vn 0.681988 -0.707472 -0.185407
v 30.000000 0.078064 -0.676331
vn 0.684880 -0.646362 -0.336385
v 30.000000 0.177790 -0.929266
vn 0.684502 -0.562246 -0.464043
v 30.000000 0.331271 -1.161435
vn 0.687269 -0.458142 -0.563708
v 30.000000 0.545968 -1.371753
vn 0.691301 -0.356282 -0.628622
v 30.000000 0.826957 -1.559708
vn 0.694995 -0.270664 -0.666125
v 30.000000 1.176930 -1.725331
vn 0.698044 -0.203539 -0.686517
v 30.000000 1.596587 -1.869108
vn 0.700356 -0.152084 -0.697404
v 30.000000 2.085082 -1.991888
vn 0.701992 -0.112928 -0.703175
v 30.000000 2.640446 -2.094794
vn 0.703171 -0.082985 -0.706162
v 30.000000 3.259840 -2.179157
vn 0.704044 -0.059956 -0.707621
v 30.000000 3.939609 -2.246477
vn 0.704709 -0.042185 -0.708242
v 30.000000 4.675291 -2.298395
vn 0.704738 -0.026724 -0.708964
v 30.000000 5.461628 -2.336658
vn 0.705194 -0.011882 -0.708914
v 30.000000 7.005451 -2.377252
vn 0.705759 -0.000373 -0.708452
v 30.000000 8.656768 -2.389464
vn 0.706529 0.007155 -0.707648
v 30.000000 12.110321 -2.366466
vn 0.706807 0.010251 -0.707332
v 30.000000 13.831272 -2.344055
vn 0.706546 0.012824 -0.707551
v 30.000000 15.501485 -2.317515
vn 0.706145 0.016458 -0.707876
v 30.000000 17.092804 -2.285722
vn 0.705802 0.021978 -0.708068
v 30.000000 18.582697 -2.246343
vn 0.705665 0.029135 -0.707947
v 30.000000 19.954018 -2.197438
vn 0.705872 0.036986 -0.707373
v 30.000000 21.194740 -2.139177
vn 0.706538 0.042798 -0.706380
v 30.000000 22.297642 -2.075361
vn 0.708213 0.040913 -0.704812
v 30.000000 23.259989 -2.014541
vn 0.712261 0.020111 -0.701627
v 30.000000 24.083271 -1.971274
vn 0.721255 -0.041400 -0.691431
v 30.000000 24.773029 -1.968328
vn 0.727997 -0.147994 -0.669416
v 30.000000 25.338749 -2.039175
vn 0.727295 -0.265412 -0.632928
v 30.000000 25.579050 -2.115934
vn 0.730457 -0.377832 -0.568925
v 30.000000 25.793484 -2.228932
vn 0.730195 -0.484219 -0.482024
v 30.000000 25.984135 -2.385484
vn 0.726759 -0.567959 -0.386321
v 30.000000 26.153200 -2.593288
vn 0.722164 -0.624171 -0.298145
v 30.000000 26.302895 -2.859857
vn 0.718037 -0.658143 -0.226430
v 30.000000 26.435345 -3.191852
vn 0.714912 -0.677732 -0.171989
v 30.000000 26.552481 -3.594429
vn 0.712721 -0.688965 -0.131741
v 30.000000 26.655941 -4.070642
vn 0.711229 -0.695527 -0.101963
v 30.000000 26.747044 -4.621290
vn 0.710215 -0.699473 -0.079578
v 30.000000 26.826820 -5.245183
vn 0.709522 -0.701920 -0.062345
v 30.000000 26.896059 -5.939472
vn 0.709041 -0.703483 -0.048709
v 30.000000 26.955338 -6.699978
vn 0.708709 -0.704499 -0.037591
v 30.000000 27.005081 -7.521472
vn 0.709056 -0.704661 -0.026306
v 30.000000 27.045547 -8.397826
vn 0.709301 -0.704790 -0.012758
v 30.000000 27.099003 -10.286575
vn 0.709221 -0.704985 0.001356
v 30.000000 27.114952 -12.301959
vn 0.709462 -0.704562 0.016012
v 30.000000 27.090588 -14.370722
vn 0.710032 -0.703383 0.033272
v 30.000000 27.021626 -16.415909
vn 0.709985 -0.702285 0.052129
v 30.000000 26.902948 -18.362595
vn 0.709549 -0.701297 0.068727
v 30.000000 26.823242 -19.277197
vn 0.710035 -0.698966 0.085421
v 30.000000 26.729000 -20.142057
vn 0.710709 -0.695506 0.105657
v 30.000000 26.619505 -20.950077
vn 0.711602 -0.690350 0.130539
v 30.000000 26.494045 -21.695021
vn 0.712789 -0.682551 0.161420
v 29.999994 26.351965 -22.371765
vn 0.714354 -0.670619 0.199919
v 29.999994 26.192678 -22.976526
vn 0.716389 -0.652230 0.247756
v 29.999994 26.015711 -23.507105
vn 0.718938 -0.623952 0.306286
v 29.999994 25.820709 -23.963049
vn 0.721892 -0.581301 0.375447
v 29.999994 25.607433 -24.345648
vn 0.724787 -0.519893 0.452100
v 29.999994 25.375742 -24.657856
vn 0.726525 -0.438737 0.528840
v 29.999994 25.125559 -24.904205
vn 0.726586 -0.342011 0.595903
v 29.999994 24.856855 -25.090715
vn 0.724618 -0.242932 0.644914
v 29.999994 24.569620 -25.224630
vn 0.726162 -0.136942 0.673747
v 29.999994 24.263802 -25.313948
vn 0.721207 -0.034988 0.691835
v 29.999994 23.595713 -25.391237
vn 0.712462 0.026419 0.701213
v 29.999994 22.849535 -25.381523
vn 0.707286 0.044027 0.705555
v 29.999994 22.019657 -25.330265
vn 0.704111 0.034582 0.709247
v 29.999994 21.099363 -25.272505
vn 0.702383 0.009211 0.711739
v 29.999994 20.082497 -25.237167
vn 0.701893 -0.023824 0.711884
v 29.999994 18.964546 -25.247244
vn 0.702223 -0.058216 0.709573
v 29.999994 17.743507 -25.318377
vn 0.702881 -0.090128 0.705574
v 29.999994 16.420792 -25.458849
vn 0.703463 -0.117982 0.700871
v 29.999994 15.002238 -25.670876
vn 0.703967 -0.140795 0.696137
v 29.999994 13.499299 -25.951517
vn 0.704700 -0.158661 0.691537
v 29.999994 11.930531 -26.292789
vn 0.704931 -0.174666 0.687433
v 29.999994 10.322615 -26.681793
vn 0.705786 -0.188115 0.682992
v 29.999994 7.131649 -27.533689
vn 0.706524 -0.194439 0.680453
v 29.999994 5.631601 -27.957825
vn 0.706746 -0.197222 0.679422
v 29.999994 4.252389 -28.355757
vn 0.706882 -0.199464 0.678625
v 29.999994 3.033163 -28.712212
vn 0.706824 -0.201331 0.678134
v 29.999994 2.493956 -28.871456
vn 0.706602 -0.203921 0.677592
v 29.999994 2.006085 -29.017036
vn 0.706224 -0.208355 0.676636
v 29.999994 1.571959 -29.148796
vn 0.705556 -0.216106 0.674900
v 29.999994 1.193171 -29.267128
vn 0.704318 -0.229762 0.671673
v 29.999994 0.870415 -29.373018
vn 0.702122 -0.253748 0.665309
v 29.999994 0.603395 -29.468096
vn 0.698218 -0.296414 0.651637
v 29.999994 0.390730 -29.554689
vn 0.691008 -0.372336 0.619576
v 29.999994 0.229812 -29.635908
vn 0.687621 -0.469678 0.553697
v 29.999994 0.116645 -29.715757
vn 0.690198 -0.552953 0.466765
v 29.999994 0.076302 -29.756683
vn 0.688369 -0.622883 0.371704
v 29.999994 0.045688 -29.799221
vn 0.688789 -0.676214 0.261352
v 29.999994 0.023845 -29.844166
vn 0.685712 -0.715205 0.135208
v 29.999994 0.009712 -29.892368
vn 0.689599 -0.724172 0.005317
v 29.999994 0.000003 -30.002150
vn 0.709749 -0.703016 -0.045004
v 29.999994 0.022069 -30.299870
vn 0.715484 -0.698521 -0.012337
v 29.999994 0.045652 -30.739035
vn 0.712396 -0.701126 0.030239
v 29.999994 0.034202 -31.337605
vn 0.708653 -0.703612 0.052353
v 29.999994 -0.015867 -32.083118
vn 0.705004 -0.707398 0.050567
v 29.999994 -0.085704 -32.942577
vn 0.700794 -0.712978 0.023454
v 29.999994 -0.142915 -33.870865
vn 0.698024 -0.715599 -0.026091
v 29.999994 -0.147117 -34.816341
vn 0.696647 -0.711856 -0.089130
v 29.999994 -0.100106 -35.422859
vn 0.692854 -0.701027 -0.168864
v 29.999994 0.000000 -36.000000
vn 0.688962 -0.671926 -0.271748
v 29.999994 0.179744 -36.578987
vn 0.686635 -0.614197 -0.388967
v 29.999994 0.442257 -37.097832
vn 0.686004 -0.526289 -0.502413
v 29.999994 0.793139 -37.548550
vn 0.687263 -0.417675 -0.594320
v 29.999994 1.234926 -37.926460
vn 0.689926 -0.305627 -0.656196
v 29.999994 1.767458 -38.229805
vn 0.693166 -0.204793 -0.691072
v 29.999994 2.388186 -38.459412
vn 0.696277 -0.121806 -0.707362
v 29.999994 3.092455 -38.618412
vn 0.698911 -0.057120 -0.712924
v 29.999994 3.873777 -38.711926
vn 0.701025 -0.008369 -0.713087
v 29.999994 4.724135 -38.746750
vn 0.702703 0.027365 -0.710957
v 29.999994 5.634303 -38.731022
vn 0.703793 0.052907 -0.708432
v 29.999994 6.594163 -38.673870
vn 0.704391 0.070955 -0.706257
v 29.999994 7.593030 -38.585091
vn 0.710364 0.068430 -0.700500
v 29.999994 11.761100 -38.115604
vn 0.712282 0.040378 -0.700731
v 29.999994 14.082951 -37.926216
vn 0.711800 0.011939 -0.702281
v 29.999994 15.193675 -37.886158
vn 0.712574 -0.019631 -0.701322
v 29.999994 16.261566 -37.890358
vn 0.713083 -0.052779 -0.699090
v 29.999994 17.282484 -37.941776
vn 0.713114 -0.090733 -0.695152
v 29.999994 18.254372 -38.041256
vn 0.713369 -0.129870 -0.688650
v 29.999994 19.176991 -38.187836
vn 0.713398 -0.169307 -0.679999
v 29.999994 20.051655 -38.379009
vn 0.713173 -0.207424 -0.669596
v 29.999994 20.880999 -38.611008
vn 0.714731 -0.248188 -0.653883
v 29.999994 21.668764 -38.878994
vn 0.714104 -0.294626 -0.635020
v 29.999994 23.138731 -39.499779
vn 0.709969 -0.326903 -0.623761
v 29.999994 24.505194 -40.190739
vn 0.705807 -0.333851 -0.624805
v 29.999994 25.815012 -40.900002
vn 0.702603 -0.318582 -0.636281
v 29.999994 27.110714 -41.582569
vn 0.700836 -0.287153 -0.652972
v 29.999994 28.424744 -42.206398
vn 0.700603 -0.248997 -0.668697
v 29.999994 29.776484 -42.755276
vn 0.701115 -0.210977 -0.681122
v 29.999994 31.171547 -43.229073
vn 0.708463 -0.193441 -0.678720
v 29.999994 34.041954 -44.031784
vn 0.715601 -0.219915 -0.662988
v 29.999994 35.454117 -44.442280
vn 0.707047 0.641076 -0.298507
v 29.999994 49.882423 -41.139351
vn 0.706865 0.641647 -0.297710
v 29.999994 50.674103 -39.437923
vn 0.706714 0.642640 -0.295920
v 29.999994 51.413586 -37.839527
vn 0.706583 0.643994 -0.293278
v 29.999994 52.100494 -36.340706
vn 0.706464 0.645664 -0.289872
v 29.999994 52.736290 -34.935638
vn 0.706345 0.647636 -0.285734
v 29.999994 53.323872 -33.616405
vn 0.706211 0.649930 -0.280814
v 29.999994 53.867146 -32.373188
vn 0.706046 0.652602 -0.274972
v 29.999994 54.370609 -31.194521
vn 0.705836 0.655736 -0.267968
v 29.999994 54.838947 -30.067495
vn 0.704646 0.661394 -0.256965
v 29.999994 55.276649 -28.978027
vn 0.702547 0.671604 -0.235320
v 29.999994 56.074310 -26.850750
vn 0.702667 0.681087 -0.205862
v 29.999994 56.779934 -24.685108
vn 0.703666 0.687286 -0.180255
v 29.999994 57.097904 -23.547651
vn 0.703519 0.692988 -0.157572
v 29.999994 57.390564 -22.353683
vn 0.703501 0.697919 -0.134144
v 30.000000 57.655663 -21.089922
vn 0.703597 0.701904 -0.110822
v 30.000000 57.890682 -19.745119
vn 0.703779 0.704906 -0.088332
v 30.000000 58.093124 -18.310602
vn 0.704020 0.706992 -0.067220
v 30.000000 58.260780 -16.780792
vn 0.704295 0.708293 -0.047854
v 30.000000 58.392002 -15.153718
vn 0.704592 0.708959 -0.030459
v 30.000000 58.485950 -13.431509
vn 0.704902 0.709143 -0.015169
v 30.000000 58.542877 -11.620973
vn 0.705221 0.708984 -0.002047
v 30.000000 58.564388 -9.734053
vn 0.705529 0.708624 0.008980
v 30.000000 58.553474 -7.787474
vn 0.705533 0.708419 0.019118
v 30.000000 58.514431 -5.801847
vn 0.706208 0.707440 0.028275
v 30.000000 58.374775 -1.808913
vn 0.706907 0.706581 0.032036
v 30.000000 58.287941 0.147514
vn 0.707330 0.706162 0.031926
v 30.000000 58.200256 2.043410
vn 0.707855 0.705776 0.028640
v 30.000000 58.120346 3.855065
vn 0.707822 -0.329980 0.624581
v 30.000000 34.253681 14.585945
vn 0.707057 -0.328471 0.626241
v 30.000000 33.258583 14.064274
vn 0.706293 -0.331271 0.625628
v 30.000000 32.321003 13.572205
vn 0.705510 -0.338351 0.622715
v 30.000000 31.452284 13.108158
vn 0.704803 -0.349493 0.617338
v 30.000000 30.660385 12.670568
vn 0.704335 -0.363831 0.609541
v 30.000000 29.948956 12.257962
vn 0.704396 -0.378780 0.600293
v 30.000000 29.315687 11.869099
vn 0.705565 -0.389406 0.592065
v 30.000000 28.750889 11.503089
vn 0.713375 -0.372470 0.593601
v 30.000000 27.754021 10.837770
vn 0.717584 -0.324518 0.616247
v 30.000000 26.772318 10.260327
vn 0.714901 -0.274553 0.643068
v 30.000000 26.225433 10.004089
vn 0.714144 -0.231684 0.660546
v 30.000000 25.616301 9.768682
vn 0.713081 -0.193223 0.673929
v 30.000000 24.933372 9.553002
vn -0.705101 -0.027728 -0.708565
v 25.000000 5.461628 -2.336658
vn -0.704877 -0.041130 -0.708136
v 25.000000 4.718313 -2.300915
vn -0.704373 -0.057008 -0.707537
v 25.000000 4.037603 -2.254529
vn -0.703769 -0.076657 -0.706281
v 25.000000 3.416399 -2.196687
vn -0.703014 -0.100916 -0.703980
v 25.000000 2.852480 -2.126693
vn -0.702061 -0.130792 -0.700003
v 25.000000 2.349583 -2.044834
vn -0.700817 -0.167538 -0.693388
v 25.000000 1.905932 -1.951152
vn -0.699176 -0.212661 -0.682589
v 25.000000 1.518290 -1.845551
vn -0.697362 -0.266923 -0.665160
v 25.000000 1.183523 -1.727974
vn -0.695595 -0.330418 -0.637944
v 25.000000 0.904406 -1.601463
vn -0.693971 -0.401203 -0.597863
v 25.000000 0.678029 -1.468641
vn -0.691937 -0.476509 -0.542368
v 25.000000 0.496002 -1.329845
vn -0.690026 -0.551917 -0.468244
v 25.000000 0.344149 -1.176694
vn -0.689503 -0.619497 -0.375246
v 25.000000 0.220150 -1.005039
vn -0.684772 -0.682398 -0.255774
v 25.000000 0.126897 -0.818386
vn -0.686643 -0.718147 -0.113076
v 25.000000 0.026005 -0.427998
vn -0.696564 -0.717452 -0.007856
v 25.000000 0.001774 -0.026085
vn -0.706788 -0.706820 0.029251
v 25.000000 0.018798 0.418681
vn -0.713140 -0.700880 0.014108
v 25.000000 0.042942 0.960524
vn -0.714024 -0.699730 -0.023407
v 25.000000 0.040070 1.653669
vn -0.706617 -0.706220 -0.044104
v 25.000000 -0.013193 2.483971
vn -0.693644 -0.720314 0.002427
v 25.000000 -0.058650 3.244395
vn -0.688179 -0.716688 0.112998
v 25.000000 -0.011616 3.938171
vn -0.690688 -0.687398 0.224577
v 25.000000 0.068379 4.261773
vn -0.690026 -0.645283 0.327833
v 25.000000 0.195541 4.570431
vn -0.690299 -0.586035 0.424322
v 25.000000 0.375697 4.864818
vn -0.691756 -0.516228 0.504958
v 25.000000 0.614279 5.145807
vn -0.693996 -0.444523 0.566364
v 25.000000 0.917077 5.414547
vn -0.696253 -0.377858 0.610292
v 25.000000 1.288772 5.671477
vn -0.698336 -0.319546 0.640482
v 25.000000 1.733461 5.916631
vn -0.700093 -0.270694 0.660753
v 25.000000 2.246742 6.146926
vn -0.701441 -0.230607 0.674389
v 25.000000 2.820940 6.360440
vn -0.702485 -0.197802 0.683658
v 25.000000 3.454226 6.558497
vn -0.703190 -0.170891 0.690160
v 25.000000 4.144774 6.742424
vn -0.703905 -0.148990 0.694492
v 25.000000 4.890759 6.913545
vn -0.704501 -0.130939 0.697519
v 25.000000 5.690354 7.073185
vn -0.704228 -0.113666 0.700816
v 25.000000 6.538578 7.222241
vn -0.704269 -0.095202 0.703521
v 25.000000 8.329456 7.488325
vn -0.704736 -0.078669 0.705095
v 25.000000 10.212727 7.718843
vn -0.705995 -0.067320 0.705010
v 25.000000 14.110267 8.109507
vn -0.707067 -0.065703 0.704087
v 25.000000 16.051449 8.287814
vn -0.707854 -0.070264 0.702856
v 25.000000 17.938850 8.466883
vn -0.708870 -0.080425 0.700740
v 25.000000 19.735929 8.655793
vn -0.710320 -0.097919 0.697035
v 25.000000 21.406141 8.863626
vn -0.710703 -0.120495 0.693096
v 25.000000 22.911793 9.099218
vn -0.710113 -0.141201 0.689785
v 25.000000 23.574486 9.225678
vn -0.711566 -0.163571 0.683315
v 25.000000 24.169331 9.355659
vn -0.714970 -0.001206 -0.699154
v 25.000000 24.438347 -1.962803
vn -0.709934 0.033395 -0.703476
v 25.000000 23.735600 -1.986784
vn -0.707447 0.043930 -0.705400
v 25.000000 22.862825 -2.039587
vn -0.706726 0.044052 -0.706115
v 25.000000 22.310610 -2.074914
vn -0.706571 0.041430 -0.706428
v 25.000000 21.970036 -2.095620
vn -0.706563 0.038505 -0.706602
v 25.000000 21.576748 -2.117908
vn -0.706643 0.034919 -0.706708
v 25.000000 21.123453 -2.141442
vn -0.706734 0.031587 -0.706774
v 25.000000 20.602861 -2.165884
vn -0.706885 0.028043 -0.706772
v 25.000000 20.007687 -2.190897
vn -0.706958 0.024829 -0.706820
v 25.000000 19.330635 -2.216148
vn -0.707035 0.021720 -0.706845
v 25.000000 18.564419 -2.241297
vn -0.707021 0.018839 -0.706942
v 25.000000 17.701746 -2.266008
vn -0.706924 0.016104 -0.707106
v 25.000000 16.735329 -2.289945
vn -0.706854 0.013836 -0.707224
v 25.000000 15.657880 -2.312771
vn -0.706841 0.011504 -0.707279
v 25.000000 14.462104 -2.334149
vn -0.706686 0.008999 -0.707470
v 25.000000 13.140713 -2.353743
vn -0.706305 0.004886 -0.707891
v 25.000000 10.202907 -2.384398
vn -0.706241 -0.000078 -0.707971
v 25.000000 8.843874 -2.389395
vn -0.705942 -0.006731 -0.708238
v 25.000000 7.604637 -2.384336
vn -0.705448 -0.016122 -0.708578
v 25.000000 6.479214 -2.367373
vn 0.708457 0.705605 0.014496
v 22.000000 58.030144 6.583652
vn 0.708866 0.705328 0.004692
v 22.000000 58.016083 7.549005
vn 0.709418 0.704737 -0.008452
v 22.000000 58.017403 8.457397
vn 0.710128 0.703624 -0.025116
v 22.000000 58.036415 9.308758
vn 0.710846 0.701823 -0.046292
v 22.000000 58.075428 10.103023
vn 0.711776 0.698611 -0.072920
v 22.000000 58.136585 10.838724
vn 0.712747 0.693612 -0.104378
v 22.000000 58.220638 11.509662
vn 0.713511 0.686262 -0.141235
v 22.000000 58.328510 12.122121
vn 0.714241 0.675657 -0.182614
v 22.000000 58.461266 12.684445
vn 0.718569 0.654014 -0.236483
v 22.000000 58.619976 13.204982
vn 0.721778 0.618841 -0.309956
v 22.000000 59.020092 14.155158
vn 0.718077 0.581888 -0.381800
v 22.000000 59.542934 15.046820
vn 0.712278 0.556334 -0.427962
v 22.000000 60.203640 15.950969
vn 0.709233 0.544069 -0.448305
v 22.000000 60.590347 16.429655
vn 0.708288 0.538231 -0.456766
v 22.000000 61.017105 16.937782
vn 0.707459 0.535627 -0.461091
v 22.000000 61.485779 17.484121
vn 0.706980 0.535483 -0.461993
v 22.000000 61.998230 18.077444
vn 0.706895 0.536176 -0.461319
v 22.000000 62.271454 18.394464
vn 0.706843 0.537040 -0.460392
v 22.000000 62.418404 18.565527
vn 0.706741 0.538376 -0.458986
v 22.000000 62.612896 18.792904
vn 0.706632 0.540197 -0.457010
v 22.000000 62.857403 19.080679
vn 0.706586 0.542190 -0.454715
v 22.000000 63.146530 19.423840
vn 0.706598 0.544514 -0.451911
v 22.000000 63.474903 19.817362
vn 0.706057 0.548359 -0.448092
v 22.000000 63.837128 20.256235
vn 0.705471 0.554069 -0.441949
v 22.000000 64.641594 21.249949
vn 0.704107 0.565018 -0.430102
v 22.000006 65.516876 22.364841
vn 0.702518 0.582851 -0.408353
v 22.000006 67.305000 24.793970
vn 0.702843 0.599554 -0.382813
v 22.000006 68.089050 25.968634
vn 0.702702 0.614551 -0.358521
v 22.000006 68.417496 26.506306
vn 0.700588 0.633596 -0.328227
v 22.000006 68.691261 26.998262
vn 0.697358 0.659098 -0.281570
v 22.000006 68.900116 27.432602
vn 0.692869 0.690186 -0.208748
v 22.000006 69.033440 27.787722
vn 0.686579 0.720647 -0.096320
v 22.000006 69.099411 28.071886
vn 0.685867 0.726375 0.044332
v 22.000006 69.108032 28.301544
vn 0.690108 0.703516 0.169755
v 22.000006 69.091133 28.406912
vn 0.688717 0.668172 0.281452
v 22.000006 69.061646 28.497568
vn 0.688285 0.611123 0.390887
v 22.000006 69.014458 28.587576
vn 0.683575 0.527020 0.504950
v 22.000006 68.952744 28.667894
vn 0.684734 0.407365 0.604312
v 22.000006 68.792618 28.802044
vn 0.693261 0.297351 0.656484
v 22.000006 68.580025 28.915184
vn 0.699342 0.229009 0.677108
v 22.000006 68.314392 29.015676
vn 0.703415 0.192308 0.684270
v 22.000006 67.996368 29.110828
vn 0.706173 0.177565 0.685412
v 22.000006 67.626106 29.208334
vn 0.709278 0.180774 0.681356
v 22.000006 67.203766 29.315901
vn 0.712511 0.203996 0.671353
v 22.000006 66.236046 29.583530
vn 0.712354 0.237668 0.660352
v 22.000006 65.289825 29.897623
vn 0.712116 0.269193 0.648402
v 22.000006 64.400627 30.242781
vn 0.711781 0.297317 0.636373
v 22.000006 63.571789 30.609079
vn 0.711417 0.323221 0.624031
v 22.000006 62.806625 30.986589
vn 0.711336 0.346349 0.611591
v 22.000006 62.108482 31.365385
vn 0.711040 0.366810 0.599894
v 22.000006 61.480694 31.735537
vn 0.710592 0.385832 0.588381
v 22.000006 60.926598 32.087124
vn 0.710274 0.402529 0.577478
v 22.000006 60.449524 32.410206
vn 0.709874 0.417497 0.567252
v 22.000006 60.052811 32.694862
vn 0.709573 0.430672 0.557699
v 22.000006 59.739796 32.931164
vn 0.711106 0.445631 0.543821
v 22.000006 59.501995 33.119011
vn 0.712491 0.467369 0.523376
v 22.000006 59.133373 33.432388
vn 0.708487 0.486198 0.511525
v 22.000006 58.852356 33.695976
vn 0.701540 0.480985 0.525828
v 22.000006 58.315414 34.212601
vn 0.705184 0.460685 0.538966
v 22.000006 57.954933 34.526112
vn 0.707796 0.456073 0.539466
v 22.000006 57.516834 34.894142
vn 0.709303 0.462160 0.532256
v 22.000006 57.002266 35.331993
vn 0.711507 0.476671 0.516277
v 22.000006 56.422447 35.845860
vn 0.714114 0.501906 0.487988
v 22.000006 55.341324 36.887173
vn 0.715337 0.533851 0.450883
v 22.000006 54.403522 37.916649
vn 0.716136 0.566549 0.407641
v 22.000006 53.593170 38.951950
vn 0.716149 0.597530 0.360678
v 22.000006 52.894394 40.010735
vn 0.714496 0.623753 0.316903
v 22.000006 52.308357 41.076035
vn 0.710827 0.642073 0.287173
v 22.000006 51.852455 42.051373
vn 0.705253 0.649551 0.284081
v 22.000006 51.481533 42.919003
vn 0.696834 0.640267 0.323235
v 22.000006 51.146847 43.667000
vn 0.691325 0.601718 0.400007
v 22.000006 50.794754 44.289783
vn 0.691455 0.538803 0.481228
v 22.000006 50.593330 44.550331
vn 0.688029 0.457141 0.563594
v 22.000006 50.364811 44.771942
vn 0.685736 0.344399 0.641214
v 22.000006 50.100540 44.949936
vn 0.685279 0.208757 0.697720
v 22.000006 49.784283 45.081013
vn 0.687834 0.071884 0.722300
v 22.000006 49.394016 45.155510
vn 0.687965 -0.056926 0.723508
v 22.000006 48.962719 45.159988
vn 0.691411 -0.174263 0.701129
v 22.000006 48.163219 45.023655
vn 0.696068 -0.259403 0.669477
v 22.000006 47.394871 44.771000
vn 0.697962 -0.321004 0.640161
v 22.000006 46.603378 44.416061
vn 0.698375 -0.373721 0.610414
v 22.000006 45.817631 43.978592
vn 0.697408 -0.426154 0.576207
v 22.000006 45.022881 43.445549
vn 0.695479 -0.483648 0.531406
v 22.000006 44.241520 42.809826
vn 0.692646 -0.548251 0.468682
v 22.000006 43.521542 42.077209
vn 0.693795 -0.604781 0.391009
v 22.000006 42.910152 41.251762
vn 0.696603 -0.641467 0.321348
v 22.000006 42.657898 40.802849
vn 0.696107 -0.669866 0.258292
v 22.000006 42.447693 40.328823
vn 0.695888 -0.692677 0.189577
v 22.000006 42.287102 39.840656
vn 0.691639 -0.714250 0.107155
v 22.000006 42.176956 39.343815
vn 0.690002 -0.723802 -0.002667
v 22.000006 42.097694 38.318146
vn 0.694155 -0.712397 -0.103154
v 22.000006 42.187294 37.251759
vn 0.698431 -0.694695 -0.172027
v 22.000006 42.414307 36.150436
vn 0.702251 -0.679261 -0.213185
v 22.000006 42.744877 35.009968
vn 0.706036 -0.669839 -0.229844
v 22.000006 43.145161 33.826126
vn 0.711826 -0.668398 -0.215750
v 22.000006 44.016228 31.318041
vn 0.714160 -0.676657 -0.179196
v 22.000006 44.409451 30.003216
vn 0.717111 -0.685480 -0.125975
v 22.000006 44.720291 28.657116
vn 0.720166 -0.691738 -0.053478
v 22.000006 44.907887 27.288752
vn 0.718706 -0.694881 0.024544
v 22.000006 44.932152 25.918089
vn 0.715662 -0.692863 0.088144
v 22.000006 44.871979 25.233606
vn 0.716057 -0.683262 0.142884
v 22.000006 44.757854 24.550158
vn 0.716147 -0.669205 0.198239
v 22.000006 44.589249 23.881119
vn 0.716009 -0.650945 0.252194
v 22.000006 44.368275 23.234116
vn 0.715720 -0.629066 0.303350
v 22.000006 44.095997 22.607853
vn 0.715304 -0.604397 0.350777
v 22.000006 43.773483 22.001034
vn 0.714769 -0.577941 0.393813
v 22.000000 43.401810 21.412380
vn 0.717306 -0.541946 0.437912
v 22.000000 42.983273 20.842007
vn 0.718703 -0.493875 0.489442
v 22.000000 42.014183 19.753166
vn 0.716396 -0.446588 0.536037
v 22.000000 40.879742 18.723322
vn 0.714303 -0.406415 0.569734
v 22.000000 39.595478 17.742210
vn 0.712392 -0.374592 0.593446
v 22.000000 38.210049 16.820423
vn 0.710715 -0.351230 0.609526
v 22.000000 36.764164 15.955346
vn -0.712743 -0.360499 0.601696
v 8.000003 37.369370 16.307798
vn -0.713710 -0.395694 0.577965
v 8.000003 39.392490 17.599031
vn -0.713149 -0.429301 0.554183
v 8.000003 40.343590 18.294382
vn -0.714861 -0.461681 0.525190
v 8.000003 41.237865 19.027708
vn -0.716342 -0.497383 0.489352
v 8.000003 42.062946 19.802624
vn -0.717538 -0.537780 0.442643
v 8.000003 42.808376 20.625196
vn -0.719278 -0.579352 0.383393
v 8.000003 43.466557 21.508333
vn -0.720778 -0.618967 0.312025
v 8.000003 44.022545 22.459246
vn -0.721485 -0.653286 0.229515
v 8.000003 44.460545 23.483604
vn -0.721320 -0.677869 0.142098
v 8.000003 44.758064 24.551161
vn -0.720253 -0.691412 0.056437
v 8.000003 44.914463 25.647261
vn -0.718567 -0.695097 -0.022389
v 8.000003 44.938042 26.773104
vn -0.716626 -0.691657 -0.089764
v 8.000003 44.841690 27.903152
vn -0.714758 -0.684423 -0.143829
v 8.000003 44.646168 29.025244
vn -0.712864 -0.676536 -0.184728
v 8.000003 44.373032 30.135872
vn -0.711592 -0.668651 -0.215738
v 8.000003 44.044704 31.228455
vn -0.706209 -0.669793 -0.229449
v 8.000006 43.315678 33.335930
vn -0.700699 -0.682311 -0.208499
v 8.000006 42.637295 35.343121
vn -0.699076 -0.696040 -0.163771
v 8.000006 42.372211 36.310604
vn -0.695764 -0.710888 -0.102715
v 8.000006 42.185928 37.254818
vn -0.692362 -0.721313 -0.018507
v 8.000006 42.100494 38.176353
vn -0.689658 -0.718800 0.087736
v 8.000006 42.137608 39.072220
vn -0.688442 -0.695372 0.206169
v 8.000006 42.314651 39.937515
vn -0.689290 -0.649969 0.320030
v 8.000006 42.644630 40.776260
vn -0.691795 -0.591319 0.414442
v 8.000006 43.110935 41.553787
vn -0.694510 -0.530838 0.485661
v 8.000006 43.685726 42.261074
vn -0.696765 -0.474501 0.537929
v 8.000006 44.343044 42.899956
vn -0.698286 -0.423405 0.577169
v 8.000006 45.051559 43.466389
vn -0.699049 -0.375840 0.608338
v 8.000006 45.778252 43.954422
vn -0.698755 -0.328113 0.635676
v 8.000006 46.497486 44.361965
vn -0.697131 -0.273856 0.662579
v 8.000006 47.224743 44.702282
vn -0.693940 -0.203759 0.690601
v 8.000006 47.943161 44.961735
vn -0.692779 -0.116702 0.711645
v 8.000006 48.653233 45.126076
vn -0.692552 -0.022132 0.721028
v 8.000006 49.044903 45.164326
vn -0.688412 0.088927 0.719848
v 8.000006 49.448666 45.149700
vn -0.686094 0.220711 0.693225
v 8.000006 49.813156 45.072025
vn -0.686432 0.350548 0.637124
v 8.000006 50.114330 44.942383
vn -0.688576 0.459020 0.561395
v 8.000006 50.369591 44.767982
vn -0.686502 0.554675 0.470160
v 8.000006 50.592457 44.551319
vn -0.691410 0.622926 0.365944
v 8.000006 50.970547 44.005337
vn -0.701460 0.647628 0.297545
v 8.000006 51.304012 43.327801
vn -0.708209 0.647669 0.281006
v 8.000006 51.647846 42.524803
vn -0.712607 0.634755 0.298794
v 8.000006 52.052837 41.604774
vn -0.715230 0.612168 0.337189
v 8.000006 52.569019 40.577778
vn -0.716023 0.582657 0.384476
v 8.000006 53.232128 39.475487
vn -0.715510 0.550137 0.430574
v 8.000006 53.996887 38.415291
vn -0.714511 0.517839 0.470443
v 8.000006 54.858395 37.398743
vn -0.713322 0.488047 0.502974
v 8.000006 55.821838 36.408554
vn -0.710758 0.466485 0.526512
v 8.000006 56.889332 35.430126
vn -0.708202 0.456587 0.538496
v 8.000006 57.391529 34.999870
vn -0.706121 0.457531 0.540424
v 8.000006 57.824875 34.636189
vn -0.703099 0.471588 0.532218
v 8.000006 58.193501 34.321301
vn -0.704187 0.487279 0.516410
v 8.000006 58.500607 34.038975
vn -0.713871 0.470972 0.518241
v 8.000006 59.076809 33.481823
vn -0.711534 0.445544 0.543332
v 8.000006 59.494278 33.125191
vn -0.710207 0.427701 0.559177
v 8.000006 59.787537 32.894325
vn -0.710712 0.410326 0.571420
v 8.000003 60.215969 32.576031
vn -0.711236 0.390068 0.584799
v 8.000003 60.778362 32.185799
vn -0.711863 0.366691 0.598991
v 8.000003 61.467091 31.744354
vn -0.712454 0.340121 0.613781
v 8.000003 62.274513 31.272421
vn -0.712846 0.309893 0.629140
v 8.000003 63.193005 30.790728
vn -0.713241 0.275017 0.644712
v 8.000003 64.214920 30.320002
vn -0.713888 0.235013 0.659646
v 8.000003 65.332649 29.880966
vn -0.712192 0.198224 0.673417
v 8.000003 66.538551 29.494350
vn -0.708602 0.178890 0.682555
v 8.000003 67.132332 29.334404
vn -0.705743 0.179179 0.685435
v 8.000003 67.650749 29.202248
vn -0.701276 0.202654 0.683479
v 8.000003 68.093460 29.083607
vn -0.693795 0.264386 0.669887
v 8.000003 68.460312 28.964119
vn -0.688905 0.360513 0.628840
v 8.000003 68.748901 28.829308
vn -0.689857 0.456323 0.562020
v 8.000003 68.869667 28.746050
vn -0.686009 0.550361 0.475914
v 8.000003 68.958298 28.661701
vn -0.682754 0.640283 0.351973
v 8.000003 69.034424 28.554081
vn -0.684882 0.698881 0.206161
v 8.000003 69.084457 28.432217
vn -0.687548 0.722907 0.068435
v 8.000003 69.107277 28.309809
vn -0.684377 0.725371 -0.073932
v 8.000003 69.107964 28.149311
vn -0.692013 0.694068 -0.198463
v 8.000003 69.027786 27.769596
vn -0.699654 0.662994 -0.266315
v 8.000003 68.940834 27.529970
vn -0.699349 0.643162 -0.311855
v 8.000003 68.818100 27.252062
vn -0.700689 0.620064 -0.352924
v 8.000003 68.478783 26.611942
vn -0.703073 0.600112 -0.381514
v 8.000003 68.036140 25.885662
vn -0.704333 0.586720 -0.399593
v 8.000003 67.512863 25.095694
vn -0.704177 0.576129 -0.414981
v 8.000003 66.934822 24.269615
vn -0.704471 0.563962 -0.430891
v 8.000003 65.708122 22.617420
vn -0.705621 0.553439 -0.442497
v 8.000003 64.472801 21.046089
vn -0.706395 0.547714 -0.448348
v 8.000003 63.883293 20.320036
vn -0.706542 0.543863 -0.452783
v 8.000003 63.329262 19.649342
vn -0.706576 0.540946 -0.456211
v 8.000003 62.823269 19.045725
vn -0.706639 0.538281 -0.459255
v 8.000003 62.377869 18.520899
vn -0.706728 0.536414 -0.461298
v 8.000003 62.005608 18.086578
vn -0.707103 0.535319 -0.461995
v 8.000003 61.719032 17.754480
vn -0.708351 0.536934 -0.458194
v 8.000003 61.276104 17.241295
vn -0.711505 0.546013 -0.442301
v 8.000003 60.491131 16.310625
vn -0.715348 0.567231 -0.408077
v 8.000003 59.831959 15.462778
vn -0.718962 0.598931 -0.352669
v 8.000003 59.289814 14.645960
vn -0.720965 0.634223 -0.279231
v 8.000003 58.855713 13.807927
vn -0.720451 0.664563 -0.198258
v 8.000003 58.519215 12.889819
vn -0.715548 0.686097 -0.131389
v 8.000003 58.273296 11.833052
vn -0.711971 0.696386 -0.090239
v 8.000003 58.182392 11.234719
vn -0.711218 0.700163 -0.062772
v 8.000000 58.112049 10.582330
vn -0.710371 0.702706 -0.039718
v 8.000000 58.061954 9.879703
vn -0.709730 0.704162 -0.020958
v 8.000000 58.030514 9.125810
vn -0.709180 0.705001 -0.006047
v 8.000000 58.016029 8.318842
vn -0.708723 0.705463 0.005853
v 8.000000 58.016792 7.456990
vn -0.708369 0.705683 0.014959
v 8.000000 58.031105 6.538447
vn -0.708536 0.643701 -0.289180
v 9.999994 46.492378 -48.494801
vn -0.708809 0.648093 -0.278505
v 9.999994 45.277416 -51.254734
vn -0.709353 0.653396 -0.264372
v 9.999994 44.130238 -53.994118
vn -0.710183 0.659795 -0.245583
v 9.999991 43.071945 -56.705120
vn -0.711202 0.667094 -0.221760
v 9.999991 42.117199 -59.398155
vn -0.711977 0.675524 -0.191720
v 9.999991 41.286453 -62.074905
vn -0.711612 0.684411 -0.158713
v 9.999991 40.608196 -64.717796
vn -0.711024 0.690619 -0.132247
v 9.999991 40.335152 -66.021049
vn -0.711281 0.694599 -0.107763
v 9.999991 40.110912 -67.309258
vn -0.711791 0.697930 -0.079040
v 9.999991 39.939037 -68.580208
vn -0.712417 0.700058 -0.048792
v 9.999991 39.823090 -69.831711
vn -0.712612 0.701411 -0.014356
v 9.999991 39.766628 -71.061569
vn -0.712940 0.700838 0.023299
v 9.999991 39.773212 -72.267548
vn -0.713322 0.697963 0.063400
v 9.999991 39.843563 -73.416840
vn -0.713675 0.692611 0.104681
v 9.999991 39.972103 -74.485458
vn -0.713969 0.684651 0.146632
v 9.999991 40.152702 -75.477623
vn -0.713887 0.674549 0.188012
v 9.999989 40.379238 -76.397552
vn -0.713789 0.661702 0.229466
v 9.999989 40.645588 -77.249458
vn -0.713635 0.647140 0.268208
v 9.999989 40.945621 -78.037567
vn -0.713388 0.630658 0.305529
v 9.999989 41.273251 -78.766068
vn -0.713043 0.613661 0.339102
v 9.999989 41.622555 -79.438866
vn -0.712368 0.596573 0.369638
v 9.999989 41.987366 -80.059875
vn -0.713454 0.575651 0.399510
v 9.999989 42.361454 -80.633026
vn -0.712493 0.554295 0.430246
v 9.999989 43.112568 -81.651527
vn -0.707406 0.546949 0.447687
v 9.999989 43.825928 -82.525696
vn -0.702354 0.560194 0.439184
v 9.999989 44.443050 -83.278297
vn -0.699855 0.585269 0.409466
v 9.999989 44.694561 -83.613739
vn -0.694979 0.623622 0.357910
v 9.999989 44.896938 -83.924591
vn -0.686539 0.678124 0.262320
v 9.999989 45.043247 -84.216354
vn -0.683056 0.720887 0.117290
v 9.999989 45.123169 -84.501358
vn -0.685239 0.727332 -0.037874
v 9.999989 45.130173 -84.643044
vn -0.682406 0.704760 -0.193997
v 9.999989 45.108395 -84.783737
vn -0.682978 0.642550 -0.347376
v 9.999989 45.055546 -84.914726
vn -0.686391 0.554367 -0.470686
v 9.999989 44.973671 -85.032394
vn -0.690594 0.463279 -0.555385
v 9.999989 44.870239 -85.132889
vn -0.689307 0.371164 -0.622168
v 9.999989 44.751896 -85.217308
vn -0.685328 0.252067 -0.683219
v 9.999989 44.485989 -85.347687
vn -0.688789 0.115929 -0.715633
v 9.999989 43.876877 -85.502777
vn -0.692792 0.002692 -0.721133
v 9.999989 43.130840 -85.556778
vn -0.693875 -0.090388 -0.714401
v 9.999989 42.272392 -85.501991
vn -0.693856 -0.179852 -0.697293
v 9.999989 41.336506 -85.323318
vn -0.696162 -0.259422 -0.669372
v 9.999989 40.357059 -85.002136
vn -0.698532 -0.317326 -0.641371
v 9.999989 39.849380 -84.774986
vn -0.697838 -0.368070 -0.614448
v 9.999989 39.336567 -84.496140
vn -0.697257 -0.419865 -0.580987
v 9.999989 38.826000 -84.160629
vn -0.696927 -0.470961 -0.540822
v 9.999989 38.325138 -83.763565
vn -0.696985 -0.518700 -0.495138
v 9.999989 37.857029 -83.316284
vn -0.697366 -0.561036 -0.446005
v 9.999989 37.434689 -82.830956
vn -0.697903 -0.597226 -0.395288
v 9.999989 37.056770 -82.309410
vn -0.698538 -0.627209 -0.344461
v 9.999989 36.721935 -81.753479
vn -0.699251 -0.651191 -0.294954
v 9.999989 36.429649 -81.166771
vn -0.697088 -0.675839 -0.239395
v 9.999989 36.180866 -80.558723
vn -0.696099 -0.698012 -0.168004
v 9.999989 35.802814 -79.296165
vn -0.698161 -0.709194 -0.098055
v 9.999989 35.564125 -77.997871
vn -0.699687 -0.713284 -0.040783
v 9.999989 35.438755 -76.660416
vn -0.701036 -0.713097 0.006366
v 9.999991 35.409676 -75.275574
vn -0.702043 -0.710686 0.045404
v 9.999991 35.464859 -73.855949
vn -0.702898 -0.707124 0.076874
v 9.999991 35.592270 -72.414154
vn -0.702577 -0.703480 0.107242
v 9.999991 35.780159 -70.961060
vn -0.703192 -0.697125 0.139775
v 9.999991 36.304012 -68.008339
vn -0.704730 -0.690376 0.163514
v 9.999991 36.968098 -65.030716
vn -0.706357 -0.686212 0.173703
v 9.999991 37.702366 -62.084270
vn -0.707812 -0.684735 0.173608
v 9.999991 38.436749 -59.225098
vn -0.708354 -0.685636 0.167744
v 9.999991 38.782093 -57.845760
vn -0.708915 -0.687355 0.158059
v 9.999991 39.101200 -56.509277
vn -0.709610 -0.689798 0.143638
v 9.999991 39.385231 -55.223049
vn -0.710613 -0.692643 0.123591
v 9.999994 39.624916 -53.997478
vn -0.711785 -0.695668 0.096999
v 9.999994 39.812515 -52.836357
vn -0.713124 -0.698311 0.061779
v 9.999994 39.940662 -51.741646
vn -0.715016 -0.698924 0.016036
v 9.999994 40.001991 -50.715317
vn -0.717170 -0.695669 -0.041382
v 9.999994 39.989143 -49.759621
vn -0.719528 -0.685200 -0.113049
v 9.999994 39.896030 -48.885803
vn -0.722087 -0.663097 -0.197214
v 9.999994 39.718765 -48.096043
vn -0.724250 -0.625139 -0.290970
v 9.999994 39.453621 -47.386463
vn -0.725551 -0.568886 -0.387226
v 9.999994 39.094433 -46.750046
vn -0.725429 -0.496809 -0.476376
v 9.999994 38.633705 -46.182087
vn -0.723595 -0.418825 -0.548631
v 9.999994 38.075760 -45.688240
vn -0.720127 -0.3
gitextract_4duiir5e/ ├── .gitignore ├── LICENSE ├── README.md ├── configs/ │ ├── tc4d_stage_1.yaml │ ├── tc4d_stage_2.yaml │ ├── tc4d_stage_3.yaml │ ├── tc4d_stage_3_24_gb.yaml │ ├── tc4d_stage_3_40_gb.yaml │ └── tc4d_stage_3_eval.yaml ├── configs_comp/ │ ├── comp0.yaml │ ├── comp1.yaml │ ├── comp2.yaml │ └── comp3.yaml ├── configs_prompts/ │ ├── a_bear_walking.yaml │ ├── a_camel_walking.yaml │ ├── a_carp_swimming.yaml │ ├── a_cat_walking.yaml │ ├── a_chihuahua_running.yaml │ ├── a_clown_fish_swimming.yaml │ ├── a_corgi_running.yaml │ ├── a_deer_walking.yaml │ ├── a_dog_riding_a_skateboard.yaml │ ├── a_fox_walking.yaml │ ├── a_german_shepherd_running.yaml │ ├── a_giraffe_walking.yaml │ ├── a_girl_is_riding_a_bicycle.yaml │ ├── a_goat_walking.yaml │ ├── a_hippo_walking.yaml │ ├── a_labrador_running.yaml │ ├── a_lion_walking.yaml │ ├── a_pigeon_flying.yaml │ ├── a_rhinoceros_walking.yaml │ ├── a_seagull_flying.yaml │ ├── a_shark_swimming.yaml │ ├── a_sheep_running.yaml │ ├── a_tiger_walking.yaml │ ├── a_turtle_swimming.yaml │ ├── a_unicorn_running.yaml │ ├── a_wolf_running.yaml │ ├── an_astronaut_riding_a_horse.yaml │ ├── an_eagle_flying.yaml │ ├── an_elephant_walking.yaml │ ├── an_octopus_swimming.yaml │ ├── assassin_riding_a_cow.yaml │ ├── batman_riding_a_camel.yaml │ ├── deadpool_riding_a_cow.yaml │ ├── son_goku_riding_an_elephant.yaml │ └── spiderman_riding_a_donkey.yaml ├── configs_prompts_static/ │ ├── a_firepit.yaml │ ├── a_lamppost.yaml │ └── water_spraying_out_of_a_firehydrant.yaml ├── launch.py ├── load/ │ ├── make_prompt_library.py │ ├── prompt_library.json │ ├── shapes/ │ │ ├── README.md │ │ ├── animal.obj │ │ ├── blub.obj │ │ ├── cabin.obj │ │ ├── env_sphere.obj │ │ ├── hand_prismatic.obj │ │ ├── human.obj │ │ ├── nascar.obj │ │ ├── potion.obj │ │ └── teddy.obj │ ├── tets/ │ │ ├── 128_tets.npz │ │ ├── 32_tets.npz │ │ ├── 64_tets.npz │ │ └── generate_tets.py │ └── zero123/ │ ├── download.sh │ └── sd-objaverse-finetune-c_concat-256.yaml ├── 
requirements-dev.txt ├── requirements.txt ├── threestudio/ │ ├── __init__.py │ ├── data/ │ │ ├── __init__.py │ │ ├── co3d.py │ │ ├── image.py │ │ ├── multiview.py │ │ ├── random_multiview.py │ │ ├── single_multiview_combined.py │ │ └── uncond.py │ ├── models/ │ │ ├── __init__.py │ │ ├── background/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── neural_environment_map_background.py │ │ │ ├── solid_color_background.py │ │ │ └── textured_background.py │ │ ├── estimators.py │ │ ├── exporters/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ └── mesh_exporter.py │ │ ├── geometry/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── implicit_sdf.py │ │ │ ├── implicit_volume.py │ │ │ ├── tetrahedra_sdf_grid.py │ │ │ └── volume_grid.py │ │ ├── guidance/ │ │ │ ├── __init__.py │ │ │ ├── deep_floyd_guidance.py │ │ │ ├── deep_floyd_vsd_guidance.py │ │ │ ├── multiview_diffusion_guidance.py │ │ │ ├── stable_diffusion_guidance.py │ │ │ ├── stable_diffusion_vsd_guidance.py │ │ │ ├── svd_guidance.py │ │ │ ├── video_stable_diffusion_guidance.py │ │ │ ├── video_stable_diffusion_vsd_guidance.py │ │ │ ├── videocrafter/ │ │ │ │ ├── .gitignore │ │ │ │ ├── License │ │ │ │ ├── README.md │ │ │ │ ├── cog.yaml │ │ │ │ ├── configs/ │ │ │ │ │ ├── inference_i2v_512_v1.0.yaml │ │ │ │ │ ├── inference_t2v_1024_v1.0.yaml │ │ │ │ │ ├── inference_t2v_512_v1.0.yaml │ │ │ │ │ └── inference_t2v_512_v2.0.yaml │ │ │ │ ├── gradio_app.py │ │ │ │ ├── lvdm/ │ │ │ │ │ ├── basics.py │ │ │ │ │ ├── common.py │ │ │ │ │ ├── distributions.py │ │ │ │ │ ├── ema.py │ │ │ │ │ ├── models/ │ │ │ │ │ │ ├── autoencoder.py │ │ │ │ │ │ ├── ddpm3d.py │ │ │ │ │ │ ├── samplers/ │ │ │ │ │ │ │ └── ddim.py │ │ │ │ │ │ └── utils_diffusion.py │ │ │ │ │ └── modules/ │ │ │ │ │ ├── attention.py │ │ │ │ │ ├── encoders/ │ │ │ │ │ │ ├── condition.py │ │ │ │ │ │ └── ip_resampler.py │ │ │ │ │ ├── networks/ │ │ │ │ │ │ ├── ae_modules.py │ │ │ │ │ │ └── openaimodel3d.py │ │ │ │ │ └── x_transformer.py │ │ │ │ ├── predict.py │ │ │ │ ├── prompts/ │ │ 
│ │ │ └── test_prompts.txt │ │ │ │ ├── requirements.txt │ │ │ │ ├── scripts/ │ │ │ │ │ ├── evaluation/ │ │ │ │ │ │ ├── ddp_wrapper.py │ │ │ │ │ │ ├── funcs.py │ │ │ │ │ │ └── inference.py │ │ │ │ │ ├── gradio/ │ │ │ │ │ │ ├── i2v_test.py │ │ │ │ │ │ └── t2v_test.py │ │ │ │ │ ├── run_image2video.sh │ │ │ │ │ └── run_text2video.sh │ │ │ │ └── utils/ │ │ │ │ └── utils.py │ │ │ ├── videocrafter_guidance.py │ │ │ ├── zero123_guidance.py │ │ │ └── zeroscope_guidance.py │ │ ├── isosurface.py │ │ ├── materials/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── diffuse_with_point_light_material.py │ │ │ ├── neural_radiance_material.py │ │ │ ├── no_material.py │ │ │ └── sd_latent_adapter_material.py │ │ ├── mesh.py │ │ ├── networks.py │ │ ├── prompt_processors/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── deepfloyd_prompt_processor.py │ │ │ ├── stable_diffusion_prompt_processor.py │ │ │ ├── videocrafter_prompt_processor.py │ │ │ ├── zero123_prompt_processor.py │ │ │ └── zeroscope_diffusion_prompt_processor.py │ │ └── renderers/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── mask_nerf_renderer.py │ │ ├── mask_nerf_renderer_multi.py │ │ ├── stable_nerf_renderer.py │ │ └── stable_nerf_renderer_multi.py │ ├── systems/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── optimizers.py │ │ ├── tc4d.py │ │ └── utils.py │ └── utils/ │ ├── __init__.py │ ├── base.py │ ├── bounding_boxes.py │ ├── callbacks.py │ ├── config.py │ ├── config_scene.py │ ├── misc.py │ ├── object_trajectory.py │ ├── ops.py │ ├── rasterize.py │ ├── saving.py │ └── typing.py └── train.sh
SYMBOL INDEX (1184 symbols across 88 files)
FILE: launch.py
class ColoredFilter (line 8) | class ColoredFilter(logging.Filter):
method __init__ (line 31) | def __init__(self):
method filter (line 34) | def filter(self, record):
function main (line 42) | def main() -> None:
FILE: load/make_prompt_library.py
function main (line 422) | def main():
FILE: load/tets/generate_tets.py
function generate_tetrahedron_grid_file (line 22) | def generate_tetrahedron_grid_file(res=32, root=".."):
function convert_from_quartet_to_npz (line 33) | def convert_from_quartet_to_npz(quartetfile="cube_32_tet.tet", npzfile="...
FILE: threestudio/__init__.py
function register (line 4) | def register(name):
function find (line 12) | def find(name):
function warn (line 32) | def warn(*args, **kwargs):
FILE: threestudio/data/co3d.py
function _load_16big_png_depth (line 33) | def _load_16big_png_depth(depth_png) -> np.ndarray:
function _load_depth (line 45) | def _load_depth(path, scale_adjustment) -> np.ndarray:
function _get_1d_bounds (line 55) | def _get_1d_bounds(arr):
function get_bbox_from_mask (line 60) | def get_bbox_from_mask(mask, thr, decrease_quant=0.05):
function get_clamp_bbox (line 75) | def get_clamp_bbox(bbox, box_crop_context=0.0, impath=""):
function crop_around_box (line 100) | def crop_around_box(tensor, bbox, impath=""):
function resize_image (line 107) | def resize_image(image, height, width, mode="bilinear"):
function similarity_from_cameras (line 132) | def similarity_from_cameras(c2w, fix_rot=False, radius=1.0):
class Co3dDataModuleConfig (line 193) | class Co3dDataModuleConfig:
class Co3dDatasetBase (line 218) | class Co3dDatasetBase:
method setup (line 219) | def setup(self, cfg, split):
method get_all_images (line 524) | def get_all_images(self):
class Co3dDataset (line 528) | class Co3dDataset(Dataset, Co3dDatasetBase):
method __init__ (line 529) | def __init__(self, cfg, split):
method __len__ (line 532) | def __len__(self):
method prepare_data (line 542) | def prepare_data(self, index):
method __getitem__ (line 582) | def __getitem__(self, index):
class Co3dIterableDataset (line 592) | class Co3dIterableDataset(IterableDataset, Co3dDatasetBase):
method __init__ (line 593) | def __init__(self, cfg, split):
method __iter__ (line 598) | def __iter__(self):
method collate (line 602) | def collate(self, batch) -> Dict[str, Any]:
class Co3dDataModule (line 675) | class Co3dDataModule(pl.LightningDataModule):
method __init__ (line 676) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 680) | def setup(self, stage=None):
method prepare_data (line 688) | def prepare_data(self):
method general_loader (line 691) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 701) | def train_dataloader(self):
method val_dataloader (line 706) | def val_dataloader(self):
method test_dataloader (line 709) | def test_dataloader(self):
method predict_dataloader (line 712) | def predict_dataloader(self):
FILE: threestudio/data/image.py
class SingleImageDataModuleConfig (line 30) | class SingleImageDataModuleConfig:
class SingleImageDataBase (line 44) | class SingleImageDataBase:
method setup (line 45) | def setup(self, cfg, split):
method get_all_images (line 153) | def get_all_images(self):
class SingleImageIterableDataset (line 157) | class SingleImageIterableDataset(IterableDataset, SingleImageDataBase):
method __init__ (line 158) | def __init__(self, cfg: Any, split: str) -> None:
method collate (line 162) | def collate(self, batch) -> Dict[str, Any]:
method __iter__ (line 181) | def __iter__(self):
class SingleImageDataset (line 186) | class SingleImageDataset(Dataset, SingleImageDataBase):
method __init__ (line 187) | def __init__(self, cfg: Any, split: str) -> None:
method __len__ (line 191) | def __len__(self):
method __getitem__ (line 194) | def __getitem__(self, index):
class SingleImageDataModule (line 215) | class SingleImageDataModule(pl.LightningDataModule):
method __init__ (line 218) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 222) | def setup(self, stage=None) -> None:
method prepare_data (line 230) | def prepare_data(self):
method general_loader (line 233) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 238) | def train_dataloader(self) -> DataLoader:
method val_dataloader (line 245) | def val_dataloader(self) -> DataLoader:
method test_dataloader (line 248) | def test_dataloader(self) -> DataLoader:
method predict_dataloader (line 251) | def predict_dataloader(self) -> DataLoader:
FILE: threestudio/data/multiview.py
function convert_pose (line 24) | def convert_pose(C2W):
function convert_proj (line 32) | def convert_proj(K, H, W, near, far):
function inter_pose (line 41) | def inter_pose(pose_0, pose_1, ratio):
class MultiviewsDataModuleConfig (line 61) | class MultiviewsDataModuleConfig:
class MultiviewIterableDataset (line 74) | class MultiviewIterableDataset(IterableDataset):
method __init__ (line 75) | def __init__(self, cfg: Any) -> None:
method __iter__ (line 176) | def __iter__(self):
method collate (line 180) | def collate(self, batch):
class MultiviewDataset (line 196) | class MultiviewDataset(Dataset):
method __init__ (line 197) | def __init__(self, cfg: Any, split: str) -> None:
method __len__ (line 356) | def __len__(self):
method __getitem__ (line 359) | def __getitem__(self, index):
method __iter__ (line 371) | def __iter__(self):
method collate (line 375) | def collate(self, batch):
class MultiviewDataModule (line 382) | class MultiviewDataModule(pl.LightningDataModule):
method __init__ (line 385) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 389) | def setup(self, stage=None) -> None:
method prepare_data (line 397) | def prepare_data(self):
method general_loader (line 400) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 408) | def train_dataloader(self) -> DataLoader:
method val_dataloader (line 413) | def val_dataloader(self) -> DataLoader:
method test_dataloader (line 419) | def test_dataloader(self) -> DataLoader:
method predict_dataloader (line 424) | def predict_dataloader(self) -> DataLoader:
FILE: threestudio/data/random_multiview.py
class RandomMultiviewCameraDataModuleConfig (line 32) | class RandomMultiviewCameraDataModuleConfig(RandomCameraDataModuleConfig):
class RandomMultiviewCameraIterableDataset (line 37) | class RandomMultiviewCameraIterableDataset(RandomCameraIterableDataset):
method __init__ (line 39) | def __init__(self, *args, **kwargs):
method collate (line 43) | def collate(self, batch) -> Dict[str, Any]:
class RandomMultiviewCameraDataModule (line 293) | class RandomMultiviewCameraDataModule(pl.LightningDataModule):
method __init__ (line 296) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 300) | def setup(self, stage=None) -> None:
method prepare_data (line 308) | def prepare_data(self):
method general_loader (line 311) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 321) | def train_dataloader(self) -> DataLoader:
method val_dataloader (line 326) | def val_dataloader(self) -> DataLoader:
method test_dataloader (line 332) | def test_dataloader(self) -> DataLoader:
method predict_dataloader (line 337) | def predict_dataloader(self) -> DataLoader:
FILE: threestudio/data/single_multiview_combined.py
class RandomSingleMultiViewCameraIterableDataset (line 27) | class RandomSingleMultiViewCameraIterableDataset(IterableDataset, Update...
method __init__ (line 28) | def __init__(self, cfg_single_view: Any, cfg_multi_view: Any, multi_ra...
method update_step (line 38) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method __iter__ (line 42) | def __iter__(self):
method collate (line 46) | def collate(self, batch) -> Dict[str, Any]:
class SingleMultiviewCombinedCameraDataModule (line 65) | class SingleMultiviewCombinedCameraDataModule(pl.LightningDataModule):
method __init__ (line 68) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 74) | def setup(self, stage=None) -> None:
method prepare_data (line 82) | def prepare_data(self):
method general_loader (line 85) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 95) | def train_dataloader(self) -> DataLoader:
method val_dataloader (line 100) | def val_dataloader(self) -> DataLoader:
method test_dataloader (line 105) | def test_dataloader(self) -> DataLoader:
method predict_dataloader (line 110) | def predict_dataloader(self) -> DataLoader:
FILE: threestudio/data/uncond.py
class RandomCameraDataModuleConfig (line 26) | class RandomCameraDataModuleConfig:
class RandomCameraIterableDataset (line 84) | class RandomCameraIterableDataset(IterableDataset, Updateable):
method __init__ (line 85) | def __init__(self, cfg: Any) -> None:
method update_step (line 163) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method __iter__ (line 175) | def __iter__(self):
method collate (line 179) | def collate(self, batch) -> Dict[str, Any]:
class RandomCameraDataset (line 477) | class RandomCameraDataset(Dataset):
method __init__ (line 478) | def __init__(self, cfg: Any, split: str) -> None:
method __len__ (line 630) | def __len__(self):
method __getitem__ (line 633) | def __getitem__(self, index):
method collate (line 653) | def collate(self, batch):
class RandomCameraDataModule (line 660) | class RandomCameraDataModule(pl.LightningDataModule):
method __init__ (line 663) | def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> N...
method setup (line 667) | def setup(self, stage=None) -> None:
method prepare_data (line 675) | def prepare_data(self):
method general_loader (line 678) | def general_loader(self, dataset, batch_size, collate_fn=None) -> Data...
method train_dataloader (line 688) | def train_dataloader(self) -> DataLoader:
method val_dataloader (line 693) | def val_dataloader(self) -> DataLoader:
method test_dataloader (line 699) | def test_dataloader(self) -> DataLoader:
method predict_dataloader (line 704) | def predict_dataloader(self) -> DataLoader:
FILE: threestudio/models/background/base.py
class BaseBackground (line 13) | class BaseBackground(BaseModule):
class Config (line 15) | class Config(BaseModule.Config):
method configure (line 20) | def configure(self):
method forward (line 23) | def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B 3"]:
FILE: threestudio/models/background/neural_environment_map_background.py
class NeuralEnvironmentMapBackground (line 16) | class NeuralEnvironmentMapBackground(BaseBackground):
class Config (line 18) | class Config(BaseBackground.Config):
method configure (line 38) | def configure(self) -> None:
method forward (line 46) | def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B 3"]:
FILE: threestudio/models/background/solid_color_background.py
class SolidColorBackground (line 13) | class SolidColorBackground(BaseBackground):
class Config (line 15) | class Config(BaseBackground.Config):
method configure (line 22) | def configure(self) -> None:
method forward (line 33) | def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B Nc"]:
FILE: threestudio/models/background/textured_background.py
class TexturedBackground (line 14) | class TexturedBackground(BaseBackground):
class Config (line 16) | class Config(BaseBackground.Config):
method configure (line 24) | def configure(self) -> None:
method spherical_xyz_to_uv (line 29) | def spherical_xyz_to_uv(self, dirs: Float[Tensor, "*B 3"]) -> Float[Te...
method forward (line 37) | def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B Nc"]:
FILE: threestudio/models/estimators.py
class ImportanceEstimator (line 16) | class ImportanceEstimator(AbstractEstimator):
method __init__ (line 17) | def __init__(
method sampling (line 23) | def sampling(
function _transform_stot (line 104) | def _transform_stot(
FILE: threestudio/models/exporters/base.py
class ExporterOutput (line 12) | class ExporterOutput:
class Exporter (line 18) | class Exporter(BaseObject):
class Config (line 20) | class Config(BaseObject.Config):
method configure (line 25) | def configure(
method geometry (line 40) | def geometry(self) -> BaseImplicitGeometry:
method material (line 44) | def material(self) -> BaseMaterial:
method background (line 48) | def background(self) -> BaseBackground:
method __call__ (line 51) | def __call__(self, *args, **kwargs) -> List[ExporterOutput]:
FILE: threestudio/models/exporters/mesh_exporter.py
class MeshExporter (line 18) | class MeshExporter(Exporter):
class Config (line 20) | class Config(Exporter.Config):
method configure (line 34) | def configure(
method __call__ (line 43) | def __call__(self) -> List[ExporterOutput]:
method export_obj_with_mtl (line 53) | def export_obj_with_mtl(self, mesh: Mesh) -> List[ExporterOutput]:
method export_obj (line 132) | def export_obj(self, mesh: Mesh) -> List[ExporterOutput]:
FILE: threestudio/models/geometry/base.py
function contract_to_unisphere (line 20) | def contract_to_unisphere(
class BaseGeometry (line 35) | class BaseGeometry(BaseModule):
class Config (line 37) | class Config(BaseModule.Config):
method create_from (line 43) | def create_from(
method export (line 50) | def export(self, *args, **kwargs) -> Dict[str, Any]:
class BaseImplicitGeometry (line 54) | class BaseImplicitGeometry(BaseGeometry):
class Config (line 56) | class Config(BaseGeometry.Config):
method configure (line 70) | def configure(self) -> None:
method _initilize_isosurface_helper (line 85) | def _initilize_isosurface_helper(self):
method forward (line 101) | def forward(
method forward_field (line 106) | def forward_field(
method forward_level (line 113) | def forward_level(
method _isosurface (line 119) | def _isosurface(self, bbox: Float[Tensor, "2 3"], fine_stage: bool = F...
method isosurface (line 171) | def isosurface(self) -> Mesh:
class BaseExplicitGeometry (line 191) | class BaseExplicitGeometry(BaseGeometry):
class Config (line 193) | class Config(BaseGeometry.Config):
method configure (line 198) | def configure(self) -> None:
FILE: threestudio/models/geometry/implicit_sdf.py
class ImplicitSDF (line 18) | class ImplicitSDF(BaseImplicitGeometry):
class Config (line 20) | class Config(BaseImplicitGeometry.Config):
method configure (line 59) | def configure(self) -> None:
method initialize_shape (line 87) | def initialize_shape(self) -> None:
method get_shifted_sdf (line 208) | def get_shifted_sdf(
method forward (line 231) | def forward(
method forward_sdf (line 279) | def forward_sdf(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor,...
method forward_field (line 289) | def forward_field(
method forward_level (line 302) | def forward_level(
method export (line 307) | def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str...
FILE: threestudio/models/geometry/implicit_volume.py
class ImplicitVolume (line 21) | class ImplicitVolume(BaseImplicitGeometry):
class Config (line 23) | class Config(BaseImplicitGeometry.Config):
method configure (line 61) | def configure(self) -> None:
method set_density_grid (line 83) | def set_density_grid(self):
method get_activated_density (line 102) | def get_activated_density(
method forward (line 131) | def forward(
method forward_density (line 206) | def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Ten...
method forward_field (line 217) | def forward_field(
method forward_level (line 227) | def forward_level(
method export (line 232) | def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str...
method create_from (line 251) | def create_from(
method update_step (line 282) | def update_step(
FILE: threestudio/models/geometry/tetrahedra_sdf_grid.py
class TetrahedraSDFGrid (line 24) | class TetrahedraSDFGrid(BaseExplicitGeometry):
class Config (line 26) | class Config(BaseExplicitGeometry.Config):
method configure (line 61) | def configure(self) -> None:
method initialize_shape (line 123) | def initialize_shape(self) -> None:
method isosurface (line 126) | def isosurface(self) -> Mesh:
method forward (line 139) | def forward(
method create_from (line 157) | def create_from(
method export (line 243) | def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str...
FILE: threestudio/models/geometry/volume_grid.py
class VolumeGrid (line 15) | class VolumeGrid(BaseImplicitGeometry):
class Config (line 17) | class Config(BaseImplicitGeometry.Config):
method configure (line 33) | def configure(self) -> None:
method get_density_bias (line 48) | def get_density_bias(self, points: Float[Tensor, "*N Di"]):
method get_trilinear_feature (line 65) | def get_trilinear_feature(
method forward (line 77) | def forward(
method forward_density (line 133) | def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Ten...
method forward_field (line 147) | def forward_field(
method forward_level (line 157) | def forward_level(
method export (line 162) | def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str...
FILE: threestudio/models/guidance/deep_floyd_guidance.py
class DeepFloydGuidance (line 17) | class DeepFloydGuidance(BaseObject):
class Config (line 19) | class Config(BaseObject.Config):
method configure (line 41) | def configure(self) -> None:
method forward_unet (line 104) | def forward_unet(
method __call__ (line 117) | def __call__(
method update_step (line 210) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/deep_floyd_vsd_guidance.py
class ToWeightsDType (line 29) | class ToWeightsDType(nn.Module):
method __init__ (line 30) | def __init__(self, module: nn.Module, dtype: torch.dtype):
method forward (line 35) | def forward(self, x: Float[Tensor, "..."]) -> Float[Tensor, "..."]:
class DeepFloydVSDGuidance (line 40) | class DeepFloydVSDGuidance(BaseModule):
class Config (line 42) | class Config(BaseModule.Config):
method configure (line 65) | def configure(self) -> None:
method pipe (line 240) | def pipe(self):
method pipe_lora (line 244) | def pipe_lora(self):
method unet (line 248) | def unet(self):
method unet_lora (line 252) | def unet_lora(self):
method _sample (line 257) | def _sample(
method sample (line 330) | def sample(
method sample_lora (line 359) | def sample_lora(
method forward_unet (line 406) | def forward_unet(
method disable_unet_class_embedding (line 425) | def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
method compute_grad_vsd (line 433) | def compute_grad_vsd(
method train_lora (line 526) | def train_lora(
method forward (line 572) | def forward(
method update_step (line 634) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/multiview_diffusion_guidance.py
class MultiviewDiffusionGuidance (line 22) | class MultiviewDiffusionGuidance(BaseModule):
class Config (line 24) | class Config(BaseModule.Config):
method configure (line 46) | def configure(self) -> None:
method get_camera_cond (line 64) | def get_camera_cond(self,
method encode_images (line 77) | def encode_images(
method forward (line 84) | def forward(
method update_step (line 187) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/stable_diffusion_guidance.py
class StableDiffusionGuidance (line 17) | class StableDiffusionGuidance(BaseObject):
class Config (line 19) | class Config(BaseObject.Config):
method configure (line 47) | def configure(self) -> None:
method forward_unet (line 137) | def forward_unet(
method encode_images (line 151) | def encode_images(
method decode_latents (line 161) | def decode_latents(
method compute_grad_sds (line 176) | def compute_grad_sds(
method compute_grad_sjc (line 216) | def compute_grad_sjc(
method __call__ (line 256) | def __call__(
method update_step (line 315) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/stable_diffusion_vsd_guidance.py
class ToWeightsDType (line 26) | class ToWeightsDType(nn.Module):
method __init__ (line 27) | def __init__(self, module: nn.Module, dtype: torch.dtype):
method forward (line 32) | def forward(self, x: Float[Tensor, "..."]) -> Float[Tensor, "..."]:
class StableDiffusionVSDGuidance (line 37) | class StableDiffusionVSDGuidance(BaseModule):
class Config (line 39) | class Config(BaseModule.Config):
method configure (line 62) | def configure(self) -> None:
method pipe (line 222) | def pipe(self):
method pipe_lora (line 226) | def pipe_lora(self):
method unet (line 230) | def unet(self):
method unet_lora (line 234) | def unet_lora(self):
method vae (line 238) | def vae(self):
method vae_lora (line 242) | def vae_lora(self):
method _sample (line 247) | def _sample(
method sample (line 321) | def sample(
method sample_lora (line 350) | def sample_lora(
method forward_unet (line 397) | def forward_unet(
method encode_images (line 416) | def encode_images(
method decode_latents (line 426) | def decode_latents(
method disable_unet_class_embedding (line 442) | def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
method compute_grad_vsd (line 450) | def compute_grad_vsd(
method train_lora (line 553) | def train_lora(
method get_latents (line 619) | def get_latents(
method forward (line 634) | def forward(
method update_step (line 692) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/svd_guidance.py
class StableVideoDiffusionGuidance (line 27) | class StableVideoDiffusionGuidance(BaseObject):
class Config (line 29) | class Config(BaseObject.Config):
method configure (line 58) | def configure(self) -> None:
method encode_image (line 84) | def encode_image(self, image):
method embed_image (line 124) | def embed_image(self, image, num_videos_per_prompt=1, do_classifier_fr...
method __call__ (line 161) | def __call__(
method _resize_with_antialiasing (line 224) | def _resize_with_antialiasing(self, input, size, interpolation="bicubi...
method _compute_padding (line 252) | def _compute_padding(self, kernel_size):
method _filter2d (line 275) | def _filter2d(self, input, kernel):
method _gaussian (line 298) | def _gaussian(self, window_size: int, sigma):
method _gaussian_blur2d (line 314) | def _gaussian_blur2d(self, input, kernel_size, sigma):
FILE: threestudio/models/guidance/video_stable_diffusion_guidance.py
function tensor2vid (line 23) | def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5,...
class StableDiffusionGuidance (line 41) | class StableDiffusionGuidance(BaseObject):
class Config (line 43) | class Config(BaseObject.Config):
method configure (line 71) | def configure(self) -> None:
method forward_unet (line 176) | def forward_unet(
method encode_images (line 190) | def encode_images(
method decode_latents (line 249) | def decode_latents(self, latents):
method compute_grad_sds (line 273) | def compute_grad_sds(
method compute_grad_sjc (line 322) | def compute_grad_sjc(
method get_text_embeddings (line 363) | def get_text_embeddings(
method __call__ (line 514) | def __call__(
method update_step (line 580) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method _encode_prompt (line 597) | def _encode_prompt(
method gen_video (line 751) | def gen_video(
method prepare_latents (line 956) | def prepare_latents(
method prepare_extra_step_kwargs (line 982) | def prepare_extra_step_kwargs(self, generator, eta):
method progress_bar (line 999) | def progress_bar(self, iterable=None, total=None):
FILE: threestudio/models/guidance/video_stable_diffusion_vsd_guidance.py
function tensor2vid (line 33) | def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5,...
class ToWeightsDType (line 51) | class ToWeightsDType(nn.Module):
method __init__ (line 52) | def __init__(self, module: nn.Module, dtype: torch.dtype):
method forward (line 57) | def forward(self, x: Float[Tensor, "..."]) -> Float[Tensor, "..."]:
class StableDiffusionVSDGuidance (line 62) | class StableDiffusionVSDGuidance(BaseModule):
class Config (line 64) | class Config(BaseModule.Config):
method configure (line 87) | def configure(self) -> None:
method pipe (line 268) | def pipe(self):
method pipe_lora (line 272) | def pipe_lora(self):
method unet (line 276) | def unet(self):
method unet_lora (line 280) | def unet_lora(self):
method vae (line 284) | def vae(self):
method vae_lora (line 288) | def vae_lora(self):
method _sample (line 293) | def _sample(
method sample (line 367) | def sample(
method sample_lora (line 396) | def sample_lora(
method forward_unet (line 443) | def forward_unet(
method encode_images (line 472) | def encode_images(
method decode_latents (line 516) | def decode_latents(self, latents):
method get_text_embeddings (line 541) | def get_text_embeddings(
method disable_unet_class_embedding (line 689) | def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
method compute_grad_vsd (line 697) | def compute_grad_vsd(
method train_lora (line 784) | def train_lora(
method get_latents (line 829) | def get_latents(
method forward (line 845) | def forward(
method update_step (line 900) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/videocrafter/gradio_app.py
function videocrafter_demo (line 17) | def videocrafter_demo(result_dir='./tmp/'):
FILE: threestudio/models/guidance/videocrafter/lvdm/basics.py
function disabled_train (line 14) | def disabled_train(self, mode=True):
function zero_module (line 19) | def zero_module(module):
function scale_module (line 27) | def scale_module(module, scale):
function conv_nd (line 36) | def conv_nd(dims, *args, **kwargs):
function linear (line 49) | def linear(*args, **kwargs):
function avg_pool_nd (line 56) | def avg_pool_nd(dims, *args, **kwargs):
function nonlinearity (line 69) | def nonlinearity(type='silu'):
class GroupNormSpecific (line 76) | class GroupNormSpecific(nn.GroupNorm):
method forward (line 77) | def forward(self, x):
function normalization (line 81) | def normalization(channels, num_groups=32):
class HybridConditioner (line 90) | class HybridConditioner(nn.Module):
method __init__ (line 92) | def __init__(self, c_concat_config, c_crossattn_config):
method forward (line 97) | def forward(self, c_concat, c_crossattn):
FILE: threestudio/models/guidance/videocrafter/lvdm/common.py
function gather_data (line 8) | def gather_data(data, return_np=True):
function autocast (line 16) | def autocast(f):
function extract_into_tensor (line 25) | def extract_into_tensor(a, t, x_shape):
function noise_like (line 31) | def noise_like(shape, device, repeat=False):
function default (line 37) | def default(val, d):
function exists (line 42) | def exists(val):
function identity (line 45) | def identity(*args, **kwargs):
function uniq (line 48) | def uniq(arr):
function mean_flat (line 51) | def mean_flat(tensor):
function ismap (line 57) | def ismap(x):
function isimage (line 62) | def isimage(x):
function max_neg_value (line 67) | def max_neg_value(t):
function shape_to_str (line 70) | def shape_to_str(x):
function init_ (line 74) | def init_(tensor):
function checkpoint (line 81) | def checkpoint(func, inputs, params, flag):
FILE: threestudio/models/guidance/videocrafter/lvdm/distributions.py
class AbstractDistribution (line 5) | class AbstractDistribution:
method sample (line 6) | def sample(self):
method mode (line 9) | def mode(self):
class DiracDistribution (line 13) | class DiracDistribution(AbstractDistribution):
method __init__ (line 14) | def __init__(self, value):
method sample (line 17) | def sample(self):
method mode (line 20) | def mode(self):
class DiagonalGaussianDistribution (line 24) | class DiagonalGaussianDistribution(object):
method __init__ (line 25) | def __init__(self, parameters, deterministic=False):
method sample (line 35) | def sample(self, noise=None):
method kl (line 42) | def kl(self, other=None):
method nll (line 56) | def nll(self, sample, dims=[1,2,3]):
method mode (line 64) | def mode(self):
function normal_kl (line 68) | def normal_kl(mean1, logvar1, mean2, logvar2):
FILE: threestudio/models/guidance/videocrafter/lvdm/ema.py
class LitEma (line 5) | class LitEma(nn.Module):
method __init__ (line 6) | def __init__(self, model, decay=0.9999, use_num_upates=True):
method forward (line 25) | def forward(self,model):
method copy_to (line 46) | def copy_to(self, model):
method store (line 55) | def store(self, parameters):
method restore (line 64) | def restore(self, parameters):
FILE: threestudio/models/guidance/videocrafter/lvdm/models/autoencoder.py
class AutoencoderKL (line 13) | class AutoencoderKL(pl.LightningModule):
method __init__ (line 14) | def __init__(self,
method init_test (line 51) | def init_test(self,):
method init_from_ckpt (line 80) | def init_from_ckpt(self, path, ignore_keys=list()):
method encode (line 97) | def encode(self, x, **kwargs):
method decode (line 104) | def decode(self, z, **kwargs):
method forward (line 109) | def forward(self, input, sample_posterior=True):
method get_input (line 118) | def get_input(self, batch, k):
method training_step (line 128) | def training_step(self, batch, batch_idx, optimizer_idx):
method validation_step (line 149) | def validation_step(self, batch, batch_idx):
method configure_optimizers (line 163) | def configure_optimizers(self):
method get_last_layer (line 174) | def get_last_layer(self):
method log_images (line 178) | def log_images(self, batch, only_inputs=False, **kwargs):
method to_rgb (line 194) | def to_rgb(self, x):
class IdentityFirstStage (line 202) | class IdentityFirstStage(torch.nn.Module):
method __init__ (line 203) | def __init__(self, *args, vq_interface=False, **kwargs):
method encode (line 207) | def encode(self, x, *args, **kwargs):
method decode (line 210) | def decode(self, x, *args, **kwargs):
method quantize (line 213) | def quantize(self, x, *args, **kwargs):
method forward (line 218) | def forward(self, x, *args, **kwargs):
FILE: threestudio/models/guidance/videocrafter/lvdm/models/ddpm3d.py
class DDPM (line 38) | class DDPM(pl.LightningModule):
method __init__ (line 40) | def __init__(self,
method register_schedule (line 113) | def register_schedule(self, given_betas=None, beta_schedule="linear", ...
method ema_scope (line 168) | def ema_scope(self, context=None):
method init_from_ckpt (line 182) | def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
method q_mean_variance (line 200) | def q_mean_variance(self, x_start, t):
method predict_start_from_noise (line 212) | def predict_start_from_noise(self, x_t, t, noise):
method q_posterior (line 218) | def q_posterior(self, x_start, x_t, t):
method p_mean_variance (line 227) | def p_mean_variance(self, x, t, clip_denoised: bool):
method p_sample (line 240) | def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
method p_sample_loop (line 249) | def p_sample_loop(self, shape, return_intermediates=False):
method sample (line 264) | def sample(self, batch_size=16, return_intermediates=False):
method q_sample (line 270) | def q_sample(self, x_start, t, noise=None):
method get_input (line 276) | def get_input(self, batch, k):
method _get_rows_from_list (line 281) | def _get_rows_from_list(self, samples):
method log_images (line 289) | def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=Non...
class LatentDiffusion (line 327) | class LatentDiffusion(DDPM):
method __init__ (line 329) | def __init__(self,
method make_cond_schedule (line 407) | def make_cond_schedule(self, ):
method q_sample (line 412) | def q_sample(self, x_start, t, noise=None):
method _freeze_model (line 423) | def _freeze_model(self):
method instantiate_first_stage (line 427) | def instantiate_first_stage(self, config):
method instantiate_cond_stage (line 434) | def instantiate_cond_stage(self, config):
method get_learned_conditioning (line 445) | def get_learned_conditioning(self, c):
method get_first_stage_encoding (line 458) | def get_first_stage_encoding(self, encoder_posterior, noise=None):
method encode_first_stage (line 468) | def encode_first_stage(self, x):
method encode_first_stage_2DAE (line 485) | def encode_first_stage_2DAE(self, x):
method decode_core (line 492) | def decode_core(self, z, **kwargs):
method decode_first_stage (line 509) | def decode_first_stage(self, z, **kwargs):
method apply_model (line 512) | def apply_model(self, x_noisy, t, cond, **kwargs):
method _get_denoise_row_from_list (line 528) | def _get_denoise_row_from_list(self, samples, desc=''):
method decode_first_stage_2DAE (line 555) | def decode_first_stage_2DAE(self, z, **kwargs):
method p_mean_variance (line 564) | def p_mean_variance(self, x, c, t, clip_denoised: bool, return_x0=Fals...
method p_sample (line 590) | def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, r...
method p_sample_loop (line 612) | def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=N...
class LatentVisualDiffusion (line 659) | class LatentVisualDiffusion(LatentDiffusion):
method __init__ (line 660) | def __init__(self, cond_img_config, finegrained=False, random_cond=Fal...
method instantiate_img_embedder (line 668) | def instantiate_img_embedder(self, config, freeze=True):
method init_projector (line 676) | def init_projector(self, use_finegrained, num_tokens, input_dim, cross...
method get_image_embeds (line 688) | def get_image_embeds(self, batch_imgs):
class DiffusionWrapper (line 695) | class DiffusionWrapper(pl.LightningModule):
method __init__ (line 696) | def __init__(self, diff_model_config, conditioning_key):
method forward (line 701) | def forward(self, x, t, c_concat: list = None, c_crossattn: list = None,
FILE: threestudio/models/guidance/videocrafter/lvdm/models/samplers/ddim.py
class DDIMSampler (line 8) | class DDIMSampler(object):
method __init__ (line 9) | def __init__(self, model, schedule="linear", **kwargs):
method register_buffer (line 16) | def register_buffer(self, name, attr):
method make_schedule (line 22) | def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddi...
method sample (line 63) | def sample(self,
method ddim_sampling (line 132) | def ddim_sampling(self, cond, shape,
method p_sample_ddim (line 212) | def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_origin...
method stochastic_encode (line 294) | def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
method decode (line 316) | def decode(self, x_latent, cond, t_start, unconditional_guidance_scale...
FILE: threestudio/models/guidance/videocrafter/lvdm/models/utils_diffusion.py
function timestep_embedding (line 8) | def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=Fal...
function make_beta_schedule (line 31) | def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_e...
function make_ddim_timesteps (line 56) | def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_...
function make_ddim_sampling_parameters (line 73) | def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbos...
function betas_for_alpha_bar (line 88) | def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.9...
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/attention.py
class RelativePosition (line 21) | class RelativePosition(nn.Module):
method __init__ (line 24) | def __init__(self, num_units, max_relative_position):
method forward (line 31) | def forward(self, length_q, length_k):
class CrossAttention (line 43) | class CrossAttention(nn.Module):
method __init__ (line 45) | def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, ...
method forward (line 76) | def forward(self, x, context=None, mask=None):
method efficient_forward (line 129) | def efficient_forward(self, x, context=None, mask=None):
class BasicTransformerBlock (line 187) | class BasicTransformerBlock(nn.Module):
method __init__ (line 189) | def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None,...
method forward (line 204) | def forward(self, x, context=None, mask=None):
method _forward (line 216) | def _forward(self, x, context=None, mask=None):
class SpatialTransformer (line 223) | class SpatialTransformer(nn.Module):
method __init__ (line 233) | def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., ...
method forward (line 262) | def forward(self, x, context=None):
class TemporalTransformer (line 281) | class TemporalTransformer(nn.Module):
method __init__ (line 288) | def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., ...
method forward (line 331) | def forward(self, x, context=None):
class GEGLU (line 376) | class GEGLU(nn.Module):
method __init__ (line 377) | def __init__(self, dim_in, dim_out):
method forward (line 381) | def forward(self, x):
class FeedForward (line 386) | class FeedForward(nn.Module):
method __init__ (line 387) | def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
method forward (line 402) | def forward(self, x):
class LinearAttention (line 406) | class LinearAttention(nn.Module):
method __init__ (line 407) | def __init__(self, dim, heads=4, dim_head=32):
method forward (line 414) | def forward(self, x):
class SpatialSelfAttention (line 425) | class SpatialSelfAttention(nn.Module):
method __init__ (line 426) | def __init__(self, in_channels):
method forward (line 452) | def forward(self, x):
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/encoders/condition.py
class AbstractEncoder (line 10) | class AbstractEncoder(nn.Module):
method __init__ (line 11) | def __init__(self):
method encode (line 14) | def encode(self, *args, **kwargs):
class IdentityEncoder (line 18) | class IdentityEncoder(AbstractEncoder):
method encode (line 20) | def encode(self, x):
class ClassEmbedder (line 24) | class ClassEmbedder(nn.Module):
method __init__ (line 25) | def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1):
method forward (line 32) | def forward(self, batch, key=None, disable_dropout=False):
method get_unconditional_conditioning (line 44) | def get_unconditional_conditioning(self, bs, device="cuda"):
function disabled_train (line 51) | def disabled_train(self, mode=True):
class FrozenT5Embedder (line 57) | class FrozenT5Embedder(AbstractEncoder):
method __init__ (line 60) | def __init__(self, version="google/t5-v1_1-large", device="cuda", max_...
method freeze (line 70) | def freeze(self):
method forward (line 76) | def forward(self, text):
method encode (line 85) | def encode(self, text):
class FrozenCLIPEmbedder (line 89) | class FrozenCLIPEmbedder(AbstractEncoder):
method __init__ (line 97) | def __init__(self, version="openai/clip-vit-large-patch14", device="cu...
method freeze (line 113) | def freeze(self):
method forward (line 119) | def forward(self, text):
method encode (line 132) | def encode(self, text):
class ClipImageEmbedder (line 136) | class ClipImageEmbedder(nn.Module):
method __init__ (line 137) | def __init__(
method preprocess (line 155) | def preprocess(self, x):
method forward (line 165) | def forward(self, x, no_dropout=False):
class FrozenOpenCLIPEmbedder (line 174) | class FrozenOpenCLIPEmbedder(AbstractEncoder):
method __init__ (line 184) | def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", devic...
method freeze (line 204) | def freeze(self):
method forward (line 209) | def forward(self, text):
method encode_with_transformer (line 215) | def encode_with_transformer(self, text):
method text_transformer_forward (line 224) | def text_transformer_forward(self, x: torch.Tensor, attn_mask=None):
method encode (line 234) | def encode(self, text):
class FrozenOpenCLIPImageEmbedder (line 238) | class FrozenOpenCLIPImageEmbedder(AbstractEncoder):
method __init__ (line 243) | def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", devic...
method preprocess (line 266) | def preprocess(self, x):
method freeze (line 276) | def freeze(self):
method forward (line 282) | def forward(self, image, no_dropout=False):
method encode_with_vision_transformer (line 288) | def encode_with_vision_transformer(self, img):
method encode (line 293) | def encode(self, text):
class FrozenOpenCLIPImageEmbedderV2 (line 298) | class FrozenOpenCLIPImageEmbedderV2(AbstractEncoder):
method __init__ (line 303) | def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", devic...
method preprocess (line 324) | def preprocess(self, x):
method freeze (line 334) | def freeze(self):
method forward (line 339) | def forward(self, image, no_dropout=False):
method encode_with_vision_transformer (line 344) | def encode_with_vision_transformer(self, x):
class FrozenCLIPT5Encoder (line 377) | class FrozenCLIPT5Encoder(AbstractEncoder):
method __init__ (line 378) | def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_ve...
method encode (line 386) | def encode(self, text):
method forward (line 389) | def forward(self, text):
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/encoders/ip_resampler.py
class ImageProjModel (line 7) | class ImageProjModel(nn.Module):
method __init__ (line 9) | def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024,...
method forward (line 16) | def forward(self, image_embeds):
function FeedForward (line 24) | def FeedForward(dim, mult=4):
function reshape_tensor (line 34) | def reshape_tensor(x, heads):
class PerceiverAttention (line 45) | class PerceiverAttention(nn.Module):
method __init__ (line 46) | def __init__(self, *, dim, dim_head=64, heads=8):
method forward (line 61) | def forward(self, x, latents):
class Resampler (line 93) | class Resampler(nn.Module):
method __init__ (line 94) | def __init__(
method forward (line 125) | def forward(self, x):
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/networks/ae_modules.py
function nonlinearity (line 10) | def nonlinearity(x):
function Normalize (line 15) | def Normalize(in_channels, num_groups=32):
class LinAttnBlock (line 20) | class LinAttnBlock(LinearAttention):
method __init__ (line 22) | def __init__(self, in_channels):
class AttnBlock (line 26) | class AttnBlock(nn.Module):
method __init__ (line 27) | def __init__(self, in_channels):
method forward (line 53) | def forward(self, x):
function make_attn (line 80) | def make_attn(in_channels, attn_type="vanilla"):
class Downsample (line 90) | class Downsample(nn.Module):
method __init__ (line 91) | def __init__(self, in_channels, with_conv):
method forward (line 102) | def forward(self, x):
class Upsample (line 111) | class Upsample(nn.Module):
method __init__ (line 112) | def __init__(self, in_channels, with_conv):
method forward (line 123) | def forward(self, x):
function get_timestep_embedding (line 129) | def get_timestep_embedding(timesteps, embedding_dim):
class ResnetBlock (line 151) | class ResnetBlock(nn.Module):
method __init__ (line 152) | def __init__(self, *, in_channels, out_channels=None, conv_shortcut=Fa...
method forward (line 190) | def forward(self, x, temb):
class Model (line 212) | class Model(nn.Module):
method __init__ (line 213) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
method forward (line 312) | def forward(self, x, t=None, context=None):
method get_last_layer (line 360) | def get_last_layer(self):
class Encoder (line 364) | class Encoder(nn.Module):
method __init__ (line 365) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
method forward (line 430) | def forward(self, x):
class Decoder (line 466) | class Decoder(nn.Module):
method __init__ (line 467) | def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
method forward (line 539) | def forward(self, z):
class SimpleDecoder (line 581) | class SimpleDecoder(nn.Module):
method __init__ (line 582) | def __init__(self, in_channels, out_channels, *args, **kwargs):
method forward (line 604) | def forward(self, x):
class UpsampleDecoder (line 617) | class UpsampleDecoder(nn.Module):
method __init__ (line 618) | def __init__(self, in_channels, out_channels, ch, num_res_blocks, reso...
method forward (line 651) | def forward(self, x):
class LatentRescaler (line 665) | class LatentRescaler(nn.Module):
method __init__ (line 666) | def __init__(self, factor, in_channels, mid_channels, out_channels, de...
method forward (line 690) | def forward(self, x):
class MergedRescaleEncoder (line 702) | class MergedRescaleEncoder(nn.Module):
method __init__ (line 703) | def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
method forward (line 715) | def forward(self, x):
class MergedRescaleDecoder (line 721) | class MergedRescaleDecoder(nn.Module):
method __init__ (line 722) | def __init__(self, z_channels, out_ch, resolution, num_res_blocks, att...
method forward (line 732) | def forward(self, x):
class Upsampler (line 738) | class Upsampler(nn.Module):
method __init__ (line 739) | def __init__(self, in_size, out_size, in_channels, out_channels, ch_mu...
method forward (line 751) | def forward(self, x):
class Resize (line 757) | class Resize(nn.Module):
method __init__ (line 758) | def __init__(self, in_channels=None, learned=False, mode="bilinear"):
method forward (line 773) | def forward(self, x, scale_factor=1.0):
class FirstStagePostProcessor (line 780) | class FirstStagePostProcessor(nn.Module):
method __init__ (line 782) | def __init__(self, ch_mult:list, in_channels,
method instantiate_pretrained (line 817) | def instantiate_pretrained(self, config):
method encode_with_pretrained (line 826) | def encode_with_pretrained(self,x):
method forward (line 832) | def forward(self,x):
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/networks/openaimodel3d.py
class TimestepBlock (line 19) | class TimestepBlock(nn.Module):
method forward (line 24) | def forward(self, x, emb):
class TimestepEmbedSequential (line 30) | class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
method forward (line 36) | def forward(self, x, emb, context=None, batch_size=None):
class Downsample (line 51) | class Downsample(nn.Module):
method __init__ (line 60) | def __init__(self, channels, use_conv, dims=2, out_channels=None, padd...
method forward (line 75) | def forward(self, x):
class Upsample (line 80) | class Upsample(nn.Module):
method __init__ (line 89) | def __init__(self, channels, use_conv, dims=2, out_channels=None, padd...
method forward (line 98) | def forward(self, x):
class ResBlock (line 109) | class ResBlock(TimestepBlock):
method __init__ (line 124) | def __init__(
method forward (line 195) | def forward(self, x, emb, batch_size=None):
method _forward (line 208) | def _forward(self, x, emb, batch_size=None,):
class TemporalConvBlock (line 237) | class TemporalConvBlock(nn.Module):
method __init__ (line 242) | def __init__(self, in_channels, out_channels=None, dropout=0.0, spatia...
method forward (line 269) | def forward(self, x):
class UNetModel (line 279) | class UNetModel(nn.Module):
method __init__ (line 307) | def __init__(self,
method forward (line 534) | def forward(self, x, timesteps, context=None, features_adapter=None, f...
FILE: threestudio/models/guidance/videocrafter/lvdm/modules/x_transformer.py
class AbsolutePositionalEmbedding (line 24) | class AbsolutePositionalEmbedding(nn.Module):
method __init__ (line 25) | def __init__(self, dim, max_seq_len):
method init_ (line 30) | def init_(self):
method forward (line 33) | def forward(self, x):
class FixedPositionalEmbedding (line 38) | class FixedPositionalEmbedding(nn.Module):
method __init__ (line 39) | def __init__(self, dim):
method forward (line 44) | def forward(self, x, seq_dim=1, offset=0):
function exists (line 53) | def exists(val):
function default (line 57) | def default(val, d):
function always (line 63) | def always(val):
function not_equals (line 69) | def not_equals(val):
function equals (line 75) | def equals(val):
function max_neg_value (line 81) | def max_neg_value(tensor):
function pick_and_pop (line 87) | def pick_and_pop(keys, d):
function group_dict_by_key (line 92) | def group_dict_by_key(cond, d):
function string_begins_with (line 101) | def string_begins_with(prefix, str):
function group_by_key_prefix (line 105) | def group_by_key_prefix(prefix, d):
function groupby_prefix_and_trim (line 109) | def groupby_prefix_and_trim(prefix, d):
class Scale (line 116) | class Scale(nn.Module):
method __init__ (line 117) | def __init__(self, value, fn):
method forward (line 122) | def forward(self, x, **kwargs):
class Rezero (line 127) | class Rezero(nn.Module):
method __init__ (line 128) | def __init__(self, fn):
method forward (line 133) | def forward(self, x, **kwargs):
class ScaleNorm (line 138) | class ScaleNorm(nn.Module):
method __init__ (line 139) | def __init__(self, dim, eps=1e-5):
method forward (line 145) | def forward(self, x):
class RMSNorm (line 150) | class RMSNorm(nn.Module):
method __init__ (line 151) | def __init__(self, dim, eps=1e-8):
method forward (line 157) | def forward(self, x):
class Residual (line 162) | class Residual(nn.Module):
method forward (line 163) | def forward(self, x, residual):
class GRUGating (line 167) | class GRUGating(nn.Module):
method __init__ (line 168) | def __init__(self, dim):
method forward (line 172) | def forward(self, x, residual):
class GEGLU (line 183) | class GEGLU(nn.Module):
method __init__ (line 184) | def __init__(self, dim_in, dim_out):
method forward (line 188) | def forward(self, x):
class FeedForward (line 193) | class FeedForward(nn.Module):
method __init__ (line 194) | def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
method forward (line 209) | def forward(self, x):
class Attention (line 214) | class Attention(nn.Module):
method __init__ (line 215) | def __init__(
method forward (line 267) | def forward(
class AttentionLayers (line 369) | class AttentionLayers(nn.Module):
method __init__ (line 370) | def __init__(
method forward (line 480) | def forward(
class Encoder (line 540) | class Encoder(AttentionLayers):
method __init__ (line 541) | def __init__(self, **kwargs):
class TransformerWrapper (line 547) | class TransformerWrapper(nn.Module):
method __init__ (line 548) | def __init__(
method init_ (line 594) | def init_(self):
method forward (line 597) | def forward(
FILE: threestudio/models/guidance/videocrafter/predict.py
class Predictor (line 26) | class Predictor(BasePredictor):
method setup (line 27) | def setup(self) -> None:
method predict (line 49) | def predict(
FILE: threestudio/models/guidance/videocrafter/scripts/evaluation/ddp_wrapper.py
function setup_dist (line 8) | def setup_dist(local_rank):
function get_dist_info (line 15) | def get_dist_info():
FILE: threestudio/models/guidance/videocrafter/scripts/evaluation/funcs.py
function batch_ddim_sampling (line 15) | def batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_step...
function get_filelist (line 73) | def get_filelist(data_dir, ext='*'):
function get_dirlist (line 78) | def get_dirlist(path):
function load_model_checkpoint (line 90) | def load_model_checkpoint(model, ckpt=None, model_info = {"repo_id": "Vi...
function load_prompts (line 114) | def load_prompts(prompt_file):
function load_video_batch (line 125) | def load_video_batch(filepath_list, frame_stride, video_size=(256,256), ...
function load_image_batch (line 164) | def load_image_batch(filepath_list, image_size=(256,256)):
function save_videos (line 188) | def save_videos(batch_tensors, savedir, filenames, fps=10):
FILE: threestudio/models/guidance/videocrafter/scripts/evaluation/inference.py
function get_parser (line 18) | def get_parser():
function run_inference (line 42) | def run_inference(args, gpu_num, gpu_no, **kwargs):
FILE: threestudio/models/guidance/videocrafter/scripts/gradio/i2v_test.py
class Image2Video (line 9) | class Image2Video():
method __init__ (line 10) | def __init__(self,result_dir='./tmp/',gpu_num=1) -> None:
method get_image (line 31) | def get_image(self, image, prompt, steps=50, cfg_scale=12.0, eta=1.0, ...
method download_model (line 70) | def download_model(self):
FILE: threestudio/models/guidance/videocrafter/scripts/gradio/t2v_test.py
class Text2Video (line 9) | class Text2Video():
method __init__ (line 10) | def __init__(self,result_dir='./tmp/',gpu_num=1) -> None:
method get_prompt (line 31) | def get_prompt(self, prompt, steps=50, cfg_scale=12.0, eta=1.0, fps=16):
method download_model (line 62) | def download_model(self):
FILE: threestudio/models/guidance/videocrafter/utils/utils.py
function count_params (line 8) | def count_params(model, verbose=False):
function check_istarget (line 15) | def check_istarget(name, para_list):
function instantiate_from_config (line 27) | def instantiate_from_config(config):
function get_obj_from_str (line 37) | def get_obj_from_str(string, reload=False):
function load_npz_from_dir (line 45) | def load_npz_from_dir(data_dir):
function load_npz_from_paths (line 51) | def load_npz_from_paths(data_paths):
function resize_numpy_image (line 57) | def resize_numpy_image(image, max_resolution=512 * 512, resize_short_edg...
function setup_dist (line 70) | def setup_dist(args):
FILE: threestudio/models/guidance/videocrafter_guidance.py
class VideoCrafterGuidance (line 21) | class VideoCrafterGuidance(BaseObject):
class Config (line 23) | class Config(BaseObject.Config):
method configure (line 60) | def configure(self) -> None:
method forward_unet (line 95) | def forward_unet(
method encode_first_stage (line 108) | def encode_first_stage(self, x):
method encode_images (line 156) | def encode_images(
method decode_latents (line 184) | def decode_latents(self, latents):
method add_noise (line 187) | def add_noise(
method compute_grad_sds (line 210) | def compute_grad_sds(
method __call__ (line 268) | def __call__(
method update_step (line 323) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/guidance/zero123_guidance.py
function get_obj_from_str (line 21) | def get_obj_from_str(string, reload=False):
function instantiate_from_config (line 29) | def instantiate_from_config(config):
function load_model_from_config (line 40) | def load_model_from_config(config, ckpt, device, vram_O=True, verbose=Fa...
class Zero123Guidance (line 75) | class Zero123Guidance(BaseObject):
class Config (line 77) | class Config(BaseObject.Config):
method configure (line 99) | def configure(self) -> None:
method set_min_max_steps (line 143) | def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0....
method prepare_embeddings (line 148) | def prepare_embeddings(self, image_path: str) -> Float[Tensor, "B 3 25...
method get_img_embeds (line 172) | def get_img_embeds(
method encode_images (line 182) | def encode_images(
method decode_latents (line 193) | def decode_latents(
method get_cond (line 204) | def get_cond(
method __call__ (line 254) | def __call__(
method guidance_eval (line 334) | def guidance_eval(self, cond, t_orig, latents, noise_pred):
method update_step (line 398) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method generate (line 407) | def generate(
method gen_from_cond (line 433) | def gen_from_cond(
FILE: threestudio/models/guidance/zeroscope_guidance.py
class ZeroscopeGuidance (line 22) | class ZeroscopeGuidance(BaseObject):
class Config (line 24) | class Config(BaseObject.Config):
method configure (line 58) | def configure(self) -> None:
method forward_unet (line 170) | def forward_unet(
method encode_images (line 184) | def encode_images(
method decode_latents (line 248) | def decode_latents(self, latents):
method compute_grad_sds (line 272) | def compute_grad_sds(
method compute_grad_sjc (line 327) | def compute_grad_sjc(
method __call__ (line 367) | def __call__(
method update_step (line 425) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
FILE: threestudio/models/isosurface.py
class IsosurfaceHelper (line 11) | class IsosurfaceHelper(nn.Module):
method grid_vertices (line 15) | def grid_vertices(self) -> Float[Tensor, "N 3"]:
class MarchingCubeCPUHelper (line 19) | class MarchingCubeCPUHelper(IsosurfaceHelper):
method __init__ (line 20) | def __init__(self, resolution: int) -> None:
method grid_vertices (line 33) | def grid_vertices(self) -> Float[Tensor, "N3 3"]:
method forward (line 48) | def forward(
class MarchingTetrahedraHelper (line 69) | class MarchingTetrahedraHelper(IsosurfaceHelper):
method __init__ (line 70) | def __init__(self, resolution: int, tets_path: str):
method normalize_grid_deformation (line 130) | def normalize_grid_deformation(
method grid_vertices (line 140) | def grid_vertices(self) -> Float[Tensor, "Nv 3"]:
method all_edges (line 144) | def all_edges(self) -> Integer[Tensor, "Ne 2"]:
method sort_edges (line 158) | def sort_edges(self, edges_ex2):
method _forward (line 168) | def _forward(self, pos_nx3, sdf_n, tet_fx4):
method forward (line 229) | def forward(
FILE: threestudio/models/materials/base.py
class BaseMaterial (line 13) | class BaseMaterial(BaseModule):
class Config (line 15) | class Config(BaseModule.Config):
method configure (line 21) | def configure(self):
method forward (line 24) | def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]:
method export (line 27) | def export(self, *args, **kwargs) -> Dict[str, Any]:
FILE: threestudio/models/materials/diffuse_with_point_light_material.py
class DiffuseWithPointLightMaterial (line 15) | class DiffuseWithPointLightMaterial(BaseMaterial):
class Config (line 17) | class Config(BaseMaterial.Config):
method configure (line 29) | def configure(self) -> None:
method forward (line 42) | def forward(
method update_step (line 109) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method export (line 115) | def export(self, features: Float[Tensor, "*N Nf"], **kwargs) -> Dict[s...
FILE: threestudio/models/materials/neural_radiance_material.py
class NeuralRadianceMaterial (line 16) | class NeuralRadianceMaterial(BaseMaterial):
class Config (line 18) | class Config(BaseMaterial.Config):
method configure (line 35) | def configure(self) -> None:
method forward (line 40) | def forward(
FILE: threestudio/models/materials/no_material.py
class NoMaterial (line 16) | class NoMaterial(BaseMaterial):
class Config (line 18) | class Config(BaseMaterial.Config):
method configure (line 26) | def configure(self) -> None:
method forward (line 39) | def forward(
method export (line 54) | def export(self, features: Float[Tensor, "*N Nf"], **kwargs) -> Dict[s...
FILE: threestudio/models/materials/sd_latent_adapter_material.py
class StableDiffusionLatentAdapterMaterial (line 14) | class StableDiffusionLatentAdapterMaterial(BaseMaterial):
class Config (line 16) | class Config(BaseMaterial.Config):
method configure (line 21) | def configure(self) -> None:
method forward (line 35) | def forward(
FILE: threestudio/models/mesh.py
class Mesh (line 12) | class Mesh:
method __init__ (line 13) | def __init__(
method add_extra (line 28) | def add_extra(self, k, v) -> None:
method remove_outlier (line 31) | def remove_outlier(self, outlier_n_faces_threshold: Union[int, float])...
method requires_grad (line 97) | def requires_grad(self):
method v_nrm (line 101) | def v_nrm(self):
method v_tng (line 107) | def v_tng(self):
method v_tex (line 113) | def v_tex(self):
method t_tex_idx (line 119) | def t_tex_idx(self):
method v_rgb (line 125) | def v_rgb(self):
method edges (line 129) | def edges(self):
method _compute_vertex_normal (line 134) | def _compute_vertex_normal(self):
method _compute_vertex_tangent (line 162) | def _compute_vertex_tangent(self):
method _unwrap_uv (line 206) | def _unwrap_uv(
method unwrap_uv (line 243) | def unwrap_uv(
method set_vertex_color (line 250) | def set_vertex_color(self, v_rgb):
method _compute_edges (line 254) | def _compute_edges(self):
method normal_consistency (line 268) | def normal_consistency(self) -> Float[Tensor, ""]:
method _laplacian_uniform (line 275) | def _laplacian_uniform(self):
method laplacian (line 302) | def laplacian(self) -> Float[Tensor, ""]:
FILE: threestudio/models/networks.py
class ProgressiveBandFrequency (line 16) | class ProgressiveBandFrequency(nn.Module, Updateable):
method __init__ (line 17) | def __init__(self, in_channels: int, config: dict):
method forward (line 29) | def forward(self, x):
method update_step (line 36) | def update_step(self, epoch, global_step, on_load_weights=False):
class TCNNEncoding (line 55) | class TCNNEncoding(nn.Module):
method __init__ (line 56) | def __init__(self, in_channels, config, dtype=torch.float32) -> None:
method forward (line 63) | def forward(self, x):
class PEEncoder (line 66) | class PEEncoder(nn.Module):
method __init__ (line 68) | def __init__(self, x_dim=4, min_deg=0, max_deg=4, use_identity: bool =...
method latent_dim (line 80) | def latent_dim(self) -> int:
method forward (line 85) | def forward(self, x: torch.Tensor) -> torch.Tensor:
class TCNNEncodingSpatialTimeDeform (line 98) | class TCNNEncodingSpatialTimeDeform(nn.Module):
method __init__ (line 99) | def __init__(self, in_channels, config, dtype=torch.float32, init_time...
method init_params_zero (line 135) | def init_params_zero(self, param_list):
method set_temp_param_grad (line 142) | def set_temp_param_grad(self, requires_grad=False):
method set_param_grad (line 147) | def set_param_grad(self, param_list, requires_grad=False):
method warp (line 154) | def warp(self, x):
method log1p_safe (line 157) | def log1p_safe(self, x):
method exp_safe (line 161) | def exp_safe(self, x):
method expm1_safe (line 164) | def expm1_safe(self, x):
method safe_sqrt (line 167) | def safe_sqrt(x, eps=1e-7):
method general_loss_with_squared_residual (line 171) | def general_loss_with_squared_residual(self, squared_x, alpha, scale):
method elastic_loss (line 210) | def elastic_loss(self, x_frame_time, dx, alpha=-2.0, scale=0.03, delta...
method divergence_loss (line 228) | def divergence_loss(self, x_frame_time, dx, delta = 1e-6, div_clamp = ...
method forward (line 258) | def forward(self, x, grid, out_all=False):
class ProgressiveBandHashGrid (line 310) | class ProgressiveBandHashGrid(nn.Module, Updateable):
method __init__ (line 311) | def __init__(self, in_channels, config, dtype=torch.float32):
method forward (line 334) | def forward(self, x):
method update_step (line 339) | def update_step(self, epoch, global_step, on_load_weights=False):
class CompositeEncoding (line 351) | class CompositeEncoding(nn.Module, Updateable):
method __init__ (line 352) | def __init__(self, encoding, include_xyz=False, xyz_scale=1.0, xyz_off...
method forward (line 365) | def forward(self, x, *args, **kwargs):
function get_encoding (line 375) | def get_encoding(n_input_dims: int, config) -> nn.Module:
class VanillaMLP (line 395) | class VanillaMLP(nn.Module):
method __init__ (line 396) | def __init__(self, dim_in: int, dim_out: int, config: dict):
method forward (line 419) | def forward(self, x):
method make_linear (line 427) | def make_linear(self, dim_in, dim_out, is_first, is_last, bias=False):
method make_activation (line 431) | def make_activation(self):
class TCNNNetwork (line 435) | class TCNNNetwork(nn.Module):
method __init__ (line 436) | def __init__(self, dim_in: int, dim_out: int, config: dict) -> None:
method forward (line 441) | def forward(self, x):
function get_mlp (line 445) | def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:
class NetworkWithInputEncoding (line 457) | class NetworkWithInputEncoding(nn.Module, Updateable):
method __init__ (line 458) | def __init__(self, encoding, network):
method forward (line 462) | def forward(self, x):
class TCNNNetworkWithInputEncoding (line 466) | class TCNNNetworkWithInputEncoding(nn.Module):
method __init__ (line 467) | def __init__(
method forward (line 483) | def forward(self, x):
function create_network_with_input_encoding (line 487) | def create_network_with_input_encoding(
FILE: threestudio/models/prompt_processors/base.py
function hash_prompt (line 16) | def hash_prompt(model: str, prompt: str) -> str:
class DirectionConfig (line 24) | class DirectionConfig:
class PromptProcessorOutput (line 35) | class PromptProcessorOutput:
method get_text_embeddings (line 43) | def get_text_embeddings(
function shift_azimuth_deg (line 73) | def shift_azimuth_deg(azimuth: Float[Tensor, "..."]) -> Float[Tensor, "....
class PromptProcessor (line 78) | class PromptProcessor(BaseObject):
class Config (line 80) | class Config(BaseObject.Config):
method configure_text_encoder (line 95) | def configure_text_encoder(self) -> None:
method destroy_text_encoder (line 99) | def destroy_text_encoder(self) -> None:
method configure (line 102) | def configure(self) -> None:
method spawn_func (line 194) | def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, cfg):
method prepare_text_embeddings (line 198) | def prepare_text_embeddings(self):
method load_text_embeddings (line 246) | def load_text_embeddings(self):
method load_from_cache (line 261) | def load_from_cache(self, prompt):
method preprocess_prompt (line 272) | def preprocess_prompt(self, prompt: str) -> str:
method get_text_embeddings (line 293) | def get_text_embeddings(
method __call__ (line 298) | def __call__(self) -> PromptProcessorOutput:
FILE: threestudio/models/prompt_processors/deepfloyd_prompt_processor.py
class DeepFloydPromptProcessor (line 17) | class DeepFloydPromptProcessor(PromptProcessor):
class Config (line 19) | class Config(PromptProcessor.Config):
method configure_text_encoder (line 25) | def configure_text_encoder(self) -> None:
method destroy_text_encoder (line 40) | def destroy_text_encoder(self) -> None:
method get_text_embeddings (line 45) | def get_text_embeddings(
method spawn_func (line 56) | def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, cfg):
FILE: threestudio/models/prompt_processors/stable_diffusion_prompt_processor.py
class StableDiffusionPromptProcessor (line 16) | class StableDiffusionPromptProcessor(PromptProcessor):
class Config (line 18) | class Config(PromptProcessor.Config):
method configure_text_encoder (line 24) | def configure_text_encoder(self) -> None:
method destroy_text_encoder (line 36) | def destroy_text_encoder(self) -> None:
method get_text_embeddings (line 41) | def get_text_embeddings(
method spawn_func (line 73) | def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, cfg):
FILE: threestudio/models/prompt_processors/videocrafter_prompt_processor.py
class VideoCrafterPromptProcessor (line 23) | class VideoCrafterPromptProcessor(PromptProcessor):
class Config (line 25) | class Config(PromptProcessor.Config):
method configure_text_encoder (line 31) | def configure_text_encoder(self) -> None:
method destroy_text_encoder (line 43) | def destroy_text_encoder(self) -> None:
method get_text_embeddings (line 48) | def get_text_embeddings(
method spawn_func (line 79) | def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, cfg):
FILE: threestudio/models/prompt_processors/zero123_prompt_processor.py
class Zero123PromptProcessor (line 12) | class Zero123PromptProcessor(PromptProcessor):
class Config (line 14) | class Config(PromptProcessor.Config):
FILE: threestudio/models/prompt_processors/zeroscope_diffusion_prompt_processor.py
class ZeroScopeDiffusionPromptProcessor (line 16) | class ZeroScopeDiffusionPromptProcessor(PromptProcessor):
class Config (line 18) | class Config(PromptProcessor.Config):
method configure_text_encoder (line 24) | def configure_text_encoder(self) -> None:
method destroy_text_encoder (line 36) | def destroy_text_encoder(self) -> None:
method get_text_embeddings (line 41) | def get_text_embeddings(
method spawn_func (line 73) | def spawn_func(pretrained_model_name_or_path, prompts, cache_dir, cfg):
FILE: threestudio/models/renderers/base.py
class Renderer (line 15) | class Renderer(BaseModule):
class Config (line 17) | class Config(BaseModule.Config):
method configure (line 22) | def configure(
method forward (line 50) | def forward(self, *args, **kwargs) -> Dict[str, Any]:
method geometry (line 54) | def geometry(self) -> BaseImplicitGeometry:
method material (line 58) | def material(self) -> BaseMaterial:
method background (line 62) | def background(self) -> BaseBackground:
method set_geometry (line 65) | def set_geometry(self, geometry: BaseImplicitGeometry) -> None:
method set_material (line 68) | def set_material(self, material: BaseMaterial) -> None:
method set_background (line 71) | def set_background(self, background: BaseBackground) -> None:
class VolumeRenderer (line 75) | class VolumeRenderer(Renderer):
class Rasterizer (line 79) | class Rasterizer(Renderer):
FILE: threestudio/models/renderers/mask_nerf_renderer.py
class StableNeRFVolumeRenderer (line 20) | class StableNeRFVolumeRenderer(VolumeRenderer):
class Config (line 22) | class Config(VolumeRenderer.Config):
method configure (line 76) | def configure(
method forward (line 151) | def forward(
method update_step (line 590) | def update_step(
method update_step_end (line 664) | def update_step_end(self, epoch: int, global_step: int) -> None:
method train (line 672) | def train(self, mode=True):
method eval (line 678) | def eval(self):
FILE: threestudio/models/renderers/mask_nerf_renderer_multi.py
class StableNeRFVolumeRendererMulti (line 16) | class StableNeRFVolumeRendererMulti(VolumeRenderer):
class Config (line 18) | class Config(VolumeRenderer.Config):
method configure (line 67) | def configure(
method forward (line 104) | def forward(
method update_step (line 364) | def update_step(
method update_object_nerfs (line 372) | def update_object_nerfs(self, set_init: bool = False):
method train (line 381) | def train(self, mode=True):
method eval (line 387) | def eval(self):
FILE: threestudio/models/renderers/stable_nerf_renderer.py
class PatchRenderer (line 15) | class PatchRenderer(VolumeRenderer):
class Config (line 17) | class Config(VolumeRenderer.Config):
method configure (line 26) | def configure(
method forward (line 51) | def forward(
method update_step (line 133) | def update_step(
method train (line 138) | def train(self, mode=True):
method eval (line 141) | def eval(self):
FILE: threestudio/models/renderers/stable_nerf_renderer_multi.py
class PatchRenderer (line 15) | class PatchRenderer(VolumeRenderer):
class Config (line 17) | class Config(VolumeRenderer.Config):
method configure (line 26) | def configure(
method forward (line 50) | def forward(
method update_step (line 133) | def update_step(
method train (line 139) | def train(self, mode=True):
method eval (line 142) | def eval(self):
FILE: threestudio/systems/base.py
class BaseSystem (line 16) | class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
class Config (line 18) | class Config:
method __init__ (line 30) | def __init__(self, cfg, resumed=False) -> None:
method load_weights (line 45) | def load_weights(self, weights: str, ignore_modules: Optional[List[str...
method set_resume_status (line 53) | def set_resume_status(self, current_epoch: int, global_step: int):
method resumed (line 60) | def resumed(self):
method true_global_step (line 65) | def true_global_step(self):
method true_current_epoch (line 72) | def true_current_epoch(self):
method configure (line 78) | def configure(self) -> None:
method post_configure (line 81) | def post_configure(self) -> None:
method C (line 87) | def C(self, value: Any) -> float:
method configure_optimizers (line 90) | def configure_optimizers(self):
method training_step (line 103) | def training_step(self, batch, batch_idx):
method validation_step (line 106) | def validation_step(self, batch, batch_idx):
method on_validation_batch_end (line 109) | def on_validation_batch_end(self, outputs, batch, batch_idx):
method on_validation_epoch_end (line 114) | def on_validation_epoch_end(self):
method test_step (line 117) | def test_step(self, batch, batch_idx):
method on_test_batch_end (line 120) | def on_test_batch_end(self, outputs, batch, batch_idx):
method on_test_epoch_end (line 125) | def on_test_epoch_end(self):
method predict_step (line 128) | def predict_step(self, batch, batch_idx):
method on_predict_batch_end (line 131) | def on_predict_batch_end(self, outputs, batch, batch_idx):
method on_predict_epoch_end (line 136) | def on_predict_epoch_end(self):
method preprocess_data (line 139) | def preprocess_data(self, batch, stage):
method on_train_batch_start (line 147) | def on_train_batch_start(self, batch, batch_idx, unused=0):
method on_validation_batch_start (line 153) | def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
method on_test_batch_start (line 159) | def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
method on_predict_batch_start (line 165) | def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
method update_step (line 171) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
method on_before_optimizer_step (line 174) | def on_before_optimizer_step(self, optimizer):
class BaseLift3DSystem (line 184) | class BaseLift3DSystem(BaseSystem):
class Config (line 186) | class Config(BaseSystem.Config):
method configure (line 235) | def configure(self) -> None:
method on_fit_start (line 289) | def on_fit_start(self) -> None:
method on_test_end (line 297) | def on_test_end(self) -> None:
method on_predict_start (line 301) | def on_predict_start(self) -> None:
method predict_step (line 309) | def predict_step(self, batch, batch_idx):
method on_predict_epoch_end (line 313) | def on_predict_epoch_end(self) -> None:
method on_predict_end (line 324) | def on_predict_end(self) -> None:
FILE: threestudio/systems/optimizers.py
class Adan (line 23) | class Adan(Optimizer):
method __init__ (line 48) | def __init__(
method __setstate__ (line 82) | def __setstate__(self, state):
method restart_opt (line 88) | def restart_opt(self):
method step (line 104) | def step(self, closure=None):
function _single_tensor_adan (line 200) | def _single_tensor_adan(
function _multi_tensor_adan (line 257) | def _multi_tensor_adan(
FILE: threestudio/systems/tc4d.py
class TC4D (line 15) | class TC4D(BaseLift3DSystem):
class Config (line 17) | class Config(BaseLift3DSystem.Config):
method configure (line 35) | def configure(self) -> None:
method get_prompt_processors (line 108) | def get_prompt_processors(self, prompt_processor, prompt_processor_type):
method on_validation_start (line 125) | def on_validation_start(self) -> None:
method on_test_start (line 130) | def on_test_start(self) -> None:
method load_multi_ckpt_cfg (line 136) | def load_multi_ckpt_cfg(self):
method forward (line 160) | def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
method on_fit_start (line 202) | def on_fit_start(self) -> None:
method training_step (line 207) | def training_step(self, batch, batch_idx):
method validation_step (line 367) | def validation_step(self, batch, batch_idx):
method on_validation_epoch_end (line 564) | def on_validation_epoch_end(self):
method test_step (line 567) | def test_step(self, batch, batch_idx):
method on_test_epoch_end (line 758) | def on_test_epoch_end(self):
FILE: threestudio/systems/utils.py
function get_scheduler (line 12) | def get_scheduler(name):
function getattr_recursive (line 19) | def getattr_recursive(m, attr):
function get_parameters (line 25) | def get_parameters(model, name):
function parse_optimizer (line 34) | def parse_optimizer(config, model):
function parse_scheduler_to_instance (line 56) | def parse_scheduler_to_instance(config, optimizer):
function parse_scheduler (line 74) | def parse_scheduler(config, optimizer):
FILE: threestudio/utils/base.py
class Configurable (line 11) | class Configurable:
class Config (line 13) | class Config:
method __init__ (line 16) | def __init__(self, cfg: Optional[dict] = None) -> None:
class Updateable (line 21) | class Updateable:
method do_update_step (line 22) | def do_update_step(
method update_step (line 38) | def update_step(self, epoch: int, global_step: int, on_load_weights: b...
function update_if_possible (line 45) | def update_if_possible(module: Any, epoch: int, global_step: int) -> None:
class BaseObject (line 50) | class BaseObject(Updateable):
class Config (line 52) | class Config:
method __init__ (line 57) | def __init__(
method configure (line 65) | def configure(self, *args, **kwargs) -> None:
class BaseModule (line 69) | class BaseModule(nn.Module, Updateable):
class Config (line 71) | class Config:
method __init__ (line 76) | def __init__(
method configure (line 97) | def configure(self, *args, **kwargs) -> None:
FILE: threestudio/utils/bounding_boxes.py
function scale_and_shift_box (line 3) | def scale_and_shift_box(size_min, size_max, rot_angle, translation, devi...
function get_rotation_matrix (line 16) | def get_rotation_matrix(theta):
function voxelize (line 25) | def voxelize(pc: torch.Tensor, voxel_size: int, grid_size=1., filter_out...
function ravel_index (line 71) | def ravel_index(indices, shape, device):
function shape_padright (line 76) | def shape_padright(x, n_ones=1):
function dimshuffle (line 80) | def dimshuffle(x, pattern):
function tensor_linspace (line 90) | def tensor_linspace(start, end, steps, device):
FILE: threestudio/utils/callbacks.py
class VersionedCallback (line 20) | class VersionedCallback(Callback):
method __init__ (line 21) | def __init__(self, save_root, version=None, use_version=True):
method version (line 27) | def version(self) -> int:
method _get_next_version (line 37) | def _get_next_version(self):
method savedir (line 50) | def savedir(self):
class CodeSnapshotCallback (line 61) | class CodeSnapshotCallback(VersionedCallback):
method __init__ (line 62) | def __init__(self, save_root, version=None, use_version=True):
method get_file_list (line 65) | def get_file_list(self):
method save_code_snapshot (line 81) | def save_code_snapshot(self):
method on_fit_start (line 89) | def on_fit_start(self, trainer, pl_module):
class ConfigSnapshotCallback (line 98) | class ConfigSnapshotCallback(VersionedCallback):
method __init__ (line 99) | def __init__(self, config_path, config, save_root, version=None, use_v...
method save_config_snapshot (line 105) | def save_config_snapshot(self):
method on_fit_start (line 110) | def on_fit_start(self, trainer, pl_module):
class CustomProgressBar (line 114) | class CustomProgressBar(TQDMProgressBar):
method get_metrics (line 115) | def get_metrics(self, *args, **kwargs):
FILE: threestudio/utils/config.py
class ExperimentConfig (line 28) | class ExperimentConfig:
method __post_init__ (line 60) | def __post_init__(self):
function load_config (line 79) | def load_config(*yaml_files: str, cli_args: list = [], **kwargs) -> Any:
function config_to_primitive (line 89) | def config_to_primitive(config, resolve: bool = True) -> Any:
function dump_config (line 93) | def dump_config(path: str, config) -> None:
function parse_structured (line 98) | def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]]...
FILE: threestudio/utils/config_scene.py
class ExperimentConfig (line 9) | class ExperimentConfig:
method __post_init__ (line 13) | def __post_init__(self):
function load_config (line 17) | def load_config(*yaml_files: str, cli_args: list = [], **kwargs) -> Any:
function config_to_primitive (line 27) | def config_to_primitive(config, resolve: bool = True) -> Any:
function dump_config (line 31) | def dump_config(path: str, config) -> None:
function parse_structured (line 36) | def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]]...
FILE: threestudio/utils/misc.py
function parse_version (line 13) | def parse_version(ver: str):
function get_rank (line 17) | def get_rank():
function get_device (line 28) | def get_device():
function load_module_weights (line 32) | def load_module_weights(
function C (line 65) | def C(value: Any, epoch: int, global_step: int) -> float:
function cleanup (line 89) | def cleanup():
function finish_with_cleanup (line 95) | def finish_with_cleanup(func: Callable):
function _distributed_available (line 104) | def _distributed_available():
function barrier (line 108) | def barrier():
FILE: threestudio/utils/object_trajectory.py
class SceneTrajectory (line 12) | class SceneTrajectory(torch.nn.Module):
method __init__ (line 14) | def __init__(
method update_objs (line 32) | def update_objs(self, frame_times):
class ObjectTrajectory (line 37) | class ObjectTrajectory(torch.nn.Module):
method __init__ (line 39) | def __init__(
method set_translation_offsets (line 59) | def set_translation_offsets(self):
method update_obj (line 67) | def update_obj(self, frame_time: float):
method get_lengths (line 79) | def get_lengths(self):
method get_pos (line 86) | def get_pos(self):
method get_rot (line 89) | def get_rot(self):
class GeneralTrajectory (line 93) | class GeneralTrajectory(torch.nn.Module):
method __init__ (line 95) | def __init__(
method set_pos_init (line 130) | def set_pos_init(self):
method estimator_res (line 134) | def estimator_res(self):
method est_size (line 138) | def est_size(self):
method device (line 142) | def device(self):
method update_obj (line 145) | def update_obj(self, frame_time: float):
method get_translation_rotation (line 155) | def get_translation_rotation(self, frame_time: float):
method set_translation_offset (line 163) | def set_translation_offset(self, translation_end_previous: torch.Tenso...
method reset_estimator (line 169) | def reset_estimator(self):
method get_rotation_mat (line 173) | def get_rotation_mat(self, rotation: float, frame_time: float):
method set_estimator (line 183) | def set_estimator(self, position: List[float], rotation: float):
class SplineTrajectory (line 214) | class SplineTrajectory(GeneralTrajectory):
method __init__ (line 216) | def __init__(
method set_pos_init (line 240) | def set_pos_init(self):
method eval_spline (line 243) | def eval_spline(
method get_translation (line 256) | def get_translation(self, frame_time: float, **kwargs):
method get_rotation (line 260) | def get_rotation(self, frame_time: float):
method calc_length (line 267) | def calc_length(self):
method segment_spline (line 270) | def segment_spline(self, segment_count: int = 1000):
method transform_time (line 278) | def transform_time(self, u: np.ndarray):
class CurvatureTrajectory (line 291) | class CurvatureTrajectory(GeneralTrajectory):
method __init__ (line 293) | def __init__(
method set_pos_init (line 308) | def set_pos_init(self):
method get_translation (line 312) | def get_translation(self, rotation: float, **kwargs):
method get_rotation (line 317) | def get_rotation(self, frame_time: float):
method calc_length (line 323) | def calc_length(self):
class StaticTrajectory (line 327) | class StaticTrajectory(GeneralTrajectory):
method __init__ (line 329) | def __init__(
method set_pos_init (line 341) | def set_pos_init(self):
method get_translation (line 344) | def get_translation(self, rotation: float, **kwargs):
method get_rotation (line 349) | def get_rotation(self, frame_time: float):
method calc_length (line 353) | def calc_length(self):
FILE: threestudio/utils/ops.py
function load_resize_image (line 18) | def load_resize_image(img_path, h, w):
function dot (line 28) | def dot(x, y):
function reflect (line 32) | def reflect(x, n):
function scale_tensor (line 39) | def scale_tensor(
class _TruncExp (line 53) | class _TruncExp(Function): # pylint: disable=abstract-method
method forward (line 58) | def forward(ctx, x): # pylint: disable=arguments-differ
method backward (line 64) | def backward(ctx, g): # pylint: disable=arguments-differ
class SpecifyGradient (line 69) | class SpecifyGradient(Function):
method forward (line 74) | def forward(ctx, input_tensor, gt_grad):
method backward (line 81) | def backward(ctx, grad_scale):
function get_activation (line 90) | def get_activation(name) -> Callable:
function chunk_batch (line 125) | def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:
function get_ray_directions (line 192) | def get_ray_directions(
function get_rays (line 232) | def get_rays(
function get_projection_matrix (line 279) | def get_projection_matrix(
function get_mvp_matrix (line 294) | def get_mvp_matrix(
function binary_cross_entropy (line 308) | def binary_cross_entropy(input, target):
function tet_sdf_diff (line 315) | def tet_sdf_diff(
class MeshOBJ (line 331) | class MeshOBJ:
method __init__ (line 337) | def __init__(self, v: np.ndarray, f: np.ndarray):
method normalize_mesh (line 355) | def normalize_mesh(self, target_scale=0.5):
method winding_number (line 367) | def winding_number(self, query: torch.Tensor):
method gaussian_weighted_distance (line 376) | def gaussian_weighted_distance(self, query: torch.Tensor, sigma):
function ce_pq_loss (line 388) | def ce_pq_loss(p, q, weight=None):
class ShapeLoss (line 399) | class ShapeLoss(nn.Module):
method __init__ (line 400) | def __init__(self, guide_shape):
method forward (line 419) | def forward(self, xyzs, sigmas):
class TVLoss (line 435) | class TVLoss(nn.Module):
method __init__ (line 436) | def __init__(self, TVLoss_weight_dim1=1.0, TVLoss_weight_dim2=1.0):
method forward (line 441) | def forward(self, x):
method _tensor_size (line 457) | def _tensor_size(self, t):
method __init__ (line 469) | def __init__(self, tv_loss_weight=[1.0, 1.0, 1.0]):
method forward (line 473) | def forward(self, x):
method _tensor_size (line 479) | def _tensor_size(self, t):
function validate_empty_rays (line 460) | def validate_empty_rays(ray_indices, t_start, t_end):
class TVLoss (line 468) | class TVLoss(nn.Module):
method __init__ (line 436) | def __init__(self, TVLoss_weight_dim1=1.0, TVLoss_weight_dim2=1.0):
method forward (line 441) | def forward(self, x):
method _tensor_size (line 457) | def _tensor_size(self, t):
method __init__ (line 469) | def __init__(self, tv_loss_weight=[1.0, 1.0, 1.0]):
method forward (line 473) | def forward(self, x):
method _tensor_size (line 479) | def _tensor_size(self, t):
FILE: threestudio/utils/rasterize.py
class NVDiffRasterizerContext (line 7) | class NVDiffRasterizerContext:
method __init__ (line 8) | def __init__(self, context_type: str, device: torch.device) -> None:
method initialize_context (line 12) | def initialize_context(
method vertex_transform (line 22) | def vertex_transform(
method rasterize (line 30) | def rasterize(
method rasterize_one (line 39) | def rasterize_one(
method antialias (line 49) | def antialias(
method interpolate (line 58) | def interpolate(
method interpolate_one (line 70) | def interpolate_one(
FILE: threestudio/utils/saving.py
class SaverMixin (line 23) | class SaverMixin:
method set_save_dir (line 27) | def set_save_dir(self, save_dir: str):
method get_save_dir (line 30) | def get_save_dir(self):
method convert_data (line 35) | def convert_data(self, data):
method get_save_path (line 52) | def get_save_path(self, filename):
method create_loggers (line 57) | def create_loggers(self, cfg_loggers: DictConfig) -> None:
method get_loggers (line 65) | def get_loggers(self) -> List:
method get_rgb_image_ (line 80) | def get_rgb_image_(self, img, data_format, data_range, rgba=False):
method _save_rgb_image (line 114) | def _save_rgb_image(
method save_rgb_image (line 133) | def save_rgb_image(
method get_uv_image_ (line 146) | def get_uv_image_(self, img, data_format, data_range, cmap):
method save_uv_image (line 169) | def save_uv_image(
method get_grayscale_image_ (line 180) | def get_grayscale_image_(self, img, data_range, cmap):
method save_grayscale_image (line 234) | def save_grayscale_image(
method get_image_grid_ (line 244) | def get_image_grid_(self, imgs, align):
method save_image_grid (line 298) | def save_image_grid(
method save_image (line 324) | def save_image(self, filename, img):
method save_cubemap (line 333) | def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False):
method save_data (line 366) | def save_data(self, filename, data):
method save_state_dict (line 377) | def save_state_dict(self, filename, data):
method save_img_sequence (line 380) | def save_img_sequence(
method save_mesh (line 421) | def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=...
method save_obj (line 427) | def save_obj(
method _save_obj (line 479) | def _save_obj(
method _save_mtl (line 522) | def _save_mtl(
method save_file (line 581) | def save_file(self, filename, src_path):
method save_json (line 584) | def save_json(self, filename, payload):
Condensed preview — 182 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (5,361K chars).
[
{
"path": ".gitignore",
"chars": 44,
"preview": "outputs\nexperiments\n.threestudio_cache\n*.pyc"
},
{
"path": "LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "README.md",
"chars": 5447,
"preview": "# TC4D: Trajectory-Conditioned Text-to-4D Generation\n\n<img src=\"./assets/tc4d.png\" width=\"512\">\n\n| [Project Page](https:"
},
{
"path": "configs/tc4d_stage_1.yaml",
"chars": 5848,
"preview": "name: \"tc4d_stage_1\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_type: \""
},
{
"path": "configs/tc4d_stage_2.yaml",
"chars": 5873,
"preview": "name: \"tc4d_stage_2\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_type: \""
},
{
"path": "configs/tc4d_stage_3.yaml",
"chars": 8453,
"preview": "name: \"tc4d_stage_3\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_type: \""
},
{
"path": "configs/tc4d_stage_3_24_gb.yaml",
"chars": 8449,
"preview": "name: \"tc4d_stage_3\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_type: \""
},
{
"path": "configs/tc4d_stage_3_40_gb.yaml",
"chars": 8450,
"preview": "name: \"tc4d_stage_3\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_type: \""
},
{
"path": "configs/tc4d_stage_3_eval.yaml",
"chars": 8030,
"preview": "name: \"tc4d_stage_3_eval\"\ntag: \"${rmspace:${system.prompt_processor.prompt},_}\"\nexp_root_dir: \"outputs\"\nseed: 0\n\ndata_ty"
},
{
"path": "configs_comp/comp0.yaml",
"chars": 1467,
"preview": "name: \"comp0\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.4, 0.4, 0.4]\n trajs:\n -"
},
{
"path": "configs_comp/comp1.yaml",
"chars": 757,
"preview": "name: \"comp1\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n -"
},
{
"path": "configs_comp/comp2.yaml",
"chars": 1544,
"preview": "name: \"comp2\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n -"
},
{
"path": "configs_comp/comp3.yaml",
"chars": 1285,
"preview": "name: \"comp3\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n -"
},
{
"path": "configs_prompts/a_bear_walking.yaml",
"chars": 478,
"preview": "name: \"a_bear_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n"
},
{
"path": "configs_prompts/a_camel_walking.yaml",
"chars": 293,
"preview": "name: \"a_camel_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_carp_swimming.yaml",
"chars": 401,
"preview": "name: \"a_carp_swimming\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_cat_walking.yaml",
"chars": 363,
"preview": "name: \"a_cat_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.65, 0.65, 0.65]\n trajs"
},
{
"path": "configs_prompts/a_chihuahua_running.yaml",
"chars": 319,
"preview": "name: \"a_chihuahua_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n tr"
},
{
"path": "configs_prompts/a_clown_fish_swimming.yaml",
"chars": 358,
"preview": "name: \"a_clown_fish_swimming\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts/a_corgi_running.yaml",
"chars": 308,
"preview": "name: \"a_corgi_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_deer_walking.yaml",
"chars": 402,
"preview": "name: \"a_deer_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n"
},
{
"path": "configs_prompts/a_dog_riding_a_skateboard.yaml",
"chars": 407,
"preview": "name: \"a_dog_riding_a_skateboard\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts/a_fox_walking.yaml",
"chars": 292,
"preview": "name: \"a_fox_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n "
},
{
"path": "configs_prompts/a_german_shepherd_running.yaml",
"chars": 328,
"preview": "name: \"a_german_shepherd_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts/a_giraffe_walking.yaml",
"chars": 460,
"preview": "name: \"a_giraffe_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n traj"
},
{
"path": "configs_prompts/a_girl_is_riding_a_bicycle.yaml",
"chars": 369,
"preview": "name: \"a_girl_is_riding_a_bicycle\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts/a_goat_walking.yaml",
"chars": 306,
"preview": "name: \"a_goat_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n"
},
{
"path": "configs_prompts/a_hippo_walking.yaml",
"chars": 309,
"preview": "name: \"a_hippo_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_labrador_running.yaml",
"chars": 299,
"preview": "name: \"a_labrador_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n tra"
},
{
"path": "configs_prompts/a_lion_walking.yaml",
"chars": 323,
"preview": "name: \"a_lion_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n"
},
{
"path": "configs_prompts/a_pigeon_flying.yaml",
"chars": 293,
"preview": "name: \"a_pigeon_flying\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_rhinoceros_walking.yaml",
"chars": 303,
"preview": "name: \"a_rhinoceros_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n t"
},
{
"path": "configs_prompts/a_seagull_flying.yaml",
"chars": 407,
"preview": "name: \"a_seagull_flying\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs"
},
{
"path": "configs_prompts/a_shark_swimming.yaml",
"chars": 407,
"preview": "name: \"a_shark_swimming\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs"
},
{
"path": "configs_prompts/a_sheep_running.yaml",
"chars": 293,
"preview": "name: \"a_sheep_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_tiger_walking.yaml",
"chars": 293,
"preview": "name: \"a_tiger_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/a_turtle_swimming.yaml",
"chars": 369,
"preview": "name: \"a_turtle_swimming\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n traj"
},
{
"path": "configs_prompts/a_unicorn_running.yaml",
"chars": 295,
"preview": "name: \"a_unicorn_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n traj"
},
{
"path": "configs_prompts/a_wolf_running.yaml",
"chars": 291,
"preview": "name: \"a_wolf_running\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n"
},
{
"path": "configs_prompts/an_astronaut_riding_a_horse.yaml",
"chars": 479,
"preview": "name: \"an_astronaut_riding_a_horse\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n"
},
{
"path": "configs_prompts/an_eagle_flying.yaml",
"chars": 405,
"preview": "name: \"an_eagle_flying\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:"
},
{
"path": "configs_prompts/an_elephant_walking.yaml",
"chars": 422,
"preview": "name: \"an_elephant_walking\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n tr"
},
{
"path": "configs_prompts/an_octopus_swimming.yaml",
"chars": 323,
"preview": "name: \"an_octopus_swimming\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.65, 0.65, 0.65]\n "
},
{
"path": "configs_prompts/assassin_riding_a_cow.yaml",
"chars": 305,
"preview": "name: \"assassin_riding_a_cow\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.6, 0.6, 0.6]\n "
},
{
"path": "configs_prompts/batman_riding_a_camel.yaml",
"chars": 305,
"preview": "name: \"batman_riding_a_camel\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.7, 0.7, 0.7]\n "
},
{
"path": "configs_prompts/deadpool_riding_a_cow.yaml",
"chars": 461,
"preview": "name: \"deadpool_riding_a_cow\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts/son_goku_riding_an_elephant.yaml",
"chars": 337,
"preview": "name: \"son_goku_riding_an_elephant\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.65, 0.65, 0.6"
},
{
"path": "configs_prompts/spiderman_riding_a_donkey.yaml",
"chars": 329,
"preview": "name: \"spiderman_riding_a_donkey\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n "
},
{
"path": "configs_prompts_static/a_firepit.yaml",
"chars": 281,
"preview": "name: \"a_firepit\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n "
},
{
"path": "configs_prompts_static/a_lamppost.yaml",
"chars": 293,
"preview": "name: \"a_lamppost\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0.5, 0.5]\n trajs:\n "
},
{
"path": "configs_prompts_static/water_spraying_out_of_a_firehydrant.yaml",
"chars": 316,
"preview": "name: \"water_spraying_out_of_a_firehydrant\"\nconfig_scene:\n traj_kwargs:\n # Object 0\n -\n proxy_size: [0.5, 0."
},
{
"path": "launch.py",
"chars": 5844,
"preview": "import argparse\nimport logging\nimport os\nimport shutil\nimport sys\n\n\nclass ColoredFilter(logging.Filter):\n \"\"\"\n A l"
},
{
"path": "load/make_prompt_library.py",
"chars": 26490,
"preview": "import json\n\ndreamfusion_gallery_video_names = [\n \"a_20-sided_die_made_out_of_glass.mp4\",\n \"a_bald_eagle_carved_ou"
},
{
"path": "load/prompt_library.json",
"chars": 24482,
"preview": "{\n \"dreamfusion\": [\n \"a 20-sided die made out of glass\",\n \"a bald eagle carved out of wood\",\n \"a banana peelin"
},
{
"path": "load/shapes/README.md",
"chars": 530,
"preview": "# Shape Credits\n\n- `animal.obj` - Ido Richardson\n- `hand_prismatic.obj` - Ido Richardson\n- `potion.obj` - Ido Richardson"
},
{
"path": "load/shapes/animal.obj",
"chars": 286671,
"preview": "####\n#\n# OBJ File Generated by Meshlab\n#\n####\n# Object animal_legs_head.obj\n#\n# Vertices: 1536\n# Faces: 3068\n#\n####\nmtll"
},
{
"path": "load/shapes/blub.obj",
"chars": 812665,
"preview": "v 0.30383 -0.334455 -0.339867\nv 0.407015 -0.335278 0.283284\nv 0.311802 0.281543 -0.353833\nv 0.363848 0.295688 0.319229\nv"
},
{
"path": "load/shapes/cabin.obj",
"chars": 485425,
"preview": "####\n#\n# OBJ File Generated by Meshlab\n#\n####\n# Object wardrobe_0089.obj\n#\n# Vertices: 3752\n# Faces: 7500\n#\n####\nvn -0.4"
},
{
"path": "load/shapes/env_sphere.obj",
"chars": 328735,
"preview": "####\n#\n# OBJ File Generated by Meshlab\n#\n####\n# Object env_sphere.obj\n#\n# Vertices: 2562\n# Faces: 5120\n#\n####\nvn -2.5128"
},
{
"path": "load/shapes/hand_prismatic.obj",
"chars": 241027,
"preview": "# WaveFront *.obj file (generated by Autodesk ATF)\n\nmtllib hand_b.mtl\n\ng hand_b\n\nv -30.000000 -70.000000 20.000000\nv -33"
},
{
"path": "load/shapes/human.obj",
"chars": 423499,
"preview": "# object LowPolyMan1\r\n\r\n\r\nv -0.127664 0.162202 -0.094226\r\nv -0.127866 0.180706 -0.164116\r\nv -0.053822 0.221397 -0.004"
},
{
"path": "load/shapes/nascar.obj",
"chars": 483133,
"preview": "####\n#\n# OBJ File Generated by Meshlab\n#\n####\n# Object car_0016.obj\n#\n# Vertices: 3750\n# Faces: 7500\n#\n####\nvn 0.830884 "
},
{
"path": "load/shapes/potion.obj",
"chars": 633419,
"preview": "# WaveFront *.obj file (generated by Autodesk ATF)\n\nmtllib potion_b.mtl\n\ng potion_b\n\nv 24.830000 100.000000 -0.000000\nv "
},
{
"path": "load/shapes/teddy.obj",
"chars": 391364,
"preview": "# Blender v2.83.4 OBJ File: ''\n# www.blender.org\nmtllib teddy2.mtl\no teddy\nv 1.588069 6.039002 -0.645594\nv 1.672554 5.51"
},
{
"path": "load/tets/generate_tets.py",
"chars": 1958,
"preview": "# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION, its affiliates a"
},
{
"path": "load/zero123/download.sh",
"chars": 75,
"preview": "wget https://huggingface.co/cvlab/zero123-weights/resolve/main/105000.ckpt\n"
},
{
"path": "load/zero123/sd-objaverse-finetune-c_concat-256.yaml",
"chars": 3096,
"preview": "model:\n base_learning_rate: 1.0e-04\n target: extern.ldm_zero123.models.diffusion.ddpm.LatentDiffusion\n params:\n li"
},
{
"path": "requirements-dev.txt",
"chars": 29,
"preview": "black\nmypy\npylint\npre-commit\n"
},
{
"path": "requirements.txt",
"chars": 615,
"preview": "lightning==2.0.0\nomegaconf==2.3.0\njaxtyping\ntypeguard\ngit+https://github.com/KAIR-BAIR/nerfacc.git@v0.5.2\ngit+https://gi"
},
{
"path": "threestudio/__init__.py",
"chars": 574,
"preview": "__modules__ = {}\n\n\ndef register(name):\n def decorator(cls):\n __modules__[name] = cls\n return cls\n\n r"
},
{
"path": "threestudio/data/__init__.py",
"chars": 90,
"preview": "from . import co3d, image, uncond, multiview, random_multiview, single_multiview_combined\n"
},
{
"path": "threestudio/data/co3d.py",
"chars": 25947,
"preview": "import gzip\nimport json\nimport os\nimport warnings\nfrom dataclasses import dataclass, field\nfrom typing import List\n\nimpo"
},
{
"path": "threestudio/data/image.py",
"chars": 9101,
"preview": "import math\nimport os\nfrom dataclasses import dataclass, field\n\nimport cv2\nimport numpy as np\nimport pytorch_lightning a"
},
{
"path": "threestudio/data/multiview.py",
"chars": 16100,
"preview": "import json\nimport math\nimport os\nimport random\nfrom dataclasses import dataclass\n\nimport cv2\nimport numpy as np\nimport "
},
{
"path": "threestudio/data/random_multiview.py",
"chars": 15167,
"preview": "import math\nimport random\nimport os\nfrom dataclasses import dataclass, field\n\nimport cv2\nimport numpy as np\nimport pytor"
},
{
"path": "threestudio/data/single_multiview_combined.py",
"chars": 4489,
"preview": "import math\nimport random\nimport os\nfrom dataclasses import dataclass, field\n\nimport cv2\nimport numpy as np\nimport pytor"
},
{
"path": "threestudio/data/uncond.py",
"chars": 30920,
"preview": "import bisect\nimport math\nimport random\nfrom dataclasses import dataclass, field\n\nimport pytorch_lightning as pl\nimport "
},
{
"path": "threestudio/models/__init__.py",
"chars": 130,
"preview": "from . import (\n background,\n exporters,\n geometry,\n guidance,\n materials,\n prompt_processors,\n ren"
},
{
"path": "threestudio/models/background/__init__.py",
"chars": 120,
"preview": "from . import (\n base,\n neural_environment_map_background,\n solid_color_background,\n textured_background,\n)\n"
},
{
"path": "threestudio/models/background/base.py",
"chars": 497,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/background/neural_environment_map_background.py",
"chars": 2591,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/background/solid_color_background.py",
"chars": 1083,
"preview": "from dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport thr"
},
{
"path": "threestudio/models/background/textured_background.py",
"chars": 1749,
"preview": "from dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport thr"
},
{
"path": "threestudio/models/estimators.py",
"chars": 4271,
"preview": "from typing import Callable, List, Optional, Tuple\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typ"
},
{
"path": "threestudio/models/exporters/__init__.py",
"chars": 34,
"preview": "from . import base, mesh_exporter\n"
},
{
"path": "threestudio/models/exporters/base.py",
"chars": 1350,
"preview": "from dataclasses import dataclass\n\nimport threestudio\nfrom threestudio.models.background.base import BaseBackground\nfrom"
},
{
"path": "threestudio/models/exporters/mesh_exporter.py",
"chars": 5892,
"preview": "from dataclasses import dataclass, field\n\nimport cv2\nimport numpy as np\nimport torch\n\nimport threestudio\nfrom threestudi"
},
{
"path": "threestudio/models/geometry/__init__.py",
"chars": 84,
"preview": "from . import base, implicit_sdf, implicit_volume, tetrahedra_sdf_grid, volume_grid\n"
},
{
"path": "threestudio/models/geometry/base.py",
"chars": 7484,
"preview": "from dataclasses import dataclass, field\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
},
{
"path": "threestudio/models/geometry/implicit_sdf.py",
"chars": 12637,
"preview": "import os\nfrom dataclasses import dataclass, field\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.n"
},
{
"path": "threestudio/models/geometry/implicit_volume.py",
"chars": 11606,
"preview": "from dataclasses import dataclass, field\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
},
{
"path": "threestudio/models/geometry/tetrahedra_sdf_grid.py",
"chars": 10157,
"preview": "from dataclasses import dataclass, field\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
},
{
"path": "threestudio/models/geometry/volume_grid.py",
"chars": 6779,
"preview": "from dataclasses import dataclass, field\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
},
{
"path": "threestudio/models/guidance/__init__.py",
"chars": 341,
"preview": "from . import (\n deep_floyd_guidance,\n deep_floyd_vsd_guidance,\n stable_diffusion_guidance,\n stable_diffusio"
},
{
"path": "threestudio/models/guidance/deep_floyd_guidance.py",
"chars": 10111,
"preview": "from dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom diffus"
},
{
"path": "threestudio/models/guidance/deep_floyd_vsd_guidance.py",
"chars": 23996,
"preview": "import random\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch."
},
{
"path": "threestudio/models/guidance/multiview_diffusion_guidance.py",
"chars": 8013,
"preview": "import sys\n\nfrom dataclasses import dataclass, field\n\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nimport"
},
{
"path": "threestudio/models/guidance/stable_diffusion_guidance.py",
"chars": 11804,
"preview": "from dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom diffus"
},
{
"path": "threestudio/models/guidance/stable_diffusion_vsd_guidance.py",
"chars": 25977,
"preview": "import random\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch."
},
{
"path": "threestudio/models/guidance/svd_guidance.py",
"chars": 12807,
"preview": "import torch\n\nfrom diffusers import StableVideoDiffusionPipeline, DDIMScheduler\n# from diffusers.src.diffusers.pipleines"
},
{
"path": "threestudio/models/guidance/video_stable_diffusion_guidance.py",
"chars": 45897,
"preview": "from dataclasses import dataclass, field\nimport inspect\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport"
},
{
"path": "threestudio/models/guidance/video_stable_diffusion_vsd_guidance.py",
"chars": 36395,
"preview": "import random\nimport inspect\nfrom tqdm import tqdm\nfrom contextlib import contextmanager\nfrom dataclasses import datacla"
},
{
"path": "threestudio/models/guidance/videocrafter/.gitignore",
"chars": 66,
"preview": ".DS_Store\n*pyc\n.vscode\n__pycache__\n*.egg-info\n\ncheckpoints\nresults"
},
{
"path": "threestudio/models/guidance/videocrafter/License",
"chars": 24185,
"preview": "This license applies to the source codes that are open sourced in connection with the VideoCrafter1.\r\n\r\nCopyright (C) 20"
},
{
"path": "threestudio/models/guidance/videocrafter/README.md",
"chars": 9799,
"preview": "\n## ___***VideoCrafter2: Overcoming Data Limitations for High-Quality Video Diffusion Models***___\n\n<a href='https://ail"
},
{
"path": "threestudio/models/guidance/videocrafter/cog.yaml",
"chars": 613,
"preview": "# Configuration for Cog ⚙️\n# Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md\n\nbuild:\n gpu: true\n sy"
},
{
"path": "threestudio/models/guidance/videocrafter/configs/inference_i2v_512_v1.0.yaml",
"chars": 2037,
"preview": "model:\n target: lvdm.models.ddpm3d.LatentVisualDiffusion\n params:\n linear_start: 0.00085\n linear_end: 0.012\n "
},
{
"path": "threestudio/models/guidance/videocrafter/configs/inference_t2v_1024_v1.0.yaml",
"chars": 1852,
"preview": "model:\n target: lvdm.models.ddpm3d.LatentDiffusion\n params:\n linear_start: 0.00085\n linear_end: 0.012\n num_ti"
},
{
"path": "threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v1.0.yaml",
"chars": 1784,
"preview": "model:\n target: lvdm.models.ddpm3d.LatentDiffusion\n params:\n linear_start: 0.00085\n linear_end: 0.012\n num_ti"
},
{
"path": "threestudio/models/guidance/videocrafter/configs/inference_t2v_512_v2.0.yaml",
"chars": 1844,
"preview": "model:\n target: lvdm.models.ddpm3d.LatentDiffusion\n params:\n linear_start: 0.00085\n linear_end: 0.012\n num_ti"
},
{
"path": "threestudio/models/guidance/videocrafter/gradio_app.py",
"chars": 2874,
"preview": "import os\nimport sys\nimport gradio as gr\nfrom scripts.gradio.t2v_test import Text2Video\nsys.path.insert(1, os.path.join("
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/basics.py",
"chars": 2849,
"preview": "# adopted from\n# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py\n# and\n#"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/common.py",
"chars": 2800,
"preview": "import math\nfrom inspect import isfunction\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\n\n\ndef gath"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/distributions.py",
"chars": 3043,
"preview": "import torch\nimport numpy as np\n\n\nclass AbstractDistribution:\n def sample(self):\n raise NotImplementedError()\n"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/ema.py",
"chars": 2982,
"preview": "import torch\nfrom torch import nn\n\n\nclass LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates="
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/models/autoencoder.py",
"chars": 8474,
"preview": "import os\nfrom contextlib import contextmanager\nimport torch\nimport numpy as np\nfrom einops import rearrange\nimport torc"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/models/ddpm3d.py",
"chars": 33369,
"preview": "\"\"\"\nwild mixture of\nhttps://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/models/samplers/ddim.py",
"chars": 17129,
"preview": "import numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom lvdm.models.utils_diffusion import make_ddim_sampling_paramet"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/models/utils_diffusion.py",
"chars": 4604,
"preview": "import math\nimport numpy as np\nfrom einops import repeat\nimport torch\nimport torch.nn.functional as F\n\n\ndef timestep_emb"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/attention.py",
"chars": 19456,
"preview": "from functools import partial\nimport torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\nfrom einops impo"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/encoders/condition.py",
"chars": 14630,
"preview": "import torch\nimport torch.nn as nn\nfrom torch.utils.checkpoint import checkpoint\nimport kornia\nimport open_clip\nfrom tra"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/encoders/ip_resampler.py",
"chars": 4429,
"preview": "# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py\nimport math\nimport"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/networks/ae_modules.py",
"chars": 34246,
"preview": "# pytorch_diffusion + derived encoder decoder\nimport math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom ein"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/networks/openaimodel3d.py",
"chars": 23963,
"preview": "from functools import partial\nfrom abc import abstractmethod\nimport torch\nimport torch.nn as nn\nfrom einops import rearr"
},
{
"path": "threestudio/models/guidance/videocrafter/lvdm/modules/x_transformer.py",
"chars": 20159,
"preview": "\"\"\"shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers\"\"\"\nfrom functools import partial\nf"
},
{
"path": "threestudio/models/guidance/videocrafter/predict.py",
"chars": 5564,
"preview": "# Prediction interface for Cog ⚙️\n# https://github.com/replicate/cog/blob/main/docs/python.md\n\n\nimport os\nimport sys\nimp"
},
{
"path": "threestudio/models/guidance/videocrafter/prompts/test_prompts.txt",
"chars": 76,
"preview": "\na dog riding a skateboard\na unicorn running\na corgi running\na camel walking"
},
{
"path": "threestudio/models/guidance/videocrafter/requirements.txt",
"chars": 323,
"preview": "decord==0.6.0\neinops==0.3.0\nimageio==2.9.0\n# numpy==1.24.2\n# omegaconf==2.1.1\nopencv_python\n# pandas==2.0.0\nPillow==9.5."
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/evaluation/ddp_wrapper.py",
"chars": 1481,
"preview": "import datetime\r\nimport argparse, importlib\r\nfrom pytorch_lightning import seed_everything\r\n\r\nimport torch\r\nimport torch"
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/evaluation/funcs.py",
"chars": 8602,
"preview": "import os, sys, glob\r\nimport numpy as np\r\nfrom collections import OrderedDict\r\nfrom decord import VideoReader, cpu\r\nimpo"
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/evaluation/inference.py",
"chars": 6986,
"preview": "import argparse, os, sys, glob, yaml, math, random\r\nimport datetime, time\r\nimport numpy as np\r\nfrom omegaconf import Ome"
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/gradio/i2v_test.py",
"chars": 3703,
"preview": "import os\nimport time\nfrom omegaconf import OmegaConf\nimport torch\nfrom scripts.evaluation.funcs import load_model_check"
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/gradio/t2v_test.py",
"chars": 3291,
"preview": "import os\nimport time\nfrom omegaconf import OmegaConf\nimport torch\nfrom scripts.evaluation.funcs import load_model_check"
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/run_image2video.sh",
"chars": 541,
"preview": "name=\"i2v_512_test\"\n\nckpt='checkpoints/i2v_512_v1/model.ckpt'\nconfig='configs/inference_i2v_512_v1.0.yaml'\n\nprompt_file="
},
{
"path": "threestudio/models/guidance/videocrafter/scripts/run_text2video.sh",
"chars": 473,
"preview": "name=\"base_512_v2\"\n\nckpt='/nfs/sbahmani/ckpts/three/videocrafter2.ckpt'\nconfig='configs/inference_t2v_512_v2.0.yaml'\n\npr"
},
{
"path": "threestudio/models/guidance/videocrafter/utils/utils.py",
"chars": 2171,
"preview": "import importlib\nimport numpy as np\nimport cv2\nimport torch\nimport torch.distributed as dist\n\n\ndef count_params(model, v"
},
{
"path": "threestudio/models/guidance/videocrafter_guidance.py",
"chars": 13295,
"preview": "from dataclasses import dataclass, field\nimport inspect\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport"
},
{
"path": "threestudio/models/guidance/zero123_guidance.py",
"chars": 16231,
"preview": "import importlib\nimport os\nfrom dataclasses import dataclass, field\n\nimport cv2\nimport numpy as np\nimport torch\nimport t"
},
{
"path": "threestudio/models/guidance/zeroscope_guidance.py",
"chars": 16627,
"preview": "from dataclasses import dataclass, field\nimport inspect\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport"
},
{
"path": "threestudio/models/isosurface.py",
"chars": 9169,
"preview": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport threestudio\nfrom threestud"
},
{
"path": "threestudio/models/materials/__init__.py",
"chars": 146,
"preview": "from . import (\n base,\n diffuse_with_point_light_material,\n neural_radiance_material,\n no_material,\n sd_l"
},
{
"path": "threestudio/models/materials/base.py",
"chars": 593,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/materials/diffuse_with_point_light_material.py",
"chars": 4577,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/materials/neural_radiance_material.py",
"chars": 1925,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/materials/no_material.py",
"chars": 2165,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/materials/sd_latent_adapter_material.py",
"chars": 1168,
"preview": "import random\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/models/mesh.py",
"chars": 10428,
"preview": "from __future__ import annotations\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nimport threestudio\n"
},
{
"path": "threestudio/models/networks.py",
"chars": 20328,
"preview": "import math\n\nimport tinycudann as tcnn\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy a"
},
{
"path": "threestudio/models/prompt_processors/__init__.py",
"chars": 151,
"preview": "from . import base, deepfloyd_prompt_processor, stable_diffusion_prompt_processor, zeroscope_diffusion_prompt_processor,"
},
{
"path": "threestudio/models/prompt_processors/base.py",
"chars": 11523,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn"
},
{
"path": "threestudio/models/prompt_processors/deepfloyd_prompt_processor.py",
"chars": 3283,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nfrom diffusers import IFPipe"
},
{
"path": "threestudio/models/prompt_processors/stable_diffusion_prompt_processor.py",
"chars": 3375,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nfrom transformers import Aut"
},
{
"path": "threestudio/models/prompt_processors/videocrafter_prompt_processor.py",
"chars": 3568,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nfrom transformers import CLI"
},
{
"path": "threestudio/models/prompt_processors/zero123_prompt_processor.py",
"chars": 462,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport threestudio\nfrom threestudio.models.prompt_processors.ba"
},
{
"path": "threestudio/models/prompt_processors/zeroscope_diffusion_prompt_processor.py",
"chars": 3380,
"preview": "import json\nimport os\nfrom dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nfrom transformers import CLI"
},
{
"path": "threestudio/models/renderers/__init__.py",
"chars": 140,
"preview": "from . import (\n base,\n mask_nerf_renderer,\n stable_nerf_renderer_multi,\n stable_nerf_renderer,\n mask_ner"
},
{
"path": "threestudio/models/renderers/base.py",
"chars": 2207,
"preview": "from dataclasses import dataclass\n\nimport nerfacc\nimport torch\nimport torch.nn.functional as F\n\nimport threestudio\nfrom "
},
{
"path": "threestudio/models/renderers/mask_nerf_renderer.py",
"chars": 31193,
"preview": "from dataclasses import dataclass, field\nfrom functools import partial\n\nimport nerfacc\nimport threestudio\nimport torch\ni"
},
{
"path": "threestudio/models/renderers/mask_nerf_renderer_multi.py",
"chars": 16955,
"preview": "from dataclasses import dataclass\n\nimport nerfacc\nimport threestudio\nimport torch\nfrom threestudio.models.background.bas"
},
{
"path": "threestudio/models/renderers/stable_nerf_renderer.py",
"chars": 5972,
"preview": "from dataclasses import dataclass\nfrom copy import copy\n\nimport threestudio\nimport torch\nimport torch.nn.functional as F"
},
{
"path": "threestudio/models/renderers/stable_nerf_renderer_multi.py",
"chars": 6019,
"preview": "from dataclasses import dataclass\n\nfrom copy import copy\nimport threestudio\nimport torch\nimport torch.nn.functional as F"
},
{
"path": "threestudio/systems/__init__.py",
"chars": 27,
"preview": "from . import (\n tc4d\n)\n"
},
{
"path": "threestudio/systems/base.py",
"chars": 12229,
"preview": "import os\nfrom dataclasses import dataclass, field\n\nimport pytorch_lightning as pl\n\nimport threestudio\nfrom threestudio."
},
{
"path": "threestudio/systems/optimizers.py",
"chars": 11203,
"preview": "# Copyright 2022 Garena Online Private Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you"
},
{
"path": "threestudio/systems/tc4d.py",
"chars": 39672,
"preview": "from dataclasses import dataclass\n\nimport torch\nimport numpy as np\nimport copy\n\nimport threestudio\nfrom threestudio.syst"
},
{
"path": "threestudio/systems/utils.py",
"chars": 3085,
"preview": "import sys\nimport warnings\nfrom bisect import bisect_right\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import l"
},
{
"path": "threestudio/utils/__init__.py",
"chars": 19,
"preview": "from . import base\n"
},
{
"path": "threestudio/utils/base.py",
"chars": 3303,
"preview": "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\n\nfrom threestudio.utils.config import parse_struct"
},
{
"path": "threestudio/utils/bounding_boxes.py",
"chars": 4265,
"preview": "import torch\n\ndef scale_and_shift_box(size_min, size_max, rot_angle, translation, device, grid_size=32, num_vertices=32,"
},
{
"path": "threestudio/utils/callbacks.py",
"chars": 4010,
"preview": "import os\nimport shutil\nimport subprocess\n\nimport pytorch_lightning\n\nfrom threestudio.utils.config import dump_config\nfr"
},
{
"path": "threestudio/utils/config.py",
"chars": 3670,
"preview": "import os\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\n\nfrom omegaconf import OmegaConf\n\nimpor"
},
{
"path": "threestudio/utils/config_scene.py",
"chars": 1038,
"preview": "from dataclasses import dataclass, field\n\nfrom omegaconf import OmegaConf\n\nfrom threestudio.utils.typing import *\n\n\n@dat"
},
{
"path": "threestudio/utils/misc.py",
"chars": 3251,
"preview": "import gc\nimport os\nimport re\n\nimport tinycudann as tcnn\nimport torch\nfrom packaging import version\n\nfrom threestudio.ut"
},
{
"path": "threestudio/utils/object_trajectory.py",
"chars": 14342,
"preview": "import threestudio\nfrom threestudio.utils.typing import *\nfrom threestudio.utils.bounding_boxes import scale_and_shift_b"
},
{
"path": "threestudio/utils/ops.py",
"chars": 16643,
"preview": "from collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as"
},
{
"path": "threestudio/utils/rasterize.py",
"chars": 2645,
"preview": "import nvdiffrast.torch as dr\nimport torch\n\nfrom threestudio.utils.typing import *\n\n\nclass NVDiffRasterizerContext:\n "
},
{
"path": "threestudio/utils/saving.py",
"chars": 21141,
"preview": "import json\nimport os\nimport re\nimport shutil\nfrom PIL import Image\n\nimport cv2\nimport imageio\nimport matplotlib.pyplot "
},
{
"path": "threestudio/utils/typing.py",
"chars": 1081,
"preview": "\"\"\"\nThis module contains type annotations for the project, using\n1. Python type hints (https://docs.python.org/3/library"
},
{
"path": "train.sh",
"chars": 2053,
"preview": "seed=0\ngpu=0\nexp_root_dir=/path/to\n\n############### Trajectory-conditioned generation\nscene_setup_path=configs_prompts/a"
}
]
// ... and 3 more files (download for full content)
About this extraction
This page contains the full source code of the sherwinbahmani/tc4d GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 182 files (4.9 MB), approximately 1.3M tokens, and a symbol index with 1184 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.