Full Code of a1600012888/PhysDreamer for AI

main 95beb71f9f1b cached
176 files
1.4 MB
394.3k tokens
1427 symbols
1 requests
Download .txt
Showing preview only (1,528K chars total). Download the full file or copy to clipboard to get everything.
Repository: a1600012888/PhysDreamer
Branch: main
Commit: 95beb71f9f1b
Files: 176
Total size: 1.4 MB

Directory structure:
gitextract_1v1oa6eg/

├── .gitignore
├── README.md
├── physdreamer/
│   ├── field_components/
│   │   ├── encoding.py
│   │   └── mlp.py
│   ├── fields/
│   │   ├── mul_offset_field.py
│   │   ├── mul_se3_field.py
│   │   ├── offset_field.py
│   │   ├── se3_field.py
│   │   └── triplane_field.py
│   ├── gaussian_3d/
│   │   ├── README.md
│   │   ├── arguments/
│   │   │   └── __init__.py
│   │   ├── gaussian_renderer/
│   │   │   ├── __init__.py
│   │   │   ├── depth_uv_render.py
│   │   │   ├── feat_render.py
│   │   │   ├── flow_depth_render.py
│   │   │   └── render.py
│   │   ├── scene/
│   │   │   ├── __init__.py
│   │   │   ├── cameras.py
│   │   │   ├── colmap_loader.py
│   │   │   ├── dataset_readers.py
│   │   │   ├── gaussian_model.py
│   │   │   ├── mesh.py
│   │   │   └── mesh_utils.py
│   │   └── utils/
│   │       ├── camera_utils.py
│   │       ├── general_utils.py
│   │       ├── graphics_utils.py
│   │       ├── image_utils.py
│   │       ├── loss_utils.py
│   │       ├── rigid_body_utils.py
│   │       ├── sh_utils.py
│   │       └── system_utils.py
│   ├── losses/
│   │   └── smoothness_loss.py
│   ├── operators/
│   │   ├── dct.py
│   │   ├── np_operators.py
│   │   └── rotation.py
│   ├── utils/
│   │   ├── camera_utils.py
│   │   ├── colmap_utils.py
│   │   ├── config.py
│   │   ├── img_utils.py
│   │   ├── io_utils.py
│   │   ├── optimizer.py
│   │   ├── print_utils.py
│   │   ├── pytorch_mssim.py
│   │   ├── svd_helpper.py
│   │   └── torch_utils.py
│   └── warp_mpm/
│       ├── README.md
│       ├── gaussian_sim_utils.py
│       ├── mpm_data_structure.py
│       ├── mpm_solver_diff.py
│       ├── mpm_utils.py
│       └── warp_utils.py
├── projects/
│   ├── inference/
│   │   ├── README.md
│   │   ├── config_demo.py
│   │   ├── configs/
│   │   │   ├── alocasia.py
│   │   │   ├── carnation.py
│   │   │   ├── hat.py
│   │   │   └── telephone.py
│   │   ├── demo.py
│   │   ├── local_utils.py
│   │   └── run.sh
│   └── uncleaned_train/
│       ├── .gitignore
│       ├── README.md
│       ├── exp_motion/
│       │   └── train/
│       │       ├── config.yml
│       │       ├── config_demo.py
│       │       ├── convert_gaussian_to_mesh.py
│       │       ├── fast_train_velocity.py
│       │       ├── interface.py
│       │       ├── local_utils.py
│       │       ├── model_config.py
│       │       └── train_material.py
│       ├── motionrep/
│       │   ├── datatools/
│       │   │   ├── _convert_fbx_to_mesh.py
│       │   │   ├── blender_deforming_things4d.py
│       │   │   ├── blender_install_packages.py
│       │   │   ├── blender_render_imgs.py
│       │   │   ├── deforming_things4d.py
│       │   │   ├── dragon_animation.py
│       │   │   ├── fbx_to_mesh.py
│       │   │   ├── fbx_to_mesh_flag.py
│       │   │   ├── render_blender_annimations.py
│       │   │   ├── render_fbx_first_frame.py
│       │   │   ├── render_obj.py
│       │   │   ├── render_obj_external_texture.py
│       │   │   ├── test_colmap_camera.py
│       │   │   └── transform_obj_for_blender.py
│       │   ├── diffusion/
│       │   │   ├── builder.py
│       │   │   ├── discretizer.py
│       │   │   ├── draft.py
│       │   │   ├── gaussian_diffusion.py
│       │   │   ├── losses.py
│       │   │   ├── resample.py
│       │   │   ├── respace.py
│       │   │   ├── sigma_sampling.py
│       │   │   ├── sv_diffusion_engine.py
│       │   │   ├── svd_conditioner.py
│       │   │   ├── svd_sds_engine.py
│       │   │   ├── svd_sds_engine_backup.py
│       │   │   ├── svd_sds_wdecoder_engine.py
│       │   │   └── video_diffusion_loss.py
│       │   ├── field_components/
│       │   │   ├── encoding.py
│       │   │   └── mlp.py
│       │   ├── fields/
│       │   │   ├── dct_trajectory_field.py
│       │   │   ├── discrete_field.py
│       │   │   ├── mul_offset_field.py
│       │   │   ├── mul_se3_field.py
│       │   │   ├── offset_field.py
│       │   │   ├── se3_field.py
│       │   │   ├── triplane_field.py
│       │   │   └── video_triplane_disp_field.py
│       │   ├── gaussian_3d/
│       │   │   ├── arguments/
│       │   │   │   └── __init__.py
│       │   │   ├── gaussian_renderer/
│       │   │   │   ├── __init__.py
│       │   │   │   ├── depth_uv_render.py
│       │   │   │   ├── feat_render.py
│       │   │   │   ├── flow_depth_render.py
│       │   │   │   ├── motion_renderer.py
│       │   │   │   └── render.py
│       │   │   ├── scene/
│       │   │   │   ├── __init__.py
│       │   │   │   ├── cameras.py
│       │   │   │   ├── colmap_loader.py
│       │   │   │   ├── dataset_readers.py
│       │   │   │   ├── gaussian_model.py
│       │   │   │   ├── mesh.py
│       │   │   │   └── mesh_utils.py
│       │   │   └── utils/
│       │   │       ├── camera_utils.py
│       │   │       ├── general_utils.py
│       │   │       ├── graphics_utils.py
│       │   │       ├── image_utils.py
│       │   │       ├── loss_utils.py
│       │   │       ├── rigid_body_utils.py
│       │   │       ├── sh_utils.py
│       │   │       └── system_utils.py
│       │   ├── losses/
│       │   │   ├── se3_loss.py
│       │   │   └── smoothness_loss.py
│       │   ├── operators/
│       │   │   ├── dct.py
│       │   │   ├── np_operators.py
│       │   │   └── rotation.py
│       │   └── utils/
│       │       ├── camera_utils.py
│       │       ├── colmap_utils.py
│       │       ├── config.py
│       │       ├── dct.py
│       │       ├── flow_utils.py
│       │       ├── img_utils.py
│       │       ├── io_utils.py
│       │       ├── optimizer.py
│       │       ├── peft_utils.py
│       │       ├── print_utils.py
│       │       ├── pytorch_mssim.py
│       │       ├── svd_helpper.py
│       │       └── torch_utils.py
│       └── thirdparty_code/
│           └── warp_mpm/
│               ├── backup/
│               │   ├── convert_gaussian_to_mesh.py
│               │   ├── diff_warp_utils.py
│               │   ├── engine_utils.py
│               │   ├── grad_test.py
│               │   ├── mpm_solver_warp.py
│               │   ├── mpm_solver_warp_diff.py
│               │   ├── mpm_utils.py
│               │   ├── run_gaussian.py
│               │   ├── run_gaussian_static.py
│               │   ├── run_sand.py
│               │   ├── sim_grad.py
│               │   ├── solver_grad_test.py
│               │   ├── test_inverse_sim.py
│               │   ├── test_sim.py
│               │   ├── warp_rewrite.py
│               │   └── warp_utils.py
│               ├── backup_jan10/
│               │   ├── gaussian_sim_utils.py
│               │   ├── mpm_data_structure.py
│               │   ├── mpm_solver_diff.py
│               │   ├── mpm_utils.py
│               │   └── warp_utils.py
│               ├── gaussian_sim_utils.py
│               ├── mpm_data_structure.py
│               ├── mpm_solver_diff.py
│               ├── mpm_utils.py
│               └── warp_utils.py
├── requirements.txt
└── setup.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
*.pyc

models/
data/
output/
wandb/


================================================
FILE: README.md
================================================
# PhysDreamer: Physics-Based Interaction with 3D Objects via Video Generation [[website](https://physdreamer.github.io/)]

![teaser-figure](figures/figure_teaser.png)

## Usage

### Setup environment

Install diff-gaussian-rasterization at: https://github.com/graphdeco-inria/diff-gaussian-rasterization
   
```bash
conda create -n physdreamer python
conda activate physdreamer

pip install -r requirements.txt

python setup.py install
```

### Download the scenes and optimized models from Hugging Face

Download the scenes and optimized velocity and material fields from: https://huggingface.co/datasets/YunjinZhang/PhysDreamer/tree/main

Put folders of these scenes to `data/physics_dreamer/xxx`, e.g. `data/physics_dreamer/carnations`

Put pretrained models to `./models`. 

See `dataset_dir` and `model_list` in  `inference/configs/carnation.py` to match the path of dataset and pretrained models. 


### Run inference

```bash
cd projects/inference
bash run.sh
```


## Acknowledgement
This codebase used lots of source code from: 
1. https://github.com/graphdeco-inria/gaussian-splatting
2. https://github.com/zeshunzong/warp-mpm
3. https://github.com/PingchuanMa/NCLaw

We thank the authors of these projects.


## Citations
```
@article{zhang2024physdreamer,
    title={{PhysDreamer}: Physics-Based Interaction with 3D Objects via Video Generation},
    author={Tianyuan Zhang and Hong-Xing Yu and Rundi Wu and
            Brandon Y. Feng and Changxi Zheng and Noah Snavely and Jiajun Wu and William T. Freeman},
    journal={arxiv},
    year={2024}
}
```


================================================
FILE: physdreamer/field_components/encoding.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Optional, Sequence, Tuple, List
from physdreamer.losses.smoothness_loss import (
    compute_plane_smoothness,
    compute_plane_tv,
)


class TemporalKplanesEncoding(nn.Module):
    """K-planes style spatio-temporal encoding.

    Keeps one learnable 2D feature plane per (spatial, time) coordinate pair
    -- (x, t), (y, t), (z, t) -- and bilinearly interpolates each plane at
    the query points.  Per-plane features are merged according to ``reduce``.

    Args:
        resolutions (Sequence[int]): xyzt resolutions, in the order [x, y, z, t].
        feat_dim: per-plane feature dimension.  When ``reduce == "cat"`` it is
            divided by 3 so the concatenated output stays close to ``feat_dim``.
        init_a: lower bound of the uniform plane initialization.
        init_b: upper bound of the uniform plane initialization.
        reduce: "sum", "product" or "cat" merge of the three plane features.
    """

    def __init__(
        self,
        resolutions: Sequence[int],
        feat_dim: int = 32,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce="sum",  # Literal["sum", "product", "cat"] = "sum",
    ):
        super().__init__()

        self.resolutions = resolutions

        if reduce == "cat":
            feat_dim = feat_dim // 3
        self.feat_dim = feat_dim

        self.reduce = reduce

        self.in_dim = 4

        self.plane_coefs = nn.ParameterList()

        # Index pairs into the (x, y, z, t) input: [(x, t), (y, t), (z, t)].
        self.coo_combs = [[0, 3], [1, 3], [2, 3]]
        for coo_comb in self.coo_combs:
            # Plane layout: [feat_dim, time_resolution, spatial_resolution].
            new_plane_coef = nn.Parameter(
                torch.empty(
                    [
                        self.feat_dim,
                        resolutions[coo_comb[1]],
                        resolutions[coo_comb[0]],
                    ]
                )
            )

            nn.init.uniform_(new_plane_coef, a=init_a, b=init_b)
            self.plane_coefs.append(new_plane_coef)

    def forward(self, inp: Float[Tensor, "*bs 4"]):
        """Interpolate all planes at ``inp``; coordinates expected in [-1, 1].

        Returns:
            [flattened_bs, feat_dim] features ([flattened_bs, feat_dim * 3]
            of the already-divided per-plane dim when ``reduce == "cat"``).
        """
        # Delegate to the stateless helper so there is a single implementation
        # of the interpolation/reduction logic (was duplicated before).
        return self.functional_forward(
            list(self.plane_coefs), inp, reduce=self.reduce, coo_combs=self.coo_combs
        )

    def compute_temporal_smoothness(
        self,
    ):
        """Sum the 1D temporal smoothness penalty over all planes."""
        ret_loss = 0.0

        for plane_coef in self.plane_coefs:
            ret_loss += compute_plane_smoothness(plane_coef)

        return ret_loss

    def compute_plane_tv(
        self,
    ):
        """Sum the total-variation penalty over all planes."""
        ret_loss = 0.0

        for plane_coef in self.plane_coefs:
            ret_loss += compute_plane_tv(plane_coef)

        return ret_loss

    def visualize(
        self,
    ) -> Tuple[Float[Tensor, "3 H W"]]:
        """Visualize the encoding as RGB images.

        Not implemented yet; currently returns None.
        """
        pass

    @staticmethod
    def functional_forward(
        plane_coefs: List[Float[Tensor, "feat_dim H W"]],
        inp: Float[Tensor, "*bs 4"],
        reduce: str = "sum",
        coo_combs: Optional[List[List[int]]] = None,
    ):
        """Stateless version of :meth:`forward` over externally supplied planes.

        Args:
            plane_coefs: one [feat_dim, H, W] plane per coordinate pair.
            inp: query coordinates in [-1, 1]; last dim indexed by coo_combs.
            reduce: "sum", "product" or "cat".
            coo_combs: coordinate index pairs; defaults to
                [(x, t), (y, t), (z, t)].
        """
        assert reduce in ["sum", "product", "cat"]
        if coo_combs is None:
            # Default created per call: avoids a shared mutable default argument.
            coo_combs = [[0, 3], [1, 3], [2, 3]]
        output = 1.0 if reduce == "product" else 0.0

        if reduce == "cat":
            output = []
        for ci, coo_comb in enumerate(coo_combs):
            grid = plane_coefs[ci].unsqueeze(0)  # [1, feature_dim, reso1, reso2]
            feat_dim = grid.shape[1]
            coords = inp[..., coo_comb].view(1, 1, -1, 2)  # [1, 1, flattened_bs, 2]

            interp = F.grid_sample(
                grid, coords, align_corners=True, padding_mode="border"
            )  # [1, output_dim, 1, flattened_bs]
            interp = interp.view(feat_dim, -1).T  # [flattened_bs, output_dim]

            if reduce == "product":
                output = output * interp
            elif reduce == "sum":
                output = output + interp
            elif reduce == "cat":
                output.append(interp)

        if reduce == "cat":
            # [flattened_bs, output_dim * 3]
            output = torch.cat(output, dim=-1)

        return output


class TriplanesEncoding(nn.Module):
    """Static tri-plane spatial encoding over (x, y), (x, z), (y, z) planes.

    Args:
        resolutions (Sequence[int]): xyz resolutions, in the order [x, y, z].
        feat_dim: per-plane feature dimension.  NOTE: unlike
            TemporalKplanesEncoding, ``feat_dim`` is NOT divided by 3 when
            ``reduce == "cat"``, so the concatenated output is feat_dim * 3.
        init_a: lower bound of the uniform plane initialization.
        init_b: upper bound of the uniform plane initialization.
        reduce: "sum", "product" or "cat" merge of the three plane features.
    """

    def __init__(
        self,
        resolutions: Sequence[int],
        feat_dim: int = 32,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce="sum",  # Literal["sum", "product", "cat"] = "sum",
    ):
        super().__init__()

        self.resolutions = resolutions

        # Deliberately no `feat_dim // 3` for "cat" here (the temporal
        # variant divides); the dead no-op assignment was removed.
        self.feat_dim = feat_dim

        self.reduce = reduce

        self.in_dim = 3

        self.plane_coefs = nn.ParameterList()

        # Index pairs into the (x, y, z) input: [(x, y), (x, z), (y, z)].
        self.coo_combs = [[0, 1], [0, 2], [1, 2]]
        for coo_comb in self.coo_combs:
            # Plane layout: [feat_dim, resolutions[c1], resolutions[c0]].
            new_plane_coef = nn.Parameter(
                torch.empty(
                    [
                        self.feat_dim,
                        resolutions[coo_comb[1]],
                        resolutions[coo_comb[0]],
                    ]
                )
            )

            nn.init.uniform_(new_plane_coef, a=init_a, b=init_b)
            self.plane_coefs.append(new_plane_coef)

    def forward(self, inp: Float[Tensor, "*bs 3"]):
        """Interpolate all planes at ``inp``; coordinates expected in [-1, 1].

        Returns:
            [flattened_bs, feat_dim] features ([flattened_bs, feat_dim * 3]
            when ``reduce == "cat"``).
        """
        output = 1.0 if self.reduce == "product" else 0.0
        if self.reduce == "cat":
            output = []
        for ci, coo_comb in enumerate(self.coo_combs):
            grid = self.plane_coefs[ci].unsqueeze(0)  # [1, feature_dim, reso1, reso2]
            coords = inp[..., coo_comb].view(1, 1, -1, 2)  # [1, 1, flattened_bs, 2]

            interp = F.grid_sample(
                grid, coords, align_corners=True, padding_mode="border"
            )  # [1, output_dim, 1, flattened_bs]
            interp = interp.view(self.feat_dim, -1).T  # [flattened_bs, output_dim]

            if self.reduce == "product":
                output = output * interp
            elif self.reduce == "sum":
                output = output + interp
            elif self.reduce == "cat":
                output.append(interp)

        if self.reduce == "cat":
            # [flattened_bs, output_dim * 3]
            output = torch.cat(output, dim=-1)

        return output

    def compute_plane_tv(
        self,
    ):
        """Sum the total-variation penalty over all planes."""
        ret_loss = 0.0

        for plane_coef in self.plane_coefs:
            ret_loss += compute_plane_tv(plane_coef)

        return ret_loss


class PlaneEncoding(nn.Module):
    """Single learnable 2D feature plane, bilinearly sampled at 2D queries.

    Args:
        resolutions (Sequence[int]): plane resolution as [y_res, x_res]
            (this encoding is 2D; the old docstring wrongly said "xyz").
        feat_dim: feature dimension of the plane.
        init_a: lower bound of the uniform plane initialization.
        init_b: upper bound of the uniform plane initialization.
    """

    def __init__(
        self,
        resolutions: Sequence[int],  # [y_res, x_res]
        feat_dim: int = 32,
        init_a: float = 0.1,
        init_b: float = 0.5,
    ):
        super().__init__()

        self.resolutions = resolutions

        self.feat_dim = feat_dim
        self.in_dim = 2

        self.plane_coefs = nn.ParameterList()

        self.coo_combs = [[0, 1]]
        for coo_comb in self.coo_combs:
            # Plane layout: [feat_dim, resolutions[1], resolutions[0]].
            new_plane_coef = nn.Parameter(
                torch.empty(
                    [
                        self.feat_dim,
                        resolutions[coo_comb[1]],
                        resolutions[coo_comb[0]],
                    ]
                )
            )

            nn.init.uniform_(new_plane_coef, a=init_a, b=init_b)
            self.plane_coefs.append(new_plane_coef)

    def forward(self, inp: Float[Tensor, "*bs 2"]):
        """Interpolate the plane at ``inp``; coordinates expected in [-1, 1].

        Returns:
            [flattened_bs, feat_dim] interpolated features.
        """
        # Exactly one plane / coordinate pair, so no reduction loop is needed.
        grid = self.plane_coefs[0].unsqueeze(0)  # [1, feature_dim, reso1, reso2]
        coords = inp[..., self.coo_combs[0]].view(1, 1, -1, 2)  # [1, 1, flattened_bs, 2]

        interp = F.grid_sample(
            grid, coords, align_corners=True, padding_mode="border"
        )  # [1, output_dim, 1, flattened_bs]
        return interp.view(self.feat_dim, -1).T  # [flattened_bs, output_dim]

    def compute_plane_tv(
        self,
    ):
        """Sum the total-variation penalty over all planes."""
        ret_loss = 0.0

        for plane_coef in self.plane_coefs:
            ret_loss += compute_plane_tv(plane_coef)

        return ret_loss


class TemporalNeRFEncoding(nn.Module):
    """Sinusoidal (NeRF-style) positional encoding.

    Frequencies are linearly spaced in [min_freq_exp, max_freq_exp], or
    2**freq spaced when ``log_scale`` is True.  Optionally appends the raw
    input to the encoded features.

    Args:
        in_dim: dimensionality of the input.
        num_frequencies: number of frequency bands.
        min_freq_exp: lowest frequency (exponent of 2 when ``log_scale``).
        max_freq_exp: highest frequency (exponent of 2 when ``log_scale``).
        log_scale: use 2**freq spacing instead of linear spacing.
        include_input: concatenate the raw input to the encoding.
    """

    def __init__(
        self,
        in_dim,  # : int,
        num_frequencies: int,
        min_freq_exp: float,
        max_freq_exp: float,
        log_scale: bool = False,
        include_input: bool = False,
    ) -> None:
        super().__init__()
        self.in_dim = in_dim
        self.num_frequencies = num_frequencies
        self.min_freq = min_freq_exp
        self.max_freq = max_freq_exp
        self.log_scale = log_scale
        self.include_input = include_input

    def get_out_dim(self) -> int:
        """Output feature dimension produced by :meth:`forward`.

        Raises:
            ValueError: if ``in_dim`` was constructed as None.
        """
        if self.in_dim is None:
            raise ValueError("Input dimension has not been set")
        out_dim = self.in_dim * self.num_frequencies * 2
        if self.include_input:
            out_dim += self.in_dim
        return out_dim

    def forward(
        self,
        in_tensor: Float[Tensor, "*bs input_dim"],
    ) -> Float[Tensor, "*bs output_dim"]:
        """Calculates NeRF encoding.

        Args:
            in_tensor: For best performance, the input tensor should be between 0 and 1.
        Returns:
            Sin/cos features in [-1, 1]; the raw input is appended when
            ``include_input`` is set, matching :meth:`get_out_dim`.
        """
        scaled_in_tensor = 2 * torch.pi * in_tensor  # scale to [0, 2pi]

        freqs = torch.linspace(
            self.min_freq, self.max_freq, self.num_frequencies, device=in_tensor.device
        )
        if self.log_scale:
            freqs = 2**freqs
        scaled_inputs = (
            scaled_in_tensor[..., None] * freqs
        )  # [..., "input_dim", "num_scales"]
        scaled_inputs = scaled_inputs.view(
            *scaled_inputs.shape[:-2], -1
        )  # [..., "input_dim" * "num_scales"]

        # sin(x + pi/2) == cos(x): one sin call yields both halves.
        encoded_inputs = torch.sin(
            torch.cat([scaled_inputs, scaled_inputs + torch.pi / 2.0], dim=-1)
        )
        if self.include_input:
            # Bug fix: get_out_dim() accounts for the raw input, but the
            # original forward never appended it, so downstream shapes broke
            # whenever include_input=True.
            encoded_inputs = torch.cat([encoded_inputs, in_tensor], dim=-1)
        return encoded_inputs


================================================
FILE: physdreamer/field_components/mlp.py
================================================
"""
Mostly from nerfstudio: https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/field_components/mlp.py
"""
from typing import Optional, Set, Tuple, Union

import torch
from jaxtyping import Float
from torch import Tensor, nn


class MLP(nn.Module):
    """Plain multi-layer perceptron with optional skip connections.

    Args:
        in_dim: input feature dimension (must be > 0).
        num_layers: total number of linear layers.
        layer_width: hidden width of every intermediate layer.
        out_dim: output dimension; defaults to ``layer_width``.
        skip_connections: layer indices whose input is concatenated with the
            network input; index 0 is not allowed.
        activation: non-linearity between layers (not applied after the last).
        out_activation: optional non-linearity on the final output.
        zero_init: zero-initialize the last layer so the MLP starts out as
            the zero function (useful for residual/offset heads).
    """

    def __init__(
        self,
        in_dim: int,
        num_layers: int,
        layer_width: int,
        out_dim: Optional[int] = None,
        skip_connections: Optional[Tuple[int]] = None,
        activation: Optional[nn.Module] = nn.ReLU(),
        out_activation: Optional[nn.Module] = None,
        zero_init: bool = False,
    ) -> None:
        super().__init__()
        self.in_dim = in_dim
        assert self.in_dim > 0
        self.out_dim = out_dim if out_dim is not None else layer_width
        self.num_layers = num_layers
        self.layer_width = layer_width
        self.skip_connections = skip_connections
        self._skip_connections: Set[int] = (
            set(skip_connections) if skip_connections else set()
        )
        self.activation = activation
        self.out_activation = out_activation
        self.net = None  # unused legacy attribute; kept for compatibility
        self.zero_init = zero_init

        self.build_nn_modules()

    def build_nn_modules(self) -> None:
        """Initialize multi-layer perceptron."""
        layers = []
        if self.num_layers == 1:
            # Degenerate case: a single linear map, no hidden layers.
            layers.append(nn.Linear(self.in_dim, self.out_dim))
        else:
            for i in range(self.num_layers - 1):
                if i == 0:
                    assert (
                        i not in self._skip_connections
                    ), "Skip connection at layer 0 doesn't make sense."
                    layers.append(nn.Linear(self.in_dim, self.layer_width))
                elif i in self._skip_connections:
                    # Skip layers consume [input, hidden] concatenated.
                    layers.append(
                        nn.Linear(self.layer_width + self.in_dim, self.layer_width)
                    )
                else:
                    layers.append(nn.Linear(self.layer_width, self.layer_width))
            layers.append(nn.Linear(self.layer_width, self.out_dim))
        self.layers = nn.ModuleList(layers)

        if self.zero_init:
            # Output starts at exactly zero regardless of input.
            torch.nn.init.zeros_(self.layers[-1].weight)
            torch.nn.init.zeros_(self.layers[-1].bias)

    def pytorch_fwd(
        self, in_tensor: Float[Tensor, "*bs in_dim"]
    ) -> Float[Tensor, "*bs out_dim"]:
        """Process input with a multilayer perceptron.

        Args:
            in_tensor: Network input

        Returns:
            MLP network output
        """
        x = in_tensor
        for i, layer in enumerate(self.layers):
            # as checked in `build_nn_modules`, 0 should not be in `_skip_connections`
            if i in self._skip_connections:
                x = torch.cat([in_tensor, x], -1)
            x = layer(x)
            if self.activation is not None and i < len(self.layers) - 1:
                x = self.activation(x)
        if self.out_activation is not None:
            x = self.out_activation(x)
        return x

    def forward(
        self, in_tensor: Float[Tensor, "*bs in_dim"]
    ) -> Float[Tensor, "*bs out_dim"]:
        """Alias for :meth:`pytorch_fwd`."""
        return self.pytorch_fwd(in_tensor)


================================================
FILE: physdreamer/fields/mul_offset_field.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Literal, Optional, Sequence, Tuple, List
from physdreamer.field_components.encoding import (
    TemporalKplanesEncoding,
    TriplanesEncoding,
)
from physdreamer.field_components.mlp import MLP
from physdreamer.operators.rotation import rotation_6d_to_matrix, quaternion_to_matrix
from physdreamer.data.scene_box import SceneBox


class MulTemporalKplanesOffsetfields(nn.Module):
    """Multiple Temporal Kplanes offset fields (not SE(3), despite the old doc).

        Decoder is shared, but plane coefs are different per dataset.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions_list: one kplane resolution spec per dataset,
            each in an order of [x, y, z, t].
        feat_dim: feature dim of each encoding.
        reduce: how plane features are merged inside each encoding.
        num_decoder_layers: depth of the shared MLP decoder.
        decoder_hidden_size: hidden width of the shared MLP decoder.
        add_spatial_triplane: additionally encode with a static (x, y, z)
            tri-plane and concatenate those features with the temporal ones.
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions_list: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce: Literal["sum", "product", "cat"] = "sum",
        num_decoder_layers=2,
        decoder_hidden_size=64,
        add_spatial_triplane: bool = True,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        self.output_dim = 3  # xyz offset per point

        # One temporal encoding per dataset; the MLP decoder below is shared.
        self.temporal_kplanes_encoding_list = nn.ModuleList(
            [
                TemporalKplanesEncoding(resolutions, feat_dim, init_a, init_b, reduce)
                for resolutions in resolutions_list
            ]
        )

        self.add_spatial_triplane = add_spatial_triplane
        if add_spatial_triplane:
            self.spatial_kplanes_encoding_list = nn.ModuleList(
                [
                    TriplanesEncoding(
                        resolutions[:-1], feat_dim, init_a, init_b, reduce
                    )
                    for resolutions in resolutions_list
                ]
            )
            # Temporal + spatial features are concatenated before decoding.
            feat_dim = feat_dim * 2

        self.decoder = MLP(
            feat_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.output_dim,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
        )

    def forward(
        self, inp: Float[Tensor, "*bs 4"], dataset_indx: Int[Tensor, "1"]
    ) -> Float[Tensor, "*bs 3"]:
        """Decode per-point offsets for dataset ``dataset_indx``.

        Args:
            inp: [bs, 4] points as (x, y, z, t); xyz in world space,
                t presumably in [0, 1] (it is remapped to [-1, 1] below).
            dataset_indx: which per-dataset encoding to use.

        Returns:
            [bs, 3] offsets (fixed annotation: this field returns offsets,
            not a (R, t) tuple as the old annotation claimed).
        """
        inpx, inpt = inp[:, :3], inp[:, 3:]

        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inpx, self.aabb) * 2.0 - 1.0

        inpt = inpt * 2.0 - 1.0

        inp = torch.cat([inpx, inpt], dim=-1)

        # for loop in batch dimension

        output = self.temporal_kplanes_encoding_list[dataset_indx](inp)

        if self.add_spatial_triplane:
            spatial_output = self.spatial_kplanes_encoding_list[dataset_indx](inp)
            output = torch.cat([output, spatial_output], dim=-1)

        output = self.decoder(output)

        return output

    def compute_smoothess_loss(
        self,
    ):
        """Return (plane TV loss, temporal smoothness loss) over all encodings.

        Name kept as-is (typo included) for backward compatibility.
        """
        temporal_smoothness_loss = 0.0
        for temporal_kplanes_encoding in self.temporal_kplanes_encoding_list:
            temporal_smoothness_loss += (
                temporal_kplanes_encoding.compute_temporal_smoothness()
            )

        smothness_loss = 0.0
        for temporal_kplanes_encoding in self.temporal_kplanes_encoding_list:
            smothness_loss += temporal_kplanes_encoding.compute_plane_tv()

        if self.add_spatial_triplane:
            for spatial_kplanes_encoding in self.spatial_kplanes_encoding_list:
                smothness_loss += spatial_kplanes_encoding.compute_plane_tv()

        return smothness_loss, temporal_smoothness_loss

    def compute_loss(
        self,
        inp: Float[Tensor, "*bs 4"],
        trajectory: Float[Tensor, "*bs 3"],
        loss_func,
        dataset_indx: int = 0,
    ):
        """Reconstruction loss between (input xyz + predicted offset) and trajectory.

        Bug fix: forward() requires ``dataset_indx``, but the original code
        called ``self(inp)`` without it, raising a TypeError.  The new
        parameter defaults to 0 to stay backward-compatible.
        """
        inpx, inpt = inp[:, :3], inp[:, 3:]

        output = self(inp, dataset_indx)

        rec_traj = inpx + output

        rec_loss = loss_func(rec_traj, trajectory)

        return rec_loss

    def arap_loss(self, inp):
        """As-rigid-as-possible regularizer.  Not implemented yet."""
        pass


================================================
FILE: physdreamer/fields/mul_se3_field.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Literal, Optional, Sequence, Tuple
from physdreamer.field_components.encoding import (
    TemporalKplanesEncoding,
    TriplanesEncoding,
)
from physdreamer.field_components.mlp import MLP
from physdreamer.operators.rotation import rotation_6d_to_matrix, quaternion_to_matrix
from physdreamer.data.scene_box import SceneBox


class MulTemporalKplanesSE3fields(nn.Module):
    """Multiple Temporal Kplanes SE(3) fields.

        Decoder is shared, but plane coefs are different per dataset.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions_list: one kplane resolution spec per dataset,
            each in an order of [x, y, z, t].
        feat_dim: feature dim of each encoding.
        reduce: how plane features are merged inside each encoding.
        num_decoder_layers: depth of the shared MLP decoder.
        decoder_hidden_size: hidden width of the shared MLP decoder.
        rotation_type: rotation parametrization decoded by the MLP:
            "6d" (6 values) or "quaternion" (4 values), plus 3 for translation.
        add_spatial_triplane: additionally encode with a static (x, y, z)
            tri-plane and concatenate those features with the temporal ones.
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions_list: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce: Literal["sum", "product", "cat"] = "sum",
        num_decoder_layers=2,
        decoder_hidden_size=64,
        rotation_type: Literal["quaternion", "6d"] = "6d",
        add_spatial_triplane: bool = True,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        # Rotation parameters + 3 translation components.
        output_dim_dict = {"quaternion": 4 + 3, "6d": 6 + 3}
        self.output_dim = output_dim_dict[rotation_type]
        self.rotation_type = rotation_type

        # One temporal encoding per dataset; the MLP decoder below is shared.
        self.temporal_kplanes_encoding_list = nn.ModuleList(
            [
                TemporalKplanesEncoding(resolutions, feat_dim, init_a, init_b, reduce)
                for resolutions in resolutions_list
            ]
        )

        self.add_spatial_triplane = add_spatial_triplane
        if add_spatial_triplane:
            self.spatial_kplanes_encoding_list = nn.ModuleList(
                [
                    TriplanesEncoding(
                        resolutions[:-1], feat_dim, init_a, init_b, reduce
                    )
                    for resolutions in resolutions_list
                ]
            )
            # Temporal + spatial features are concatenated before decoding.
            feat_dim = feat_dim * 2

        self.decoder = MLP(
            feat_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.output_dim,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
        )

    def forward(
        self, inp: Float[Tensor, "*bs 4"], dataset_indx: Int[Tensor, "1"]
    ) -> Tuple[Float[Tensor, "*bs 3 3"], Float[Tensor, "*bs 3"]]:
        """Decode a per-point rigid transform for dataset ``dataset_indx``.

        Args:
            inp: [bs, 4] points as (x, y, z, t); xyz in world space,
                t presumably in [0, 1] (it is remapped to [-1, 1] below).
            dataset_indx: which per-dataset encoding to use.

        Returns:
            (R_mat, translation): [bs, 3, 3] rotation matrices and [bs, 3]
            translation vectors.
        """
        inpx, inpt = inp[:, :3], inp[:, 3:]

        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inpx, self.aabb) * 2.0 - 1.0

        inpt = inpt * 2.0 - 1.0

        inp = torch.cat([inpx, inpt], dim=-1)

        # for loop in batch dimension

        output = self.temporal_kplanes_encoding_list[dataset_indx](inp)

        if self.add_spatial_triplane:
            spatial_output = self.spatial_kplanes_encoding_list[dataset_indx](inp)
            output = torch.cat([output, spatial_output], dim=-1)

        output = self.decoder(output)

        if self.rotation_type == "6d":
            rotation_6d, translation = output[:, :6], output[:, 6:]
            R_mat = rotation_6d_to_matrix(rotation_6d)

        elif self.rotation_type == "quaternion":
            quat, translation = output[:, :4], output[:, 4:]

            # NOTE(review): comment said "tanh and normalize" but only tanh is
            # applied here; quaternion_to_matrix is presumably expected to
            # normalize -- confirm against its implementation.
            quat = torch.tanh(quat)

            R_mat = quaternion_to_matrix(quat)

        return R_mat, translation

    def compute_smoothess_loss(
        self,
    ):
        """Return (plane TV loss, temporal smoothness loss) over all encodings.

        Name kept as-is (typo included) for backward compatibility.
        """
        temporal_smoothness_loss = 0.0
        for temporal_kplanes_encoding in self.temporal_kplanes_encoding_list:
            temporal_smoothness_loss += (
                temporal_kplanes_encoding.compute_temporal_smoothness()
            )

        smothness_loss = 0.0
        for temporal_kplanes_encoding in self.temporal_kplanes_encoding_list:
            smothness_loss += temporal_kplanes_encoding.compute_plane_tv()

        if self.add_spatial_triplane:
            for spatial_kplanes_encoding in self.spatial_kplanes_encoding_list:
                smothness_loss += spatial_kplanes_encoding.compute_plane_tv()

        return smothness_loss, temporal_smoothness_loss

    def compute_loss(
        self,
        inp: Float[Tensor, "*bs 4"],
        trajectory: Float[Tensor, "*bs 3"],
        loss_func,
        dataset_indx: int = 0,
    ):
        """Reconstruction loss between (R @ xyz + t) and the target trajectory.

        Bug fix: forward() requires ``dataset_indx``, but the original code
        called ``self(inp)`` without it, raising a TypeError.  The new
        parameter defaults to 0 to stay backward-compatible.
        """
        inpx, inpt = inp[:, :3], inp[:, 3:]

        R, t = self(inp, dataset_indx)

        rec_traj = torch.bmm(R, inpx.unsqueeze(-1)).squeeze(-1) + t

        rec_loss = loss_func(rec_traj, trajectory)

        return rec_loss


================================================
FILE: physdreamer/fields/offset_field.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Literal, Optional, Sequence, Tuple, List
from physdreamer.field_components.encoding import (
    TemporalKplanesEncoding,
    TriplanesEncoding,
)
from physdreamer.field_components.mlp import MLP
from physdreamer.operators.rotation import rotation_6d_to_matrix, quaternion_to_matrix
from physdreamer.data.scene_box import SceneBox


class TemporalKplanesOffsetfields(nn.Module):
    """Temporal Offsets fields.

    Predicts a per-point 3D offset conditioned on position and time, using a
    factored space-time (k-planes) feature grid and a small MLP decoder.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions: resolutions of the kplanes. in an order of [x, y, z ,t].
        feat_dim: feature dimension stored in each plane.
        init_a: lower bound of the plane-feature initialization range.
        init_b: upper bound of the plane-feature initialization range.
        reduce: how per-plane features are combined ("sum", "product", "cat").
        num_decoder_layers: number of layers in the decoder MLP.
        decoder_hidden_size: hidden width of the decoder MLP.
        add_spatial_triplane: if True, additionally encode position with a
            time-independent triplane and concatenate its features.
        zero_init: zero-initialize the decoder output layer so the field
            initially predicts zero offsets.
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce: Literal["sum", "product", "cat"] = "sum",
        num_decoder_layers=2,
        decoder_hidden_size=64,
        add_spatial_triplane: bool = True,
        zero_init: bool = True,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        # The field outputs a 3D offset per query point.
        self.output_dim = 3

        self.temporal_kplanes_encoding = TemporalKplanesEncoding(
            resolutions, feat_dim, init_a, init_b, reduce
        )

        self.add_spatial_triplane = add_spatial_triplane
        if add_spatial_triplane:
            # Spatial triplane reuses the [x, y, z] resolutions (time dropped).
            self.spatial_kplanes_encoding = TriplanesEncoding(
                resolutions[:-1], feat_dim, init_a, init_b, reduce
            )
            # Temporal and spatial features are concatenated before decoding.
            feat_dim = feat_dim * 2

        self.decoder = MLP(
            feat_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.output_dim,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
            zero_init=zero_init,
        )

    def forward(
        self, inp: Float[Tensor, "*bs 4"]
    ) -> Float[Tensor, "*bs 3"]:
        """Predict a 3D offset for each (x, y, z, t) query point.

        `inp[..., :3]` are world positions (normalized by the aabb) and
        `inp[..., 3]` is time, mapped here from [0, 1] to [-1, 1].
        """
        inpx, inpt = inp[:, :3], inp[:, 3:]

        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inpx, self.aabb) * 2.0 - 1.0

        inpt = inpt * 2.0 - 1.0

        inp = torch.cat([inpx, inpt], dim=-1)
        output = self.temporal_kplanes_encoding(inp)

        if self.add_spatial_triplane:
            spatial_output = self.spatial_kplanes_encoding(inpx)
            output = torch.cat([output, spatial_output], dim=-1)

        output = self.decoder(output)

        return output

    def compute_smoothess_loss(
        self,
    ):
        """Return a single scalar: plane TV loss + temporal smoothness loss."""
        smothness_loss = self.temporal_kplanes_encoding.compute_plane_tv()
        temporal_smoothness_loss = (
            self.temporal_kplanes_encoding.compute_temporal_smoothness()
        )

        if self.add_spatial_triplane:
            smothness_loss += self.spatial_kplanes_encoding.compute_plane_tv()

        return smothness_loss + temporal_smoothness_loss

    def compute_loss(
        self,
        inp: Float[Tensor, "*bs 4"],
        trajectory: Float[Tensor, "*bs 3"],
        loss_func,
    ):
        """Loss between offset-displaced input positions and a target trajectory."""
        inpx, inpt = inp[:, :3], inp[:, 3:]

        output = self(inp)

        # The field predicts displacements relative to the input positions.
        rec_traj = inpx + output

        rec_loss = loss_func(rec_traj, trajectory)

        return rec_loss

    def arap_loss(self, inp):
        """Unimplemented placeholder (presumably an as-rigid-as-possible loss)."""
        pass

    def forward_with_plane_coefs(
        self,
        plane_coefs: List[Float[Tensor, "feat_dim H W"]],
        inp: Float[Tensor, "*bs 4"],
    ):
        """Forward pass using externally supplied temporal plane coefficients.

        Args:
            plane_coefs: feature planes handed to the temporal encoding's
                `functional_forward` instead of its own parameters.
            inp: [*bs, 4] (x, y, z, t) query points.

        Returns:
            [*bs, 3] predicted offsets.
        """

        inpx, inpt = inp[:, :3], inp[:, 3:]

        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inpx, self.aabb) * 2.0 - 1.0

        inpt = inpt * 2.0 - 1.0

        inp = torch.cat([inpx, inpt], dim=-1)
        output = self.temporal_kplanes_encoding.functional_forward(
            plane_coefs, inp, reduce=self.temporal_kplanes_encoding.reduce
        )

        if self.add_spatial_triplane:
            # The spatial branch still uses the module's own parameters.
            spatial_output = self.spatial_kplanes_encoding(inpx)
            output = torch.cat([output, spatial_output], dim=-1)

        output = self.decoder(output)

        return output


================================================
FILE: physdreamer/fields/se3_field.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Literal, Optional, Sequence, Tuple
from physdreamer.field_components.encoding import (
    TemporalKplanesEncoding,
    TriplanesEncoding,
)
from physdreamer.field_components.mlp import MLP
from physdreamer.operators.rotation import rotation_6d_to_matrix, quaternion_to_matrix
from physdreamer.data.scene_box import SceneBox


class TemporalKplanesSE3fields(nn.Module):
    """Temporal Kplanes SE(3) fields.

    Predicts a per-point rigid transform (rotation + translation) conditioned
    on position and time, using a factored space-time (k-planes) feature grid
    and a small MLP decoder.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions: resolutions of the kplanes. in an order of [x, y, z ,t].
        feat_dim: feature dimension stored in each plane.
        init_a: lower bound of the plane-feature initialization range.
        init_b: upper bound of the plane-feature initialization range.
        reduce: how per-plane features are combined ("sum", "product", "cat").
        num_decoder_layers: number of layers in the decoder MLP.
        decoder_hidden_size: hidden width of the decoder MLP.
        rotation_type: rotation parameterization decoded from the MLP output,
            either a continuous "6d" representation or a "quaternion".
        add_spatial_triplane: if True, additionally encode position with a
            time-independent triplane and concatenate its features.
        zero_init: zero-initialize the decoder output layer.
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce: Literal["sum", "product", "cat"] = "sum",
        num_decoder_layers=2,
        decoder_hidden_size=64,
        rotation_type: Literal["quaternion", "6d"] = "6d",
        add_spatial_triplane: bool = True,
        zero_init: bool = True,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        # Rotation parameters + 3 translation components; the dict lookup also
        # validates rotation_type (raises KeyError on unknown values).
        output_dim_dict = {"quaternion": 4 + 3, "6d": 6 + 3}
        self.output_dim = output_dim_dict[rotation_type]
        self.rotation_type = rotation_type

        self.temporal_kplanes_encoding = TemporalKplanesEncoding(
            resolutions, feat_dim, init_a, init_b, reduce
        )

        self.add_spatial_triplane = add_spatial_triplane
        if add_spatial_triplane:
            # Spatial triplane reuses the [x, y, z] resolutions (time dropped).
            self.spatial_kplanes_encoding = TriplanesEncoding(
                resolutions[:-1], feat_dim, init_a, init_b, reduce
            )
            # Temporal and spatial features are concatenated before decoding.
            feat_dim = feat_dim * 2

        self.decoder = MLP(
            feat_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.output_dim,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
            zero_init=zero_init,
        )

    def forward(
        self,
        inp: Float[Tensor, "*bs 4"],
        compute_smoothess_loss: bool = False,
    ) -> Tuple[Float[Tensor, "*bs 3 3"], Float[Tensor, "*bs 3"]]:
        """Decode a rigid transform (R, t) for each (x, y, z, t) query point.

        NOTE: when `compute_smoothess_loss` is True, this instead returns a
        single scalar regularizer (plane TV + temporal smoothness) and never
        reads `inp`.
        """
        if compute_smoothess_loss:
            smothness_loss, temporal_smoothness_loss = self.compute_smoothess_loss()
            return smothness_loss + temporal_smoothness_loss
        inpx, inpt = inp[:, :3], inp[:, 3:]

        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inpx, self.aabb) * 2.0 - 1.0

        inpt = inpt * 2.0 - 1.0

        inp = torch.cat([inpx, inpt], dim=-1)
        output = self.temporal_kplanes_encoding(inp)

        if self.add_spatial_triplane:
            spatial_output = self.spatial_kplanes_encoding(inpx)
            output = torch.cat([output, spatial_output], dim=-1)

        output = self.decoder(output)

        if self.rotation_type == "6d":
            rotation_6d, translation = output[:, :6], output[:, 6:]
            R_mat = rotation_6d_to_matrix(rotation_6d)

        elif self.rotation_type == "quaternion":
            quat, translation = output[:, :4], output[:, 4:]

            # Squash raw decoder outputs into [-1, 1].
            quat = torch.tanh(quat)

            # NOTE(review): the quaternion is not explicitly normalized here;
            # this relies on quaternion_to_matrix handling non-unit
            # quaternions — confirm against physdreamer.operators.rotation.
            R_mat = quaternion_to_matrix(quat)

        return R_mat, translation

    def compute_smoothess_loss(
        self,
    ):
        """Return (plane TV loss, temporal smoothness loss).

        The spatial triplane (when present) contributes only to the TV term.
        """
        smothness_loss = self.temporal_kplanes_encoding.compute_plane_tv()
        temporal_smoothness_loss = (
            self.temporal_kplanes_encoding.compute_temporal_smoothness()
        )

        if self.add_spatial_triplane:
            smothness_loss += self.spatial_kplanes_encoding.compute_plane_tv()

        return smothness_loss, temporal_smoothness_loss

    def compute_loss(
        self,
        inp: Float[Tensor, "*bs 4"],
        trajectory: Float[Tensor, "*bs 3"],
        loss_func,
    ):
        """Loss between SE(3)-transformed input positions and a target trajectory."""
        inpx, inpt = inp[:, :3], inp[:, 3:]

        R, t = self(inp)

        # Apply the predicted per-point rigid transform: R @ x + t.
        rec_traj = torch.bmm(R, inpx.unsqueeze(-1)).squeeze(-1) + t

        rec_loss = loss_func(rec_traj, trajectory)

        return rec_loss


================================================
FILE: physdreamer/fields/triplane_field.py
================================================
import torch
import torch.nn.functional as F
from jaxtyping import Float, Int, Shaped
from torch import Tensor, nn
from typing import Optional, Sequence, Tuple, List
from physdreamer.field_components.encoding import TriplanesEncoding
from physdreamer.field_components.mlp import MLP
from physdreamer.data.scene_box import SceneBox


class TriplaneFields(nn.Module):
    """Static triplane feature field.

    Encodes 3D positions with three axis-aligned feature planes and decodes
    the combined features into an `output_dim`-dimensional vector with a
    small MLP.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions: resolutions of the kplanes. in an order of [x, y, z]
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce="sum",  # one of "sum", "product", "cat"
        num_decoder_layers=2,
        decoder_hidden_size=64,
        output_dim: int = 96,
        zero_init: bool = False,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        self.output_dim = output_dim

        self.kplanes_encoding = TriplanesEncoding(
            resolutions, feat_dim, init_a, init_b, reduce
        )

        # "cat" concatenates the three plane features instead of reducing
        # them, so the decoder input is three times wider.
        decoder_in_dim = feat_dim * 3 if reduce == "cat" else feat_dim
        self.decoder = MLP(
            decoder_in_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.output_dim,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
            zero_init=zero_init,
        )

    def forward(
        self, inp: Float[Tensor, "*bs 3"]
    ) -> Float[Tensor, "*bs output_dim"]:
        """Encode positions with the triplanes and decode them to features."""
        # Map world coordinates into the [-1, 1] cube used by the planes.
        normalized = SceneBox.get_normalized_positions(inp, self.aabb) * 2.0 - 1.0

        features = self.kplanes_encoding(normalized)

        return self.decoder(features)

    def compute_smoothess_loss(self):
        """Total-variation regularizer over the three feature planes."""
        return self.kplanes_encoding.compute_plane_tv()


def compute_entropy(p):
    """Mean entropy of a batch of probability distributions.

    Args:
        p: [bs, num_cls] probabilities (rows expected to sum to 1).

    Returns:
        Scalar tensor: per-row entropy averaged over the batch. A small
        epsilon inside the log prevents log(0).
    """
    log_p = torch.log(p + 1e-5)
    per_row_entropy = -(p * log_p).sum(dim=1)
    return per_row_entropy.mean()


class TriplaneFieldsWithEntropy(nn.Module):
    """Triplane field that predicts a soft class assignment per point.

    The decoder outputs `num_cls` logits per point; the softmax distribution
    selects a convex combination of learned per-class embeddings, and the
    mean entropy of the distribution is returned as a regularizer.

    Args:
        aabb: axis-aligned bounding box.
            aabb[0] is the minimum (x,y,z) point.
            aabb[1] is the maximum (x,y,z) point.
        resolutions: resolutions of the kplanes. in an order of [x, y, z]
        output_dim: dimension of the per-class embedding vectors.
        num_cls: number of classes.
    """

    def __init__(
        self,
        aabb: Float[Tensor, "2 3"],
        resolutions: Sequence[int],
        feat_dim: int = 64,
        init_a: float = 0.1,
        init_b: float = 0.5,
        reduce="sum",  # one of "sum", "product", "cat"
        num_decoder_layers=2,
        decoder_hidden_size=64,
        output_dim: int = 96,
        zero_init: bool = False,
        num_cls: int = 3,
    ):
        super().__init__()

        self.register_buffer("aabb", aabb)
        self.output_dim = output_dim
        self.num_cls = num_cls

        self.kplanes_encoding = TriplanesEncoding(
            resolutions, feat_dim, init_a, init_b, reduce
        )

        # Decoder emits one logit per class.
        self.decoder = MLP(
            feat_dim,
            num_decoder_layers,
            layer_width=decoder_hidden_size,
            out_dim=self.num_cls,
            skip_connections=None,
            activation=nn.ReLU(),
            out_activation=None,
            zero_init=zero_init,
        )

        self.cls_embedding = torch.nn.Embedding(num_cls, output_dim)

    def forward(
        self, inp: Float[Tensor, "*bs 3"]
    ) -> Tuple[Float[Tensor, "*bs output_dim"], Float[Tensor, "1"]]:
        """Return (blended class embeddings, mean softmax entropy)."""
        # shift to [-1, 1]
        inpx = SceneBox.get_normalized_positions(inp, self.aabb) * 2.0 - 1.0

        output = self.kplanes_encoding(inpx)

        output = self.decoder(output)

        prob = F.softmax(output, dim=-1)

        entropy = compute_entropy(prob)

        # BUGFIX: was hard-coded to [0, 1, 2], which broke for num_cls != 3.
        cls_index = torch.arange(self.num_cls, device=inp.device)
        cls_emb = self.cls_embedding(cls_index)

        # [*bs, num_cls] @ [num_cls, output_dim] -> [*bs, output_dim]
        output = torch.matmul(prob, cls_emb)

        return output, entropy

    def compute_smoothess_loss(
        self,
    ):
        """Total-variation regularizer over the three feature planes."""
        smothness_loss = self.kplanes_encoding.compute_plane_tv()

        return smothness_loss


================================================
FILE: physdreamer/gaussian_3d/README.md
================================================
This folder is mainly a copy paste from https://github.com/graphdeco-inria/gaussian-splatting

We add some functions to render applied external forces.

================================================
FILE: physdreamer/gaussian_3d/arguments/__init__.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

from argparse import ArgumentParser, Namespace
import sys
import os

class GroupParams:
    """Bare namespace object that `ParamGroup.extract` populates with parsed values."""
    pass

class ParamGroup:
    """Declarative argparse group.

    Subclasses assign their defaults as instance attributes *before* calling
    ``super().__init__``; each attribute then becomes a ``--<name>`` CLI flag.
    A leading underscore on an attribute additionally registers a one-letter
    shorthand flag (the underscore is stripped from the CLI name). Boolean
    attributes become ``store_true`` flags.
    """

    def __init__(self, parser: ArgumentParser, name: str, fill_none=False):
        group = parser.add_argument_group(name)
        for attr, default in vars(self).items():
            use_shorthand = attr.startswith("_")
            if use_shorthand:
                attr = attr[1:]
            value_type = type(default)
            if fill_none:
                # Sentinel mode: register every flag with default None so
                # unspecified values can be detected after parsing.
                default = None

            flags = ["--" + attr]
            if use_shorthand:
                flags.append("-" + attr[0:1])

            if value_type == bool:
                group.add_argument(*flags, default=default, action="store_true")
            else:
                group.add_argument(*flags, default=default, type=value_type)

    def extract(self, args):
        """Copy this group's values out of a parsed namespace into a GroupParams."""
        group = GroupParams()
        for key, value in vars(args).items():
            if key in vars(self) or ("_" + key) in vars(self):
                setattr(group, key, value)
        return group

class ModelParams(ParamGroup): 
    """Dataset-loading / model parameters.

    Attribute names with a leading underscore also get a one-letter shorthand
    flag (see ParamGroup); the underscore is not part of the CLI name.
    `sentinel` is forwarded to ParamGroup's `fill_none`, registering every
    flag with default None.
    """

    def __init__(self, parser, sentinel=False):
        self.sh_degree = 3
        self._source_path = ""
        self._model_path = ""
        self._images = "images"
        self._resolution = -1
        self._white_background = False
        self.data_device = "cuda"
        self.eval = False
        super().__init__(parser, "Loading Parameters", sentinel)

    def extract(self, args):
        # Resolve the dataset source path to an absolute path.
        g = super().extract(args)
        g.source_path = os.path.abspath(g.source_path)
        return g

class PipelineParams(ParamGroup):
    """Rasterization pipeline toggles (python vs. rasterizer-side SH/covariance paths)."""

    def __init__(self, parser):
        self.convert_SHs_python = False
        self.compute_cov3D_python = False
        self.debug = False
        super().__init__(parser, "Pipeline Parameters")

class OptimizationParams(ParamGroup):
    """Training hyper-parameters for the Gaussian-splatting optimization."""

    def __init__(self, parser):
        self.iterations = 30_000
        # Learning-rate schedule for the Gaussian positions.
        self.position_lr_init = 0.00016
        self.position_lr_final = 0.0000016
        self.position_lr_delay_mult = 0.01
        self.position_lr_max_steps = 30_000
        # Per-attribute learning rates.
        self.feature_lr = 0.0025
        self.opacity_lr = 0.05
        self.scaling_lr = 0.005
        self.rotation_lr = 0.001
        self.percent_dense = 0.01
        self.lambda_dssim = 0.2
        # Densification / opacity-reset schedule.
        self.densification_interval = 100
        self.opacity_reset_interval = 3000
        self.densify_from_iter = 500
        self.densify_until_iter = 15_000
        self.densify_grad_threshold = 0.0002
        super().__init__(parser, "Optimization Parameters")

def get_combined_args(parser: ArgumentParser):
    """Merge command-line arguments with the `cfg_args` file in the model dir.

    Values supplied on the command line (anything that parsed to a non-None
    value) override the values stored in the config file.

    Returns:
        argparse.Namespace with the merged values.
    """
    cmdline_string = sys.argv[1:]
    cfgfile_string = "Namespace()"
    args_cmdline = parser.parse_args(cmdline_string)

    try:
        cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args")
        print("Looking for config file in", cfgfilepath)
        with open(cfgfilepath) as cfg_file:
            print("Config file found: {}".format(cfgfilepath))
            cfgfile_string = cfg_file.read()
    except (TypeError, OSError):
        # model_path may be missing/None (TypeError from os.path.join) or the
        # file may not exist or be unreadable (OSError, incl. FileNotFoundError);
        # fall back to an empty Namespace in both cases.
        print("Config file not found")
    # NOTE(review): eval of the saved Namespace repr is trusted because
    # cfg_args is written by this project's own training script; never point
    # model_path at untrusted data.
    args_cfgfile = eval(cfgfile_string)

    merged_dict = vars(args_cfgfile).copy()
    for k, v in vars(args_cmdline).items():
        if v is not None:
            merged_dict[k] = v
    return Namespace(**merged_dict)


================================================
FILE: physdreamer/gaussian_3d/gaussian_renderer/__init__.py
================================================


================================================
FILE: physdreamer/gaussian_3d/gaussian_renderer/depth_uv_render.py
================================================
import torch
from physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel
import math

from diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
from typing import Callable


def render_uv_depth_w_gaussian(
    viewpoint_camera,
    pc: GaussianModel,
    pipe,
    bg_color: torch.Tensor,
    scaling_modifier=1.0,
):
    """
    Render each Gaussian's projected pixel coordinates (u, v) and camera-space
    depth by splatting them through the rasterizer as precomputed "colors".

    Background tensor (bg_color) must be on GPU!

    Args:
        viewpoint_camera: camera providing FoVx/FoVy, image size, view and
            projection transforms, and `cam_plane_2_img` ([2, 2] matrix
            mapping camera-plane coordinates to pixels).
        pc: the Gaussian point cloud to rasterize.
        pipe: pipeline flags (`compute_cov3D_python`, `debug`).
        bg_color: background color tensor; must live on the GPU.
        scaling_modifier: global multiplier applied to the Gaussian scales.

    Returns:
        Dict with the rendered (u, v, depth) image, visibility filter, radii,
        and the per-point depth / pixel coordinates used as splat colors.
    """

    # Create zero tensor. We will use it to make pytorch return gradients of
    # the 2D (screen-space) means.
    screenspace_points = (
        torch.zeros_like(
            pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
        )
        + 0
    )
    try:
        screenspace_points.retain_grad()
    except RuntimeError:
        # retain_grad raises when grad tracking is disabled (e.g. inference).
        pass

    # Set up rasterization configuration
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform,
        projmatrix=viewpoint_camera.full_proj_transform,
        sh_degree=pc.active_sh_degree,
        campos=viewpoint_camera.camera_center,
        prefiltered=False,
        debug=pipe.debug,
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    means3D = pc.get_xyz
    means2D = screenspace_points
    opacity = pc.get_opacity

    # If precomputed 3d covariance is requested, compute it in python.
    # Otherwise the rasterizer derives it from scaling / rotation.
    scales = None
    rotations = None
    cov3D_precomp = None
    if pipe.compute_cov3D_python:
        cov3D_precomp = pc.get_covariance(scaling_modifier)
    else:
        scales = pc.get_scaling
        rotations = pc.get_rotation

    # No SH colors: we splat (u, v, depth) as precomputed "colors" instead.
    shs = None

    # Project the points into the camera to obtain per-point pixel
    # coordinates and depth.
    w2c = viewpoint_camera.world_view_transform.transpose(0, 1)
    cam_plane_2_img = viewpoint_camera.cam_plane_2_img  # [2, 2]

    # Homogeneous world points -> camera space. [N, 4, 1]
    pts = torch.cat([pc._xyz, torch.ones_like(pc._xyz[:, 0:1])], dim=-1)
    pts_cam = w2c.unsqueeze(0) @ pts.unsqueeze(-1)
    depth = pts_cam[:, 2, 0]  # [N]

    # Perspective divide to camera-plane coordinates. [N, 2]
    pts_cam_xy = pts_cam[:, :2, 0] / depth.unsqueeze(-1)

    # Camera-plane -> pixel coordinates. [N, 2]
    pts_cam_xy_pixel = cam_plane_2_img.unsqueeze(0) @ pts_cam_xy.unsqueeze(-1)
    pts_cam_xy_pixel = pts_cam_xy_pixel.squeeze(-1)

    # (u, v, depth) per Gaussian. [N, 3]
    colors_precomp = torch.cat(
        [pts_cam_xy_pixel, depth.unsqueeze(dim=-1)], dim=-1
    )

    # Rasterize visible Gaussians to image, obtain their radii (on screen).
    rendered_image, radii = rasterizer(
        means3D=means3D,
        means2D=means2D,
        shs=shs,
        colors_precomp=colors_precomp,
        opacities=opacity,
        scales=scales,
        rotations=rotations,
        cov3D_precomp=cov3D_precomp,
    )

    # Gaussians that were frustum-culled or had a radius of 0 are not visible.
    return {
        "render": rendered_image,
        "visibility_filter": radii > 0,
        "radii": radii,
        "pts_depth": depth,
        "pts_cam_xy_pixel": pts_cam_xy_pixel,
    }


================================================
FILE: physdreamer/gaussian_3d/gaussian_renderer/feat_render.py
================================================
import torch
from physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel
import math

from diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
from typing import Callable


def render_feat_gaussian(
    viewpoint_camera,
    pc: GaussianModel,
    pipe,
    bg_color: torch.Tensor,
    points_feat: torch.Tensor,
    scaling_modifier=1.0,
):
    """
    Render per-point features by splatting them as precomputed colors.

    Background tensor (bg_color) must be on GPU!

    Args:
        viewpoint_camera: camera providing FoV, image size and transforms.
        pc: the Gaussian point cloud to rasterize.
        pipe: pipeline flags (`compute_cov3D_python`, `debug`).
        bg_color: background color tensor; must live on the GPU.
        points_feat: [N, 3] per-Gaussian feature vectors used as colors;
            N must match the number of Gaussians.
        scaling_modifier: global multiplier applied to the Gaussian scales.

    Returns:
        Dict with the rendered feature image, visibility filter and radii.
    """

    # Create zero tensor. We will use it to make pytorch return gradients of
    # the 2D (screen-space) means.
    screenspace_points = (
        torch.zeros_like(
            pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
        )
        + 0
    )
    try:
        screenspace_points.retain_grad()
    except RuntimeError:
        # retain_grad raises when grad tracking is disabled (e.g. inference).
        pass

    # Set up rasterization configuration
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform,
        projmatrix=viewpoint_camera.full_proj_transform,
        sh_degree=pc.active_sh_degree,
        campos=viewpoint_camera.camera_center,
        prefiltered=False,
        debug=pipe.debug,
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    means3D = pc.get_xyz
    means2D = screenspace_points
    opacity = pc.get_opacity

    # If precomputed 3d covariance is requested, compute it in python.
    # Otherwise the rasterizer derives it from scaling / rotation.
    scales = None
    rotations = None
    cov3D_precomp = None
    if pipe.compute_cov3D_python:
        cov3D_precomp = pc.get_covariance(scaling_modifier)
    else:
        scales = pc.get_scaling
        rotations = pc.get_rotation

    # Splat the supplied features directly instead of SH-derived colors.
    shs = None
    colors_precomp = points_feat
    # The rasterizer expects one 3-channel "color" per Gaussian.
    assert (points_feat.shape[1] == 3) and (points_feat.shape[0] == means3D.shape[0])

    # Rasterize visible Gaussians to image, obtain their radii (on screen).
    rendered_image, radii = rasterizer(
        means3D=means3D,
        means2D=means2D,
        shs=shs,
        colors_precomp=colors_precomp,
        opacities=opacity,
        scales=scales,
        rotations=rotations,
        cov3D_precomp=cov3D_precomp,
    )

    # Gaussians that were frustum-culled or had a radius of 0 are not visible.
    return {
        "render": rendered_image,
        "visibility_filter": radii > 0,
        "radii": radii,
    }


================================================
FILE: physdreamer/gaussian_3d/gaussian_renderer/flow_depth_render.py
================================================
import torch
from physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel
import math

from diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
from typing import Callable


def render_flow_depth_w_gaussian(
    viewpoint_camera,
    pc: GaussianModel,
    pipe,
    point_disp: torch.Tensor,
    bg_color: torch.Tensor,
    scaling_modifier=1.0,
):
    """
    Render per-pixel 2D motion and depth by splatting each Gaussian's
    projected displacement and camera-space depth as its "color".

    Background tensor (bg_color) must be on GPU!

    Args:
        viewpoint_camera: camera providing FoV, image size, transforms, and
            `cam_plane_2_img` ([2, 2] camera-plane-to-pixel matrix).
        pc: the Gaussian point cloud to rasterize.
        pipe: pipeline flags (`compute_cov3D_python`, `debug`).
        point_disp: [N, 3] world-space displacement per Gaussian.
        bg_color: background color tensor; must live on the GPU.
        scaling_modifier: global multiplier applied to the Gaussian scales.

    Returns:
        Dict with the rendered (motion_u, motion_v, depth) image.
    """

    # Create zero tensor. We will use it to make pytorch return gradients of
    # the 2D (screen-space) means.
    screenspace_points = (
        torch.zeros_like(
            pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
        )
        + 0
    )
    try:
        screenspace_points.retain_grad()
    except RuntimeError:
        # retain_grad raises when grad tracking is disabled (e.g. inference).
        pass

    # Set up rasterization configuration
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform,
        projmatrix=viewpoint_camera.full_proj_transform,
        sh_degree=pc.active_sh_degree,
        campos=viewpoint_camera.camera_center,
        prefiltered=False,
        debug=pipe.debug,
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    means3D = pc.get_xyz
    means2D = screenspace_points
    opacity = pc.get_opacity

    # If precomputed 3d covariance is requested, compute it in python.
    # Otherwise the rasterizer derives it from scaling / rotation.
    scales = None
    rotations = None
    cov3D_precomp = None
    if pipe.compute_cov3D_python:
        cov3D_precomp = pc.get_covariance(scaling_modifier)
    else:
        scales = pc.get_scaling
        rotations = pc.get_rotation

    # No SH colors: the projected motion and depth are splatted instead.
    shs = None

    # Project the per-point motion into the image plane.
    w2c = viewpoint_camera.world_view_transform.transpose(0, 1)
    cam_plane_2_img = viewpoint_camera.cam_plane_2_img  # [2, 2]

    # Homogeneous world points -> camera space to obtain per-point depth.
    pts = torch.cat([pc._xyz, torch.ones_like(pc._xyz[:, 0:1])], dim=-1)
    pts_cam = w2c.unsqueeze(0) @ pts.unsqueeze(-1)  # [N, 4, 1]
    depth = pts_cam[:, 2, 0]  # [N]

    # Pad displacements with w=0 so only the rotational part of w2c applies
    # (displacements are directions, not points).
    point_disp_pad = torch.cat(
        [point_disp, torch.zeros_like(point_disp[:, 0:1])], dim=-1
    )  # [N, 4]

    pts_motion = w2c.unsqueeze(0) @ point_disp_pad.unsqueeze(-1)  # [N, 4, 1]

    # Approximate image-plane motion: divide by each point's depth. [N, 2]
    pts_motion_xy = pts_motion[:, :2, 0] / depth.unsqueeze(-1)

    # Camera-plane motion -> pixel-space motion. [N, 2]
    pts_motion_xy_pixel = cam_plane_2_img.unsqueeze(0) @ pts_motion_xy.unsqueeze(-1)
    pts_motion_xy_pixel = pts_motion_xy_pixel.squeeze(-1)  # [N, 2]

    # (motion_u, motion_v, depth) splat colors. [N, 3]
    colors_precomp = torch.cat(
        [pts_motion_xy_pixel, depth.unsqueeze(dim=-1)], dim=-1
    )

    # Rasterize visible Gaussians to image, obtain their radii (on screen).
    rendered_image, radii = rasterizer(
        means3D=means3D,
        means2D=means2D,
        shs=shs,
        colors_precomp=colors_precomp,
        opacities=opacity,
        scales=scales,
        rotations=rotations,
        cov3D_precomp=cov3D_precomp,
    )

    return {"render": rendered_image}


================================================
FILE: physdreamer/gaussian_3d/gaussian_renderer/render.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import math
from diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
from physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel


def render_gaussian(
    viewpoint_camera,
    pc: GaussianModel,
    pipe,
    bg_color: torch.Tensor,
    scaling_modifier=1.0,
    override_color=None,
    cov3D_precomp=None,
):
    """
    Render the scene from ``viewpoint_camera`` via differentiable rasterization.

    Background tensor (bg_color) must be on GPU!

    Args:
        viewpoint_camera: camera providing FoV, image size, view/projection
            matrices and the camera center.
        pc: Gaussian point cloud to render.
        pipe: pipeline flags (``compute_cov3D_python``, ``convert_SHs_python``,
            ``debug``).
        bg_color: background color tensor; must live on the GPU.
        scaling_modifier: global multiplier applied to Gaussian scales.
        override_color: optional per-Gaussian colors bypassing SH evaluation.
        cov3D_precomp: optional precomputed 3D covariances, used as-is if given.

    Returns:
        dict with the rendered image, the screen-space points tensor (kept so
        its gradients can drive densification), a per-Gaussian visibility
        mask, and the on-screen radii.
    """

    # Zero tensor with retained grad: makes PyTorch expose gradients of the
    # 2D (screen-space) means to the caller.
    screenspace_points = (
        torch.zeros_like(
            pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
        )
        + 0
    )
    try:
        screenspace_points.retain_grad()
    except Exception:
        # retain_grad can fail (e.g. under no_grad); rendering still works.
        pass

    # Set up rasterization configuration.
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform,
        projmatrix=viewpoint_camera.full_proj_transform,
        sh_degree=pc.active_sh_degree,
        campos=viewpoint_camera.camera_center,
        prefiltered=False,
        debug=pipe.debug,
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    means3D = pc.get_xyz
    means2D = screenspace_points
    opacity = pc.get_opacity

    # Covariance source, in priority order:
    #   1) caller-supplied cov3D_precomp (used as-is),
    #   2) computed in Python when pipe.compute_cov3D_python is set,
    #   3) otherwise scales + rotations, letting the rasterizer build it.
    # This fixes the original control flow, whose `elif cov3D_precomp is None`
    # branch was unreachable (scales/rotations were never used) and which
    # silently overwrote a supplied cov3D_precomp when the flag was set.
    scales = None
    rotations = None
    if cov3D_precomp is None:
        if pipe.compute_cov3D_python:
            cov3D_precomp = pc.get_covariance(scaling_modifier)
        else:
            scales = pc.get_scaling
            rotations = pc.get_rotation

    # Color source: explicit override, SH evaluated here in Python, or raw SH
    # features handed to the rasterizer for conversion.
    shs = None
    colors_precomp = None
    if override_color is None:
        if pipe.convert_SHs_python:
            shs_view = pc.get_features.transpose(1, 2).view(
                -1, 3, (pc.max_sh_degree + 1) ** 2
            )
            # Per-Gaussian viewing direction, normalized.
            dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat(
                pc.get_features.shape[0], 1
            )
            dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True)
            sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
            colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
        else:
            shs = pc.get_features
    else:
        colors_precomp = override_color

    # Rasterize visible Gaussians to image, obtain their radii (on screen).
    rendered_image, radii = rasterizer(
        means3D=means3D,
        means2D=means2D,
        shs=shs,
        colors_precomp=colors_precomp,
        opacities=opacity,
        scales=scales,
        rotations=rotations,
        cov3D_precomp=cov3D_precomp,
    )

    # Gaussians that were frustum-culled or got radius 0 are invisible; the
    # visibility filter excludes them from the splitting/densification stats.
    return {
        "render": rendered_image,
        "viewspace_points": screenspace_points,
        "visibility_filter": radii > 0,
        "radii": radii,
    }


def gaussian_intrin_scale(x_or_y: torch.Tensor, w_or_h: float):
    """Map an NDC coordinate in [-1, 1] to a pixel coordinate.

    Uses the same convention as the Gaussian rasterizer:
    ``pixel = ((ndc + 1) * size - 1) / 2``.
    """
    shifted = (x_or_y + 1.0) * w_or_h
    return (shifted - 1.0) * 0.5


def render_arrow_in_screen(viewpoint_camera, points_3d):
    """Project 3D world points to pixel coordinates with the camera's full
    projection transform.

    Args:
        viewpoint_camera: camera exposing ``full_proj_transform`` ([4, 4],
            stored transposed for right-multiplication) and
            ``image_width`` / ``image_height``.
        points_3d: [N, 3] tensor of world-space points.

    Returns:
        [N, 2] tensor of (x, y) pixel coordinates.

    Note: the original body computed the projection twice (the first result
    was discarded) and read ``world_view_transform`` / ``cam_plane_2_img``
    without using them; that dead code is removed here.
    """
    full_proj_mat = viewpoint_camera.full_proj_transform

    # Homogenize: [N, 4]
    pts = torch.cat([points_3d, torch.ones_like(points_3d[:, 0:1])], dim=-1)

    # [1, 4, 4] @ [N, 4, 1] -> [N, 4, 1]; the stored matrix is transposed,
    # hence the .T before left-multiplying column vectors.
    pts_cam = full_proj_mat.T.unsqueeze(0) @ pts.unsqueeze(-1)
    pts_cam = pts_cam.squeeze(-1)  # [N, 4]
    # Perspective divide -> NDC in [-1, 1].
    pts_cam = pts_cam[:, :3] / pts_cam[:, 3:]

    pts_cam_x, pts_cam_y = pts_cam[:, 0], pts_cam[:, 1]

    w, h = viewpoint_camera.image_width, viewpoint_camera.image_height

    # NDC -> pixel, same mapping as gaussian_intrin_scale: ((v+1)*size - 1)/2.
    pts_cam_x = ((pts_cam_x + 1.0) * w - 1.0) * 0.5
    pts_cam_y = ((pts_cam_y + 1.0) * h - 1.0) * 0.5

    ret_pts_cam_xy = torch.cat(
        [pts_cam_x.unsqueeze(-1), pts_cam_y.unsqueeze(-1)], dim=-1
    )

    return ret_pts_cam_xy


def render_arrow_in_screen_back(viewpoint_camera, points_3d):
    """Project 3D world points to pixel coordinates via explicit
    world-to-camera transform + 2x2 camera-plane-to-image scaling.

    Args:
        viewpoint_camera: camera exposing ``world_view_transform`` ([4, 4],
            stored transposed) and ``cam_plane_2_img`` ([2, 2]).
        points_3d: [N, 3] tensor of world-space points.

    Returns:
        [N, 2] tensor of pixel coordinates (swapped from yx to xy order).

    Fixes vs. the original: the leftover ``IPython.embed()`` debugging hook
    (which halted execution) is removed, as is the erroneous overwrite of
    ``cam_plane_2_img`` with the [4, 4] projection matrix — that overwrite
    made the [2, 2] matmul below shape-invalid.
    """
    # World -> camera; stored transposed, so transpose back.
    w2c = viewpoint_camera.world_view_transform.transpose(0, 1)
    cam_plane_2_img = viewpoint_camera.cam_plane_2_img  # [2, 2]

    # Homogenize and transform: [1, 4, 4] @ [N, 4, 1] -> [N, 4, 1]
    pts = torch.cat([points_3d, torch.ones_like(points_3d[:, 0:1])], dim=-1)
    pts_cam = w2c.unsqueeze(0) @ pts.unsqueeze(-1)
    depth = pts_cam[:, 2, 0]  # [N]

    # Perspective divide on the camera plane: [N, 2]
    pts_cam_xy = pts_cam[:, :2, 0] / depth.unsqueeze(-1)

    # Camera plane -> image plane: [1, 2, 2] @ [N, 2, 1] -> [N, 2, 1]
    pts_cam_xy_pixel = cam_plane_2_img.unsqueeze(0) @ pts_cam_xy.unsqueeze(
        -1
    )
    pts_cam_xy_pixel = pts_cam_xy_pixel.squeeze(-1)  # [N, 2]

    #  [N, 2] yx => xy
    pts_cam_xy_pixel = torch.cat(
        [pts_cam_xy_pixel[:, [1]], pts_cam_xy_pixel[:, [0]]], dim=-1
    )

    return pts_cam_xy_pixel


# Spherical-harmonics (SH) helpers: basis constants and evaluation.


# Hard-coded real spherical-harmonics basis factors used by eval_sh below.
# C{l} holds the scalar coefficients of the degree-l basis functions.
C0 = 0.28209479177387814
C1 = 0.4886025119029199
# Degree-2 factors (5 basis functions).
C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396,
]
# Degree-3 factors (7 basis functions).
C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435,
]
# Degree-4 factors (9 basis functions).
C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]


def eval_sh(deg, sh, dirs):
    """
    Evaluate spherical harmonics at unit directions
    using hardcoded SH polynomials.
    Works with torch/np/jnp.
    ... Can be 0 or more batch dimensions.
    Args:
        deg: int SH deg. Degrees 0-4 supported (the code below handles
            deg == 4 even though an earlier version documented only 0-3).
        deg: int SH degree.
        sh: SH coeffs [..., C, (deg + 1) ** 2]
        dirs: unit directions [..., 3]
    Returns:
        [..., C]
    """
    assert deg <= 4 and deg >= 0
    # (deg + 1)^2 coefficients are required per channel: 1, 4, 9, 16, or 25.
    coeff = (deg + 1) ** 2
    assert sh.shape[-1] >= coeff

    # Degree 0: constant band.
    result = C0 * sh[..., 0]
    if deg > 0:
        # Degree 1: linear in the direction components.
        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
        result = (
            result - C1 * y * sh[..., 1] + C1 * z * sh[..., 2] - C1 * x * sh[..., 3]
        )

        if deg > 1:
            # Degree 2: quadratic monomials.
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result = (
                result
                + C2[0] * xy * sh[..., 4]
                + C2[1] * yz * sh[..., 5]
                + C2[2] * (2.0 * zz - xx - yy) * sh[..., 6]
                + C2[3] * xz * sh[..., 7]
                + C2[4] * (xx - yy) * sh[..., 8]
            )

            if deg > 2:
                # Degree 3 band.
                result = (
                    result
                    + C3[0] * y * (3 * xx - yy) * sh[..., 9]
                    + C3[1] * xy * z * sh[..., 10]
                    + C3[2] * y * (4 * zz - xx - yy) * sh[..., 11]
                    + C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12]
                    + C3[4] * x * (4 * zz - xx - yy) * sh[..., 13]
                    + C3[5] * z * (xx - yy) * sh[..., 14]
                    + C3[6] * x * (xx - 3 * yy) * sh[..., 15]
                )

                if deg > 3:
                    # Degree 4 band.
                    result = (
                        result
                        + C4[0] * xy * (xx - yy) * sh[..., 16]
                        + C4[1] * yz * (3 * xx - yy) * sh[..., 17]
                        + C4[2] * xy * (7 * zz - 1) * sh[..., 18]
                        + C4[3] * yz * (7 * zz - 3) * sh[..., 19]
                        + C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20]
                        + C4[5] * xz * (7 * zz - 3) * sh[..., 21]
                        + C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22]
                        + C4[7] * xz * (xx - 3 * yy) * sh[..., 23]
                        + C4[8]
                        * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))
                        * sh[..., 24]
                    )
    return result


def RGB2SH(rgb):
    """Convert RGB values to degree-0 SH coefficients (inverse of SH2RGB)."""
    centered = rgb - 0.5
    return centered / C0


def SH2RGB(sh):
    """Evaluate degree-0 SH coefficients as RGB values (inverse of RGB2SH)."""
    scaled = sh * C0
    return scaled + 0.5


================================================
FILE: physdreamer/gaussian_3d/scene/__init__.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import os
import random
import numpy as np
import json
from physdreamer.gaussian_3d.utils.system_utils import searchForMaxIteration
from physdreamer.gaussian_3d.scene.dataset_readers import sceneLoadTypeCallbacks
from physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel
from physdreamer.gaussian_3d.arguments import ModelParams
from physdreamer.gaussian_3d.utils.camera_utils import (
    cameraList_from_camInfos,
    camera_to_JSON,
)


class Scene:
    """Ties a GaussianModel to its dataset's train/test cameras.

    On construction the dataset type is detected from the on-disk layout
    (COLMAP ``sparse/`` dir vs. Blender ``transforms_train.json``), camera
    lists are built at every requested resolution scale, and the Gaussians
    are either created from the dataset point cloud or restored from a
    previously saved iteration.
    """

    # The Gaussian point cloud this scene owns.
    gaussians: GaussianModel

    def __init__(
        self,
        args: ModelParams,
        gaussians: GaussianModel,
        load_iteration=None,
        shuffle=True,
        resolution_scales=[1.0],
    ):
        """Load the dataset and initialize (or restore) the Gaussians.

        :param args: dataset/model options; ``source_path`` locates the data
            and ``model_path`` is where checkpoints and camera JSON go.
        :param gaussians: the GaussianModel to populate.
        :param load_iteration: if truthy, restore a saved point cloud;
            -1 means "latest iteration found on disk".
        :param shuffle: randomly shuffle the train/test camera order.
        :param resolution_scales: image scales at which to build camera lists.
        """
        self.model_path = args.model_path
        self.loaded_iter = None
        self.gaussians = gaussians

        if load_iteration:
            if load_iteration == -1:
                # Pick the highest saved iteration under model_path/point_cloud.
                self.loaded_iter = searchForMaxIteration(
                    os.path.join(self.model_path, "point_cloud")
                )
            else:
                self.loaded_iter = load_iteration
            print("Loading trained model at iteration {}".format(self.loaded_iter))

        # Camera lists keyed by resolution scale.
        self.train_cameras = {}
        self.test_cameras = {}

        # Detect the dataset type from its on-disk layout.
        if os.path.exists(os.path.join(args.source_path, "sparse")):
            scene_info = sceneLoadTypeCallbacks["Colmap"](
                args.source_path, args.images, args.eval
            )
        elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
            print("Found transforms_train.json file, assuming Blender data set!")
            scene_info = sceneLoadTypeCallbacks["Blender"](
                args.source_path, args.white_background, args.eval
            )
        else:
            assert False, "Could not recognize scene type!"

        if not self.loaded_iter:
            # Fresh run: copy the input point cloud next to the model and dump
            # all cameras as JSON for later inspection.
            with open(scene_info.ply_path, "rb") as src_file, open(
                os.path.join(self.model_path, "input.ply"), "wb"
            ) as dest_file:
                dest_file.write(src_file.read())
            json_cams = []
            camlist = []
            if scene_info.test_cameras:
                camlist.extend(scene_info.test_cameras)
            if scene_info.train_cameras:
                camlist.extend(scene_info.train_cameras)
            for id, cam in enumerate(camlist):
                json_cams.append(camera_to_JSON(id, cam))
            with open(os.path.join(self.model_path, "cameras.json"), "w") as file:
                json.dump(json_cams, file)

        if shuffle:
            random.shuffle(
                scene_info.train_cameras
            )  # Multi-res consistent random shuffling
            random.shuffle(
                scene_info.test_cameras
            )  # Multi-res consistent random shuffling

        # Scene extent (camera bounding radius); passed to create_from_pcd below.
        self.cameras_extent = scene_info.nerf_normalization["radius"]

        for resolution_scale in resolution_scales:
            print("Loading Training Cameras")
            self.train_cameras[resolution_scale] = cameraList_from_camInfos(
                scene_info.train_cameras, resolution_scale, args
            )
            print("Loading Test Cameras")
            self.test_cameras[resolution_scale] = cameraList_from_camInfos(
                scene_info.test_cameras, resolution_scale, args
            )

        if self.loaded_iter:
            # Restore the Gaussians from the chosen checkpoint...
            self.gaussians.load_ply(
                os.path.join(
                    self.model_path,
                    "point_cloud",
                    "iteration_" + str(self.loaded_iter),
                    "point_cloud.ply",
                )
            )
        else:
            # ...or initialize them from the dataset point cloud.
            self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)

    def save(self, iteration):
        """Save the current Gaussians under point_cloud/iteration_<n>/."""
        point_cloud_path = os.path.join(
            self.model_path, "point_cloud/iteration_{}".format(iteration)
        )
        self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))

    def getTrainCameras(self, scale=1.0):
        """Return the training cameras at the given resolution scale."""
        return self.train_cameras[scale]

    def getTestCameras(self, scale=1.0):
        """Return the test cameras at the given resolution scale."""
        return self.test_cameras[scale]


================================================
FILE: physdreamer/gaussian_3d/scene/cameras.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
from torch import nn
import numpy as np
from physdreamer.gaussian_3d.utils.graphics_utils import (
    getWorld2View2,
    getProjectionMatrix,
)


class Camera(nn.Module):
    """A training camera: pose, field of view, and its ground-truth image.

    Derived matrices (world-to-view, projection, and their product) are
    stored transposed — i.e. for right-multiplication — and moved to CUDA.
    """

    def __init__(
        self,
        colmap_id,
        R,
        T,
        FoVx,
        FoVy,
        image,
        gt_alpha_mask,
        image_name,
        uid,
        trans=np.array([0.0, 0.0, 0.0]),
        scale=1.0,
        data_device="cuda",
    ):
        super(Camera, self).__init__()

        self.uid = uid
        self.colmap_id = colmap_id
        self.R = R  # rotation as provided by the dataset reader
        self.T = T  # translation as provided by the dataset reader
        self.FoVx = FoVx
        self.FoVy = FoVy
        self.image_name = image_name

        # Fall back to the default CUDA device if the requested one is invalid.
        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(
                f"[Warning] Custom device {data_device} failed, fallback to default cuda device"
            )
            self.data_device = torch.device("cuda")

        # Image is handled as [C, H, W]; values clamped to [0, 1].
        self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
        self.image_width = self.original_image.shape[2]
        self.image_height = self.original_image.shape[1]

        if gt_alpha_mask is not None:
            # Pre-multiply by the alpha mask so masked-out pixels become zero.
            self.original_image *= gt_alpha_mask.to(self.data_device)
        else:
            self.original_image *= torch.ones(
                (1, self.image_height, self.image_width), device=self.data_device
            )

        # Near/far clip planes used by the projection matrix.
        self.zfar = 100.0
        self.znear = 0.01

        # Optional global translation/scale applied to the camera pose.
        self.trans = trans
        self.scale = scale

        self.world_view_transform = (
            torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
        )
        self.projection_matrix = (
            getProjectionMatrix(
                znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy
            )
            .transpose(0, 1)
            .cuda()
        )
        # Combined world -> clip transform (still in transposed layout).
        self.full_proj_transform = (
            self.world_view_transform.unsqueeze(0).bmm(
                self.projection_matrix.unsqueeze(0)
            )
        ).squeeze(0)
        # Camera position in world coordinates.
        self.camera_center = self.world_view_transform.inverse()[3, :3]


class MiniCam:
    """Lightweight camera container for rendering without a full Camera.

    Holds image extent, field of view, clip planes, and the (transposed)
    view/projection matrices; the camera center is recovered from the
    inverse of the view matrix.
    """

    def __init__(
        self,
        width,
        height,
        fovy,
        fovx,
        znear,
        zfar,
        world_view_transform,
        full_proj_transform,
    ):
        # Image extent and field of view.
        self.image_width = width
        self.image_height = height
        self.FoVy = fovy
        self.FoVx = fovx
        # Near/far clip planes.
        self.znear = znear
        self.zfar = zfar
        # Stored transposed (right-multiplication convention).
        self.world_view_transform = world_view_transform
        self.full_proj_transform = full_proj_transform
        # Camera position in world space: translation row of the inverse view.
        inv_view = self.world_view_transform.inverse()
        self.camera_center = inv_view[3][:3]


================================================
FILE: physdreamer/gaussian_3d/scene/colmap_loader.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import numpy as np
import collections
import struct

CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
                           for camera_model in CAMERA_MODELS])


def qvec2rotmat(qvec):
    """Convert a (w, x, y, z) quaternion into a 3x3 rotation matrix."""
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    row0 = [
        1 - 2 * y ** 2 - 2 * z ** 2,
        2 * x * y - 2 * w * z,
        2 * z * x + 2 * w * y,
    ]
    row1 = [
        2 * x * y + 2 * w * z,
        1 - 2 * x ** 2 - 2 * z ** 2,
        2 * y * z - 2 * w * x,
    ]
    row2 = [
        2 * z * x - 2 * w * y,
        2 * y * z + 2 * w * x,
        1 - 2 * x ** 2 - 2 * y ** 2,
    ]
    return np.array([row0, row1, row2])

def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a (w, x, y, z) quaternion.

    Uses the eigenvector of the largest eigenvalue of the symmetric K
    matrix; the sign is fixed so the scalar part is non-negative.
    """
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz],
    ]) / 3.0
    eigvals, eigvecs = np.linalg.eigh(K)
    largest = np.argmax(eigvals)
    # Reorder components from (x, y, z, w) to (w, x, y, z).
    qvec = eigvecs[[3, 0, 1, 2], largest]
    return qvec if qvec[0] >= 0 else -qvec

class Image(BaseImage):
    """COLMAP extrinsics record with a quaternion -> rotation helper."""

    def qvec2rotmat(self):
        # Delegate to the module-level converter on this image's quaternion.
        return qvec2rotmat(self.qvec)

def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.
    :param fid: open binary file object.
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    raw = fid.read(num_bytes)
    fmt = endian_character + format_char_sequence
    return struct.unpack(fmt, raw)

def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)

    Parse a COLMAP points3D.txt file.

    :param path: path to points3D.txt.
    :return: (xyzs [N, 3] float, rgbs [N, 3] int, errors [N] float) arrays,
        or (None, None, None) when the file contains no points (matching the
        original behavior).
    """
    xyz_list = []
    rgb_list = []
    err_list = []
    with open(path, "r") as fid:
        for line in fid:
            line = line.strip()
            # Skip blank lines and '#' comments.
            if len(line) == 0 or line[0] == "#":
                continue
            elems = line.split()
            # Per line: POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] (ignored).
            xyz_list.append(np.array(tuple(map(float, elems[1:4]))))
            rgb_list.append(np.array(tuple(map(int, elems[4:7]))))
            err_list.append(np.array(float(elems[7])))

    if not xyz_list:
        return None, None, None

    # Stack once at the end: O(N) overall, instead of the original
    # O(N^2) repeated np.append inside the loop.
    xyzs = np.stack(xyz_list, axis=0)
    rgbs = np.stack(rgb_list, axis=0)
    errors = np.stack(err_list, axis=0)
    return xyzs, rgbs, errors

def read_points3D_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)

    Parse points3D.bin into (xyzs [N, 3], rgbs [N, 3], errors [N, 1]) arrays.
    Each point's track data is read (to advance the stream) but discarded.
    """


    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of 3D points in the file.
        num_points = read_next_bytes(fid, 8, "Q")[0]

        xyzs = np.empty((num_points, 3))
        rgbs = np.empty((num_points, 3))
        errors = np.empty((num_points, 1))

        for p_id in range(num_points):
            # 43 bytes: id (uint64) + xyz (3 doubles) + rgb (3 bytes) + error (double).
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            # Length-prefixed track: (image_id, point2D_idx) int pairs.
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            # Read to keep the stream aligned; the track itself is unused here.
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            xyzs[p_id] = xyz
            rgbs[p_id] = rgb
            errors[p_id] = error
    return xyzs, rgbs, errors

def read_intrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py

    Parse a COLMAP cameras.txt file into a {camera_id: Camera} dict.
    Only the PINHOLE model is accepted (asserted per line).
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comments.
            if not stripped or stripped[0] == "#":
                continue
            elems = stripped.split()
            camera_id = int(elems[0])
            model = elems[1]
            assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
            width = int(elems[2])
            height = int(elems[3])
            params = np.array(tuple(map(float, elems[4:])))
            cameras[camera_id] = Camera(
                id=camera_id,
                model=model,
                width=width,
                height=height,
                params=params,
            )
    return cameras

def read_extrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)

    Parse images.bin into a {image_id: Image} dict (pose, name, 2D points).
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of registered images.
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # 64 bytes: image_id (int) + qvec (4 doubles) + tvec (3 doubles) + camera_id (int).
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # Image name is stored as a NUL-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            # Length-prefixed 2D keypoints: (x, y, point3D_id) triplets.
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            # De-interleave the flat triplet stream into xys and point3D ids.
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images


def read_intrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)

    Parse cameras.bin into a {camera_id: Camera} dict.
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of cameras.
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            # 24 bytes: camera_id (int) + model_id (int) + width, height (uint64).
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # Model-dependent number of intrinsic parameters (doubles).
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        # Sanity check: all advertised cameras were read.
        assert len(cameras) == num_cameras
    return cameras


def read_extrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py

    Parse images.txt: each image is stored as a pair of lines — pose/name,
    then its 2D keypoints. Returns a {image_id: Image} dict.
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comments.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                # First line of the pair: IMAGE_ID, QW QX QY QZ, TX TY TZ,
                # CAMERA_ID, NAME.
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Second line of the pair: (x, y, point3D_id) triplets.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images


def read_colmap_bin_array(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py

    Read a COLMAP dense binary map (depth/normal): an ASCII header of the form
    "width&height&channels&" followed by raw float32 data in Fortran order.

    :param path: path to the colmap binary file.
    :return: nd array with the floating point values in the value
    """
    with open(path, "rb") as fid:
        # Parse the header dimensions from the first '&'-delimited row.
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        # Re-scan from the start to position the stream just past the third
        # '&', i.e. at the first data byte.
        fid.seek(0)
        delimiters_seen = 0
        while delimiters_seen < 3:
            if fid.read(1) == b"&":
                delimiters_seen += 1
        array = np.fromfile(fid, np.float32)
    # Column-major on disk; transpose to (height, width, channels) and drop
    # singleton dimensions.
    array = array.reshape((width, height, channels), order="F")
    return np.transpose(array, (1, 0, 2)).squeeze()


================================================
FILE: physdreamer/gaussian_3d/scene/dataset_readers.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import os
import sys
from PIL import Image
from typing import NamedTuple
from physdreamer.gaussian_3d.scene.colmap_loader import (
    read_extrinsics_text,
    read_intrinsics_text,
    qvec2rotmat,
    read_extrinsics_binary,
    read_intrinsics_binary,
    read_points3D_binary,
    read_points3D_text,
)
from physdreamer.gaussian_3d.utils.graphics_utils import (
    getWorld2View2,
    focal2fov,
    fov2focal,
)
import numpy as np
import math
import json
from pathlib import Path
from plyfile import PlyData, PlyElement
from physdreamer.gaussian_3d.utils.sh_utils import SH2RGB
from physdreamer.gaussian_3d.scene.gaussian_model import BasicPointCloud
import torch
import torch.nn as nn
from physdreamer.gaussian_3d.utils.graphics_utils import (
    getWorld2View2,
    getProjectionMatrix,
)


class CameraInfo(NamedTuple):
    """Raw per-image camera record produced by the dataset readers."""

    uid: int  # camera id from the dataset (COLMAP intrinsic id)
    R: np.array  # rotation as stored by the reader (transposed COLMAP rotation)
    T: np.array  # translation vector
    FovY: np.array  # vertical field of view
    FovX: np.array  # horizontal field of view
    image: np.array  # image loaded via PIL in readColmapCameras
    image_path: str  # full path to the image file
    image_name: str  # file name without extension
    width: int
    height: int


class SceneInfo(NamedTuple):
    """Everything Scene needs: point cloud, cameras, normalization, ply path."""

    point_cloud: BasicPointCloud  # initialization points for the Gaussians
    train_cameras: list  # list of CameraInfo
    test_cameras: list  # list of CameraInfo
    nerf_normalization: dict  # {"translate", "radius"} as built by getNerfppNorm
    ply_path: str  # path to the initialization point cloud on disk


def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalization from camera poses.

    Returns a dict with "translate" (negated mean camera center, flattened)
    and "radius" (1.1x the max distance of any camera from that center).
    """
    centers = []
    for cam in cam_info:
        # Camera center = translation column of the camera-to-world matrix.
        w2c = getWorld2View2(cam.R, cam.T)
        c2w = np.linalg.inv(w2c)
        centers.append(c2w[:3, 3:4])

    stacked = np.hstack(centers)
    mean_center = np.mean(stacked, axis=1, keepdims=True)
    dists = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    diagonal = np.max(dists)

    radius = diagonal * 1.1
    translate = -mean_center.flatten()

    return {"translate": translate, "radius": radius}


def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Build a CameraInfo for every COLMAP extrinsic record.

    Only undistorted PINHOLE / SIMPLE_PINHOLE intrinsics are supported.
    Images are opened eagerly from `images_folder`.
    """
    cam_infos = []
    num_cams = len(cam_extrinsics)
    for idx, key in enumerate(cam_extrinsics):
        # Single-line, in-place progress indicator.
        sys.stdout.write("\r")
        sys.stdout.write("Reading camera {}/{}".format(idx + 1, num_cams))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        width, height = intr.width, intr.height

        # COLMAP stores a world-to-camera quaternion; R is kept transposed
        # for the glm/CUDA convention used downstream.
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)

        if intr.model == "SIMPLE_PINHOLE":
            fx = intr.params[0]
            fy = fx  # one shared focal length
        elif intr.model == "PINHOLE":
            fx = intr.params[0]
            fy = intr.params[1]
        else:
            assert (
                False
            ), "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
        FovY = focal2fov(fy, height)
        FovX = focal2fov(fx, width)

        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)

        cam_infos.append(
            CameraInfo(
                uid=intr.id,
                R=R,
                T=T,
                FovY=FovY,
                FovX=FovX,
                image=image,
                image_path=image_path,
                image_name=image_name,
                width=width,
                height=height,
            )
        )
    sys.stdout.write("\n")
    return cam_infos


def fetchPly(path):
    """Load a .ply into a BasicPointCloud (positions, colors in [0, 1], normals)."""
    vertices = PlyData.read(path)["vertex"]
    xyz = np.vstack([vertices["x"], vertices["y"], vertices["z"]]).T
    rgb = np.vstack([vertices["red"], vertices["green"], vertices["blue"]]).T / 255.0
    nrm = np.vstack([vertices["nx"], vertices["ny"], vertices["nz"]]).T
    return BasicPointCloud(points=xyz, colors=rgb, normals=nrm)


def storePly(path, xyz, rgb):
    """Write float positions plus uint8 colors (and zero normals) as a .ply.

    The field layout matches what fetchPly reads back.
    """
    # Structured dtype for one vertex record.
    dtype = [
        ("x", "f4"),
        ("y", "f4"),
        ("z", "f4"),
        ("nx", "f4"),
        ("ny", "f4"),
        ("nz", "f4"),
        ("red", "u1"),
        ("green", "u1"),
        ("blue", "u1"),
    ]

    # Normals are not estimated here; store zeros as placeholders.
    attributes = np.concatenate((xyz, np.zeros_like(xyz), rgb), axis=1)

    elements = np.empty(xyz.shape[0], dtype=dtype)
    elements[:] = list(map(tuple, attributes))

    vertex_element = PlyElement.describe(elements, "vertex")
    PlyData([vertex_element]).write(path)


def readColmapSceneInfo(path, images, eval, llffhold=8):
    """Load a COLMAP reconstruction into a SceneInfo.

    Args:
        path: scene root containing `sparse/0` and the image folder.
        images: image subfolder name, or None to default to "images".
        eval: when True, hold out every `llffhold`-th camera for testing.
        llffhold: stride of the test split (LLFF convention).
    """
    try:
        # Prefer the binary COLMAP outputs; fall back to the text versions.
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images is None else images
    cam_infos_unsorted = readColmapCameras(
        cam_extrinsics=cam_extrinsics,
        cam_intrinsics=cam_intrinsics,
        images_folder=os.path.join(path, reading_dir),
    )
    # sorted() already builds a new list, so no defensive copy is needed.
    cam_infos = sorted(cam_infos_unsorted, key=lambda x: x.image_name)

    if eval:
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        print(
            "Converting point3d.bin to .ply, will happen only the first time you open the scene."
        )
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except Exception:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except Exception:
        # A missing or unreadable ply leaves the point cloud unset.
        pcd = None

    scene_info = SceneInfo(
        point_cloud=pcd,
        train_cameras=train_cam_infos,
        test_cameras=test_cam_infos,
        nerf_normalization=nerf_normalization,
        ply_path=ply_path,
    )
    return scene_info


def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
    """Load cameras from a NeRF-synthetic `transforms_*.json` file.

    Each RGBA frame is alpha-composited onto a white or black background.

    Fix vs. the original: the image path was built as
    `os.path.join(path, cam_name)` where `cam_name` already contained `path`,
    duplicating the prefix whenever `path` is relative (os.path.join only
    discards the first argument when the second is absolute).
    """
    cam_infos = []

    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)

    # camera_angle_x is the horizontal field of view
    # frames.file_path is the image name (relative to the scene root)
    # frame.transform_matrix is the camera-to-world transform
    fovx = contents["camera_angle_x"]

    for idx, frame in enumerate(contents["frames"]):
        image_path = os.path.join(path, frame["file_path"] + extension)

        # NeRF 'transform_matrix' is a camera-to-world transform
        c2w = np.array(frame["transform_matrix"])
        # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
        c2w[:3, 1:3] *= -1

        # get the world-to-camera transform and set R, T
        w2c = np.linalg.inv(c2w)
        R = np.transpose(
            w2c[:3, :3]
        )  # R is stored transposed due to 'glm' in CUDA code
        T = w2c[:3, 3]

        image_name = Path(image_path).stem
        image = Image.open(image_path)

        im_data = np.array(image.convert("RGBA"))
        bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0])

        # Alpha-composite in [0, 1], then convert back to 8-bit RGB.
        norm_data = im_data / 255.0
        alpha = norm_data[:, :, 3:4]
        arr = norm_data[:, :, :3] * alpha + bg * (1 - alpha)
        # np.uint8 instead of np.byte (int8): identical byte pattern, but the
        # correct unsigned dtype for 0-255 image data.
        image = Image.fromarray(np.array(arr * 255.0, dtype=np.uint8), "RGB")

        # Vertical FoV derived from the horizontal one and the aspect ratio.
        fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])

        cam_infos.append(
            CameraInfo(
                uid=idx,
                R=R,
                T=T,
                FovY=fovy,
                FovX=fovx,
                image=image,
                image_path=image_path,
                image_name=image_name,
                width=image.size[0],
                height=image.size[1],
            )
        )

    return cam_infos


def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    """Load a NeRF-synthetic (Blender) scene into a SceneInfo.

    When no `points3d.ply` exists yet, a random point cloud is generated and
    written to disk, then read back like any other scene.
    """
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(
        path, "transforms_train.json", white_background, extension
    )
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(
        path, "transforms_test.json", white_background, extension
    )

    if not eval:
        # No held-out views: fold the test cameras into the training set.
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "points3d.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        # The in-memory BasicPointCloud built here in the original was a dead
        # store (immediately overwritten by fetchPly below), so it is dropped.
        storePly(ply_path, xyz, SH2RGB(shs) * 255)
    try:
        pcd = fetchPly(ply_path)
    except Exception:
        # Narrowed from a bare `except:`; an unreadable ply leaves pcd=None.
        pcd = None

    scene_info = SceneInfo(
        point_cloud=pcd,
        train_cameras=train_cam_infos,
        test_cameras=test_cam_infos,
        nerf_normalization=nerf_normalization,
        ply_path=ply_path,
    )
    return scene_info


# Dispatch table: dataset type name -> SceneInfo loader function.
sceneLoadTypeCallbacks = {
    "Colmap": readColmapSceneInfo,
    "Blender": readNerfSyntheticInfo,
}


# below used for easy rendering
class NoImageCamera(nn.Module):
    """Camera that carries only pose/projection data — no image tensor.

    Used for fast rendering paths where ground-truth images are not needed.
    All matrices are built on CUDA at construction time.
    """

    def __init__(
        self,
        colmap_id,
        R,  # [3, 3] rotation (transposed, glm convention — see dataset loaders)
        T,  # [3] world-to-camera translation
        FoVx,  # horizontal field of view, radians
        FoVy,  # vertical field of view, radians
        width,  # render width in pixels
        height,  # render height in pixels
        uid,
        trans=np.array([0.0, 0.0, 0.0]),  # extra scene translation for getWorld2View2
        scale=1.0,  # extra scene scale for getWorld2View2
        data_device="cuda",
        img_path=None,  # not needed
    ):
        super(NoImageCamera, self).__init__()

        self.uid = uid
        self.colmap_id = colmap_id
        self.R = R
        self.T = T
        self.FoVx = FoVx
        self.FoVy = FoVy
        self.img_path = img_path

        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(
                f"[Warning] Custom device {data_device} failed, fallback to default cuda device"
            )
            self.data_device = torch.device("cuda")

        self.image_width = width
        self.image_height = height

        # Near/far clipping planes used by the projection matrix.
        self.zfar = 100.0
        self.znear = 0.01

        self.trans = trans
        self.scale = scale

        # world to camera, then transpose.  # [4, 4]
        #  w2c.transpose
        self.world_view_transform = (
            torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
        )

        # [4, 4]
        self.projection_matrix = (
            getProjectionMatrix(
                znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy
            )
            .transpose(0, 1)
            .cuda()
        )

        # # [4, 4].  points @ full_proj_transform => screen space.
        self.full_proj_transform = (
            self.world_view_transform.unsqueeze(0).bmm(
                self.projection_matrix.unsqueeze(0)
            )
        ).squeeze(0)
        # Camera position in world space (last row of the inverse, since the
        # transform is stored transposed).
        self.camera_center = self.world_view_transform.inverse()[3, :3]

        # [2, 2].
        #  (w2c @ p) / depth => cam_plane
        #  (p_in_cam / depth)[:2] @  cam_plane_2_img => [pixel_x, pixel_y]    cam_plane => img_plane
        self.cam_plane_2_img = torch.tensor(
            [
                [0.5 * width / math.tan(self.FoVx / 2.0), 0.0],
                [0.0, 0.5 * height / math.tan(self.FoVy / 2.0)],
            ]
        ).cuda()


def fast_read_cameras_from_transform_file(file_path, width=1080, height=720):
    """Read cameras from a transforms .json without opening any image files.

    Returns a list of NoImageCamera. `width`/`height` define the target render
    resolution since no images are loaded.

    Fix vs. the original: the camera uid was passed as `uid=id` — the builtin
    `id` function object — instead of the frame index.
    """
    cam_infos = []
    dir_name = os.path.dirname(file_path)

    with open(file_path) as json_file:
        contents = json.load(json_file)

    # camera_angle_x is the horizontal field of view
    # frames.file_path is the image name
    # frame.transform_matrix is the camera-to-world transform
    fovx = contents["camera_angle_x"]

    for idx, frame in enumerate(contents["frames"]):
        # NeRF 'transform_matrix' is a camera-to-world transform
        c2w = np.array(frame["transform_matrix"])
        # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
        c2w[:3, 1:3] *= -1

        # get the world-to-camera transform and set R, T
        w2c = np.linalg.inv(c2w)
        R = np.transpose(
            w2c[:3, :3]
        )  # R is stored transposed due to 'glm' in CUDA code
        T = w2c[:3, 3]

        # Vertical FoV from the horizontal one and the target aspect ratio.
        fovy = focal2fov(fov2focal(fovx, width), height)

        img_path = os.path.join(dir_name, frame["file_path"] + ".png")
        cam_ = NoImageCamera(
            colmap_id=idx,
            R=R,
            T=T,
            FoVx=fovx,
            FoVy=fovy,
            width=width,
            height=height,
            uid=idx,  # was `uid=id` (the builtin function) in the original
            data_device="cuda",
            img_path=img_path,
        )

        cam_infos.append(cam_)

    return cam_infos


================================================
FILE: physdreamer/gaussian_3d/scene/gaussian_model.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import numpy as np
from physdreamer.gaussian_3d.utils.general_utils import (
    inverse_sigmoid,
    get_expon_lr_func,
    build_rotation,
)
from torch import nn
import os
from physdreamer.gaussian_3d.utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from physdreamer.gaussian_3d.utils.sh_utils import RGB2SH
from simple_knn._C import distCUDA2
from physdreamer.gaussian_3d.utils.graphics_utils import BasicPointCloud
from physdreamer.gaussian_3d.utils.general_utils import (
    strip_symmetric,
    build_scaling_rotation,
)
from physdreamer.gaussian_3d.utils.rigid_body_utils import (
    get_rigid_transform,
    matrix_to_quaternion,
    quaternion_multiply,
)


class GaussianModel:
    def setup_functions(self):
        """Bind the (inverse-)activations used to interpret the raw parameters."""

        def covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
            # Covariance = L L^T with L = R @ diag(s); only the symmetric
            # upper-triangular part is kept.
            L = build_scaling_rotation(scaling_modifier * scaling, rotation)
            return strip_symmetric(L @ L.transpose(1, 2))

        # Scales are stored in log-space, opacities in logit-space, and
        # rotations as quaternions that get normalized on read.
        self.scaling_activation = torch.exp
        self.scaling_inverse_activation = torch.log
        self.covariance_activation = covariance_from_scaling_rotation
        self.opacity_activation = torch.sigmoid
        self.inverse_opacity_activation = inverse_sigmoid
        self.rotation_activation = torch.nn.functional.normalize

    def __init__(self, sh_degree: int = 3):
        """Create an empty model; tensors are populated later by
        `create_from_pcd`, `load_ply`, or `restore`.

        Args:
            sh_degree: maximum spherical-harmonics degree used for color.
        """
        self.active_sh_degree = 0
        self.max_sh_degree = sh_degree
        # Raw (pre-activation) per-gaussian parameters; empty until loaded.
        self._xyz = torch.empty(0)
        self._features_dc = torch.empty(0)
        self._features_rest = torch.empty(0)
        self._scaling = torch.empty(0)
        self._rotation = torch.empty(0)
        self._opacity = torch.empty(0)
        # Densification bookkeeping (filled during training / densification).
        self.max_radii2D = torch.empty(0)
        self.xyz_gradient_accum = torch.empty(0)
        self.denom = torch.empty(0)
        self.optimizer = None
        self.percent_dense = 0
        self.spatial_lr_scale = 0
        self.setup_functions()

        self.matched_inds = None

    def capture(self):
        """Snapshot the full model state as a tuple (the inverse of `restore`).

        The optimizer state is included as a state_dict, or None when no
        optimizer has been set up yet.
        """
        optim_state = (
            None if self.optimizer is None else self.optimizer.state_dict()
        )

        return (
            self.active_sh_degree,
            self._xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

    def restore(self, model_args, training_args):
        """Load state produced by `capture`.

        Args:
            model_args: tuple in the exact order `capture` emits.
            training_args: when not None, rebuilds the optimizer and schedules
                via `training_setup` before the saved state is applied.
        """
        (
            self.active_sh_degree,
            self._xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
            self.max_radii2D,
            xyz_gradient_accum,
            denom,
            opt_dict,
            self.spatial_lr_scale,
        ) = model_args

        if training_args is not None:
            self.training_setup(training_args)
        # Must come after training_setup, which zero-initializes these
        # accumulators; the saved values overwrite the fresh ones.
        self.xyz_gradient_accum = xyz_gradient_accum
        self.denom = denom
        if opt_dict is not None:
            self.optimizer.load_state_dict(opt_dict)

    def capture_training_args(
        self,
    ):
        # Placeholder: intentionally not implemented.
        pass

    @property
    def get_scaling(self):
        # World-space per-axis scales (exp of the log-space parameter).
        return self.scaling_activation(self._scaling)

    @property
    def get_rotation(self):
        # Normalized (unit) quaternions.
        return self.rotation_activation(self._rotation)

    @property
    def get_xyz(self):
        # Gaussian centers; raw parameter, no activation applied.
        return self._xyz

    @property
    def get_features(self):
        # Full SH coefficient tensor: DC term concatenated with the
        # higher-order terms along the coefficient dimension.
        features_dc = self._features_dc
        features_rest = self._features_rest
        return torch.cat((features_dc, features_rest), dim=1)

    @property
    def get_opacity(self):
        # Opacities in (0, 1) (sigmoid of the logit-space parameter).
        return self.opacity_activation(self._opacity)

    def get_covariance(self, scaling_modifier=1):
        # Builds the 3D covariance from the activated scales and the raw
        # rotation parameter.
        # NOTE(review): passes self._rotation (unnormalized) rather than
        # get_rotation — presumably build_scaling_rotation normalizes
        # internally; confirm against general_utils.
        return self.covariance_activation(
            self.get_scaling, scaling_modifier, self._rotation
        )

    def oneupSHdegree(self):
        """Raise the active SH degree by one, capped at max_sh_degree."""
        if self.active_sh_degree >= self.max_sh_degree:
            return
        self.active_sh_degree += 1

    def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):
        """Initialize all gaussian parameters from a point cloud (on CUDA).

        Colors become the SH DC term, scales come from nearest-neighbor
        distances (distCUDA2), rotations start as identity quaternions, and
        opacities start at 0.1.
        """
        self.spatial_lr_scale = spatial_lr_scale
        fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
        fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
        # [N, 3, (max_deg+1)^2] SH coefficients; only the DC slot (index 0) is
        # filled, the rest stays zero.
        features = (
            torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2))
            .float()
            .cuda()
        )
        features[:, :3, 0] = fused_color
        # NOTE(review): dim 1 has size 3, so [:, 3:, 1:] is an empty slice —
        # this assignment is a no-op (features is already zero-initialized);
        # kept from the reference implementation.
        features[:, 3:, 1:] = 0.0

        print("Number of points at initialisation : ", fused_point_cloud.shape[0])

        # Clamped squared neighbor distances (from simple_knn's distCUDA2)
        # -> isotropic log-scales, replicated for the 3 axes.
        dist2 = torch.clamp_min(
            distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()),
            0.0000001,
        )
        scales = torch.log(torch.sqrt(dist2))[..., None].repeat(1, 3)
        # Identity quaternions: w=1, x=y=z=0.
        rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
        rots[:, 0] = 1

        # All opacities start at 0.1, stored in logit space.
        opacities = inverse_sigmoid(
            0.1
            * torch.ones(
                (fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"
            )
        )

        self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
        self._features_dc = nn.Parameter(
            features[:, :, 0:1].transpose(1, 2).contiguous().requires_grad_(True)
        )
        self._features_rest = nn.Parameter(
            features[:, :, 1:].transpose(1, 2).contiguous().requires_grad_(True)
        )
        self._scaling = nn.Parameter(scales.requires_grad_(True))
        self._rotation = nn.Parameter(rots.requires_grad_(True))
        self._opacity = nn.Parameter(opacities.requires_grad_(True))
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

    def training_setup(self, training_args):
        """Create the Adam optimizer (one param group per attribute) and the
        exponential learning-rate schedule for the positions."""
        self.percent_dense = training_args.percent_dense
        num_points = self.get_xyz.shape[0]
        self.xyz_gradient_accum = torch.zeros((num_points, 1), device="cuda")
        self.denom = torch.zeros((num_points, 1), device="cuda")

        # Position lr is scaled by the scene extent; every other attribute
        # uses its raw configured lr.
        param_groups = [
            {
                "params": [self._xyz],
                "lr": training_args.position_lr_init * self.spatial_lr_scale,
                "name": "xyz",
            },
            {
                "params": [self._features_dc],
                "lr": training_args.feature_lr,
                "name": "f_dc",
            },
            {
                "params": [self._features_rest],
                "lr": training_args.feature_lr / 20.0,
                "name": "f_rest",
            },
            {
                "params": [self._opacity],
                "lr": training_args.opacity_lr,
                "name": "opacity",
            },
            {
                "params": [self._scaling],
                "lr": training_args.scaling_lr,
                "name": "scaling",
            },
            {
                "params": [self._rotation],
                "lr": training_args.rotation_lr,
                "name": "rotation",
            },
        ]

        self.optimizer = torch.optim.Adam(param_groups, lr=0.0, eps=1e-15)
        self.xyz_scheduler_args = get_expon_lr_func(
            lr_init=training_args.position_lr_init * self.spatial_lr_scale,
            lr_final=training_args.position_lr_final * self.spatial_lr_scale,
            lr_delay_mult=training_args.position_lr_delay_mult,
            max_steps=training_args.position_lr_max_steps,
        )

    def update_learning_rate(self, iteration):
        """Set the xyz group's lr from the schedule and return the new value."""
        for group in self.optimizer.param_groups:
            if group["name"] != "xyz":
                continue
            new_lr = self.xyz_scheduler_args(iteration)
            group["lr"] = new_lr
            return new_lr

    def construct_list_of_attributes(self):
        """Return the per-point ply attribute names in serialization order."""
        names = ["x", "y", "z", "nx", "ny", "nz"]
        # All channels except the 3 DC
        num_dc = self._features_dc.shape[1] * self._features_dc.shape[2]
        names += ["f_dc_{}".format(i) for i in range(num_dc)]
        num_rest = self._features_rest.shape[1] * self._features_rest.shape[2]
        names += ["f_rest_{}".format(i) for i in range(num_rest)]
        names.append("opacity")
        names += ["scale_{}".format(i) for i in range(self._scaling.shape[1])]
        names += ["rot_{}".format(i) for i in range(self._rotation.shape[1])]
        return names

    def save_ply(self, path):
        """Serialize all gaussian attributes to a binary .ply at `path`."""
        mkdir_p(os.path.dirname(path))

        def to_flat_numpy(tensor):
            # [N, C, F] -> [N, F*C] on cpu, matching the attribute name order.
            return (
                tensor.detach()
                .transpose(1, 2)
                .flatten(start_dim=1)
                .contiguous()
                .cpu()
                .numpy()
            )

        xyz = self._xyz.detach().cpu().numpy()
        attributes = np.concatenate(
            (
                xyz,
                np.zeros_like(xyz),  # normals are not tracked; write zeros
                to_flat_numpy(self._features_dc),
                to_flat_numpy(self._features_rest),
                self._opacity.detach().cpu().numpy(),
                self._scaling.detach().cpu().numpy(),
                self._rotation.detach().cpu().numpy(),
            ),
            axis=1,
        )

        dtype_full = [
            (attribute, "f4") for attribute in self.construct_list_of_attributes()
        ]
        elements = np.empty(xyz.shape[0], dtype=dtype_full)
        elements[:] = list(map(tuple, attributes))
        PlyData([PlyElement.describe(elements, "vertex")]).write(path)

    def reset_opacity(self):
        """Clamp every opacity down to at most 0.01 (stored back in logit
        space) and swap the new tensor into the optimizer."""
        current = self.get_opacity
        capped = torch.min(current, torch.ones_like(current) * 0.01)
        opacities_new = inverse_sigmoid(capped)
        replaced = self.replace_tensor_to_optimizer(opacities_new, "opacity")
        self._opacity = replaced["opacity"]

    def load_ply(self, path):
        """Load gaussian parameters from a .ply written by `save_ply`.

        Rebuilds xyz, SH features (DC + rest), opacity, scales, and rotations
        as CUDA parameters and activates the maximum SH degree.
        """
        plydata = PlyData.read(path)

        xyz = np.stack(
            (
                np.asarray(plydata.elements[0]["x"]),
                np.asarray(plydata.elements[0]["y"]),
                np.asarray(plydata.elements[0]["z"]),
            ),
            axis=1,
        )
        opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]

        # DC SH term, one coefficient per color channel.
        features_dc = np.zeros((xyz.shape[0], 3, 1))
        features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
        features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
        features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])

        # Higher-order SH coefficients, ordered by their numeric suffix.
        extra_f_names = [
            p.name
            for p in plydata.elements[0].properties
            if p.name.startswith("f_rest_")
        ]
        extra_f_names = sorted(extra_f_names, key=lambda x: int(x.split("_")[-1]))
        assert len(extra_f_names) == 3 * (self.max_sh_degree + 1) ** 2 - 3
        features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
        for idx, attr_name in enumerate(extra_f_names):
            features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
        # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
        features_extra = features_extra.reshape(
            (features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)
        )

        scale_names = [
            p.name
            for p in plydata.elements[0].properties
            if p.name.startswith("scale_")
        ]
        scale_names = sorted(scale_names, key=lambda x: int(x.split("_")[-1]))
        scales = np.zeros((xyz.shape[0], len(scale_names)))
        for idx, attr_name in enumerate(scale_names):
            scales[:, idx] = np.asarray(plydata.elements[0][attr_name])

        rot_names = [
            p.name for p in plydata.elements[0].properties if p.name.startswith("rot")
        ]
        rot_names = sorted(rot_names, key=lambda x: int(x.split("_")[-1]))
        rots = np.zeros((xyz.shape[0], len(rot_names)))
        for idx, attr_name in enumerate(rot_names):
            rots[:, idx] = np.asarray(plydata.elements[0][attr_name])

        # The [N, channels, coeffs] arrays are transposed to [N, coeffs,
        # channels], matching the layout produced by create_from_pcd.
        self._xyz = nn.Parameter(
            torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True)
        )
        self._features_dc = nn.Parameter(
            torch.tensor(features_dc, dtype=torch.float, device="cuda")
            .transpose(1, 2)
            .contiguous()
            .requires_grad_(True)
        )
        self._features_rest = nn.Parameter(
            torch.tensor(features_extra, dtype=torch.float, device="cuda")
            .transpose(1, 2)
            .contiguous()
            .requires_grad_(True)
        )
        self._opacity = nn.Parameter(
            torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(
                True
            )
        )
        self._scaling = nn.Parameter(
            torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True)
        )
        self._rotation = nn.Parameter(
            torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True)
        )

        # Loaded models render with the full SH degree immediately.
        self.active_sh_degree = self.max_sh_degree

    def replace_tensor_to_optimizer(self, tensor, name):
        """Swap the parameter of the optimizer group called `name` for `tensor`,
        resetting its Adam moments.

        Fix vs. the original: when the optimizer has no stored state yet for
        the parameter (e.g. before the first optimizer step), the original
        crashed with a TypeError on `stored_state["exp_avg"]`. This version
        guards the None case, matching `_prune_optimizer` and
        `cat_tensors_to_optimizer`.

        Returns:
            {name: new nn.Parameter} for the matched group.
        """
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            if group["name"] != name:
                continue
            old_param = group["params"][0]
            stored_state = self.optimizer.state.get(old_param, None)

            group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
            if stored_state is not None:
                # Fresh first/second moments for the replaced values, re-keyed
                # onto the new Parameter object.
                stored_state["exp_avg"] = torch.zeros_like(tensor)
                stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
                del self.optimizer.state[old_param]
                self.optimizer.state[group["params"][0]] = stored_state

            optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def _prune_optimizer(self, mask):
        """Keep only rows where `mask` is True in every optimizer param group
        (and in its Adam moments); returns {group name: new Parameter}."""
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            stored_state = self.optimizer.state.get(group["params"][0], None)
            if stored_state is not None:
                # Slice the Adam moments to match the surviving rows.
                stored_state["exp_avg"] = stored_state["exp_avg"][mask]
                stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]

                # The state dict is keyed by Parameter object, so the old key
                # must be removed and the state re-attached to the new one.
                del self.optimizer.state[group["params"][0]]
                group["params"][0] = nn.Parameter(
                    (group["params"][0][mask].requires_grad_(True))
                )
                self.optimizer.state[group["params"][0]] = stored_state

                optimizable_tensors[group["name"]] = group["params"][0]
            else:
                # No optimizer state yet (no step taken): just shrink the param.
                group["params"][0] = nn.Parameter(
                    group["params"][0][mask].requires_grad_(True)
                )
                optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def prune_points(self, mask):
        """Delete every gaussian where `mask` is True, keeping parameters,
        optimizer state, and densification statistics aligned."""
        keep = ~mask
        kept = self._prune_optimizer(keep)

        self._xyz = kept["xyz"]
        self._features_dc = kept["f_dc"]
        self._features_rest = kept["f_rest"]
        self._opacity = kept["opacity"]
        self._scaling = kept["scaling"]
        self._rotation = kept["rotation"]

        # Running densification stats follow the surviving points.
        self.xyz_gradient_accum = self.xyz_gradient_accum[keep]
        self.denom = self.denom[keep]
        self.max_radii2D = self.max_radii2D[keep]

    def cat_tensors_to_optimizer(self, tensors_dict):
        """Append `tensors_dict[name]` rows to each optimizer group's parameter,
        zero-initializing the Adam moments for the new rows.

        Returns {group name: new Parameter} for every group.
        """
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            # Each group was created with exactly one parameter (training_setup).
            assert len(group["params"]) == 1
            extension_tensor = tensors_dict[group["name"]]
            stored_state = self.optimizer.state.get(group["params"][0], None)
            if stored_state is not None:
                # New rows start with zeroed first/second moments.
                stored_state["exp_avg"] = torch.cat(
                    (stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0
                )
                stored_state["exp_avg_sq"] = torch.cat(
                    (stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)),
                    dim=0,
                )

                # Re-key the state onto the enlarged Parameter object.
                del self.optimizer.state[group["params"][0]]
                group["params"][0] = nn.Parameter(
                    torch.cat(
                        (group["params"][0], extension_tensor), dim=0
                    ).requires_grad_(True)
                )
                self.optimizer.state[group["params"][0]] = stored_state

                optimizable_tensors[group["name"]] = group["params"][0]
            else:
                # No optimizer state yet: just concatenate the rows.
                group["params"][0] = nn.Parameter(
                    torch.cat(
                        (group["params"][0], extension_tensor), dim=0
                    ).requires_grad_(True)
                )
                optimizable_tensors[group["name"]] = group["params"][0]

        return optimizable_tensors

    def densification_postfix(
        self,
        new_xyz,
        new_features_dc,
        new_features_rest,
        new_opacities,
        new_scaling,
        new_rotation,
    ):
        """Append freshly created gaussians to the model and reset the
        densification statistics for the enlarged point set."""
        appended = self.cat_tensors_to_optimizer(
            {
                "xyz": new_xyz,
                "f_dc": new_features_dc,
                "f_rest": new_features_rest,
                "opacity": new_opacities,
                "scaling": new_scaling,
                "rotation": new_rotation,
            }
        )
        self._xyz = appended["xyz"]
        self._features_dc = appended["f_dc"]
        self._features_rest = appended["f_rest"]
        self._opacity = appended["opacity"]
        self._scaling = appended["scaling"]
        self._rotation = appended["rotation"]

        # Gradient stats restart from zero after every densification pass.
        num_points = self.get_xyz.shape[0]
        self.xyz_gradient_accum = torch.zeros((num_points, 1), device="cuda")
        self.denom = torch.zeros((num_points, 1), device="cuda")
        self.max_radii2D = torch.zeros((num_points), device="cuda")

    def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
        """Split large, high-gradient gaussians into N smaller ones.

        New centers are sampled from each selected gaussian's own anisotropic
        distribution; new scales are shrunk by 1/(0.8*N). The original
        (over-sized) gaussians are pruned at the end.
        """
        n_init_points = self.get_xyz.shape[0]
        # Extract points that satisfy the gradient condition.
        # grads may be shorter than the current point count — NOTE(review):
        # presumably because points were appended after the stats were
        # accumulated — so pad the missing entries with zeros.
        padded_grad = torch.zeros((n_init_points), device="cuda")
        padded_grad[: grads.shape[0]] = grads.squeeze()
        selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
        # Only split gaussians that are large relative to the scene extent
        # (small ones are handled by densify_and_clone).
        selected_pts_mask = torch.logical_and(
            selected_pts_mask,
            torch.max(self.get_scaling, dim=1).values
            > self.percent_dense * scene_extent,
        )

        # Sample N offsets per selected gaussian from N(0, scale), rotated
        # into world space by the gaussian's own rotation.
        stds = self.get_scaling[selected_pts_mask].repeat(N, 1)
        means = torch.zeros((stds.size(0), 3), device="cuda")
        samples = torch.normal(mean=means, std=stds)
        rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N, 1, 1)
        new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[
            selected_pts_mask
        ].repeat(N, 1)
        # Children are shrunk by 1/(0.8*N) (stored back in log-space).
        new_scaling = self.scaling_inverse_activation(
            self.get_scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)
        )
        new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)
        new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1, 1)
        new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1, 1)
        new_opacity = self._opacity[selected_pts_mask].repeat(N, 1)

        self.densification_postfix(
            new_xyz,
            new_features_dc,
            new_features_rest,
            new_opacity,
            new_scaling,
            new_rotation,
        )

        # Remove the parents; the freshly appended children (all False in the
        # padded mask) are kept.
        prune_filter = torch.cat(
            (
                selected_pts_mask,
                torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool),
            )
        )
        self.prune_points(prune_filter)

    def densify_and_clone(self, grads, grad_threshold, scene_extent):
        """Clone small gaussians whose accumulated view-space gradient is large.

        Clones are exact copies appended via densification_postfix; subsequent
        optimization moves them apart.
        """
        # candidates: gradient above threshold AND world-space size at or
        # below the dense limit
        high_grad = torch.norm(grads, dim=-1) >= grad_threshold
        small_enough = (
            torch.max(self.get_scaling, dim=1).values
            <= self.percent_dense * scene_extent
        )
        selected_pts_mask = torch.logical_and(high_grad, small_enough)

        self.densification_postfix(
            self._xyz[selected_pts_mask],
            self._features_dc[selected_pts_mask],
            self._features_rest[selected_pts_mask],
            self._opacity[selected_pts_mask],
            self._scaling[selected_pts_mask],
            self._rotation[selected_pts_mask],
        )

    def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
        """Run one densification round (clone then split), then prune.

        Pruned points are those with opacity below ``min_opacity`` and, when
        ``max_screen_size`` is truthy, those too large on screen or in world
        space.
        """
        # average accumulated gradients; points never touched give 0/0 -> NaN
        grads = self.xyz_gradient_accum / self.denom
        grads[torch.isnan(grads)] = 0.0

        self.densify_and_clone(grads, max_grad, extent)
        self.densify_and_split(grads, max_grad, extent)

        prune_mask = (self.get_opacity < min_opacity).squeeze()
        if max_screen_size:
            too_big_screen = self.max_radii2D > max_screen_size
            too_big_world = self.get_scaling.max(dim=1).values > 0.1 * extent
            prune_mask = prune_mask | too_big_screen | too_big_world
        self.prune_points(prune_mask)

        torch.cuda.empty_cache()

    def add_densification_stats(self, viewspace_point_tensor, update_filter):
        """Accumulate view-space gradient magnitudes for densification.

        Adds the 2D (xy) gradient norm of the selected screen-space points to
        ``xyz_gradient_accum`` and bumps their sample count in ``denom``.
        """
        grad_xy = viewspace_point_tensor.grad[update_filter, :2]
        self.xyz_gradient_accum[update_filter] += grad_xy.norm(dim=-1, keepdim=True)
        self.denom[update_filter] += 1

    def apply_discrete_offset_filds(self, origin_points, offsets):
        """Translate every gaussian by the offset of its nearest driving point.

        Args:
            origin_points: (N_r, 3) positions where offsets are defined.
            offsets: (N_r, 3) displacement attached to each origin point.

        Returns:
            A new GaussianModel with translated centers; all other attributes
            are shared with this model (not copied).
        """

        # origin_points and self._xyz are generally not index-aligned, so match
        # them by distance first.

        # pairwise distances, [N_r, num_points]
        dist = torch.cdist(origin_points, self._xyz)
        # min over dim=0: for each gaussian center, the index of its nearest
        # origin point (note: despite older comments, NOT the other way around)
        _, idx = torch.min(dist, dim=0)

        # apply offsets

        # idx is [num_points], so offsets[idx] is [num_points, 3]
        new_xyz = self._xyz + offsets[idx]

        if self.optimizer is None:
            optim_state = None
        else:
            optim_state = self.optimizer.state_dict()

        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    def apply_discrete_offset_filds_with_R(self, origin_points, offsets, topk=6):
        """Translate gaussians via their top-k nearest driving points and
        rotate them with the rigid transform fitted to those neighbors.

        Args:
            origin_points: (N_r, 3) positions where offsets are defined.
            offsets: (N_r, 3) displacement attached to each origin point.
            topk: number of nearest origin points matched per gaussian. The
                matching indices are cached in ``self.matched_inds`` and reused
                on later calls, so origin_points is assumed static across calls.

        Returns:
            A new GaussianModel with translated centers and updated rotations;
            other attributes are shared with this model.
        """

        # origin_points and self._xyz are generally not index-aligned, so match
        # them by distance first (negated so topk picks the closest).
        if self.matched_inds is None:
            # [N_r, num_points]
            dist = torch.cdist(origin_points, self._xyz) * -1.0
            # idxs: [topk, num_points] indices into origin_points
            # (fixed: removed a leftover debug print of dist here)
            _, idxs = torch.topk(dist, topk, dim=0)

            self.matched_inds = idxs
        else:
            idxs = self.matched_inds

        # [topk, num_points, 3] => [num_points, topk, 3]
        matched_topk_offsets = offsets[idxs].transpose(0, 1)
        source_points = origin_points[idxs].transpose(0, 1)

        # per-gaussian rigid fit between neighbors and displaced neighbors
        # [num_points, 3, 3/1]
        R, t = get_rigid_transform(source_points, source_points + matched_topk_offsets)

        # new_xyz = R @ self._xyz.unsqueeze(dim=-1) + t
        # new_xyz = new_xyz.squeeze(dim=-1)

        # translate by the mean neighbor offset (the fitted t is left unused);
        # only the fitted rotation is applied to the gaussian orientations
        avg_offsets = matched_topk_offsets.mean(dim=1)
        new_xyz = self._xyz + avg_offsets  # offset directly

        new_rotation = quaternion_multiply(matrix_to_quaternion(R), self._rotation)

        if self.optimizer is None:
            optim_state = None
        else:
            optim_state = self.optimizer.state_dict()

        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            new_rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    def apply_se3_fields(
        self,
        se3_model,
        timestamp: float,
        freeze_mask=None,
    ):
        """Deform the gaussians with an SE(3) field evaluated at `timestamp`.

        Args:
            se3_model: callable mapping [M, 4] (x, y, z, t) queries to a
                per-point transform (R: [M, 3, 3], t: [M, 3]).
            timestamp: float.  in range [0, 1]
            freeze_mask: optional [N] boolean mask; True entries are left
                untouched and the field is only evaluated on the moving rest.

        Returns:
            A new GaussianModel with translated centers and rotated
            orientations; other attributes are shared with this model.
        """

        # per-gaussian query = (x, y, z, t)
        inp_time = torch.ones_like(self._xyz[:, 0:1]) * timestamp
        inp = torch.cat([self._xyz, inp_time], dim=-1)

        if freeze_mask is not None:
            moving_mask = torch.logical_not(freeze_mask)
            inp = inp[moving_mask, ...]
        # [bs, 3, 3]. [bs, 3]
        R, t = se3_model(inp)

        # print("abs t mean", torch.abs(t).mean(dim=0))
        # new_xyz = (R @ self._xyz.unsqueeze(dim=-1)).squeeze(dim=-1) + t

        # centers move only by the translation t; R rotates the orientations
        if freeze_mask is None:
            new_xyz = self._xyz + t
            new_rotation = quaternion_multiply(matrix_to_quaternion(R), self._rotation)
        else:
            # write results back only into the moving subset; frozen rows keep
            # their original values
            new_xyz = self._xyz.clone()
            new_xyz[moving_mask, ...] += t
            new_rotation = self._rotation.clone()
            new_rotation[moving_mask, ...] = quaternion_multiply(
                matrix_to_quaternion(R), self._rotation[moving_mask, ...]
            )

        if self.optimizer is None:
            optim_state = None
        else:
            optim_state = self.optimizer.state_dict()

        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            new_rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    def apply_offset_fields(self, offset_field, timestamp: float):
        """Translate every gaussian by a learned offset field at `timestamp`.

        Args:
            offset_field: callable mapping [N, 4] (x, y, z, t) queries to
                [N, 3] offsets.
            timestamp: float.  in range [0, 1]

        Returns:
            A new GaussianModel with translated centers; other attributes are
            shared with this model.
        """

        # per-gaussian query = (x, y, z, t)
        query_time = torch.full_like(self._xyz[:, 0:1], timestamp)
        query = torch.cat([self._xyz, query_time], dim=-1)
        offsets = offset_field(query)

        new_xyz = self._xyz + offsets

        optim_state = None if self.optimizer is None else self.optimizer.state_dict()

        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    def apply_offset_fields_with_R(self, offset_field, timestamp: float, eps=1e-2):
        """Deform gaussians with an offset field, recovering a local rotation.

        Each center is expanded into a small 5-point stencil; the offsets
        sampled at the stencil define a per-point rigid fit whose rotation is
        applied to the gaussian orientation, while the translation used is the
        mean stencil offset.

        Args:
            offset_field: callable mapping [M, 4] (x, y, z, t) queries to
                [M, 3] offsets.
            timestamp: float.  in range [0, 1]
            eps: half-size of the stencil perturbation.
        """

        # [5, 3] stencil: center plus 4 tetrahedral corners
        inp_perterb = (
            torch.tensor(
                [
                    [0.0, 0.0, 0.0],  # center row; with the 4 corners the 5 points are non-coplanar
                    [+eps, -eps, -eps],
                    [-eps, -eps, +eps],
                    [-eps, +eps, -eps],
                    [+eps, +eps, +eps],
                ],
            )
            .to(self._xyz.device)
            .float()
        )
        #  => [N, 5, 3]
        source_points = self._xyz.unsqueeze(dim=1) + inp_perterb.unsqueeze(dim=0)
        num_points = source_points.shape[0]

        # flatten to [N * 5, 3] and append the time coordinate
        inpx = source_points.flatten(end_dim=1)
        inp_time = torch.ones_like(inpx[:, 0:1]) * timestamp

        inp = torch.cat([inpx, inp_time], dim=-1)

        # [N, 5, 3] offsets, one per stencil sample
        sampled_offsets = offset_field(inp).reshape((num_points, -1, 3))

        # per-gaussian rigid fit between stencil and displaced stencil
        R, t = get_rigid_transform(source_points, source_points + sampled_offsets)

        # new_xyz = R @ self._xyz.unsqueeze(dim=-1) + t
        # new_xyz = new_xyz.squeeze(dim=-1)

        # translate by the mean stencil offset (the fitted t is left unused)
        avg_offsets = sampled_offsets.mean(dim=1)
        new_xyz = self._xyz + avg_offsets  # offset directly

        new_rotation = quaternion_multiply(matrix_to_quaternion(R), self._rotation)

        if self.optimizer is None:
            optim_state = None
        else:
            optim_state = self.optimizer.state_dict()

        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            new_rotation,
            self._opacity,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            optim_state,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    def init_from_mesh(
        self,
        mesh_path: str,
        num_gaussians: int = 10000,
    ):
        """Initialize gaussian parameters from points sampled on a mesh surface.

        Args:
            mesh_path: path to a triangle mesh readable by point_cloud_utils;
                per-vertex colors are read from mesh.vertex_data.colors.
            num_gaussians: number of surface samples / gaussians to create.
        """
        import point_cloud_utils as pcu

        mesh = pcu.load_triangle_mesh(mesh_path)

        v, f = mesh.v, mesh.f

        v_n = pcu.estimate_mesh_normals(v, f)
        vert_colors = mesh.vertex_data.colors

        # random surface samples: face ids + barycentric coordinates
        fid, bc = pcu.sample_mesh_random(v, f, num_gaussians)

        # Interpolate the vertex positions and normals using the returned barycentric coordinates
        # to get sample positions and normals
        rand_positions = pcu.interpolate_barycentric_coords(f, fid, bc, v)
        # NOTE(review): rand_normals is computed but currently unused below
        rand_normals = pcu.interpolate_barycentric_coords(f, fid, bc, v_n)
        rand_colors = pcu.interpolate_barycentric_coords(f, fid, bc, vert_colors)[:, :3]

        # copy original pointcloud init functions

        fused_point_cloud = torch.tensor(np.asarray(rand_positions)).float().cuda()
        fused_color = RGB2SH(torch.tensor(np.asarray(rand_colors)).float().cuda())
        # [N, 3, (max_sh_degree + 1)^2] SH coefficients; DC term gets the color
        features = (
            torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2))
            .float()
            .cuda()
        )
        features[:, :3, 0] = fused_color
        # NOTE(review): likely meant `features[:, :3, 1:]` — as written the
        # slice is empty (dim 1 has size 3), so this is a no-op; harmless since
        # `features` is zero-initialized.
        features[:, 3:, 1:] = 0.0

        print("Number of points at initialisation : ", fused_point_cloud.shape[0])

        # isotropic log-scale from the (clamped) squared nearest-neighbor distance
        dist2 = torch.clamp_min(
            distCUDA2(torch.from_numpy(np.asarray(rand_positions)).float().cuda()),
            0.0000001,
        )
        scales = torch.log(torch.sqrt(dist2))[..., None].repeat(1, 3)
        # identity quaternion per point (presumably w-first layout — TODO confirm)
        rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
        rots[:, 0] = 1

        # opacities start at 0.1, stored pre-sigmoid
        opacities = inverse_sigmoid(
            0.1
            * torch.ones(
                (fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"
            )
        )

        self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
        self._features_dc = nn.Parameter(
            features[:, :, 0:1].transpose(1, 2).contiguous().requires_grad_(True)
        )
        self._features_rest = nn.Parameter(
            features[:, :, 1:].transpose(1, 2).contiguous().requires_grad_(True)
        )
        self._scaling = nn.Parameter(scales.requires_grad_(True))
        self._rotation = nn.Parameter(rots.requires_grad_(True))
        self._opacity = nn.Parameter(opacities.requires_grad_(True))
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

    def detach_grad(
        self,
    ):
        """Disable gradient tracking on all gaussian parameter tensors."""
        for param in (
            self._xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
        ):
            param.requires_grad = False

    def apply_mask(self, mask):
        """Return a new GaussianModel containing only the points selected by mask.

        Args:
            mask: boolean (or index) tensor over the current points.

        Returns:
            A new GaussianModel sharing the selected parameter rows.

        Note:
            max_radii2D is passed through unmasked, preserving the original
            behavior.
        """
        new_xyz = self._xyz[mask]
        # Mask the densification statistics only when they are per-point
        # buffers of the current size. (Fixed: the original compared full
        # shapes, (N, 1) vs (N, 3), which could never be equal, so the stats
        # were always passed through unmasked; compare point counts instead.)
        if self.xyz_gradient_accum.shape[0] == self._xyz.shape[0]:
            new_xyz_gradient_accum = self.xyz_gradient_accum[mask]
            new_denom = self.denom[mask]
        else:
            new_xyz_gradient_accum = self.xyz_gradient_accum
            new_denom = self.denom
        new_model_args = (
            self.active_sh_degree,
            new_xyz,
            self._features_dc[mask],
            self._features_rest[mask],
            self._scaling[mask],
            self._rotation[mask],
            self._opacity[mask],
            self.max_radii2D,
            new_xyz_gradient_accum,
            new_denom,
            None,
            self.spatial_lr_scale,
        )

        ret_gaussian = GaussianModel(self.max_sh_degree)
        ret_gaussian.restore(new_model_args, None)

        return ret_gaussian

    @torch.no_grad()
    def extract_fields(self, resolution=128, num_blocks=16, relax_ratio=1.5):
        """Evaluate the gaussians' opacity-weighted density on a dense voxel grid.

        Args:
            resolution: side length of the cubic output grid; must be a
                multiple of num_blocks.
            num_blocks: the grid is processed as num_blocks^3 tiles to bound
                peak memory.
            relax_ratio: gaussians within relax_ratio * block_size outside a
                tile still contribute to it.

        Returns:
            occ: [resolution, resolution, resolution] float32 density grid.

        Side effects:
            Stores self.center / self.scale (the ~[-1, 1] normalization) for
            extract_mesh to undo.
        """
        # tile size in normalized [-1, 1] coordinates
        block_size = 2 / num_blocks

        # Fixed: the original asserted `resolution % block_size == 0`, a float
        # modulo that also passes for resolutions the tiles do not evenly
        # divide (e.g. 100 % 0.125 == 0.0), leaving part of `occ` unfilled.
        assert resolution % num_blocks == 0
        split_size = resolution // num_blocks

        opacities = self.get_opacity

        # pre-filter low opacity gaussians to save computation
        mask = (opacities > 0.005).squeeze(1)

        opacities = opacities[mask]
        xyzs = self.get_xyz[mask]
        stds = self.get_scaling[mask]

        # normalize to ~ [-1, 1]
        mn, mx = xyzs.amin(0), xyzs.amax(0)
        self.center = (mn + mx) / 2
        self.scale = 1.0 / (mx - mn).amax().item()

        print("gaussian center, scale", self.center, self.scale)
        xyzs = (xyzs - self.center) * self.scale
        stds = stds * self.scale

        # packed [N, 6] covariance (consumed by gaussian_3d_coeff below)
        covs = self.covariance_activation(stds, 1, self._rotation[mask])

        # tile
        device = opacities.device
        occ = torch.zeros([resolution] * 3, dtype=torch.float32, device=device)

        X = torch.linspace(-1, 1, resolution).split(split_size)
        Y = torch.linspace(-1, 1, resolution).split(split_size)
        Z = torch.linspace(-1, 1, resolution).split(split_size)

        # loop blocks (assume max size of gaussian is small than relax_ratio * block_size !!!)
        for xi, xs in enumerate(X):
            for yi, ys in enumerate(Y):
                for zi, zs in enumerate(Z):
                    xx, yy, zz = torch.meshgrid(xs, ys, zs)
                    # sample points [M, 3]
                    pts = torch.cat(
                        [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)],
                        dim=-1,
                    ).to(device)
                    # in-tile gaussians mask (tile bounds relaxed by relax_ratio)
                    vmin, vmax = pts.amin(0), pts.amax(0)
                    vmin -= block_size * relax_ratio
                    vmax += block_size * relax_ratio
                    mask = (xyzs < vmax).all(-1) & (xyzs > vmin).all(-1)
                    # if hit no gaussian, continue to next block
                    if not mask.any():
                        continue
                    mask_xyzs = xyzs[mask]  # [L, 3]
                    mask_covs = covs[mask]  # [L, 6]
                    mask_opas = opacities[mask].view(1, -1)  # [L, 1] --> [1, L]

                    # query per point-gaussian pair.
                    g_pts = pts.unsqueeze(1).repeat(
                        1, mask_covs.shape[0], 1
                    ) - mask_xyzs.unsqueeze(
                        0
                    )  # [M, L, 3]
                    g_covs = mask_covs.unsqueeze(0).repeat(
                        pts.shape[0], 1, 1
                    )  # [M, L, 6]

                    # batch on gaussian to avoid OOM
                    batch_g = 1024
                    val = 0
                    for start in range(0, g_covs.shape[1], batch_g):
                        end = min(start + batch_g, g_covs.shape[1])
                        w = gaussian_3d_coeff(
                            g_pts[:, start:end].reshape(-1, 3),
                            g_covs[:, start:end].reshape(-1, 6),
                        ).reshape(
                            pts.shape[0], -1
                        )  # [M, l]
                        # opacity-weighted sum over this gaussian batch
                        val += (mask_opas[:, start:end] * w).sum(-1)

                    # kiui.lo(val, mask_opas, w)

                    occ[
                        xi * split_size : xi * split_size + len(xs),
                        yi * split_size : yi * split_size + len(ys),
                        zi * split_size : zi * split_size + len(zs),
                    ] = val.reshape(len(xs), len(ys), len(zs))

        return occ

    def extract_mesh(self, path, density_thresh=1, resolution=128, decimate_target=1e5):
        """Extract a triangle mesh from the gaussian density via marching cubes.

        Args:
            path: output path; only its parent directory is created here — the
                mesh itself is returned, not written.
            density_thresh: iso-surface level passed to marching cubes.
            resolution: voxel grid resolution used by extract_fields.
            decimate_target: decimate down to about this many triangles when
                the mesh exceeds it (skipped when <= 0).

        Returns:
            A Mesh instance on CUDA.
        """
        os.makedirs(os.path.dirname(path), exist_ok=True)

        from physdreamer.gaussian_3d.scene.mesh import Mesh
        from physdreamer.gaussian_3d.scene.mesh_utils import decimate_mesh, clean_mesh

        occ = self.extract_fields(resolution).detach().cpu().numpy()

        print(occ.shape, occ.min(), occ.max(), occ.mean(), "occ stats")
        print(np.percentile(occ, [0, 1, 5, 10, 50, 90, 95, 99, 100]), "occ percentiles")
        import mcubes

        vertices, triangles = mcubes.marching_cubes(occ, density_thresh)
        # grid indices -> normalized [-1, 1] coordinates
        vertices = vertices / (resolution - 1.0) * 2 - 1

        # transform back to the original space
        vertices = vertices / self.scale + self.center.detach().cpu().numpy()

        vertices, triangles = clean_mesh(
            vertices, triangles, remesh=True, remesh_size=0.015
        )
        if decimate_target > 0 and triangles.shape[0] > decimate_target:
            vertices, triangles = decimate_mesh(vertices, triangles, decimate_target)

        v = torch.from_numpy(vertices.astype(np.float32)).contiguous().cuda()
        f = torch.from_numpy(triangles.astype(np.int32)).contiguous().cuda()

        print(
            f"[INFO] marching cubes result: {v.shape} ({v.min().item()}-{v.max().item()}), {f.shape}"
        )

        mesh = Mesh(v=v, f=f, device="cuda")

        return mesh


def gaussian_3d_coeff(xyzs, covs):
    """Evaluate unnormalized 3D gaussian weights exp(-0.5 r^T S^-1 r).

    Args:
        xyzs: [N, 3] offsets from the gaussian centers.
        covs: [N, 6] packed symmetric covariance entries (a, b, c, d, e, f)
            for the matrix [[a, b, c], [b, d, e], [c, e, f]].

    Returns:
        [N] weights in (0, 1].
    """
    x, y, z = xyzs.unbind(-1)
    a, b, c, d, e, f = covs.unbind(-1)

    # determinant of the packed symmetric 3x3 matrix; the tiny eps must stay
    # small so it never dominates a legitimate determinant
    det = a * d * f + 2 * e * c * b - e**2 * a - c**2 * d - b**2 * f + 1e-24
    inv_det = 1 / det

    # adjugate / det -> entries of the inverse covariance, same packing
    inv_a = (d * f - e**2) * inv_det
    inv_b = (e * c - b * f) * inv_det
    inv_c = (e * b - c * d) * inv_det
    inv_d = (a * f - c**2) * inv_det
    inv_e = (b * c - e * a) * inv_det
    inv_f = (a * d - b**2) * inv_det

    # quadratic form -0.5 * r^T S^-1 r with off-diagonal terms expanded
    power = (
        -0.5 * (x**2 * inv_a + y**2 * inv_d + z**2 * inv_f)
        - x * y * inv_b
        - x * z * inv_c
        - y * z * inv_e
    )

    # numerically broken entries (positive exponent): force their weight to 0
    power = power.masked_fill(power > 0, -1e10)

    return torch.exp(power)


================================================
FILE: physdreamer/gaussian_3d/scene/mesh.py
================================================
import os
import cv2
import torch
import trimesh
import numpy as np


def dot(x, y):
    """Batched dot product along the last dimension, keeping that dim."""
    return (x * y).sum(dim=-1, keepdim=True)


def length(x, eps=1e-20):
    """Euclidean norm along the last dim, clamped away from zero by eps."""
    squared = torch.sum(x * x, -1, keepdim=True)
    return torch.sqrt(squared.clamp(min=eps))


def safe_normalize(x, eps=1e-20):
    """Normalize along the last dim; eps guards against division by zero."""
    norm = torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))
    return x / norm


class Mesh:
    def __init__(
        self,
        v=None,
        f=None,
        vn=None,
        fn=None,
        vt=None,
        ft=None,
        albedo=None,
        vc=None,  # vertex color
        device=None,
    ):
        """Container for triangle-mesh data.

        v/f: vertex positions and triangle indices.
        vn/fn: vertex normals and per-face normal indices.
        vt/ft: texture coordinates and per-face uv indices.
        albedo: single texture image; vc: per-vertex colors used when there is
        no albedo.
        """
        self.device = device

        # geometry
        self.v = v
        self.f = f
        # normals
        self.vn = vn
        self.fn = fn
        # texture coordinates
        self.vt = vt
        self.ft = ft

        # only support a single albedo; vertex color is the fallback
        self.albedo = albedo
        self.vc = vc

        # normalization state overwritten by auto_size
        self.ori_center = 0
        self.ori_scale = 1

    @classmethod
    def load(
        cls,
        path=None,
        resize=True,
        renormal=True,
        retex=False,
        front_dir="+z",
        **kwargs,
    ):
        """Load a mesh (or build one from kwargs) and normalize/orient it.

        Args:
            path: mesh file path. None -> construct directly from kwargs;
                ".obj" -> load_obj (supports face uv); anything else ->
                load_trimesh (vertex uv only, more formats).
            resize: auto-center/scale via auto_size.
            renormal: recompute vertex normals even when the file had them.
            retex: force uv unwrapping (also triggered when an albedo exists
                without texcoords).
            front_dir: current front direction of the mesh, e.g. "+z", "-y",
                optionally suffixed with 1/2/3 for extra multiples of 90
                degrees; an axis swap plus that rotation maps it to +z.
        """
        # assume init with kwargs
        if path is None:
            mesh = cls(**kwargs)
        # obj supports face uv
        elif path.endswith(".obj"):
            mesh = cls.load_obj(path, **kwargs)
        # trimesh only supports vertex uv, but can load more formats
        else:
            mesh = cls.load_trimesh(path, **kwargs)

        print(f"[Mesh loading] v: {mesh.v.shape}, f: {mesh.f.shape}")
        # auto-normalize
        if resize:
            mesh.auto_size()
        # auto-fix normal
        if renormal or mesh.vn is None:
            mesh.auto_normal()
            print(f"[Mesh loading] vn: {mesh.vn.shape}, fn: {mesh.fn.shape}")
        # auto-fix texcoords
        if retex or (mesh.albedo is not None and mesh.vt is None):
            mesh.auto_uv(cache_path=path)
            print(f"[Mesh loading] vt: {mesh.vt.shape}, ft: {mesh.ft.shape}")

        # rotate front dir to +z
        if front_dir != "+z":
            # axis switch
            if "-z" in front_dir:
                T = torch.tensor(
                    [[1, 0, 0], [0, 1, 0], [0, 0, -1]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "+x" in front_dir:
                T = torch.tensor(
                    [[0, 0, 1], [0, 1, 0], [1, 0, 0]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "-x" in front_dir:
                T = torch.tensor(
                    [[0, 0, -1], [0, 1, 0], [1, 0, 0]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "+y" in front_dir:
                T = torch.tensor(
                    [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "-y" in front_dir:
                T = torch.tensor(
                    [[1, 0, 0], [0, 0, -1], [0, 1, 0]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            else:
                # unrecognized axis token: identity (only the 90-degree part
                # below applies)
                T = torch.tensor(
                    [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            # rotation (how many 90 degrees)
            if "1" in front_dir:
                T @= torch.tensor(
                    [[0, -1, 0], [1, 0, 0], [0, 0, 1]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "2" in front_dir:
                T @= torch.tensor(
                    [[1, 0, 0], [0, -1, 0], [0, 0, 1]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            elif "3" in front_dir:
                T @= torch.tensor(
                    [[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
                    device=mesh.device,
                    dtype=torch.float32,
                )
            # apply to both positions and normals
            mesh.v @= T
            mesh.vn @= T

        return mesh

    # load from obj file
    @classmethod
    def load_obj(cls, path, albedo_path=None, device=None):
        """Parse a Wavefront .obj (plus optional .mtl / texture) into a Mesh.

        Args:
            path: .obj file path.
            albedo_path: explicit texture image path; when None, the first
                map_Kd of the referenced .mtl is tried, then a uniform gray
                fallback texture is used.
            device: torch device; defaults to cuda when available.

        Notes:
            usemtl is ignored — the whole mesh is assumed to use one material.
            6-column "v" lines are interpreted as position + vertex color.
        """
        assert os.path.splitext(path)[-1] == ".obj"

        mesh = cls()

        # device
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        mesh.device = device

        # load obj
        with open(path, "r") as f:
            lines = f.readlines()

        def parse_f_v(fv):
            # pass in a vertex term of a face, return {v, vt, vn} (-1 if not provided)
            # supported forms:
            # f v1 v2 v3
            # f v1/vt1 v2/vt2 v3/vt3
            # f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3
            # f v1//vn1 v2//vn2 v3//vn3
            xs = [int(x) - 1 if x != "" else -1 for x in fv.split("/")]
            xs.extend([-1] * (3 - len(xs)))
            return xs[0], xs[1], xs[2]

        # NOTE: we ignore usemtl, and assume the mesh ONLY uses one material (first in mtl)
        vertices, texcoords, normals = [], [], []
        faces, tfaces, nfaces = [], [], []
        mtl_path = None

        for line in lines:
            split_line = line.split()
            # empty line
            if len(split_line) == 0:
                continue
            prefix = split_line[0].lower()
            # mtllib
            if prefix == "mtllib":
                mtl_path = split_line[1]
            # usemtl
            elif prefix == "usemtl":
                pass  # ignored
            # v/vn/vt
            elif prefix == "v":
                vertices.append([float(v) for v in split_line[1:]])
            elif prefix == "vn":
                normals.append([float(v) for v in split_line[1:]])
            elif prefix == "vt":
                val = [float(v) for v in split_line[1:]]
                # flip v to image-space convention
                texcoords.append([val[0], 1.0 - val[1]])
            elif prefix == "f":
                vs = split_line[1:]
                nv = len(vs)
                v0, t0, n0 = parse_f_v(vs[0])
                for i in range(nv - 2):  # triangulate (assume vertices are ordered)
                    v1, t1, n1 = parse_f_v(vs[i + 1])
                    v2, t2, n2 = parse_f_v(vs[i + 2])
                    faces.append([v0, v1, v2])
                    tfaces.append([t0, t1, t2])
                    nfaces.append([n0, n1, n2])

        mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)
        mesh.vt = (
            torch.tensor(texcoords, dtype=torch.float32, device=device)
            if len(texcoords) > 0
            else None
        )
        mesh.vn = (
            torch.tensor(normals, dtype=torch.float32, device=device)
            if len(normals) > 0
            else None
        )

        mesh.f = torch.tensor(faces, dtype=torch.int32, device=device)
        mesh.ft = (
            torch.tensor(tfaces, dtype=torch.int32, device=device)
            if len(texcoords) > 0
            else None
        )
        mesh.fn = (
            torch.tensor(nfaces, dtype=torch.int32, device=device)
            if len(normals) > 0
            else None
        )

        # see if there is vertex color
        use_vertex_color = False
        if mesh.v.shape[1] == 6:
            use_vertex_color = True
            mesh.vc = mesh.v[:, 3:]
            mesh.v = mesh.v[:, :3]
            print(f"[load_obj] use vertex color: {mesh.vc.shape}")

        # try to load texture image
        if not use_vertex_color:
            # try to retrieve mtl file
            mtl_path_candidates = []
            if mtl_path is not None:
                mtl_path_candidates.append(mtl_path)
                mtl_path_candidates.append(
                    os.path.join(os.path.dirname(path), mtl_path)
                )
            mtl_path_candidates.append(path.replace(".obj", ".mtl"))

            mtl_path = None
            for candidate in mtl_path_candidates:
                if os.path.exists(candidate):
                    mtl_path = candidate
                    break

            # if albedo_path is not provided, try retrieve it from mtl
            if mtl_path is not None and albedo_path is None:
                with open(mtl_path, "r") as f:
                    lines = f.readlines()
                for line in lines:
                    split_line = line.split()
                    # empty line
                    if len(split_line) == 0:
                        continue
                    prefix = split_line[0]
                    # NOTE: simply use the first map_Kd as albedo!
                    if "map_Kd" in prefix:
                        albedo_path = os.path.join(os.path.dirname(path), split_line[1])
                        print(f"[load_obj] use texture from: {albedo_path}")
                        break

            # still not found albedo_path, or the path doesn't exist
            if albedo_path is None or not os.path.exists(albedo_path):
                # init an empty texture
                print(f"[load_obj] init empty albedo!")
                # albedo = np.random.rand(1024, 1024, 3).astype(np.float32)
                albedo = np.ones((1024, 1024, 3), dtype=np.float32) * np.array(
                    [0.5, 0.5, 0.5]
                )  # default color
            else:
                albedo = cv2.imread(albedo_path, cv2.IMREAD_UNCHANGED)
                albedo = cv2.cvtColor(albedo, cv2.COLOR_BGR2RGB)
                albedo = albedo.astype(np.float32) / 255
                print(f"[load_obj] load texture: {albedo.shape}")

                # import matplotlib.pyplot as plt
                # plt.imshow(albedo)
                # plt.show()

            mesh.albedo = torch.tensor(albedo, dtype=torch.float32, device=device)

        return mesh

    @classmethod
    def load_trimesh(cls, path, device=None):
        """Load a ply/glb/... file via trimesh into a new mesh instance.

        Scenes are flattened to a single mesh (texture is lost when several
        geometries have to be concatenated). Vertex colors or a texture are
        extracted when available; otherwise a uniform gray albedo is used.
        """
        mesh = cls()

        # default device: prefer CUDA when available
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        mesh.device = device

        # use trimesh to load ply/glb, assume only has one single RootMesh...
        loaded = trimesh.load(path)
        if isinstance(loaded, trimesh.Scene):
            geometries = list(loaded.geometry.values())
            if len(geometries) == 1:
                _mesh = geometries[0]
            else:
                # manual concat, will lose texture
                parts = [g for g in geometries if isinstance(g, trimesh.Trimesh)]
                _mesh = trimesh.util.concatenate(parts)
        else:
            _mesh = loaded

        visual_kind = _mesh.visual.kind
        if visual_kind == "vertex":
            vertex_colors = (
                np.array(_mesh.visual.vertex_colors[..., :3]).astype(np.float32) / 255
            )
            mesh.vc = torch.tensor(vertex_colors, dtype=torch.float32, device=device)
            print(f"[load_trimesh] use vertex color: {mesh.vc.shape}")
        elif visual_kind == "texture":
            _material = _mesh.visual.material
            if isinstance(_material, trimesh.visual.material.PBRMaterial):
                base_tex = _material.baseColorTexture
            elif isinstance(_material, trimesh.visual.material.SimpleMaterial):
                base_tex = _material.to_pbr().baseColorTexture
            else:
                raise NotImplementedError(
                    f"material type {type(_material)} not supported!"
                )
            texture = np.array(base_tex).astype(np.float32) / 255
            mesh.albedo = torch.tensor(texture, dtype=torch.float32, device=device)
            print(f"[load_trimesh] load texture: {texture.shape}")
        else:
            # fall back to a uniform 0.5 gray texture
            texture = np.full((1024, 1024, 3), 0.5, dtype=np.float32)
            mesh.albedo = torch.tensor(texture, dtype=torch.float32, device=device)
            print(f"[load_trimesh] failed to load texture.")

        vertices = _mesh.vertices

        try:
            texcoords = _mesh.visual.uv
            # flip V: trimesh uses a bottom-left uv origin
            texcoords[:, 1] = 1 - texcoords[:, 1]
        except Exception:
            texcoords = None

        try:
            normals = _mesh.vertex_normals
        except Exception:
            normals = None

        # trimesh only supports per-vertex uv, so every attribute shares
        # the same face index buffer
        shared_faces = _mesh.faces

        mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)
        mesh.f = torch.tensor(shared_faces, dtype=torch.int32, device=device)

        if texcoords is not None:
            mesh.vt = torch.tensor(texcoords, dtype=torch.float32, device=device)
            mesh.ft = torch.tensor(shared_faces, dtype=torch.int32, device=device)
        else:
            mesh.vt = None
            mesh.ft = None

        if normals is not None:
            mesh.vn = torch.tensor(normals, dtype=torch.float32, device=device)
            mesh.fn = torch.tensor(shared_faces, dtype=torch.int32, device=device)
        else:
            mesh.vn = None
            mesh.fn = None

        return mesh

    def aabb(self):
        """Axis-aligned bounding box of the vertices as (min_xyz, max_xyz)."""
        lower = self.v.min(dim=0).values
        upper = self.v.max(dim=0).values
        return lower, upper

    @torch.no_grad()
    def auto_size(self):
        """Recenter and rescale vertices so the mesh spans ~[-0.6, 0.6].

        Stores the applied center/scale in `ori_center`/`ori_scale` so the
        transform can be undone later.
        """
        lo, hi = self.aabb()
        self.ori_center = 0.5 * (hi + lo)
        self.ori_scale = 1.2 / torch.max(hi - lo).item()
        self.v = (self.v - self.ori_center) * self.ori_scale

    def auto_normal(self):
        """Compute per-vertex normals by accumulating face normals.

        Writes the result to `self.vn` and aliases `self.fn` to `self.f`
        (normals share the position index buffer).
        """
        i0, i1, i2 = self.f[:, 0].long(), self.f[:, 1].long(), self.f[:, 2].long()
        v0, v1, v2 = self.v[i0, :], self.v[i1, :], self.v[i2, :]

        # Fix: pass dim=-1 explicitly. torch.cross without `dim` is deprecated
        # and picks the *first* dimension of size 3, which silently computes
        # the wrong normals whenever the mesh happens to have exactly 3 faces.
        face_normals = torch.cross(v1 - v0, v2 - v0, dim=-1)

        # Splat face normals to vertices
        vn = torch.zeros_like(self.v)
        vn.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)
        vn.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)
        vn.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)

        # Normalize, replace zero (degenerated) normals with a default +z
        vn = torch.where(
            dot(vn, vn) > 1e-20,
            vn,
            torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device),
        )
        vn = safe_normalize(vn)

        self.vn = vn
        self.fn = self.f

    def auto_uv(self, cache_path=None, vmap=True):
        """Unwrap UVs with xatlas, caching the result as `<cache_path>_uv.npz`.

        When `vmap` is True, vertices are remapped so each texcoord owns a
        unique vertex (required for gltf export).
        """
        npz_path = None
        if cache_path is not None:
            npz_path = os.path.splitext(cache_path)[0] + "_uv.npz"

        if npz_path is not None and os.path.exists(npz_path):
            cached = np.load(npz_path)
            vt_np, ft_np, vmapping = cached["vt"], cached["ft"], cached["vmapping"]
        else:
            import xatlas

            v_np = self.v.detach().cpu().numpy()
            f_np = self.f.detach().int().cpu().numpy()
            atlas = xatlas.Atlas()
            atlas.add_mesh(v_np, f_np)
            chart_options = xatlas.ChartOptions()
            atlas.generate(chart_options=chart_options)
            vmapping, ft_np, vt_np = atlas[0]  # [N], [M, 3], [N, 2]

            # save to cache
            if npz_path is not None:
                np.savez(npz_path, vt=vt_np, ft=ft_np, vmapping=vmapping)

        self.vt = torch.from_numpy(vt_np.astype(np.float32)).to(self.device)
        self.ft = torch.from_numpy(ft_np.astype(np.int32)).to(self.device)

        if vmap:
            # remap v/f to vt/ft, so each v correspond to a unique vt. (necessary for gltf)
            index = torch.from_numpy(vmapping.astype(np.int64)).long().to(self.device)
            self.align_v_to_vt(index)

    def align_v_to_vt(self, vmapping=None):
        """Reorder/duplicate vertex attributes so v/f are indexed like vt/ft.

        After this call `f == ft`, i.e. every texcoord has its own vertex.
        If `vmapping` (vt index -> v index) is not given it is reconstructed
        from the two face index buffers; for a vt shared by several vertices
        one of them wins the scatter.
        """
        if vmapping is None:
            ft_flat = self.ft.view(-1).long()
            f_flat = self.f.view(-1).long()
            vmapping = torch.zeros(
                self.vt.shape[0], dtype=torch.long, device=self.device
            )
            vmapping[ft_flat] = f_flat

        self.v = self.v[vmapping]
        self.f = self.ft
        # normals (when present) share the position index buffer
        if self.vn is not None:
            self.vn = self.vn[vmapping]
            self.fn = self.ft

    def to(self, device):
        """Move every tensor attribute (and the mesh itself) to `device`."""
        self.device = device
        for attr in ("v", "f", "vn", "fn", "vt", "ft", "albedo"):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, value.to(device))
        return self

    def write(self, path):
        """Dispatch to the exporter matching the extension of `path`."""
        if path.endswith(".ply"):
            exporter = self.write_ply
        elif path.endswith(".obj"):
            exporter = self.write_obj
        elif path.endswith((".glb", ".gltf")):
            exporter = self.write_glb
        else:
            raise NotImplementedError(f"format {path} not supported!")
        exporter(path)

    # write to ply file (only geom)
    def write_ply(self, path):
        """Export geometry only (no uv/texture) as a .ply via trimesh."""
        vertices = self.v.detach().cpu().numpy()
        faces = self.f.detach().cpu().numpy()
        trimesh.Trimesh(vertices=vertices, faces=faces).export(path)

    # write to gltf/glb file (geom + texture)
    def write_glb(self, path):
        """Export the mesh as a single-buffer glTF/glb with an embedded PNG albedo.

        Requires `self.vn` and `self.vt`. Vertices are first re-aligned so
        that positions and texcoords share one index buffer (glTF primitives
        have a single `indices` accessor). The binary blob is laid out as
        [indices | positions | texcoords | png bytes]; the four bufferViews
        and three accessors below describe exactly that layout, so the byte
        offsets must stay consistent with the concatenation order at the end.
        """
        assert (
            self.vn is not None and self.vt is not None
        )  # should be improved to support export without texture...

        # assert self.v.shape[0] == self.vn.shape[0] and self.v.shape[0] == self.vt.shape[0]
        if self.v.shape[0] != self.vt.shape[0]:
            self.align_v_to_vt()

        # assume f == fn == ft

        import pygltflib

        f_np = self.f.detach().cpu().numpy().astype(np.uint32)
        v_np = self.v.detach().cpu().numpy().astype(np.float32)
        # vn_np = self.vn.detach().cpu().numpy().astype(np.float32)
        vt_np = self.vt.detach().cpu().numpy().astype(np.float32)

        # albedo is stored RGB in [0, 1]; cv2.imencode expects BGR uint8
        albedo = self.albedo.detach().cpu().numpy()
        albedo = (albedo * 255).astype(np.uint8)
        albedo = cv2.cvtColor(albedo, cv2.COLOR_RGB2BGR)

        f_np_blob = f_np.flatten().tobytes()
        v_np_blob = v_np.tobytes()
        # vn_np_blob = vn_np.tobytes()
        vt_np_blob = vt_np.tobytes()
        albedo_blob = cv2.imencode(".png", albedo)[1].tobytes()

        gltf = pygltflib.GLTF2(
            scene=0,
            scenes=[pygltflib.Scene(nodes=[0])],
            nodes=[pygltflib.Node(mesh=0)],
            meshes=[
                pygltflib.Mesh(
                    primitives=[
                        pygltflib.Primitive(
                            # indices to accessors (0 is triangles)
                            attributes=pygltflib.Attributes(
                                POSITION=1,
                                TEXCOORD_0=2,
                            ),
                            indices=0,
                            material=0,
                        )
                    ]
                )
            ],
            materials=[
                pygltflib.Material(
                    pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(
                        baseColorTexture=pygltflib.TextureInfo(index=0, texCoord=0),
                        metallicFactor=0.0,
                        roughnessFactor=1.0,
                    ),
                    alphaCutoff=0,
                    doubleSided=True,
                )
            ],
            textures=[
                pygltflib.Texture(sampler=0, source=0),
            ],
            samplers=[
                pygltflib.Sampler(
                    magFilter=pygltflib.LINEAR,
                    minFilter=pygltflib.LINEAR_MIPMAP_LINEAR,
                    wrapS=pygltflib.REPEAT,
                    wrapT=pygltflib.REPEAT,
                ),
            ],
            images=[
                # use embedded (buffer) image
                pygltflib.Image(bufferView=3, mimeType="image/png"),
            ],
            buffers=[
                pygltflib.Buffer(
                    byteLength=len(f_np_blob)
                    + len(v_np_blob)
                    + len(vt_np_blob)
                    + len(albedo_blob)
                )
            ],
            # buffer view (based on dtype)
            bufferViews=[
                # triangles; as flatten (element) array
                pygltflib.BufferView(
                    buffer=0,
                    byteLength=len(f_np_blob),
                    target=pygltflib.ELEMENT_ARRAY_BUFFER,  # GL_ELEMENT_ARRAY_BUFFER (34963)
                ),
                # positions; as vec3 array
                pygltflib.BufferView(
                    buffer=0,
                    byteOffset=len(f_np_blob),
                    byteLength=len(v_np_blob),
                    byteStride=12,  # vec3
                    target=pygltflib.ARRAY_BUFFER,  # GL_ARRAY_BUFFER (34962)
                ),
                # texcoords; as vec2 array
                pygltflib.BufferView(
                    buffer=0,
                    byteOffset=len(f_np_blob) + len(v_np_blob),
                    byteLength=len(vt_np_blob),
                    byteStride=8,  # vec2
                    target=pygltflib.ARRAY_BUFFER,
                ),
                # texture; as none target
                pygltflib.BufferView(
                    buffer=0,
                    byteOffset=len(f_np_blob) + len(v_np_blob) + len(vt_np_blob),
                    byteLength=len(albedo_blob),
                ),
            ],
            accessors=[
                # 0 = triangles
                pygltflib.Accessor(
                    bufferView=0,
                    componentType=pygltflib.UNSIGNED_INT,  # GL_UNSIGNED_INT (5125)
                    count=f_np.size,
                    type=pygltflib.SCALAR,
                    max=[int(f_np.max())],
                    min=[int(f_np.min())],
                ),
                # 1 = positions
                pygltflib.Accessor(
                    bufferView=1,
                    componentType=pygltflib.FLOAT,  # GL_FLOAT (5126)
                    count=len(v_np),
                    type=pygltflib.VEC3,
                    max=v_np.max(axis=0).tolist(),
                    min=v_np.min(axis=0).tolist(),
                ),
                # 2 = texcoords
                pygltflib.Accessor(
                    bufferView=2,
                    componentType=pygltflib.FLOAT,
                    count=len(vt_np),
                    type=pygltflib.VEC2,
                    max=vt_np.max(axis=0).tolist(),
                    min=vt_np.min(axis=0).tolist(),
                ),
            ],
        )

        # set actual data; concatenation order must match the byteOffsets above
        gltf.set_binary_blob(f_np_blob + v_np_blob + vt_np_blob + albedo_blob)

        # glb = b"".join(gltf.save_to_bytes())
        gltf.save(path)

    # write to obj file (geom + texture)
    def write_obj(self, path):
        """Export the mesh as Wavefront OBJ + MTL (+ albedo PNG when present).

        Writes sibling files derived from `path`: `<name>.obj`, `<name>.mtl`
        and `<name>_albedo.png` (the PNG only when `self.albedo` is set).
        OBJ indices are 1-based; missing vt/vn slots are left empty.
        """
        mtl_path = path.replace(".obj", ".mtl")
        albedo_path = path.replace(".obj", "_albedo.png")

        v_np = self.v.detach().cpu().numpy()
        vt_np = self.vt.detach().cpu().numpy() if self.vt is not None else None
        vn_np = self.vn.detach().cpu().numpy() if self.vn is not None else None
        f_np = self.f.detach().cpu().numpy()
        ft_np = self.ft.detach().cpu().numpy() if self.ft is not None else None
        fn_np = self.fn.detach().cpu().numpy() if self.fn is not None else None

        with open(path, "w") as fp:
            fp.write(f"mtllib {os.path.basename(mtl_path)} \n")

            for v in v_np:
                fp.write(f"v {v[0]} {v[1]} {v[2]} \n")

            if vt_np is not None:
                # OBJ uses a bottom-left uv origin, hence the vertical flip
                for v in vt_np:
                    fp.write(f"vt {v[0]} {1 - v[1]} \n")

            if vn_np is not None:
                for v in vn_np:
                    fp.write(f"vn {v[0]} {v[1]} {v[2]} \n")

            fp.write(f"usemtl defaultMat \n")
            for i in range(len(f_np)):
                fp.write(
                    f'f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1 if ft_np is not None else ""}/{fn_np[i, 0] + 1 if fn_np is not None else ""} \
                             {f_np[i, 1] + 1}/{ft_np[i, 1] + 1 if ft_np is not None else ""}/{fn_np[i, 1] + 1 if fn_np is not None else ""} \
                             {f_np[i, 2] + 1}/{ft_np[i, 2] + 1 if ft_np is not None else ""}/{fn_np[i, 2] + 1 if fn_np is not None else ""} \n'
                )

        with open(mtl_path, "w") as fp:
            fp.write(f"newmtl defaultMat \n")
            fp.write(f"Ka 1 1 1 \n")
            fp.write(f"Kd 1 1 1 \n")
            fp.write(f"Ks 0 0 0 \n")
            fp.write(f"Tr 1 \n")
            fp.write(f"illum 1 \n")
            fp.write(f"Ns 0 \n")
            fp.write(f"map_Kd {os.path.basename(albedo_path)} \n")

        # Fix: was `if not (False or self.albedo is None):` -- the `False or`
        # was dead code; this is the same condition written plainly.
        if self.albedo is not None:
            albedo = self.albedo.detach().cpu().numpy()
            albedo = (albedo * 255).astype(np.uint8)
            cv2.imwrite(albedo_path, cv2.cvtColor(albedo, cv2.COLOR_RGB2BGR))


================================================
FILE: physdreamer/gaussian_3d/scene/mesh_utils.py
================================================
import numpy as np
import pymeshlab as pml


def poisson_mesh_reconstruction(points, normals=None):
    # points/normals: [N, 3] np.ndarray
    """Screened Poisson surface reconstruction from a point cloud (open3d).

    Args:
        points: [N, 3] point positions.
        normals: optional [N, 3] per-point normals; estimated by open3d when
            omitted.

    Returns:
        (vertices, triangles) numpy arrays of the reconstructed mesh.

    NOTE(review): the two draw_geometries() calls open blocking viewer
    windows, so this function is interactive-only and will not run headless.
    """

    import open3d as o3d

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    # outlier removal
    pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10)

    # normals
    if normals is None:
        pcd.estimate_normals()
    else:
        # keep only the normals of the points that survived outlier removal
        pcd.normals = o3d.utility.Vector3dVector(normals[ind])

    # visualize
    o3d.visualization.draw_geometries([pcd], point_show_normal=False)

    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
        pcd, depth=9
    )
    # drop the 10% lowest-density vertices (poorly supported by the input)
    vertices_to_remove = densities < np.quantile(densities, 0.1)
    mesh.remove_vertices_by_mask(vertices_to_remove)

    # visualize
    o3d.visualization.draw_geometries([mesh])

    vertices = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)

    print(
        f"[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}"
    )

    return vertices, triangles


def decimate_mesh(
    verts, faces, target, backend="pymeshlab", remesh=False, optimalplacement=True
):
    """Reduce a triangle mesh to roughly `target` faces.

    Args:
        verts, faces: input mesh arrays.
        target: desired face count.
        backend: "pymeshlab" (default) or "pyfqmr".
        remesh: additionally run isotropic remeshing (pymeshlab backend only).
        optimalplacement: keep True in general; set False for flat meshes to
            prevent spike artifacts.

    Returns:
        (verts, faces) of the decimated mesh.
    """
    verts_shape_before = verts.shape
    faces_shape_before = faces.shape

    if backend == "pyfqmr":
        import pyfqmr

        simplifier = pyfqmr.Simplify()
        simplifier.setMesh(verts, faces)
        simplifier.simplify_mesh(
            target_count=target, preserve_border=False, verbose=False
        )
        verts, faces, _normals = simplifier.getMesh()
    else:
        ms = pml.MeshSet()
        ms.add_mesh(pml.Mesh(verts, faces), "mesh")  # will copy!

        # quadric edge collapse down to the requested face count
        ms.meshing_decimation_quadric_edge_collapse(
            targetfacenum=int(target), optimalplacement=optimalplacement
        )

        if remesh:
            ms.meshing_isotropic_explicit_remeshing(
                iterations=3, targetlen=pml.PercentageValue(1)
            )

        result = ms.current_mesh()
        verts = result.vertex_matrix()
        faces = result.face_matrix()

    print(
        f"[INFO] mesh decimation: {verts_shape_before} --> {verts.shape}, {faces_shape_before} --> {faces.shape}"
    )

    return verts, faces


def clean_mesh(
    verts,
    faces,
    v_pct=1,
    min_f=64,
    min_d=20,
    repair=True,
    remesh=True,
    remesh_size=0.01,
):
    """Clean a triangle mesh with a pipeline of pymeshlab filters.

    Steps (each optional via its argument): drop unreferenced vertices,
    merge near-duplicate vertices, remove duplicate/zero-area faces, drop
    small connected components, repair non-manifold geometry, and finally
    isotropic remeshing.

    Args:
        verts: [N, 3] vertex positions.
        faces: [M, 3] triangle indices.

    Returns:
        (verts, faces) of the cleaned mesh.
    """
    verts_shape_before = verts.shape
    faces_shape_before = faces.shape

    ms = pml.MeshSet()
    ms.add_mesh(pml.Mesh(verts, faces), "mesh")  # will copy!

    ms.meshing_remove_unreferenced_vertices()  # verts not refed by any faces

    if v_pct > 0:
        # threshold is a percentage of the bounding-box diagonal
        ms.meshing_merge_close_vertices(threshold=pml.PercentageValue(v_pct))

    ms.meshing_remove_duplicate_faces()  # faces defined by the same verts
    ms.meshing_remove_null_faces()  # faces with area == 0

    if min_d > 0:
        ms.meshing_remove_connected_component_by_diameter(
            mincomponentdiag=pml.PercentageValue(min_d)
        )

    if min_f > 0:
        ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f)

    if repair:
        ms.meshing_repair_non_manifold_edges(method=0)
        ms.meshing_repair_non_manifold_vertices(vertdispratio=0)

    if remesh:
        ms.meshing_isotropic_explicit_remeshing(
            iterations=3, targetlen=pml.PureValue(remesh_size)
        )

    cleaned = ms.current_mesh()
    verts = cleaned.vertex_matrix()
    faces = cleaned.face_matrix()

    print(
        f"[INFO] mesh cleaning: {verts_shape_before} --> {verts.shape}, {faces_shape_before} --> {faces.shape}"
    )

    return verts, faces


================================================
FILE: physdreamer/gaussian_3d/utils/camera_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

from physdreamer.gaussian_3d.scene.cameras import Camera
import numpy as np
from physdreamer.gaussian_3d.utils.general_utils import PILtoTorch
from physdreamer.gaussian_3d.utils.graphics_utils import fov2focal
import torch

WARNED = False


def loadCam(args, id, cam_info, resolution_scale):
    """Build a `Camera` from a raw cam_info record, resizing its image.

    `args.resolution` in {1, 2, 4, 8} is a fixed downscale factor; -1 means
    auto (cap width at 1.6K pixels); any other value is a target width.
    """
    orig_w, orig_h = cam_info.image.size

    if args.resolution in [1, 2, 4, 8]:
        factor = resolution_scale * args.resolution
        resolution = round(orig_w / factor), round(orig_h / factor)
    else:  # should be a type that converts to float
        if args.resolution == -1:
            if orig_w > 1600:
                global WARNED
                if not WARNED:
                    print(
                        "[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
                        "If this is not desired, please explicitly specify '--resolution/-r' as 1"
                    )
                    WARNED = True
                global_down = orig_w / 1600
            else:
                global_down = 1
        else:
            global_down = orig_w / args.resolution

        scale = float(global_down) * float(resolution_scale)
        resolution = (int(orig_w / scale), int(orig_h / scale))

    resized_image_rgb = PILtoTorch(cam_info.image, resolution)

    gt_image = resized_image_rgb[:3, ...]
    loaded_mask = None

    # Fix: PILtoTorch returns (C, H, W), so the channel count is shape[0];
    # the previous `shape[1] == 4` tested the image *height*, which meant
    # RGBA alpha masks were (almost) never picked up.
    if resized_image_rgb.shape[0] == 4:
        loaded_mask = resized_image_rgb[3:4, ...]

    return Camera(
        colmap_id=cam_info.uid,
        R=cam_info.R,
        T=cam_info.T,
        FoVx=cam_info.FovX,
        FoVy=cam_info.FovY,
        image=gt_image,
        gt_alpha_mask=loaded_mask,
        image_name=cam_info.image_name,
        uid=id,
        data_device=args.data_device,
    )


def cameraList_from_camInfos(cam_infos, resolution_scale, args):
    """Build a list of `Camera` objects, one per cam_info record."""
    return [
        loadCam(args, idx, info, resolution_scale)
        for idx, info in enumerate(cam_infos)
    ]


def camera_to_JSON(id, camera: Camera):
    """Serialize a camera into the JSON dict layout used by the viewer.

    Inverts the stored world-to-view matrix to recover the camera position
    and camera-to-world rotation, and converts the FoVs to focal lengths.
    """
    world_to_view = np.eye(4)
    world_to_view[:3, :3] = camera.R.transpose()
    world_to_view[:3, 3] = camera.T

    cam_to_world = np.linalg.inv(world_to_view)
    position = cam_to_world[:3, 3]
    rotation_rows = [row.tolist() for row in cam_to_world[:3, :3]]

    return {
        "id": id,
        "img_name": camera.image_name,
        "width": camera.width,
        "height": camera.height,
        "position": position.tolist(),
        "rotation": rotation_rows,
        "fy": fov2focal(camera.FovY, camera.height),
        "fx": fov2focal(camera.FovX, camera.width),
    }


def look_at(from_point, to_point, up_vector=(0, 1, 0)):
    """Build a world-to-camera rotation/translation for a camera at
    `from_point` looking toward `to_point`.

    :param from_point: The position of the camera.
    :param to_point: The point the camera is looking at.
    :param up_vector: The up direction of the camera.
    :return: (R, T) where R is the 3x3 world-to-camera rotation and T is the
        3x1 translation R @ (-from_point).
    """
    eye = np.array(from_point)
    target = np.array(to_point)

    # +z forward (colmap convention; OpenGL would negate it)
    forward = target - eye
    forward = forward / (np.linalg.norm(forward) + 1e-5)

    # x-axis: perpendicular to both the up hint and forward
    right = np.cross(up_vector, forward)
    right = right / (np.linalg.norm(right) + 1e-5)

    # y-axis: completes the right-handed frame
    true_up = np.cross(forward, right)
    true_up = true_up / (np.linalg.norm(true_up) + 1e-5)

    # camera-to-world rotation: columns are the camera axes
    cam_to_world = np.stack([right, true_up, forward], axis=1)
    world_to_cam = cam_to_world.transpose()

    # translation of the world-to-camera transform
    translation = world_to_cam @ np.array(
        [
            [-from_point[0]],
            [-from_point[1]],
            [-from_point[2]],
        ]
    )

    return world_to_cam, translation


def create_cameras_around_sphere(
    radius=6,
    elevation=0,
    fovx=35,
    resolutions=(720, 1080),
    num_cams=60,
    center=(0, 0, 0),
):
    """Place `num_cams` cameras evenly on a constant-elevation ring and aim
    them at `center`.

    :param radius: The radius of the sphere on which cameras are placed.
    :param elevation: The elevation angle in degrees.
    :param fovx: The horizontal field of view of the cameras.
    :param resolutions: The (height, width) resolution of the cameras.
    :param num_cams: The number of cameras.
    :param center: The center of the sphere.
    :return: A list of `Camera` objects.
    """
    elevation_rad = np.radians(elevation)

    # height above the ring plane, and the ring radius at this elevation
    ring_height = radius * np.sin(elevation_rad)
    ring_radius = radius * np.cos(elevation_rad)

    poses = []
    for idx in range(num_cams):
        theta = 2 * np.pi * idx / num_cams
        cam_x = ring_radius * np.cos(theta) + center[0]
        cam_y = ring_radius * np.sin(theta) + center[1]
        R, T = look_at((cam_x, cam_y, ring_height + center[2]), center)
        poses.append((R, T.squeeze(axis=-1)))

    placeholder_image = torch.tensor(
        np.zeros((3, resolutions[0], resolutions[1]), dtype=np.uint8)
    )

    cam_list = []
    for idx, (R, T) in enumerate(poses):
        # R is stored transposed due to 'glm' in CUDA code
        cam_list.append(
            Camera(
                colmap_id=idx,
                R=R.transpose(),
                T=T,
                FoVx=fovx,
                FoVy=fovx * resolutions[1] / resolutions[0],
                image_name="",
                uid=idx,
                data_device="cuda",
                image=placeholder_image,
                gt_alpha_mask=None,
            )
        )

    return cam_list


================================================
FILE: physdreamer/gaussian_3d/utils/general_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import sys
from datetime import datetime
import numpy as np
import random

def inverse_sigmoid(x):
    """Logit function: inverse of torch.sigmoid, valid for x in (0, 1)."""
    odds = x / (1 - x)
    return torch.log(odds)

def PILtoTorch(pil_image, resolution):
    """Resize a PIL image to `resolution` and return a float CHW tensor in [0, 1].

    Grayscale inputs come back with a single channel dimension.
    """
    resized = pil_image.resize(resolution)
    tensor = torch.from_numpy(np.array(resized)) / 255.0
    if tensor.dim() == 3:
        return tensor.permute(2, 0, 1)
    # grayscale: append a channel axis, then move it to the front
    return tensor.unsqueeze(dim=-1).permute(2, 0, 1)

def get_expon_lr_func(
    lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
):
    """Continuous learning-rate decay schedule (copied from Plenoxels /
    adapted from JaxNeRF).

    The returned rate is `lr_init` at step 0 and `lr_final` at `max_steps`,
    log-linearly interpolated in between (i.e. exponential decay). When
    `lr_delay_steps > 0`, the rate is additionally scaled by a smooth ramp
    that starts at `lr_init * lr_delay_mult` and eases back to the normal
    schedule once `step > lr_delay_steps`.

    :param max_steps: int, the number of steps during optimization.
    :return: a function mapping `step` -> learning rate.
    """

    def schedule(step):
        if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
            # parameter disabled
            return 0.0

        # log-linear interpolation between lr_init and lr_final
        t = np.clip(step / max_steps, 0, 1)
        log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)

        if lr_delay_steps > 0:
            # a kind of reverse cosine decay for the warmup ramp
            delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
                0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
            )
        else:
            delay_rate = 1.0

        return delay_rate * log_lerp

    return schedule

def strip_lowerdiag(L):
    """Pack the upper triangle of [N, 3, 3] symmetric matrices into [N, 6].

    Output columns are (xx, xy, xz, yy, yz, zz).

    Fix: allocate the output on L's own device instead of hard-coded "cuda",
    so CPU tensors work and no implicit cross-device copies happen.
    """
    uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device=L.device)

    uncertainty[:, 0] = L[:, 0, 0]
    uncertainty[:, 1] = L[:, 0, 1]
    uncertainty[:, 2] = L[:, 0, 2]
    uncertainty[:, 3] = L[:, 1, 1]
    uncertainty[:, 4] = L[:, 1, 2]
    uncertainty[:, 5] = L[:, 2, 2]
    return uncertainty

def strip_symmetric(sym):
    """Alias of strip_lowerdiag for [N, 3, 3] symmetric matrices."""
    return strip_lowerdiag(sym)

def build_rotation(r):
    """Convert [N, 4] quaternions (w, x, y, z order) to [N, 3, 3] rotation
    matrices. Quaternions are normalized first, so unnormalized input is fine.

    Fix: allocate on r's device instead of hard-coded 'cuda' so CPU tensors work.
    """
    norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])

    q = r / norm[:, None]

    R = torch.zeros((q.size(0), 3, 3), device=r.device)

    r = q[:, 0]
    x = q[:, 1]
    y = q[:, 2]
    z = q[:, 3]

    R[:, 0, 0] = 1 - 2 * (y*y + z*z)
    R[:, 0, 1] = 2 * (x*y - r*z)
    R[:, 0, 2] = 2 * (x*z + r*y)
    R[:, 1, 0] = 2 * (x*y + r*z)
    R[:, 1, 1] = 1 - 2 * (x*x + z*z)
    R[:, 1, 2] = 2 * (y*z - r*x)
    R[:, 2, 0] = 2 * (x*z - r*y)
    R[:, 2, 1] = 2 * (y*z + r*x)
    R[:, 2, 2] = 1 - 2 * (x*x + y*y)
    return R

def build_scaling_rotation(s, r):
    """Build [N, 3, 3] transforms L = R(r) @ diag(s) from per-point scales
    `s` [N, 3] and quaternions `r` [N, 4].

    Fix: allocate on s's device instead of hard-coded "cuda" so CPU tensors work.
    """
    L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=s.device)
    R = build_rotation(r)

    L[:,0,0] = s[:,0]
    L[:,1,1] = s[:,1]
    L[:,2,2] = s[:,2]

    L = R @ L
    return L

def safe_state(silent):
    """Replace stdout with a timestamping (optionally silencing) writer and
    seed all RNGs for reproducibility.

    Args:
        silent: when True, all stdout output is suppressed.

    Side effects: reseeds random/numpy/torch with 0 and selects cuda:0 as
    the active CUDA device (so this requires a CUDA-capable machine).
    """
    old_f = sys.stdout
    class F:
        # Thin stdout proxy: appends a "[dd/mm HH:MM:SS]" stamp to every
        # newline-terminated write, forwards partial writes unchanged, and
        # drops everything when silent.
        def __init__(self, silent):
            self.silent = silent

        def write(self, x):
            if not self.silent:
                if x.endswith("\n"):
                    old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
                else:
                    old_f.write(x)

        def flush(self):
            old_f.flush()

    sys.stdout = F(silent)

    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.set_device(torch.device("cuda:0"))


================================================
FILE: physdreamer/gaussian_3d/utils/graphics_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import math
import numpy as np
from typing import NamedTuple

class BasicPointCloud(NamedTuple):
    """Container for a colored, oriented point cloud.

    All three fields are numpy arrays; callers in this codebase pass them
    with matching first dimensions (one row per point).
    """
    points: np.ndarray   # point positions
    colors: np.ndarray   # per-point RGB colors
    normals: np.ndarray  # per-point normals

def geom_transform_points(points, transf_matrix):
    """Apply a 4x4 homogeneous transform to [P, 3] points.

    Uses the row-vector convention ([points | 1] @ transf_matrix), so the
    matrix multiplies on the right, then de-homogenizes by the w component.
    """
    num_points = points.shape[0]
    ones = torch.ones(num_points, 1, dtype=points.dtype, device=points.device)
    homog = torch.cat([points, ones], dim=1)
    transformed = torch.matmul(homog, transf_matrix.unsqueeze(0))

    # small epsilon guards against division by a zero w component
    w = transformed[..., 3:] + 0.0000001
    return (transformed[..., :3] / w).squeeze(dim=0)

def getWorld2View(R, t):
    """Assemble a float32 4x4 world-to-view matrix from R (stored transposed)
    and translation t."""
    Rt = np.zeros((4, 4))
    Rt[3, 3] = 1.0
    Rt[:3, :3] = R.transpose()
    Rt[:3, 3] = t
    return np.float32(Rt)

def getWorld2View2(R, t, translate=None, scale=1.0):
    """World-to-view matrix with an extra recentering/rescaling of the
    camera center (used to normalize scene extents).

    Fix: the previous signature used a mutable `np.array` default argument
    (a Python anti-pattern); `None` plus an in-body default is equivalent
    and safe. Behavior is unchanged.
    """
    if translate is None:
        translate = np.array([0.0, 0.0, 0.0])

    Rt = np.zeros((4, 4))
    Rt[:3, :3] = R.transpose()
    Rt[:3, 3] = t
    Rt[3, 3] = 1.0

    # go to camera-to-world, shift/scale the camera center, invert back
    C2W = np.linalg.inv(Rt)
    cam_center = C2W[:3, 3]
    cam_center = (cam_center + translate) * scale
    C2W[:3, 3] = cam_center
    Rt = np.linalg.inv(C2W)
    return np.float32(Rt)

def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Perspective projection matrix for a symmetric frustum.

    FoVs are in radians; depth is mapped with a +z sign convention into
    [0, 1] between znear and zfar.
    """
    tan_half_x = math.tan(fovX / 2)
    tan_half_y = math.tan(fovY / 2)

    top = tan_half_y * znear
    right = tan_half_x * znear
    bottom = -top
    left = -right

    P = torch.zeros(4, 4)
    z_sign = 1.0

    P[0, 0] = 2.0 * znear / (right - left)
    P[1, 1] = 2.0 * znear / (top - bottom)
    P[0, 2] = (right + left) / (right - left)
    P[1, 2] = (top + bottom) / (top - bottom)
    P[3, 2] = z_sign
    P[2, 2] = z_sign * zfar / (zfar - znear)
    P[2, 3] = -(zfar * znear) / (zfar - znear)
    return P

def fov2focal(fov, pixels):
    """Convert a field of view (radians) to a focal length in pixels."""
    half_extent = math.tan(fov / 2)
    return pixels / (2 * half_extent)

def focal2fov(focal, pixels):
    """Convert a focal length in pixels to a field of view in radians."""
    return 2 * math.atan(pixels / (2 * focal))

================================================
FILE: physdreamer/gaussian_3d/utils/image_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch

def mse(img1, img2):
    """Per-image mean squared error; returns a [B, 1] tensor."""
    sq_diff = (img1 - img2).pow(2)
    return sq_diff.view(img1.shape[0], -1).mean(1, keepdim=True)

def psnr(img1, img2):
    """Per-image PSNR in dB, assuming pixel values in [0, 1].

    Returns a [B, 1] tensor (one value per image in the batch).
    """
    # Note: local renamed from `mse` to avoid shadowing the sibling function.
    per_image_mse = (img1 - img2).pow(2).view(img1.shape[0], -1).mean(1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(per_image_mse))


================================================
FILE: physdreamer/gaussian_3d/utils/loss_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from math import exp

def l1_loss(network_output, gt):
    """Mean absolute error between prediction and ground truth."""
    return (network_output - gt).abs().mean()

def l2_loss(network_output, gt):
    """Mean squared error between prediction and ground truth."""
    return (network_output - gt).pow(2).mean()

def gaussian(window_size, sigma):
    """1D Gaussian kernel of length `window_size`, normalized to sum to 1."""
    center = window_size // 2
    weights = torch.Tensor(
        [exp(-((i - center) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)]
    )
    return weights / weights.sum()

def create_window(window_size, channel):
    """Create a [channel, 1, window_size, window_size] Gaussian window for
    depthwise (grouped) 2D convolution in SSIM.

    The same 2D Gaussian (sigma = 1.5) is replicated across `channel` groups.

    Note: the torch.autograd.Variable wrapper used here previously has been
    a deprecated no-op since PyTorch 0.4, so a plain tensor is returned.
    """
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1D kernel with itself yields the 2D kernel.
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    # expand() creates a view; contiguous() materializes it so conv2d is safe.
    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
    return window

def ssim(img1, img2, window_size=11, size_average=True):
    """SSIM between two image batches using a Gaussian window.

    Returns a scalar mean when `size_average` is True, otherwise a
    per-image mean.
    """
    num_channels = img1.size(-3)
    win = create_window(window_size, num_channels)

    # Move the window to img1's device/dtype before convolving.
    if img1.is_cuda:
        win = win.cuda(img1.get_device())
    win = win.type_as(img1)

    return _ssim(img1, img2, win, window_size, num_channels, size_average)

def _ssim(img1, img2, window, window_size, channel, size_average=True):
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2

    C1 = 0.01 ** 2
    C2 = 0.03 ** 2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)



================================================
FILE: physdreamer/gaussian_3d/utils/rigid_body_utils.py
================================================
import torch
import torch.nn.functional as F


def get_rigid_transform(A, B):
    """
    Estimate the rigid body transformation between two sets of 3D points.
    A and B are Nx3 matrices where each row is a 3D point.
    Returns a rotation matrix R and translation vector t.
    Args:
        A, B: [batch, N, 3] matrix of 3D points
    Outputs:
        R, t: [batch, 3, 3/1]
        target = R @ source (source shape [3, 1]) + t
    """
    assert A.shape == B.shape, "Input matrices must have the same shape"
    assert A.shape[-1] == 3, "Input matrices must have 3 columns (x, y, z coordinates)"

    # Centroids of each point set: [..., 1, 3]
    mean_A = torch.mean(A, dim=-2, keepdim=True)
    mean_B = torch.mean(B, dim=-2, keepdim=True)

    # Cross-covariance of the centered point sets: [..., 3, 3]
    H = (A - mean_A).transpose(-2, -1) @ (B - mean_B)

    # SVD over the last two dimensions (singular values unused).
    U, _, Vt = torch.linalg.svd(H)

    # Provisional rotation; may be a reflection (det == -1).
    R = Vt.transpose(-2, -1) @ U.transpose(-2, -1)

    # Sign per batch element: -1 where det(R) < 0, else +1.
    sign = (torch.det(R) < 0) * -2.0 + 1.0

    # Flip only the last row of Vt where needed to force det(R) == +1.
    ones = torch.ones_like(sign)
    row_scale = torch.stack([ones, ones, sign], dim=-1)
    Vt = Vt * row_scale[..., None]

    # Recompute the rotation with the corrected Vt.
    R = Vt.transpose(-2, -1) @ U.transpose(-2, -1)

    # t = centroid_B - R @ centroid_A, returned as [..., 3, 1].
    t = mean_B - (R @ mean_A.transpose(-2, -1)).transpose(-2, -1)
    return R, t.transpose(-2, -1)


def _test_rigid_transform():
    # Example usage: a reflection-free rotation plus a constant offset.
    src = torch.tensor([[1, 2, 3], [4, 5, 6], [9, 8, 10], [10, -5, 1]]) * 1.0

    # 180-degree rotation about the x-axis.
    rot = torch.tensor([[1, 0, 0], [0, -1, 0], [0, 0, -1]]) * 1.0

    dst = (rot @ src.T).T + 2.0  # Just an example offset

    R, t = get_rigid_transform(src[None, ...], dst[None, ...])
    print("Rotation matrix R:")
    print(R)
    print("\nTranslation vector t:")
    print(t)


def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    """
    Returns torch.sqrt(torch.max(0, x))
    but with a zero subgradient where x is 0.
    """
    ret = torch.zeros_like(x)
    positive_mask = x > 0
    ret[positive_mask] = torch.sqrt(x[positive_mask])
    return ret


def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    from pytorch3d. Based on trace_method like: https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L205
    Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).

    Raises:
        ValueError: if the last two dimensions of `matrix` are not 3x3.
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    # Unpack the nine matrix entries as separate (...,)-shaped tensors.
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )

    # |2r|, |2i|, |2j|, |2k|: magnitudes of twice each quaternion component,
    # computed from diagonal combinations (clamped at 0 before the sqrt).
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)

    return quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))


def quternion_to_matrix(r):
    """Convert quaternions (real part first, shape [N, 4]) to rotation
    matrices of shape [N, 3, 3].

    Inputs are normalized first, so they need not be unit quaternions.

    Args:
        r: [N, 4] tensor of quaternions (w, x, y, z).
    Returns:
        [N, 3, 3] rotation matrices on the same device as `r`.
    """
    norm = torch.sqrt(
        r[:, 0] * r[:, 0] + r[:, 1] * r[:, 1] + r[:, 2] * r[:, 2] + r[:, 3] * r[:, 3]
    )

    q = r / norm[:, None]

    # Fix: allocate on the input's device instead of hard-coding "cuda",
    # so CPU tensors work too (the old version crashed off-GPU, e.g. in
    # _test_matrix_to_quaternion_2 which passes CPU inputs).
    R = torch.zeros((q.size(0), 3, 3), device=r.device)

    # Renamed from `r` to `w` to avoid shadowing the function argument.
    w = q[:, 0]
    x = q[:, 1]
    y = q[:, 2]
    z = q[:, 3]

    R[:, 0, 0] = 1 - 2 * (y * y + z * z)
    R[:, 0, 1] = 2 * (x * y - w * z)
    R[:, 0, 2] = 2 * (x * z + w * y)
    R[:, 1, 0] = 2 * (x * y + w * z)
    R[:, 1, 1] = 1 - 2 * (x * x + z * z)
    R[:, 1, 2] = 2 * (y * z - w * x)
    R[:, 2, 0] = 2 * (x * z - w * y)
    R[:, 2, 1] = 2 * (y * z + w * x)
    R[:, 2, 2] = 1 - 2 * (x * x + y * y)
    return R


def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
    """
    from Pytorch3d
    Convert a unit quaternion to a standard form: one in which the real
    part is non negative.

    Args:
        quaternions: Quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Standardized quaternions as tensor of shape (..., 4).
    """
    # q and -q represent the same rotation; pick the one with w >= 0.
    negative_real = quaternions[..., 0:1] < 0
    return torch.where(negative_real, -quaternions, quaternions)


def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    From pytorch3d
    Multiply two quaternions.
    Usual torch rules for broadcasting apply.

    Args:
        a: Quaternions as tensor of shape (..., 4), real part first.
        b: Quaternions as tensor of shape (..., 4), real part first.

    Returns:
        The product of a and b, a tensor of quaternions shape (..., 4),
        standardized so the real part is non-negative.
    """
    aw, ax, ay, az = torch.unbind(a, -1)
    bw, bx, by, bz = torch.unbind(b, -1)
    # Hamilton product, component by component.
    product = torch.stack(
        (
            aw * bw - ax * bx - ay * by - az * bz,
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by - ax * bz + ay * bw + az * bx,
            aw * bz + ax * by - ay * bx + az * bw,
        ),
        -1,
    )
    return standardize_quaternion(product)


def _test_matrix_to_quaternion():
    # Sample a batch of random quaternions on the GPU and normalize them.
    raw = torch.randn((10, 4)).cuda()

    norm = torch.sqrt(
        raw[:, 0] * raw[:, 0]
        + raw[:, 1] * raw[:, 1]
        + raw[:, 2] * raw[:, 2]
        + raw[:, 3] * raw[:, 3]
    )
    q = standardize_quaternion(raw / norm[:, None])

    R = quternion_to_matrix(q)

    # R should be orthonormal: R @ R^T == I.
    ortho_err = torch.abs(
        R @ R.transpose(-2, -1) - torch.eye(3, device="cuda")[None, ...]
    ).max()

    # Round-trip back to quaternions and compare against the originals.
    q_back = matrix_to_quaternion(R)
    q_back = q_back / torch.linalg.norm(q_back, dim=-1)[..., None]
    q_back = standardize_quaternion(q_back)

    print(q_back.shape, q.shape, R.shape)

    roundtrip_err = (q - q_back).abs().max()

    print("rotation to I error:", ortho_err, "quant rec error: ", roundtrip_err)


def _test_matrix_to_quaternion_2():
    # Two hand-picked rotation matrices to eyeball the round trip.
    mats = (
        torch.tensor(
            [[[1, 0, 0], [0, -1, 0], [0, 0, -1]], [[1, 0, 0], [0, 0, 1], [0, -1, 0]]]
        )
        * 1.0
    )

    quats = matrix_to_quaternion(mats.transpose(-2, -1))
    recovered = quternion_to_matrix(quats)

    print(recovered)


if __name__ == "__main__":
    # Manual sanity checks; the quaternion tests allocate CUDA tensors,
    # so running this module directly requires a GPU.
    # _test_rigid_transform()
    _test_matrix_to_quaternion()

    _test_matrix_to_quaternion_2()


================================================
FILE: physdreamer/gaussian_3d/utils/sh_utils.py
================================================
#  Copyright 2021 The PlenOctree Authors.
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#
#  1. Redistributions of source code must retain the above copyright notice,
#  this list of conditions and the following disclaimer.
#
#  2. Redistributions in binary form must reproduce the above copyright notice,
#  this list of conditions and the following disclaimer in the documentation
#  and/or other materials provided with the distribution.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.

import torch

# Hard-coded normalization constants for the real spherical-harmonics
# basis, one group per SH band (degree 0 through 4). Consumed by eval_sh.
C0 = 0.28209479177387814
C1 = 0.4886025119029199
# Band 2: five degree-2 basis functions.
C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396
]
# Band 3: seven degree-3 basis functions.
C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435
]
# Band 4: nine degree-4 basis functions.
C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]


def eval_sh(deg, sh, dirs):
    """
    Evaluate spherical harmonics at unit directions
    using hardcoded SH polynomials.
    Works with torch/np/jnp.
    ... Can be 0 or more batch dimensions.
    Args:
        deg: int SH deg. Degrees 0-4 supported (the assert and the
            branches below cover up to deg == 4).
        sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
        dirs: jnp.ndarray unit directions [..., 3]
    Returns:
        [..., C]
    """
    assert deg <= 4 and deg >= 0
    coeff = (deg + 1) ** 2
    assert sh.shape[-1] >= coeff

    # Band 0: constant term.
    result = C0 * sh[..., 0]
    if deg > 0:
        # Band 1: linear in the direction components.
        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
        result = (result -
                C1 * y * sh[..., 1] +
                C1 * z * sh[..., 2] -
                C1 * x * sh[..., 3])

        if deg > 1:
            # Band 2: quadratic terms.
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result = (result +
                    C2[0] * xy * sh[..., 4] +
                    C2[1] * yz * sh[..., 5] +
                    C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
                    C2[3] * xz * sh[..., 7] +
                    C2[4] * (xx - yy) * sh[..., 8])

            if deg > 2:
                # Band 3: cubic terms.
                result = (result +
                C3[0] * y * (3 * xx - yy) * sh[..., 9] +
                C3[1] * xy * z * sh[..., 10] +
                C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
                C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
                C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
                C3[5] * z * (xx - yy) * sh[..., 14] +
                C3[6] * x * (xx - 3 * yy) * sh[..., 15])

                if deg > 3:
                    # Band 4: quartic terms.
                    result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
                            C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
                            C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
                            C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
                            C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
                            C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
                            C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
                            C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
                            C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
    return result

def RGB2SH(rgb):
    """Convert RGB values in [0, 1] to degree-0 SH coefficients."""
    centered = rgb - 0.5
    return centered / C0

def SH2RGB(sh):
    """Convert degree-0 SH coefficients back to RGB values in [0, 1]."""
    scaled = sh * C0
    return scaled + 0.5

================================================
FILE: physdreamer/gaussian_3d/utils/system_utils.py
================================================
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

from errno import EEXIST
from os import makedirs, path
import os

def mkdir_p(folder_path):
    """Create a directory like `mkdir -p`: parents as needed, no error if
    the path already exists as a directory.

    Raises OSError for any other failure (e.g. the path exists but is a
    file, or permissions are missing) — same behavior as the previous
    try/except-EEXIST implementation.
    """
    # exist_ok=True replaces the legacy EEXIST check: it suppresses the
    # error only when the existing path is a directory.
    makedirs(folder_path, exist_ok=True)

def searchForMaxIteration(folder):
    """Return the largest iteration number parsed from entries in `folder`.

    Each entry is expected to end with "_<iteration>"; the trailing integer
    is extracted and the maximum returned.
    """
    return max(int(name.split("_")[-1]) for name in os.listdir(folder))


=========================================
Download .txt
gitextract_1v1oa6eg/

├── .gitignore
├── README.md
├── physdreamer/
│   ├── field_components/
│   │   ├── encoding.py
│   │   └── mlp.py
│   ├── fields/
│   │   ├── mul_offset_field.py
│   │   ├── mul_se3_field.py
│   │   ├── offset_field.py
│   │   ├── se3_field.py
│   │   └── triplane_field.py
│   ├── gaussian_3d/
│   │   ├── README.md
│   │   ├── arguments/
│   │   │   └── __init__.py
│   │   ├── gaussian_renderer/
│   │   │   ├── __init__.py
│   │   │   ├── depth_uv_render.py
│   │   │   ├── feat_render.py
│   │   │   ├── flow_depth_render.py
│   │   │   └── render.py
│   │   ├── scene/
│   │   │   ├── __init__.py
│   │   │   ├── cameras.py
│   │   │   ├── colmap_loader.py
│   │   │   ├── dataset_readers.py
│   │   │   ├── gaussian_model.py
│   │   │   ├── mesh.py
│   │   │   └── mesh_utils.py
│   │   └── utils/
│   │       ├── camera_utils.py
│   │       ├── general_utils.py
│   │       ├── graphics_utils.py
│   │       ├── image_utils.py
│   │       ├── loss_utils.py
│   │       ├── rigid_body_utils.py
│   │       ├── sh_utils.py
│   │       └── system_utils.py
│   ├── losses/
│   │   └── smoothness_loss.py
│   ├── operators/
│   │   ├── dct.py
│   │   ├── np_operators.py
│   │   └── rotation.py
│   ├── utils/
│   │   ├── camera_utils.py
│   │   ├── colmap_utils.py
│   │   ├── config.py
│   │   ├── img_utils.py
│   │   ├── io_utils.py
│   │   ├── optimizer.py
│   │   ├── print_utils.py
│   │   ├── pytorch_mssim.py
│   │   ├── svd_helpper.py
│   │   └── torch_utils.py
│   └── warp_mpm/
│       ├── README.md
│       ├── gaussian_sim_utils.py
│       ├── mpm_data_structure.py
│       ├── mpm_solver_diff.py
│       ├── mpm_utils.py
│       └── warp_utils.py
├── projects/
│   ├── inference/
│   │   ├── README.md
│   │   ├── config_demo.py
│   │   ├── configs/
│   │   │   ├── alocasia.py
│   │   │   ├── carnation.py
│   │   │   ├── hat.py
│   │   │   └── telephone.py
│   │   ├── demo.py
│   │   ├── local_utils.py
│   │   └── run.sh
│   └── uncleaned_train/
│       ├── .gitignore
│       ├── README.md
│       ├── exp_motion/
│       │   └── train/
│       │       ├── config.yml
│       │       ├── config_demo.py
│       │       ├── convert_gaussian_to_mesh.py
│       │       ├── fast_train_velocity.py
│       │       ├── interface.py
│       │       ├── local_utils.py
│       │       ├── model_config.py
│       │       └── train_material.py
│       ├── motionrep/
│       │   ├── datatools/
│       │   │   ├── _convert_fbx_to_mesh.py
│       │   │   ├── blender_deforming_things4d.py
│       │   │   ├── blender_install_packages.py
│       │   │   ├── blender_render_imgs.py
│       │   │   ├── deforming_things4d.py
│       │   │   ├── dragon_animation.py
│       │   │   ├── fbx_to_mesh.py
│       │   │   ├── fbx_to_mesh_flag.py
│       │   │   ├── render_blender_annimations.py
│       │   │   ├── render_fbx_first_frame.py
│       │   │   ├── render_obj.py
│       │   │   ├── render_obj_external_texture.py
│       │   │   ├── test_colmap_camera.py
│       │   │   └── transform_obj_for_blender.py
│       │   ├── diffusion/
│       │   │   ├── builder.py
│       │   │   ├── discretizer.py
│       │   │   ├── draft.py
│       │   │   ├── gaussian_diffusion.py
│       │   │   ├── losses.py
│       │   │   ├── resample.py
│       │   │   ├── respace.py
│       │   │   ├── sigma_sampling.py
│       │   │   ├── sv_diffusion_engine.py
│       │   │   ├── svd_conditioner.py
│       │   │   ├── svd_sds_engine.py
│       │   │   ├── svd_sds_engine_backup.py
│       │   │   ├── svd_sds_wdecoder_engine.py
│       │   │   └── video_diffusion_loss.py
│       │   ├── field_components/
│       │   │   ├── encoding.py
│       │   │   └── mlp.py
│       │   ├── fields/
│       │   │   ├── dct_trajectory_field.py
│       │   │   ├── discrete_field.py
│       │   │   ├── mul_offset_field.py
│       │   │   ├── mul_se3_field.py
│       │   │   ├── offset_field.py
│       │   │   ├── se3_field.py
│       │   │   ├── triplane_field.py
│       │   │   └── video_triplane_disp_field.py
│       │   ├── gaussian_3d/
│       │   │   ├── arguments/
│       │   │   │   └── __init__.py
│       │   │   ├── gaussian_renderer/
│       │   │   │   ├── __init__.py
│       │   │   │   ├── depth_uv_render.py
│       │   │   │   ├── feat_render.py
│       │   │   │   ├── flow_depth_render.py
│       │   │   │   ├── motion_renderer.py
│       │   │   │   └── render.py
│       │   │   ├── scene/
│       │   │   │   ├── __init__.py
│       │   │   │   ├── cameras.py
│       │   │   │   ├── colmap_loader.py
│       │   │   │   ├── dataset_readers.py
│       │   │   │   ├── gaussian_model.py
│       │   │   │   ├── mesh.py
│       │   │   │   └── mesh_utils.py
│       │   │   └── utils/
│       │   │       ├── camera_utils.py
│       │   │       ├── general_utils.py
│       │   │       ├── graphics_utils.py
│       │   │       ├── image_utils.py
│       │   │       ├── loss_utils.py
│       │   │       ├── rigid_body_utils.py
│       │   │       ├── sh_utils.py
│       │   │       └── system_utils.py
│       │   ├── losses/
│       │   │   ├── se3_loss.py
│       │   │   └── smoothness_loss.py
│       │   ├── operators/
│       │   │   ├── dct.py
│       │   │   ├── np_operators.py
│       │   │   └── rotation.py
│       │   └── utils/
│       │       ├── camera_utils.py
│       │       ├── colmap_utils.py
│       │       ├── config.py
│       │       ├── dct.py
│       │       ├── flow_utils.py
│       │       ├── img_utils.py
│       │       ├── io_utils.py
│       │       ├── optimizer.py
│       │       ├── peft_utils.py
│       │       ├── print_utils.py
│       │       ├── pytorch_mssim.py
│       │       ├── svd_helpper.py
│       │       └── torch_utils.py
│       └── thirdparty_code/
│           └── warp_mpm/
│               ├── backup/
│               │   ├── convert_gaussian_to_mesh.py
│               │   ├── diff_warp_utils.py
│               │   ├── engine_utils.py
│               │   ├── grad_test.py
│               │   ├── mpm_solver_warp.py
│               │   ├── mpm_solver_warp_diff.py
│               │   ├── mpm_utils.py
│               │   ├── run_gaussian.py
│               │   ├── run_gaussian_static.py
│               │   ├── run_sand.py
│               │   ├── sim_grad.py
│               │   ├── solver_grad_test.py
│               │   ├── test_inverse_sim.py
│               │   ├── test_sim.py
│               │   ├── warp_rewrite.py
│               │   └── warp_utils.py
│               ├── backup_jan10/
│               │   ├── gaussian_sim_utils.py
│               │   ├── mpm_data_structure.py
│               │   ├── mpm_solver_diff.py
│               │   ├── mpm_utils.py
│               │   └── warp_utils.py
│               ├── gaussian_sim_utils.py
│               ├── mpm_data_structure.py
│               ├── mpm_solver_diff.py
│               ├── mpm_utils.py
│               └── warp_utils.py
├── requirements.txt
└── setup.py
Download .txt
SYMBOL INDEX (1427 symbols across 154 files)

FILE: physdreamer/field_components/encoding.py
  class TemporalKplanesEncoding (line 12) | class TemporalKplanesEncoding(nn.Module):
    method __init__ (line 19) | def __init__(
    method forward (line 60) | def forward(self, inp: Float[Tensor, "*bs 4"]):
    method compute_temporal_smoothness (line 86) | def compute_temporal_smoothness(
    method compute_plane_tv (line 96) | def compute_plane_tv(
    method visualize (line 106) | def visualize(
    method functional_forward (line 117) | def functional_forward(
  class TriplanesEncoding (line 152) | class TriplanesEncoding(nn.Module):
    method __init__ (line 159) | def __init__(
    method forward (line 199) | def forward(self, inp: Float[Tensor, "*bs 3"]):
    method compute_plane_tv (line 225) | def compute_plane_tv(
  class PlaneEncoding (line 236) | class PlaneEncoding(nn.Module):
    method __init__ (line 243) | def __init__(
    method forward (line 276) | def forward(self, inp: Float[Tensor, "*bs 2"]):
    method compute_plane_tv (line 291) | def compute_plane_tv(
  class TemporalNeRFEncoding (line 302) | class TemporalNeRFEncoding(nn.Module):
    method __init__ (line 303) | def __init__(
    method get_out_dim (line 320) | def get_out_dim(self) -> int:
    method forward (line 328) | def forward(

FILE: physdreamer/field_components/mlp.py
  class MLP (line 11) | class MLP(nn.Module):
    method __init__ (line 12) | def __init__(
    method build_nn_modules (line 40) | def build_nn_modules(self) -> None:
    method pytorch_fwd (line 65) | def pytorch_fwd(
    method forward (line 88) | def forward(

FILE: physdreamer/fields/mul_offset_field.py
  class MulTemporalKplanesOffsetfields (line 15) | class MulTemporalKplanesOffsetfields(nn.Module):
    method __init__ (line 28) | def __init__(
    method forward (line 74) | def forward(
    method compute_smoothess_loss (line 98) | def compute_smoothess_loss(
    method compute_loss (line 117) | def compute_loss(
    method arap_loss (line 133) | def arap_loss(self, inp):

FILE: physdreamer/fields/mul_se3_field.py
  class MulTemporalKplanesSE3fields (line 15) | class MulTemporalKplanesSE3fields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 75) | def forward(
    method compute_smoothess_loss (line 111) | def compute_smoothess_loss(
    method compute_loss (line 130) | def compute_loss(

FILE: physdreamer/fields/offset_field.py
  class TemporalKplanesOffsetfields (line 15) | class TemporalKplanesOffsetfields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 66) | def forward(
    method compute_smoothess_loss (line 87) | def compute_smoothess_loss(
    method compute_loss (line 100) | def compute_loss(
    method arap_loss (line 116) | def arap_loss(self, inp):
    method forward_with_plane_coefs (line 119) | def forward_with_plane_coefs(

FILE: physdreamer/fields/se3_field.py
  class TemporalKplanesSE3fields (line 15) | class TemporalKplanesSE3fields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 69) | def forward(
    method compute_smoothess_loss (line 120) | def compute_smoothess_loss(
    method compute_loss (line 133) | def compute_loss(

FILE: physdreamer/fields/triplane_field.py
  class TriplaneFields (line 11) | class TriplaneFields(nn.Module):
    method __init__ (line 22) | def __init__(
    method forward (line 57) | def forward(
    method compute_smoothess_loss (line 72) | def compute_smoothess_loss(
  function compute_entropy (line 80) | def compute_entropy(p):
  class TriplaneFieldsWithEntropy (line 86) | class TriplaneFieldsWithEntropy(nn.Module):
    method __init__ (line 97) | def __init__(
    method forward (line 134) | def forward(
    method compute_smoothess_loss (line 155) | def compute_smoothess_loss(

FILE: physdreamer/gaussian_3d/arguments/__init__.py
  class GroupParams (line 16) | class GroupParams:
  class ParamGroup (line 19) | class ParamGroup:
    method __init__ (line 20) | def __init__(self, parser: ArgumentParser, name : str, fill_none = Fal...
    method extract (line 40) | def extract(self, args):
  class ModelParams (line 47) | class ModelParams(ParamGroup):
    method __init__ (line 48) | def __init__(self, parser, sentinel=False):
    method extract (line 59) | def extract(self, args):
  class PipelineParams (line 64) | class PipelineParams(ParamGroup):
    method __init__ (line 65) | def __init__(self, parser):
  class OptimizationParams (line 71) | class OptimizationParams(ParamGroup):
    method __init__ (line 72) | def __init__(self, parser):
  function get_combined_args (line 91) | def get_combined_args(parser : ArgumentParser):

FILE: physdreamer/gaussian_3d/gaussian_renderer/depth_uv_render.py
  function render_uv_depth_w_gaussian (line 12) | def render_uv_depth_w_gaussian(

FILE: physdreamer/gaussian_3d/gaussian_renderer/feat_render.py
  function render_feat_gaussian (line 12) | def render_feat_gaussian(

FILE: physdreamer/gaussian_3d/gaussian_renderer/flow_depth_render.py
  function render_flow_depth_w_gaussian (line 12) | def render_flow_depth_w_gaussian(

FILE: physdreamer/gaussian_3d/gaussian_renderer/render.py
  function render_gaussian (line 21) | def render_gaussian(
  function gaussian_intrin_scale (line 127) | def gaussian_intrin_scale(x_or_y: torch.Tensor, w_or_h: float):
  function render_arrow_in_screen (line 134) | def render_arrow_in_screen(viewpoint_camera, points_3d):
  function render_arrow_in_screen_back (line 190) | def render_arrow_in_screen_back(viewpoint_camera, points_3d):
  function eval_sh (line 261) | def eval_sh(deg, sh, dirs):
  function RGB2SH (line 327) | def RGB2SH(rgb):
  function SH2RGB (line 331) | def SH2RGB(sh):

FILE: physdreamer/gaussian_3d/scene/__init__.py
  class Scene (line 26) | class Scene:
    method __init__ (line 29) | def __init__(
    method save (line 116) | def save(self, iteration):
    method getTrainCameras (line 122) | def getTrainCameras(self, scale=1.0):
    method getTestCameras (line 125) | def getTestCameras(self, scale=1.0):

FILE: physdreamer/gaussian_3d/scene/cameras.py
  class Camera (line 21) | class Camera(nn.Module):
    method __init__ (line 22) | def __init__(
  class MiniCam (line 91) | class MiniCam:
    method __init__ (line 92) | def __init__(

FILE: physdreamer/gaussian_3d/scene/colmap_loader.py
  function qvec2rotmat (line 43) | def qvec2rotmat(qvec):
  function rotmat2qvec (line 55) | def rotmat2qvec(R):
  class Image (line 68) | class Image(BaseImage):
    method qvec2rotmat (line 69) | def qvec2rotmat(self):
  function read_next_bytes (line 72) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
  function read_points3D_text (line 83) | def read_points3D_text(path):
  function read_points3D_binary (line 113) | def read_points3D_binary(path_to_model_file):
  function read_intrinsics_text (line 144) | def read_intrinsics_text(path):
  function read_extrinsics_binary (line 168) | def read_extrinsics_binary(path_to_model_file):
  function read_intrinsics_binary (line 203) | def read_intrinsics_binary(path_to_model_file):
  function read_extrinsics_text (line 232) | def read_extrinsics_text(path):
  function read_colmap_bin_array (line 261) | def read_colmap_bin_array(path):

FILE: physdreamer/gaussian_3d/scene/dataset_readers.py
  class CameraInfo (line 45) | class CameraInfo(NamedTuple):
  class SceneInfo (line 58) | class SceneInfo(NamedTuple):
  function getNerfppNorm (line 66) | def getNerfppNorm(cam_info):
  function readColmapCameras (line 90) | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
  function fetchPly (line 142) | def fetchPly(path):
  function storePly (line 151) | def storePly(path, xyz, rgb):
  function readColmapSceneInfo (line 177) | def readColmapSceneInfo(path, images, eval, llffhold=8):
  function readCamerasFromTransforms (line 233) | def readCamerasFromTransforms(path, transformsfile, white_background, ex...
  function readNerfSyntheticInfo (line 297) | def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
  class NoImageCamera (line 349) | class NoImageCamera(nn.Module):
    method __init__ (line 350) | def __init__(
  function fast_read_cameras_from_transform_file (line 427) | def fast_read_cameras_from_transform_file(file_path, width=1080, height=...

FILE: physdreamer/gaussian_3d/scene/gaussian_model.py
  class GaussianModel (line 37) | class GaussianModel:
    method setup_functions (line 38) | def setup_functions(self):
    method __init__ (line 55) | def __init__(self, sh_degree: int = 3):
    method capture (line 74) | def capture(self):
    method restore (line 95) | def restore(self, model_args, training_args):
    method capture_training_args (line 118) | def capture_training_args(
    method get_scaling (line 124) | def get_scaling(self):
    method get_rotation (line 128) | def get_rotation(self):
    method get_xyz (line 132) | def get_xyz(self):
    method get_features (line 136) | def get_features(self):
    method get_opacity (line 142) | def get_opacity(self):
    method get_covariance (line 145) | def get_covariance(self, scaling_modifier=1):
    method oneupSHdegree (line 150) | def oneupSHdegree(self):
    method create_from_pcd (line 154) | def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):
    method training_setup (line 196) | def training_setup(self, training_args):
    method update_learning_rate (line 242) | def update_learning_rate(self, iteration):
    method construct_list_of_attributes (line 250) | def construct_list_of_attributes(self):
    method save_ply (line 264) | def save_ply(self, path):
    method reset_opacity (line 301) | def reset_opacity(self):
    method load_ply (line 308) | def load_ply(self, path):
    method replace_tensor_to_optimizer (line 388) | def replace_tensor_to_optimizer(self, tensor, name):
    method _prune_optimizer (line 403) | def _prune_optimizer(self, mask):
    method prune_points (line 425) | def prune_points(self, mask):
    method cat_tensors_to_optimizer (line 441) | def cat_tensors_to_optimizer(self, tensors_dict):
    method densification_postfix (line 475) | def densification_postfix(
    method densify_and_split (line 505) | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
    method densify_and_clone (line 549) | def densify_and_clone(self, grads, grad_threshold, scene_extent):
    method densify_and_prune (line 576) | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_...
    method add_densification_stats (line 594) | def add_densification_stats(self, viewspace_point_tensor, update_filter):
    method apply_discrete_offset_filds (line 600) | def apply_discrete_offset_filds(self, origin_points, offsets):
    method apply_discrete_offset_filds_with_R (line 646) | def apply_discrete_offset_filds_with_R(self, origin_points, offsets, t...
    method apply_se3_fields (line 711) | def apply_se3_fields(
    method apply_offset_fields (line 772) | def apply_offset_fields(self, offset_field, timestamp: float):
    method apply_offset_fields_with_R (line 812) | def apply_offset_fields_with_R(self, offset_field, timestamp: float, e...
    method init_from_mesh (line 879) | def init_from_mesh(
    method detach_grad (line 943) | def detach_grad(
    method apply_mask (line 953) | def apply_mask(self, mask):
    method extract_fields (line 982) | def extract_fields(self, resolution=128, num_blocks=16, relax_ratio=1.5):
    method extract_mesh (line 1073) | def extract_mesh(self, path, density_thresh=1, resolution=128, decimat...
  function gaussian_3d_coeff (line 1109) | def gaussian_3d_coeff(xyzs, covs):

FILE: physdreamer/gaussian_3d/scene/mesh.py
  function dot (line 8) | def dot(x, y):
  function length (line 12) | def length(x, eps=1e-20):
  function safe_normalize (line 16) | def safe_normalize(x, eps=1e-20):
  class Mesh (line 20) | class Mesh:
    method __init__ (line 21) | def __init__(
    method load (line 49) | def load(
    method load_obj (line 146) | def load_obj(cls, path, albedo_path=None, device=None):
    method load_trimesh (line 296) | def load_trimesh(cls, path, device=None):
    method aabb (line 390) | def aabb(self):
    method auto_size (line 395) | def auto_size(self):
    method auto_normal (line 401) | def auto_normal(self):
    method auto_uv (line 424) | def auto_uv(self, cache_path=None, vmap=True):
    method align_v_to_vt (line 459) | def align_v_to_vt(self, vmapping=None):
    method to (line 476) | def to(self, device):
    method write (line 484) | def write(self, path):
    method write_ply (line 495) | def write_ply(self, path):
    method write_glb (line 503) | def write_glb(self, path):
    method write_obj (line 653) | def write_obj(self, path):

FILE: physdreamer/gaussian_3d/scene/mesh_utils.py
  function poisson_mesh_reconstruction (line 5) | def poisson_mesh_reconstruction(points, normals=None):
  function decimate_mesh (line 44) | def decimate_mesh(
  function clean_mesh (line 88) | def clean_mesh(

FILE: physdreamer/gaussian_3d/utils/camera_utils.py
  function loadCam (line 21) | def loadCam(args, id, cam_info, resolution_scale):
  function cameraList_from_camInfos (line 69) | def cameraList_from_camInfos(cam_infos, resolution_scale, args):
  function camera_to_JSON (line 78) | def camera_to_JSON(id, camera: Camera):
  function look_at (line 101) | def look_at(from_point, to_point, up_vector=(0, 1, 0)):
  function create_cameras_around_sphere (line 149) | def create_cameras_around_sphere(

FILE: physdreamer/gaussian_3d/utils/general_utils.py
  function inverse_sigmoid (line 18) | def inverse_sigmoid(x):
  function PILtoTorch (line 21) | def PILtoTorch(pil_image, resolution):
  function get_expon_lr_func (line 29) | def get_expon_lr_func(
  function strip_lowerdiag (line 64) | def strip_lowerdiag(L):
  function strip_symmetric (line 75) | def strip_symmetric(sym):
  function build_rotation (line 78) | def build_rotation(r):
  function build_scaling_rotation (line 101) | def build_scaling_rotation(s, r):
  function safe_state (line 112) | def safe_state(silent):

FILE: physdreamer/gaussian_3d/utils/graphics_utils.py
  class BasicPointCloud (line 17) | class BasicPointCloud(NamedTuple):
  function geom_transform_points (line 22) | def geom_transform_points(points, transf_matrix):
  function getWorld2View (line 31) | def getWorld2View(R, t):
  function getWorld2View2 (line 38) | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
  function getProjectionMatrix (line 51) | def getProjectionMatrix(znear, zfar, fovX, fovY):
  function fov2focal (line 73) | def fov2focal(fov, pixels):
  function focal2fov (line 76) | def focal2fov(focal, pixels):

FILE: physdreamer/gaussian_3d/utils/image_utils.py
  function mse (line 14) | def mse(img1, img2):
  function psnr (line 17) | def psnr(img1, img2):

FILE: physdreamer/gaussian_3d/utils/loss_utils.py
  function l1_loss (line 17) | def l1_loss(network_output, gt):
  function l2_loss (line 20) | def l2_loss(network_output, gt):
  function gaussian (line 23) | def gaussian(window_size, sigma):
  function create_window (line 27) | def create_window(window_size, channel):
  function ssim (line 33) | def ssim(img1, img2, window_size=11, size_average=True):
  function _ssim (line 43) | def _ssim(img1, img2, window, window_size, channel, size_average=True):

FILE: physdreamer/gaussian_3d/utils/rigid_body_utils.py
  function get_rigid_transform (line 5) | def get_rigid_transform(A, B):
  function _test_rigid_transform (line 56) | def _test_rigid_transform():
  function _sqrt_positive_part (line 72) | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
  function matrix_to_quaternion (line 83) | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
  function quternion_to_matrix (line 146) | def quternion_to_matrix(r):
  function standardize_quaternion (line 172) | def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
  function quaternion_multiply (line 188) | def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function _test_matrix_to_quaternion (line 212) | def _test_matrix_to_quaternion():
  function _test_matrix_to_quaternion_2 (line 241) | def _test_matrix_to_quaternion_2():

FILE: physdreamer/gaussian_3d/utils/sh_utils.py
  function eval_sh (line 57) | def eval_sh(deg, sh, dirs):
  function RGB2SH (line 114) | def RGB2SH(rgb):
  function SH2RGB (line 117) | def SH2RGB(sh):

FILE: physdreamer/gaussian_3d/utils/system_utils.py
  function mkdir_p (line 16) | def mkdir_p(folder_path):
  function searchForMaxIteration (line 26) | def searchForMaxIteration(folder):

FILE: physdreamer/losses/smoothness_loss.py
  function compute_plane_tv (line 5) | def compute_plane_tv(t: torch.Tensor, only_w: bool = False) -> float:
  function compute_plane_smoothness (line 26) | def compute_plane_smoothness(t: torch.Tensor) -> float:

FILE: physdreamer/operators/dct.py
  function dct1_rfft_impl (line 12) | def dct1_rfft_impl(x):
  function dct_fft_impl (line 16) | def dct_fft_impl(v):
  function idct_irfft_impl (line 20) | def idct_irfft_impl(V):
  function dct (line 24) | def dct(x, norm=None):
  function idct (line 63) | def idct(X, norm=None):
  function dct_3d (line 110) | def dct_3d(x, norm=None):
  function idct_3d (line 127) | def idct_3d(X, norm=None):
  function code_test_dct3d (line 146) | def code_test_dct3d():

FILE: physdreamer/operators/np_operators.py
  function feature_map_to_rgb_pca (line 7) | def feature_map_to_rgb_pca(feature_map):

FILE: physdreamer/operators/rotation.py
  function rotation_6d_to_matrix (line 7) | def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
  function matrix_to_rotation_6d (line 31) | def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
  function quaternion_to_matrix (line 50) | def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
  function _sqrt_positive_part (line 82) | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
  function matrix_to_quaternion (line 93) | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:

FILE: physdreamer/utils/camera_utils.py
  function normalize (line 4) | def normalize(x: np.ndarray) -> np.ndarray:
  function viewmatrix (line 9) | def viewmatrix(lookdir: np.ndarray, up: np.ndarray, position: np.ndarray...
  function generate_spiral_path (line 18) | def generate_spiral_path(

FILE: physdreamer/utils/colmap_utils.py
  function qvec2rotmat (line 43) | def qvec2rotmat(qvec):
  function rotmat2qvec (line 55) | def rotmat2qvec(R):
  class Image (line 68) | class Image(BaseImage):
    method qvec2rotmat (line 69) | def qvec2rotmat(self):
  function read_next_bytes (line 72) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
  function read_points3D_text (line 83) | def read_points3D_text(path):
  function read_points3D_binary (line 125) | def read_points3D_binary(path_to_model_file):
  function read_intrinsics_text (line 156) | def read_intrinsics_text(path):
  function read_extrinsics_binary (line 180) | def read_extrinsics_binary(path_to_model_file):
  function read_intrinsics_binary (line 215) | def read_intrinsics_binary(path_to_model_file):
  function read_extrinsics_text (line 244) | def read_extrinsics_text(path):
  function read_colmap_bin_array (line 273) | def read_colmap_bin_array(path):

FILE: physdreamer/utils/config.py
  function load_config_with_merge (line 4) | def load_config_with_merge(config_path: str):
  function merge_without_none (line 17) | def merge_without_none(base_cfg, override_cfg):
  function create_config (line 26) | def create_config(config_path, args, cli_args: list = []):

FILE: physdreamer/utils/img_utils.py
  function make_grid (line 10) | def make_grid(imgs: torch.Tensor, scale=0.5):
  function compute_psnr (line 37) | def compute_psnr(img1, img2, mask=None):
  function torch_rgb_to_gray (line 64) | def torch_rgb_to_gray(image):
  function compute_gradient_loss (line 76) | def compute_gradient_loss(pred, gt, mask=None):
  function mark_image_with_red_squares (line 144) | def mark_image_with_red_squares(img):
  function gaussian (line 157) | def gaussian(window_size, sigma):
  function create_window (line 168) | def create_window(window_size, channel):
  function compute_ssim (line 177) | def compute_ssim(img1, img2, window_size=11, size_average=True):
  function _ssim (line 188) | def _ssim(img1, img2, window, window_size, channel, size_average=True):
  function compute_low_res_psnr (line 223) | def compute_low_res_psnr(img1, img2, scale_factor):
  function compute_low_res_mse (line 239) | def compute_low_res_mse(img1, img2, scale_factor):

FILE: physdreamer/utils/io_utils.py
  function read_video_cv2 (line 9) | def read_video_cv2(video_path, rgb=True):
  function save_video_cv2 (line 31) | def save_video_cv2(video_path, img_list, fps):
  function save_video_imageio (line 47) | def save_video_imageio(video_path, img_list, fps):
  function save_gif_imageio (line 60) | def save_gif_imageio(video_path, img_list, fps):
  function save_video_mediapy (line 71) | def save_video_mediapy(video_frames, output_video_path: str = None, fps:...

FILE: physdreamer/utils/optimizer.py
  function get_linear_schedule_with_warmup (line 5) | def get_linear_schedule_with_warmup(

FILE: physdreamer/utils/print_utils.py
  function print_if_zero_rank (line 4) | def print_if_zero_rank(s):

FILE: physdreamer/utils/pytorch_mssim.py
  function gaussian (line 8) | def gaussian(window_size, sigma):
  function create_window (line 13) | def create_window(window_size, channel=1):
  function create_window_3d (line 19) | def create_window_3d(window_size, channel=1):
  function ssim (line 27) | def ssim(img1, img2, window_size=11, window=None, size_average=True, ful...
  function ssim_matlab (line 81) | def ssim_matlab(img1, img2, window_size=11, window=None, size_average=Tr...
  function msssim (line 141) | def msssim(img1, img2, window_size=11, size_average=True, val_range=None...
  class SSIM (line 171) | class SSIM(torch.nn.Module):
    method __init__ (line 172) | def __init__(self, window_size=11, size_average=True, val_range=None):
    method forward (line 182) | def forward(self, img1, img2):
  class MSSSIM (line 196) | class MSSSIM(torch.nn.Module):
    method __init__ (line 197) | def __init__(self, window_size=11, size_average=True, channel=3):
    method forward (line 203) | def forward(self, img1, img2):

FILE: physdreamer/utils/svd_helpper.py
  function init_st (line 18) | def init_st(version_dict, load_ckpt=True, load_filter=True):
  function load_model_from_config (line 38) | def load_model_from_config(config, ckpt=None, verbose=True):
  function load_model (line 72) | def load_model(model):
  function set_lowvram_mode (line 79) | def set_lowvram_mode(mode):
  function initial_model_load (line 84) | def initial_model_load(model):
  function unload_model (line 93) | def unload_model(model):
  function get_unique_embedder_keys_from_conditioner (line 100) | def get_unique_embedder_keys_from_conditioner(conditioner):
  function get_batch (line 104) | def get_batch(keys, value_dict, N, T, device):

FILE: physdreamer/utils/torch_utils.py
  function get_sync_time (line 5) | def get_sync_time():

FILE: physdreamer/warp_mpm/gaussian_sim_utils.py
  function get_volume (line 4) | def get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:

FILE: physdreamer/warp_mpm/mpm_data_structure.py
  class MPMStateStruct (line 14) | class MPMStateStruct(object):
    method init (line 41) | def init(
    method init_grid (line 97) | def init_grid(
    method from_torch (line 119) | def from_torch(
    method reset_state (line 174) | def reset_state(
    method continue_from_torch (line 257) | def continue_from_torch(
    method set_require_grad (line 294) | def set_require_grad(self, requires_grad=True):
    method reset_density (line 305) | def reset_density(
    method partial_clone (line 356) | def partial_clone(self, device="cuda:0", requires_grad=True):
  class MPMModelStruct (line 391) | class MPMModelStruct(object):
    method init (line 424) | def init(
    method finalize_mu_lam (line 448) | def finalize_mu_lam(self, n_particles, device="cuda:0"):
    method init_other_params (line 456) | def init_other_params(self, n_grid=100, grid_lim=1.0, device="cuda:0"):
    method from_torch (line 486) | def from_torch(
    method set_require_grad (line 494) | def set_require_grad(self, requires_grad=True):
  class Dirichlet_collider (line 503) | class Dirichlet_collider:
  class GridCollider (line 537) | class GridCollider:
  class Impulse_modifier (line 548) | class Impulse_modifier:
  class MPMtailoredStruct (line 564) | class MPMtailoredStruct:
  class MaterialParamsModifier (line 590) | class MaterialParamsModifier:
  class ParticleVelocityModifier (line 599) | class ParticleVelocityModifier:
  function compute_mu_lam_from_E_nu_clean (line 621) | def compute_mu_lam_from_E_nu_clean(
  function set_vec3_to_zero (line 633) | def set_vec3_to_zero(target_array: wp.array(dtype=wp.vec3)):
  function set_vec3_to_vec3 (line 639) | def set_vec3_to_vec3(
  function set_float_vec_to_vec_wmask (line 647) | def set_float_vec_to_vec_wmask(
  function set_float_vec_to_vec (line 658) | def set_float_vec_to_vec(
  function set_mat33_to_identity (line 666) | def set_mat33_to_identity(target_array: wp.array(dtype=wp.mat33)):
  function set_mat33_to_zero (line 672) | def set_mat33_to_zero(target_array: wp.array(dtype=wp.mat33)):
  function add_identity_to_mat33 (line 678) | def add_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function subtract_identity_to_mat33 (line 686) | def subtract_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function add_vec3_to_vec3 (line 694) | def add_vec3_to_vec3(
  function set_value_to_float_array (line 702) | def set_value_to_float_array(target_array: wp.array(dtype=float), value:...
  function set_warpvalue_to_float_array (line 708) | def set_warpvalue_to_float_array(
  function get_float_array_product (line 716) | def get_float_array_product(
  function torch2warp_quat (line 725) | def torch2warp_quat(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_float (line 746) | def torch2warp_float(t, copy=False, dtype=warp.types.float32, dvc="cuda:...
  function torch2warp_vec3 (line 766) | def torch2warp_vec3(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_mat33 (line 787) | def torch2warp_mat33(t, copy=False, dtype=warp.types.float32, dvc="cuda:...

FILE: physdreamer/warp_mpm/mpm_solver_diff.py
  class MPMWARPDiff (line 13) | class MPMWARPDiff(object):
    method __init__ (line 18) | def __init__(self, n_particles, n_grid=100, grid_lim=1.0, device="cuda...
    method initialize (line 22) | def initialize(self, n_particles, n_grid=100, grid_lim=1.0, device="cu...
    method set_parameters (line 39) | def set_parameters(self, device="cuda:0", **kwargs):
    method set_parameters_dict (line 42) | def set_parameters_dict(self, mpm_model, mpm_state, kwargs={}, device=...
    method set_E_nu (line 110) | def set_E_nu(self, mpm_model, E: float, nu: float, device="cuda:0"):
    method set_E_nu_from_torch (line 141) | def set_E_nu_from_torch(
    method prepare_mu_lam (line 160) | def prepare_mu_lam(self, mpm_model, mpm_state, device="cuda:0"):
    method p2g2p_differentiable (line 169) | def p2g2p_differentiable(
    method p2g2p (line 297) | def p2g2p(self, mpm_model, mpm_state, step, dt, device="cuda:0"):
    method print_time_profile (line 435) | def print_time_profile(self):
    method add_surface_collider (line 441) | def add_surface_collider(
    method set_velocity_on_cuboid (line 545) | def set_velocity_on_cuboid(
    method add_bounding_box (line 602) | def add_bounding_box(self, start_time=0.0, end_time=999.0):
    method add_impulse_on_particles (line 674) | def add_impulse_on_particles(
    method enforce_particle_velocity_translation (line 724) | def enforce_particle_velocity_translation(
    method enforce_particle_velocity_rotation (line 772) | def enforce_particle_velocity_rotation(
    method release_particles_sequentially (line 877) | def release_particles_sequentially(
    method enforce_particle_velocity_by_mask (line 904) | def enforce_particle_velocity_by_mask(
    method restart_and_compute_F_C (line 945) | def restart_and_compute_F_C(self, mpm_model, mpm_state, target_pos, de...
    method enforce_grid_velocity_by_mask (line 995) | def enforce_grid_velocity_by_mask(
    method add_impulse_on_particles_with_mask (line 1025) | def add_impulse_on_particles_with_mask(

FILE: physdreamer/warp_mpm/mpm_utils.py
  function kirchoff_stress_FCR (line 9) | def kirchoff_stress_FCR(
  function kirchoff_stress_neoHookean (line 19) | def kirchoff_stress_neoHookean(
  function kirchoff_stress_StVK (line 51) | def kirchoff_stress_StVK(
  function kirchoff_stress_drucker_prager (line 70) | def kirchoff_stress_drucker_prager(
  function von_mises_return_mapping (line 88) | def von_mises_return_mapping(F_trial: wp.mat33, model: MPMModelStruct, p...
  function von_mises_return_mapping_with_damage (line 134) | def von_mises_return_mapping_with_damage(
  function viscoplasticity_return_mapping_with_StVK (line 191) | def viscoplasticity_return_mapping_with_StVK(
  function sand_return_mapping (line 238) | def sand_return_mapping(
  function compute_mu_lam_from_E_nu (line 278) | def compute_mu_lam_from_E_nu(state: MPMStateStruct, model: MPMModelStruct):
  function zero_grid (line 287) | def zero_grid(state: MPMStateStruct, model: MPMModelStruct):
  function compute_dweight (line 295) | def compute_dweight(
  function update_cov (line 307) | def update_cov(state: MPMStateStruct, p: int, grad_v: wp.mat33, dt: float):
  function update_cov_differentiable (line 330) | def update_cov_differentiable(
  function p2g_apic_with_stress (line 359) | def p2g_apic_with_stress(state: MPMStateStruct, model: MPMModelStruct, d...
  function grid_normalization_and_gravity (line 428) | def grid_normalization_and_gravity(
  function g2p (line 442) | def g2p(state: MPMStateStruct, model: MPMModelStruct, dt: float):
  function g2p_differentiable (line 503) | def g2p_differentiable(
  function clip_particle_x (line 583) | def clip_particle_x(state: MPMStateStruct, model: MPMModelStruct):
  function compute_stress_from_F_trial (line 604) | def compute_stress_from_F_trial(
  function add_damping_via_grid (line 723) | def add_damping_via_grid(state: MPMStateStruct, scale: float):
  function apply_additional_params (line 738) | def apply_additional_params(
  function selection_add_impulse_on_particles (line 759) | def selection_add_impulse_on_particles(
  function selection_enforce_particle_velocity_translation (line 775) | def selection_enforce_particle_velocity_translation(
  function selection_enforce_particle_velocity_cylinder (line 791) | def selection_enforce_particle_velocity_cylinder(
  function compute_position_l2_loss (line 812) | def compute_position_l2_loss(
  function aggregate_grad (line 829) | def aggregate_grad(x: wp.array(dtype=float), grad: wp.array(dtype=float)):
  function set_F_C_p2g (line 837) | def set_F_C_p2g(
  function set_F_C_g2p (line 874) | def set_F_C_g2p(state: MPMStateStruct, model: MPMModelStruct):
  function compute_posloss_with_grad (line 923) | def compute_posloss_with_grad(
  function compute_veloloss_with_grad (line 943) | def compute_veloloss_with_grad(
  function compute_Floss_with_grad (line 964) | def compute_Floss_with_grad(
  function compute_Closs_with_grad (line 997) | def compute_Closs_with_grad(

FILE: physdreamer/warp_mpm/warp_utils.py
  function from_torch_safe (line 13) | def from_torch_safe(t, dtype=None, requires_grad=None, grad=None):
  class MyTape (line 93) | class MyTape(wp.Tape):
    method get_adjoint (line 95) | def get_adjoint(self, a):
  class CondTape (line 131) | class CondTape(object):
    method __init__ (line 132) | def __init__(self, tape: Optional[MyTape], cond: bool = True) -> None:
    method __enter__ (line 136) | def __enter__(self):
    method __exit__ (line 140) | def __exit__(self, exc_type, exc_value, traceback):

FILE: projects/inference/config_demo.py
  class DemoParams (line 17) | class DemoParams(object):
    method __init__ (line 18) | def __init__(self, scene_name):
    method get_cfg (line 59) | def get_cfg(

FILE: projects/inference/demo.py
  function create_dataset (line 61) | def create_dataset(args):
  class Trainer (line 94) | class Trainer:
    method __init__ (line 95) | def __init__(self, args):
    method init_trainable_params (line 181) | def init_trainable_params(
    method setup_simulation (line 203) | def setup_simulation(self, dataset_dir, grid_size=100):
    method add_constant_force (line 412) | def add_constant_force(self, center_point, radius, force, dt, start_ti...
    method get_simulation_input (line 429) | def get_simulation_input(self, device):
    method get_material_params (line 471) | def get_material_params(self, device):
    method load (line 499) | def load(self, checkpoint_dir):
    method setup_render (line 513) | def setup_render(self, args, gaussian_path, white_background=True):
    method demo (line 580) | def demo(
    method compute_metric (line 788) | def compute_metric(self, exp_name, result_dir):
  function parse_args (line 893) | def parse_args():

FILE: projects/inference/local_utils.py
  function cycle (line 29) | def cycle(dl: torch.utils.data.DataLoader):
  function load_motion_model (line 35) | def load_motion_model(model, checkpoint_path):
  function create_spatial_fields (line 42) | def create_spatial_fields(
  function create_motion_model (line 68) | def create_motion_model(
  function create_velocity_model (line 99) | def create_velocity_model(
  function create_svd_model (line 125) | def create_svd_model(model_name="svd_full", ckpt_path=None):
  class LinearStepAnneal (line 151) | class LinearStepAnneal(object):
    method __init__ (line 153) | def __init__(
    method compute_state (line 175) | def compute_state(self, cur_iter):
  function setup_boundary_condition (line 187) | def setup_boundary_condition(
  function setup_plannar_boundary_condition (line 216) | def setup_plannar_boundary_condition(
  function find_far_points (line 259) | def find_far_points(xyzs, selected_points, thres=0.05):
  function setup_boundary_condition_with_points (line 289) | def setup_boundary_condition_with_points(
  function setup_bottom_boundary_condition (line 311) | def setup_bottom_boundary_condition(xyzs, mpm_solver, mpm_state, percent...
  function render_single_view_video (line 334) | def render_single_view_video(
  function render_gaussian_seq (line 403) | def render_gaussian_seq(cam, render_params, gaussian_pos_list, gaussian_...
  function render_gaussian_seq_w_mask (line 430) | def render_gaussian_seq_w_mask(
  function render_gaussian_seq_w_mask_with_disp (line 470) | def render_gaussian_seq_w_mask_with_disp(
  function render_gaussian_seq_w_mask_with_disp_for_figure (line 524) | def render_gaussian_seq_w_mask_with_disp_for_figure(
  function render_gaussian_seq_w_mask_cam_seq (line 591) | def render_gaussian_seq_w_mask_cam_seq(
  function apply_grid_bc_w_freeze_pts (line 631) | def apply_grid_bc_w_freeze_pts(grid_size, grid_lim, freeze_pts, mpm_solv...
  function add_constant_force (line 661) | def add_constant_force(
  function render_force_2d (line 703) | def render_force_2d(cam, render_params, center_point, force):
  function render_gaussian_seq_w_mask_cam_seq_with_force (line 750) | def render_gaussian_seq_w_mask_cam_seq_with_force(
  function render_gaussian_seq_w_mask_cam_seq_with_force_with_disp (line 828) | def render_gaussian_seq_w_mask_cam_seq_with_force_with_disp(
  function downsample_with_kmeans (line 909) | def downsample_with_kmeans(points_array: np.ndarray, num_points: int):
  function downsample_with_kmeans_gpu (line 935) | def downsample_with_kmeans_gpu(points_array: torch.Tensor, num_points: i...
  function downsample_with_kmeans_gpu_with_chunk (line 971) | def downsample_with_kmeans_gpu_with_chunk(points_array: torch.Tensor, nu...
  function interpolate_points (line 1029) | def interpolate_points(query_points, drive_displacement, top_k_index):
  function interpolate_points_w_R (line 1046) | def interpolate_points_w_R(
  function create_camera_path (line 1078) | def create_camera_path(
  function get_camera_trajectory (line 1137) | def get_camera_trajectory(cam, num_pos, camera_cfg: dict, dataset):

FILE: projects/uncleaned_train/exp_motion/train/config_demo.py
  class DemoParams (line 15) | class DemoParams(object):
    method __init__ (line 16) | def __init__(self):
    method get_cfg (line 41) | def get_cfg(

FILE: projects/uncleaned_train/exp_motion/train/convert_gaussian_to_mesh.py
  function convert_gaussian_to_mesh (line 9) | def convert_gaussian_to_mesh(gaussian_path, thresh=0.1, save_path=None):
  function internal_filling (line 33) | def internal_filling(gaussian_path, thresh=2.0,  save_path=None, resolut...

FILE: projects/uncleaned_train/exp_motion/train/fast_train_velocity.py
  function create_dataset (line 76) | def create_dataset(args):
  class Trainer (line 110) | class Trainer:
    method __init__ (line 111) | def __init__(self, args):
    method init_trainable_params (line 308) | def init_trainable_params(
    method setup_simulation (line 338) | def setup_simulation(self, dataset_dir, grid_size=100):
    method set_simulation_state (line 553) | def set_simulation_state(
    method get_density_velocity (line 591) | def get_density_velocity(self, time_stamp: float, device, requires_gra...
    method train_one_step (line 618) | def train_one_step(self):
    method train (line 875) | def train(self):
    method inference (line 889) | def inference(
    method save (line 977) | def save(
    method load (line 1000) | def load(self, checkpoint_dir):
    method setup_eval (line 1013) | def setup_eval(self, args, gaussian_path, white_background=True):
    method eval (line 1087) | def eval(
  function parse_args (line 1126) | def parse_args():

FILE: projects/uncleaned_train/exp_motion/train/interface.py
  class MPMDifferentiableSimulation (line 18) | class MPMDifferentiableSimulation(autograd.Function):
    method forward (line 21) | def forward(
    method backward (line 155) | def backward(ctx, out_pos_grad: Float[Tensor, "n 3"]):
  class MPMDifferentiableSimulationWCheckpoint (line 241) | class MPMDifferentiableSimulationWCheckpoint(autograd.Function):
    method forward (line 249) | def forward(
    method backward (line 344) | def backward(ctx, out_pos_grad: Float[Tensor, "n 3"], out_velo_grad: F...
  class MPMDifferentiableSimulationClean (line 543) | class MPMDifferentiableSimulationClean(autograd.Function):
    method forward (line 551) | def forward(
    method backward (line 665) | def backward(ctx, out_pos_grad: Float[Tensor, "n 3"], out_velo_grad: F...

FILE: projects/uncleaned_train/exp_motion/train/local_utils.py
  function cycle (line 30) | def cycle(dl: torch.utils.data.DataLoader):
  function load_motion_model (line 36) | def load_motion_model(model, checkpoint_path):
  function create_spatial_fields (line 43) | def create_spatial_fields(
  function create_motion_model (line 85) | def create_motion_model(
  function create_velocity_model (line 116) | def create_velocity_model(
  function create_svd_model (line 142) | def create_svd_model(model_name="svd_full", ckpt_path=None):
  class LinearStepAnneal (line 166) | class LinearStepAnneal(object):
    method __init__ (line 168) | def __init__(
    method compute_state (line 190) | def compute_state(self, cur_iter):
  function setup_boundary_condition (line 202) | def setup_boundary_condition(
  function setup_plannar_boundary_condition (line 231) | def setup_plannar_boundary_condition(
  function find_far_points (line 274) | def find_far_points(xyzs, selected_points, thres=0.05):
  function setup_boundary_condition_with_points (line 304) | def setup_boundary_condition_with_points(
  function setup_bottom_boundary_condition (line 326) | def setup_bottom_boundary_condition(xyzs, mpm_solver, mpm_state, percent...
  function render_single_view_video (line 349) | def render_single_view_video(
  function render_gaussian_seq (line 418) | def render_gaussian_seq(cam, render_params, gaussian_pos_list, gaussian_...
  function render_gaussian_seq_w_mask (line 445) | def render_gaussian_seq_w_mask(
  function render_gaussian_seq_w_mask_with_disp (line 485) | def render_gaussian_seq_w_mask_with_disp(
  function render_gaussian_seq_w_mask_with_disp_for_figure (line 539) | def render_gaussian_seq_w_mask_with_disp_for_figure(
  function render_gaussian_seq_w_mask_cam_seq (line 606) | def render_gaussian_seq_w_mask_cam_seq(
  function apply_grid_bc_w_freeze_pts (line 646) | def apply_grid_bc_w_freeze_pts(grid_size, grid_lim, freeze_pts, mpm_solv...
  function add_constant_force (line 676) | def add_constant_force(
  function render_force_2d (line 718) | def render_force_2d(cam, render_params, center_point, force):
  function render_gaussian_seq_w_mask_cam_seq_with_force (line 765) | def render_gaussian_seq_w_mask_cam_seq_with_force(
  function render_gaussian_seq_w_mask_cam_seq_with_force_with_disp (line 843) | def render_gaussian_seq_w_mask_cam_seq_with_force_with_disp(
  function downsample_with_kmeans (line 924) | def downsample_with_kmeans(points_array: np.ndarray, num_points: int):
  function downsample_with_kmeans_gpu (line 950) | def downsample_with_kmeans_gpu(points_array: torch.Tensor, num_points: i...
  function interpolate_points (line 985) | def interpolate_points(query_points, drive_displacement, top_k_index):
  function interpolate_points_w_R (line 1002) | def interpolate_points_w_R(
  function create_camera_path (line 1034) | def create_camera_path(
  function get_camera_trajectory (line 1093) | def get_camera_trajectory(cam, num_pos, camera_cfg: dict, dataset):

FILE: projects/uncleaned_train/exp_motion/train/train_material.py
  function create_dataset (line 94) | def create_dataset(args):
  class Trainer (line 132) | class Trainer:
    method __init__ (line 133) | def __init__(self, args):
    method init_trainable_params (line 363) | def init_trainable_params(
    method setup_simulation (line 394) | def setup_simulation(self, dataset_dir, grid_size=100):
    method get_simulation_input (line 618) | def get_simulation_input(self, device):
    method get_material_params (line 661) | def get_material_params(self, device):
    method train_one_step (line 691) | def train_one_step(self):
    method train (line 1052) | def train(self):
    method inference (line 1066) | def inference(
    method save (line 1162) | def save(
    method load (line 1185) | def load(self, checkpoint_dir):
    method setup_eval (line 1198) | def setup_eval(self, args, gaussian_path, white_background=True):
    method demo (line 1272) | def demo(
  function parse_args (line 1417) | def parse_args():

FILE: projects/uncleaned_train/motionrep/datatools/_convert_fbx_to_mesh.py
  function convert (line 8) | def convert(fbx_path):
  function convert2 (line 85) | def convert2(fbx_path):
  function main (line 191) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/blender_deforming_things4d.py
  function anime_read (line 9) | def anime_read(filename):

FILE: projects/uncleaned_train/motionrep/datatools/blender_render_imgs.py
  function focal2fov (line 12) | def focal2fov(focal, pixels):
  function create_camera (line 16) | def create_camera(location, rotation):
  function set_camera_look_at (line 22) | def set_camera_look_at(camera, target_point):
  function setup_alpha_mask (line 32) | def setup_alpha_mask(obj_name, pass_index=1):
  function render_scene (line 68) | def render_scene(camera, output_path):
  function setup_light (line 84) | def setup_light():
  function create_mesh_from_data (line 110) | def create_mesh_from_data(vertices, faces):
  function write_next_bytes (line 198) | def write_next_bytes(fid, data, format_char_sequence, endian_character="...
  function write_cameras_binary (line 214) | def write_cameras_binary(cameras, path_to_model_file):
  function write_images_binary (line 231) | def write_images_binary(images, path_to_model_file):
  function write_points3D_binary (line 252) | def write_points3D_binary(points3D, path_to_model_file):
  function get_colmap_camera (line 271) | def get_colmap_camera(camera_obj, render_resolution):
  function main (line 320) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/deforming_things4d.py
  function anime_read (line 5) | def anime_read(filename):
  function extract_trajectory (line 32) | def extract_trajectory(
  function main (line 55) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/fbx_to_mesh.py
  function convert_to_mesh (line 6) | def convert_to_mesh(fbx_path, output_dir):
  function convert_obj_to_traj (line 50) | def convert_obj_to_traj(meshes_dir):
  function main (line 78) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/fbx_to_mesh_flag.py
  function convert_to_mesh (line 6) | def convert_to_mesh(fbx_path, output_dir):
  function subdivde_mesh (line 53) | def subdivde_mesh(mesh_directory, output_directory):
  function convert_obj_to_traj (line 96) | def convert_obj_to_traj(meshes_dir):
  function main (line 124) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/render_blender_annimations.py
  function focal2fov (line 12) | def focal2fov(focal, pixels):
  function create_camera (line 16) | def create_camera(location, rotation):
  function set_camera_look_at (line 22) | def set_camera_look_at(camera, target_point):
  function setup_alpha_mask (line 32) | def setup_alpha_mask(obj_name, pass_index=1):
  function render_scene (line 68) | def render_scene(camera, output_path):
  function setup_light (line 84) | def setup_light():
  function create_mesh_from_data (line 110) | def create_mesh_from_data(vertices, faces):
  function write_next_bytes (line 198) | def write_next_bytes(fid, data, format_char_sequence, endian_character="...
  function write_cameras_binary (line 214) | def write_cameras_binary(cameras, path_to_model_file):
  function write_images_binary (line 231) | def write_images_binary(images, path_to_model_file):
  function write_points3D_binary (line 252) | def write_points3D_binary(points3D, path_to_model_file):
  function get_colmap_camera (line 271) | def get_colmap_camera(camera_obj, render_resolution):
  function main (line 320) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/render_fbx_first_frame.py
  function focal2fov (line 12) | def focal2fov(focal, pixels):
  function create_camera (line 16) | def create_camera(location, rotation):
  function set_camera_look_at (line 22) | def set_camera_look_at(camera, target_point):
  function setup_alpha_mask (line 32) | def setup_alpha_mask(obj_name, pass_index=1):
  function render_scene (line 68) | def render_scene(camera, output_path, mask_name="U3DMesh"):
  function normalize_mesh (line 84) | def normalize_mesh(obj):
  function setup_light (line 115) | def setup_light():
  function create_mesh_from_fpx (line 153) | def create_mesh_from_fpx(fbx_path):
  function get_colmap_camera (line 173) | def get_colmap_camera(camera_obj, render_resolution):
  function get_textures (line 222) | def get_textures(
  function main (line 303) | def main():
  function find_material (line 398) | def find_material():

FILE: projects/uncleaned_train/motionrep/datatools/render_obj.py
  function focal2fov (line 12) | def focal2fov(focal, pixels):
  function create_camera (line 16) | def create_camera(location, rotation):
  function set_camera_look_at (line 22) | def set_camera_look_at(camera, target_point):
  function setup_alpha_mask (line 32) | def setup_alpha_mask(obj_name, pass_index=1):
  function render_scene (line 68) | def render_scene(camera, output_path, mask_name="U3DMesh"):
  function setup_light (line 84) | def setup_light():
  function create_mesh_from_obj (line 122) | def create_mesh_from_obj(obj_file_path):
  function get_focal_length (line 143) | def get_focal_length(camera_obj, render_resolution):
  function normalize_mesh (line 176) | def normalize_mesh(transform_meta_path, mesh_objects):
  function apply_rotation (line 196) | def apply_rotation(mesh_objects):
  function main (line 209) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/render_obj_external_texture.py
  function focal2fov (line 12) | def focal2fov(focal, pixels):
  function create_camera (line 16) | def create_camera(location, rotation):
  function set_camera_look_at (line 22) | def set_camera_look_at(camera, target_point):
  function setup_alpha_mask (line 32) | def setup_alpha_mask(obj_name, pass_index=1):
  function render_scene (line 68) | def render_scene(camera, output_path, mask_name="U3DMesh"):
  function setup_light (line 84) | def setup_light():
  function create_mesh_from_obj (line 122) | def create_mesh_from_obj(obj_file_path):
  function get_focal_length (line 143) | def get_focal_length(camera_obj, render_resolution):
  function normalize_mesh (line 176) | def normalize_mesh(transform_meta_path, mesh_objects):
  function apply_rotation (line 196) | def apply_rotation(mesh_objects):
  function get_textures (line 209) | def get_textures(
  function main (line 290) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/test_colmap_camera.py
  function qvec2rotmat (line 42) | def qvec2rotmat(qvec):
  function rotmat2qvec (line 64) | def rotmat2qvec(R):
  function fov2focal (line 84) | def fov2focal(fov, pixels):
  class Image (line 88) | class Image(BaseImage):
    method qvec2rotmat (line 89) | def qvec2rotmat(self):
  function read_next_bytes (line 93) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
  function read_points3D_text (line 105) | def read_points3D_text(path):
  function read_points3D_binary (line 147) | def read_points3D_binary(path_to_model_file):
  function read_intrinsics_text (line 182) | def read_intrinsics_text(path):
  function read_extrinsics_binary (line 209) | def read_extrinsics_binary(path_to_model_file):
  function read_intrinsics_binary (line 255) | def read_intrinsics_binary(path_to_model_file):
  function read_extrinsics_text (line 288) | def read_extrinsics_text(path):
  function read_colmap_bin_array (line 323) | def read_colmap_bin_array(path):
  class CameraInfo (line 348) | class CameraInfo(NamedTuple):
  function focal2fov (line 361) | def focal2fov(focal, pixels):
  function readColmapCameras (line 365) | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
  function read_camera_points (line 417) | def read_camera_points(dir_path):
  function extract_projection_matrix (line 436) | def extract_projection_matrix(cam_info):
  function main (line 462) | def main():

FILE: projects/uncleaned_train/motionrep/datatools/transform_obj_for_blender.py
  function transform_vertex (line 8) | def transform_vertex(vertex: np.ndarray, transform_dict):
  function colmap_to_blender_transform (line 26) | def colmap_to_blender_transform(vertex: np.ndarray):
  function copy_mtl_file (line 35) | def copy_mtl_file(obj_path, transformed_obj_path):
  function main (line 46) | def main():

FILE: projects/uncleaned_train/motionrep/diffusion/builder.py
  function create_gaussian_diffusion (line 5) | def create_gaussian_diffusion(

FILE: projects/uncleaned_train/motionrep/diffusion/discretizer.py
  class EDMResShiftedDiscretization (line 6) | class EDMResShiftedDiscretization(Discretization):
    method __init__ (line 7) | def __init__(
    method get_sigmas (line 15) | def get_sigmas(self, n, device="cpu"):

FILE: projects/uncleaned_train/motionrep/diffusion/draft.py
  function latent_sds (line 5) | def latent_sds(input_x, schduler, unet, t_range=[0.02, 0.98]):

FILE: projects/uncleaned_train/motionrep/diffusion/gaussian_diffusion.py
  function mean_flat (line 19) | def mean_flat(tensor):
  function get_named_beta_schedule (line 26) | def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
  function betas_for_alpha_bar (line 53) | def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.9...
  class ModelMeanType (line 73) | class ModelMeanType(enum.Enum):
  class ModelVarType (line 83) | class ModelVarType(enum.Enum):
  class LossType (line 97) | class LossType(enum.Enum):
    method is_vb (line 105) | def is_vb(self):
  class GaussianDiffusion (line 109) | class GaussianDiffusion:
    method __init__ (line 126) | def __init__(
    method q_mean_variance (line 179) | def q_mean_variance(self, x_start, t):
    method q_sample (line 196) | def q_sample(self, x_start, t, noise=None):
    method q_posterior_mean_variance (line 216) | def q_posterior_mean_variance(self, x_start, x_t, t):
    method p_mean_variance (line 240) | def p_mean_variance(
    method _predict_xstart_from_eps (line 336) | def _predict_xstart_from_eps(self, x_t, t, eps):
    method _predict_xstart_from_xprev (line 343) | def _predict_xstart_from_xprev(self, x_t, t, xprev):
    method _predict_eps_from_xstart (line 353) | def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    method _scale_timesteps (line 359) | def _scale_timesteps(self, t):
    method condition_mean (line 364) | def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
    method condition_score (line 379) | def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
    method p_sample (line 403) | def p_sample(
    method p_sample_loop (line 449) | def p_sample_loop(
    method p_sample_loop_progressive (line 495) | def p_sample_loop_progressive(
    method ddim_sample (line 545) | def ddim_sample(
    method ddim_reverse_sample (line 611) | def ddim_reverse_sample(
    method ddim_sample_loop (line 649) | def ddim_sample_loop(
    method ddim_sample_loop_progressive (line 689) | def ddim_sample_loop_progressive(
    method _vb_terms_bpd (line 745) | def _vb_terms_bpd(
    method training_losses (line 780) | def training_losses(self, model, x_start, t, model_kwargs=None, noise=...
    method _prior_bpd (line 876) | def _prior_bpd(self, x_start):
    method calc_bpd_loop (line 894) | def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwar...
  function _extract_into_tensor (line 952) | def _extract_into_tensor(arr, timesteps, broadcast_shape):

FILE: projects/uncleaned_train/motionrep/diffusion/losses.py
  function normal_kl (line 12) | def normal_kl(mean1, logvar1, mean2, logvar2):
  function approx_standard_normal_cdf (line 42) | def approx_standard_normal_cdf(x):
  function discretized_gaussian_log_likelihood (line 50) | def discretized_gaussian_log_likelihood(x, *, means, log_scales):

FILE: projects/uncleaned_train/motionrep/diffusion/resample.py
  function create_named_schedule_sampler (line 12) | def create_named_schedule_sampler(name, diffusion):
  class ScheduleSampler (line 27) | class ScheduleSampler(ABC):
    method weights (line 39) | def weights(self):
    method sample (line 46) | def sample(self, batch_size, device):
  class UniformSampler (line 65) | class UniformSampler(ScheduleSampler):
    method __init__ (line 66) | def __init__(self, diffusion):
    method weights (line 70) | def weights(self):
  class LossAwareSampler (line 74) | class LossAwareSampler(ScheduleSampler):
    method update_with_local_losses (line 75) | def update_with_local_losses(self, local_ts, local_losses):
    method update_with_all_losses (line 111) | def update_with_all_losses(self, ts, losses):
  class LossSecondMomentResampler (line 128) | class LossSecondMomentResampler(LossAwareSampler):
    method __init__ (line 129) | def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
    method weights (line 138) | def weights(self):
    method update_with_all_losses (line 147) | def update_with_all_losses(self, ts, losses):
    method _warmed_up (line 157) | def _warmed_up(self):

FILE: projects/uncleaned_train/motionrep/diffusion/respace.py
  function space_timesteps (line 7) | def space_timesteps(num_timesteps, section_counts):
  class SpacedDiffusion (line 63) | class SpacedDiffusion(GaussianDiffusion):
    method __init__ (line 72) | def __init__(self, use_timesteps, **kwargs):
    method p_mean_variance (line 88) | def p_mean_variance(
    method training_losses (line 93) | def training_losses(
    method condition_mean (line 98) | def condition_mean(self, cond_fn, *args, **kwargs):
    method condition_score (line 101) | def condition_score(self, cond_fn, *args, **kwargs):
    method _wrap_model (line 104) | def _wrap_model(self, model):
    method _scale_timesteps (line 111) | def _scale_timesteps(self, t):
  class _WrappedModel (line 116) | class _WrappedModel:
    method __init__ (line 117) | def __init__(self, model, timestep_map, rescale_timesteps, original_nu...
    method __call__ (line 123) | def __call__(self, x, ts, **kwargs):

FILE: projects/uncleaned_train/motionrep/diffusion/sigma_sampling.py
  function exists (line 7) | def exists(x):
  function default (line 11) | def default(val, d):
  class EDMSamplingWithResShift (line 17) | class EDMSamplingWithResShift:
    method __init__ (line 18) | def __init__(self, p_mean=-1.2, p_std=1.2, scale_shift=320.0 / 576):
    method __call__ (line 23) | def __call__(self, n_samples, rand=None):

FILE: projects/uncleaned_train/motionrep/diffusion/sv_diffusion_engine.py
  class SVDiffusionEngine (line 25) | class SVDiffusionEngine(pl.LightningModule):
    method __init__ (line 30) | def __init__(
    method init_from_ckpt (line 103) | def init_from_ckpt(
    method _init_first_stage (line 126) | def _init_first_stage(self, config):
    method get_input (line 133) | def get_input(self, batch):
    method decode_first_stage (line 139) | def decode_first_stage(self, z):
    method encode_first_stage (line 159) | def encode_first_stage(self, x):
    method forward (line 173) | def forward(self, batch, training=True):
    method shared_step (line 186) | def shared_step(self, batch: Dict) -> Any:
    method training_step (line 193) | def training_step(self, batch, batch_idx):
    method on_train_start (line 217) | def on_train_start(self, *args, **kwargs):
    method on_train_batch_end (line 221) | def on_train_batch_end(self, *args, **kwargs):
    method ema_scope (line 226) | def ema_scope(self, context=None):
    method instantiate_optimizer_from_config (line 240) | def instantiate_optimizer_from_config(self, params, lr, cfg):
    method configure_optimizers (line 245) | def configure_optimizers(self):
    method get_trainable_parameters (line 265) | def get_trainable_parameters(self):
    method sample (line 280) | def sample(
    method log_conditionings (line 297) | def log_conditionings(self, batch: Dict, n: int) -> Dict:
    method log_images (line 336) | def log_images(

FILE: projects/uncleaned_train/motionrep/diffusion/svd_conditioner.py
  class SVDConditioner (line 19) | class SVDConditioner(GeneralConditioner):
    method __init__ (line 23) | def __init__(self, emb_models: Union[List, ListConfig]):
    method forward (line 26) | def forward(

FILE: projects/uncleaned_train/motionrep/diffusion/svd_sds_engine.py
  class SVDSDSEngine (line 32) | class SVDSDSEngine(pl.LightningModule):
    method __init__ (line 37) | def __init__(
    method init_from_ckpt (line 98) | def init_from_ckpt(
    method _init_first_stage (line 122) | def _init_first_stage(self, config):
    method get_input (line 132) | def get_input(self, batch):
    method encode_first_stage (line 137) | def encode_first_stage(self, x):
    method forward (line 151) | def forward(self, batch, sample_time_range=[0.02, 0.98]):
    method forward_with_encoder_chunk (line 177) | def forward_with_encoder_chunk(
    method edm_sds (line 227) | def edm_sds(self, input_x, extra_input, sample_time_range=[0.02, 0.98]):
    method edm_sds_multistep (line 327) | def edm_sds_multistep(self, input_x, extra_input, sample_time_range=[0...
    method sampler_step (line 433) | def sampler_step(self, sigma, noised_input, c, uc=None, num_frames=Non...

FILE: projects/uncleaned_train/motionrep/diffusion/svd_sds_engine_backup.py
  class SVDSDSEngine (line 30) | class SVDSDSEngine(pl.LightningModule):
    method __init__ (line 35) | def __init__(
    method init_from_ckpt (line 86) | def init_from_ckpt(
    method _init_first_stage (line 110) | def _init_first_stage(self, config):
    method get_input (line 120) | def get_input(self, batch):
    method encode_first_stage (line 125) | def encode_first_stage(self, x):
    method forward (line 139) | def forward(self, batch, training=True):
    method forward_with_encoder_chunk (line 159) | def forward_with_encoder_chunk(self, batch, chunk_size=2):
    method emd_sds (line 203) | def emd_sds(self, input_x, extra_input):

FILE: projects/uncleaned_train/motionrep/diffusion/svd_sds_wdecoder_engine.py
  class SVDWDecSDSEngine (line 33) | class SVDWDecSDSEngine(pl.LightningModule):
    method __init__ (line 38) | def __init__(
    method init_from_ckpt (line 99) | def init_from_ckpt(
    method _init_first_stage (line 123) | def _init_first_stage(self, config):
    method get_input (line 130) | def get_input(self, batch):
    method encode_first_stage (line 135) | def encode_first_stage(self, x):
    method decode_first_stage (line 150) | def decode_first_stage(self, z):
    method forward (line 169) | def forward(self, batch, sample_time_range=[0.02, 0.98]):
    method forward_with_encoder_chunk (line 195) | def forward_with_encoder_chunk(
    method edm_sds (line 245) | def edm_sds(self, input_x, extra_input, sample_time_range=[0.02, 0.98]):
    method edm_sds_multistep (line 346) | def edm_sds_multistep(self, input_x, extra_input, sample_time_range=[0...
    method resample_multistep (line 453) | def resample_multistep(self, input_x, extra_input, sample_time_range=[...
    method sampler_step (line 558) | def sampler_step(self, sigma, noised_input, c, uc=None, num_frames=Non...

FILE: projects/uncleaned_train/motionrep/diffusion/video_diffusion_loss.py
  class StandardVideoDiffusionLoss (line 13) | class StandardVideoDiffusionLoss(nn.Module):
    method __init__ (line 14) | def __init__(
    method get_noised_input (line 43) | def get_noised_input(
    method forward (line 49) | def forward(
    method _forward (line 65) | def _forward(
    method get_loss (line 109) | def get_loss(self, model_output, target, w):

FILE: projects/uncleaned_train/motionrep/field_components/encoding.py
  class TemporalKplanesEncoding (line 9) | class TemporalKplanesEncoding(nn.Module):
    method __init__ (line 16) | def __init__(
    method forward (line 57) | def forward(self, inp: Float[Tensor, "*bs 4"]):
    method compute_temporal_smoothness (line 83) | def compute_temporal_smoothness(
    method compute_plane_tv (line 93) | def compute_plane_tv(
    method visualize (line 103) | def visualize(
    method functional_forward (line 114) | def functional_forward(
  class TriplanesEncoding (line 149) | class TriplanesEncoding(nn.Module):
    method __init__ (line 156) | def __init__(
    method forward (line 196) | def forward(self, inp: Float[Tensor, "*bs 3"]):
    method compute_plane_tv (line 222) | def compute_plane_tv(
  class PlaneEncoding (line 233) | class PlaneEncoding(nn.Module):
    method __init__ (line 240) | def __init__(
    method forward (line 273) | def forward(self, inp: Float[Tensor, "*bs 2"]):
    method compute_plane_tv (line 288) | def compute_plane_tv(
  class TemporalNeRFEncoding (line 299) | class TemporalNeRFEncoding(nn.Module):
    method __init__ (line 300) | def __init__(
    method get_out_dim (line 317) | def get_out_dim(self) -> int:
    method forward (line 325) | def forward(

FILE: projects/uncleaned_train/motionrep/field_components/mlp.py
  class MLP (line 11) | class MLP(nn.Module):
    method __init__ (line 12) | def __init__(
    method build_nn_modules (line 40) | def build_nn_modules(self) -> None:
    method pytorch_fwd (line 65) | def pytorch_fwd(
    method forward (line 88) | def forward(

FILE: projects/uncleaned_train/motionrep/fields/dct_trajectory_field.py
  class DCTTrajctoryField (line 7) | class DCTTrajctoryField(nn.Module):
    method __init__ (line 8) | def __init__(
    method forward (line 14) | def forward(self, x):
    method query_points_at_time (line 17) | def query_points_at_time(self, x, t):

FILE: projects/uncleaned_train/motionrep/fields/discrete_field.py
  class PointSetMotionSE3 (line 15) | class PointSetMotionSE3(nn.Module):
    method __init__ (line 26) | def __init__(
    method construct_knn (line 70) | def construct_knn(self, inpx: Float[Tensor, "*bs 3"], topk=10, chunk_s...
    method prepare_isometry (line 94) | def prepare_isometry(self, points, knn_ind):
    method _forward_single_time (line 105) | def _forward_single_time(self, time_ind: int):
    method forward (line 121) | def forward(
    method compute_smoothess_loss (line 137) | def compute_smoothess_loss(
    method compute_arap_loss (line 146) | def compute_arap_loss(
    method compute_isometry_loss (line 181) | def compute_isometry_loss(
    method compute_loss (line 199) | def compute_loss(

FILE: projects/uncleaned_train/motionrep/fields/mul_offset_field.py
  class MulTemporalKplanesOffsetfields (line 15) | class MulTemporalKplanesOffsetfields(nn.Module):
    method __init__ (line 28) | def __init__(
    method forward (line 74) | def forward(
    method compute_smoothess_loss (line 98) | def compute_smoothess_loss(
    method compute_loss (line 117) | def compute_loss(
    method arap_loss (line 133) | def arap_loss(self, inp):

FILE: projects/uncleaned_train/motionrep/fields/mul_se3_field.py
  class MulTemporalKplanesSE3fields (line 15) | class MulTemporalKplanesSE3fields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 75) | def forward(
    method compute_smoothess_loss (line 111) | def compute_smoothess_loss(
    method compute_loss (line 130) | def compute_loss(

FILE: projects/uncleaned_train/motionrep/fields/offset_field.py
  class TemporalKplanesOffsetfields (line 15) | class TemporalKplanesOffsetfields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 66) | def forward(
    method compute_smoothess_loss (line 87) | def compute_smoothess_loss(
    method compute_loss (line 100) | def compute_loss(
    method arap_loss (line 116) | def arap_loss(self, inp):
    method forward_with_plane_coefs (line 119) | def forward_with_plane_coefs(

FILE: projects/uncleaned_train/motionrep/fields/se3_field.py
  class TemporalKplanesSE3fields (line 15) | class TemporalKplanesSE3fields(nn.Module):
    method __init__ (line 26) | def __init__(
    method forward (line 69) | def forward(
    method compute_smoothess_loss (line 120) | def compute_smoothess_loss(
    method compute_loss (line 133) | def compute_loss(

FILE: projects/uncleaned_train/motionrep/fields/triplane_field.py
  class TriplaneFields (line 11) | class TriplaneFields(nn.Module):
    method __init__ (line 22) | def __init__(
    method forward (line 55) | def forward(
    method compute_smoothess_loss (line 70) | def compute_smoothess_loss(
  function compute_entropy (line 78) | def compute_entropy(p):
  class TriplaneFieldsWithEntropy (line 82) | class TriplaneFieldsWithEntropy(nn.Module):
    method __init__ (line 93) | def __init__(
    method forward (line 130) | def forward(
    method compute_smoothess_loss (line 152) | def compute_smoothess_loss(

FILE: projects/uncleaned_train/motionrep/fields/video_triplane_disp_field.py
  class TriplaneDispFields (line 16) | class TriplaneDispFields(nn.Module):
    method __init__ (line 27) | def __init__(
    method forward (line 82) | def forward(
    method compute_smoothess_loss (line 102) | def compute_smoothess_loss(
    method get_canonical (line 110) | def get_canonical(
    method sample_canonical (line 127) | def sample_canonical(
  class PlaneDynamicDispFields (line 180) | class PlaneDynamicDispFields(nn.Module):
    method __init__ (line 191) | def __init__(
    method forward (line 255) | def forward(
    method compute_smoothess_loss (line 281) | def compute_smoothess_loss(
    method get_canonical (line 289) | def get_canonical(
    method sample_canonical (line 306) | def sample_canonical(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/arguments/__init__.py
  class GroupParams (line 16) | class GroupParams:
  class ParamGroup (line 19) | class ParamGroup:
    method __init__ (line 20) | def __init__(self, parser: ArgumentParser, name : str, fill_none = Fal...
    method extract (line 40) | def extract(self, args):
  class ModelParams (line 47) | class ModelParams(ParamGroup):
    method __init__ (line 48) | def __init__(self, parser, sentinel=False):
    method extract (line 59) | def extract(self, args):
  class PipelineParams (line 64) | class PipelineParams(ParamGroup):
    method __init__ (line 65) | def __init__(self, parser):
  class OptimizationParams (line 71) | class OptimizationParams(ParamGroup):
    method __init__ (line 72) | def __init__(self, parser):
  function get_combined_args (line 91) | def get_combined_args(parser : ArgumentParser):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/depth_uv_render.py
  function render_uv_depth_w_gaussian (line 12) | def render_uv_depth_w_gaussian(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/feat_render.py
  function render_feat_gaussian (line 12) | def render_feat_gaussian(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/flow_depth_render.py
  function render_flow_depth_w_gaussian (line 12) | def render_flow_depth_w_gaussian(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/motion_renderer.py
  function render_motion_w_gaussian (line 9) | def render_motion_w_gaussian(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/render.py
  function render_gaussian (line 21) | def render_gaussian(
  function gaussian_intrin_scale (line 127) | def gaussian_intrin_scale(x_or_y: torch.Tensor, w_or_h: float):
  function render_arrow_in_screen (line 134) | def render_arrow_in_screen(viewpoint_camera, points_3d):
  function render_arrow_in_screen_back (line 190) | def render_arrow_in_screen_back(viewpoint_camera, points_3d):
  function eval_sh (line 261) | def eval_sh(deg, sh, dirs):
  function RGB2SH (line 327) | def RGB2SH(rgb):
  function SH2RGB (line 331) | def SH2RGB(sh):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/__init__.py
  class Scene (line 26) | class Scene:
    method __init__ (line 29) | def __init__(
    method save (line 116) | def save(self, iteration):
    method getTrainCameras (line 122) | def getTrainCameras(self, scale=1.0):
    method getTestCameras (line 125) | def getTestCameras(self, scale=1.0):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/cameras.py
  class Camera (line 21) | class Camera(nn.Module):
    method __init__ (line 22) | def __init__(
  class MiniCam (line 91) | class MiniCam:
    method __init__ (line 92) | def __init__(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/colmap_loader.py
  function qvec2rotmat (line 43) | def qvec2rotmat(qvec):
  function rotmat2qvec (line 55) | def rotmat2qvec(R):
  class Image (line 68) | class Image(BaseImage):
    method qvec2rotmat (line 69) | def qvec2rotmat(self):
  function read_next_bytes (line 72) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
  function read_points3D_text (line 83) | def read_points3D_text(path):
  function read_points3D_binary (line 113) | def read_points3D_binary(path_to_model_file):
  function read_intrinsics_text (line 144) | def read_intrinsics_text(path):
  function read_extrinsics_binary (line 168) | def read_extrinsics_binary(path_to_model_file):
  function read_intrinsics_binary (line 203) | def read_intrinsics_binary(path_to_model_file):
  function read_extrinsics_text (line 232) | def read_extrinsics_text(path):
  function read_colmap_bin_array (line 261) | def read_colmap_bin_array(path):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/dataset_readers.py
  class CameraInfo (line 45) | class CameraInfo(NamedTuple):
  class SceneInfo (line 58) | class SceneInfo(NamedTuple):
  function getNerfppNorm (line 66) | def getNerfppNorm(cam_info):
  function readColmapCameras (line 90) | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
  function fetchPly (line 142) | def fetchPly(path):
  function storePly (line 151) | def storePly(path, xyz, rgb):
  function readColmapSceneInfo (line 177) | def readColmapSceneInfo(path, images, eval, llffhold=8):
  function readCamerasFromTransforms (line 233) | def readCamerasFromTransforms(path, transformsfile, white_background, ex...
  function readNerfSyntheticInfo (line 297) | def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
  class NoImageCamera (line 349) | class NoImageCamera(nn.Module):
    method __init__ (line 350) | def __init__(
  function fast_read_cameras_from_transform_file (line 425) | def fast_read_cameras_from_transform_file(file_path, width=1080, height=...

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/gaussian_model.py
  class GaussianModel (line 37) | class GaussianModel:
    method setup_functions (line 38) | def setup_functions(self):
    method __init__ (line 55) | def __init__(self, sh_degree: int = 3):
    method capture (line 74) | def capture(self):
    method restore (line 95) | def restore(self, model_args, training_args):
    method capture_training_args (line 118) | def capture_training_args(
    method get_scaling (line 124) | def get_scaling(self):
    method get_rotation (line 128) | def get_rotation(self):
    method get_xyz (line 132) | def get_xyz(self):
    method get_features (line 136) | def get_features(self):
    method get_opacity (line 142) | def get_opacity(self):
    method get_covariance (line 145) | def get_covariance(self, scaling_modifier=1):
    method oneupSHdegree (line 150) | def oneupSHdegree(self):
    method create_from_pcd (line 154) | def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):
    method training_setup (line 196) | def training_setup(self, training_args):
    method update_learning_rate (line 242) | def update_learning_rate(self, iteration):
    method construct_list_of_attributes (line 250) | def construct_list_of_attributes(self):
    method save_ply (line 264) | def save_ply(self, path):
    method reset_opacity (line 301) | def reset_opacity(self):
    method load_ply (line 308) | def load_ply(self, path):
    method replace_tensor_to_optimizer (line 388) | def replace_tensor_to_optimizer(self, tensor, name):
    method _prune_optimizer (line 403) | def _prune_optimizer(self, mask):
    method prune_points (line 425) | def prune_points(self, mask):
    method cat_tensors_to_optimizer (line 441) | def cat_tensors_to_optimizer(self, tensors_dict):
    method densification_postfix (line 475) | def densification_postfix(
    method densify_and_split (line 505) | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
    method densify_and_clone (line 549) | def densify_and_clone(self, grads, grad_threshold, scene_extent):
    method densify_and_prune (line 576) | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_...
    method add_densification_stats (line 594) | def add_densification_stats(self, viewspace_point_tensor, update_filter):
    method apply_discrete_offset_filds (line 600) | def apply_discrete_offset_filds(self, origin_points, offsets):
    method apply_discrete_offset_filds_with_R (line 646) | def apply_discrete_offset_filds_with_R(self, origin_points, offsets, t...
    method apply_se3_fields (line 711) | def apply_se3_fields(
    method apply_offset_fields (line 772) | def apply_offset_fields(self, offset_field, timestamp: float):
    method apply_offset_fields_with_R (line 812) | def apply_offset_fields_with_R(self, offset_field, timestamp: float, e...
    method init_from_mesh (line 879) | def init_from_mesh(
    method detach_grad (line 943) | def detach_grad(
    method apply_mask (line 953) | def apply_mask(self, mask):
    method extract_fields (line 982) | def extract_fields(self, resolution=128, num_blocks=16, relax_ratio=1.5):
    method extract_mesh (line 1073) | def extract_mesh(self, path, density_thresh=1, resolution=128, decimat...
  function gaussian_3d_coeff (line 1109) | def gaussian_3d_coeff(xyzs, covs):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/mesh.py
  function dot (line 8) | def dot(x, y):
  function length (line 12) | def length(x, eps=1e-20):
  function safe_normalize (line 16) | def safe_normalize(x, eps=1e-20):
  class Mesh (line 20) | class Mesh:
    method __init__ (line 21) | def __init__(
    method load (line 49) | def load(
    method load_obj (line 146) | def load_obj(cls, path, albedo_path=None, device=None):
    method load_trimesh (line 296) | def load_trimesh(cls, path, device=None):
    method aabb (line 390) | def aabb(self):
    method auto_size (line 395) | def auto_size(self):
    method auto_normal (line 401) | def auto_normal(self):
    method auto_uv (line 424) | def auto_uv(self, cache_path=None, vmap=True):
    method align_v_to_vt (line 459) | def align_v_to_vt(self, vmapping=None):
    method to (line 476) | def to(self, device):
    method write (line 484) | def write(self, path):
    method write_ply (line 495) | def write_ply(self, path):
    method write_glb (line 503) | def write_glb(self, path):
    method write_obj (line 653) | def write_obj(self, path):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/scene/mesh_utils.py
  function poisson_mesh_reconstruction (line 5) | def poisson_mesh_reconstruction(points, normals=None):
  function decimate_mesh (line 44) | def decimate_mesh(
  function clean_mesh (line 88) | def clean_mesh(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/camera_utils.py
  function loadCam (line 21) | def loadCam(args, id, cam_info, resolution_scale):
  function cameraList_from_camInfos (line 69) | def cameraList_from_camInfos(cam_infos, resolution_scale, args):
  function camera_to_JSON (line 78) | def camera_to_JSON(id, camera: Camera):
  function look_at (line 101) | def look_at(from_point, to_point, up_vector=(0, 1, 0)):
  function create_cameras_around_sphere (line 149) | def create_cameras_around_sphere(

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/general_utils.py
  function inverse_sigmoid (line 18) | def inverse_sigmoid(x):
  function PILtoTorch (line 21) | def PILtoTorch(pil_image, resolution):
  function get_expon_lr_func (line 29) | def get_expon_lr_func(
  function strip_lowerdiag (line 64) | def strip_lowerdiag(L):
  function strip_symmetric (line 75) | def strip_symmetric(sym):
  function build_rotation (line 78) | def build_rotation(r):
  function build_scaling_rotation (line 101) | def build_scaling_rotation(s, r):
  function safe_state (line 112) | def safe_state(silent):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/graphics_utils.py
  class BasicPointCloud (line 17) | class BasicPointCloud(NamedTuple):
  function geom_transform_points (line 22) | def geom_transform_points(points, transf_matrix):
  function getWorld2View (line 31) | def getWorld2View(R, t):
  function getWorld2View2 (line 38) | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
  function getProjectionMatrix (line 51) | def getProjectionMatrix(znear, zfar, fovX, fovY):
  function fov2focal (line 73) | def fov2focal(fov, pixels):
  function focal2fov (line 76) | def focal2fov(focal, pixels):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/image_utils.py
  function mse (line 14) | def mse(img1, img2):
  function psnr (line 17) | def psnr(img1, img2):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/loss_utils.py
  function l1_loss (line 17) | def l1_loss(network_output, gt):
  function l2_loss (line 20) | def l2_loss(network_output, gt):
  function gaussian (line 23) | def gaussian(window_size, sigma):
  function create_window (line 27) | def create_window(window_size, channel):
  function ssim (line 33) | def ssim(img1, img2, window_size=11, size_average=True):
  function _ssim (line 43) | def _ssim(img1, img2, window, window_size, channel, size_average=True):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/rigid_body_utils.py
  function get_rigid_transform (line 5) | def get_rigid_transform(A, B):
  function _test_rigid_transform (line 56) | def _test_rigid_transform():
  function _sqrt_positive_part (line 72) | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
  function matrix_to_quaternion (line 83) | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
  function quternion_to_matrix (line 146) | def quternion_to_matrix(r):
  function standardize_quaternion (line 172) | def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
  function quaternion_multiply (line 188) | def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function _test_matrix_to_quaternion (line 212) | def _test_matrix_to_quaternion():
  function _test_matrix_to_quaternion_2 (line 241) | def _test_matrix_to_quaternion_2():

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/sh_utils.py
  function eval_sh (line 57) | def eval_sh(deg, sh, dirs):
  function RGB2SH (line 114) | def RGB2SH(rgb):
  function SH2RGB (line 117) | def SH2RGB(sh):

FILE: projects/uncleaned_train/motionrep/gaussian_3d/utils/system_utils.py
  function mkdir_p (line 16) | def mkdir_p(folder_path):
  function searchForMaxIteration (line 26) | def searchForMaxIteration(folder):

FILE: projects/uncleaned_train/motionrep/losses/smoothness_loss.py
  function compute_plane_tv (line 5) | def compute_plane_tv(t: torch.Tensor, only_w: bool = False) -> float:
  function compute_plane_smoothness (line 26) | def compute_plane_smoothness(t: torch.Tensor) -> float:

FILE: projects/uncleaned_train/motionrep/operators/dct.py
  function dct1_rfft_impl (line 12) | def dct1_rfft_impl(x):
  function dct_fft_impl (line 16) | def dct_fft_impl(v):
  function idct_irfft_impl (line 20) | def idct_irfft_impl(V):
  function dct (line 24) | def dct(x, norm=None):
  function idct (line 63) | def idct(X, norm=None):
  function dct_3d (line 110) | def dct_3d(x, norm=None):
  function idct_3d (line 127) | def idct_3d(X, norm=None):
  function code_test_dct3d (line 146) | def code_test_dct3d():

FILE: projects/uncleaned_train/motionrep/operators/np_operators.py
  function feature_map_to_rgb_pca (line 7) | def feature_map_to_rgb_pca(feature_map):

FILE: projects/uncleaned_train/motionrep/operators/rotation.py
  function rotation_6d_to_matrix (line 7) | def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
  function matrix_to_rotation_6d (line 31) | def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
  function quaternion_to_matrix (line 50) | def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
  function _sqrt_positive_part (line 82) | def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
  function matrix_to_quaternion (line 93) | def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:

FILE: projects/uncleaned_train/motionrep/utils/camera_utils.py
  function normalize (line 4) | def normalize(x: np.ndarray) -> np.ndarray:
  function viewmatrix (line 9) | def viewmatrix(lookdir: np.ndarray, up: np.ndarray, position: np.ndarray...
  function generate_spiral_path (line 18) | def generate_spiral_path(

FILE: projects/uncleaned_train/motionrep/utils/colmap_utils.py
  function qvec2rotmat (line 43) | def qvec2rotmat(qvec):
  function rotmat2qvec (line 55) | def rotmat2qvec(R):
  class Image (line 68) | class Image(BaseImage):
    method qvec2rotmat (line 69) | def qvec2rotmat(self):
  function read_next_bytes (line 72) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
  function read_points3D_text (line 83) | def read_points3D_text(path):
  function read_points3D_binary (line 125) | def read_points3D_binary(path_to_model_file):
  function read_intrinsics_text (line 156) | def read_intrinsics_text(path):
  function read_extrinsics_binary (line 180) | def read_extrinsics_binary(path_to_model_file):
  function read_intrinsics_binary (line 215) | def read_intrinsics_binary(path_to_model_file):
  function read_extrinsics_text (line 244) | def read_extrinsics_text(path):
  function read_colmap_bin_array (line 273) | def read_colmap_bin_array(path):

FILE: projects/uncleaned_train/motionrep/utils/config.py
  function load_config_with_merge (line 4) | def load_config_with_merge(config_path: str):
  function merge_without_none (line 17) | def merge_without_none(base_cfg, override_cfg):
  function create_config (line 26) | def create_config(config_path, args, cli_args: list = []):

FILE: projects/uncleaned_train/motionrep/utils/dct.py
  function dct1_rfft_impl (line 12) | def dct1_rfft_impl(x):
  function dct_fft_impl (line 16) | def dct_fft_impl(v):
  function idct_irfft_impl (line 20) | def idct_irfft_impl(V):
  function dct (line 24) | def dct(x, norm=None):
  function idct (line 63) | def idct(X, norm=None):
  function dct_3d (line 110) | def dct_3d(x, norm=None):
  function idct_3d (line 127) | def idct_3d(X, norm=None):
  function code_test_dct3d (line 146) | def code_test_dct3d():
  function unwarp_phase (line 164) | def unwarp_phase(phase, frequency_array):
  function get_mag_phase (line 177) | def get_mag_phase(fft_weights, s=3.0 / 16.0):
  function get_fft_from_mag_phase (line 214) | def get_fft_from_mag_phase(mag_phase, s=3.0 / 16.0):
  function get_displacements_from_fft_coeffs (line 245) | def get_displacements_from_fft_coeffs(fft_coe, t, s=3.0 / 16.0):
  function bandpass_filter (line 278) | def bandpass_filter(signal: torch.Tensor, low_cutoff, high_cutoff, fs: i...
  function bandpass_filter_numpy (line 299) | def bandpass_filter_numpy(signal: np.ndarray, low_cutoff, high_cutoff, fs):

FILE: projects/uncleaned_train/motionrep/utils/flow_utils.py
  function flow_to_image (line 4) | def flow_to_image(flow, display=False):
  function make_color_wheel (line 48) | def make_color_wheel():
  function compute_color (line 98) | def compute_color(u, v):

FILE: projects/uncleaned_train/motionrep/utils/img_utils.py
  function make_grid (line 10) | def make_grid(imgs: torch.Tensor, scale=0.5):
  function compute_psnr (line 37) | def compute_psnr(img1, img2, mask=None):
  function torch_rgb_to_gray (line 64) | def torch_rgb_to_gray(image):
  function compute_gradient_loss (line 76) | def compute_gradient_loss(pred, gt, mask=None):
  function mark_image_with_red_squares (line 144) | def mark_image_with_red_squares(img):
  function gaussian (line 157) | def gaussian(window_size, sigma):
  function create_window (line 168) | def create_window(window_size, channel):
  function compute_ssim (line 177) | def compute_ssim(img1, img2, window_size=11, size_average=True):
  function _ssim (line 188) | def _ssim(img1, img2, window, window_size, channel, size_average=True):
  function compute_low_res_psnr (line 223) | def compute_low_res_psnr(img1, img2, scale_factor):
  function compute_low_res_mse (line 239) | def compute_low_res_mse(img1, img2, scale_factor):

FILE: projects/uncleaned_train/motionrep/utils/io_utils.py
  function read_video_cv2 (line 9) | def read_video_cv2(video_path, rgb=True):
  function save_video_cv2 (line 31) | def save_video_cv2(video_path, img_list, fps):
  function save_video_imageio (line 47) | def save_video_imageio(video_path, img_list, fps):
  function save_gif_imageio (line 60) | def save_gif_imageio(video_path, img_list, fps):
  function save_video_mediapy (line 71) | def save_video_mediapy(video_frames, output_video_path: str = None, fps:...

FILE: projects/uncleaned_train/motionrep/utils/optimizer.py
  function get_linear_schedule_with_warmup (line 5) | def get_linear_schedule_with_warmup(

FILE: projects/uncleaned_train/motionrep/utils/peft_utils.py
  function save_peft_adaptor (line 7) | def save_peft_adaptor(model: peft.PeftModel, dir, save_base_model=False):
  function load_peft_adaptor_and_merge (line 15) | def load_peft_adaptor_and_merge(adaptor_path, base_model):
  function _code_test_peft_load_save (line 22) | def _code_test_peft_load_save():

FILE: projects/uncleaned_train/motionrep/utils/print_utils.py
  function print_if_zero_rank (line 4) | def print_if_zero_rank(s):

FILE: projects/uncleaned_train/motionrep/utils/pytorch_mssim.py
  function gaussian (line 8) | def gaussian(window_size, sigma):
  function create_window (line 13) | def create_window(window_size, channel=1):
  function create_window_3d (line 19) | def create_window_3d(window_size, channel=1):
  function ssim (line 27) | def ssim(img1, img2, window_size=11, window=None, size_average=True, ful...
  function ssim_matlab (line 81) | def ssim_matlab(img1, img2, window_size=11, window=None, size_average=Tr...
  function msssim (line 141) | def msssim(img1, img2, window_size=11, size_average=True, val_range=None...
  class SSIM (line 171) | class SSIM(torch.nn.Module):
    method __init__ (line 172) | def __init__(self, window_size=11, size_average=True, val_range=None):
    method forward (line 182) | def forward(self, img1, img2):
  class MSSSIM (line 196) | class MSSSIM(torch.nn.Module):
    method __init__ (line 197) | def __init__(self, window_size=11, size_average=True, channel=3):
    method forward (line 203) | def forward(self, img1, img2):

FILE: projects/uncleaned_train/motionrep/utils/svd_helpper.py
  function init_st (line 18) | def init_st(version_dict, load_ckpt=True, load_filter=True):
  function load_model_from_config (line 38) | def load_model_from_config(config, ckpt=None, verbose=True):
  function load_model (line 72) | def load_model(model):
  function set_lowvram_mode (line 79) | def set_lowvram_mode(mode):
  function initial_model_load (line 84) | def initial_model_load(model):
  function unload_model (line 93) | def unload_model(model):
  function get_unique_embedder_keys_from_conditioner (line 100) | def get_unique_embedder_keys_from_conditioner(conditioner):
  function get_batch (line 104) | def get_batch(keys, value_dict, N, T, device):

FILE: projects/uncleaned_train/motionrep/utils/torch_utils.py
  function get_sync_time (line 5) | def get_sync_time():

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/convert_gaussian_to_mesh.py
  function convert_gaussian_to_mesh (line 9) | def convert_gaussian_to_mesh(gaussian_path, save_path=None):
  function internal_filling (line 33) | def internal_filling(gaussian_path, save_path=None, resolution=64):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/diff_warp_utils.py
  class MPMStateStruct (line 10) | class MPMStateStruct(object):
    method init (line 40) | def init(
    method init_grid (line 104) | def init_grid(
    method from_torch (line 126) | def from_torch(
    method reset_state (line 182) | def reset_state(
    method set_require_grad (line 250) | def set_require_grad(self, requires_grad=True):
  class ParticleStateStruct (line 262) | class ParticleStateStruct(object):
    method init (line 280) | def init(
    method from_torch (line 322) | def from_torch(
    method set_require_grad (line 392) | def set_require_grad(self, requires_grad=True):
  class MPMModelStruct (line 401) | class MPMModelStruct(object):
    method init (line 434) | def init(
    method finalize_mu_lam (line 458) | def finalize_mu_lam(self, n_particles, device="cuda:0"):
    method init_other_params (line 466) | def init_other_params(self, n_grid=100, grid_lim=1.0, device="cuda:0"):
    method from_torch (line 496) | def from_torch(
    method set_require_grad (line 504) | def set_require_grad(self, requires_grad=True):
  class Dirichlet_collider (line 513) | class Dirichlet_collider:
  class Impulse_modifier (line 547) | class Impulse_modifier:
  class MPMtailoredStruct (line 563) | class MPMtailoredStruct:
  class MaterialParamsModifier (line 589) | class MaterialParamsModifier:
  class ParticleVelocityModifier (line 598) | class ParticleVelocityModifier:
  function compute_mu_lam_from_E_nu_clean (line 620) | def compute_mu_lam_from_E_nu_clean(
  function set_vec3_to_zero (line 632) | def set_vec3_to_zero(target_array: wp.array(dtype=wp.vec3)):
  function set_mat33_to_identity (line 638) | def set_mat33_to_identity(target_array: wp.array(dtype=wp.mat33)):
  function set_mat33_to_zero (line 644) | def set_mat33_to_zero(target_array: wp.array(dtype=wp.mat33)):
  function add_identity_to_mat33 (line 650) | def add_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function subtract_identity_to_mat33 (line 658) | def subtract_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function add_vec3_to_vec3 (line 666) | def add_vec3_to_vec3(
  function set_value_to_float_array (line 674) | def set_value_to_float_array(target_array: wp.array(dtype=float), value:...
  function set_warpvalue_to_float_array (line 680) | def set_warpvalue_to_float_array(
  function get_float_array_product (line 688) | def get_float_array_product(
  function torch2warp_quat (line 697) | def torch2warp_quat(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_float (line 718) | def torch2warp_float(t, copy=False, dtype=warp.types.float32, dvc="cuda:...
  function torch2warp_vec3 (line 738) | def torch2warp_vec3(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_mat33 (line 759) | def torch2warp_mat33(t, copy=False, dtype=warp.types.float32, dvc="cuda:...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/engine_utils.py
  function save_data_at_frame (line 8) | def save_data_at_frame(mpm_solver, dir_name, frame, save_to_ply = True, ...
  function particle_position_to_ply (line 38) | def particle_position_to_ply(mpm_solver, filename):
  function particle_position_tensor_to_ply (line 58) | def particle_position_tensor_to_ply(position_tensor, filename):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/grad_test.py
  function test (line 16) | def test(input_dir, output_dir=None, fps=6, device=0):
  function position_loss_kernel (line 183) | def position_loss_kernel(mpm_state: MPMStateStruct, loss: wp.array(dtype...
  function g2p_test (line 192) | def g2p_test(state: MPMStateStruct, model: MPMModelStruct, dt: float):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_solver_warp.py
  class MPM_Simulator_WARP (line 12) | class MPM_Simulator_WARP:
    method __init__ (line 13) | def __init__(self, n_particles, n_grid=100, grid_lim=1.0, device="cuda...
    method initialize (line 17) | def initialize(self, n_particles, n_grid=100, grid_lim=1.0, device="cu...
    method load_from_sampling (line 145) | def load_from_sampling(
    method load_initial_data_from_torch (line 196) | def load_initial_data_from_torch(
    method set_parameters (line 251) | def set_parameters(self, device="cuda:0", **kwargs):
    method set_parameters_dict (line 254) | def set_parameters_dict(self, kwargs={}, device="cuda:0"):
    method finalize_mu_lam (line 389) | def finalize_mu_lam(self, device="cuda:0"):
    method p2g2p (line 397) | def p2g2p(self, step, dt, device="cuda:0"):
    method reset_densities_and_update_masses (line 520) | def reset_densities_and_update_masses(
    method import_particle_x_from_torch (line 539) | def import_particle_x_from_torch(self, tensor_x, clone=True, device="c...
    method import_particle_v_from_torch (line 546) | def import_particle_v_from_torch(self, tensor_v, clone=True, device="c...
    method import_particle_F_from_torch (line 553) | def import_particle_F_from_torch(self, tensor_F, clone=True, device="c...
    method import_particle_C_from_torch (line 561) | def import_particle_C_from_torch(self, tensor_C, clone=True, device="c...
    method export_particle_x_to_torch (line 568) | def export_particle_x_to_torch(self):
    method export_particle_v_to_torch (line 571) | def export_particle_v_to_torch(self):
    method export_particle_F_to_torch (line 574) | def export_particle_F_to_torch(self):
    method export_particle_R_to_torch (line 579) | def export_particle_R_to_torch(self, device="cuda:0"):
    method export_particle_C_to_torch (line 597) | def export_particle_C_to_torch(self):
    method export_particle_cov_to_torch (line 602) | def export_particle_cov_to_torch(self, device="cuda:0"):
    method print_time_profile (line 620) | def print_time_profile(self):
    method add_surface_collider (line 626) | def add_surface_collider(
    method set_velocity_on_cuboid (line 730) | def set_velocity_on_cuboid(
    method add_bounding_box (line 787) | def add_bounding_box(self, start_time=0.0, end_time=999.0):
    method add_impulse_on_particles (line 859) | def add_impulse_on_particles(
    method enforce_particle_velocity_translation (line 908) | def enforce_particle_velocity_translation(
    method enforce_particle_velocity_rotation (line 956) | def enforce_particle_velocity_rotation(
    method release_particles_sequentially (line 1060) | def release_particles_sequentially(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_solver_warp_diff.py
  class MPM_Simulator_WARPDiff (line 13) | class MPM_Simulator_WARPDiff(object):
    method __init__ (line 18) | def __init__(self, n_particles, n_grid=100, grid_lim=1.0, device="cuda...
    method initialize (line 22) | def initialize(self, n_particles, n_grid=100, grid_lim=1.0, device="cu...
    method set_parameters (line 39) | def set_parameters(self, device="cuda:0", **kwargs):
    method set_parameters_dict (line 42) | def set_parameters_dict(self, mpm_model, mpm_state, kwargs={}, device=...
    method set_E_nu (line 108) | def set_E_nu(self, mpm_model, E: float, nu: float, device="cuda:0"):
    method p2g2p (line 123) | def p2g2p(self, mpm_model, mpm_state, step, dt, device="cuda:0"):
    method print_time_profile (line 254) | def print_time_profile(self):
    method add_surface_collider (line 260) | def add_surface_collider(
    method set_velocity_on_cuboid (line 364) | def set_velocity_on_cuboid(
    method add_bounding_box (line 421) | def add_bounding_box(self, start_time=0.0, end_time=999.0):
    method add_impulse_on_particles (line 493) | def add_impulse_on_particles(
    method enforce_particle_velocity_translation (line 543) | def enforce_particle_velocity_translation(
    method enforce_particle_velocity_rotation (line 591) | def enforce_particle_velocity_rotation(
    method release_particles_sequentially (line 696) | def release_particles_sequentially(
    method enforce_particle_velocity_by_mask (line 723) | def enforce_particle_velocity_by_mask(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_utils.py
  function kirchoff_stress_FCR (line 9) | def kirchoff_stress_FCR(
  function kirchoff_stress_neoHookean (line 19) | def kirchoff_stress_neoHookean(
  function kirchoff_stress_StVK (line 41) | def kirchoff_stress_StVK(
  function kirchoff_stress_drucker_prager (line 60) | def kirchoff_stress_drucker_prager(
  function von_mises_return_mapping (line 78) | def von_mises_return_mapping(F_trial: wp.mat33, model: MPMModelStruct, p...
  function von_mises_return_mapping_with_damage (line 124) | def von_mises_return_mapping_with_damage(
  function viscoplasticity_return_mapping_with_StVK (line 181) | def viscoplasticity_return_mapping_with_StVK(
  function sand_return_mapping (line 228) | def sand_return_mapping(
  function compute_mu_lam_from_E_nu (line 268) | def compute_mu_lam_from_E_nu(state: MPMStateStruct, model: MPMModelStruct):
  function zero_grid (line 277) | def zero_grid(state: MPMStateStruct, model: MPMModelStruct):
  function compute_dweight (line 285) | def compute_dweight(
  function update_cov (line 297) | def update_cov(state: MPMStateStruct, p: int, grad_v: wp.mat33, dt: float):
  function p2g_apic_with_stress (line 320) | def p2g_apic_with_stress(state: MPMStateStruct, model: MPMModelStruct, d...
  function grid_normalization_and_gravity (line 389) | def grid_normalization_and_gravity(
  function g2p (line 403) | def g2p(state: MPMStateStruct, model: MPMModelStruct, dt: float):
  function compute_stress_from_F_trial (line 457) | def compute_stress_from_F_trial(
  function compute_cov_from_F (line 512) | def compute_cov_from_F(state: MPMStateStruct, model: MPMModelStruct):
  function compute_R_from_F (line 539) | def compute_R_from_F(state: MPMStateStruct, model: MPMModelStruct):
  function add_damping_via_grid (line 566) | def add_damping_via_grid(state: MPMStateStruct, scale: float):
  function apply_additional_params (line 574) | def apply_additional_params(
  function selection_add_impulse_on_particles (line 595) | def selection_add_impulse_on_particles(
  function selection_enforce_particle_velocity_translation (line 611) | def selection_enforce_particle_velocity_translation(
  function selection_enforce_particle_velocity_cylinder (line 627) | def selection_enforce_particle_velocity_cylinder(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/run_gaussian.py
  function load_gaussians (line 12) | def load_gaussians(input_dir: str = None):
  function init_volume (line 42) | def init_volume(xyz, grid=[-1, 1], num_grid=20):
  function run_mpm_gaussian (line 46) | def run_mpm_gaussian(input_dir, output_dir=None, fps=6, device=0):
  function code_test (line 165) | def code_test(input_dir, device=0):
  function render_gaussians (line 177) | def render_gaussians(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/run_gaussian_static.py
  function load_gaussians (line 13) | def load_gaussians(input_dir: str = None):
  function get_volume (line 68) | def get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:
  function run_mpm_gaussian (line 99) | def run_mpm_gaussian(input_dir, output_dir=None, fps=6, device=0):
  function code_test (line 260) | def code_test(input_dir, device=0):
  function render_gaussians (line 272) | def render_gaussians(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/sim_grad.py
  class MyTape (line 16) | class MyTape(wp.Tape):
    method get_adjoint (line 19) | def get_adjoint(self, a):
  function test (line 56) | def test(input_dir, output_dir=None, fps=6, device=0):
  function g2p_test (line 171) | def g2p_test(state: MPMStateStruct, model: MPMModelStruct, dt: float):
  function position_loss_kernel (line 224) | def position_loss_kernel(mpm_state: MPMStateStruct, loss: wp.array(dtype...
  function position_loss_kernel_raw (line 233) | def position_loss_kernel_raw(particle_x: wp.array(dtype=wp.vec3), loss: ...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/solver_grad_test.py
  function test (line 17) | def test(input_dir, output_dir=None, fps=6, device=0):
  function position_loss_kernel (line 152) | def position_loss_kernel(mpm_state: MPMStateStruct, loss: wp.array(dtype...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/test_inverse_sim.py
  function test (line 18) | def test(
  function position_loss_kernel (line 211) | def position_loss_kernel(
  function step_kernel (line 228) | def step_kernel(x: wp.array(dtype=float), grad: wp.array(dtype=float), a...
  function aggregate_grad (line 236) | def aggregate_grad(x: wp.array(dtype=float), grad: wp.array(dtype=float)):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/test_sim.py
  class MPMStateStruct (line 6) | class MPMStateStruct:
  class MPM_Simulator_WARPDiff (line 18) | class MPM_Simulator_WARPDiff:
    method __init__ (line 19) | def __init__(self, x, v, vol, device):
  function vec3_add (line 30) | def vec3_add(mpm_state: MPMStateStruct, selection: wp.array(dtype=wp.flo...
  function loss_kernel (line 47) | def loss_kernel(mpm_state: MPMStateStruct,  loss: wp.array(dtype=float)):
  function main (line 56) | def main():

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/warp_rewrite.py
  function from_torch_safe (line 13) | def from_torch_safe(t, dtype=None, requires_grad=None, grad=None):
  class MyTape (line 93) | class MyTape(wp.Tape):
    method get_adjoint (line 95) | def get_adjoint(self, a):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup/warp_utils.py
  class MPMModelStruct (line 7) | class MPMModelStruct:
  class MPMStateStruct (line 42) | class MPMStateStruct:
  class Dirichlet_collider (line 73) | class Dirichlet_collider:
  class Impulse_modifier (line 108) | class Impulse_modifier:
  class MPMtailoredStruct (line 124) | class MPMtailoredStruct:
  class MaterialParamsModifier (line 149) | class MaterialParamsModifier:
  class ParticleVelocityModifier (line 157) | class ParticleVelocityModifier:
  function set_vec3_to_zero (line 181) | def set_vec3_to_zero(target_array: wp.array(dtype=wp.vec3)):
  function set_mat33_to_identity (line 187) | def set_mat33_to_identity(target_array: wp.array(dtype=wp.mat33)):
  function add_identity_to_mat33 (line 193) | def add_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function subtract_identity_to_mat33 (line 201) | def subtract_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function add_vec3_to_vec3 (line 209) | def add_vec3_to_vec3(
  function set_value_to_float_array (line 217) | def set_value_to_float_array(target_array: wp.array(dtype=float), value:...
  function set_warpvalue_to_float_array (line 223) | def set_warpvalue_to_float_array(target_array: wp.array(dtype=float), va...
  function get_float_array_product (line 229) | def get_float_array_product(
  function torch2warp_quat (line 238) | def torch2warp_quat(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_float (line 258) | def torch2warp_float(t, copy=False, dtype=warp.types.float32, dvc="cuda:...
  function torch2warp_vec3 (line 277) | def torch2warp_vec3(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_mat33 (line 298) | def torch2warp_mat33(t, copy=False, dtype=warp.types.float32, dvc="cuda:...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/gaussian_sim_utils.py
  function get_volume (line 3) | def get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_data_structure.py
  class MPMStateStruct (line 13) | class MPMStateStruct(object):
    method init (line 43) | def init(
    method init_grid (line 107) | def init_grid(
    method from_torch (line 129) | def from_torch(
    method reset_state (line 185) | def reset_state(
    method set_require_grad (line 274) | def set_require_grad(self, requires_grad=True):
    method reset_density (line 284) | def reset_density(self, tensor_density: Tensor,
  class ParticleStateStruct (line 311) | class ParticleStateStruct(object):
    method init (line 329) | def init(
    method from_torch (line 371) | def from_torch(
    method set_require_grad (line 441) | def set_require_grad(self, requires_grad=True):
  class MPMModelStruct (line 450) | class MPMModelStruct(object):
    method init (line 483) | def init(
    method finalize_mu_lam (line 507) | def finalize_mu_lam(self, n_particles, device="cuda:0"):
    method init_other_params (line 515) | def init_other_params(self, n_grid=100, grid_lim=1.0, device="cuda:0"):
    method from_torch (line 545) | def from_torch(
    method set_require_grad (line 553) | def set_require_grad(self, requires_grad=True):
  class Dirichlet_collider (line 562) | class Dirichlet_collider:
  class Impulse_modifier (line 596) | class Impulse_modifier:
  class MPMtailoredStruct (line 612) | class MPMtailoredStruct:
  class MaterialParamsModifier (line 638) | class MaterialParamsModifier:
  class ParticleVelocityModifier (line 647) | class ParticleVelocityModifier:
  function compute_mu_lam_from_E_nu_clean (line 669) | def compute_mu_lam_from_E_nu_clean(
  function set_vec3_to_zero (line 681) | def set_vec3_to_zero(target_array: wp.array(dtype=wp.vec3)):
  function set_vec3_to_vec3 (line 686) | def set_vec3_to_vec3(source_array: wp.array(dtype=wp.vec3), target_array...
  function set_float_vec_to_vec_wmask (line 691) | def set_float_vec_to_vec_wmask(source_array: wp.array(dtype=float), targ...
  function set_float_vec_to_vec (line 697) | def set_float_vec_to_vec(source_array: wp.array(dtype=float), target_arr...
  function set_mat33_to_identity (line 704) | def set_mat33_to_identity(target_array: wp.array(dtype=wp.mat33)):
  function set_mat33_to_zero (line 710) | def set_mat33_to_zero(target_array: wp.array(dtype=wp.mat33)):
  function add_identity_to_mat33 (line 716) | def add_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function subtract_identity_to_mat33 (line 724) | def subtract_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function add_vec3_to_vec3 (line 732) | def add_vec3_to_vec3(
  function set_value_to_float_array (line 740) | def set_value_to_float_array(target_array: wp.array(dtype=float), value:...
  function set_warpvalue_to_float_array (line 746) | def set_warpvalue_to_float_array(
  function get_float_array_product (line 754) | def get_float_array_product(
  function torch2warp_quat (line 763) | def torch2warp_quat(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_float (line 784) | def torch2warp_float(t, copy=False, dtype=warp.types.float32, dvc="cuda:...
  function torch2warp_vec3 (line 804) | def torch2warp_vec3(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_mat33 (line 825) | def torch2warp_mat33(t, copy=False, dtype=warp.types.float32, dvc="cuda:...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_solver_diff.py
  class MPMWARPDiff (line 12) | class MPMWARPDiff(object):
    method __init__ (line 17) | def __init__(self, n_particles, n_grid=100, grid_lim=1.0, device="cuda...
    method initialize (line 21) | def initialize(self, n_particles, n_grid=100, grid_lim=1.0, device="cu...
    method set_parameters (line 38) | def set_parameters(self, device="cuda:0", **kwargs):
    method set_parameters_dict (line 41) | def set_parameters_dict(self, mpm_model, mpm_state, kwargs={}, device=...
    method set_E_nu (line 107) | def set_E_nu(self, mpm_model, E: float, nu: float, device="cuda:0"):
    method p2g2p (line 139) | def p2g2p(self, mpm_model, mpm_state, step, dt, device="cuda:0"):
    method print_time_profile (line 284) | def print_time_profile(self):
    method add_surface_collider (line 290) | def add_surface_collider(
    method set_velocity_on_cuboid (line 394) | def set_velocity_on_cuboid(
    method add_bounding_box (line 451) | def add_bounding_box(self, start_time=0.0, end_time=999.0):
    method add_impulse_on_particles (line 523) | def add_impulse_on_particles(
    method enforce_particle_velocity_translation (line 573) | def enforce_particle_velocity_translation(
    method enforce_particle_velocity_rotation (line 621) | def enforce_particle_velocity_rotation(
    method release_particles_sequentially (line 726) | def release_particles_sequentially(
    method enforce_particle_velocity_by_mask (line 753) | def enforce_particle_velocity_by_mask(
    method restart_and_compute_F_C (line 792) | def restart_and_compute_F_C(self, mpm_model, mpm_state, target_pos, de...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_utils.py
  function kirchoff_stress_FCR (line 9) | def kirchoff_stress_FCR(
  function kirchoff_stress_neoHookean (line 19) | def kirchoff_stress_neoHookean(
  function kirchoff_stress_StVK (line 41) | def kirchoff_stress_StVK(
  function kirchoff_stress_drucker_prager (line 60) | def kirchoff_stress_drucker_prager(
  function von_mises_return_mapping (line 78) | def von_mises_return_mapping(F_trial: wp.mat33, model: MPMModelStruct, p...
  function von_mises_return_mapping_with_damage (line 124) | def von_mises_return_mapping_with_damage(
  function viscoplasticity_return_mapping_with_StVK (line 181) | def viscoplasticity_return_mapping_with_StVK(
  function sand_return_mapping (line 228) | def sand_return_mapping(
  function compute_mu_lam_from_E_nu (line 268) | def compute_mu_lam_from_E_nu(state: MPMStateStruct, model: MPMModelStruct):
  function zero_grid (line 277) | def zero_grid(state: MPMStateStruct, model: MPMModelStruct):
  function compute_dweight (line 285) | def compute_dweight(
  function update_cov (line 297) | def update_cov(state: MPMStateStruct, p: int, grad_v: wp.mat33, dt: float):
  function p2g_apic_with_stress (line 320) | def p2g_apic_with_stress(state: MPMStateStruct, model: MPMModelStruct, d...
  function grid_normalization_and_gravity (line 389) | def grid_normalization_and_gravity(
  function g2p (line 403) | def g2p(state: MPMStateStruct, model: MPMModelStruct, dt: float):
  function clip_particle_x (line 464) | def clip_particle_x(state: MPMStateStruct, model: MPMModelStruct):
  function compute_stress_from_F_trial (line 484) | def compute_stress_from_F_trial(
  function compute_cov_from_F (line 539) | def compute_cov_from_F(state: MPMStateStruct, model: MPMModelStruct):
  function compute_R_from_F (line 566) | def compute_R_from_F(state: MPMStateStruct, model: MPMModelStruct):
  function add_damping_via_grid (line 593) | def add_damping_via_grid(state: MPMStateStruct, scale: float):
  function apply_additional_params (line 602) | def apply_additional_params(
  function selection_add_impulse_on_particles (line 623) | def selection_add_impulse_on_particles(
  function selection_enforce_particle_velocity_translation (line 639) | def selection_enforce_particle_velocity_translation(
  function selection_enforce_particle_velocity_cylinder (line 655) | def selection_enforce_particle_velocity_cylinder(
  function compute_position_l2_loss (line 675) | def compute_position_l2_loss(
  function aggregate_grad (line 691) | def aggregate_grad(x: wp.array(dtype=float), grad: wp.array(dtype=float)):
  function set_F_C_p2g (line 699) | def set_F_C_p2g(state: MPMStateStruct, model: MPMModelStruct, target_pos...
  function set_F_C_g2p (line 735) | def set_F_C_g2p(state: MPMStateStruct, model: MPMModelStruct):
  function compute_posloss_with_grad (line 785) | def compute_posloss_with_grad(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/warp_utils.py
  function from_torch_safe (line 13) | def from_torch_safe(t, dtype=None, requires_grad=None, grad=None):
  class MyTape (line 93) | class MyTape(wp.Tape):
    method get_adjoint (line 95) | def get_adjoint(self, a):
  class CondTape (line 131) | class CondTape(object):
    method __init__ (line 132) | def __init__(self, tape: Optional[MyTape], cond: bool = True) -> None:
    method __enter__ (line 136) | def __enter__(self):
    method __exit__ (line 140) | def __exit__(self, exc_type, exc_value, traceback):

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/gaussian_sim_utils.py
  function get_volume (line 4) | def get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_data_structure.py
  class MPMStateStruct (line 14) | class MPMStateStruct(object):
    method init (line 41) | def init(
    method init_grid (line 97) | def init_grid(
    method from_torch (line 119) | def from_torch(
    method reset_state (line 174) | def reset_state(
    method continue_from_torch (line 257) | def continue_from_torch(
    method set_require_grad (line 294) | def set_require_grad(self, requires_grad=True):
    method reset_density (line 305) | def reset_density(
    method partial_clone (line 356) | def partial_clone(self, device="cuda:0", requires_grad=True):
  class MPMModelStruct (line 391) | class MPMModelStruct(object):
    method init (line 424) | def init(
    method finalize_mu_lam (line 448) | def finalize_mu_lam(self, n_particles, device="cuda:0"):
    method init_other_params (line 456) | def init_other_params(self, n_grid=100, grid_lim=1.0, device="cuda:0"):
    method from_torch (line 486) | def from_torch(
    method set_require_grad (line 494) | def set_require_grad(self, requires_grad=True):
  class Dirichlet_collider (line 503) | class Dirichlet_collider:
  class GridCollider (line 537) | class GridCollider:
  class Impulse_modifier (line 548) | class Impulse_modifier:
  class MPMtailoredStruct (line 564) | class MPMtailoredStruct:
  class MaterialParamsModifier (line 590) | class MaterialParamsModifier:
  class ParticleVelocityModifier (line 599) | class ParticleVelocityModifier:
  function compute_mu_lam_from_E_nu_clean (line 621) | def compute_mu_lam_from_E_nu_clean(
  function set_vec3_to_zero (line 633) | def set_vec3_to_zero(target_array: wp.array(dtype=wp.vec3)):
  function set_vec3_to_vec3 (line 639) | def set_vec3_to_vec3(
  function set_float_vec_to_vec_wmask (line 647) | def set_float_vec_to_vec_wmask(
  function set_float_vec_to_vec (line 658) | def set_float_vec_to_vec(
  function set_mat33_to_identity (line 666) | def set_mat33_to_identity(target_array: wp.array(dtype=wp.mat33)):
  function set_mat33_to_zero (line 672) | def set_mat33_to_zero(target_array: wp.array(dtype=wp.mat33)):
  function add_identity_to_mat33 (line 678) | def add_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function subtract_identity_to_mat33 (line 686) | def subtract_identity_to_mat33(target_array: wp.array(dtype=wp.mat33)):
  function add_vec3_to_vec3 (line 694) | def add_vec3_to_vec3(
  function set_value_to_float_array (line 702) | def set_value_to_float_array(target_array: wp.array(dtype=float), value:...
  function set_warpvalue_to_float_array (line 708) | def set_warpvalue_to_float_array(
  function get_float_array_product (line 716) | def get_float_array_product(
  function torch2warp_quat (line 725) | def torch2warp_quat(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_float (line 746) | def torch2warp_float(t, copy=False, dtype=warp.types.float32, dvc="cuda:...
  function torch2warp_vec3 (line 766) | def torch2warp_vec3(t, copy=False, dtype=warp.types.float32, dvc="cuda:0"):
  function torch2warp_mat33 (line 787) | def torch2warp_mat33(t, copy=False, dtype=warp.types.float32, dvc="cuda:...

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_solver_diff.py
  class MPMWARPDiff (line 13) | class MPMWARPDiff(object):
    method __init__ (line 18) | def __init__(self, n_particles, n_grid=100, grid_lim=1.0, device="cuda...
    method initialize (line 22) | def initialize(self, n_particles, n_grid=100, grid_lim=1.0, device="cu...
    method set_parameters (line 39) | def set_parameters(self, device="cuda:0", **kwargs):
    method set_parameters_dict (line 42) | def set_parameters_dict(self, mpm_model, mpm_state, kwargs={}, device=...
    method set_E_nu (line 110) | def set_E_nu(self, mpm_model, E: float, nu: float, device="cuda:0"):
    method set_E_nu_from_torch (line 141) | def set_E_nu_from_torch(
    method prepare_mu_lam (line 160) | def prepare_mu_lam(self, mpm_model, mpm_state, device="cuda:0"):
    method p2g2p_differentiable (line 169) | def p2g2p_differentiable(
    method p2g2p (line 297) | def p2g2p(self, mpm_model, mpm_state, step, dt, device="cuda:0"):
    method print_time_profile (line 435) | def print_time_profile(self):
    method add_surface_collider (line 441) | def add_surface_collider(
    method set_velocity_on_cuboid (line 545) | def set_velocity_on_cuboid(
    method add_bounding_box (line 602) | def add_bounding_box(self, start_time=0.0, end_time=999.0):
    method add_impulse_on_particles (line 674) | def add_impulse_on_particles(
    method enforce_particle_velocity_translation (line 724) | def enforce_particle_velocity_translation(
    method enforce_particle_velocity_rotation (line 772) | def enforce_particle_velocity_rotation(
    method release_particles_sequentially (line 877) | def release_particles_sequentially(
    method enforce_particle_velocity_by_mask (line 904) | def enforce_particle_velocity_by_mask(
    method restart_and_compute_F_C (line 945) | def restart_and_compute_F_C(self, mpm_model, mpm_state, target_pos, de...
    method enforce_grid_velocity_by_mask (line 995) | def enforce_grid_velocity_by_mask(
    method add_impulse_on_particles_with_mask (line 1025) | def add_impulse_on_particles_with_mask(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_utils.py
  function kirchoff_stress_FCR (line 9) | def kirchoff_stress_FCR(
  function kirchoff_stress_neoHookean (line 19) | def kirchoff_stress_neoHookean(
  function kirchoff_stress_StVK (line 51) | def kirchoff_stress_StVK(
  function kirchoff_stress_drucker_prager (line 70) | def kirchoff_stress_drucker_prager(
  function von_mises_return_mapping (line 88) | def von_mises_return_mapping(F_trial: wp.mat33, model: MPMModelStruct, p...
  function von_mises_return_mapping_with_damage (line 134) | def von_mises_return_mapping_with_damage(
  function viscoplasticity_return_mapping_with_StVK (line 191) | def viscoplasticity_return_mapping_with_StVK(
  function sand_return_mapping (line 238) | def sand_return_mapping(
  function compute_mu_lam_from_E_nu (line 278) | def compute_mu_lam_from_E_nu(state: MPMStateStruct, model: MPMModelStruct):
  function zero_grid (line 287) | def zero_grid(state: MPMStateStruct, model: MPMModelStruct):
  function compute_dweight (line 295) | def compute_dweight(
  function update_cov (line 307) | def update_cov(state: MPMStateStruct, p: int, grad_v: wp.mat33, dt: float):
  function update_cov_differentiable (line 330) | def update_cov_differentiable(
  function p2g_apic_with_stress (line 359) | def p2g_apic_with_stress(state: MPMStateStruct, model: MPMModelStruct, d...
  function grid_normalization_and_gravity (line 428) | def grid_normalization_and_gravity(
  function g2p (line 442) | def g2p(state: MPMStateStruct, model: MPMModelStruct, dt: float):
  function g2p_differentiable (line 503) | def g2p_differentiable(
  function clip_particle_x (line 583) | def clip_particle_x(state: MPMStateStruct, model: MPMModelStruct):
  function compute_stress_from_F_trial (line 604) | def compute_stress_from_F_trial(
  function add_damping_via_grid (line 723) | def add_damping_via_grid(state: MPMStateStruct, scale: float):
  function apply_additional_params (line 738) | def apply_additional_params(
  function selection_add_impulse_on_particles (line 759) | def selection_add_impulse_on_particles(
  function selection_enforce_particle_velocity_translation (line 775) | def selection_enforce_particle_velocity_translation(
  function selection_enforce_particle_velocity_cylinder (line 791) | def selection_enforce_particle_velocity_cylinder(
  function compute_position_l2_loss (line 812) | def compute_position_l2_loss(
  function aggregate_grad (line 829) | def aggregate_grad(x: wp.array(dtype=float), grad: wp.array(dtype=float)):
  function set_F_C_p2g (line 837) | def set_F_C_p2g(
  function set_F_C_g2p (line 874) | def set_F_C_g2p(state: MPMStateStruct, model: MPMModelStruct):
  function compute_posloss_with_grad (line 923) | def compute_posloss_with_grad(
  function compute_veloloss_with_grad (line 943) | def compute_veloloss_with_grad(
  function compute_Floss_with_grad (line 964) | def compute_Floss_with_grad(
  function compute_Closs_with_grad (line 997) | def compute_Closs_with_grad(

FILE: projects/uncleaned_train/thirdparty_code/warp_mpm/warp_utils.py
  function from_torch_safe (line 13) | def from_torch_safe(t, dtype=None, requires_grad=None, grad=None):
  class MyTape (line 93) | class MyTape(wp.Tape):
    method get_adjoint (line 95) | def get_adjoint(self, a):
  class CondTape (line 131) | class CondTape(object):
    method __init__ (line 132) | def __init__(self, tape: Optional[MyTape], cond: bool = True) -> None:
    method __enter__ (line 136) | def __enter__(self):
    method __exit__ (line 140) | def __exit__(self, exc_type, exc_value, traceback):
Condensed preview — 176 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,561K chars).
[
  {
    "path": ".gitignore",
    "chars": 36,
    "preview": "*.pyc\n\nmodels/\ndata/\noutput/\nwandb/\n"
  },
  {
    "path": "README.md",
    "chars": 1565,
    "preview": "# PhysDreamer: Physics-Based Interaction with 3D Objects via Video Generation [[website](https://physdreamer.github.io/)"
  },
  {
    "path": "physdreamer/field_components/encoding.py",
    "chars": 10601,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/field_components/mlp.py",
    "chars": 3190,
    "preview": "\"\"\"\nMostly from nerfstudio: https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/field_components/mlp.p"
  },
  {
    "path": "physdreamer/fields/mul_offset_field.py",
    "chars": 4131,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/fields/mul_se3_field.py",
    "chars": 4648,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/fields/offset_field.py",
    "chars": 4249,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/fields/se3_field.py",
    "chars": 4682,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/fields/triplane_field.py",
    "chars": 4496,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "physdreamer/gaussian_3d/README.md",
    "chars": 154,
    "preview": "This folder is mainly a copy paste from https://github.com/graphdeco-inria/gaussian-splatting\n\nWe add some function to r"
  },
  {
    "path": "physdreamer/gaussian_3d/arguments/__init__.py",
    "chars": 3782,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/gaussian_renderer/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "physdreamer/gaussian_3d/gaussian_renderer/depth_uv_render.py",
    "chars": 4081,
    "preview": "import torch\nfrom physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_rast"
  },
  {
    "path": "physdreamer/gaussian_3d/gaussian_renderer/feat_render.py",
    "chars": 3189,
    "preview": "import torch\nfrom physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_rast"
  },
  {
    "path": "physdreamer/gaussian_3d/gaussian_renderer/flow_depth_render.py",
    "chars": 4353,
    "preview": "import torch\nfrom physdreamer.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_rast"
  },
  {
    "path": "physdreamer/gaussian_3d/gaussian_renderer/render.py",
    "chars": 10124,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/__init__.py",
    "chars": 4475,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/cameras.py",
    "chars": 3024,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/colmap_loader.py",
    "chars": 11722,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/dataset_readers.py",
    "chars": 14656,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/gaussian_model.py",
    "chars": 40502,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/mesh.py",
    "chars": 25164,
    "preview": "import os\nimport cv2\nimport torch\nimport trimesh\nimport numpy as np\n\n\ndef dot(x, y):\n    return torch.sum(x * y, -1, kee"
  },
  {
    "path": "physdreamer/gaussian_3d/scene/mesh_utils.py",
    "chars": 4093,
    "preview": "import numpy as np\nimport pymeshlab as pml\n\n\ndef poisson_mesh_reconstruction(points, normals=None):\n    # points/normals"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/camera_utils.py",
    "chars": 6378,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/general_utils.py",
    "chars": 3971,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/graphics_utils.py",
    "chars": 2052,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/image_utils.py",
    "chars": 554,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/loss_utils.py",
    "chars": 2191,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/rigid_body_utils.py",
    "chars": 8133,
    "preview": "import torch\nimport torch.nn.functional as F\n\n\ndef get_rigid_transform(A, B):\n    \"\"\"\n    Estimate the rigid body transf"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/sh_utils.py",
    "chars": 4371,
    "preview": "#  Copyright 2021 The PlenOctree Authors.\n#  Redistribution and use in source and binary forms, with or without\n#  modif"
  },
  {
    "path": "physdreamer/gaussian_3d/utils/system_utils.py",
    "chars": 785,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/losses/smoothness_loss.py",
    "chars": 1163,
    "preview": "import torch\nfrom typing import Tuple\n\n\ndef compute_plane_tv(t: torch.Tensor, only_w: bool = False) -> float:\n    \"\"\"Com"
  },
  {
    "path": "physdreamer/operators/dct.py",
    "chars": 4464,
    "preview": "\"\"\"\nCode from https://github.com/zh217/torch-dct/blob/master/torch_dct/_dct.py\n\"\"\"\nimport numpy as np\nimport torch\nimpor"
  },
  {
    "path": "physdreamer/operators/np_operators.py",
    "chars": 925,
    "preview": "import torch\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\n\ndef feature_map_"
  },
  {
    "path": "physdreamer/operators/rotation.py",
    "chars": 5241,
    "preview": "from typing import Optional\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef rotation_6d_to_matrix(d6: torch.Tensor) "
  },
  {
    "path": "physdreamer/utils/camera_utils.py",
    "chars": 1290,
    "preview": "import numpy as np\n\n\ndef normalize(x: np.ndarray) -> np.ndarray:\n    \"\"\"Normalization helper function.\"\"\"\n    return x /"
  },
  {
    "path": "physdreamer/utils/colmap_utils.py",
    "chars": 11859,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "physdreamer/utils/config.py",
    "chars": 1271,
    "preview": "from omegaconf import OmegaConf\n\n\ndef load_config_with_merge(config_path: str):\n    cfg = OmegaConf.load(config_path)\n\n "
  },
  {
    "path": "physdreamer/utils/img_utils.py",
    "chars": 6843,
    "preview": "import torch\nimport torchvision\nimport cv2\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import"
  },
  {
    "path": "physdreamer/utils/io_utils.py",
    "chars": 2035,
    "preview": "import cv2\nimport imageio\nimport numpy as np\nimport mediapy\nimport os\nimport PIL\n\n\ndef read_video_cv2(video_path, rgb=Tr"
  },
  {
    "path": "physdreamer/utils/optimizer.py",
    "chars": 1323,
    "preview": "import torch\nfrom torch.optim.lr_scheduler import LambdaLR\n\n\ndef get_linear_schedule_with_warmup(\n    optimizer, num_war"
  },
  {
    "path": "physdreamer/utils/print_utils.py",
    "chars": 177,
    "preview": "import torch.distributed as dist\n\n\ndef print_if_zero_rank(s):\n    if (not dist.is_initialized()) and (dist.is_initialize"
  },
  {
    "path": "physdreamer/utils/pytorch_mssim.py",
    "chars": 7057,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom math import exp\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if to"
  },
  {
    "path": "physdreamer/utils/svd_helpper.py",
    "chars": 4050,
    "preview": "from glob import glob\nfrom sys import version\nfrom typing import Dict, List, Optional, Tuple, Union\nimport numpy as np\ni"
  },
  {
    "path": "physdreamer/utils/torch_utils.py",
    "chars": 138,
    "preview": "import torch\nimport time\n\n\ndef get_sync_time():\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    r"
  },
  {
    "path": "physdreamer/warp_mpm/README.md",
    "chars": 264,
    "preview": "This folder is mainly copy paste from  https://github.com/zeshunzong/warp-mpm\n\nThe biggest change is to make some operat"
  },
  {
    "path": "physdreamer/warp_mpm/gaussian_sim_utils.py",
    "chars": 1170,
    "preview": "import numpy as np\n\n\ndef get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:\n\n    # set a grid in the range of ["
  },
  {
    "path": "physdreamer/warp_mpm/mpm_data_structure.py",
    "chars": 23790,
    "preview": "import warp as wp\nimport warp.torch\nimport torch\nfrom typing import Optional, Union, Sequence, Any\nfrom torch import Ten"
  },
  {
    "path": "physdreamer/warp_mpm/mpm_solver_diff.py",
    "chars": 38812,
    "preview": "import sys\nimport os\n\nimport warp as wp\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom mpm_data_stru"
  },
  {
    "path": "physdreamer/warp_mpm/mpm_utils.py",
    "chars": 34899,
    "preview": "import warp as wp\nfrom mpm_data_structure import *\nimport numpy as np\nimport math\n\n\n# compute stress from F\n@wp.func\ndef"
  },
  {
    "path": "physdreamer/warp_mpm/warp_utils.py",
    "chars": 5168,
    "preview": "import warp as wp\nimport ctypes\nfrom typing import Optional\n\nfrom warp.torch import (\n    dtype_from_torch,\n    device_f"
  },
  {
    "path": "projects/inference/README.md",
    "chars": 907,
    "preview": "## How to run\n\n**config file**\n\nThe config files for four scenes: carnation, aloacasia, hat, telephone is in `configs/` "
  },
  {
    "path": "projects/inference/config_demo.py",
    "chars": 3842,
    "preview": "import numpy as np\n\n# from model_config import (\n#     model_list,\n#     camera_cfg_list,\n#     points_list,\n#     force"
  },
  {
    "path": "projects/inference/configs/alocasia.py",
    "chars": 2072,
    "preview": "import numpy as np\n\ndataset_dir = \"../../data/physics_dreamer/alocasia/\"\nresult_dir = \"output/alocasia/results\"\nexp_name"
  },
  {
    "path": "projects/inference/configs/carnation.py",
    "chars": 2179,
    "preview": "import numpy as np\n\ndataset_dir = \"../../data/physics_dreamer/carnations/\"\nresult_dir = \"output/carnations/demos\"\nexp_na"
  },
  {
    "path": "projects/inference/configs/hat.py",
    "chars": 1663,
    "preview": "import numpy as np\n\ndataset_dir = \"../../data/physics_dreamer/hat/\"\nresult_dir = \"output/hat/demo\"\nexp_name = \"hat\"\n\nmod"
  },
  {
    "path": "projects/inference/configs/telephone.py",
    "chars": 1725,
    "preview": "import numpy as np\n\nexp_name = \"telephone\"\ndataset_dir = \"../../data/physics_dreamer/telephone/\"\nresult_dir = \"output/te"
  },
  {
    "path": "projects/inference/demo.py",
    "chars": 34285,
    "preview": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nimport point_cloud_utils as pcu\nfrom acc"
  },
  {
    "path": "projects/inference/local_utils.py",
    "chars": 32048,
    "preview": "import os\nimport torch\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor\nfrom time import time\nfrom omeg"
  },
  {
    "path": "projects/inference/run.sh",
    "chars": 427,
    "preview": "# python3 demo.py --scene_name carnation --apply_force --force_id 1  --point_id 0 --force_mag 2.0 --cam_id 0\n\n# python3 "
  },
  {
    "path": "projects/uncleaned_train/.gitignore",
    "chars": 185,
    "preview": "img_data/\ntmp/\n./data/\ndataset/\nmodels/\nmodel\noutput/\noutputs/\n*.sh\nexp_motion/*.sh\n__pycache__\n*__pycache__/\n*/__pycach"
  },
  {
    "path": "projects/uncleaned_train/README.md",
    "chars": 824,
    "preview": "This folder contains the original uncleaned training code. This folder can be viewed as an independent folder, it did no"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/config.yml",
    "chars": 205,
    "preview": "dataset_dir: \n# optimization\nwarmup_step: 10\nmax_grad_norm: 10.0\n\nrand_bg: False\n\nvelo_dir: [\n  \"../../data/physics_drea"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/config_demo.py",
    "chars": 2809,
    "preview": "import numpy as np\n\nfrom model_config import (\n    model_list,\n    camera_cfg_list,\n    points_list,\n    force_direction"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/convert_gaussian_to_mesh.py",
    "chars": 3495,
    "preview": "import os\nfrom random import gauss\nfrom fire import Fire\nfrom motionrep.gaussian_3d.scene import GaussianModel\nimport nu"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/fast_train_velocity.py",
    "chars": 43274,
    "preview": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom torch import Tensor\nfrom jaxtyping"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/interface.py",
    "chars": 28740,
    "preview": "from typing import Optional, Tuple\nfrom jaxtyping import Float, Int, Shaped\nimport torch\nimport torch.autograd as autogr"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/local_utils.py",
    "chars": 30804,
    "preview": "import os\nimport torch\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor\nfrom time import time\nfrom omeg"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/model_config.py",
    "chars": 1803,
    "preview": "import numpy as np\n\ndataset_dir = \"../../data/physics_dreamer/hat_nerfstudio/\"\nresult_dir = \"output/hat/results_force\"\ne"
  },
  {
    "path": "projects/uncleaned_train/exp_motion/train/train_material.py",
    "chars": 55100,
    "preview": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom torch import Tensor\nfrom jaxtyping"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/_convert_fbx_to_mesh.py",
    "chars": 7368,
    "preview": "import bpy\nimport numpy as np\nimport sys\nimport point_cloud_utils as pcu\nimport os\n\n\ndef convert(fbx_path):\n    bpy.ops."
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/blender_deforming_things4d.py",
    "chars": 1469,
    "preview": "import sys\nimport numpy\nimport os\n\nimport PIL\nimport mathutils\n\n\ndef anime_read(filename):\n    \"\"\"\n    filename: path of"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/blender_install_packages.py",
    "chars": 97,
    "preview": "import site\nimport pip\n\n# pip.main([\"install\", \"point-cloud-utils\", \"--target\", site.USER_SITE])\n"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/blender_render_imgs.py",
    "chars": 17337,
    "preview": "import bpy\nimport os\nimport numpy as np\nimport math\nimport sys\nimport struct\nimport collections\nfrom mathutils import Ma"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/deforming_things4d.py",
    "chars": 4003,
    "preview": "import os\nimport numpy as np\n\n\ndef anime_read(filename):\n    \"\"\"\n    filename: path of .anime file\n    return:\n        n"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/dragon_animation.py",
    "chars": 1365,
    "preview": "import bpy\n\n# Clear existing data\nbpy.ops.wm.read_factory_settings(use_empty=True)\n\n# 1. Import the FBX file\nfbx_path = "
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/fbx_to_mesh.py",
    "chars": 3062,
    "preview": "import bpy\nimport os\nimport sys\n\n\ndef convert_to_mesh(fbx_path, output_dir):\n    bpy.ops.import_scene.fbx(filepath=fbx_p"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/fbx_to_mesh_flag.py",
    "chars": 4844,
    "preview": "import bpy\nimport os\nimport sys\n\n\ndef convert_to_mesh(fbx_path, output_dir):\n    bpy.ops.import_scene.fbx(filepath=fbx_p"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/render_blender_annimations.py",
    "chars": 16039,
    "preview": "import bpy\nimport os\nimport numpy as np\nimport math\nimport sys\nimport struct\nimport collections\nfrom mathutils import Ma"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/render_fbx_first_frame.py",
    "chars": 13774,
    "preview": "import bpy\nimport os\nimport numpy as np\nimport math\nimport sys\nimport struct\nimport collections\nfrom mathutils import Ma"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/render_obj.py",
    "chars": 9766,
    "preview": "import bpy\nimport os\nimport numpy as np\nimport math\nimport sys\nimport struct\nimport collections\nfrom mathutils import Ma"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/render_obj_external_texture.py",
    "chars": 12752,
    "preview": "import bpy\nimport os\nimport numpy as np\nimport math\nimport sys\nimport struct\nimport collections\nfrom mathutils import Ma"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/test_colmap_camera.py",
    "chars": 17420,
    "preview": "import numpy as np\nimport os\nimport sys\nimport argparse\nimport collections\nimport struct\nfrom typing import NamedTuple\ni"
  },
  {
    "path": "projects/uncleaned_train/motionrep/datatools/transform_obj_for_blender.py",
    "chars": 2716,
    "preview": "import point_cloud_utils as pcu\nimport argparse\nimport os\nimport json\nimport numpy as np\n\n\ndef transform_vertex(vertex: "
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/builder.py",
    "chars": 1303,
    "preview": "from . import gaussian_diffusion as gd\nfrom .respace import SpacedDiffusion, space_timesteps\n\n\ndef create_gaussian_diffu"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/discretizer.py",
    "chars": 717,
    "preview": "import torch\n\nfrom sgm.modules.diffusionmodules.discretizer import Discretization\n\n\nclass EDMResShiftedDiscretization(Di"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/draft.py",
    "chars": 638,
    "preview": "\n\nimport numpy as np\n\ndef latent_sds(input_x, schduler, unet, t_range=[0.02, 0.98]):\n\n    # t_range_annel: [0.02, 0.98] "
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/gaussian_diffusion.py",
    "chars": 36367,
    "preview": "\"\"\"\nThis code started out as a PyTorch port of Ho et al's diffusion models:\nhttps://github.com/hojonathanho/diffusion/bl"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/losses.py",
    "chars": 2534,
    "preview": "\"\"\"\nHelpers for various likelihood-based losses. These are ported from the original\nHo et al. diffusion models codebase:"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/resample.py",
    "chars": 5820,
    "preview": "\"\"\"\nCode borrowed from https://github.com/Sin3DM/Sin3DM/blob/9c3ac12a655157469c71632346ebf569354ae7f6/src/diffusion/resa"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/respace.py",
    "chars": 5193,
    "preview": "import numpy as np\nimport torch as th\n\nfrom .gaussian_diffusion import GaussianDiffusion\n\n\ndef space_timesteps(num_times"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/sigma_sampling.py",
    "chars": 610,
    "preview": "import torch\nfrom inspect import isfunction\n\n# import sgm\n\n\ndef exists(x):\n    return x is not None\n\n\ndef default(val, d"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/sv_diffusion_engine.py",
    "chars": 13691,
    "preview": "import math\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyt"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/svd_conditioner.py",
    "chars": 2979,
    "preview": "\"\"\"\nModified from https://github.com/Stability-AI/generative-models/blob/main/sgm/modules/encoders/modules.py\n\"\"\"\nimport"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/svd_sds_engine.py",
    "chars": 16171,
    "preview": "import math\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyt"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/svd_sds_engine_backup.py",
    "chars": 9766,
    "preview": "import math\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyt"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/svd_sds_wdecoder_engine.py",
    "chars": 21084,
    "preview": "import math\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyt"
  },
  {
    "path": "projects/uncleaned_train/motionrep/diffusion/video_diffusion_loss.py",
    "chars": 4408,
    "preview": "from typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom sgm.modules.autoencoding"
  },
  {
    "path": "projects/uncleaned_train/motionrep/field_components/encoding.py",
    "chars": 10590,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/field_components/mlp.py",
    "chars": 3190,
    "preview": "\"\"\"\nMostly from nerfstudio: https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/field_components/mlp.p"
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/dct_trajectory_field.py",
    "chars": 307,
    "preview": "import torch\nimport torch.nn as nn\n\nfrom motionrep.utils.dct import dct, idct, dct3d, idct_3d\n\n\nclass DCTTrajctoryField("
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/discrete_field.py",
    "chars": 6992,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/mul_offset_field.py",
    "chars": 4123,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/mul_se3_field.py",
    "chars": 4640,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/offset_field.py",
    "chars": 4241,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/se3_field.py",
    "chars": 4674,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/triplane_field.py",
    "chars": 4421,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/fields/video_triplane_disp_field.py",
    "chars": 10899,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom jaxtyping import Float, Int, Shaped\nfrom torch import Tensor, nn\nfrom "
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/arguments/__init__.py",
    "chars": 3782,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/depth_uv_render.py",
    "chars": 4082,
    "preview": "import torch\nfrom motionrep.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_raster"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/feat_render.py",
    "chars": 3192,
    "preview": "import torch\nfrom motionrep.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_raster"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/flow_depth_render.py",
    "chars": 4351,
    "preview": "import torch\nfrom motionrep.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_raster"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/motion_renderer.py",
    "chars": 5200,
    "preview": "import torch\nfrom motionrep.gaussian_3d.scene.gaussian_model import GaussianModel\nimport math\n\nfrom diff_gaussian_raster"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/gaussian_renderer/render.py",
    "chars": 10122,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/__init__.py",
    "chars": 4465,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/cameras.py",
    "chars": 3022,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/colmap_loader.py",
    "chars": 11722,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/dataset_readers.py",
    "chars": 14630,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/gaussian_model.py",
    "chars": 40486,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/mesh.py",
    "chars": 25164,
    "preview": "import os\nimport cv2\nimport torch\nimport trimesh\nimport numpy as np\n\n\ndef dot(x, y):\n    return torch.sum(x * y, -1, kee"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/scene/mesh_utils.py",
    "chars": 4093,
    "preview": "import numpy as np\nimport pymeshlab as pml\n\n\ndef poisson_mesh_reconstruction(points, normals=None):\n    # points/normals"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/camera_utils.py",
    "chars": 6372,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/general_utils.py",
    "chars": 3971,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/graphics_utils.py",
    "chars": 2052,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/image_utils.py",
    "chars": 554,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/loss_utils.py",
    "chars": 2191,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/rigid_body_utils.py",
    "chars": 8133,
    "preview": "import torch\nimport torch.nn.functional as F\n\n\ndef get_rigid_transform(A, B):\n    \"\"\"\n    Estimate the rigid body transf"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/sh_utils.py",
    "chars": 4371,
    "preview": "#  Copyright 2021 The PlenOctree Authors.\n#  Redistribution and use in source and binary forms, with or without\n#  modif"
  },
  {
    "path": "projects/uncleaned_train/motionrep/gaussian_3d/utils/system_utils.py",
    "chars": 785,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/losses/se3_loss.py",
    "chars": 67,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n"
  },
  {
    "path": "projects/uncleaned_train/motionrep/losses/smoothness_loss.py",
    "chars": 1163,
    "preview": "import torch\nfrom typing import Tuple\n\n\ndef compute_plane_tv(t: torch.Tensor, only_w: bool = False) -> float:\n    \"\"\"Com"
  },
  {
    "path": "projects/uncleaned_train/motionrep/operators/dct.py",
    "chars": 4464,
    "preview": "\"\"\"\nCode from https://github.com/zh217/torch-dct/blob/master/torch_dct/_dct.py\n\"\"\"\nimport numpy as np\nimport torch\nimpor"
  },
  {
    "path": "projects/uncleaned_train/motionrep/operators/np_operators.py",
    "chars": 925,
    "preview": "import torch\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\n\ndef feature_map_"
  },
  {
    "path": "projects/uncleaned_train/motionrep/operators/rotation.py",
    "chars": 5241,
    "preview": "from typing import Optional\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef rotation_6d_to_matrix(d6: torch.Tensor) "
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/camera_utils.py",
    "chars": 1290,
    "preview": "import numpy as np\n\n\ndef normalize(x: np.ndarray) -> np.ndarray:\n    \"\"\"Normalization helper function.\"\"\"\n    return x /"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/colmap_utils.py",
    "chars": 11859,
    "preview": "#\n# Copyright (C) 2023, Inria\n# GRAPHDECO research group, https://team.inria.fr/graphdeco\n# All rights reserved.\n#\n# Thi"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/config.py",
    "chars": 1271,
    "preview": "from omegaconf import OmegaConf\n\n\ndef load_config_with_merge(config_path: str):\n    cfg = OmegaConf.load(config_path)\n\n "
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/dct.py",
    "chars": 8451,
    "preview": "\"\"\"\nCode from https://github.com/zh217/torch-dct/blob/master/torch_dct/_dct.py\n\"\"\"\nimport numpy as np\nimport torch\nimpor"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/flow_utils.py",
    "chars": 3196,
    "preview": "\nimport numpy as np\n\ndef flow_to_image(flow, display=False):\n    \"\"\"\n    Convert flow into middlebury color code image\n "
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/img_utils.py",
    "chars": 6843,
    "preview": "import torch\nimport torchvision\nimport cv2\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/io_utils.py",
    "chars": 2035,
    "preview": "import cv2\nimport imageio\nimport numpy as np\nimport mediapy\nimport os\nimport PIL\n\n\ndef read_video_cv2(video_path, rgb=Tr"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/optimizer.py",
    "chars": 1323,
    "preview": "import torch\nfrom torch.optim.lr_scheduler import LambdaLR\n\n\ndef get_linear_schedule_with_warmup(\n    optimizer, num_war"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/peft_utils.py",
    "chars": 1974,
    "preview": "import peft\n\nfrom peft.utils.save_and_load import get_peft_model_state_dict\nfrom peft import PeftModel\n\n\ndef save_peft_a"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/print_utils.py",
    "chars": 177,
    "preview": "import torch.distributed as dist\n\n\ndef print_if_zero_rank(s):\n    if (not dist.is_initialized()) and (dist.is_initialize"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/pytorch_mssim.py",
    "chars": 7057,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom math import exp\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if to"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/svd_helpper.py",
    "chars": 4050,
    "preview": "from glob import glob\nfrom sys import version\nfrom typing import Dict, List, Optional, Tuple, Union\nimport numpy as np\ni"
  },
  {
    "path": "projects/uncleaned_train/motionrep/utils/torch_utils.py",
    "chars": 138,
    "preview": "import torch\nimport time\n\n\ndef get_sync_time():\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    r"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/convert_gaussian_to_mesh.py",
    "chars": 2883,
    "preview": "import os\nfrom random import gauss\nfrom fire import Fire\nfrom motionrep.gaussian_3d.scene import GaussianModel\nimport nu"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/diff_warp_utils.py",
    "chars": 24093,
    "preview": "import warp as wp\nimport warp.torch\nimport torch\nfrom typing import Optional, Union, Sequence, Any\nfrom torch import Ten"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/engine_utils.py",
    "chars": 2641,
    "preview": "import numpy as np\nimport h5py\nimport os\nimport sys\nimport warp as wp\nimport torch\n\ndef save_data_at_frame(mpm_solver, d"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/grad_test.py",
    "chars": 8815,
    "preview": "import warp as wp\nimport numpy as np\nimport torch\nimport os\nfrom mpm_solver_warp_diff import MPM_Simulator_WARPDiff\nfrom"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_solver_warp.py",
    "chars": 41290,
    "preview": "import sys\nimport os\n\nimport warp as wp\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom engine_utils "
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_solver_warp_diff.py",
    "chars": 28659,
    "preview": "import sys\nimport os\n\nimport warp as wp\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom engine_utils "
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/mpm_utils.py",
    "chars": 22970,
    "preview": "import warp as wp\nfrom diff_warp_utils import *\nimport numpy as np\nimport math\n\n\n# compute stress from F\n@wp.func\ndef ki"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/run_gaussian.py",
    "chars": 8077,
    "preview": "import time\nimport numpy as np\nfrom fire import Fire\nimport os\nimport warp as wp\nfrom mpm_solver_warp import MPM_Simulat"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/run_gaussian_static.py",
    "chars": 11004,
    "preview": "import time\nimport numpy as np\nfrom fire import Fire\nimport os\nimport warp as wp\nfrom mpm_solver_warp import MPM_Simulat"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/run_sand.py",
    "chars": 2119,
    "preview": "\nimport warp as wp\nfrom mpm_solver_warp import MPM_Simulator_WARP\nfrom engine_utils import *\nimport torch\nwp.init()\nwp.c"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/sim_grad.py",
    "chars": 9161,
    "preview": "import warp as wp\nimport numpy as np\nimport torch\nimport os\nfrom mpm_solver_warp_diff import MPM_Simulator_WARPDiff\nfrom"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/solver_grad_test.py",
    "chars": 4642,
    "preview": "import warp as wp\nimport numpy as np\nimport torch\nimport os\nfrom mpm_solver_warp_diff import MPM_Simulator_WARPDiff\nfrom"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/test_inverse_sim.py",
    "chars": 6854,
    "preview": "import warp as wp\nimport numpy as np\nimport torch\nimport os\nfrom mpm_solver_warp_diff import MPM_Simulator_WARPDiff\nfrom"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/test_sim.py",
    "chars": 3395,
    "preview": "import warp as wp \nimport numpy as np\nimport torch\n\n@wp.struct\nclass MPMStateStruct:\n    ###### essential #####\n    # pa"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/warp_rewrite.py",
    "chars": 4642,
    "preview": "import warp as wp\nimport ctypes\n\n\nfrom warp.torch import (\n    dtype_from_torch,\n    device_from_torch,\n    dtype_is_com"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup/warp_utils.py",
    "chars": 7804,
    "preview": "import warp as wp\nimport warp.torch\nimport torch\n\n\n@wp.struct\nclass MPMModelStruct:\n    ####### essential #######\n    gr"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/gaussian_sim_utils.py",
    "chars": 698,
    "preview": "import numpy as np\n\ndef get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:\n\n    # set a grid in the range of [-"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_data_structure.py",
    "chars": 26583,
    "preview": "import warp as wp\nimport warp.torch\nimport torch\nfrom typing import Optional, Union, Sequence, Any\nfrom torch import Ten"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_solver_diff.py",
    "chars": 30781,
    "preview": "import sys\nimport os\n\nimport warp as wp\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom mpm_data_stru"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/mpm_utils.py",
    "chars": 28270,
    "preview": "import warp as wp\nfrom mpm_data_structure import *\nimport numpy as np\nimport math\n\n\n# compute stress from F\n@wp.func\ndef"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/backup_jan10/warp_utils.py",
    "chars": 5168,
    "preview": "import warp as wp\nimport ctypes\nfrom typing import Optional\n\nfrom warp.torch import (\n    dtype_from_torch,\n    device_f"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/gaussian_sim_utils.py",
    "chars": 1170,
    "preview": "import numpy as np\n\n\ndef get_volume(xyzs: np.ndarray, resolution=128) -> np.ndarray:\n\n    # set a grid in the range of ["
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_data_structure.py",
    "chars": 23790,
    "preview": "import warp as wp\nimport warp.torch\nimport torch\nfrom typing import Optional, Union, Sequence, Any\nfrom torch import Ten"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_solver_diff.py",
    "chars": 38812,
    "preview": "import sys\nimport os\n\nimport warp as wp\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom mpm_data_stru"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/mpm_utils.py",
    "chars": 34899,
    "preview": "import warp as wp\nfrom mpm_data_structure import *\nimport numpy as np\nimport math\n\n\n# compute stress from F\n@wp.func\ndef"
  },
  {
    "path": "projects/uncleaned_train/thirdparty_code/warp_mpm/warp_utils.py",
    "chars": 5168,
    "preview": "import warp as wp\nimport ctypes\nfrom typing import Optional\n\nfrom warp.torch import (\n    dtype_from_torch,\n    device_f"
  },
  {
    "path": "requirements.txt",
    "chars": 586,
    "preview": "accelerate==0.25.0\ndecord==0.6.0\neinops==0.7.0\nfire==0.5.0\nimageio==2.34.0\nipython==8.12.3\nipython==8.18.1\njaxtyping==0."
  },
  {
    "path": "setup.py",
    "chars": 129,
    "preview": "from setuptools import setup, find_packages\n\nsetup(\n    name=\"physdreamer\",\n    version=\"0.0.1\",\n    packages=find_packa"
  }
]

About this extraction

This page contains the full source code of the a1600012888/PhysDreamer GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 176 files (1.4 MB, approximately 394.3k tokens) and a symbol index of 1,427 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!