Showing preview only (1,948K chars total). Download the full file or copy to clipboard to get everything.
Repository: jiachens/ModelNet40-C
Branch: master
Commit: fad786c430c0
Files: 461
Total size: 12.8 MB
Directory structure:
gitextract_51znd243/
├── .gitignore
├── .gitmodules
├── CurveNet/
│ ├── README.md
│ └── core/
│ ├── data.py
│ ├── main_cls.py
│ ├── main_normal.py
│ ├── main_partseg.py
│ ├── models/
│ │ ├── curvenet_cls.py
│ │ ├── curvenet_normal.py
│ │ ├── curvenet_seg.py
│ │ ├── curvenet_util.py
│ │ └── walk.py
│ └── util.py
├── GDANet/
│ ├── README.md
│ └── model/
│ ├── GDANet_cls.py
│ ├── GDANet_ptseg.py
│ ├── __init__.py
│ └── util/
│ ├── GDANet_util.py
│ ├── __init__.py
│ ├── data_util.py
│ └── util.py
├── LICENSE
├── PCT_Pytorch/
│ ├── LICENSE
│ ├── README.md
│ ├── checkpoints/
│ │ └── best/
│ │ └── models/
│ │ └── model.t7
│ ├── data.py
│ ├── main.py
│ ├── model.py
│ ├── model_new.py
│ ├── pointnet2_ops_lib/
│ │ ├── MANIFEST.in
│ │ ├── pointnet2_ops/
│ │ │ ├── __init__.py
│ │ │ ├── _ext-src/
│ │ │ │ ├── include/
│ │ │ │ │ ├── ball_query.h
│ │ │ │ │ ├── cuda_utils.h
│ │ │ │ │ ├── group_points.h
│ │ │ │ │ ├── interpolate.h
│ │ │ │ │ ├── sampling.h
│ │ │ │ │ └── utils.h
│ │ │ │ └── src/
│ │ │ │ ├── ball_query.cpp
│ │ │ │ ├── ball_query_gpu.cu
│ │ │ │ ├── bindings.cpp
│ │ │ │ ├── group_points.cpp
│ │ │ │ ├── group_points_gpu.cu
│ │ │ │ ├── interpolate.cpp
│ │ │ │ ├── interpolate_gpu.cu
│ │ │ │ ├── sampling.cpp
│ │ │ │ └── sampling_gpu.cu
│ │ │ ├── _version.py
│ │ │ ├── pointnet2_modules.py
│ │ │ └── pointnet2_utils.py
│ │ └── setup.py
│ ├── test.sh
│ ├── train.sh
│ └── util.py
├── README.md
├── all_utils.py
├── aug_utils.py
├── configs/
│ ├── bn/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── corruption/
│ │ ├── curvenet.yaml
│ │ ├── dgcnn.yaml
│ │ ├── gdanet.yaml
│ │ ├── pct.yaml
│ │ ├── pointMLP.yaml
│ │ ├── pointMLP2.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── cutmix/
│ │ ├── dgcnn_k.yaml
│ │ ├── dgcnn_r.yaml
│ │ ├── pct_k.yaml
│ │ ├── pct_r.yaml
│ │ ├── pointnet2_k.yaml
│ │ ├── pointnet2_r.yaml
│ │ ├── pointnet_k.yaml
│ │ ├── pointnet_r.yaml
│ │ ├── rscnn_k.yaml
│ │ ├── rscnn_r.yaml
│ │ ├── simpleview_k.yaml
│ │ └── simpleview_r.yaml
│ ├── dgcnn_curvenet_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_valid_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_valid_run_1.yaml
│ ├── dgcnn_dgcnn_ce_run_1.yaml
│ ├── dgcnn_dgcnn_ce_valid_run_1.yaml
│ ├── dgcnn_dgcnn_run_1.yaml
│ ├── dgcnn_dgcnn_valid_run_1.yaml
│ ├── dgcnn_gdanet_run_1.yaml
│ ├── dgcnn_pct_run_1.yaml
│ ├── dgcnn_pointMLP2_run_1.yaml
│ ├── dgcnn_pointMLP_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet2_ce_run_1.yaml
│ ├── dgcnn_pointnet2_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet2_run_1.yaml
│ ├── dgcnn_pointnet2_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.25_run_1.yaml
│ ├── dgcnn_pointnet_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.5_run_1.yaml
│ ├── dgcnn_pointnet_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet_ce_run_1.yaml
│ ├── dgcnn_pointnet_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet_run_1.yaml
│ ├── dgcnn_pointnet_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.25_run_1.yaml
│ ├── dgcnn_rscnn_0.25_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.5_run_1.yaml
│ ├── dgcnn_rscnn_0.5_valid_run_1.yaml
│ ├── dgcnn_rscnn_ce_run_1.yaml
│ ├── dgcnn_rscnn_ce_valid_run_1.yaml
│ ├── dgcnn_rscnn_run_1.yaml
│ ├── dgcnn_rscnn_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.25_run_1.yaml
│ ├── dgcnn_simpleview_0.25_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.5_run_1.yaml
│ ├── dgcnn_simpleview_0.5_valid_run_1.yaml
│ ├── dgcnn_simpleview_ce_run_1.yaml
│ ├── dgcnn_simpleview_ce_valid_run_1.yaml
│ ├── dgcnn_simpleview_run_1.yaml
│ ├── dgcnn_simpleview_valid_run_1.yaml
│ ├── mixup/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── pgd/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ └── pointnet.yaml
│ ├── pointnet2_dgcnn_run_1.yaml
│ ├── pointnet2_dgcnn_valid_run_1.yaml
│ ├── pointnet2_pointnet2_run_1.yaml
│ ├── pointnet2_pointnet2_valid_run_1.yaml
│ ├── pointnet2_pointnet_run_1.yaml
│ ├── pointnet2_pointnet_valid_run_1.yaml
│ ├── pointnet2_rscnn_run_1.yaml
│ ├── pointnet2_rscnn_valid_run_1.yaml
│ ├── pointnet2_simpleview_run_1.yaml
│ ├── pointnet2_simpleview_valid_run_1.yaml
│ ├── rscnn_dgcnn_run_1.yaml
│ ├── rscnn_pointnet2_run_1.yaml
│ ├── rscnn_pointnet_run_1.yaml
│ ├── rscnn_rscnn_run_1.yaml
│ ├── rscnn_simpleview_run_1.yaml
│ ├── rsmix/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── tent/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ └── tent_cutmix/
│ ├── dgcnn.yaml
│ ├── pct.yaml
│ ├── pointnet.yaml
│ ├── pointnet2.yaml
│ ├── rscnn.yaml
│ └── simpleview.yaml
├── configs.py
├── data/
│ ├── convert.py
│ ├── create_modelnet40_small.py
│ ├── create_modelnet40_valid.py
│ ├── distortion.py
│ ├── generate_c.py
│ ├── occlusion.py
│ ├── process.py
│ └── util.py
├── dataloader.py
├── dgcnn/
│ ├── .gitignore
│ ├── README.md
│ ├── pytorch/
│ │ ├── README.md
│ │ ├── data.py
│ │ ├── main.py
│ │ ├── model.py
│ │ └── util.py
│ └── tensorflow/
│ ├── README.md
│ ├── evaluate.py
│ ├── models/
│ │ ├── dgcnn.py
│ │ └── transform_nets.py
│ ├── part_seg/
│ │ ├── README.md
│ │ ├── download_data.sh
│ │ ├── part_seg_model.py
│ │ ├── test.py
│ │ ├── testing_ply_file_list.txt
│ │ └── train_multi_gpu.py
│ ├── provider.py
│ ├── sem_seg/
│ │ ├── README.md
│ │ ├── batch_inference.py
│ │ ├── collect_indoor3d_data.py
│ │ ├── download_data.sh
│ │ ├── eval_iou_accuracy.py
│ │ ├── indoor3d_util.py
│ │ ├── meta/
│ │ │ ├── all_data_label.txt
│ │ │ ├── anno_paths.txt
│ │ │ ├── area1_data_label.txt
│ │ │ ├── area2_data_label.txt
│ │ │ ├── area3_data_label.txt
│ │ │ ├── area4_data_label.txt
│ │ │ ├── area5_data_label.txt
│ │ │ ├── area6_data_label.txt
│ │ │ └── class_names.txt
│ │ ├── model.py
│ │ ├── test_job.sh
│ │ ├── train.py
│ │ └── train_job.sh
│ ├── train.py
│ └── utils/
│ ├── data_prep_util.py
│ ├── eulerangles.py
│ ├── pc_util.py
│ ├── plyfile.py
│ └── tf_util.py
├── download.sh
├── emd/
│ ├── README.md
│ ├── emd.cpp
│ ├── emd_cuda.cu
│ ├── emd_module.py
│ └── setup.py
├── eval_cor.sh
├── eval_og.sh
├── eval_tent_cutmix.sh
├── gdrivedl.py
├── main.py
├── models/
│ ├── __init__.py
│ ├── curvenet.py
│ ├── dgcnn.py
│ ├── gdanet.py
│ ├── model_utils.py
│ ├── mv.py
│ ├── mv_utils.py
│ ├── pct.py
│ ├── pointmlp.py
│ ├── pointmlp2.py
│ ├── pointnet.py
│ ├── pointnet2.py
│ ├── resnet.py
│ └── rscnn.py
├── pc_utils.py
├── pointMLP/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ └── classification_ModelNet40/
│ ├── data.py
│ ├── helper.py
│ ├── main.py
│ ├── models/
│ │ ├── __init__.py
│ │ └── pointmlp.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── logger.py
│ │ ├── misc.py
│ │ └── progress/
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── MANIFEST.in
│ │ ├── README.rst
│ │ ├── progress/
│ │ │ ├── __init__.py
│ │ │ ├── bar.py
│ │ │ ├── counter.py
│ │ │ ├── helpers.py
│ │ │ └── spinner.py
│ │ ├── setup.py
│ │ └── test_progress.py
│ └── voting.py
├── pointnet2_pyt/
│ ├── .gitignore
│ ├── .pre-commit-config.yaml
│ ├── .travis.yml
│ ├── MANIFEST.in
│ ├── README.rst
│ ├── UNLICENSE
│ ├── __init__.py
│ ├── pointnet2/
│ │ ├── __init__.py
│ │ ├── _ext-src/
│ │ │ ├── include/
│ │ │ │ ├── ball_query.h
│ │ │ │ ├── cuda_utils.h
│ │ │ │ ├── group_points.h
│ │ │ │ ├── interpolate.h
│ │ │ │ ├── sampling.h
│ │ │ │ └── utils.h
│ │ │ └── src/
│ │ │ ├── ball_query.cpp
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── bindings.cpp
│ │ │ ├── group_points.cpp
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.cpp
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.cpp
│ │ │ └── sampling_gpu.cu
│ │ ├── data/
│ │ │ ├── .gitignore
│ │ │ ├── Indoor3DSemSegLoader.py
│ │ │ ├── ModelNet40Loader.py
│ │ │ ├── __init__.py
│ │ │ └── data_utils.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── pointnet2_msg_cls.py
│ │ │ ├── pointnet2_msg_sem.py
│ │ │ ├── pointnet2_ssg_cls.py
│ │ │ └── pointnet2_ssg_sem.py
│ │ ├── train/
│ │ │ ├── __init__.py
│ │ │ ├── train_cls.py
│ │ │ └── train_sem_seg.py
│ │ └── utils/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ └── pointnet2_utils.py
│ ├── setup.py
│ ├── tests/
│ │ ├── conftest.py
│ │ ├── test_cls_msg.py
│ │ ├── test_cls_ssg.py
│ │ ├── test_semseg_msg.py
│ │ └── test_semseg_ssg.py
│ └── tox.ini
├── pointnet2_tf/
│ ├── LICENSE
│ ├── README.md
│ ├── data/
│ │ └── README.md
│ ├── evaluate.py
│ ├── modelnet_dataset.py
│ ├── modelnet_h5_dataset.py
│ ├── models/
│ │ ├── pointnet2_cls_msg.py
│ │ ├── pointnet2_cls_ssg.py
│ │ ├── pointnet2_part_seg.py
│ │ ├── pointnet2_part_seg_msg_one_hot.py
│ │ ├── pointnet2_sem_seg.py
│ │ └── pointnet_cls_basic.py
│ ├── part_seg/
│ │ ├── command.sh
│ │ ├── command_one_hot.sh
│ │ ├── evaluate.py
│ │ ├── part_dataset.py
│ │ ├── part_dataset_all_normal.py
│ │ ├── test.py
│ │ ├── train.py
│ │ └── train_one_hot.py
│ ├── scannet/
│ │ ├── README.md
│ │ ├── pc_util.py
│ │ ├── preprocessing/
│ │ │ ├── collect_scannet_scenes.py
│ │ │ ├── demo.py
│ │ │ ├── fetch_label_names.py
│ │ │ ├── scannet-labels.combined.tsv
│ │ │ └── scannet_util.py
│ │ ├── scannet_dataset.py
│ │ ├── scene_util.py
│ │ └── train.py
│ ├── tf_ops/
│ │ ├── 3d_interpolation/
│ │ │ ├── interpolate.cpp
│ │ │ ├── tf_interpolate.cpp
│ │ │ ├── tf_interpolate.py
│ │ │ ├── tf_interpolate_compile.sh
│ │ │ ├── tf_interpolate_op_test.py
│ │ │ └── visu_interpolation.py
│ │ ├── grouping/
│ │ │ ├── .gitignore
│ │ │ ├── test/
│ │ │ │ ├── compile.sh
│ │ │ │ ├── query_ball_point.cpp
│ │ │ │ ├── query_ball_point.cu
│ │ │ │ ├── query_ball_point_block.cu
│ │ │ │ ├── query_ball_point_grid.cu
│ │ │ │ ├── selection_sort.cpp
│ │ │ │ ├── selection_sort.cu
│ │ │ │ └── selection_sort_const.cu
│ │ │ ├── tf_grouping.cpp
│ │ │ ├── tf_grouping.py
│ │ │ ├── tf_grouping_compile.sh
│ │ │ ├── tf_grouping_g.cu
│ │ │ └── tf_grouping_op_test.py
│ │ └── sampling/
│ │ ├── .gitignore
│ │ ├── tf_sampling.cpp
│ │ ├── tf_sampling.py
│ │ ├── tf_sampling_compile.sh
│ │ └── tf_sampling_g.cu
│ ├── train.py
│ ├── train_multi_gpu.py
│ └── utils/
│ ├── README.md
│ ├── compile_render_balls_so.sh
│ ├── pc_util.py
│ ├── pointnet_util.py
│ ├── provider.py
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ └── tf_util.py
├── pointnet_pyt/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ ├── misc/
│ │ ├── modelnet_id.txt
│ │ └── num_seg_classes.txt
│ ├── pointnet/
│ │ ├── __init__.py
│ │ ├── dataset.py
│ │ └── model.py
│ ├── scripts/
│ │ ├── build.sh
│ │ └── download.sh
│ ├── setup.py
│ └── utils/
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ ├── show_cls.py
│ ├── show_seg.py
│ ├── train_classification.py
│ └── train_segmentation.py
├── requirements.txt
├── rs_cnn/
│ ├── .gitignore
│ ├── CMakeLists.txt
│ ├── LICENSE
│ ├── README.md
│ ├── cfgs/
│ │ ├── config_msn_partseg.yaml
│ │ └── config_ssn_cls.yaml
│ ├── data/
│ │ ├── ModelNet40Loader.py
│ │ ├── ShapeNetPartLoader.py
│ │ ├── __init__.py
│ │ └── data_utils.py
│ ├── docs/
│ │ ├── _config.yml
│ │ └── index.md
│ ├── models/
│ │ ├── __init__.py
│ │ ├── rscnn_msn_seg.py
│ │ └── rscnn_ssn_cls.py
│ ├── train_cls.py
│ ├── train_cls.sh
│ ├── train_partseg.py
│ ├── train_partseg.sh
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── _ext/
│ │ │ ├── __init__.py
│ │ │ └── pointnet2/
│ │ │ └── __init__.py
│ │ ├── build_ffi.py
│ │ ├── cinclude/
│ │ │ ├── ball_query_gpu.h
│ │ │ ├── ball_query_wrapper.h
│ │ │ ├── cuda_utils.h
│ │ │ ├── group_points_gpu.h
│ │ │ ├── group_points_wrapper.h
│ │ │ ├── interpolate_gpu.h
│ │ │ ├── interpolate_wrapper.h
│ │ │ ├── sampling_gpu.h
│ │ │ └── sampling_wrapper.h
│ │ ├── csrc/
│ │ │ ├── ball_query.c
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── group_points.c
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.c
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.c
│ │ │ └── sampling_gpu.cu
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ ├── pointnet2_modules_updated.py
│ │ ├── pointnet2_utils.py
│ │ └── pytorch_utils/
│ │ ├── __init__.py
│ │ └── pytorch_utils.py
│ ├── voting_evaluate_cls.py
│ └── voting_evaluate_partseg.py
├── setup.sh
├── third_party/
│ ├── bn_helper.py
│ └── tent_helper.py
└── visualize/
├── README.md
├── config.py
├── confusion_matrix.py
├── examples.py
├── main_results.py
└── pointflow_fig_colorful.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
*__pycache__/
data/modelnet40_ply_hdf5_2048
data/ModelNet40
data/modelnet40_c
runs/
pretrained/
cor_exp/
*.out
/output
# Created by https://www.toptal.com/developers/gitignore/api/python,cuda,zsh,c++
# Edit at https://www.toptal.com/developers/gitignore?templates=python,cuda,zsh,c++
### C++ ###
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
### CUDA ###
*.i
*.ii
*.gpu
*.ptx
*.cubin
*.fatbin
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
### Zsh ###
# Zsh compiled script + zrecompile backup
*.zwc
*.zwc.old
# Zsh completion-optimization dumpfile
*zcompdump*
# Zsh zcalc history
.zcalc_history
# A popular plugin manager's files
._zinit
.zinit_lstupd
# zdharma/zshelldoc tool's files
zsdoc/data
# robbyrussell/oh-my-zsh/plugins/per-directory-history plugin's files
# (when set-up to store the history in the local directory)
.directory_history
# MichaelAquilina/zsh-autoswitch-virtualenv plugin's files
# (for Zsh plugins using Python)
# Zunit tests' output
/tests/_output/*
!/tests/_output/.gitkeep
# End of https://www.toptal.com/developers/gitignore/api/python,cuda,zsh,c++
{"mode":"full","isActive":false}
================================================
FILE: .gitmodules
================================================
[submodule "PyGeM"]
path = PyGeM
url = https://github.com/mathLab/PyGeM.git
[submodule "visualize/mitsuba2"]
path = visualize/mitsuba2
url = https://github.com/mitsuba-renderer/mitsuba2
================================================
FILE: CurveNet/README.md
================================================
# CurveNet
Official implementation of "Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis", ICCV 2021
[](https://paperswithcode.com/sota/3d-point-cloud-classification-on-modelnet40?p=walk-in-the-cloud-learning-curves-for-point)
[](https://paperswithcode.com/sota/3d-part-segmentation-on-shapenet-part?p=walk-in-the-cloud-learning-curves-for-point)
Paper: https://arxiv.org/abs/2105.01288

## Requirements
- Python>=3.7
- PyTorch>=1.2
- Packages: glob, h5py, sklearn
## Contents
- [Point Cloud Classification](#point-cloud-classification)
- [Point Cloud Part Segmentation](#point-cloud-part-segmentation)
- [Point Cloud Normal Estimation](#point-cloud-normal-estimation)
**NOTE:** Please change your current directory to ```core/``` first before executing the following commands.
## Point Cloud Classification
### Data
The ModelNet40 dataset is primarily used for the classification experiments. At your first run, the program will automatically download the data if it is not in ```data/```. Or, you can manually download the [official data](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip) and unzip to ```data/```.
Alternatively, you can place your downloaded data anywhere you like, and link the path to ```DATA_DIR``` in ```core/data.py```. Otherwise, the download will still be automatically triggered.
### Train
Train with our default settings (same as in the paper):
```
python3 main_cls.py --exp_name=curvenet_cls_1
```
Train with customized settings with the flags: ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_cls.sh``` and simply run:
```
./start_cls.sh
```
**NOTE:** Our reported model achieves **93.8%/94.2%** accuracy (see sections below). However, due to randomness, the best result might require repeated training processes. Hence, we also provide another benchmark result here (where we repeated 5 runs with different random seeds, and report their average), which is **93.65%** accuracy.
<!-- **NOTE:** Due to randomness, the results could be slightly different than the one reported in our paper. We repeated 5 runs with different random seeds, and got an average of **93.65%** classification accuracy. -->
### Evaluation
Evaluate without voting:
```
python3 main_cls.py --exp_name=curvenet_cls_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_cls.sh``` and simply run:
```
./test_cls.sh
```
For voting, we used the ```voting_evaluate_cls.py``` script provided in [RSCNN](https://github.com/Yochengliu/Relation-Shape-CNN). Please refer to their license for usage.
### Evaluation with our pretrained model:
Please download our pretrained model ```cls/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_cls.py --exp_name=curvenet_cls_pretrained --eval --model_path=PATH_TO_PRETRAINED/cls/models/model.t7
```
## Point Cloud Part Segmentation
### Data
The ShapeNet Part dataset is primarily used for the part segmentation experiments. At your first run, the program will automatically download the data if it is not in ```data/```. Or, you can manually download the [official data](https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip) and unzip to ```data/```.
Alternatively, you can place your downloaded data anywhere you like, and link the path to ```DATA_DIR``` in ```core/data.py```. Otherwise, the download will still be automatically triggered.
### Train
Train with our default settings (same as in the paper):
```
python3 main_partseg.py --exp_name=curvenet_seg_1
```
Train with customized settings with the flags: ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_part.sh``` and simply run:
```
./start_part.sh
```
**NOTE:** Our reported model achieves **86.6%/86.8%** mIoU (see sections below). However, due to randomness, the best result might require repeated training processes. Hence, we also provide another benchmark result here (where we repeated 5 runs with different random seeds, and report their average), which is **86.46** mIoU.
<!-- **NOTE:** Due to randomness, the results could be slightly different than the one reported in our paper. We repeated 5 runs with different random seeds, and got an average of **86.46** mIoU. -->
### Evaluation
Evaluate without voting:
```
python3 main_partseg.py --exp_name=curvenet_seg_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_cls.sh``` and simply run:
```
./test_cls.sh
```
For voting, we used the ```voting_evaluate_partseg.py``` script provided in [RSCNN](https://github.com/Yochengliu/Relation-Shape-CNN). Please refer to their license for usage.
### Evaluation with our pretrained model:
Please download our pretrained model ```partseg/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_partseg.py --exp_name=curvenet_seg_pretrained --eval=True --model_path=PATH_TO_PRETRAINED/partseg/models/model.t7
```
## Point Cloud Normal Estimation
### Data
The ModelNet40 dataset is used for the normal estimation experiments. We have preprocessed the raw ModelNet40 dataset into ```.h5``` files. Each point cloud instance contains 2048 randomly sampled points and point-to-point normal ground truths.
Please download our processed data [here](https://drive.google.com/file/d/1j6lB3ZOF0_x_l9bqdchAxIYBi7Devie8/view?usp=sharing) and place it to ```data/```, or you need to specify the data root path in ```core/data.py```.
### Train
Train with our default settings (same as in the paper):
```
python3 main_normal.py --exp_name=curvenet_normal_1
```
Train with customized settings with the flags: ```--multiplier```, ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_normal.sh``` and simply run:
```
./start_normal.sh
```
### Evaluation
Evaluate without voting:
```
python3 main_normal.py --exp_name=curvenet_normal_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_normal.sh``` and simply run:
```
./test_normal.sh
```
### Evaluation with our pretrained model:
Please download our pretrained model ```normal/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_normal.py --exp_name=curvenet_normal_pretrained --eval=True --model_path=PATH_TO_PRETRAINED/normal/models/model.t7
```
## Citation
If you find this repo useful in your work or research, please cite:
```
@InProceedings{Xiang_2021_ICCV,
author = {Xiang, Tiange and Zhang, Chaoyi and Song, Yang and Yu, Jianhui and Cai, Weidong},
title = {Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {915-924}
}
```
## Acknowledgement
Our code borrows a lot from:
- [DGCNN](https://github.com/WangYueFt/dgcnn)
- [DGCNN.pytorch](https://github.com/AnTao97/dgcnn.pytorch)
- [CloserLook3D](https://github.com/zeliu98/CloserLook3D)
================================================
FILE: CurveNet/core/data.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: data.py
@Time: 2018/10/13 6:21 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/1/21 3:10 PM
"""
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
# change this to your data root
DATA_DIR = '../data/'
def download_modelnet40():
    """Download and extract the ModelNet40 HDF5 archive into DATA_DIR if absent.

    NOTE(review): relies on wget/unzip/mv via os.system, so it assumes a POSIX
    shell and network access; failures are silent (exit codes are ignored).
    """
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
        www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
        # Do NOT pre-create DATA_DIR/modelnet40_ply_hdf5_2048 before this mv:
        # when the destination directory already exists, `mv` nests the
        # extracted folder inside it, and load_data_cls's glob then finds no
        # .h5 files. Letting mv create the directory itself avoids the nesting.
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))
def download_shapenetpart():
    """Download and extract the ShapeNet-Part HDF5 archive into DATA_DIR if absent.

    NOTE(review): uses wget/unzip/mv via os.system with no error checking.
    The mv destination is a directory created just above, so `mv` will place
    the extracted folder *inside* it; load_data_partseg's glob expects files
    under shapenet_part_seg_hdf5_data/hdf5_data/ — confirm the archive's
    top-level folder name matches this layout before relying on it.
    """
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
        os.mkdir(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data'))
        www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
        os.system('rm %s' % (zipfile))
def load_data_normal(partition):
    """Load the preprocessed ModelNet40 normal-estimation split.

    Parameters
    ----------
    partition : str
        Split name embedded in the file name (e.g. 'train' or 'test').

    Returns
    -------
    (data, label) : tuple of np.ndarray
        Point coordinates and per-point normals, both float32.
    """
    path = os.path.join(DATA_DIR, 'modelnet40_normal', 'normal_%s.h5' % partition)
    # 'r' instead of the original 'r+': the file is only read, and read-only
    # mode also works on write-protected data directories. The context manager
    # guarantees the file is closed even if a read raises.
    with h5py.File(path, 'r') as f:
        data = f['xyz'][:].astype('float32')
        label = f['normal'][:].astype('float32')
    return data, label
def load_data_cls(partition):
    """Load the ModelNet40 classification split, downloading it if needed.

    Parameters
    ----------
    partition : str
        Split name matched against the .h5 file names ('train' or 'test').

    Returns
    -------
    (all_data, all_label) : tuple of np.ndarray
        Concatenated point clouds (float32) and labels (int64) from every
        matching HDF5 file.
    """
    download_modelnet40()
    all_data = []
    all_label = []
    for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5' % partition)):
        # 'r' instead of the original 'r+': files are only read, so write
        # access is unnecessary; the context manager ensures each file closes.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    return all_data, all_label
def load_data_partseg(partition):
    """Load a ShapeNet-Part split, downloading the dataset if needed.

    Parameters
    ----------
    partition : str
        'trainval' merges the train and val files; any other value is
        matched against the .h5 file names directly (e.g. 'test').

    Returns
    -------
    (all_data, all_label, all_seg) : tuple of np.ndarray
        Point clouds (float32), shape labels (int64), and per-point part
        segmentation ids (int64), concatenated over all matching files.
    """
    download_shapenetpart()
    all_data = []
    all_label = []
    all_seg = []
    if partition == 'trainval':
        file = glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*train*.h5')) \
               + glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*val*.h5'))
    else:
        file = glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*%s*.h5' % partition))
    for h5_name in file:
        # 'r' instead of the original 'r+': read-only access suffices and the
        # context manager guarantees the handle is released.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
            seg = f['pid'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
        all_seg.append(seg)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    all_seg = np.concatenate(all_seg, axis=0)
    return all_data, all_label, all_seg
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
def rotate_pointcloud(pointcloud):
theta = np.pi*2 * np.random.uniform()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix) # random rotation (x,z)
return pointcloud
class ModelNet40(Dataset):
    """ModelNet40 classification dataset.

    Wraps the HDF5 splits loaded by ``load_data_cls``. During training, each
    returned cloud is randomly scaled/shifted and its point order shuffled.
    """

    def __init__(self, num_points, partition='train'):
        self.data, self.label = load_data_cls(partition)
        self.num_points = num_points
        self.partition = partition

    def __getitem__(self, item):
        # Keep only the first num_points points of the stored cloud.
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        if self.partition != 'train':
            return pointcloud, label
        # Training-time augmentation: random anisotropic scale + shift,
        # then an in-place shuffle of the point order.
        pointcloud = translate_pointcloud(pointcloud)
        np.random.shuffle(pointcloud)
        return pointcloud, label

    def __len__(self):
        return self.data.shape[0]
class ModelNetNormal(Dataset):
    """ModelNet40 normal-estimation dataset (per-point normal targets).

    Yields (points, normals) pairs truncated to ``num_points``. During
    training the point order is randomly permuted, with the same permutation
    applied to both arrays so they stay aligned.
    """

    def __init__(self, num_points, partition='train'):
        self.data, self.label = load_data_normal(partition)
        self.num_points = num_points
        self.partition = partition

    def __getitem__(self, item):
        points = self.data[item][:self.num_points]
        normals = self.label[item][:self.num_points]
        if self.partition != 'train':
            return points, normals
        # Permute the first num_points indices; indexing the stored arrays
        # with the permutation keeps points and normals in correspondence.
        perm = np.arange(0, points.shape[0], dtype=np.int64)
        np.random.shuffle(perm)
        return self.data[item][perm], self.label[item][perm]

    def __len__(self):
        return self.data.shape[0]
class ShapeNetPart(Dataset):
    """ShapeNet-Part segmentation dataset.

    Yields (points, shape_label, per_point_seg) triples. When ``class_choice``
    names one of the 16 categories, the dataset is filtered to that class and
    the part-id range is narrowed accordingly; otherwise all 50 part labels
    are exposed. The 'trainval' split applies random scale/shift augmentation
    and shuffles point order.
    """

    def __init__(self, num_points=2048, partition='train', class_choice=None):
        self.data, self.label, self.seg = load_data_partseg(partition)
        self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
                       'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
                       'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
        # Number of parts per category, and where each category's part ids
        # start in the global 0..49 label space.
        self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
        self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
        self.num_points = num_points
        self.partition = partition
        self.class_choice = class_choice
        if self.class_choice is None:
            self.seg_num_all = 50
            self.seg_start_index = 0
            return
        # Restrict every array to the chosen category.
        id_choice = self.cat2id[self.class_choice]
        indices = (self.label == id_choice).squeeze()
        self.data = self.data[indices]
        self.label = self.label[indices]
        self.seg = self.seg[indices]
        self.seg_num_all = self.seg_num[id_choice]
        self.seg_start_index = self.index_start[id_choice]

    def __getitem__(self, item):
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        seg = self.seg[item][:self.num_points]
        if self.partition == 'trainval':
            # Augment, then shuffle points and their seg labels together.
            pointcloud = translate_pointcloud(pointcloud)
            order = list(range(pointcloud.shape[0]))
            np.random.shuffle(order)
            pointcloud = pointcloud[order]
            seg = seg[order]
        return pointcloud, label, seg

    def __len__(self):
        return self.data.shape[0]
================================================
FILE: CurveNet/core/main_cls.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: main_cls.py
@Time: 2018/10/13 10:39 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from data import ModelNet40
from models.curvenet_cls import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
def _init_():
    """Seed all RNGs for reproducibility and set up the checkpoint directory.

    NOTE(review): reads module-level globals ``seed`` and ``args`` that are
    defined elsewhere in this file (presumably in the __main__ section, not
    visible here) — calling this before they exist raises NameError.
    """
    # fix random seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    torch.set_printoptions(10)
    # Disable cuDNN autotuning and force deterministic kernels so repeated
    # runs with the same seed are reproducible.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(seed)
    # prepare file structures
    if not os.path.exists('../checkpoints'):
        os.makedirs('../checkpoints')
    if not os.path.exists('../checkpoints/'+args.exp_name):
        os.makedirs('../checkpoints/'+args.exp_name)
    if not os.path.exists('../checkpoints/'+args.exp_name+'/'+'models'):
        os.makedirs('../checkpoints/'+args.exp_name+'/'+'models')
    # Snapshot the training script and the model definition next to the
    # checkpoint so the exact code used for this experiment is preserved.
    os.system('cp main_cls.py ../checkpoints/'+args.exp_name+'/main_cls.py.backup')
    os.system('cp models/curvenet_cls.py ../checkpoints/'+args.exp_name+'/curvenet_cls.py.backup')
def train(args, io):
    """Train CurveNet on ModelNet40, checkpointing the best test accuracy.

    Args:
        args: parsed CLI namespace (batch sizes, epochs, lr, scheduler, cuda flag).
        io: IOStream used to log to both stdout and the run log file.
    """
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=False, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")
    io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")

    # create model
    model = CurveNet().to(device)
    model = nn.DataParallel(model)

    if args.use_sgd:
        io.cprint("Use SGD")
        # effective SGD lr is args.lr * 100 (0.1 with the default 0.001)
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        io.cprint("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
    elif args.scheduler == 'step':
        scheduler = MultiStepLR(opt, [120, 160], gamma=0.1)

    criterion = cal_loss
    best_test_acc = 0

    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            # channels-first layout for the model's Conv1d stacks
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            # clip gradient norm to 1 to stabilise training
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            # loss.item() is a per-sample mean; re-weight by batch size so the
            # epoch average stays exact even with uneven batches
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        if args.scheduler == 'cos':
            scheduler.step()
        elif args.scheduler == 'step':
            # decay until the lr reaches the 1e-5 floor, then clamp it there
            if opt.param_groups[0]['lr'] > 1e-5:
                scheduler.step()
            if opt.param_groups[0]['lr'] < 1e-5:
                for param_group in opt.param_groups:
                    param_group['lr'] = 1e-5
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f' % (epoch, train_loss*1.0/count,
                                                            metrics.accuracy_score(
                                                                train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        # NOTE(review): evaluation runs without torch.no_grad(), so autograd
        # graphs are still built here and memory use is higher than necessary
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f' % (epoch, test_loss*1.0/count, test_acc)
        io.cprint(outstr)
        # keep only the best-so-far checkpoint
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
        io.cprint('best: %.3f' % best_test_acc)
def test(args, io):
    """Evaluate a pretrained CurveNet checkpoint on the ModelNet40 test split.

    Args:
        args: parsed CLI namespace; ``args.model_path`` points at the checkpoint.
        io: IOStream logger.
    """
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                             batch_size=args.test_batch_size, shuffle=False, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    # load the model; wrapping in DataParallel first matches the 'module.'
    # key prefix the checkpoint was saved with
    model = CurveNet().to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
        # channels-first layout for the model's Conv1d stacks
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f'%(test_acc)
    io.cprint(outstr)
if __name__ == "__main__":
    # Training settings
    def _str2bool(v):
        """Parse a CLI boolean.

        argparse's ``type=bool`` is a trap: ``bool('False')`` is True because
        any non-empty string is truthy, so ``--eval False`` used to enable
        evaluation.  Parse the text explicitly instead.
        """
        return str(v).lower() in ('true', 't', 'yes', 'y', '1')

    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
                        choices=['modelnet40'])
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of episode to train ')
    parser.add_argument('--use_sgd', type=_str2bool, default=True,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--scheduler', type=str, default='cos', metavar='N',
                        choices=['cos', 'step'],
                        help='Scheduler to use, [cos, step]')
    parser.add_argument('--no_cuda', type=_str2bool, default=False,
                        help='enables CUDA training')
    parser.add_argument('--eval', type=_str2bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    args = parser.parse_args()

    # a random seed is drawn per run and logged so the run can be reproduced
    seed = np.random.randint(1, 10000)

    _init_()

    if args.eval:
        io = IOStream('../checkpoints/' + args.exp_name + '/eval.log')
    else:
        io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))
    io.cprint('random seed is: ' + str(seed))

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda:
        io.cprint(
            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
    else:
        io.cprint('Using CPU')

    if not args.eval:
        train(args, io)
    else:
        # gradients are never needed during evaluation
        with torch.no_grad():
            test(args, io)
================================================
FILE: CurveNet/core/main_normal.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: main_normal.py
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from data import ModelNetNormal
from models.curvenet_normal import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import IOStream
def _init_():
    """Seed every RNG for reproducibility and prepare the checkpoint tree.

    Relies on the module-level ``seed`` and ``args`` globals set in the
    ``__main__`` block.  Also copies the training script and model source
    into the experiment directory as a backup.
    """
    # fix random seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    torch.set_printoptions(10)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(seed)

    # prepare file structures: exist_ok + parent creation replaces the
    # original check-then-create sequence (race-free, one call)
    os.makedirs('../checkpoints/' + args.exp_name + '/models', exist_ok=True)
    os.system('cp main_normal.py ../checkpoints/' + args.exp_name + '/main_normal.py.backup')
    os.system('cp models/curvenet_normal.py ../checkpoints/' + args.exp_name + '/curvenet_normal.py.backup')
def train(args, io):
    """Train CurveNet for per-point normal estimation.

    A cosine-embedding loss with fixed target +1 pushes each predicted normal
    towards the ground-truth direction.  The checkpoint with the lowest test
    loss is kept.

    Args:
        args: parsed CLI namespace (batch sizes, epochs, lr, multiplier, ...).
        io: IOStream logger.
    """
    train_loader = DataLoader(ModelNetNormal(args.num_points, partition='train'),
                              num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNetNormal(args.num_points, partition='test'),
                             num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    # create model
    model = CurveNet(args.multiplier).to(device)
    model = nn.DataParallel(model)
    io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")

    if args.use_sgd:
        io.cprint("Use SGD")
        # effective SGD lr is args.lr * 100
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        io.cprint("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
    elif args.scheduler == 'step':
        scheduler = MultiStepLR(opt, [140, 180], gamma=0.1)

    criterion = torch.nn.CosineEmbeddingLoss()
    # Fixed target +1 means "maximise cosine similarity".  Built once on the
    # selected device: the original allocated torch.tensor(1).cuda() on every
    # batch, which crashed CPU-only runs and added needless allocations.
    target = torch.tensor(1, device=device)

    best_test_loss = 99

    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        for data, seg in train_loader:
            data, seg = data.to(device), seg.to(device)
            # channels-first layout for the model's Conv1d stacks
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            seg_pred = model(data)
            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
            # compare every predicted normal against its ground-truth vector
            loss = criterion(seg_pred.view(-1, 3), seg.view(-1, 3).squeeze(), target)
            loss.backward()
            # clip gradient norm to 1 to stabilise training
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            opt.step()
            count += batch_size
            train_loss += loss.item() * batch_size

        if args.scheduler == 'cos':
            scheduler.step()
        elif args.scheduler == 'step':
            # decay until the lr reaches the 1e-5 floor, then clamp it there
            if opt.param_groups[0]['lr'] > 1e-5:
                scheduler.step()
            if opt.param_groups[0]['lr'] < 1e-5:
                for param_group in opt.param_groups:
                    param_group['lr'] = 1e-5

        outstr = 'Train %d, loss: %.6f' % (epoch, train_loss/count)
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        for data, seg in test_loader:
            data, seg = data.to(device), seg.to(device)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            seg_pred = model(data)
            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
            loss = criterion(seg_pred.view(-1, 3), seg.view(-1, 3).squeeze(), target)
            count += batch_size
            test_loss += loss.item() * batch_size

        # keep only the lowest-test-loss checkpoint
        if test_loss*1.0/count <= best_test_loss:
            best_test_loss = test_loss*1.0/count
            torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
        outstr = 'Test %d, loss: %.6f, best loss %.6f' % (epoch, test_loss/count, best_test_loss)
        io.cprint(outstr)
def test(args, io):
    """Evaluate a pretrained normal-estimation checkpoint on the test split.

    Args:
        args: parsed CLI namespace; ``args.model_path`` points at the checkpoint.
        io: IOStream logger.
    """
    test_loader = DataLoader(ModelNetNormal(args.num_points, partition='test'),
                             batch_size=args.test_batch_size, shuffle=False, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    # load the model; wrapping in DataParallel first matches the 'module.'
    # key prefix the checkpoint was saved with
    model = CurveNet(args.multiplier).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    criterion = torch.nn.CosineEmbeddingLoss()
    # fixed +1 target, built once on the right device (the original created
    # torch.tensor(1).cuda() per batch and crashed CPU-only evaluation)
    target = torch.tensor(1, device=device)
    model = model.eval()
    test_loss = 0.0
    count = 0
    for data, seg in test_loader:
        data, seg = data.to(device), seg.to(device)
        # channels-first layout for the model's Conv1d stacks
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        seg_pred = model(data)
        seg_pred = seg_pred.permute(0, 2, 1).contiguous()
        loss = criterion(seg_pred.view(-1, 3), seg.view(-1, 3).squeeze(), target)
        count += batch_size
        test_loss += loss.item() * batch_size

    outstr = 'Test :: test loss: %.6f' % (test_loss*1.0/count)
    io.cprint(outstr)
if __name__ == "__main__":
    # Training settings
    def _str2bool(v):
        """Parse a CLI boolean.

        argparse's ``type=bool`` is a trap: ``bool('False')`` is True because
        any non-empty string is truthy, so ``--eval False`` used to enable
        evaluation.  Parse the text explicitly instead.
        """
        return str(v).lower() in ('true', 't', 'yes', 'y', '1')

    parser = argparse.ArgumentParser(description='Point Cloud Part Segmentation')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of episode to train ')
    parser.add_argument('--use_sgd', type=_str2bool, default=True,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
                        help='learning rate')
    parser.add_argument('--multiplier', type=float, default=2.0, metavar='MP',
                        help='network expansion multiplier')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--scheduler', type=str, default='cos', metavar='N',
                        choices=['cos', 'step'],
                        help='Scheduler to use, [cos, step]')
    parser.add_argument('--no_cuda', type=_str2bool, default=False,
                        help='enables CUDA training')
    parser.add_argument('--eval', type=_str2bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    args = parser.parse_args()

    # a random seed is drawn per run and logged so the run can be reproduced
    seed = np.random.randint(1, 10000)

    _init_()

    io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))
    io.cprint('random seed is: ' + str(seed))

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda:
        io.cprint(
            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
    else:
        io.cprint('Using CPU')

    if not args.eval:
        train(args, io)
    else:
        # gradients are never needed during evaluation
        with torch.no_grad():
            test(args, io)
================================================
FILE: CurveNet/core/main_partseg.py
================================================
"""
@Author: An Tao
@Contact: ta19@mails.tsinghua.edu.cn
@File: main_partseg.py
@Time: 2019/12/31 11:17 AM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR, MultiStepLR
from data import ShapeNetPart
from models.curvenet_seg import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
def _init_():
    """Seed every RNG for reproducibility and prepare the checkpoint tree.

    Relies on the module-level ``seed`` and ``args`` globals set in the
    ``__main__`` block.  Also copies the training script and model source
    into the experiment directory as a backup.
    """
    # fix random seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    torch.set_printoptions(10)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(seed)

    # prepare file structures: exist_ok + parent creation replaces the
    # original check-then-create sequence (race-free, one call)
    os.makedirs('../checkpoints/' + args.exp_name + '/models', exist_ok=True)
    os.system('cp main_partseg.py ../checkpoints/' + args.exp_name + '/main_partseg.py.backup')
    os.system('cp models/curvenet_seg.py ../checkpoints/' + args.exp_name + '/curvenet_seg.py.backup')
def calculate_shape_IoU(pred_np, seg_np, label, class_choice, eva=False):
    """Compute the mean part-IoU of each shape in a batch.

    Args:
        pred_np: (num_shapes, num_points) predicted part labels.
        seg_np: (num_shapes, num_points) ground-truth part labels.
        label: per-shape object-category indices into ``seg_num``/``index_start``.
        class_choice: if truthy, every shape belongs to one chosen category and
            part ids are taken as 0..seg_num[label[0]]-1.
        eva: when True, also return per-category IoU lists.

    Returns:
        List of per-shape mean IoUs, plus a {category: [IoU, ...]} dict if
        ``eva`` is True.
    """
    label = label.squeeze()
    shape_ious = []
    category = {}
    for shape_idx in range(seg_np.shape[0]):
        if class_choice:
            # single-category evaluation: parts are numbered from zero
            parts = range(seg_num[label[0]])
        else:
            # global part numbering: offset by the category's first part id
            start = index_start[label[shape_idx]]
            parts = range(start, start + seg_num[label[shape_idx]])
        part_ious = []
        for part in parts:
            pred_mask = pred_np[shape_idx] == part
            gt_mask = seg_np[shape_idx] == part
            union = np.sum(pred_mask | gt_mask)
            if union == 0:
                # part absent from both prediction and ground truth: perfect
                part_ious.append(1)
            else:
                part_ious.append(np.sum(pred_mask & gt_mask) / float(union))
        shape_ious.append(np.mean(part_ious))
        category.setdefault(label[shape_idx], []).append(shape_ious[-1])
    if eva:
        return shape_ious, category
    return shape_ious
def train(args, io):
    """Train CurveNet for ShapeNet part segmentation; keep the best-mIoU checkpoint.

    Args:
        args: parsed CLI namespace (batch sizes, epochs, lr, class_choice, ...).
        io: IOStream used to log to both stdout and the run log file.
    """
    train_dataset = ShapeNetPart(partition='trainval', num_points=args.num_points, class_choice=args.class_choice)
    # tiny single-class subsets would lose too much data to drop_last
    if (len(train_dataset) < 100):
        drop_last = False
    else:
        drop_last = True
    train_loader = DataLoader(train_dataset, num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=drop_last)
    test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
                             num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")
    io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")

    seg_num_all = train_loader.dataset.seg_num_all
    seg_start_index = train_loader.dataset.seg_start_index

    # create model
    model = CurveNet().to(device)
    model = nn.DataParallel(model)

    if args.use_sgd:
        print("Use SGD")
        # effective SGD lr is args.lr * 100
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
    elif args.scheduler == 'step':
        scheduler = MultiStepLR(opt, [140, 180], gamma=0.1)

    criterion = cal_loss
    best_test_iou = 0

    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_true_cls = []
        train_pred_cls = []
        train_true_seg = []
        train_pred_seg = []
        train_label_seg = []
        for data, label, seg in train_loader:
            # shift seg labels so each batch starts at part id 0
            seg = seg - seg_start_index
            # one-hot encode the 16 object categories as an extra model input
            label_one_hot = np.zeros((label.shape[0], 16))
            for idx in range(label.shape[0]):
                label_one_hot[idx, label[idx]] = 1
            label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
            data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
            # channels-first layout for the model's Conv1d stacks
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            seg_pred = model(data, label_one_hot)
            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
            loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1, 1).squeeze())
            loss.backward()
            # clip gradient norm to 1 to stabilise training
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            opt.step()
            pred = seg_pred.max(dim=2)[1]               # (batch_size, num_points)
            count += batch_size
            # loss.item() is a per-point mean; re-weight by batch size so the
            # epoch average stays exact even with uneven batches
            train_loss += loss.item() * batch_size
            seg_np = seg.cpu().numpy()                  # (batch_size, num_points)
            pred_np = pred.detach().cpu().numpy()       # (batch_size, num_points)
            train_true_cls.append(seg_np.reshape(-1))   # (batch_size * num_points)
            train_pred_cls.append(pred_np.reshape(-1))  # (batch_size * num_points)
            train_true_seg.append(seg_np)
            train_pred_seg.append(pred_np)
            train_label_seg.append(label.reshape(-1))
        if args.scheduler == 'cos':
            scheduler.step()
        elif args.scheduler == 'step':
            # decay until the lr reaches the 1e-5 floor, then clamp it there
            if opt.param_groups[0]['lr'] > 1e-5:
                scheduler.step()
            if opt.param_groups[0]['lr'] < 1e-5:
                for param_group in opt.param_groups:
                    param_group['lr'] = 1e-5
        train_true_cls = np.concatenate(train_true_cls)
        train_pred_cls = np.concatenate(train_pred_cls)
        train_acc = metrics.accuracy_score(train_true_cls, train_pred_cls)
        avg_per_class_acc = metrics.balanced_accuracy_score(train_true_cls, train_pred_cls)
        train_true_seg = np.concatenate(train_true_seg, axis=0)
        train_pred_seg = np.concatenate(train_pred_seg, axis=0)
        train_label_seg = np.concatenate(train_label_seg)
        train_ious = calculate_shape_IoU(train_pred_seg, train_true_seg, train_label_seg, args.class_choice)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f, train iou: %.6f' % (epoch,
                                                                                                  train_loss*1.0/count,
                                                                                                  train_acc,
                                                                                                  avg_per_class_acc,
                                                                                                  np.mean(train_ious))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_true_cls = []
        test_pred_cls = []
        test_true_seg = []
        test_pred_seg = []
        test_label_seg = []
        # NOTE(review): evaluation runs without torch.no_grad(), so autograd
        # graphs are still built here and memory use is higher than necessary
        for data, label, seg in test_loader:
            seg = seg - seg_start_index
            label_one_hot = np.zeros((label.shape[0], 16))
            for idx in range(label.shape[0]):
                label_one_hot[idx, label[idx]] = 1
            label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
            data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            seg_pred = model(data, label_one_hot)
            seg_pred = seg_pred.permute(0, 2, 1).contiguous()
            loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1, 1).squeeze())
            pred = seg_pred.max(dim=2)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            seg_np = seg.cpu().numpy()
            pred_np = pred.detach().cpu().numpy()
            test_true_cls.append(seg_np.reshape(-1))
            test_pred_cls.append(pred_np.reshape(-1))
            test_true_seg.append(seg_np)
            test_pred_seg.append(pred_np)
            test_label_seg.append(label.reshape(-1))
        test_true_cls = np.concatenate(test_true_cls)
        test_pred_cls = np.concatenate(test_pred_cls)
        test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
        test_true_seg = np.concatenate(test_true_seg, axis=0)
        test_pred_seg = np.concatenate(test_pred_seg, axis=0)
        test_label_seg = np.concatenate(test_label_seg)
        test_ious = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f, test iou: %.6f, best iou %.6f' % (epoch,
                                                                                                             test_loss*1.0/count,
                                                                                                             test_acc,
                                                                                                             avg_per_class_acc,
                                                                                                             np.mean(test_ious), best_test_iou)
        io.cprint(outstr)
        # keep only the best-mIoU checkpoint
        if np.mean(test_ious) >= best_test_iou:
            best_test_iou = np.mean(test_ious)
            torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
def test(args, io):
    """Evaluate a pretrained part-segmentation checkpoint and report per-category mIoU.

    Args:
        args: parsed CLI namespace; ``args.model_path`` points at the checkpoint.
        io: IOStream logger.
    """
    # NOTE(review): shuffle=True on a test loader is unusual; the accumulated
    # metrics are order-independent, so results are unaffected
    test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    # load the model; wrapping in DataParallel first matches the 'module.'
    # key prefix the checkpoint was saved with
    seg_start_index = test_loader.dataset.seg_start_index
    model = CurveNet().to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    test_true_cls = []
    test_pred_cls = []
    test_true_seg = []
    test_pred_seg = []
    test_label_seg = []
    category = {}
    for data, label, seg in test_loader:
        # shift seg labels so each batch starts at part id 0
        seg = seg - seg_start_index
        # one-hot encode the 16 object categories as an extra model input
        label_one_hot = np.zeros((label.shape[0], 16))
        for idx in range(label.shape[0]):
            label_one_hot[idx, label[idx]] = 1
        label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
        data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
        # channels-first layout for the model's Conv1d stacks
        data = data.permute(0, 2, 1)
        seg_pred = model(data, label_one_hot)
        seg_pred = seg_pred.permute(0, 2, 1).contiguous()
        pred = seg_pred.max(dim=2)[1]
        seg_np = seg.cpu().numpy()
        pred_np = pred.detach().cpu().numpy()
        test_true_cls.append(seg_np.reshape(-1))
        test_pred_cls.append(pred_np.reshape(-1))
        test_true_seg.append(seg_np)
        test_pred_seg.append(pred_np)
        test_label_seg.append(label.reshape(-1))
    test_true_cls = np.concatenate(test_true_cls)
    test_pred_cls = np.concatenate(test_pred_cls)
    test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
    test_true_seg = np.concatenate(test_true_seg, axis=0)
    test_pred_seg = np.concatenate(test_pred_seg, axis=0)
    test_label_seg = np.concatenate(test_label_seg)
    test_ious, category = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice, eva=True)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f, test iou: %.6f' % (test_acc,
                                                                             avg_per_class_acc,
                                                                             np.mean(test_ious))
    io.cprint(outstr)

    # per-category mIoU report, sorted by category index
    results = []
    for key in category.keys():
        results.append((int(key), np.mean(category[key]), len(category[key])))
    results.sort(key=lambda x: x[0])
    for re in results:
        io.cprint('idx: %d mIoU: %.3f num: %d' % (re[0], re[1], re[2]))
if __name__ == "__main__":
    # Training settings
    def _str2bool(v):
        """Parse a CLI boolean.

        argparse's ``type=bool`` is a trap: ``bool('False')`` is True because
        any non-empty string is truthy, so ``--eval False`` used to enable
        evaluation.  Parse the text explicitly instead.
        """
        return str(v).lower() in ('true', 't', 'yes', 'y', '1')

    parser = argparse.ArgumentParser(description='Point Cloud Part Segmentation')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset', type=str, default='shapenetpart', metavar='N',
                        choices=['shapenetpart'])
    parser.add_argument('--class_choice', type=str, default=None, metavar='N',
                        choices=['airplane', 'bag', 'cap', 'car', 'chair',
                                 'earphone', 'guitar', 'knife', 'lamp', 'laptop',
                                 'motor', 'mug', 'pistol', 'rocket', 'skateboard', 'table'])
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of episode to train ')
    parser.add_argument('--use_sgd', type=_str2bool, default=True,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
                        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--scheduler', type=str, default='step', metavar='N',
                        choices=['cos', 'step'],
                        help='Scheduler to use, [cos, step]')
    parser.add_argument('--no_cuda', type=_str2bool, default=False,
                        help='enables CUDA training')
    parser.add_argument('--eval', type=_str2bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=2048,
                        help='num of points to use')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    args = parser.parse_args()

    # a random seed is drawn per run and logged so the run can be reproduced
    seed = np.random.randint(1, 10000)

    _init_()

    if args.eval:
        io = IOStream('../checkpoints/' + args.exp_name + '/eval.log')
    else:
        io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))
    io.cprint('random seed is: ' + str(seed))

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda:
        io.cprint(
            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
    else:
        io.cprint('Using CPU')

    if not args.eval:
        train(args, io)
    else:
        # gradients are never needed during evaluation
        with torch.no_grad():
            test(args, io)
================================================
FILE: CurveNet/core/models/curvenet_cls.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_cls.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None],
'long': [[10, 30], None, None, None]
}
class CurveNet(nn.Module):
    """CurveNet classification network.

    Stacks an initial local point-feature aggregation (LPFA) and four stages
    of paired CIC blocks that progressively downsample the cloud while
    widening channels, then global max+avg pooling and a small MLP head.

    Args:
        num_classes: number of output classes (default 40 for ModelNet40).
        k: neighbourhood size used by LPFA and the CIC blocks.
        setting: key into ``curve_config`` selecting per-stage curve
            parameters ([curve length, curve count] or None = no curves).
    """

    def __init__(self, num_classes=40, k=20, setting='default'):
        super(CurveNet, self).__init__()

        assert setting in curve_config

        additional_channel = 32
        # initial=True: LPFA builds its 9-d geometric input encoding itself
        self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)

        # encoder: four stages, each a pair of CIC blocks; npoint shrinks
        # (1024 -> 1024 -> 256 -> 64) while channels grow (64 -> 512), and the
        # ball-query radius widens with depth
        self.cic11 = CIC(npoint=1024, radius=0.05, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][0])
        self.cic12 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][0])

        self.cic21 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][1])
        self.cic22 = CIC(npoint=1024, radius=0.1, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][1])

        self.cic31 = CIC(npoint=256, radius=0.1, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][2])
        self.cic32 = CIC(npoint=256, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][2])

        self.cic41 = CIC(npoint=64, radius=0.2, k=k, in_channels=256, output_channels=512, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][3])
        self.cic42 = CIC(npoint=64, radius=0.4, k=k, in_channels=512, output_channels=512, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][3])

        # per-point lift to 1024 channels before global pooling
        self.conv0 = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True))
        # head input is 1024*2 because max- and avg-pooled features are concatenated
        self.conv1 = nn.Linear(1024 * 2, 512, bias=False)
        self.conv2 = nn.Linear(512, num_classes)
        self.bn1 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.5)

    def forward(self, xyz):
        """Classify a batch of point clouds.

        Args:
            xyz: point coordinates in channels-first layout, as produced by the
                training scripts' ``data.permute(0, 2, 1)``.

        Returns:
            Class logits of shape (batch, num_classes).
        """
        l0_points = self.lpfa(xyz, xyz)

        # encoder: each stage returns (downsampled xyz, features)
        l1_xyz, l1_points = self.cic11(xyz, l0_points)
        l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)

        l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
        l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)

        l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
        l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)

        l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
        l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)

        x = self.conv0(l4_points)
        # global descriptor: concatenated max- and average-pooled features
        x_max = F.adaptive_max_pool1d(x, 1)
        x_avg = F.adaptive_avg_pool1d(x, 1)

        x = torch.cat((x_max, x_avg), dim=1).squeeze(-1)
        # unsqueeze/squeeze wraps BatchNorm1d around the linear layer's output
        x = F.relu(self.bn1(self.conv1(x).unsqueeze(-1)), inplace=True).squeeze(-1)
        x = self.dp1(x)
        x = self.conv2(x)
        return x
================================================
FILE: CurveNet/core/models/curvenet_normal.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_normal.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None]
}
class CurveNet(nn.Module):
    """CurveNet for per-point normal estimation (encoder-decoder).

    A CIC-block encoder downsamples the cloud in four stages, then three
    feature-propagation layers with attention upsample back to full
    resolution; a final 1x1 convolution predicts a 3-vector per point.

    Args:
        num_classes: output channels per point (3 = normal vector).
        k: neighbourhood size used by LPFA and most CIC blocks.
        multiplier: width multiplier applied to every stage's channel count.
        setting: key into ``curve_config`` selecting per-stage curve
            parameters ([curve length, curve count] or None = no curves).
    """

    def __init__(self, num_classes=3, k=20, multiplier=1.0, setting='default'):
        super(CurveNet, self).__init__()

        assert setting in curve_config

        additional_channel = 64
        # base widths per stage, scaled by the multiplier
        channels = [128, 256, 512, 1024]
        channels = [int(c * multiplier) for c in channels]
        # initial=True: LPFA builds its 9-d geometric input encoding itself
        self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)

        # encoder: npoint shrinks 1024 -> 256 -> 64 -> 16 while the radius
        # widens; the deepest stage uses a smaller k (15) for its 16 points
        self.cic11 = CIC(npoint=1024, radius=0.1, k=k, in_channels=additional_channel, output_channels=channels[0], bottleneck_ratio=2, curve_config=curve_config[setting][0])
        self.cic12 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0], output_channels=channels[0], bottleneck_ratio=4, curve_config=curve_config[setting][0])

        self.cic21 = CIC(npoint=256, radius=0.2, k=k, in_channels=channels[0], output_channels=channels[1], bottleneck_ratio=2, curve_config=curve_config[setting][1])
        self.cic22 = CIC(npoint=256, radius=0.2, k=k, in_channels=channels[1], output_channels=channels[1], bottleneck_ratio=4, curve_config=curve_config[setting][1])

        self.cic31 = CIC(npoint=64, radius=0.4, k=k, in_channels=channels[1], output_channels=channels[2], bottleneck_ratio=2, curve_config=curve_config[setting][2])
        self.cic32 = CIC(npoint=64, radius=0.4, k=k, in_channels=channels[2], output_channels=channels[2], bottleneck_ratio=4, curve_config=curve_config[setting][2])

        self.cic41 = CIC(npoint=16, radius=0.8, k=15, in_channels=channels[2], output_channels=channels[3], bottleneck_ratio=2, curve_config=curve_config[setting][3])
        self.cic42 = CIC(npoint=16, radius=0.8, k=15, in_channels=channels[3], output_channels=channels[3], bottleneck_ratio=4, curve_config=curve_config[setting][3])

        #self.cic43 = CIC(npoint=16, radius=0.8, k=15, in_channels=2048, output_channels=2048, bottleneck_ratio=4, curve_config=curve_config[setting][3])

        # decoder: attention-gated feature propagation back to 1024 points,
        # each upsampling step refined by a CIC block
        self.fp3 = PointNetFeaturePropagation(in_channel=channels[3] + channels[2], mlp=[channels[2], channels[2]], att=[channels[3], channels[3]//2, channels[3]//8])
        self.up_cic4 = CIC(npoint=64, radius=0.8, k=k, in_channels=channels[2], output_channels=channels[2], bottleneck_ratio=4)

        self.fp2 = PointNetFeaturePropagation(in_channel=channels[2] + channels[1], mlp=[channels[1], channels[1]], att=[channels[2], channels[2]//2, channels[2]//8])
        self.up_cic3 = CIC(npoint=256, radius=0.4, k=k, in_channels=channels[1], output_channels=channels[1], bottleneck_ratio=4)

        self.fp1 = PointNetFeaturePropagation(in_channel=channels[1] + channels[0], mlp=[channels[0], channels[0]], att=[channels[1], channels[1]//2, channels[1]//8])
        # +3 because the xyz coordinates are concatenated to the features
        self.up_cic2 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0]+3, output_channels=channels[0], bottleneck_ratio=4)
        self.up_cic1 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0], output_channels=channels[0], bottleneck_ratio=4)

        # NOTE(review): point_conv is defined but never used in forward()
        self.point_conv = nn.Sequential(
            nn.Conv2d(9, additional_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(additional_channel),
            nn.LeakyReLU(negative_slope=0.2, inplace=True))

        # per-point prediction head
        self.conv1 = nn.Conv1d(channels[0], num_classes, 1)

    def forward(self, xyz):
        """Predict a per-point 3-vector (normal) for each input point.

        Args:
            xyz: point coordinates in channels-first layout, as produced by the
                training scripts' ``data.permute(0, 2, 1)``.

        Returns:
            Per-point predictions with num_classes channels (channels-first).
        """
        l0_points = self.lpfa(xyz, xyz)

        # encoder: each stage returns (downsampled xyz, features)
        l1_xyz, l1_points = self.cic11(xyz, l0_points)
        l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)

        l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
        l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)

        l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
        l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)

        l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
        l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
        #l4_xyz, l4_points = self.cic43(l4_xyz, l4_points)

        # decoder: propagate deep features back up through the skip levels
        l3_points = self.fp3(l3_xyz, l4_xyz, l3_points, l4_points)
        l3_xyz, l3_points = self.up_cic4(l3_xyz, l3_points)
        l2_points = self.fp2(l2_xyz, l3_xyz, l2_points, l3_points)
        l2_xyz, l2_points = self.up_cic3(l2_xyz, l2_points)
        l1_points = self.fp1(l1_xyz, l2_xyz, l1_points, l2_points)

        # append raw coordinates before the final refinement blocks
        x = torch.cat((l1_xyz, l1_points), dim=1)
        xyz, x = self.up_cic2(l1_xyz, x)
        xyz, x = self.up_cic1(xyz, x)

        x = self.conv1(x)
        return x
================================================
FILE: CurveNet/core/models/curvenet_seg.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_seg.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
# Per-stage curve settings: [curve_num, curve_length] for each encoder stage;
# None disables curve grouping for that stage (only the first two stages walk
# curves in the 'default' setting).
curve_config = {
    'default': [[100, 5], [100, 5], None, None, None]
}
class CurveNet(nn.Module):
    """CurveNet for ShapeNet part segmentation.

    Encoder: five stages of paired CIC blocks downsampling
    2048 -> 512 -> 128 -> 32 -> 8 points; the first two stages additionally
    use curve grouping (see module-level ``curve_config``). Decoder:
    attention-gated feature propagation back to 2048 points, fused with two
    global embeddings and the per-shape one-hot category label ``l``.

    Args:
        num_classes: number of part labels predicted per point.
        category: size of the one-hot object-category vector ``l``.
        k: kNN neighbourhood size used by most CIC blocks.
        setting: key into ``curve_config`` selecting per-stage curve params.
    """
    def __init__(self, num_classes=50, category=16, k=32, setting='default'):
        super(CurveNet, self).__init__()
        assert setting in curve_config
        additional_channel = 32
        # initial geometric feature lifting (9-dim encoding -> 32 channels)
        self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
        # encoder
        self.cic11 = CIC(npoint=2048, radius=0.2, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, curve_config=curve_config[setting][0])
        self.cic12 = CIC(npoint=2048, radius=0.2, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, curve_config=curve_config[setting][0])
        self.cic21 = CIC(npoint=512, radius=0.4, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, curve_config=curve_config[setting][1])
        self.cic22 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, curve_config=curve_config[setting][1])
        self.cic31 = CIC(npoint=128, radius=0.8, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, curve_config=curve_config[setting][2])
        self.cic32 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, curve_config=curve_config[setting][2])
        # deeper stages use smaller explicit k (31, 7) because only 32 / 8
        # points remain at those resolutions
        self.cic41 = CIC(npoint=32, radius=1.2, k=31, in_channels=256, output_channels=512, bottleneck_ratio=2, curve_config=curve_config[setting][3])
        self.cic42 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4, curve_config=curve_config[setting][3])
        self.cic51 = CIC(npoint=8, radius=2.0, k=7, in_channels=512, output_channels=1024, bottleneck_ratio=2, curve_config=curve_config[setting][4])
        self.cic52 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
        self.cic53 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
        # decoder
        self.fp4 = PointNetFeaturePropagation(in_channel=1024 + 512, mlp=[512, 512], att=[1024, 512, 256])
        self.up_cic5 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4)
        self.fp3 = PointNetFeaturePropagation(in_channel=512 + 256, mlp=[256, 256], att=[512, 256, 128])
        self.up_cic4 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
        self.fp2 = PointNetFeaturePropagation(in_channel=256 + 128, mlp=[128, 128], att=[256, 128, 64])
        self.up_cic3 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4)
        self.fp1 = PointNetFeaturePropagation(in_channel=128 + 64, mlp=[64, 64], att=[128, 64, 32])
        # in_channels = emb2(128) + emb1(64) + l1 features(64) + category + xyz(3)
        self.up_cic2 = CIC(npoint=2048, radius=0.2, k=k, in_channels=128+64+64+category+3, output_channels=256, bottleneck_ratio=4)
        self.up_cic1 = CIC(npoint=2048, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
        # global shape embeddings taken from the two deepest encoder stages
        self.global_conv2 = nn.Sequential(
            nn.Conv1d(1024, 128, kernel_size=1, bias=False),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(negative_slope=0.2))
        self.global_conv1 = nn.Sequential(
            nn.Conv1d(512, 64, kernel_size=1, bias=False),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(negative_slope=0.2))
        self.conv1 = nn.Conv1d(256, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm1d(256)
        self.drop1 = nn.Dropout(0.5)
        self.conv2 = nn.Conv1d(256, num_classes, 1)
        # squeeze-and-excitation channel gating before the classifier head
        self.se = nn.Sequential(nn.AdaptiveAvgPool1d(1),
                                nn.Conv1d(256, 256//8, 1, bias=False),
                                nn.BatchNorm1d(256//8),
                                nn.LeakyReLU(negative_slope=0.2),
                                nn.Conv1d(256//8, 256, 1, bias=False),
                                nn.Sigmoid())

    def forward(self, xyz, l=None):
        """xyz: [B, 3, N] coordinates; l: [B, category] one-hot object label.

        NOTE(review): despite the ``l=None`` default, the concatenation below
        fails when ``l`` is None — callers appear required to pass the label.
        Returns per-point part logits [B, num_classes, N].
        """
        batch_size = xyz.size(0)
        l0_points = self.lpfa(xyz, xyz)
        # encoder
        l1_xyz, l1_points = self.cic11(xyz, l0_points)
        l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)
        l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
        l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)
        l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
        l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)
        l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
        l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
        l5_xyz, l5_points = self.cic51(l4_xyz, l4_points)
        l5_xyz, l5_points = self.cic52(l5_xyz, l5_points)
        l5_xyz, l5_points = self.cic53(l5_xyz, l5_points)
        # global features
        emb1 = self.global_conv1(l4_points)
        emb1 = emb1.max(dim=-1, keepdim=True)[0] # bs, 64, 1
        emb2 = self.global_conv2(l5_points)
        emb2 = emb2.max(dim=-1, keepdim=True)[0] # bs, 128, 1
        # Feature Propagation layers
        l4_points = self.fp4(l4_xyz, l5_xyz, l4_points, l5_points)
        l4_xyz, l4_points = self.up_cic5(l4_xyz, l4_points)
        l3_points = self.fp3(l3_xyz, l4_xyz, l3_points, l4_points)
        l3_xyz, l3_points = self.up_cic4(l3_xyz, l3_points)
        l2_points = self.fp2(l2_xyz, l3_xyz, l2_points, l3_points)
        l2_xyz, l2_points = self.up_cic3(l2_xyz, l2_points)
        l1_points = self.fp1(l1_xyz, l2_xyz, l1_points, l2_points)
        if l is not None:
            l = l.view(batch_size, -1, 1)
            emb = torch.cat((emb1, emb2, l), dim=1) # bs, 64 + 128 + category, 1
        # broadcast the global embedding to every point
        l = emb.expand(-1,-1, xyz.size(-1))
        x = torch.cat((l1_xyz, l1_points, l), dim=1)
        xyz, x = self.up_cic2(l1_xyz, x)
        xyz, x = self.up_cic1(xyz, x)
        x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True)
        # channel re-weighting (squeeze-and-excitation)
        se = self.se(x)
        x = x * se
        x = self.drop1(x)
        x = self.conv2(x)
        return x
================================================
FILE: CurveNet/core/models/curvenet_util.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: pointnet_util.py
@Time: 2018/10/13 10:39 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
from .walk import Walk
def knn(x, k):
    """Indices of the k+1 nearest neighbours for every point, self included.

    Args:
        x: point features/coordinates, [B, C, N].
        k: neighbour count (the returned axis is k+1 wide because the
           self-match at distance 0 is kept; callers slice it off).

    Returns:
        Long tensor of neighbour indices, [B, N, k+1].
    """
    xt = x.transpose(2, 1)
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    # negative squared Euclidean distance: 2ab - |a|^2 - |b|^2, so the
    # largest entries correspond to the closest points
    neg_dist = 2 * torch.matmul(xt, x) - sq_norm - sq_norm.transpose(2, 1)
    return neg_dist.topk(k=k + 1, dim=-1)[1]  # (batch_size, num_points, k+1)
def normal_knn(x, k):
    """Indices of the k nearest neighbours (self-match included, not skipped).

    Args:
        x: point features/coordinates, [B, C, N].
        k: number of neighbours to return.

    Returns:
        Long tensor of neighbour indices, [B, N, k].
    """
    xt = x.transpose(2, 1)
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    # negative squared distance: closest points have the largest values
    neg_dist = 2 * torch.matmul(xt, x) - sq_norm - sq_norm.transpose(2, 1)
    return neg_dist.topk(k=k, dim=-1)[1]  # (batch_size, num_points, k)
def pc_normalize(pc):
    """Normalize a point cloud to zero mean and unit maximum radius.

    Args:
        pc: numpy array of shape (N, 3) (any (N, D) works).

    Returns:
        The normalized array: centred at the origin and scaled so the
        farthest point lies on the unit sphere.

    Note:
        A degenerate cloud where every point equals the centroid yields a
        zero divisor (and NaNs), matching the original behaviour.
    """
    # fix: removed unused local `l` (pc.shape[0] was computed and discarded)
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    scale = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    return pc / scale
def square_distance(src, dst):
    """Pairwise squared Euclidean distances between two point sets.

    Args:
        src: [B, N, C] points.
        dst: [B, M, C] points.

    Returns:
        [B, N, M] tensor where entry (b, n, m) = ||src[b,n] - dst[b,m]||^2.
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    # expand ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    cross = torch.matmul(src, dst.permute(0, 2, 1))
    src_sq = torch.sum(src ** 2, -1).view(B, N, 1)
    dst_sq = torch.sum(dst ** 2, -1).view(B, 1, M)
    return src_sq + dst_sq - 2 * cross
def index_points(points, idx):
    """Gather point subsets by index, batch-wise.

    Args:
        points: input points data, [B, N, C].
        idx: sample index data, [B, S] (or [B, S1, S2, ...]).

    Returns:
        Indexed points, [B, S, C] (index dims mirror those of ``idx``).
    """
    B = points.shape[0]
    # build a batch index broadcast to the same shape as idx so that
    # advanced indexing pairs every index with its own batch element
    view_shape = [B] + [1] * (idx.dim() - 1)
    repeat_shape = [1] + list(idx.shape[1:])
    batch_indices = torch.arange(B, dtype=torch.long).to(points.device)
    batch_indices = batch_indices.view(view_shape).repeat(repeat_shape)
    return points[batch_indices, idx, :]
def farthest_point_sample(xyz, npoint):
    """Iterative farthest point sampling.

    Args:
        xyz: pointcloud data, [B, N, 3].
        npoint: number of samples.

    Returns:
        centroids: sampled pointcloud indices, [B, npoint].
    """
    device = xyz.device
    B, N, _ = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # running minimum squared distance from each point to the chosen set
    distance = torch.ones(B, N).to(device) * 1e10
    # NOTE: the draw is multiplied by 0, so sampling deterministically starts
    # at index 0; the randint call is kept so the global RNG stream advances
    # exactly as before
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) * 0
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for step in range(npoint):
        centroids[:, step] = farthest
        chosen = xyz[batch_indices, farthest, :].view(B, 1, 3)
        dist = torch.sum((xyz - chosen) ** 2, -1)
        closer = dist < distance
        distance[closer] = dist[closer]
        # next sample = the point farthest from everything picked so far
        farthest = torch.max(distance, -1)[1]
    return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
    """Group up to ``nsample`` neighbours within ``radius`` of each query.

    Args:
        radius: local region radius.
        nsample: max sample number in local region.
        xyz: all points, [B, N, 3].
        new_xyz: query points, [B, S, 3].

    Returns:
        group_idx: grouped point indices, [B, S, nsample]; slots with fewer
        than nsample in-radius points are padded with the closest neighbour.
    """
    device = xyz.device
    B, N, _ = xyz.shape
    S = new_xyz.shape[1]
    group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    sqrdists = square_distance(new_xyz, xyz)
    # push out-of-radius points past every valid index, then sort so the
    # first nsample entries are the in-radius neighbours
    group_idx[sqrdists > radius ** 2] = N
    group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
    # replace sentinel N entries (not enough neighbours) with the first hit
    first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
    pad = group_idx == N
    group_idx[pad] = first[pad]
    return group_idx
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
    """FPS-downsample the cloud, then ball-query-group features around each sample.

    Args:
        npoint: number of FPS centroids.
        radius: ball query radius.
        nsample: neighbours per centroid.
        xyz: input point positions, [B, N, 3].
        points: input point features, [B, N, D].
        returnfps: when True, also return the grouping indices.

    Returns:
        new_xyz: sampled positions, [B, npoint, 3].
        new_points: grouped features, [B, npoint, nsample, D].
        (idx: [B, npoint, nsample] grouping indices, only if returnfps.)
    """
    fps_idx = farthest_point_sample(xyz, npoint)
    new_xyz = index_points(xyz, fps_idx)
    torch.cuda.empty_cache()  # no-op on CPU; trims the CUDA caching allocator
    idx = query_ball_point(radius, nsample, xyz, new_xyz)
    torch.cuda.empty_cache()
    new_points = index_points(points, idx)
    torch.cuda.empty_cache()
    if returnfps:
        return new_xyz, new_points, idx
    return new_xyz, new_points
class Attention_block(nn.Module):
    '''
    Additive attention gate, as used in attention U-Net: projects the gating
    signal and the skip features to a shared width, combines them, and emits
    a per-position sigmoid weight (plus its complement).
    '''
    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()

        def project(channels):
            # 1x1 conv + BN projection onto the shared intermediate width
            return nn.Sequential(
                nn.Conv1d(channels, F_int, kernel_size=1, stride=1, padding=0, bias=True),
                nn.BatchNorm1d(F_int))

        self.W_g = project(F_g)
        self.W_x = project(F_l)
        self.psi = nn.Sequential(
            nn.Conv1d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm1d(1),
            nn.Sigmoid())

    def forward(self, g, x):
        """g: gating signal [B, F_g, N]; x: skip features [B, F_l, N].
        Returns (psi, 1 - psi), each [B, 1, N]."""
        combined = F.leaky_relu(self.W_g(g) + self.W_x(x), negative_slope=0.2)
        gate = self.psi(combined)
        return gate, 1. - gate
class LPFA(nn.Module):
    """Local Point-Feature Aggregation.

    Builds a 9-dim geometric encoding [anchor | neighbour | neighbour-anchor]
    over each point's k nearest neighbours; for non-initial layers it is
    fused with feature differences. The result passes through ``mlp_num``
    shared conv blocks and is pooled over the neighbour axis (max when
    ``initial``, mean otherwise).

    Fix: tensors are now created on the device of the inputs instead of a
    hard-coded CUDA device, so the module also runs on CPU-only hosts.
    """
    def __init__(self, in_channel, out_channel, k, mlp_num=2, initial=False):
        super(LPFA, self).__init__()
        self.k = k
        # kept for backward compatibility with code that reads this attribute;
        # no tensor below depends on it any more
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.initial = initial
        if not initial:
            # lifts the 9-dim geometric encoding to the feature width
            self.xyz2feature = nn.Sequential(
                nn.Conv2d(9, in_channel, kernel_size=1, bias=False),
                nn.BatchNorm2d(in_channel))
        mlp = []
        for _ in range(mlp_num):
            mlp.append(nn.Sequential(nn.Conv2d(in_channel, out_channel, 1, bias=False),
                                     nn.BatchNorm2d(out_channel),
                                     nn.LeakyReLU(0.2)))
            in_channel = out_channel
        self.mlp = nn.Sequential(*mlp)

    def forward(self, x, xyz, idx=None):
        """x: [B, C, N] features; xyz: [B, 3, N] coordinates; idx: optional
        precomputed kNN indices [B, N, k]. Returns [B, out_channel, N]."""
        x = self.group_feature(x, xyz, idx)
        x = self.mlp(x)
        if self.initial:
            x = x.max(dim=-1, keepdim=False)[0]
        else:
            x = x.mean(dim=-1, keepdim=False)
        return x

    def group_feature(self, x, xyz, idx):
        """Assemble the [B, C, N, k] neighbourhood tensor fed to the MLP."""
        batch_size, num_dims, num_points = x.size()

        if idx is None:
            idx = knn(xyz, k=self.k)[:,:,:self.k]  # (batch_size, num_points, k)

        # fix: derive the device from the input instead of hard-coded CUDA
        idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1) * num_points
        idx = idx + idx_base
        idx = idx.view(-1)

        xyz = xyz.transpose(2, 1).contiguous()  # bs, n, 3
        point_feature = xyz.view(batch_size * num_points, -1)[idx, :]
        point_feature = point_feature.view(batch_size, num_points, self.k, -1)  # bs, n, k, 3

        points = xyz.view(batch_size, num_points, 1, 3).expand(-1, -1, self.k, -1)  # bs, n, k, 3

        # geometric encoding: [anchor | neighbour | neighbour - anchor]
        point_feature = torch.cat((points, point_feature, point_feature - points),
                                  dim=3).permute(0, 3, 1, 2).contiguous()

        if self.initial:
            return point_feature

        x = x.transpose(2, 1).contiguous()  # bs, n, c
        feature = x.view(batch_size * num_points, -1)[idx, :]
        feature = feature.view(batch_size, num_points, self.k, num_dims)  # bs, n, k, c
        x = x.view(batch_size, num_points, 1, num_dims)
        feature = feature - x

        feature = feature.permute(0, 3, 1, 2).contiguous()
        point_feature = self.xyz2feature(point_feature)  # bs, c, n, k
        feature = F.leaky_relu(feature + point_feature, 0.2)
        return feature  # bs, c, n, k
class PointNetFeaturePropagation(nn.Module):
    """Feature propagation (upsampling) with optional attention-gated skips.

    Interpolates features from a sparse level onto a denser one via
    inverse-distance-weighted 3-NN interpolation, optionally gates the skip
    features with an ``Attention_block``, concatenates both, and refines the
    result with 1x1 conv + BN + LeakyReLU layers.

    Args:
        in_channel: channels of the concatenated (skip + interpolated) input.
        mlp: output widths of the refinement conv stack.
        att: optional [F_g, F_l, F_int] sizes for the attention gate.
    """
    def __init__(self, in_channel, mlp, att=None):
        super(PointNetFeaturePropagation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        self.att = None
        if att is not None:
            self.att = Attention_block(F_g=att[0],F_l=att[1],F_int=att[2])
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm1d(out_channel))
            last_channel = out_channel

    def forward(self, xyz1, xyz2, points1, points2):
        """
        Input:
            xyz1: input points position data, [B, C, N]
            xyz2: sampled input points position data, [B, C, S], skipped xyz
            points1: input points data, [B, D, N]
            points2: input points data, [B, D, S], skipped features
        Return:
            new_points: upsampled points data, [B, D', N]
        """
        xyz1 = xyz1.permute(0, 2, 1)
        xyz2 = xyz2.permute(0, 2, 1)

        points2 = points2.permute(0, 2, 1)
        B, N, C = xyz1.shape
        _, S, _ = xyz2.shape

        if S == 1:
            # a single source point: broadcast its features everywhere
            interpolated_points = points2.repeat(1, N, 1)
        else:
            # inverse-distance weighting over the 3 nearest source points
            dists = square_distance(xyz1, xyz2)
            dists, idx = dists.sort(dim=-1)
            dists, idx = dists[:, :, :3], idx[:, :, :3]  # [B, N, 3]

            dist_recip = 1.0 / (dists + 1e-8)  # epsilon guards zero distance
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)

        # skip attention
        if self.att is not None:
            # gate the skip features; psig (the complement) is unused here
            psix, psig = self.att(interpolated_points.permute(0, 2, 1), points1)
            points1 = points1 * psix

        if points1 is not None:
            points1 = points1.permute(0, 2, 1)
            new_points = torch.cat([points1, interpolated_points], dim=-1)
        else:
            new_points = interpolated_points

        new_points = new_points.permute(0, 2, 1)
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.leaky_relu(bn(conv(new_points)), 0.2)
        return new_points
class CIC(nn.Module):
    """Curve Intervention Convolution block (residual bottleneck).

    Optionally downsamples the cloud to ``npoint`` points with masked
    max-pooling, then applies: 1x1 bottleneck conv -> optional curve
    grouping + aggregation -> LPFA over kNN neighbourhoods -> 1x1 expansion
    conv, with a (projected) residual connection around the whole block.

    Args:
        npoint: target number of points after (optional) downsampling.
        radius: ball-query radius for the masked max-pool.
        k: kNN neighbourhood size.
        in_channels / output_channels: block input / output widths.
        bottleneck_ratio: channel reduction factor inside the block.
        mlp_num: conv blocks inside the LPFA.
        curve_config: [curve_num, curve_length] or None to disable curves.
    """
    def __init__(self, npoint, radius, k, in_channels, output_channels, bottleneck_ratio=2, mlp_num=2, curve_config=None):
        super(CIC, self).__init__()
        self.in_channels = in_channels
        self.output_channels = output_channels
        self.bottleneck_ratio = bottleneck_ratio
        self.radius = radius
        self.k = k
        self.npoint = npoint

        # bottleneck width
        planes = in_channels // bottleneck_ratio

        self.use_curve = curve_config is not None
        if self.use_curve:
            self.curveaggregation = CurveAggregation(planes)
            self.curvegrouping = CurveGrouping(planes, k, curve_config[0], curve_config[1])

        self.conv1 = nn.Sequential(
            nn.Conv1d(in_channels,
                      planes,
                      kernel_size=1,
                      bias=False),
            nn.BatchNorm1d(in_channels // bottleneck_ratio),
            nn.LeakyReLU(negative_slope=0.2, inplace=True))

        self.conv2 = nn.Sequential(
            nn.Conv1d(planes, output_channels, kernel_size=1, bias=False),
            nn.BatchNorm1d(output_channels))

        if in_channels != output_channels:
            # 1x1 projection so the residual sum matches channel widths
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_channels,
                          output_channels,
                          kernel_size=1,
                          bias=False),
                nn.BatchNorm1d(output_channels))

        self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        self.maxpool = MaskedMaxPool(npoint, radius, k)

        self.lpfa = LPFA(planes, planes, k, mlp_num=mlp_num, initial=False)

    def forward(self, xyz, x):
        """xyz: [B, 3, N] positions; x: [B, in_channels, N] features.
        Returns (xyz', x') with npoint points and output_channels features."""
        # max pool (downsample only when not already at the target resolution)
        if xyz.size(-1) != self.npoint:
            xyz, x = self.maxpool(
                xyz.transpose(1, 2).contiguous(), x)
            xyz = xyz.transpose(1, 2)

        shortcut = x
        x = self.conv1(x)  # bs, c', n

        # knn returns k+1 neighbours; index 0 is the point itself
        idx = knn(xyz, self.k)

        if self.use_curve:
            # curve grouping
            curves = self.curvegrouping(x, xyz, idx[:,:,1:])  # avoid self-loop

            # curve aggregation
            x = self.curveaggregation(x, curves)

        x = self.lpfa(x, xyz, idx=idx[:,:,:self.k])  # bs, c', n, k

        x = self.conv2(x)  # bs, c, n

        if self.in_channels != self.output_channels:
            shortcut = self.shortcut(shortcut)

        x = self.relu(x + shortcut)

        return xyz, x
class CurveAggregation(nn.Module):
    """Fuse per-point features with curve features via soft attention.

    Curves are summarised two ways — along their length (inter) and across
    the curve set (intra) — then each point attends over both summaries and
    the result is added back to the point features through a residual path.
    """
    def __init__(self, in_channel):
        super(CurveAggregation, self).__init__()
        self.in_channel = in_channel
        mid_feature = in_channel // 2

        def squeeze_conv():
            # 1x1 conv projecting curve summaries to the attention width
            return nn.Conv1d(in_channel, mid_feature, kernel_size=1, bias=False)

        self.conva = squeeze_conv()
        self.convb = squeeze_conv()
        self.convc = squeeze_conv()
        self.convn = nn.Conv1d(mid_feature, mid_feature, kernel_size=1, bias=False)
        self.convl = nn.Conv1d(mid_feature, mid_feature, kernel_size=1, bias=False)
        self.convd = nn.Sequential(
            nn.Conv1d(mid_feature * 2, in_channel, kernel_size=1, bias=False),
            nn.BatchNorm1d(in_channel))
        self.line_conv_att = nn.Conv2d(in_channel, 1, kernel_size=1, bias=False)

    def forward(self, x, curves):
        """x: point features [B, C, N]; curves: [B, C, curve_num, curve_len].
        Returns fused features [B, C, N]."""
        att = self.line_conv_att(curves)  # bs, 1, c_n, c_l
        # summarise every curve over its length (inter) and the curve set
        # at every length position (intra)
        inter = torch.sum(curves * F.softmax(att, dim=-1), dim=-1)   # bs, c, c_n
        intra = torch.sum(curves * F.softmax(att, dim=-2), dim=-2)   # bs, c, c_l
        inter = self.conva(inter)  # bs, mid, c_n
        intra = self.convb(intra)  # bs, mid, c_l

        # every point attends over both curve summaries
        logits = self.convc(x).transpose(1, 2).contiguous()          # bs, n, mid
        w_inter = F.softmax(torch.bmm(logits, inter), dim=-1)        # bs, n, c_n
        w_intra = F.softmax(torch.bmm(logits, intra), dim=-1)        # bs, n, c_l

        inter = self.convn(inter).transpose(1, 2).contiguous()       # bs, c_n, mid
        intra = self.convl(intra).transpose(1, 2).contiguous()       # bs, c_l, mid

        fused = torch.cat((torch.bmm(w_inter, inter),
                           torch.bmm(w_intra, intra)), dim=-1)       # bs, n, 2*mid
        x = x + self.convd(fused.transpose(1, 2).contiguous())
        return F.leaky_relu(x, negative_slope=0.2)
class CurveGrouping(nn.Module):
    """Pick curve starting points by self-attention, then walk curves out.

    Args:
        in_channel: feature width of the point features.
        k: neighbourhood size used by the walk.
        curve_num: number of curves to start per shape.
        curve_length: number of walk steps per curve.
    """
    def __init__(self, in_channel, k, curve_num, curve_length):
        super(CurveGrouping, self).__init__()
        self.curve_num = curve_num
        self.curve_length = curve_length
        self.in_channel = in_channel
        self.k = k

        self.att = nn.Conv1d(in_channel, 1, kernel_size=1, bias=False)

        self.walk = Walk(in_channel, k, curve_num, curve_length)

    def forward(self, x, xyz, idx):
        """x: [B, C, N] features; xyz: [B, 3, N]; idx: [B, N, k] neighbour
        indices (self-loops already removed by the caller).
        Returns curve features [B, C, curve_num, curve_length]."""
        # starting point selection in self attention style
        x_att = torch.sigmoid(self.att(x))
        x = x * x_att

        _, start_index = torch.topk(x_att,
                                    self.curve_num,
                                    dim=2,
                                    sorted=False)
        # fix: squeeze only the channel dim (dim 1). The previous bare
        # squeeze() also collapsed the batch dim when batch_size == 1
        # (and the curve dim when curve_num == 1), producing wrong shapes.
        start_index = start_index.squeeze(1).unsqueeze(2)

        curves = self.walk(xyz, x, idx, start_index)  # bs, c, c_n, c_l

        return curves
class MaskedMaxPool(nn.Module):
    """Downsample a cloud via FPS + ball query, max-pooling each neighbourhood.

    Args:
        npoint: number of points kept after sampling.
        radius: ball-query radius.
        k: neighbours pooled per kept point.
    """
    def __init__(self, npoint, radius, k):
        super(MaskedMaxPool, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.k = k

    def forward(self, xyz, features):
        """xyz: [B, N, 3]; features: [B, C, N].
        Returns (sub_xyz [B, npoint, 3], sub_features [B, C, npoint])."""
        sub_xyz, grouped = sample_and_group(
            self.npoint, self.radius, self.k, xyz, features.transpose(1, 2))
        grouped = grouped.permute(0, 3, 1, 2).contiguous()  # bs, c, npoint, k
        # max over each neighbourhood, then drop the trailing singleton dim
        pooled = F.max_pool2d(grouped, kernel_size=[1, grouped.shape[3]])  # bs, c, n, 1
        return sub_xyz, torch.squeeze(pooled, -1)  # bs, c, n
================================================
FILE: CurveNet/core/models/walk.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: walk.py
@Time: 2021/01/21 3:10 PM
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def batched_index_select(input, dim, index):
    """Per-batch ``torch.gather`` along ``dim``.

    Args:
        input: tensor whose first axis is the batch.
        dim: axis (>= 1) to select along.
        index: [B, S] (or broadcastable) indices, one set per batch element.

    Returns:
        Tensor shaped like ``input`` with axis ``dim`` replaced by S.
    """
    # reshape index to [B, 1, ..., S, ..., 1] then expand it to input's
    # shape on every axis except the one being selected
    views = [input.shape[0]] + [-1 if axis == dim else 1
                                for axis in range(1, input.dim())]
    expanse = list(input.shape)
    expanse[0] = -1
    expanse[dim] = -1
    index = index.view(views).expand(expanse)
    return torch.gather(input, dim, index)
def gumbel_softmax(logits, dim, temperature=1):
    """
    ST-gumbel-softmax without random gumbel samplings: forward pass emits a
    one-hot vector, backward pass flows gradients through the soft softmax
    (straight-through estimator).

    input: [*, n_class]
    return: [*, n_class] one-hot (numerically; gradients are soft)
    """
    y = F.softmax(logits / temperature, dim=dim)
    shape = y.size()
    # NOTE: the hard argmax is always taken over the LAST dim regardless of
    # `dim`; call sites use dim=-1, where the two coincide
    ind = y.max(dim=-1)[1]
    hard = torch.zeros_like(y).view(-1, shape[-1])
    hard.scatter_(1, ind.view(-1, 1), 1)
    hard = hard.view(*shape)
    # straight-through: value == hard, gradient == d(soft)
    return (hard - y).detach() + y
class Walk(nn.Module):
    '''
    Walk in the cloud: grow ``curve_num`` curves of ``curve_length`` steps by
    repeatedly hopping from the current point to one of its k neighbours,
    choosing the hop with a learned agent (hard gumbel-softmax) and damping
    back-tracking hops with a cosine-based crossover suppression term.
    '''
    def __init__(self, in_channel, k, curve_num, curve_length):
        super(Walk, self).__init__()
        self.curve_num = curve_num
        self.curve_length = curve_length
        self.k = k

        # scores each candidate neighbour given [neighbour | curve descriptor]
        self.agent_mlp = nn.Sequential(
            nn.Conv2d(in_channel * 2,
                      1,
                      kernel_size=1,
                      bias=False), nn.BatchNorm2d(1))
        # mixes current-step and history features into the curve descriptor
        self.momentum_mlp = nn.Sequential(
            nn.Conv1d(in_channel * 2,
                      2,
                      kernel_size=1,
                      bias=False), nn.BatchNorm1d(2))

    def crossover_suppression(self, cur, neighbor, bn, n, k):
        """Cosine-similarity weight in [0, 1] between the incoming walk
        direction and each candidate outgoing direction; detached so it acts
        as a fixed multiplier, not a gradient path.
        cur: bs*n, c
        neighbor: bs*n, c, k
        """
        neighbor = neighbor.detach()
        cur = cur.unsqueeze(-1).detach()
        dot = torch.bmm(cur.transpose(1,2), neighbor)  # bs*n, 1, k
        norm1 = torch.norm(cur, dim=1, keepdim=True)
        norm2 = torch.norm(neighbor, dim=1, keepdim=True)
        divider = torch.clamp(norm1 * norm2, min=1e-8)
        ans = torch.div(dot, divider).squeeze()  # bs*n, k

        # normalize to [0, 1]
        ans = 1. + ans
        ans = torch.clamp(ans, 0., 1.0)

        return ans.detach()

    def forward(self, xyz, x, adj, cur):
        """xyz: [B, 3, N]; x: [B, C, N] features; adj: [B, N, k] neighbour
        indices; cur: [B, curve_num, 1] starting-point indices.
        Returns curve features [B, C, curve_num, curve_length]."""
        bn, c, tot_points = x.size()

        # raw point coordinates
        # fix: `.contiguous` was missing its call parentheses, which bound the
        # method object instead of producing a tensor (latent bug — xyz is not
        # referenced again below, but the assignment is now well-formed)
        xyz = xyz.transpose(1,2).contiguous()  # bs, n, 3

        # point features
        x = x.transpose(1,2).contiguous()  # bs, n, c
        flatten_x = x.view(bn * tot_points, -1)
        # fix: allocate on the input's device instead of hard-coded CUDA so
        # the walk also runs on CPU-only hosts
        batch_offset = torch.arange(0, bn, device=x.device).detach() * tot_points

        # indices of neighbors for the starting points
        tmp_adj = (adj + batch_offset.view(-1,1,1)).view(adj.size(0)*adj.size(1),-1)  # bs, n, k

        # batch flattened indices for the starting points
        flatten_cur = (cur + batch_offset.view(-1,1,1)).view(-1)

        curves = []

        # one step at a time
        for step in range(self.curve_length):

            if step == 0:
                # get starting point features using flattened indices
                starting_points = flatten_x[flatten_cur, :].contiguous()
                pre_feature = starting_points.view(bn, self.curve_num, -1, 1).transpose(1,2)  # bs, c, n, 1
            else:
                # dynamic momentum: learn how much history vs. current step
                # goes into the curve descriptor
                cat_feature = torch.cat((cur_feature.squeeze(), pre_feature.squeeze()),dim=1)
                att_feature = F.softmax(self.momentum_mlp(cat_feature),dim=1).view(bn, 1, self.curve_num, 2)  # bs, 1, n, 2
                cat_feature = torch.cat((cur_feature, pre_feature),dim=-1)  # bs, c, n, 2

                # update curve descriptor
                pre_feature = torch.sum(cat_feature * att_feature, dim=-1, keepdim=True)  # bs, c, n
                pre_feature_cos = pre_feature.transpose(1,2).contiguous().view(bn * self.curve_num, -1)

            pick_idx = tmp_adj[flatten_cur]  # bs*n, k

            # get the neighbors of current points
            pick_values = flatten_x[pick_idx.view(-1),:]

            # reshape to fit crossover suppression below
            pick_values_cos = pick_values.view(bn * self.curve_num, self.k, c)
            pick_values = pick_values_cos.view(bn, self.curve_num, self.k, c)
            pick_values_cos = pick_values_cos.transpose(1,2).contiguous()

            pick_values = pick_values.permute(0,3,1,2)  # bs, c, n, k

            pre_feature_expand = pre_feature.expand_as(pick_values)

            # concat current point features with curve descriptors
            pre_feature_expand = torch.cat((pick_values, pre_feature_expand),dim=1)

            # which node to pick next?
            pre_feature_expand = self.agent_mlp(pre_feature_expand)  # bs, 1, n, k

            if step !=0:
                # crossover suppression: penalise hops that double back
                d = self.crossover_suppression(cur_feature_cos - pre_feature_cos,
                                               pick_values_cos - cur_feature_cos.unsqueeze(-1),
                                               bn, self.curve_num, self.k)
                d = d.view(bn, self.curve_num, self.k).unsqueeze(1)  # bs, 1, n, k
                pre_feature_expand = torch.mul(pre_feature_expand, d)

            # hard one-hot selection with straight-through gradients
            pre_feature_expand = gumbel_softmax(pre_feature_expand, -1)  # bs, 1, n, k

            cur_feature = torch.sum(pick_values * pre_feature_expand, dim=-1, keepdim=True)  # bs, c, n, 1

            cur_feature_cos = cur_feature.transpose(1,2).contiguous().view(bn * self.curve_num, c)

            cur = torch.argmax(pre_feature_expand, dim=-1).view(-1, 1)  # bs * n, 1

            flatten_cur = batched_index_select(pick_idx, 1, cur).squeeze()  # bs * n

            # collect curve progress
            curves.append(cur_feature)

        return torch.cat(curves,dim=-1)
================================================
FILE: CurveNet/core/util.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: util
@Time: 4/5/19 3:47 PM
"""
import numpy as np
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
    """Cross-entropy loss with optional label smoothing.

    Args:
        pred: raw logits, [B, n_class].
        gold: integer class labels, any shape flattening to [B].
        smoothing: when True, the target puts 0.8 on the true class and
            spreads the remaining 0.2 uniformly over the other classes.

    Returns:
        Scalar mean loss tensor.
    """
    gold = gold.contiguous().view(-1)

    if not smoothing:
        return F.cross_entropy(pred, gold, reduction='mean')

    eps = 0.2
    n_class = pred.size(1)

    # one-hot target, then redistribute eps mass over the wrong classes
    one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
    smoothed = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)

    return -(smoothed * log_prb).sum(dim=1).mean()
class IOStream():
    """Append-only run logger that mirrors every message to stdout and a file."""

    def __init__(self, path):
        # append mode so restarts extend, rather than clobber, the log
        self.f = open(path, 'a')

    def cprint(self, text):
        """Print ``text`` to stdout and append it (plus newline) to the log."""
        print(text)
        self.f.write(text + '\n')
        # flush immediately so the log survives a crash mid-run
        self.f.flush()

    def close(self):
        """Close the underlying log file."""
        self.f.close()
================================================
FILE: GDANet/README.md
================================================
# Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud.
This repository is built for the paper:
__Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud (_AAAI2021_)__ [[arXiv](https://arxiv.org/abs/2012.10921)]
<br>
by [Mutian Xu*](https://mutianxu.github.io/), [Junhao Zhang*](https://junhaozhang98.github.io/), Zhipeng Zhou, Mingye Xu, Xiaojuan Qi and Yu Qiao.
## Overview
Geometry-Disentangled Attention Network for 3D object point cloud classification and segmentation (GDANet):
<img src = './imgs/GDANet.jpg' width = 800>
## Citation
If you find the code or trained models useful, please consider citing:
@misc{xu2021learning,
      title={Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud},
      author={Mutian Xu and Junhao Zhang and Zhipeng Zhou and Mingye Xu and Xiaojuan Qi and Yu Qiao},
      year={2021},
      eprint={2012.10921},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
## Installation
### Requirements
* Linux (tested on Ubuntu 14.04/16.04)
* Python 3.5+
* PyTorch 1.0+
### Dataset
* Create the folder to symlink the data later:
`mkdir -p data`
* __Object Classification__:
Download and unzip [ModelNet40](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip) (415M), then symlink the path to it as follows (you can alternatively modify the path [here](https://github.com/mutianxu/GDANet/blob/main/util/data_util.py#L12)) :
`ln -s /path to modelnet40/modelnet40_ply_hdf5_2048 data`
* __Shape Part Segmentation__:
Download and unzip [ShapeNet Part](https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip) (674M), then symlink the path to it as follows (you can alternatively modify the path [here](https://github.com/mutianxu/GDANet/blob/main/util/data_util.py#L70)) :
`ln -s /path to shapenet part/shapenetcore_partanno_segmentation_benchmark_v0_normal data`
## Usage
### Object Classification on ModelNet40
* Train:
`python main_cls.py`
* Test:
* Run the voting evaluation script, after this voting you will get an accuracy of 93.8% if all things go right:
`python voting_eval_modelnet.py --model_path 'pretrained/GDANet_ModelNet40_93.4.t7'`
* You can also directly evaluate our pretrained model without voting to get an accuracy of 93.4%:
`python main.py --eval True --model_path 'pretrained/GDANet_ModelNet40_93.4.t7'`
### Shape Part Segmentation on ShapeNet Part
* Train:
* Training from scratch:
`python main_ptseg.py`
* If you want resume training from checkpoints, specify `resume` in the args:
`python main_ptseg.py --resume True`
* Test:
You can choose to test the model with the best instance mIoU, class mIoU or accuracy, by specifying `model_type` in the args:
* `python main_ptseg.py --model_type 'ins_iou'` (best instance mIoU, default)
* `python main_ptseg.py --model_type 'cls_iou'` (best class mIoU)
* `python main_ptseg.py --model_type 'acc'` (best accuracy)
## Other information
Please contact Mutian Xu (mino1018@outlook.com) or Junhao Zhang (junhaozhang98@gmail.com) for further discussion.
## Acknowledgement
This code is partially borrowed from [DGCNN](https://github.com/WangYueFt/dgcnn) and [PointNet++](https://github.com/charlesq34/pointnet2).
================================================
FILE: GDANet/model/GDANet_cls.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from .util.GDANet_util import local_operator, GDM, SGCAM
class GDANET(nn.Module):
    """Geometry-Disentangled Attention Network for point-cloud classification.

    Three feature blocks: each builds kNN edge features (``local_operator``),
    applies shared 1x1 convs with a max over neighbours; the first two blocks
    additionally split points into sharp/gentle components (``GDM``) and fuse
    each back with attention (``SGCAM``). Concatenated multi-scale features
    are globally max+avg pooled and classified by a 3-layer MLP head.

    Args:
        number_class: number of output classes (default 40 for ModelNet40).
    """
    def __init__(self, number_class=40):
        super(GDANET, self).__init__()

        self.bn1 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn11 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn12 = nn.BatchNorm1d(64, momentum=0.1)

        self.bn2 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn21 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn22 = nn.BatchNorm1d(64, momentum=0.1)

        self.bn3 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn31 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn32 = nn.BatchNorm1d(128, momentum=0.1)

        self.bn4 = nn.BatchNorm1d(512, momentum=0.1)

        # block 1: edge features are 2*3 coordinate channels
        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=True),
                                   self.bn1)
        self.conv11 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
                                    self.bn11)
        self.conv12 = nn.Sequential(nn.Conv1d(64 * 2, 64, kernel_size=1, bias=True),
                                    self.bn12)

        # block 2: input is (3 coords + 64 features) = 67, doubled edge-wise
        self.conv2 = nn.Sequential(nn.Conv2d(67 * 2, 64, kernel_size=1, bias=True),
                                   self.bn2)
        self.conv21 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
                                    self.bn21)
        self.conv22 = nn.Sequential(nn.Conv1d(64 * 2, 64, kernel_size=1, bias=True),
                                    self.bn22)

        # block 3: input is (67 + 64) = 131 channels, doubled edge-wise
        self.conv3 = nn.Sequential(nn.Conv2d(131 * 2, 128, kernel_size=1, bias=True),
                                   self.bn3)
        self.conv31 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=1, bias=True),
                                    self.bn31)
        self.conv32 = nn.Sequential(nn.Conv1d(128, 128, kernel_size=1, bias=True),
                                    self.bn32)

        # fuse the concatenated 64+64+128 multi-scale features
        self.conv4 = nn.Sequential(nn.Conv1d(256, 512, kernel_size=1, bias=True),
                                   self.bn4)

        # sharp (s) / gentle (g) attention modules for blocks 1 and 2
        self.SGCAM_1s = SGCAM(64)
        self.SGCAM_1g = SGCAM(64)
        self.SGCAM_2s = SGCAM(64)
        self.SGCAM_2g = SGCAM(64)

        # classifier head: 1024 (max+avg pooled 512) -> 512 -> 256 -> classes
        self.linear1 = nn.Linear(1024, 512, bias=True)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.4)
        self.linear2 = nn.Linear(512, 256, bias=True)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.4)
        self.linear3 = nn.Linear(256, number_class, bias=True)

    def forward(self, x):
        """x: input point cloud, [B, 3, N]. Returns class logits [B, number_class]."""
        B, C, N = x.size()
        ###############
        """block 1"""
        # Local operator:
        x1 = local_operator(x, k=30)
        x1 = F.relu(self.conv1(x1))
        x1 = F.relu(self.conv11(x1))
        x1 = x1.max(dim=-1, keepdim=False)[0]

        # Geometry-Disentangle Module:
        x1s, x1g = GDM(x1, M=256)

        # Sharp-Gentle Complementary Attention Module:
        y1s = self.SGCAM_1s(x1, x1s.transpose(2, 1))
        y1g = self.SGCAM_1g(x1, x1g.transpose(2, 1))
        z1 = torch.cat([y1s, y1g], 1)
        z1 = F.relu(self.conv12(z1))
        ###############
        """block 2"""
        # carry raw coordinates alongside learned features into the next block
        x1t = torch.cat((x, z1), dim=1)
        x2 = local_operator(x1t, k=30)
        x2 = F.relu(self.conv2(x2))
        x2 = F.relu(self.conv21(x2))
        x2 = x2.max(dim=-1, keepdim=False)[0]

        x2s, x2g = GDM(x2, M=256)

        y2s = self.SGCAM_2s(x2, x2s.transpose(2, 1))
        y2g = self.SGCAM_2g(x2, x2g.transpose(2, 1))
        z2 = torch.cat([y2s, y2g], 1)
        z2 = F.relu(self.conv22(z2))
        ###############
        # block 3: no geometry disentangling, plain conv refinement
        x2t = torch.cat((x1t, z2), dim=1)
        x3 = local_operator(x2t, k=30)
        x3 = F.relu(self.conv3(x3))
        x3 = F.relu(self.conv31(x3))
        x3 = x3.max(dim=-1, keepdim=False)[0]

        z3 = F.relu(self.conv32(x3))
        ###############
        # multi-scale fusion + global max/avg pooling
        x = torch.cat((z1, z2, z3), dim=1)
        x = F.relu(self.conv4(x))
        x11 = F.adaptive_max_pool1d(x, 1).view(B, -1)
        x22 = F.adaptive_avg_pool1d(x, 1).view(B, -1)
        x = torch.cat((x11, x22), 1)

        x = F.relu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.relu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        x = self.linear3(x)
        return x
================================================
FILE: GDANet/model/GDANet_ptseg.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from util.GDANet_util import local_operator_withnorm, local_operator, GDM, SGCAM
class GDANet(nn.Module):
    """GDANet for part segmentation.

    Two local blocks with Geometry-Disentangle + SGCAM fusion, a third plain
    local block, a pooled global feature, and a per-point segmentation head
    conditioned on the 16-way one-hot object-category label.
    """
    def __init__(self, num_classes):
        super(GDANet, self).__init__()
        # BatchNorms for the three feature blocks and the heads.
        self.bn1 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn11 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn12 = nn.BatchNorm1d(64, momentum=0.1)
        self.bn2 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn21 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn22 = nn.BatchNorm1d(64, momentum=0.1)
        self.bn3 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn31 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn32 = nn.BatchNorm1d(128, momentum=0.1)
        self.bn4 = nn.BatchNorm1d(512, momentum=0.1)
        self.bnc = nn.BatchNorm1d(64, momentum=0.1)
        self.bn5 = nn.BatchNorm1d(256, momentum=0.1)
        self.bn6 = nn.BatchNorm1d(256, momentum=0.1)
        self.bn7 = nn.BatchNorm1d(128, momentum=0.1)
        # Block 1 input: 9 channels from local_operator_withnorm
        # (neighbor-x, neighbor, neighbor_norm = 3*3).
        self.conv1 = nn.Sequential(nn.Conv2d(9, 64, kernel_size=1, bias=True),
                                   self.bn1)
        self.conv11 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
                                    self.bn11)
        # Fuses the two SGCAM outputs (64+64 channels) back to 64.
        self.conv12 = nn.Sequential(nn.Conv1d(64*2, 64, kernel_size=1, bias=True),
                                    self.bn12)
        # Block 2 input: (3 xyz + 64 block-1) channels doubled by local_operator.
        self.conv2 = nn.Sequential(nn.Conv2d(67 * 2, 64, kernel_size=1, bias=True),
                                   self.bn2)
        self.conv21 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
                                    self.bn21)
        self.conv22 = nn.Sequential(nn.Conv1d(64*2, 64, kernel_size=1, bias=True),
                                    self.bn22)
        # Block 3 input: (67 + 64) channels doubled by local_operator.
        self.conv3 = nn.Sequential(nn.Conv2d(131 * 2, 128, kernel_size=1, bias=True),
                                   self.bn3)
        self.conv31 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=1, bias=True),
                                    self.bn31)
        self.conv32 = nn.Sequential(nn.Conv1d(128, 128, kernel_size=1, bias=True),
                                    self.bn32)
        # Global feature from the concatenated per-point features (64+64+128=256).
        self.conv4 = nn.Sequential(nn.Conv1d(256, 512, kernel_size=1, bias=True),
                                   self.bn4)
        # Embeds the 16-way one-hot category label.
        self.convc = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=True),
                                   self.bnc)
        # Segmentation head over per-point + broadcast global + label features.
        self.conv5 = nn.Sequential(nn.Conv1d(256 + 512 + 64, 256, kernel_size=1, bias=True),
                                   self.bn5)
        self.dp1 = nn.Dropout(0.4)
        self.conv6 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=True),
                                   self.bn6)
        self.dp2 = nn.Dropout(0.4)
        self.conv7 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=True),
                                   self.bn7)
        self.conv8 = nn.Conv1d(128, num_classes, kernel_size=1, bias=True)
        self.SGCAM_1s = SGCAM(64)
        self.SGCAM_1g = SGCAM(64)
        self.SGCAM_2s = SGCAM(64)
        self.SGCAM_2g = SGCAM(64)

    def forward(self, x, norm_plt, cls_label):
        """Per-point part segmentation.

        Args:
            x: (B, 3, N) points (3 inferred from conv2's 67*2 input channels).
            norm_plt: per-point normals, consumed by local_operator_withnorm;
                presumably (B, 3, N) — TODO confirm against caller.
            cls_label: (B, 16) one-hot object-category label.
        Returns:
            (B, N, num_classes) per-point log-probabilities.
        """
        B, C, N = x.size()
        ###############
        """block 1"""
        # Edge features with normals -> convs -> neighborhood max-pool.
        x1 = local_operator_withnorm(x, norm_plt, k=30)
        x1 = F.relu(self.conv1(x1))
        x1 = F.relu(self.conv11(x1))
        x1 = x1.max(dim=-1, keepdim=False)[0]
        # Disentangle into sharp (high) / gentle (low) components, attend, fuse.
        x1h, x1l = GDM(x1, M=512)
        x1h = self.SGCAM_1s(x1, x1h.transpose(2, 1))
        x1l = self.SGCAM_1g(x1, x1l.transpose(2, 1))
        x1 = torch.cat([x1h, x1l], 1)
        x1 = F.relu(self.conv12(x1))
        ###############
        """block 1"""
        # NOTE: this is actually block 2 — the label string repeats in the original.
        x1t = torch.cat((x, x1), dim=1)
        x2 = local_operator(x1t, k=30)
        x2 = F.relu(self.conv2(x2))
        x2 = F.relu(self.conv21(x2))
        x2 = x2.max(dim=-1, keepdim=False)[0]
        x2h, x2l = GDM(x2, M=512)
        x2h = self.SGCAM_2s(x2, x2h.transpose(2, 1))
        x2l = self.SGCAM_2g(x2, x2l.transpose(2, 1))
        x2 = torch.cat([x2h, x2l], 1)
        x2 = F.relu(self.conv22(x2))
        ###############
        # Block 3: plain local operator, no disentangling.
        x2t = torch.cat((x1t, x2), dim=1)
        x3 = local_operator(x2t, k=30)
        x3 = F.relu(self.conv3(x3))
        x3 = F.relu(self.conv31(x3))
        x3 = x3.max(dim=-1, keepdim=False)[0]
        x3 = F.relu(self.conv32(x3))
        ###############
        # Global feature (max-pool) + label embedding, broadcast back to every point.
        xx = torch.cat((x1, x2, x3), dim=1)
        xc = F.relu(self.conv4(xx))
        xc = F.adaptive_max_pool1d(xc, 1).view(B, -1)
        cls_label = cls_label.view(B, 16, 1)
        cls_label = F.relu(self.convc(cls_label))
        cls = torch.cat((xc.view(B, 512, 1), cls_label), dim=1)
        cls = cls.repeat(1, 1, N)  # broadcast global/label features to all N points
        x = torch.cat((xx, cls), dim=1)
        x = F.relu(self.conv5(x))
        x = self.dp1(x)
        x = F.relu(self.conv6(x))
        x = self.dp2(x)
        x = F.relu(self.conv7(x))
        x = self.conv8(x)
        x = F.log_softmax(x, dim=1)
        x = x.permute(0, 2, 1)  # b,n,50
        return x
================================================
FILE: GDANet/model/__init__.py
================================================
================================================
FILE: GDANet/model/util/GDANet_util.py
================================================
import torch
from torch import nn
def knn(x, k):
    """Find the k nearest neighbours of every point.

    Args:
        x: (batch, channels, num_points) tensor.
        k: neighbourhood size.
    Returns:
        idx: (batch, num_points, k) neighbour indices (self included first).
        neg_sq_dist: (batch, num_points, num_points) negated squared
            pairwise distances, so larger means closer.
    """
    sq_norm = (x ** 2).sum(dim=1, keepdim=True)            # (b, 1, n)
    cross = torch.matmul(x.transpose(2, 1), x)             # (b, n, n)
    # -(|xi|^2 - 2 xi.xj + |xj|^2) = 2 xi.xj - |xi|^2 - |xj|^2
    neg_sq_dist = 2 * cross - sq_norm - sq_norm.transpose(2, 1)
    idx = neg_sq_dist.topk(k=k, dim=-1)[1]                 # (b, n, k)
    return idx, neg_sq_dist
def local_operator(x, k):
    """Build kNN edge features: for each point, concatenate
    (neighbor - point, neighbor) over its k nearest neighbors.

    Args:
        x: (B, C, N) feature/point tensor.
        k: neighborhood size.
    Returns:
        (B, 2*C, N, k) edge-feature tensor.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    idx, _ = knn(x, k=k)
    # Fix: derive the device from the input tensor instead of hard-coding
    # CUDA, so the operator also runs on CPU tensors.
    device = x.device
    # Flatten (batch, point, neighbor) indices into one gather index.
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()
    neighbor = x.view(batch_size * num_points, -1).contiguous()[idx, :]
    neighbor = neighbor.view(batch_size, num_points, k, num_dims).contiguous()
    # Repeat each point k times so it can be paired with each neighbor.
    x = x.view(batch_size, num_points, 1, num_dims).contiguous().repeat(1, 1, k, 1)
    feature = torch.cat((neighbor-x, neighbor), dim=3).permute(0, 3, 1, 2).contiguous()  # local and global all in
    return feature
def local_operator_withnorm(x, norm_plt, k):
    """Build kNN edge features including per-point normals: for each point,
    concatenate (neighbor - point, neighbor, neighbor_normal).

    Args:
        x: (B, C, N) points (neighborhoods computed from these).
        norm_plt: (B, C, N) normals, gathered with the same neighbor indices.
        k: neighborhood size.
    Returns:
        (B, 3*C, N, k) edge-feature tensor.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    norm_plt = norm_plt.view(batch_size, -1, num_points)
    idx, _ = knn(x, k=k)  # (batch_size, num_points, k)
    # Fix: derive the device from the input tensor instead of hard-coding
    # CUDA, so the operator also runs on CPU tensors.
    device = x.device
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()
    norm_plt = norm_plt.transpose(2, 1).contiguous()
    # Gather neighbor coordinates and normals with the same flat index.
    neighbor = x.view(batch_size * num_points, -1)[idx, :]
    neighbor_norm = norm_plt.view(batch_size * num_points, -1)[idx, :]
    neighbor = neighbor.view(batch_size, num_points, k, num_dims)
    neighbor_norm = neighbor_norm.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    feature = torch.cat((neighbor-x, neighbor, neighbor_norm), dim=3).permute(0, 3, 1, 2)  # 3c
    return feature
def GDM(x, M):
    """
    Geometry-Disentangle Module.

    Splits the N points into a sharp-variation and a gentle-variation
    component (M points each), ranked by how far every point lies from its
    graph-smoothed reconstruction (Eq.(5)).

    Args:
        x: (B, C, N) feature tensor.
        M: number of disentangled points in both sharp and gentle variation components.
    Returns:
        xs: (B, M, C) sharp-variation points.
        xg: (B, M, C) gentle-variation points.
    """
    k = 64  # number of neighbors to decide the range of j in Eq.(5)
    tau = 0.2  # threshold in Eq.(2)
    sigma = 2  # parameters of f (Gaussian function in Eq.(2))
    ###############
    """Graph Construction:"""
    # Fix: derive the device from the input instead of hard-coding CUDA,
    # so the module also works for CPU tensors.
    device = x.device
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    idx, p = knn(x, k=k)  # p: -[(x1-x2)^2+...]
    # here we add a tau: edges longer than tau are cut
    p1 = torch.abs(p)
    p1 = torch.sqrt(p1)
    mask = p1 < tau
    # here we add a sigma (Gaussian kernel width)
    p = p / (sigma * sigma)
    w = torch.exp(p)  # b,n,n
    w = torch.mul(mask.float(), w)
    # Row-normalize: D^-1 * W gives the normalized adjacency A_hat.
    b = 1/torch.sum(w, dim=1)
    b = b.reshape(batch_size, num_points, 1).repeat(1, 1, num_points)
    c = torch.eye(num_points, num_points, device=device)
    c = c.expand(batch_size, num_points, num_points)
    D = b * c  # b,n,n
    A = torch.matmul(D, w)  # normalized adjacency matrix A_hat
    # Get Aij in a local area (drop column 0, i.e. the self-neighbor):
    idx2 = idx.view(batch_size * num_points, -1)
    idx_base2 = torch.arange(0, batch_size * num_points, device=device).view(-1, 1) * num_points
    idx2 = idx2 + idx_base2
    idx2 = idx2.reshape(batch_size * num_points, k)[:, 1:k]
    idx2 = idx2.reshape(batch_size * num_points * (k - 1))
    idx2 = idx2.view(-1)
    A = A.view(-1).contiguous()
    A = A[idx2].reshape(batch_size, num_points, k - 1).contiguous()  # Aij: b,n,k
    ###############
    """Disentangling Point Clouds into Sharp(xs) and Gentle(xg) Variation Components:"""
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.reshape(batch_size * num_points, k)[:, 1:k]
    idx = idx.reshape(batch_size * num_points * (k - 1))
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()  # b,n,c
    neighbor = x.view(batch_size * num_points, -1).contiguous()[idx, :]
    neighbor = neighbor.view(batch_size, num_points, k - 1, num_dims).contiguous()  # b,n,k,c
    A = A.reshape(batch_size, num_points, k - 1, 1).contiguous()  # b,n,k,1
    # Graph-smoothed reconstruction of each point from its neighbors.
    n = A.mul(neighbor)  # b,n,k,c
    n = torch.sum(n, dim=2)  # b,n,c
    pai = torch.norm(x - n, dim=-1).pow(2)  # Eq.(5)
    pais = pai.topk(k=M, dim=-1)[1]  # first M points as the sharp variation component
    paig = (-pai).topk(k=M, dim=-1)[1]  # last M points as the gentle variation component
    pai_base = torch.arange(0, batch_size, device=device).view(-1, 1) * num_points
    indices = (pais + pai_base).view(-1)
    indiceg = (paig + pai_base).view(-1)
    xs = x.view(batch_size * num_points, -1).contiguous()[indices, :]
    xg = x.view(batch_size * num_points, -1).contiguous()[indiceg, :]
    xs = xs.view(batch_size, M, -1).contiguous()  # b,M,c
    xg = xg.view(batch_size, M, -1).contiguous()  # b,M,c
    return xs, xg
class SGCAM(nn.Module):
    """Sharp-Gentle Complementary Attention Module.

    Non-local-style cross attention: queries come from the full feature map
    ``x``, keys/values from a disentangled component ``x_2``; the attended
    result is projected back and added residually to ``x``.  The output
    projection is zero-initialized so the module starts as an identity map.
    """
    def __init__(self, in_channels, inter_channels=None, bn_layer=True):
        super(SGCAM, self).__init__()
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        if self.inter_channels is None:
            # Default bottleneck width: half the input channels, at least 1.
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1
        conv_nd = nn.Conv1d
        bn = nn.BatchNorm1d
        # Value projection (applied to x_2).
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)
        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels)
            )
            # Fix: nn.init.constant is the deprecated alias; use the
            # in-place nn.init.constant_ (same zero-init semantics).
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)
        # Query projection (applied to x) and key projection (applied to x_2).
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

    def forward(self, x, x_2):
        """Attend x over x_2 and add the result residually to x.

        Args:
            x: (B, C, N) full feature map (queries).
            x_2: (B, C, M) disentangled component (keys/values).
        Returns:
            (B, C, N) tensor, same shape as x.
        """
        batch_size = x.size(0)
        g_x = self.g(x_2).view(batch_size, self.inter_channels, -1).contiguous()
        g_x = g_x.permute(0, 2, 1).contiguous()
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1).contiguous()
        theta_x = theta_x.permute(0, 2, 1).contiguous()
        phi_x = self.phi(x_2).view(batch_size, self.inter_channels, -1).contiguous()
        W = torch.matmul(theta_x, phi_x)  # Attention Matrix
        N = W.size(-1)
        W_div_C = W / N  # scale by number of key positions
        y = torch.matmul(W_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:]).contiguous()
        W_y = self.W(y)
        y = W_y + x  # residual connection
        return y
================================================
FILE: GDANet/model/util/__init__.py
================================================
================================================
FILE: GDANet/model/util/data_util.py
================================================
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
import os
import json
def load_data(partition):
    """Load all ModelNet40 HDF5 shards for a split into memory.

    Args:
        partition: split name matched against the shard filenames
            ('train' or 'test').
    Returns:
        (all_data, all_label): float32 point clouds and int64 labels,
        concatenated over shards along axis 0.
    """
    all_data = []
    all_label = []
    for h5_name in glob.glob('./data/modelnet40_ply_hdf5_2048/ply_data_%s*.h5' % partition):
        # Fix: open explicitly read-only; h5py's historical default mode
        # ('a') is deprecated and can create/lock files unexpectedly.
        # The context manager also guarantees the handle is closed on error.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    return all_data, all_label
def pc_normalize(pc):
    """Center a point cloud at the origin and scale it into the unit sphere.

    Args:
        pc: (N, 3) array of points.
    Returns:
        (N, 3) array: centered and, when non-degenerate, divided by the
        largest point norm.
    """
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    # Fix: guard the degenerate cloud (all points identical), which would
    # otherwise divide by zero and return NaNs.
    if m > 0:
        pc = pc / m
    return pc
def translate_pointcloud(pointcloud):
    """Augmentation: random per-axis scale in [2/3, 3/2] followed by a
    random per-axis shift in [-0.2, 0.2]; result is cast to float32."""
    scale = np.random.uniform(low=2./3., high=3./2., size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    return (pointcloud * scale + shift).astype('float32')
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
    """Augmentation: add clipped Gaussian noise to every coordinate.

    Mutates `pointcloud` in place and returns it.
    """
    n_pts, n_dims = pointcloud.shape
    noise = np.clip(sigma * np.random.randn(n_pts, n_dims), -clip, clip)
    pointcloud += noise
    return pointcloud
# =========== ModelNet40 =================
class ModelNet40(Dataset):
    """ModelNet40 classification dataset backed by the HDF5 dumps.

    The entire split is loaded into memory by load_data at construction.
    """
    def __init__(self, num_points, partition='train'):
        self.data, self.label = load_data(partition)
        self.num_points = num_points  # points kept per sample
        self.partition = partition  # Here the new given partition will cover the 'train'

    def __getitem__(self, item):  # indice of the pts or label
        # Take the first num_points points of the stored cloud.
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        if self.partition == 'train':
            # pointcloud = pc_normalize(pointcloud) # you can try to add it or not to train our model
            # Train-time augmentation: random scale/shift, then reorder points.
            pointcloud = translate_pointcloud(pointcloud)
            np.random.shuffle(pointcloud)  # shuffle the order of pts
        return pointcloud, label

    def __len__(self):
        return self.data.shape[0]
# =========== ShapeNet Part =================
class PartNormalDataset(Dataset):
    """ShapeNet-Part dataset with per-point normals and part labels.

    Reads the *_normal benchmark layout: one .txt file per shape whose
    columns are parsed as xyz (0:3), normals (3:6) and the part label
    (last column).
    """
    def __init__(self, npoints=2500, split='train', normalize=False):
        self.npoints = npoints  # points resampled per shape in __getitem__
        self.root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal'
        self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
        self.cat = {}
        self.normalize = normalize
        # Category name -> synset offset (the per-category directory name).
        with open(self.catfile, 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = ls[1]
        self.cat = {k: v for k, v in self.cat.items()}
        self.meta = {}
        # Official shuffled split lists; entries keyed by file stem.
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
            train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
            val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
            test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        for item in self.cat:
            self.meta[item] = []
            dir_point = os.path.join(self.root, self.cat[item])
            fns = sorted(os.listdir(dir_point))
            # Keep only the files belonging to the requested split
            # (fn[0:-4] strips the '.txt' extension).
            if split == 'trainval':
                fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
            elif split == 'train':
                fns = [fn for fn in fns if fn[0:-4] in train_ids]
            elif split == 'val':
                fns = [fn for fn in fns if fn[0:-4] in val_ids]
            elif split == 'test':
                fns = [fn for fn in fns if fn[0:-4] in test_ids]
            else:
                print('Unknown split: %s. Exiting..' % (split))
                exit(-1)
            for fn in fns:
                token = (os.path.splitext(os.path.basename(fn))[0])
                self.meta[item].append(os.path.join(dir_point, token + '.txt'))
        # Flat list of (category_name, file_path) pairs.
        self.datapath = []
        for item in self.cat:
            for fn in self.meta[item]:
                self.datapath.append((item, fn))
        self.classes = dict(zip(self.cat, range(len(self.cat))))
        # Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
        self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
                            'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
                            'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
                            'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
                            'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
        self.cache = {}  # from index to (point_set, cls, seg) tuple
        self.cache_size = 20000

    def __getitem__(self, index):
        # The raw parsed arrays are cached; normalization and resampling
        # below still run on every call, so each epoch re-samples points.
        if index in self.cache:
            point_set, normal, seg, cls = self.cache[index]
        else:
            fn = self.datapath[index]
            cat = self.datapath[index][0]
            cls = self.classes[cat]
            cls = np.array([cls]).astype(np.int32)
            data = np.loadtxt(fn[1]).astype(np.float32)
            point_set = data[:, 0:3]
            normal = data[:, 3:6]
            seg = data[:, -1].astype(np.int32)
            if len(self.cache) < self.cache_size:
                self.cache[index] = (point_set, normal, seg, cls)
        if self.normalize:
            point_set = pc_normalize(point_set)
        choice = np.random.choice(len(seg), self.npoints, replace=True)
        # resample
        # note that the number of points in some points clouds is less than 2048, thus use random.choice
        # remember to use the same seed during train and test for a getting stable result
        point_set = point_set[choice, :]
        seg = seg[choice]
        normal = normal[choice, :]
        return point_set, cls, seg, normal

    def __len__(self):
        return len(self.datapath)
# Smoke test: build both ModelNet40 splits and print per-sample shapes.
if __name__ == '__main__':
    train = ModelNet40(1024)
    test = ModelNet40(1024, 'test')
    for data, label in train:
        print(data.shape)
        print(label.shape)
================================================
FILE: GDANet/model/util/util.py
================================================
import numpy as np
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
    """Cross-entropy loss, optionally with label smoothing (eps = 0.2).

    Args:
        pred: (B, n_class) raw logits.
        gold: ground-truth class indices (flattened internally).
        smoothing: when True, spread eps of the probability mass uniformly
            over the wrong classes.
    Returns:
        Scalar mean loss tensor.
    """
    gold = gold.contiguous().view(-1)
    if not smoothing:
        return F.cross_entropy(pred, gold, reduction='mean')
    eps = 0.2
    n_class = pred.size(1)
    # One-hot targets softened to (1 - eps) on the true class and
    # eps / (n_class - 1) on every other class.
    hard = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
    soft = hard * (1 - eps) + (1 - hard) * eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    return -(soft * log_prb).sum(dim=1).mean()
# create a file and write the text into it:
class IOStream():
    """Tiny logger: echoes each message to stdout and appends it to a file."""
    def __init__(self, path):
        # Append mode so successive runs extend the same log file.
        self.f = open(path, 'a')

    def cprint(self, text):
        """Print `text` and persist it to the log (flushed immediately)."""
        print(text)
        self.f.write(text+'\n')
        self.f.flush()

    def close(self):
        self.f.close()
def to_categorical(y, num_classes):
    """One-hot encode an integer label tensor.

    Args:
        y: tensor of class indices (any shape).
        num_classes: size of the one-hot dimension.
    Returns:
        Float tensor with a trailing num_classes one-hot axis, moved back
        to CUDA when the input lived there.
    """
    eye = torch.eye(num_classes)
    encoded = eye[y.cpu().data.numpy(), ]
    if y.is_cuda:
        return encoded.cuda(non_blocking=True)
    return encoded
def compute_overall_iou(pred, target, num_classes):
    """Per-sample mean IoU over the part classes present in the target.

    Args:
        pred: (B, N, num_classes) per-point class scores; argmax over the
            last dim gives the predicted class of each point.
        target: (B, N) ground-truth class index per point.
        num_classes: total number of part classes (all categories pooled).
    Returns:
        List of length B: for each sample, the IoU averaged over the
        classes that actually occur in its ground truth.
    """
    shape_ious = []
    pred = pred.max(dim=2)[1]  # (batch_size, num_points) predicted class per point
    pred_np = pred.cpu().data.numpy()
    target_np = target.cpu().data.numpy()
    for shape_idx in range(pred.size(0)):  # sample_idx
        part_ious = []
        for part in range(num_classes):
            # Point-wise intersection/union of this class over the sample.
            intersection = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            union = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            # Fix: renamed from `F`, which shadowed the module-level
            # `torch.nn.functional as F` import inside this function.
            gt_count = np.sum(target_np[shape_idx] == part)
            if gt_count != 0:  # only score classes present in the ground truth
                iou = intersection / float(union)
                part_ious.append(iou)
        shape_ious.append(np.mean(part_ious))  # sample-level average over present classes
    return shape_ious  # [batch_size]
================================================
FILE: LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2021, University of Michigan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: PCT_Pytorch/LICENSE
================================================
MIT License
Copyright (c) 2021 Strawberry-Eat-Mango
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: PCT_Pytorch/README.md
================================================
## PCT: Point Cloud Transformer
This is a Pytorch implementation of PCT: Point Cloud Transformer.
Paper link: https://arxiv.org/pdf/2012.09688.pdf
### Requirements
python >= 3.7
pytorch >= 1.6
h5py
scikit-learn
and
```shell script
pip install pointnet2_ops_lib/.
```
The code is from https://github.com/erikwijmans/Pointnet2_PyTorch https://github.com/WangYueFt/dgcnn and https://github.com/MenghaoGuo/PCT
### Models
We get an accuracy of 93.2% on the ModelNet40(http://modelnet.cs.princeton.edu/) validation dataset
The path of the model is in ./checkpoints/best/models/model.t7
### Example training and testing
```shell script
# train
python main.py --exp_name=train --num_points=1024 --use_sgd=True --batch_size 32 --epochs 250 --lr 0.0001
# test
python main.py --exp_name=test --num_points=1024 --use_sgd=True --eval=True --model_path=checkpoints/best/models/model.t7 --test_batch_size 8
```
### Citation
If it is helpful for your work, please cite this paper:
```latex
@misc{guo2020pct,
title={PCT: Point Cloud Transformer},
author={Meng-Hao Guo and Jun-Xiong Cai and Zheng-Ning Liu and Tai-Jiang Mu and Ralph R. Martin and Shi-Min Hu},
year={2020},
eprint={2012.09688},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
================================================
FILE: PCT_Pytorch/checkpoints/best/models/model.t7
================================================
[File too large to display: 11.0 MB]
================================================
FILE: PCT_Pytorch/data.py
================================================
import os
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
def download():
    """Download and unpack the ModelNet40 HDF5 archive into ./data, once.

    NOTE(review): shells out via os.system to wget/unzip/mv/rm, so this is
    POSIX-only and assumes those tools exist; the URL is fixed and trusted
    here, but string-interpolated shell commands are fragile in general.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
        www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))  # zipfile[:-4] strips '.zip'
        os.system('rm %s' % (zipfile))
def load_data(partition):
    """Load all ModelNet40 HDF5 shards for a split, downloading if needed.

    Args:
        partition: split name matched against shard filenames
            ('train' or 'test').
    Returns:
        (all_data, all_label): float32 point clouds and int64 labels,
        concatenated over shards along axis 0.
    """
    download()
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    all_data = []
    all_label = []
    for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5'%partition)):
        # Fix: open explicitly read-only; h5py's historical default mode
        # ('a') is deprecated and can create/lock files unexpectedly.
        # The context manager also guarantees the handle is closed on error.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    return all_data, all_label
def random_point_dropout(pc, max_dropout_ratio=0.875):
    """Augmentation: collapse a random subset of points onto the first point.

    A dropout ratio is drawn uniformly from [0, max_dropout_ratio); each
    point is then dropped independently with that probability.  Mutates
    `pc` (Nx3) in place and returns it.
    """
    ratio = np.random.random() * max_dropout_ratio  # 0~0.875
    drop_idx = np.where(np.random.random(pc.shape[0]) <= ratio)[0]
    if drop_idx.size > 0:
        pc[drop_idx, :] = pc[0, :]  # dropped points become copies of point 0
    return pc
def translate_pointcloud(pointcloud):
    """Augmentation: random per-axis scale in [2/3, 3/2] followed by a
    random per-axis shift in [-0.2, 0.2]; result is cast to float32."""
    scale = np.random.uniform(low=2./3., high=3./2., size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    return (pointcloud * scale + shift).astype('float32')
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
    """Augmentation: add clipped Gaussian noise to every coordinate.

    Mutates `pointcloud` in place and returns it.
    """
    n_pts, n_dims = pointcloud.shape
    noise = np.clip(sigma * np.random.randn(n_pts, n_dims), -clip, clip)
    pointcloud += noise
    return pointcloud
class ModelNet40(Dataset):
    """ModelNet40 classification dataset backed by the HDF5 dumps.

    The entire split is loaded into memory by load_data at construction.
    """
    def __init__(self, num_points, partition='train'):
        self.data, self.label = load_data(partition)
        self.num_points = num_points  # points kept per sample
        self.partition = partition

    def __getitem__(self, item):
        # Take the first num_points points of the stored cloud.
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        if self.partition == 'train':
            # Train-time augmentation: random dropout, scale/shift, reorder.
            pointcloud = random_point_dropout(pointcloud) # open for dgcnn not for our idea  for all
            pointcloud = translate_pointcloud(pointcloud)
            np.random.shuffle(pointcloud)
        return pointcloud, label

    def __len__(self):
        return self.data.shape[0]
# Smoke test: build both ModelNet40 splits and print per-sample shapes.
if __name__ == '__main__':
    train = ModelNet40(1024)
    test = ModelNet40(1024, 'test')
    for data, label in train:
        print(data.shape)
        print(label.shape)
================================================
FILE: PCT_Pytorch/main.py
================================================
from __future__ import print_function
import argparse
import os
import time

import numpy as np
import sklearn.metrics as metrics
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader

from data import ModelNet40

# Fix: this file is executed directly (`python main.py`, see the __main__
# block and README), where relative imports have no parent package and
# raise ImportError.  Try the package-relative form first (works for
# `python -m PCT_Pytorch.main`), then fall back to the plain module import
# used for script execution.
try:
    from .model import Pct
    from .util import cal_loss, IOStream
except ImportError:
    from model import Pct
    from util import cal_loss, IOStream
def _init_():
    """Create the checkpoints/<exp_name>/models directory tree and back up
    the current source files into it.

    Relies on the module-level `args` bound in the __main__ block; the
    `cp` shell-outs make this POSIX-only.
    """
    if not os.path.exists('checkpoints'):
        os.makedirs('checkpoints')
    if not os.path.exists('checkpoints/'+args.exp_name):
        os.makedirs('checkpoints/'+args.exp_name)
    if not os.path.exists('checkpoints/'+args.exp_name+'/'+'models'):
        os.makedirs('checkpoints/'+args.exp_name+'/'+'models')
    # Snapshot the sources alongside the run for reproducibility.
    os.system('cp main.py checkpoints'+'/'+args.exp_name+'/'+'main.py.backup')
    os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
    os.system('cp util.py checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')
    os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
def train(args, io):
    """Train Pct on ModelNet40 and checkpoint the best test accuracy.

    Args:
        args: parsed CLI namespace (batch sizes, lr, epochs, use_sgd, ...).
        io: IOStream logger; per-epoch summaries go through io.cprint.
    """
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")
    model = Pct(args).to(device)
    print(str(model))
    model = nn.DataParallel(model)
    if args.use_sgd:
        print("Use SGD")
        # NOTE(review): the SGD lr is args.lr * 100 (default 0.0001 -> 0.01).
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=5e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    # Cosine decay of the lr over args.epochs down to eta_min = args.lr.
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    criterion = cal_loss
    best_test_acc = 0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() runs before any opt.step() this
        # epoch; PyTorch >= 1.1 expects the opposite order — confirm intent.
        scheduler.step()
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        idx = 0
        total_time = 0.0
        for data, label in (train_loader):
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)  # (B, N, 3) -> (B, 3, N)
            batch_size = data.size()[0]
            opt.zero_grad()
            start_time = time.time()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            end_time = time.time()
            total_time += (end_time - start_time)
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
            idx += 1
        print ('train total time is',total_time)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss*1.0/count,
                                                                                 metrics.accuracy_score(
                                                                                     train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(
                                                                                     train_true, train_pred))
        io.cprint(outstr)
        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        total_time = 0.0
        # NOTE(review): this evaluation loop runs without torch.no_grad(),
        # so autograd state is built needlessly — consider wrapping it.
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            start_time = time.time()
            logits = model(data)
            end_time = time.time()
            total_time += (end_time - start_time)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        print ('test total time is', total_time)
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                              test_loss*1.0/count,
                                                                              test_acc,
                                                                              avg_per_class_acc)
        io.cprint(outstr)
        # Keep only the best-accuracy weights.
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
def test(args, io):
    """Evaluate a pretrained Pct checkpoint on the ModelNet40 test split.

    Args:
        args: parsed CLI namespace; args.model_path points at the state dict.
        io: IOStream logger for the final accuracy line.
    """
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")
    model = Pct(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_true = []
    test_pred = []
    # Fix: run inference under torch.no_grad() — the original built autograd
    # graphs for every batch, wasting memory during pure evaluation.
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)  # (B, N, 3) -> (B, 3, N)
            logits = model(data)
            preds = logits.max(dim=1)[1]
            if args.test_batch_size == 1:
                # squeeze() above made these 0-d; wrap so concatenate works.
                test_true.append([label.cpu().numpy()])
                test_pred.append([preds.detach().cpu().numpy()])
            else:
                test_true.append(label.cpu().numpy())
                test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
if __name__ == "__main__":
    # Training settings
    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
                        choices=['modelnet40'])
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--epochs', type=int, default=250, metavar='N',
                        help='number of episode to train ')
    # NOTE(review): `type=bool` flags are an argparse foot-gun — any
    # non-empty string (including "False") parses as True.
    parser.add_argument('--use_sgd', type=bool, default=True,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda', type=bool, default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--eval', type=bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    args = parser.parse_args()
    # Set up checkpoint dirs / source backups, then a run logger.
    _init_()
    io = IOStream('checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        io.cprint(
            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
        torch.cuda.manual_seed(args.seed)
    else:
        io.cprint('Using CPU')
    # Dispatch: training by default, evaluation with --eval.
    if not args.eval:
        train(args, io)
    else:
        test(args, io)
================================================
FILE: PCT_Pytorch/model.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .util import sample_and_group
class Local_op(nn.Module):
    """Aggregate per-neighborhood features with two pointwise convs + max pool.

    Input is (B, npoint, nsample, C); output is (B, out_channels, npoint).
    """

    def __init__(self, in_channels, out_channels):
        super(Local_op, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.bn2 = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        batch, npoint, nsample, channels = x.size()
        # Fold the group dimension into the batch: (B*npoint, C, nsample).
        grouped = x.permute(0, 1, 3, 2).reshape(-1, channels, nsample)
        merged = grouped.size(0)
        grouped = F.relu(self.bn1(self.conv1(grouped)))
        grouped = F.relu(self.bn2(self.conv2(grouped)))
        # Max over the neighborhood axis, then restore (B, C_out, npoint).
        pooled = F.adaptive_max_pool1d(grouped, 1).view(merged, -1)
        return pooled.reshape(batch, npoint, -1).permute(0, 2, 1)
class Pct(nn.Module):
    """PCT classification network: per-point embedding, two sample-and-group
    stages, an offset-attention transformer stack, and an MLP head."""

    def __init__(self, args, output_channels=40):
        super(Pct, self).__init__()
        self.args = args
        # Per-point embedding: 3 -> 64 -> 64.
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(64)
        self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
        self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
        self.pt_last = Point_Transformer_Last(args)
        # 1280 = 4 * 256 (transformer outputs) + 256 (skip from feature_1).
        self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),
                                       nn.BatchNorm1d(1024),
                                       nn.LeakyReLU(negative_slope=0.2))
        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=args.dropout)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=args.dropout)
        self.linear3 = nn.Linear(256, output_channels)

    def forward(self, x):
        # x: (B, 3, N) point cloud.
        coords = x.permute(0, 2, 1)                 # (B, N, 3)
        batch = x.size(0)
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.relu(self.bn2(self.conv2(feat)))
        feat = feat.permute(0, 2, 1)                # (B, N, 64)
        # Stage 1: 512 centroids, radius 0.15, 32 neighbors each.
        new_xyz, grouped = sample_and_group(npoint=512, radius=0.15, nsample=32,
                                            xyz=coords, points=feat)
        feature_0 = self.gather_local_0(grouped)
        # Stage 2: 256 centroids, radius 0.2, 32 neighbors each.
        new_xyz, grouped = sample_and_group(npoint=256, radius=0.2, nsample=32,
                                            xyz=new_xyz,
                                            points=feature_0.permute(0, 2, 1))
        feature_1 = self.gather_local_1(grouped)
        fused = self.conv_fuse(torch.cat([self.pt_last(feature_1), feature_1], dim=1))
        pooled = F.adaptive_max_pool1d(fused, 1).view(batch, -1)
        out = self.dp1(F.leaky_relu(self.bn6(self.linear1(pooled)), negative_slope=0.2))
        out = self.dp2(F.leaky_relu(self.bn7(self.linear2(out)), negative_slope=0.2))
        return self.linear3(out)
class Point_Transformer_Last(nn.Module):
    """Stack of four offset-attention layers; concatenates all four outputs
    along the channel axis, so the result has 4 * channels channels."""

    def __init__(self, args, channels=256):
        super(Point_Transformer_Last, self).__init__()
        self.args = args
        self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(channels)
        self.bn2 = nn.BatchNorm1d(channels)
        self.sa1 = SA_Layer(channels)
        self.sa2 = SA_Layer(channels)
        self.sa3 = SA_Layer(channels)
        self.sa4 = SA_Layer(channels)

    def forward(self, x):
        # x: (B, C, N) local features from the grouping stage.
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.relu(self.bn2(self.conv2(feat)))
        # Each attention layer feeds the next; keep every intermediate.
        outs = [self.sa1(feat)]
        for layer in (self.sa2, self.sa3, self.sa4):
            outs.append(layer(outs[-1]))
        return torch.cat(outs, dim=1)  # (B, 4*C, N)
class SA_Layer(nn.Module):
    """Offset-attention self-attention layer from PCT.

    Query and key projections share one weight; the attention matrix is
    softmaxed then re-normalized over the query axis, and the residual
    branch transforms (x - attended) rather than the attended features.
    """

    def __init__(self, channels):
        super(SA_Layer, self).__init__()
        self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        # Tie the query projection to the key projection.
        self.q_conv.weight = self.k_conv.weight
        self.q_conv.bias = self.k_conv.bias
        self.v_conv = nn.Conv1d(channels, channels, 1)
        self.trans_conv = nn.Conv1d(channels, channels, 1)
        self.after_norm = nn.BatchNorm1d(channels)
        self.act = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        queries = self.q_conv(x).permute(0, 2, 1)   # (B, N, C/4)
        keys = self.k_conv(x)                       # (B, C/4, N)
        values = self.v_conv(x)                     # (B, C, N)
        scores = torch.bmm(queries, keys)           # (B, N, N)
        attention = self.softmax(scores)
        # L1-style renormalization over the query axis (offset-attention trick).
        attention = attention / (1e-9 + attention.sum(dim=1, keepdim=True))
        attended = torch.bmm(values, attention)     # (B, C, N)
        delta = self.act(self.after_norm(self.trans_conv(x - attended)))
        return x + delta
================================================
FILE: PCT_Pytorch/model_new.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from util import sample_and_group
class Local_op(nn.Module):
    """Neighborhood feature aggregation: two 1x1 convs followed by max pooling.

    Expects (B, npoint, nsample, C); produces (B, out_channels, npoint).
    """

    def __init__(self, in_channels, out_channels):
        super(Local_op, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.bn2 = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        b, n, s, d = x.size()
        # Treat every neighborhood as an independent batch item.
        flat = x.permute(0, 1, 3, 2).reshape(-1, d, s)
        merged = flat.size(0)
        flat = F.relu(self.bn1(self.conv1(flat)))
        flat = F.relu(self.bn2(self.conv2(flat)))
        # Pool away the sample axis and re-split the batch/point dims.
        pooled = F.adaptive_max_pool1d(flat, 1).view(merged, -1)
        return pooled.reshape(b, n, -1).permute(0, 2, 1)
class Pct(nn.Module):
    """PCT classifier (positional-encoding variant): the transformer stage
    additionally receives the stage-2 centroid coordinates."""

    def __init__(self, args, output_channels=40):
        super(Pct, self).__init__()
        self.args = args
        # Per-point embedding: 3 -> 64 -> 64.
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(64)
        self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
        self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
        self.pt_last = Point_Transformer_Last(args)
        # 1280 = 4 * 256 (transformer outputs) + 256 (skip from feature_1).
        self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),
                                       nn.BatchNorm1d(1024),
                                       nn.LeakyReLU(negative_slope=0.2))
        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=args.dropout)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=args.dropout)
        self.linear3 = nn.Linear(256, output_channels)

    def forward(self, x):
        # x: (B, 3, N) point cloud.
        coords = x.permute(0, 2, 1)                 # (B, N, 3)
        batch = x.size(0)
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.relu(self.bn2(self.conv2(feat)))
        feat = feat.permute(0, 2, 1)                # (B, N, 64)
        new_xyz, grouped = sample_and_group(npoint=512, radius=0.15, nsample=32,
                                            xyz=coords, points=feat)
        feature_0 = self.gather_local_0(grouped)
        new_xyz, grouped = sample_and_group(npoint=256, radius=0.2, nsample=32,
                                            xyz=new_xyz,
                                            points=feature_0.permute(0, 2, 1))
        feature_1 = self.gather_local_1(grouped)
        # The transformer uses the centroid coordinates for positional encoding.
        trans = self.pt_last(feature_1, new_xyz)
        fused = self.conv_fuse(torch.cat([trans, feature_1], dim=1))
        pooled = F.adaptive_max_pool1d(fused, 1).view(batch, -1)
        out = self.dp1(F.leaky_relu(self.bn6(self.linear1(pooled)), negative_slope=0.2))
        out = self.dp2(F.leaky_relu(self.bn7(self.linear2(out)), negative_slope=0.2))
        return self.linear3(out)
class Point_Transformer_Last(nn.Module):
    """Four offset-attention layers sharing a learned positional embedding
    computed from the centroid coordinates."""

    def __init__(self, args, channels=256):
        super(Point_Transformer_Last, self).__init__()
        self.args = args
        self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
        self.pos_xyz = nn.Conv1d(3, channels, 1)
        self.bn1 = nn.BatchNorm1d(channels)
        self.sa1 = SA_Layer(channels)
        self.sa2 = SA_Layer(channels)
        self.sa3 = SA_Layer(channels)
        self.sa4 = SA_Layer(channels)

    def forward(self, x, xyz):
        # x: (B, C, N) features; xyz: (B, N, 3) centroid coordinates.
        pos = self.pos_xyz(xyz.permute(0, 2, 1))   # (B, C, N) positional code
        feat = F.relu(self.bn1(self.conv1(x)))
        x1 = self.sa1(feat, pos)
        x2 = self.sa2(x1, pos)
        x3 = self.sa3(x2, pos)
        x4 = self.sa4(x3, pos)
        return torch.cat((x1, x2, x3, x4), dim=1)  # (B, 4*C, N)
class SA_Layer(nn.Module):
    """Offset-attention layer with an additive positional term.

    `xyz` is a (B, C, N) positional embedding added to the features before
    the attention projections; otherwise identical to the plain SA layer.
    """

    def __init__(self, channels):
        super(SA_Layer, self).__init__()
        self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        # Query and key share one projection matrix.
        self.q_conv.weight = self.k_conv.weight
        self.q_conv.bias = self.k_conv.bias
        self.v_conv = nn.Conv1d(channels, channels, 1)
        self.trans_conv = nn.Conv1d(channels, channels, 1)
        self.after_norm = nn.BatchNorm1d(channels)
        self.act = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, xyz):
        x = x + xyz  # inject the positional encoding
        q = self.q_conv(x).permute(0, 2, 1)       # (B, N, C/4)
        k = self.k_conv(x)                        # (B, C/4, N)
        v = self.v_conv(x)                        # (B, C, N)
        attn = self.softmax(torch.bmm(q, k))      # (B, N, N)
        # Renormalize over the query axis (offset-attention trick).
        attn = attn / (1e-9 + attn.sum(dim=1, keepdim=True))
        refined = torch.bmm(v, attn)              # (B, C, N)
        offset = self.act(self.after_norm(self.trans_conv(x - refined)))
        return x + offset
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/MANIFEST.in
================================================
graft pointnet2_ops/_ext-src
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/__init__.py
================================================
import pointnet2_ops.pointnet2_modules
import pointnet2_ops.pointnet2_utils
from pointnet2_ops._version import __version__
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/ball_query.h
================================================
#pragma once
#include <torch/extension.h>
// For each query point in new_xyz (B, M, 3), return the indices of up to
// `nsample` points of xyz (B, N, 3) within `radius`; output is an int
// tensor of shape (B, M, nsample).
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
                      const int nsample);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/cuda_utils.h
================================================
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define TOTAL_THREADS 512
// Largest power of two <= work_size, clamped to [1, TOTAL_THREADS]; used
// to pick a CUDA block size for a 1-D workload.
inline int opt_n_threads(int work_size) {
  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);

  return max(min(1 << pow_2, TOTAL_THREADS), 1);
}

// 2-D block configuration: x gets its preferred thread count first, and y
// receives whatever budget (TOTAL_THREADS / x_threads) remains.
inline dim3 opt_block_config(int x, int y) {
  const int x_threads = opt_n_threads(x);
  const int y_threads =
      max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
  dim3 block_config(x_threads, y_threads, 1);

  return block_config;
}

// Abort the process with a diagnostic if the most recent kernel launch or
// async CUDA call failed.
#define CUDA_CHECK_ERRORS() \
  do { \
    cudaError_t err = cudaGetLastError(); \
    if (cudaSuccess != err) { \
      fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
              cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
              __FILE__); \
      exit(-1); \
    } \
  } while (0)
#endif
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/group_points.h
================================================
#pragma once
#include <torch/extension.h>
// Gather features points (B, C, N) at idx (B, npoints, nsample) into
// (B, C, npoints, nsample); `group_points_grad` scatters gradients back
// to the original N points.
at::Tensor group_points(at::Tensor points, at::Tensor idx);
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/interpolate.h
================================================
#pragma once
#include <torch/extension.h>
#include <vector>
// three_nn: squared distances and indices of the 3 nearest `knows` points
// for each of `unknowns`; three_interpolate: weighted sum of 3 features per
// point, with three_interpolate_grad as its backward pass.
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
                             at::Tensor weight);
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
                                  at::Tensor weight, const int m);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/sampling.h
================================================
#pragma once
#include <torch/extension.h>
// gather_points: select columns of points (B, C, N) by idx (B, M);
// gather_points_grad: scatter gradients back to N columns;
// furthest_point_sampling: indices (B, nsamples) of an FPS subset.
at::Tensor gather_points(at::Tensor points, at::Tensor idx);
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/utils.h
================================================
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
// Argument-validation helpers used by the extension's C++ entry points;
// each aborts via AT_ASSERT with a descriptive message on failure.
#define CHECK_CUDA(x) \
  do { \
    AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \
  } while (0)

#define CHECK_CONTIGUOUS(x) \
  do { \
    AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
  } while (0)

#define CHECK_IS_INT(x) \
  do { \
    AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \
              #x " must be an int tensor"); \
  } while (0)

#define CHECK_IS_FLOAT(x) \
  do { \
    AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
              #x " must be a float tensor"); \
  } while (0)
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query.cpp
================================================
#include "ball_query.h"
#include "utils.h"
// CUDA launcher implemented in ball_query_gpu.cu.
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
                                     int nsample, const float *new_xyz,
                                     const float *xyz, int *idx);

// For each query point in new_xyz (B, M, 3), collect indices of up to
// `nsample` points of xyz (B, N, 3) within `radius`. Returns an int
// tensor (B, M, nsample). Only CUDA tensors are supported.
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
                      const int nsample) {
  CHECK_CONTIGUOUS(new_xyz);
  CHECK_CONTIGUOUS(xyz);
  CHECK_IS_FLOAT(new_xyz);
  CHECK_IS_FLOAT(xyz);

  if (new_xyz.is_cuda()) {
    CHECK_CUDA(xyz);
  }

  // Zero-initialized output; the kernel overwrites the entries it fills.
  at::Tensor idx =
      torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
                   at::device(new_xyz.device()).dtype(at::ScalarType::Int));

  if (new_xyz.is_cuda()) {
    query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
                                    radius, nsample, new_xyz.data_ptr<float>(),
                                    xyz.data_ptr<float>(), idx.data_ptr<int>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return idx;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
// One block per batch element; threads stride over the m query points.
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
                                        int nsample,
                                        const float *__restrict__ new_xyz,
                                        const float *__restrict__ xyz,
                                        int *__restrict__ idx) {
  // Offset all pointers to this batch element's data.
  int batch_index = blockIdx.x;
  xyz += batch_index * n * 3;
  new_xyz += batch_index * m * 3;
  idx += m * nsample * batch_index;

  int index = threadIdx.x;
  int stride = blockDim.x;

  // Compare squared distances to avoid a sqrt per point.
  float radius2 = radius * radius;
  for (int j = index; j < m; j += stride) {
    float new_x = new_xyz[j * 3 + 0];
    float new_y = new_xyz[j * 3 + 1];
    float new_z = new_xyz[j * 3 + 2];
    for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
      float x = xyz[k * 3 + 0];
      float y = xyz[k * 3 + 1];
      float z = xyz[k * 3 + 2];
      float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
                 (new_z - z) * (new_z - z);
      if (d2 < radius2) {
        if (cnt == 0) {
          // Pre-fill every slot with the first hit so that query points with
          // fewer than nsample neighbors repeat it instead of index 0.
          for (int l = 0; l < nsample; ++l) {
            idx[j * nsample + l] = k;
          }
        }
        idx[j * nsample + cnt] = k;
        ++cnt;
      }
    }
  }
}

// Launch one block per batch on the current torch CUDA stream.
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
                                     int nsample, const float *new_xyz,
                                     const float *xyz, int *idx) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
      b, n, m, radius, nsample, new_xyz, xyz, idx);

  CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/bindings.cpp
================================================
#include "ball_query.h"
#include "group_points.h"
#include "interpolate.h"
#include "sampling.h"
// Python bindings: expose every pointnet2 op to the extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("gather_points", &gather_points);
  m.def("gather_points_grad", &gather_points_grad);
  m.def("furthest_point_sampling", &furthest_point_sampling);

  m.def("three_nn", &three_nn);
  m.def("three_interpolate", &three_interpolate);
  m.def("three_interpolate_grad", &three_interpolate_grad);

  m.def("ball_query", &ball_query);

  m.def("group_points", &group_points);
  m.def("group_points_grad", &group_points_grad);
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points.cpp
================================================
#include "group_points.h"
#include "utils.h"
// CUDA launchers implemented in group_points_gpu.cu.
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
                                 const float *points, const int *idx,
                                 float *out);
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                      int nsample, const float *grad_out,
                                      const int *idx, float *grad_points);

// Gather features points (B, C, N) at idx (B, npoints, nsample) into an
// output of shape (B, C, npoints, nsample). CUDA tensors only.
at::Tensor group_points(at::Tensor points, at::Tensor idx) {
  CHECK_CONTIGUOUS(points);
  CHECK_CONTIGUOUS(idx);
  CHECK_IS_FLOAT(points);
  CHECK_IS_INT(idx);

  if (points.is_cuda()) {
    CHECK_CUDA(idx);
  }

  at::Tensor output =
      torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
                   at::device(points.device()).dtype(at::ScalarType::Float));

  if (points.is_cuda()) {
    group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
                                idx.size(1), idx.size(2),
                                points.data_ptr<float>(), idx.data_ptr<int>(),
                                output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}

// Backward of group_points: scatter grad_out (B, C, npoints, nsample) back
// to grad_points (B, C, n). `n` is the original point count, which cannot
// be recovered from grad_out and must be passed explicitly.
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
  CHECK_CONTIGUOUS(grad_out);
  CHECK_CONTIGUOUS(idx);
  CHECK_IS_FLOAT(grad_out);
  CHECK_IS_INT(idx);

  if (grad_out.is_cuda()) {
    CHECK_CUDA(idx);
  }

  // Zeros so the kernel can accumulate with atomicAdd.
  at::Tensor output =
      torch::zeros({grad_out.size(0), grad_out.size(1), n},
                   at::device(grad_out.device()).dtype(at::ScalarType::Float));

  if (grad_out.is_cuda()) {
    group_points_grad_kernel_wrapper(
        grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
        output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
// One block per batch; the flattened 2-D thread block strides over the
// (channel, point) pairs and copies each point's nsample neighbors.
__global__ void group_points_kernel(int b, int c, int n, int npoints,
                                    int nsample,
                                    const float *__restrict__ points,
                                    const int *__restrict__ idx,
                                    float *__restrict__ out) {
  int batch_index = blockIdx.x;
  points += batch_index * n * c;
  idx += batch_index * npoints * nsample;
  out += batch_index * npoints * nsample * c;

  const int index = threadIdx.y * blockDim.x + threadIdx.x;
  const int stride = blockDim.y * blockDim.x;
  for (int i = index; i < c * npoints; i += stride) {
    const int l = i / npoints;  // channel
    const int j = i % npoints;  // output point
    for (int k = 0; k < nsample; ++k) {
      int ii = idx[j * nsample + k];
      out[(l * npoints + j) * nsample + k] = points[l * n + ii];
    }
  }
}

void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
                                 const float *points, const int *idx,
                                 float *out) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
      b, c, n, npoints, nsample, points, idx, out);

  CUDA_CHECK_ERRORS();
}

// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
// Backward of group_points: the same index can appear many times, so
// gradients are accumulated with atomicAdd.
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
                                         int nsample,
                                         const float *__restrict__ grad_out,
                                         const int *__restrict__ idx,
                                         float *__restrict__ grad_points) {
  int batch_index = blockIdx.x;
  grad_out += batch_index * npoints * nsample * c;
  idx += batch_index * npoints * nsample;
  grad_points += batch_index * n * c;

  const int index = threadIdx.y * blockDim.x + threadIdx.x;
  const int stride = blockDim.y * blockDim.x;
  for (int i = index; i < c * npoints; i += stride) {
    const int l = i / npoints;
    const int j = i % npoints;
    for (int k = 0; k < nsample; ++k) {
      int ii = idx[j * nsample + k];
      atomicAdd(grad_points + l * n + ii,
                grad_out[(l * npoints + j) * nsample + k]);
    }
  }
}

void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                      int nsample, const float *grad_out,
                                      const int *idx, float *grad_points) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
      b, c, n, npoints, nsample, grad_out, idx, grad_points);

  CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate.cpp
================================================
#include "interpolate.h"
#include "utils.h"
// CUDA launchers implemented in interpolate_gpu.cu.
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
                             const float *known, float *dist2, int *idx);
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
                                      const float *points, const int *idx,
                                      const float *weight, float *out);
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
                                           const float *grad_out,
                                           const int *idx, const float *weight,
                                           float *grad_points);

// For each point of unknowns (B, N, 3), find its 3 nearest neighbors among
// knows (B, M, 3). Returns {dist2 (B, N, 3) squared distances,
// idx (B, N, 3) neighbor indices}. CUDA tensors only.
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
  CHECK_CONTIGUOUS(unknowns);
  CHECK_CONTIGUOUS(knows);
  CHECK_IS_FLOAT(unknowns);
  CHECK_IS_FLOAT(knows);

  if (unknowns.is_cuda()) {
    CHECK_CUDA(knows);
  }

  at::Tensor idx =
      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
                   at::device(unknowns.device()).dtype(at::ScalarType::Int));
  at::Tensor dist2 =
      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
                   at::device(unknowns.device()).dtype(at::ScalarType::Float));

  if (unknowns.is_cuda()) {
    three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
                            unknowns.data_ptr<float>(), knows.data_ptr<float>(),
                            dist2.data_ptr<float>(), idx.data_ptr<int>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return {dist2, idx};
}

// Interpolate features points (B, C, M) to (B, C, N): each output point is
// the weighted sum of its 3 source features given by idx/weight (B, N, 3).
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
                             at::Tensor weight) {
  CHECK_CONTIGUOUS(points);
  CHECK_CONTIGUOUS(idx);
  CHECK_CONTIGUOUS(weight);
  CHECK_IS_FLOAT(points);
  CHECK_IS_INT(idx);
  CHECK_IS_FLOAT(weight);

  if (points.is_cuda()) {
    CHECK_CUDA(idx);
    CHECK_CUDA(weight);
  }

  at::Tensor output =
      torch::zeros({points.size(0), points.size(1), idx.size(1)},
                   at::device(points.device()).dtype(at::ScalarType::Float));

  if (points.is_cuda()) {
    three_interpolate_kernel_wrapper(
        points.size(0), points.size(1), points.size(2), idx.size(1),
        points.data_ptr<float>(), idx.data_ptr<int>(), weight.data_ptr<float>(),
        output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}

// Backward of three_interpolate: scatter grad_out (B, C, N) back to
// grad_points (B, C, m). `m` (the source point count) must be passed in.
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
                                  at::Tensor weight, const int m) {
  CHECK_CONTIGUOUS(grad_out);
  CHECK_CONTIGUOUS(idx);
  CHECK_CONTIGUOUS(weight);
  CHECK_IS_FLOAT(grad_out);
  CHECK_IS_INT(idx);
  CHECK_IS_FLOAT(weight);

  if (grad_out.is_cuda()) {
    CHECK_CUDA(idx);
    CHECK_CUDA(weight);
  }

  // Zeros so the kernel can accumulate with atomicAdd.
  at::Tensor output =
      torch::zeros({grad_out.size(0), grad_out.size(1), m},
                   at::device(grad_out.device()).dtype(at::ScalarType::Float));

  if (grad_out.is_cuda()) {
    three_interpolate_grad_kernel_wrapper(
        grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
        weight.data_ptr<float>(), output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: unknown(b, n, 3) known(b, m, 3)
// output: dist2(b, n, 3), idx(b, n, 3)
// input: unknown(b, n, 3) known(b, m, 3)
// output: dist2(b, n, 3), idx(b, n, 3)
// One block per batch; each thread scans all m known points for its
// assigned unknown points, keeping the best three (insertion-sort style).
__global__ void three_nn_kernel(int b, int n, int m,
                                const float *__restrict__ unknown,
                                const float *__restrict__ known,
                                float *__restrict__ dist2,
                                int *__restrict__ idx) {
  int batch_index = blockIdx.x;
  unknown += batch_index * n * 3;
  known += batch_index * m * 3;
  dist2 += batch_index * n * 3;
  idx += batch_index * n * 3;

  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int j = index; j < n; j += stride) {
    float ux = unknown[j * 3 + 0];
    float uy = unknown[j * 3 + 1];
    float uz = unknown[j * 3 + 2];

    // Running top-3 smallest squared distances (best1 <= best2 <= best3).
    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
    int besti1 = 0, besti2 = 0, besti3 = 0;
    for (int k = 0; k < m; ++k) {
      float x = known[k * 3 + 0];
      float y = known[k * 3 + 1];
      float z = known[k * 3 + 2];
      float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
      if (d < best1) {
        best3 = best2;
        besti3 = besti2;
        best2 = best1;
        besti2 = besti1;
        best1 = d;
        besti1 = k;
      } else if (d < best2) {
        best3 = best2;
        besti3 = besti2;
        best2 = d;
        besti2 = k;
      } else if (d < best3) {
        best3 = d;
        besti3 = k;
      }
    }
    dist2[j * 3 + 0] = best1;
    dist2[j * 3 + 1] = best2;
    dist2[j * 3 + 2] = best3;

    idx[j * 3 + 0] = besti1;
    idx[j * 3 + 1] = besti2;
    idx[j * 3 + 2] = besti3;
  }
}

void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
                             const float *known, float *dist2, int *idx) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
                                                      dist2, idx);

  CUDA_CHECK_ERRORS();
}

// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
// output: out(b, c, n)
// Each output element is the weighted sum of its three source features.
__global__ void three_interpolate_kernel(int b, int c, int m, int n,
                                         const float *__restrict__ points,
                                         const int *__restrict__ idx,
                                         const float *__restrict__ weight,
                                         float *__restrict__ out) {
  int batch_index = blockIdx.x;
  points += batch_index * m * c;

  idx += batch_index * n * 3;
  weight += batch_index * n * 3;

  out += batch_index * n * c;

  // Flattened 2-D thread block strides over (channel, point) pairs.
  const int index = threadIdx.y * blockDim.x + threadIdx.x;
  const int stride = blockDim.y * blockDim.x;
  for (int i = index; i < c * n; i += stride) {
    const int l = i / n;  // channel
    const int j = i % n;  // output point
    float w1 = weight[j * 3 + 0];
    float w2 = weight[j * 3 + 1];
    float w3 = weight[j * 3 + 2];

    int i1 = idx[j * 3 + 0];
    int i2 = idx[j * 3 + 1];
    int i3 = idx[j * 3 + 2];

    out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
             points[l * m + i3] * w3;
  }
}

void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
                                      const float *points, const int *idx,
                                      const float *weight, float *out) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
      b, c, m, n, points, idx, weight, out);

  CUDA_CHECK_ERRORS();
}

// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
// output: grad_points(b, c, m)
// Backward of three_interpolate: source indices may repeat across outputs,
// so gradients are accumulated with atomicAdd.
__global__ void three_interpolate_grad_kernel(
    int b, int c, int n, int m, const float *__restrict__ grad_out,
    const int *__restrict__ idx, const float *__restrict__ weight,
    float *__restrict__ grad_points) {
  int batch_index = blockIdx.x;
  grad_out += batch_index * n * c;
  idx += batch_index * n * 3;
  weight += batch_index * n * 3;
  grad_points += batch_index * m * c;

  const int index = threadIdx.y * blockDim.x + threadIdx.x;
  const int stride = blockDim.y * blockDim.x;
  for (int i = index; i < c * n; i += stride) {
    const int l = i / n;
    const int j = i % n;
    float w1 = weight[j * 3 + 0];
    float w2 = weight[j * 3 + 1];
    float w3 = weight[j * 3 + 2];

    int i1 = idx[j * 3 + 0];
    int i2 = idx[j * 3 + 1];
    int i3 = idx[j * 3 + 2];

    atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
    atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
    atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
  }
}

void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
                                           const float *grad_out,
                                           const int *idx, const float *weight,
                                           float *grad_points) {
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
      b, c, n, m, grad_out, idx, weight, grad_points);

  CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling.cpp
================================================
#include "sampling.h"
#include "utils.h"
// CUDA launchers implemented in sampling_gpu.cu.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
                                  const float *points, const int *idx,
                                  float *out);
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                       const float *grad_out, const int *idx,
                                       float *grad_points);
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
                                            const float *dataset, float *temp,
                                            int *idxs);

// Select columns of points (B, C, N) by idx (B, M) -> (B, C, M).
// CUDA tensors only.
at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
  CHECK_CONTIGUOUS(points);
  CHECK_CONTIGUOUS(idx);
  CHECK_IS_FLOAT(points);
  CHECK_IS_INT(idx);

  if (points.is_cuda()) {
    CHECK_CUDA(idx);
  }

  at::Tensor output =
      torch::zeros({points.size(0), points.size(1), idx.size(1)},
                   at::device(points.device()).dtype(at::ScalarType::Float));

  if (points.is_cuda()) {
    gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
                                 idx.size(1), points.data_ptr<float>(),
                                 idx.data_ptr<int>(), output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}

// Backward of gather_points: scatter grad_out (B, C, M) back into
// grad_points (B, C, n). `n` must be supplied by the caller.
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
                              const int n) {
  CHECK_CONTIGUOUS(grad_out);
  CHECK_CONTIGUOUS(idx);
  CHECK_IS_FLOAT(grad_out);
  CHECK_IS_INT(idx);

  if (grad_out.is_cuda()) {
    CHECK_CUDA(idx);
  }

  // Zeros so the kernel can accumulate with atomicAdd.
  at::Tensor output =
      torch::zeros({grad_out.size(0), grad_out.size(1), n},
                   at::device(grad_out.device()).dtype(at::ScalarType::Float));

  if (grad_out.is_cuda()) {
    gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
                                      idx.size(1), grad_out.data_ptr<float>(),
                                      idx.data_ptr<int>(),
                                      output.data_ptr<float>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}

// Furthest-point sampling over points (B, N, 3): returns int indices
// (B, nsamples). `tmp` holds each point's running min distance to the
// already-selected set, initialized to a large value.
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
  CHECK_CONTIGUOUS(points);
  CHECK_IS_FLOAT(points);

  at::Tensor output =
      torch::zeros({points.size(0), nsamples},
                   at::device(points.device()).dtype(at::ScalarType::Int));

  at::Tensor tmp =
      torch::full({points.size(0), points.size(1)}, 1e10,
                  at::device(points.device()).dtype(at::ScalarType::Float));

  if (points.is_cuda()) {
    furthest_point_sampling_kernel_wrapper(
        points.size(0), points.size(1), nsamples, points.data_ptr<float>(),
        tmp.data_ptr<float>(), output.data_ptr<int>());
  } else {
    AT_ASSERT(false, "CPU not supported");
  }

  return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// Grid covers (batch, channel); threads stride over the m selected points.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
      }
    }
  }
}

void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
                                  const float *points, const int *idx,
                                  float *out) {
  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
                                                             points, idx, out);

  CUDA_CHECK_ERRORS();
}

// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Backward of gather_points: indices may repeat, so accumulate atomically.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int l = blockIdx.y; l < c; l += gridDim.y) {
      for (int j = threadIdx.x; j < m; j += blockDim.x) {
        int a = idx[i * m + j];
        atomicAdd(grad_points + (i * c + l) * n + a,
                  grad_out[(i * c + l) * m + j]);
      }
    }
  }
}

void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                       const float *grad_out, const int *idx,
                                       float *grad_points) {
  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                              at::cuda::getCurrentCUDAStream()>>>(
      b, c, n, npoints, grad_out, idx, grad_points);

  CUDA_CHECK_ERRORS();
}

// Merge two (distance, index) candidates in shared memory, keeping the one
// with the larger distance in slot idx1. Used by the reduction below.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float v1 = dists[idx1], v2 = dists[idx2];
  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
  dists[idx1] = max(v1, v2);
  dists_i[idx1] = v2 > v1 ? i2 : i1;
}

// Input dataset: (b, n, 3), tmp: (b, n)
// Ouput idxs (b, m)
// Iterative furthest-point sampling, one block per batch element. `temp`
// persists each point's min distance to the selected set across
// iterations; every iteration picks the point maximizing that distance
// via an unrolled shared-memory max-reduction over the block.
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  if (m <= 0) return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];

  int batch_index = blockIdx.x;
  dataset += batch_index * n * 3;
  temp += batch_index * n;
  idxs += batch_index * m;

  int tid = threadIdx.x;
  const int stride = block_size;

  // The first selected point is always index 0.
  int old = 0;
  if (threadIdx.x == 0) idxs[0] = old;

  __syncthreads();
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1;
    // Coordinates of the most recently selected point.
    float x1 = dataset[old * 3 + 0];
    float y1 = dataset[old * 3 + 1];
    float z1 = dataset[old * 3 + 2];
    for (int k = tid; k < n; k += stride) {
      float x2, y2, z2;
      x2 = dataset[k * 3 + 0];
      y2 = dataset[k * 3 + 1];
      z2 = dataset[k * 3 + 2];
      // Skip near-origin points — NOTE(review): presumably zero padding;
      // confirm against the data pipeline.
      float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
      if (mag <= 1e-3) continue;

      float d =
          (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);

      // Update the running min distance to the selected set.
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // Unrolled tree reduction: find the block-wide max distance and its index.
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }

    // Winner of the reduction becomes the next selected point.
    old = dists_i[0];
    if (tid == 0) idxs[j] = old;
  }
}
// Dispatch furthest_point_sampling_kernel with a compile-time block size
// matching opt_n_threads(n), so the shared-memory reduction arrays are sized
// to the actual thread count. One block per batch element, launched on the
// current torch CUDA stream.
// NOTE(review): the default case instantiates the <512> template but still
// launches n_threads threads; that is only safe if opt_n_threads never
// returns a value above 512 — confirm in cuda_utils.h.
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
                                            const float *dataset, float *temp,
                                            int *idxs) {
  unsigned int n_threads = opt_n_threads(n);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  switch (n_threads) {
    case 512:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_kernel<256>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_kernel<128>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_kernel<64>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_kernel<32>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_kernel<16>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_kernel<8>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_kernel<4>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_kernel<2>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_kernel<1>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }

  CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_version.py
================================================
__version__ = "3.0.0"
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py
================================================
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
    """Build a pointwise (1x1 Conv2d) MLP shared across all points.

    Each consecutive pair of channel sizes in ``mlp_spec`` becomes one
    Conv2d -> (optional BatchNorm2d) -> ReLU stage.

    Parameters
    ----------
    mlp_spec : List[int]
        Channel sizes; stage i maps mlp_spec[i] -> mlp_spec[i + 1].
    bn : bool
        Insert BatchNorm2d after every conv (conv bias is dropped then,
        since the norm's affine shift makes it redundant).
    """
    stages = []
    for in_ch, out_ch in zip(mlp_spec[:-1], mlp_spec[1:]):
        stages.append(nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=not bn))
        if bn:
            stages.append(nn.BatchNorm2d(out_ch))
        stages.append(nn.ReLU(True))
    return nn.Sequential(*stages)
class _PointnetSAModuleBase(nn.Module):
    """Shared skeleton for PointNet++ set-abstraction modules.

    Subclasses are expected to fill in ``npoint``, ``groupers`` and ``mlps``.
    """

    def __init__(self):
        super(_PointnetSAModuleBase, self).__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None

    def forward(
        self, xyz: torch.Tensor, features: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features

        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
            (None when ``self.npoint`` is None, i.e. group-all mode)
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """
        if self.npoint is None:
            new_xyz = None
        else:
            # Furthest-point-sample npoint centroids, then gather their coords.
            flipped = xyz.transpose(1, 2).contiguous()
            sampled_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            new_xyz = (
                pointnet2_utils.gather_operation(flipped, sampled_idx)
                .transpose(1, 2)
                .contiguous()
            )

        pooled = []
        for i, grouper in enumerate(self.groupers):
            grouped = grouper(xyz, new_xyz, features)  # (B, C, npoint, nsample)
            grouped = self.mlps[i](grouped)  # (B, mlp[-1], npoint, nsample)
            # Max-pool over each centroid's nsample neighbours.
            grouped = F.max_pool2d(grouped, kernel_size=[1, grouped.size(3)])
            pooled.append(grouped.squeeze(-1))  # (B, mlp[-1], npoint)

        return new_xyz, torch.cat(pooled, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    r"""Pointnet set abstraction layer with multiscale grouping

    Parameters
    ----------
    npoint : int
        Number of features
    radii : list of float32
        list of radii to group with
    nsamples : list of int32
        Number of samples in each ball query
    mlps : list of list of int32
        Spec of the pointnet before the global max_pool for each scale
    bn : bool
        Use batchnorm
    """

    def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
        # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
        super(PointnetSAModuleMSG, self).__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for radius, nsample, spec in zip(radii, nsamples, mlps):
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None
                else pointnet2_utils.GroupAll(use_xyz)
            )
            # BUGFIX: copy before adjusting the input width — the original
            # wrote `mlps[i][0] += 3` through to the caller's list, so reusing
            # the same spec (or constructing the module twice) silently grew
            # the first channel count each time.
            mlp_spec = list(spec)
            if use_xyz:
                # Grouped xyz (3 channels) is concatenated ahead of the features.
                mlp_spec[0] += 3
            self.mlps.append(build_shared_mlp(mlp_spec, bn))
class PointnetSAModule(PointnetSAModuleMSG):
    r"""Pointnet set abstraction layer (single-scale grouping)

    Parameters
    ----------
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    mlp : list
        Spec of the pointnet before the global max_pool
    bn : bool
        Use batchnorm
    """

    def __init__(
        self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
    ):
        # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
        # Single-scale SA is just multiscale grouping with one-element lists;
        # npoint=None selects the group-all path in the parent class.
        super(PointnetSAModule, self).__init__(
            mlps=[mlp],
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            bn=bn,
            use_xyz=use_xyz,
        )
class PointnetFPModule(nn.Module):
    r"""Propagates the features of one point set to another.

    Parameters
    ----------
    mlp : list
        Pointnet module parameters
    bn : bool
        Use batchnorm
    """

    def __init__(self, mlp, bn=True):
        # type: (PointnetFPModule, List[int], bool) -> None
        super(PointnetFPModule, self).__init__()
        self.mlp = build_shared_mlp(mlp, bn=bn)

    def forward(self, unknown, known, unknow_feats, known_feats):
        # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the xyz positions of the unknown features
        known : torch.Tensor
            (B, m, 3) tensor of the xyz positions of the known features,
            or None to broadcast known_feats to every unknown point
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to (may be None)
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated

        Returns
        -------
        new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # Inverse-distance-weighted interpolation over the 3 nearest knowns.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)  # epsilon guards exact overlaps
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm

            interpolated_feats = pointnet2_utils.three_interpolate(
                known_feats, idx, weight
            )
        else:
            # BUGFIX: the original did `known_feats.size()[0:2] + [unknown.size(1)]`,
            # but torch.Size is a tuple and concatenating a list raises
            # TypeError. Broadcast (B, C2, m) across all n unknown points.
            interpolated_feats = known_feats.expand(
                *known_feats.size()[0:2], unknown.size(1)
            )

        if unknow_feats is not None:
            new_features = torch.cat(
                [interpolated_feats, unknow_feats], dim=1
            )  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        # Run the shared MLP pointwise: add/drop a trailing singleton dim so
        # the Conv2d stack sees (B, C, n, 1).
        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)

        return new_features.squeeze(-1)
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
================================================
import torch
import torch.nn as nn
import warnings
from torch.autograd import Function
from typing import *
try:
import pointnet2_ops._ext as _ext
except ImportError:
from torch.utils.cpp_extension import load
import glob
import os.path as osp
import os
warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.")
_ext_src_root = osp.join(osp.dirname(__file__), "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
_ext = load(
"_ext",
sources=_ext_sources,
extra_include_paths=[osp.join(_ext_src_root, "include")],
extra_cflags=["-O3"],
extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"],
with_cuda=True,
)
class FurthestPointSampling(Function):
    @staticmethod
    def forward(ctx, xyz, npoint):
        # type: (Any, torch.Tensor, int) -> torch.Tensor
        r"""
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance

        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor where N > npoint
        npoint : int32
            number of features in the sampled set

        Returns
        -------
        torch.Tensor
            (B, npoint) tensor containing the set
        """
        out = _ext.furthest_point_sampling(xyz, npoint)

        # Index selection has no meaningful gradient.
        ctx.mark_non_differentiable(out)

        return out

    @staticmethod
    def backward(ctx, grad_out):
        # Output is marked non-differentiable, so nothing flows back.
        return ()


furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Gather the feature columns selected by idx.

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor
        idx : torch.Tensor
            (B, npoint) tensor of the features to gather

        Returns
        -------
        torch.Tensor
            (B, C, npoint) tensor
        """
        # features is saved only so backward can recover N; idx routes the
        # gradient scatter.
        ctx.save_for_backward(idx, features)

        return _ext.gather_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        idx, features = ctx.saved_tensors
        N = features.size(2)

        # Scatter-add each output gradient back to its source position.
        grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
        return grad_features, None


gather_operation = GatherOperation.apply
class ThreeNN(Function):
    @staticmethod
    def forward(ctx, unknown, known):
        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Find the three nearest neighbors of unknown in known

        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the query points
        known : torch.Tensor
            (B, m, 3) tensor of the reference points searched for neighbors

        Returns
        -------
        dist : torch.Tensor
            (B, n, 3) l2 distance to the three nearest neighbors
        idx : torch.Tensor
            (B, n, 3) index of 3 nearest neighbors
        """
        # The extension returns squared distances; take the root here.
        dist2, idx = _ext.three_nn(unknown, known)
        dist = torch.sqrt(dist2)

        # Neighbor search is not differentiable.
        ctx.mark_non_differentiable(dist, idx)

        return dist, idx

    @staticmethod
    def backward(ctx, grad_dist, grad_idx):
        # Outputs are marked non-differentiable, so nothing flows back.
        return ()


three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    @staticmethod
    def forward(ctx, features, idx, weight):
        # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor
        r"""
        Performs weight linear interpolation on 3 features

        Parameters
        ----------
        features : torch.Tensor
            (B, c, m) Features descriptors to be interpolated from
        idx : torch.Tensor
            (B, n, 3) three nearest neighbors of the target features in features
        weight : torch.Tensor
            (B, n, 3) weights

        Returns
        -------
        torch.Tensor
            (B, c, n) tensor of the interpolated features
        """
        # features is saved mainly so backward can recover m.
        ctx.save_for_backward(idx, weight, features)

        return _ext.three_interpolate(features, idx, weight)

    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs

        Returns
        -------
        grad_features : torch.Tensor
            (B, c, m) tensor with gradients of features
        None
        None
        """
        idx, weight, features = ctx.saved_tensors
        m = features.size(2)

        grad_features = _ext.three_interpolate_grad(
            grad_out.contiguous(), idx, weight, m
        )

        # idx and weight receive zero (rather than None) gradients, treating
        # them as inputs with no contribution.
        return grad_features, torch.zeros_like(idx), torch.zeros_like(weight)


three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Gather nsample feature columns for each of npoint groups.

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor of features to group
        idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indicies of features to group with

        Returns
        -------
        torch.Tensor
            (B, C, npoint, nsample) tensor
        """
        # features is saved only so backward can recover N.
        ctx.save_for_backward(idx, features)

        return _ext.group_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, npoint, nsample) tensor of the gradients of the output from forward

        Returns
        -------
        torch.Tensor
            (B, C, N) gradient of the features
        None
        """
        idx, features = ctx.saved_tensors
        N = features.size(2)

        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)

        # idx receives a zero (rather than None) gradient.
        return grad_features, torch.zeros_like(idx)


grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    @staticmethod
    def forward(ctx, radius, nsample, xyz, new_xyz):
        # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Parameters
        ----------
        radius : float
            radius of the balls
        nsample : int
            maximum number of features in the balls
        xyz : torch.Tensor
            (B, N, 3) xyz coordinates of the features
        new_xyz : torch.Tensor
            (B, npoint, 3) centers of the ball query

        Returns
        -------
        torch.Tensor
            (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        output = _ext.ball_query(new_xyz, xyz, radius, nsample)

        # Index selection has no meaningful gradient.
        ctx.mark_non_differentiable(output)

        return output

    @staticmethod
    def backward(ctx, grad_out):
        # Output is marked non-differentiable, so nothing flows back.
        return ()


ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    r"""
    Groups with a ball query of radius

    Parameters
    ---------
    radius : float32
        Radius of ball
    nsample : int32
        Maximum number of features to gather in the ball
    """

    def __init__(self, radius, nsample, use_xyz=True):
        # type: (QueryAndGroup, float, int, bool) -> None
        super(QueryAndGroup, self).__init__()
        self.radius = radius
        self.nsample = nsample
        self.use_xyz = use_xyz

    def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            centriods (B, npoint, 3)
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, 3 + C, npoint, nsample) tensor
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped_xyz = grouping_operation(
            xyz.transpose(1, 2).contiguous(), idx
        )  # (B, 3, npoint, nsample)
        # Translate each neighbourhood so its centroid sits at the origin.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is None:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            return grouped_xyz

        grouped_features = grouping_operation(features, idx)
        if not self.use_xyz:
            return grouped_features
        # (B, C + 3, npoint, nsample)
        return torch.cat([grouped_xyz, grouped_features], dim=1)
class GroupAll(nn.Module):
    r"""
    Groups all features into a single neighbourhood

    Parameters
    ---------
    """

    def __init__(self, use_xyz=True):
        # type: (GroupAll, bool) -> None
        super(GroupAll, self).__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz, new_xyz, features=None):
        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            Ignored
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, C + 3, 1, N) tensor
        """
        # All N points form one group: add a singleton "npoint" dimension.
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            return grouped_xyz

        grouped_features = features.unsqueeze(2)  # (B, C, 1, N)
        if self.use_xyz:
            return torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, 3 + C, 1, N)
        return grouped_features
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/setup.py
================================================
import glob
import os
import os.path as osp
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = osp.dirname(osp.abspath(__file__))
_ext_src_root = osp.join("pointnet2_ops", "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
requirements = ["torch>=1.4"]
exec(open(osp.join("pointnet2_ops", "_version.py")).read())
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
setup(
name="pointnet2_ops",
version=__version__,
au
gitextract_51znd243/
├── .gitignore
├── .gitmodules
├── CurveNet/
│ ├── README.md
│ └── core/
│ ├── data.py
│ ├── main_cls.py
│ ├── main_normal.py
│ ├── main_partseg.py
│ ├── models/
│ │ ├── curvenet_cls.py
│ │ ├── curvenet_normal.py
│ │ ├── curvenet_seg.py
│ │ ├── curvenet_util.py
│ │ └── walk.py
│ └── util.py
├── GDANet/
│ ├── README.md
│ └── model/
│ ├── GDANet_cls.py
│ ├── GDANet_ptseg.py
│ ├── __init__.py
│ └── util/
│ ├── GDANet_util.py
│ ├── __init__.py
│ ├── data_util.py
│ └── util.py
├── LICENSE
├── PCT_Pytorch/
│ ├── LICENSE
│ ├── README.md
│ ├── checkpoints/
│ │ └── best/
│ │ └── models/
│ │ └── model.t7
│ ├── data.py
│ ├── main.py
│ ├── model.py
│ ├── model_new.py
│ ├── pointnet2_ops_lib/
│ │ ├── MANIFEST.in
│ │ ├── pointnet2_ops/
│ │ │ ├── __init__.py
│ │ │ ├── _ext-src/
│ │ │ │ ├── include/
│ │ │ │ │ ├── ball_query.h
│ │ │ │ │ ├── cuda_utils.h
│ │ │ │ │ ├── group_points.h
│ │ │ │ │ ├── interpolate.h
│ │ │ │ │ ├── sampling.h
│ │ │ │ │ └── utils.h
│ │ │ │ └── src/
│ │ │ │ ├── ball_query.cpp
│ │ │ │ ├── ball_query_gpu.cu
│ │ │ │ ├── bindings.cpp
│ │ │ │ ├── group_points.cpp
│ │ │ │ ├── group_points_gpu.cu
│ │ │ │ ├── interpolate.cpp
│ │ │ │ ├── interpolate_gpu.cu
│ │ │ │ ├── sampling.cpp
│ │ │ │ └── sampling_gpu.cu
│ │ │ ├── _version.py
│ │ │ ├── pointnet2_modules.py
│ │ │ └── pointnet2_utils.py
│ │ └── setup.py
│ ├── test.sh
│ ├── train.sh
│ └── util.py
├── README.md
├── all_utils.py
├── aug_utils.py
├── configs/
│ ├── bn/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── corruption/
│ │ ├── curvenet.yaml
│ │ ├── dgcnn.yaml
│ │ ├── gdanet.yaml
│ │ ├── pct.yaml
│ │ ├── pointMLP.yaml
│ │ ├── pointMLP2.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── cutmix/
│ │ ├── dgcnn_k.yaml
│ │ ├── dgcnn_r.yaml
│ │ ├── pct_k.yaml
│ │ ├── pct_r.yaml
│ │ ├── pointnet2_k.yaml
│ │ ├── pointnet2_r.yaml
│ │ ├── pointnet_k.yaml
│ │ ├── pointnet_r.yaml
│ │ ├── rscnn_k.yaml
│ │ ├── rscnn_r.yaml
│ │ ├── simpleview_k.yaml
│ │ └── simpleview_r.yaml
│ ├── dgcnn_curvenet_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_valid_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_valid_run_1.yaml
│ ├── dgcnn_dgcnn_ce_run_1.yaml
│ ├── dgcnn_dgcnn_ce_valid_run_1.yaml
│ ├── dgcnn_dgcnn_run_1.yaml
│ ├── dgcnn_dgcnn_valid_run_1.yaml
│ ├── dgcnn_gdanet_run_1.yaml
│ ├── dgcnn_pct_run_1.yaml
│ ├── dgcnn_pointMLP2_run_1.yaml
│ ├── dgcnn_pointMLP_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet2_ce_run_1.yaml
│ ├── dgcnn_pointnet2_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet2_run_1.yaml
│ ├── dgcnn_pointnet2_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.25_run_1.yaml
│ ├── dgcnn_pointnet_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.5_run_1.yaml
│ ├── dgcnn_pointnet_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet_ce_run_1.yaml
│ ├── dgcnn_pointnet_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet_run_1.yaml
│ ├── dgcnn_pointnet_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.25_run_1.yaml
│ ├── dgcnn_rscnn_0.25_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.5_run_1.yaml
│ ├── dgcnn_rscnn_0.5_valid_run_1.yaml
│ ├── dgcnn_rscnn_ce_run_1.yaml
│ ├── dgcnn_rscnn_ce_valid_run_1.yaml
│ ├── dgcnn_rscnn_run_1.yaml
│ ├── dgcnn_rscnn_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.25_run_1.yaml
│ ├── dgcnn_simpleview_0.25_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.5_run_1.yaml
│ ├── dgcnn_simpleview_0.5_valid_run_1.yaml
│ ├── dgcnn_simpleview_ce_run_1.yaml
│ ├── dgcnn_simpleview_ce_valid_run_1.yaml
│ ├── dgcnn_simpleview_run_1.yaml
│ ├── dgcnn_simpleview_valid_run_1.yaml
│ ├── mixup/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── pgd/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ └── pointnet.yaml
│ ├── pointnet2_dgcnn_run_1.yaml
│ ├── pointnet2_dgcnn_valid_run_1.yaml
│ ├── pointnet2_pointnet2_run_1.yaml
│ ├── pointnet2_pointnet2_valid_run_1.yaml
│ ├── pointnet2_pointnet_run_1.yaml
│ ├── pointnet2_pointnet_valid_run_1.yaml
│ ├── pointnet2_rscnn_run_1.yaml
│ ├── pointnet2_rscnn_valid_run_1.yaml
│ ├── pointnet2_simpleview_run_1.yaml
│ ├── pointnet2_simpleview_valid_run_1.yaml
│ ├── rscnn_dgcnn_run_1.yaml
│ ├── rscnn_pointnet2_run_1.yaml
│ ├── rscnn_pointnet_run_1.yaml
│ ├── rscnn_rscnn_run_1.yaml
│ ├── rscnn_simpleview_run_1.yaml
│ ├── rsmix/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── tent/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ └── tent_cutmix/
│ ├── dgcnn.yaml
│ ├── pct.yaml
│ ├── pointnet.yaml
│ ├── pointnet2.yaml
│ ├── rscnn.yaml
│ └── simpleview.yaml
├── configs.py
├── data/
│ ├── convert.py
│ ├── create_modelnet40_small.py
│ ├── create_modelnet40_valid.py
│ ├── distortion.py
│ ├── generate_c.py
│ ├── occlusion.py
│ ├── process.py
│ └── util.py
├── dataloader.py
├── dgcnn/
│ ├── .gitignore
│ ├── README.md
│ ├── pytorch/
│ │ ├── README.md
│ │ ├── data.py
│ │ ├── main.py
│ │ ├── model.py
│ │ └── util.py
│ └── tensorflow/
│ ├── README.md
│ ├── evaluate.py
│ ├── models/
│ │ ├── dgcnn.py
│ │ └── transform_nets.py
│ ├── part_seg/
│ │ ├── README.md
│ │ ├── download_data.sh
│ │ ├── part_seg_model.py
│ │ ├── test.py
│ │ ├── testing_ply_file_list.txt
│ │ └── train_multi_gpu.py
│ ├── provider.py
│ ├── sem_seg/
│ │ ├── README.md
│ │ ├── batch_inference.py
│ │ ├── collect_indoor3d_data.py
│ │ ├── download_data.sh
│ │ ├── eval_iou_accuracy.py
│ │ ├── indoor3d_util.py
│ │ ├── meta/
│ │ │ ├── all_data_label.txt
│ │ │ ├── anno_paths.txt
│ │ │ ├── area1_data_label.txt
│ │ │ ├── area2_data_label.txt
│ │ │ ├── area3_data_label.txt
│ │ │ ├── area4_data_label.txt
│ │ │ ├── area5_data_label.txt
│ │ │ ├── area6_data_label.txt
│ │ │ └── class_names.txt
│ │ ├── model.py
│ │ ├── test_job.sh
│ │ ├── train.py
│ │ └── train_job.sh
│ ├── train.py
│ └── utils/
│ ├── data_prep_util.py
│ ├── eulerangles.py
│ ├── pc_util.py
│ ├── plyfile.py
│ └── tf_util.py
├── download.sh
├── emd/
│ ├── README.md
│ ├── emd.cpp
│ ├── emd_cuda.cu
│ ├── emd_module.py
│ └── setup.py
├── eval_cor.sh
├── eval_og.sh
├── eval_tent_cutmix.sh
├── gdrivedl.py
├── main.py
├── models/
│ ├── __init__.py
│ ├── curvenet.py
│ ├── dgcnn.py
│ ├── gdanet.py
│ ├── model_utils.py
│ ├── mv.py
│ ├── mv_utils.py
│ ├── pct.py
│ ├── pointmlp.py
│ ├── pointmlp2.py
│ ├── pointnet.py
│ ├── pointnet2.py
│ ├── resnet.py
│ └── rscnn.py
├── pc_utils.py
├── pointMLP/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ └── classification_ModelNet40/
│ ├── data.py
│ ├── helper.py
│ ├── main.py
│ ├── models/
│ │ ├── __init__.py
│ │ └── pointmlp.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── logger.py
│ │ ├── misc.py
│ │ └── progress/
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── MANIFEST.in
│ │ ├── README.rst
│ │ ├── progress/
│ │ │ ├── __init__.py
│ │ │ ├── bar.py
│ │ │ ├── counter.py
│ │ │ ├── helpers.py
│ │ │ └── spinner.py
│ │ ├── setup.py
│ │ └── test_progress.py
│ └── voting.py
├── pointnet2_pyt/
│ ├── .gitignore
│ ├── .pre-commit-config.yaml
│ ├── .travis.yml
│ ├── MANIFEST.in
│ ├── README.rst
│ ├── UNLICENSE
│ ├── __init__.py
│ ├── pointnet2/
│ │ ├── __init__.py
│ │ ├── _ext-src/
│ │ │ ├── include/
│ │ │ │ ├── ball_query.h
│ │ │ │ ├── cuda_utils.h
│ │ │ │ ├── group_points.h
│ │ │ │ ├── interpolate.h
│ │ │ │ ├── sampling.h
│ │ │ │ └── utils.h
│ │ │ └── src/
│ │ │ ├── ball_query.cpp
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── bindings.cpp
│ │ │ ├── group_points.cpp
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.cpp
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.cpp
│ │ │ └── sampling_gpu.cu
│ │ ├── data/
│ │ │ ├── .gitignore
│ │ │ ├── Indoor3DSemSegLoader.py
│ │ │ ├── ModelNet40Loader.py
│ │ │ ├── __init__.py
│ │ │ └── data_utils.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── pointnet2_msg_cls.py
│ │ │ ├── pointnet2_msg_sem.py
│ │ │ ├── pointnet2_ssg_cls.py
│ │ │ └── pointnet2_ssg_sem.py
│ │ ├── train/
│ │ │ ├── __init__.py
│ │ │ ├── train_cls.py
│ │ │ └── train_sem_seg.py
│ │ └── utils/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ └── pointnet2_utils.py
│ ├── setup.py
│ ├── tests/
│ │ ├── conftest.py
│ │ ├── test_cls_msg.py
│ │ ├── test_cls_ssg.py
│ │ ├── test_semseg_msg.py
│ │ └── test_semseg_ssg.py
│ └── tox.ini
├── pointnet2_tf/
│ ├── LICENSE
│ ├── README.md
│ ├── data/
│ │ └── README.md
│ ├── evaluate.py
│ ├── modelnet_dataset.py
│ ├── modelnet_h5_dataset.py
│ ├── models/
│ │ ├── pointnet2_cls_msg.py
│ │ ├── pointnet2_cls_ssg.py
│ │ ├── pointnet2_part_seg.py
│ │ ├── pointnet2_part_seg_msg_one_hot.py
│ │ ├── pointnet2_sem_seg.py
│ │ └── pointnet_cls_basic.py
│ ├── part_seg/
│ │ ├── command.sh
│ │ ├── command_one_hot.sh
│ │ ├── evaluate.py
│ │ ├── part_dataset.py
│ │ ├── part_dataset_all_normal.py
│ │ ├── test.py
│ │ ├── train.py
│ │ └── train_one_hot.py
│ ├── scannet/
│ │ ├── README.md
│ │ ├── pc_util.py
│ │ ├── preprocessing/
│ │ │ ├── collect_scannet_scenes.py
│ │ │ ├── demo.py
│ │ │ ├── fetch_label_names.py
│ │ │ ├── scannet-labels.combined.tsv
│ │ │ └── scannet_util.py
│ │ ├── scannet_dataset.py
│ │ ├── scene_util.py
│ │ └── train.py
│ ├── tf_ops/
│ │ ├── 3d_interpolation/
│ │ │ ├── interpolate.cpp
│ │ │ ├── tf_interpolate.cpp
│ │ │ ├── tf_interpolate.py
│ │ │ ├── tf_interpolate_compile.sh
│ │ │ ├── tf_interpolate_op_test.py
│ │ │ └── visu_interpolation.py
│ │ ├── grouping/
│ │ │ ├── .gitignore
│ │ │ ├── test/
│ │ │ │ ├── compile.sh
│ │ │ │ ├── query_ball_point.cpp
│ │ │ │ ├── query_ball_point.cu
│ │ │ │ ├── query_ball_point_block.cu
│ │ │ │ ├── query_ball_point_grid.cu
│ │ │ │ ├── selection_sort.cpp
│ │ │ │ ├── selection_sort.cu
│ │ │ │ └── selection_sort_const.cu
│ │ │ ├── tf_grouping.cpp
│ │ │ ├── tf_grouping.py
│ │ │ ├── tf_grouping_compile.sh
│ │ │ ├── tf_grouping_g.cu
│ │ │ └── tf_grouping_op_test.py
│ │ └── sampling/
│ │ ├── .gitignore
│ │ ├── tf_sampling.cpp
│ │ ├── tf_sampling.py
│ │ ├── tf_sampling_compile.sh
│ │ └── tf_sampling_g.cu
│ ├── train.py
│ ├── train_multi_gpu.py
│ └── utils/
│ ├── README.md
│ ├── compile_render_balls_so.sh
│ ├── pc_util.py
│ ├── pointnet_util.py
│ ├── provider.py
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ └── tf_util.py
├── pointnet_pyt/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ ├── misc/
│ │ ├── modelnet_id.txt
│ │ └── num_seg_classes.txt
│ ├── pointnet/
│ │ ├── __init__.py
│ │ ├── dataset.py
│ │ └── model.py
│ ├── scripts/
│ │ ├── build.sh
│ │ └── download.sh
│ ├── setup.py
│ └── utils/
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ ├── show_cls.py
│ ├── show_seg.py
│ ├── train_classification.py
│ └── train_segmentation.py
├── requirements.txt
├── rs_cnn/
│ ├── .gitignore
│ ├── CMakeLists.txt
│ ├── LICENSE
│ ├── README.md
│ ├── cfgs/
│ │ ├── config_msn_partseg.yaml
│ │ └── config_ssn_cls.yaml
│ ├── data/
│ │ ├── ModelNet40Loader.py
│ │ ├── ShapeNetPartLoader.py
│ │ ├── __init__.py
│ │ └── data_utils.py
│ ├── docs/
│ │ ├── _config.yml
│ │ └── index.md
│ ├── models/
│ │ ├── __init__.py
│ │ ├── rscnn_msn_seg.py
│ │ └── rscnn_ssn_cls.py
│ ├── train_cls.py
│ ├── train_cls.sh
│ ├── train_partseg.py
│ ├── train_partseg.sh
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── _ext/
│ │ │ ├── __init__.py
│ │ │ └── pointnet2/
│ │ │ └── __init__.py
│ │ ├── build_ffi.py
│ │ ├── cinclude/
│ │ │ ├── ball_query_gpu.h
│ │ │ ├── ball_query_wrapper.h
│ │ │ ├── cuda_utils.h
│ │ │ ├── group_points_gpu.h
│ │ │ ├── group_points_wrapper.h
│ │ │ ├── interpolate_gpu.h
│ │ │ ├── interpolate_wrapper.h
│ │ │ ├── sampling_gpu.h
│ │ │ └── sampling_wrapper.h
│ │ ├── csrc/
│ │ │ ├── ball_query.c
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── group_points.c
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.c
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.c
│ │ │ └── sampling_gpu.cu
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ ├── pointnet2_modules_updated.py
│ │ ├── pointnet2_utils.py
│ │ └── pytorch_utils/
│ │ ├── __init__.py
│ │ └── pytorch_utils.py
│ ├── voting_evaluate_cls.py
│ └── voting_evaluate_partseg.py
├── setup.sh
├── third_party/
│ ├── bn_helper.py
│ └── tent_helper.py
└── visualize/
├── README.md
├── config.py
├── confusion_matrix.py
├── examples.py
├── main_results.py
└── pointflow_fig_colorful.py
SYMBOL INDEX (1404 symbols across 184 files)
FILE: CurveNet/core/data.py
function download_modelnet40 (line 26) | def download_modelnet40():
function download_shapenetpart (line 38) | def download_shapenetpart():
function load_data_normal (line 50) | def load_data_normal(partition):
function load_data_cls (line 58) | def load_data_cls(partition):
function load_data_partseg (line 74) | def load_data_partseg(partition):
function translate_pointcloud (line 99) | def translate_pointcloud(pointcloud):
function jitter_pointcloud (line 107) | def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
function rotate_pointcloud (line 113) | def rotate_pointcloud(pointcloud):
class ModelNet40 (line 120) | class ModelNet40(Dataset):
method __init__ (line 121) | def __init__(self, num_points, partition='train'):
method __getitem__ (line 126) | def __getitem__(self, item):
method __len__ (line 135) | def __len__(self):
class ModelNetNormal (line 138) | class ModelNetNormal(Dataset):
method __init__ (line 139) | def __init__(self, num_points, partition='train'):
method __getitem__ (line 144) | def __getitem__(self, item):
method __len__ (line 155) | def __len__(self):
class ShapeNetPart (line 158) | class ShapeNetPart(Dataset):
method __init__ (line 159) | def __init__(self, num_points=2048, partition='train', class_choice=No...
method __getitem__ (line 182) | def __getitem__(self, item):
method __len__ (line 194) | def __len__(self):
FILE: CurveNet/core/main_cls.py
function _init_ (line 29) | def _init_():
function train (line 50) | def train(args, io):
function test (line 147) | def test(args, io):
FILE: CurveNet/core/main_normal.py
function _init_ (line 24) | def _init_():
function train (line 45) | def train(args, io):
function test (line 130) | def test(args, io):
FILE: CurveNet/core/main_partseg.py
function _init_ (line 32) | def _init_():
function calculate_shape_IoU (line 53) | def calculate_shape_IoU(pred_np, seg_np, label, class_choice, eva=False):
function train (line 84) | def train(args, io):
function test (line 231) | def test(args, io):
FILE: CurveNet/core/models/curvenet_cls.py
class CurveNet (line 18) | class CurveNet(nn.Module):
method __init__ (line 19) | def __init__(self, num_classes=40, k=20, setting='default'):
method forward (line 49) | def forward(self, xyz):
FILE: CurveNet/core/models/curvenet_normal.py
class CurveNet (line 17) | class CurveNet(nn.Module):
method __init__ (line 18) | def __init__(self, num_classes=3, k=20, multiplier=1.0, setting='defau...
method forward (line 60) | def forward(self, xyz):
FILE: CurveNet/core/models/curvenet_seg.py
class CurveNet (line 17) | class CurveNet(nn.Module):
method __init__ (line 18) | def __init__(self, num_classes=50, category=16, k=32, setting='default'):
method forward (line 78) | def forward(self, xyz, l=None):
FILE: CurveNet/core/models/curvenet_util.py
function knn (line 22) | def knn(x, k):
function normal_knn (line 31) | def normal_knn(x, k):
function pc_normalize (line 39) | def pc_normalize(pc):
function square_distance (line 47) | def square_distance(src, dst):
function index_points (line 58) | def index_points(points, idx):
function farthest_point_sample (line 78) | def farthest_point_sample(xyz, npoint):
function query_ball_point (line 101) | def query_ball_point(radius, nsample, xyz, new_xyz):
function sample_and_group (line 123) | def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=Fal...
class Attention_block (line 149) | class Attention_block(nn.Module):
method __init__ (line 153) | def __init__(self,F_g,F_l,F_int):
method forward (line 171) | def forward(self,g,x):
class LPFA (line 180) | class LPFA(nn.Module):
method __init__ (line 181) | def __init__(self, in_channel, out_channel, k, mlp_num=2, initial=False):
method forward (line 200) | def forward(self, x, xyz, idx=None):
method group_feature (line 211) | def group_feature(self, x, xyz, idx):
class PointNetFeaturePropagation (line 244) | class PointNetFeaturePropagation(nn.Module):
method __init__ (line 245) | def __init__(self, in_channel, mlp, att=None):
method forward (line 259) | def forward(self, xyz1, xyz2, points1, points2):
class CIC (line 308) | class CIC(nn.Module):
method __init__ (line 309) | def __init__(self, npoint, radius, k, in_channels, output_channels, bo...
method forward (line 351) | def forward(self, xyz, x):
class CurveAggregation (line 383) | class CurveAggregation(nn.Module):
method __init__ (line 384) | def __init__(self, in_channel):
method forward (line 419) | def forward(self, x, curves):
class CurveGrouping (line 445) | class CurveGrouping(nn.Module):
method __init__ (line 446) | def __init__(self, in_channel, k, curve_num, curve_length):
method forward (line 457) | def forward(self, x, xyz, idx):
class MaskedMaxPool (line 473) | class MaskedMaxPool(nn.Module):
method __init__ (line 474) | def __init__(self, npoint, radius, k):
method forward (line 480) | def forward(self, xyz, features):
FILE: CurveNet/core/models/walk.py
function batched_index_select (line 14) | def batched_index_select(input, dim, index):
function gumbel_softmax (line 23) | def gumbel_softmax(logits, dim, temperature=1):
class Walk (line 40) | class Walk(nn.Module):
method __init__ (line 44) | def __init__(self, in_channel, k, curve_num, curve_length):
method crossover_suppression (line 61) | def crossover_suppression(self, cur, neighbor, bn, n, k):
method forward (line 78) | def forward(self, xyz, x, adj, cur):
FILE: CurveNet/core/util.py
function cal_loss (line 14) | def cal_loss(pred, gold, smoothing=True):
class IOStream (line 34) | class IOStream():
method __init__ (line 35) | def __init__(self, path):
method cprint (line 38) | def cprint(self, text):
method close (line 43) | def close(self):
FILE: GDANet/model/GDANet_cls.py
class GDANET (line 7) | class GDANET(nn.Module):
method __init__ (line 8) | def __init__(self, number_class=40):
method forward (line 62) | def forward(self, x):
FILE: GDANet/model/GDANet_ptseg.py
class GDANet (line 7) | class GDANet(nn.Module):
method __init__ (line 8) | def __init__(self, num_classes):
method forward (line 71) | def forward(self, x, norm_plt, cls_label):
FILE: GDANet/model/util/GDANet_util.py
function knn (line 5) | def knn(x, k):
function local_operator (line 14) | def local_operator(x, k):
function local_operator_withnorm (line 42) | def local_operator_withnorm(x, norm_plt, k):
function GDM (line 74) | def GDM(x, M):
class SGCAM (line 153) | class SGCAM(nn.Module):
method __init__ (line 155) | def __init__(self, in_channels, inter_channels=None, bn_layer=True):
method forward (line 192) | def forward(self, x, x_2):
FILE: GDANet/model/util/data_util.py
function load_data (line 9) | def load_data(partition):
function pc_normalize (line 24) | def pc_normalize(pc):
function translate_pointcloud (line 32) | def translate_pointcloud(pointcloud):
function jitter_pointcloud (line 40) | def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
class ModelNet40 (line 47) | class ModelNet40(Dataset):
method __init__ (line 48) | def __init__(self, num_points, partition='train'):
method __getitem__ (line 53) | def __getitem__(self, item): # indice of the pts or label
method __len__ (line 62) | def __len__(self):
class PartNormalDataset (line 67) | class PartNormalDataset(Dataset):
method __init__ (line 68) | def __init__(self, npoints=2500, split='train', normalize=False):
method __getitem__ (line 125) | def __getitem__(self, index):
method __len__ (line 154) | def __len__(self):
FILE: GDANet/model/util/util.py
function cal_loss (line 6) | def cal_loss(pred, gold, smoothing=True):
class IOStream (line 27) | class IOStream():
method __init__ (line 28) | def __init__(self, path):
method cprint (line 31) | def cprint(self, text):
method close (line 36) | def close(self):
function to_categorical (line 40) | def to_categorical(y, num_classes):
function compute_overall_iou (line 48) | def compute_overall_iou(pred, target, num_classes):
FILE: PCT_Pytorch/data.py
function download (line 7) | def download():
function load_data (line 19) | def load_data(partition):
function random_point_dropout (line 36) | def random_point_dropout(pc, max_dropout_ratio=0.875):
function translate_pointcloud (line 47) | def translate_pointcloud(pointcloud):
function jitter_pointcloud (line 54) | def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
class ModelNet40 (line 60) | class ModelNet40(Dataset):
method __init__ (line 61) | def __init__(self, num_points, partition='train'):
method __getitem__ (line 66) | def __getitem__(self, item):
method __len__ (line 75) | def __len__(self):
FILE: PCT_Pytorch/main.py
function _init_ (line 17) | def _init_():
function train (line 29) | def train(args, io):
function test (line 132) | def test(args, io):
FILE: PCT_Pytorch/model.py
class Local_op (line 6) | class Local_op(nn.Module):
method __init__ (line 7) | def __init__(self, in_channels, out_channels):
method forward (line 14) | def forward(self, x):
class Pct (line 25) | class Pct(nn.Module):
method __init__ (line 26) | def __init__(self, args, output_channels=40):
method forward (line 51) | def forward(self, x):
class Point_Transformer_Last (line 77) | class Point_Transformer_Last(nn.Module):
method __init__ (line 78) | def __init__(self, args, channels=256):
method forward (line 92) | def forward(self, x):
class SA_Layer (line 111) | class SA_Layer(nn.Module):
method __init__ (line 112) | def __init__(self, channels):
method forward (line 125) | def forward(self, x):
FILE: PCT_Pytorch/model_new.py
class Local_op (line 6) | class Local_op(nn.Module):
method __init__ (line 7) | def __init__(self, in_channels, out_channels):
method forward (line 14) | def forward(self, x):
class Pct (line 25) | class Pct(nn.Module):
method __init__ (line 26) | def __init__(self, args, output_channels=40):
method forward (line 50) | def forward(self, x):
class Point_Transformer_Last (line 76) | class Point_Transformer_Last(nn.Module):
method __init__ (line 77) | def __init__(self, args, channels=256):
method forward (line 89) | def forward(self, x, xyz):
class SA_Layer (line 108) | class SA_Layer(nn.Module):
method __init__ (line 109) | def __init__(self, channels):
method forward (line 123) | def forward(self, x, xyz):
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/cuda_utils.h
function opt_n_threads (line 15) | inline int opt_n_threads(int work_size) {
function dim3 (line 21) | inline dim3 opt_block_config(int x, int y) {
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query.cpp
function ball_query (line 8) | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float ra...
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/bindings.cpp
function PYBIND11_MODULE (line 6) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points.cpp
function group_points (line 12) | at::Tensor group_points(at::Tensor points, at::Tensor idx) {
function group_points_grad (line 38) | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const ...
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate.cpp
function three_nn (line 14) | std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
function three_interpolate (line 42) | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
function three_interpolate_grad (line 71) | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling.cpp
function gather_points (line 15) | at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
function gather_points_grad (line 40) | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
function furthest_point_sampling (line 66) | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py
function build_shared_mlp (line 9) | def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
class _PointnetSAModuleBase (line 22) | class _PointnetSAModuleBase(nn.Module):
method __init__ (line 23) | def __init__(self):
method forward (line 29) | def forward(
class PointnetSAModuleMSG (line 77) | class PointnetSAModuleMSG(_PointnetSAModuleBase):
method __init__ (line 94) | def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
class PointnetSAModule (line 118) | class PointnetSAModule(PointnetSAModuleMSG):
method __init__ (line 135) | def __init__(
class PointnetFPModule (line 149) | class PointnetFPModule(nn.Module):
method __init__ (line 160) | def __init__(self, mlp, bn=True):
method forward (line 165) | def forward(self, unknown, known, unknow_feats, known_feats):
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
class FurthestPointSampling (line 34) | class FurthestPointSampling(Function):
method forward (line 36) | def forward(ctx, xyz, npoint):
method backward (line 61) | def backward(ctx, grad_out):
class GatherOperation (line 68) | class GatherOperation(Function):
method forward (line 70) | def forward(ctx, features, idx):
method backward (line 93) | def backward(ctx, grad_out):
class ThreeNN (line 104) | class ThreeNN(Function):
method forward (line 106) | def forward(ctx, unknown, known):
method backward (line 132) | def backward(ctx, grad_dist, grad_idx):
class ThreeInterpolate (line 139) | class ThreeInterpolate(Function):
method forward (line 141) | def forward(ctx, features, idx, weight):
method backward (line 164) | def backward(ctx, grad_out):
class GroupingOperation (line 194) | class GroupingOperation(Function):
method forward (line 196) | def forward(ctx, features, idx):
method backward (line 217) | def backward(ctx, grad_out):
class BallQuery (line 243) | class BallQuery(Function):
method forward (line 245) | def forward(ctx, radius, nsample, xyz, new_xyz):
method backward (line 272) | def backward(ctx, grad_out):
class QueryAndGroup (line 279) | class QueryAndGroup(nn.Module):
method __init__ (line 291) | def __init__(self, radius, nsample, use_xyz=True):
method forward (line 296) | def forward(self, xyz, new_xyz, features=None):
class GroupAll (line 336) | class GroupAll(nn.Module):
method __init__ (line 344) | def __init__(self, use_xyz=True):
method forward (line 349) | def forward(self, xyz, new_xyz, features=None):
FILE: PCT_Pytorch/util.py
function cal_loss (line 5) | def cal_loss(pred, gold, smoothing=True):
class IOStream (line 24) | class IOStream():
method __init__ (line 25) | def __init__(self, path):
method cprint (line 28) | def cprint(self, text):
method close (line 33) | def close(self):
function square_distance (line 36) | def square_distance(src, dst):
function index_points (line 57) | def index_points(points, idx):
function query_ball_point (line 75) | def query_ball_point(radius, nsample, xyz, new_xyz):
function knn_point (line 97) | def knn_point(nsample, xyz, new_xyz):
function sample_and_group (line 110) | def sample_and_group(npoint, radius, nsample, xyz, points):
FILE: all_utils.py
class TensorboardManager (line 23) | class TensorboardManager:
method __init__ (line 24) | def __init__(self, path):
method update (line 27) | def update(self, split, step, vals):
method close (line 31) | def close(self):
class TrackTrain (line 36) | class TrackTrain:
method __init__ (line 37) | def __init__(self, early_stop_patience):
method record_epoch (line 48) | def record_epoch(self, epoch_id, train_metric, val_metric, test_metric):
method save_model (line 66) | def save_model(self, epoch_id, split):
method early_stop (line 94) | def early_stop(self, epoch_id):
class PerfTrackVal (line 102) | class PerfTrackVal:
method __init__ (line 106) | def __init__(self, task, extra_param=None):
method update (line 115) | def update(self, data_batch, out):
method agg (line 122) | def agg(self):
method update_class_see_corr (line 132) | def update_class_see_corr(self, logit, label):
method get_correct_list (line 145) | def get_correct_list(logit, label):
method get_avg_list (line 150) | def get_avg_list(all_list):
class PerfTrackTrain (line 156) | class PerfTrackTrain(PerfTrackVal):
method __init__ (line 160) | def __init__(self, task, extra_param=None):
method update_loss (line 165) | def update_loss(self, loss):
method agg_loss (line 168) | def agg_loss(self):
method update_all (line 172) | def update_all(self, data_batch, out, loss):
function smooth_loss (line 178) | def smooth_loss(pred, gold):
function rscnn_voting_evaluate_cls (line 192) | def rscnn_voting_evaluate_cls(loader, model, data_batch_to_points_target,
function pn2_vote_evaluate_cls (line 279) | def pn2_vote_evaluate_cls(dataloader, model, log_file, num_votes=[12]):
FILE: aug_utils.py
function cutmix_r (line 8) | def cutmix_r(data_batch,cfg):
function cutmix_k (line 50) | def cutmix_k(data_batch,cfg):
function mixup (line 95) | def mixup(data_batch,cfg):
function knn_points (line 130) | def knn_points(k, xyz, query, nsample=512):
function cut_points_knn (line 154) | def cut_points_knn(data_batch, idx, radius, nsample=512, k=512):
function cut_points (line 173) | def cut_points(data_batch, idx, radius, nsample=512):
function query_ball_point_for_rsmix (line 193) | def query_ball_point_for_rsmix(radius, nsample, xyz, new_xyz):
function square_distance (line 223) | def square_distance(src, dst):
function pts_num_ctrl (line 250) | def pts_num_ctrl(pts_erase_idx, pts_add_idx):
function rsmix (line 266) | def rsmix(data, cfg, n_sample=512, KNN=False):
function pgd (line 331) | def pgd(data_batch,model, task, loss_name, dataset_name, step= 7, eps=0....
FILE: configs.py
function get_cfg_defaults (line 106) | def get_cfg_defaults():
FILE: data/convert.py
function load_mesh (line 4) | def load_mesh(filepath):
function export_mesh (line 8) | def export_mesh(mesh, filepath):
function load_pcd (line 12) | def load_pcd(filepath):
function export_pcd (line 16) | def export_pcd(pcd, filepath):
function mesh_to_pcd (line 20) | def mesh_to_pcd(mesh, number_of_points=2048):
FILE: data/create_modelnet40_small.py
function main (line 9) | def main(split_size):
FILE: data/create_modelnet40_valid.py
function main (line 7) | def main():
FILE: data/distortion.py
function core_distortion (line 9) | def core_distortion(points, n_control_points=[2,2,2], displacement=None):
function distortion (line 28) | def distortion(points, direction_mask=np.array([1,1,1]), point_mask=np.o...
function distortion_2 (line 44) | def distortion_2(points, severity=(0.4,3), func = 'gaussian_spline'):
function distortion_3 (line 65) | def distortion_3(points, severity=(0.4,3)):
FILE: data/generate_c.py
function rotation (line 19) | def rotation(pointcloud,severity):
function shear (line 39) | def shear(pointcloud,severity):
function scale (line 56) | def scale(pointcloud,severity):
function uniform_noise (line 82) | def uniform_noise(pointcloud, severity):
function gaussian_noise (line 93) | def gaussian_noise(pointcloud, severity):
function background_noise (line 104) | def background_noise(pointcloud, severity):
function upsampling (line 114) | def upsampling(pointcloud, severity):
function impulse_noise (line 125) | def impulse_noise(pointcloud, severity):
function cutout (line 137) | def cutout(pointcloud, severity):
function uniform_sampling (line 153) | def uniform_sampling(pointcloud, severity):
function density_inc (line 162) | def density_inc(pointcloud, severity):
function density (line 187) | def density(pointcloud, severity):
function occlusion (line 202) | def occlusion(severity):
function simulate_lidar (line 238) | def simulate_lidar(pointcloud,pose,severity):
function lidar (line 270) | def lidar(severity):
function ffd_distortion (line 309) | def ffd_distortion(pointcloud, severity):
function rbf_distortion (line 315) | def rbf_distortion(pointcloud, severity):
function rbf_distortion_inv (line 321) | def rbf_distortion_inv(pointcloud, severity):
function load_data (line 329) | def load_data():
function save_data (line 352) | def save_data(data,corruption,severity):
FILE: data/occlusion.py
function random_pose (line 7) | def random_pose(severity):
function lidar_pose (line 32) | def lidar_pose(severity):
function get_default_camera_extrinsic (line 58) | def get_default_camera_extrinsic():
function get_default_camera_intrinsic (line 65) | def get_default_camera_intrinsic(width=1920, height=1080):
function core_occlusion (line 76) | def core_occlusion(mesh, type, camera_extrinsic=None, camera_intrinsic=N...
function occlusion_1 (line 114) | def occlusion_1(mesh, type, severity, window_width=1080, window_height=7...
FILE: data/util.py
function get_points (line 6) | def get_points(data):
function set_points (line 15) | def set_points(data, points):
function normalize (line 26) | def normalize(new_pc):
function denomalize (line 41) | def denomalize(points, scale, offset, hard_copy=False):
function shuffle_data (line 51) | def shuffle_data(data):
function appendSpherical_np (line 58) | def appendSpherical_np(xyz):
function appendCart_np (line 67) | def appendCart_np(xyz):
FILE: dataloader.py
class ModelNet40Rscnn (line 18) | class ModelNet40Rscnn(Dataset):
method __init__ (line 19) | def __init__(self, split, data_path, train_data_path,
method __len__ (line 39) | def __len__(self):
method __getitem__ (line 42) | def __getitem__(self, idx):
method batch_proc (line 50) | def batch_proc(self, data_batch, device):
class ModelNet40PN2 (line 75) | class ModelNet40PN2(Dataset):
method __init__ (line 76) | def __init__(self, split, train_data_path,
method __len__ (line 105) | def __len__(self):
method __getitem__ (line 108) | def __getitem__(self, idx):
method batch_proc (line 111) | def batch_proc(self, data_batch, device):
class ModelNet40Dgcnn (line 123) | class ModelNet40Dgcnn(Dataset):
method __init__ (line 124) | def __init__(self, split, train_data_path,
method __len__ (line 140) | def __len__(self):
method __getitem__ (line 143) | def __getitem__(self, idx):
function load_data (line 147) | def load_data(data_path,corruption,severity):
class ModelNet40C (line 157) | class ModelNet40C(Dataset):
method __init__ (line 158) | def __init__(self, split, test_data_path,corruption,severity):
method __getitem__ (line 171) | def __getitem__(self, item):
method __len__ (line 176) | def __len__(self):
function create_dataloader (line 180) | def create_dataloader(split, cfg):
FILE: dgcnn/pytorch/data.py
function download (line 19) | def download():
function load_data (line 32) | def load_data(data_path):
function translate_pointcloud (line 53) | def translate_pointcloud(pointcloud):
function jitter_pointcloud (line 61) | def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
class ModelNet40 (line 67) | class ModelNet40(Dataset):
method __init__ (line 68) | def __init__(self, num_points, data_path, partition='train'):
method __getitem__ (line 73) | def __getitem__(self, item):
method __len__ (line 81) | def __len__(self):
FILE: dgcnn/pytorch/main.py
function _init_ (line 27) | def _init_():
function train (line 39) | def train(args, io):
function test (line 139) | def test(args, io):
FILE: dgcnn/pytorch/model.py
function knn (line 21) | def knn(x, k):
function get_graph_feature (line 30) | def get_graph_feature(x, k=20, idx=None):
class PointNet (line 56) | class PointNet(nn.Module):
method __init__ (line 57) | def __init__(self, args, output_channels=40):
method forward (line 75) | def forward(self, x):
class DGCNN (line 88) | class DGCNN(nn.Module):
method __init__ (line 89) | def __init__(self, args, output_channels=40):
method forward (line 131) | def forward(self, x):
FILE: dgcnn/pytorch/util.py
function cal_loss (line 16) | def cal_loss(pred, gold, smoothing=True):
class IOStream (line 36) | class IOStream():
method __init__ (line 37) | def __init__(self, path):
method cprint (line 40) | def cprint(self, text):
method close (line 45) | def close(self):
FILE: dgcnn/tensorflow/evaluate.py
function log_string (line 51) | def log_string(out_str):
function evaluate (line 56) | def evaluate(num_votes):
function eval_one_epoch (line 90) | def eval_one_epoch(sess, ops, num_votes=1, topk=1):
FILE: dgcnn/tensorflow/models/dgcnn.py
function placeholder_inputs (line 14) | def placeholder_inputs(batch_size, num_point):
function get_model (line 20) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 101) | def get_loss(pred, label, end_points):
FILE: dgcnn/tensorflow/models/transform_nets.py
function input_transform_net (line 10) | def input_transform_net(edge_feature, is_training, bn_decay=None, K=3, i...
FILE: dgcnn/tensorflow/part_seg/part_seg_model.py
function get_model (line 14) | def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
function get_loss (line 123) | def get_loss(seg_pred, seg):
FILE: dgcnn/tensorflow/part_seg/test.py
function printout (line 56) | def printout(flog, data):
function output_color_point_cloud (line 60) | def output_color_point_cloud(data, seg, out_file):
function output_color_point_cloud_red_blue (line 67) | def output_color_point_cloud_red_blue(data, seg, out_file):
function pc_normalize (line 81) | def pc_normalize(pc):
function placeholder_inputs (line 89) | def placeholder_inputs():
function output_color_point_cloud (line 94) | def output_color_point_cloud(data, seg, out_file):
function load_pts_seg_files (line 101) | def load_pts_seg_files(pts_file, seg_file, catid):
function pc_augment_to_point_num (line 110) | def pc_augment_to_point_num(pts, pn):
function convert_label_to_one_hot (line 119) | def convert_label_to_one_hot(labels):
function predict (line 125) | def predict():
FILE: dgcnn/tensorflow/part_seg/train_multi_gpu.py
function printout (line 84) | def printout(flog, data):
function convert_label_to_one_hot (line 88) | def convert_label_to_one_hot(labels):
function average_gradients (line 94) | def average_gradients(tower_grads):
function train (line 131) | def train():
FILE: dgcnn/tensorflow/provider.py
function shuffle_data (line 20) | def shuffle_data(data, labels):
function rotate_point_cloud (line 33) | def rotate_point_cloud(batch_data):
function rotate_point_cloud_by_angle (line 54) | def rotate_point_cloud_by_angle(batch_data, rotation_angle):
function rotate_perturbation_point_cloud (line 74) | def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_...
function jitter_point_cloud (line 99) | def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
function shift_point_cloud (line 112) | def shift_point_cloud(batch_data, shift_range=0.1):
function random_scale_point_cloud (line 126) | def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
function getDataFiles (line 139) | def getDataFiles(list_filename):
function load_h5 (line 142) | def load_h5(h5_filename):
function loadDataFile (line 148) | def loadDataFile(filename):
function load_h5_data_label_seg (line 152) | def load_h5_data_label_seg(h5_filename):
FILE: dgcnn/tensorflow/sem_seg/batch_inference.py
function log_string (line 34) | def log_string(out_str):
function evaluate (line 39) | def evaluate():
function eval_one_epoch (line 85) | def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt...
FILE: dgcnn/tensorflow/sem_seg/indoor3d_util.py
function collect_point_label (line 37) | def collect_point_label(anno_path, out_filename, file_format='txt'):
function point_label_to_obj (line 80) | def point_label_to_obj(input_filename, out_filename, label_color=True, e...
function sample_data (line 111) | def sample_data(data, num_sample):
function sample_data_label (line 128) | def sample_data_label(data, label, num_sample):
function room2blocks (line 133) | def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
function room2blocks_plus (line 205) | def room2blocks_plus(data_label, num_point, block_size, stride,
function room2blocks_wrapper (line 216) | def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, ...
function room2blocks_plus_normalized (line 228) | def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
function room2blocks_wrapper_normalized (line 255) | def room2blocks_wrapper_normalized(data_label_filename, num_point, block...
function room2samples (line 267) | def room2samples(data, label, sample_num_point):
function room2samples_plus_normalized (line 303) | def room2samples_plus_normalized(data_label, num_point):
function room2samples_wrapper_normalized (line 329) | def room2samples_wrapper_normalized(data_label_filename, num_point):
function collect_bounding_box (line 344) | def collect_bounding_box(anno_path, out_filename):
function bbox_label_to_obj (line 386) | def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=Fal...
function bbox_label_to_obj_room (line 449) | def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_vie...
function collect_point_bounding_box (line 528) | def collect_point_bounding_box(anno_path, out_filename, file_format):
FILE: dgcnn/tensorflow/sem_seg/model.py
function placeholder_inputs (line 13) | def placeholder_inputs(batch_size, num_point):
function get_model (line 20) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 109) | def get_loss(pred, label):
FILE: dgcnn/tensorflow/sem_seg/train.py
function log_string (line 95) | def log_string(out_str):
function get_learning_rate (line 101) | def get_learning_rate(batch):
function get_bn_decay (line 111) | def get_bn_decay(batch):
function average_gradients (line 121) | def average_gradients(tower_grads):
function train (line 155) | def train():
function train_one_epoch (line 243) | def train_one_epoch(sess, ops, train_writer):
FILE: dgcnn/tensorflow/train.py
function log_string (line 67) | def log_string(out_str):
function get_learning_rate (line 73) | def get_learning_rate(batch):
function get_bn_decay (line 83) | def get_bn_decay(batch):
function train (line 93) | def train():
function train_one_epoch (line 171) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 220) | def eval_one_epoch(sess, ops, test_writer):
FILE: dgcnn/tensorflow/utils/data_prep_util.py
function export_ply (line 15) | def export_ply(pc, filename):
function get_sampling_command (line 23) | def get_sampling_command(obj_filename, ply_filename):
function get_category_names (line 35) | def get_category_names():
function get_obj_filenames (line 41) | def get_obj_filenames():
function batch_mkdir (line 48) | def batch_mkdir(output_folder, subdir_list):
function save_h5_data_label_normal (line 60) | def save_h5_data_label_normal(h5_filename, data, label, normal,
function save_h5 (line 79) | def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='u...
function load_h5_data_label_normal (line 92) | def load_h5_data_label_normal(h5_filename):
function load_h5_data_label_seg (line 100) | def load_h5_data_label_seg(h5_filename):
function load_h5 (line 108) | def load_h5(h5_filename):
function load_ply_data (line 119) | def load_ply_data(filename, point_num):
function load_ply_normal (line 126) | def load_ply_normal(filename, point_num):
function pad_arr_rows (line 134) | def pad_arr_rows(arr, row, pad='edge'):
FILE: dgcnn/tensorflow/utils/eulerangles.py
function euler2mat (line 98) | def euler2mat(z=0, y=0, x=0):
function mat2euler (line 198) | def mat2euler(M, cy_thresh=None):
function euler2quat (line 271) | def euler2quat(z=0, y=0, x=0):
function quat2euler (line 319) | def quat2euler(q):
function euler2angle_axis (line 348) | def euler2angle_axis(z=0, y=0, x=0):
function angle_axis2euler (line 382) | def angle_axis2euler(theta, vector, is_normalized=False):
FILE: dgcnn/tensorflow/utils/pc_util.py
function point_cloud_to_volume_batch (line 24) | def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flat...
function point_cloud_to_volume (line 41) | def point_cloud_to_volume(points, vsize, radius=1.0):
function volume_to_point_cloud (line 56) | def volume_to_point_cloud(vol):
function read_ply (line 77) | def read_ply(filename):
function write_ply (line 85) | def write_ply(points, filename, text=True):
function draw_point_cloud (line 97) | def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
function point_cloud_three_views (line 156) | def point_cloud_three_views(points):
function point_cloud_three_views_demo (line 171) | def point_cloud_three_views_demo():
function pyplot_draw_point_cloud (line 183) | def pyplot_draw_point_cloud(points, output_filename):
function pyplot_draw_volume (line 193) | def pyplot_draw_volume(vol, output_filename):
FILE: dgcnn/tensorflow/utils/plyfile.py
function _lookup_type (line 80) | def _lookup_type(type_str):
function _split_line (line 91) | def _split_line(line, n):
function make2d (line 101) | def make2d(array, cols=None, dtype=None):
class PlyParseError (line 121) | class PlyParseError(Exception):
method __init__ (line 131) | def __init__(self, message, element=None, row=None, prop=None):
method __repr__ (line 148) | def __repr__(self):
class PlyData (line 153) | class PlyData(object):
method __init__ (line 165) | def __init__(self, elements=[], text=False, byte_order='=',
method _get_elements (line 193) | def _get_elements(self):
method _set_elements (line 196) | def _set_elements(self, elements):
method _get_byte_order (line 202) | def _get_byte_order(self):
method _set_byte_order (line 205) | def _set_byte_order(self, byte_order):
method _index (line 213) | def _index(self):
method _parse_header (line 220) | def _parse_header(stream):
method read (line 275) | def read(stream):
method write (line 291) | def write(self, stream):
method header (line 307) | def header(self):
method __iter__ (line 333) | def __iter__(self):
method __len__ (line 336) | def __len__(self):
method __contains__ (line 339) | def __contains__(self, name):
method __getitem__ (line 342) | def __getitem__(self, name):
method __str__ (line 345) | def __str__(self):
method __repr__ (line 348) | def __repr__(self):
function _open_stream (line 355) | def _open_stream(stream, read_or_write):
class PlyElement (line 364) | class PlyElement(object):
method __init__ (line 379) | def __init__(self, name, properties, count, comments=[]):
method count (line 400) | def count(self):
method _get_data (line 403) | def _get_data(self):
method _set_data (line 406) | def _set_data(self, data):
method _check_sanity (line 413) | def _check_sanity(self):
method _get_properties (line 418) | def _get_properties(self):
method _set_properties (line 421) | def _set_properties(self, properties):
method _index (line 428) | def _index(self):
method ply_property (line 434) | def ply_property(self, name):
method name (line 438) | def name(self):
method _check_name (line 441) | def _check_name(self):
method dtype (line 446) | def dtype(self, byte_order='='):
method _parse_multi (line 458) | def _parse_multi(header_lines):
method _parse_one (line 471) | def _parse_one(lines):
method describe (line 507) | def describe(data, name, len_types={}, val_types={},
method _read (line 567) | def _read(self, stream, text, byte_order):
method _write (line 593) | def _write(self, stream, text, byte_order):
method _read_txt (line 611) | def _read_txt(self, stream):
method _write_txt (line 643) | def _write_txt(self, stream):
method _read_bin (line 656) | def _read_bin(self, stream, byte_order):
method _write_bin (line 673) | def _write_bin(self, stream, byte_order):
method header (line 684) | def header(self):
method __getitem__ (line 701) | def __getitem__(self, key):
method __setitem__ (line 704) | def __setitem__(self, key, value):
method __str__ (line 707) | def __str__(self):
method __repr__ (line 710) | def __repr__(self):
class PlyProperty (line 716) | class PlyProperty(object):
method __init__ (line 724) | def __init__(self, name, val_dtype):
method _get_val_dtype (line 729) | def _get_val_dtype(self):
method _set_val_dtype (line 732) | def _set_val_dtype(self, val_dtype):
method name (line 738) | def name(self):
method _check_name (line 741) | def _check_name(self):
method _parse_one (line 747) | def _parse_one(line):
method dtype (line 770) | def dtype(self, byte_order='='):
method _from_fields (line 778) | def _from_fields(self, fields):
method _to_fields (line 786) | def _to_fields(self, data):
method _read_bin (line 793) | def _read_bin(self, stream, byte_order):
method _write_bin (line 804) | def _write_bin(self, data, stream, byte_order):
method __str__ (line 811) | def __str__(self):
method __repr__ (line 815) | def __repr__(self):
class PlyListProperty (line 820) | class PlyListProperty(PlyProperty):
method __init__ (line 827) | def __init__(self, name, len_dtype, val_dtype):
method _get_len_dtype (line 832) | def _get_len_dtype(self):
method _set_len_dtype (line 835) | def _set_len_dtype(self, len_dtype):
method dtype (line 840) | def dtype(self, byte_order='='):
method list_dtype (line 847) | def list_dtype(self, byte_order='='):
method _from_fields (line 856) | def _from_fields(self, fields):
method _to_fields (line 867) | def _to_fields(self, data):
method _read_bin (line 881) | def _read_bin(self, stream, byte_order):
method _write_bin (line 895) | def _write_bin(self, data, stream, byte_order):
method __str__ (line 907) | def __str__(self):
method __repr__ (line 912) | def __repr__(self):
FILE: dgcnn/tensorflow/utils/tf_util.py
function _variable_on_cpu (line 12) | def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable...
function _variable_with_weight_decay (line 26) | def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
function conv1d (line 54) | def conv1d(inputs,
function conv2d (line 115) | def conv2d(inputs,
function conv2d_transpose (line 176) | def conv2d_transpose(inputs,
function conv3d (line 257) | def conv3d(inputs,
function fully_connected (line 317) | def fully_connected(inputs,
function max_pool2d (line 357) | def max_pool2d(inputs,
function avg_pool2d (line 382) | def avg_pool2d(inputs,
function max_pool3d (line 408) | def max_pool3d(inputs,
function avg_pool3d (line 433) | def avg_pool3d(inputs,
function batch_norm_template (line 462) | def batch_norm_template(inputs, is_training, scope, moments_dims, bn_dec...
function batch_norm_dist_template (line 502) | def batch_norm_dist_template(inputs, is_training, scope, moments_dims, b...
function batch_norm_for_fc (line 539) | def batch_norm_for_fc(inputs, is_training, bn_decay, scope, is_dist=False):
function batch_norm_for_conv1d (line 557) | def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, is_dist=...
function batch_norm_for_conv2d (line 577) | def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, is_dist=...
function batch_norm_for_conv3d (line 596) | def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=...
function dropout (line 614) | def dropout(inputs,
function pairwise_distance (line 638) | def pairwise_distance(point_cloud):
function knn (line 660) | def knn(adj_matrix, k=20):
function get_edge_feature (line 674) | def get_edge_feature(point_cloud, nn_idx, k=20):
FILE: emd/emd.cpp
function emd_forward (line 14) | int emd_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::T...
function emd_backward (line 20) | int emd_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, a...
function PYBIND11_MODULE (line 28) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: emd/emd_module.py
class emdFunction (line 31) | class emdFunction(Function):
method forward (line 33) | def forward(ctx, xyz1, xyz2, eps, iters):
method backward (line 64) | def backward(ctx, graddist, gradidx):
class emdModule (line 74) | class emdModule(nn.Module):
method __init__ (line 75) | def __init__(self):
method forward (line 78) | def forward(self, input1, input2, eps, iters):
function test_emd (line 81) | def test_emd():
FILE: gdrivedl.py
function output (line 39) | def output(text):
function sanitize (line 47) | def sanitize(filename):
function url_to_id (line 87) | def url_to_id(url):
class GDriveDL (line 96) | class GDriveDL(object):
method __init__ (line 97) | def __init__(self, quiet=False, overwrite=False):
method _request (line 103) | def _request(self, url):
method process_url (line 108) | def process_url(self, url, directory, filename=None):
method process_folder (line 126) | def process_folder(self, id, directory):
method process_file (line 150) | def process_file(self, id, directory, filename=None, confirm=''):
function main (line 214) | def main(args=None):
FILE: main.py
function adapt_bn (line 30) | def adapt_bn(data,model,cfg):
function adapt_tent (line 38) | def adapt_tent(data,model,cfg):
function check_inp_fmt (line 51) | def check_inp_fmt(task, data_batch, dataset_name):
function check_out_fmt (line 77) | def check_out_fmt(task, out, dataset_name):
function get_inp (line 100) | def get_inp(task, model, data_batch, batch_proc, dataset_name):
function get_loss (line 120) | def get_loss(task, loss_name, data_batch, out, dataset_name):
function validate (line 202) | def validate(task, loader, model, dataset_name, adapt = None, confusion ...
function train (line 258) | def train(task, loader, model, optimizer, loss_name, dataset_name, cfg):
function save_checkpoint (line 327) | def save_checkpoint(id, epoch, model, optimizer, lr_sched, bnm_sched, t...
function load_best_checkpoint (line 343) | def load_best_checkpoint(model, cfg):
function load_model_opt_sched (line 350) | def load_model_opt_sched(model, optimizer, lr_sched, bnm_sched, model_pa...
function get_model (line 375) | def get_model(cfg):
function get_metric_from_perf (line 425) | def get_metric_from_perf(task, perf, metric_name):
function get_optimizer (line 434) | def get_optimizer(optim_name, tr_arg, model):
function entry_train (line 465) | def entry_train(cfg, resume=False, model_path=""):
function entry_test (line 551) | def entry_test(cfg, test_or_valid, model_path="", confusion = False):
function rscnn_vote_evaluation (line 578) | def rscnn_vote_evaluation(cfg, model_path, log_file):
function pn2_vote_evaluation (line 604) | def pn2_vote_evaluation(cfg, model_path, log_file):
FILE: models/curvenet.py
class CurveNet (line 13) | class CurveNet(nn.Module):
method __init__ (line 15) | def __init__(self, task, dataset):
method forward (line 27) | def forward(self, pc, cls=None):
FILE: models/dgcnn.py
class DGCNN (line 7) | class DGCNN(nn.Module):
method __init__ (line 9) | def __init__(self, task, dataset):
method forward (line 29) | def forward(self, pc, cls=None):
FILE: models/gdanet.py
class GDANET (line 13) | class GDANET(nn.Module):
method __init__ (line 15) | def __init__(self, task, dataset):
method forward (line 24) | def forward(self, pc, normal=None, cls=None):
FILE: models/model_utils.py
class Squeeze (line 4) | class Squeeze(nn.Module):
method __init__ (line 5) | def __init__(self):
method forward (line 8) | def forward(self, inp):
class BatchNormPoint (line 11) | class BatchNormPoint(nn.Module):
method __init__ (line 12) | def __init__(self, feat_size, sync_bn=False):
method forward (line 21) | def forward(self, x):
FILE: models/mv.py
class MVModel (line 8) | class MVModel(nn.Module):
method __init__ (line 9) | def __init__(self, task, dataset, backbone,
method forward (line 33) | def forward(self, pc):
method get_img (line 46) | def get_img(self, pc):
method get_img_layers (line 58) | def get_img_layers(backbone, feat_size):
class MVFC (line 95) | class MVFC(nn.Module):
method __init__ (line 100) | def __init__(self, num_views, in_features, out_features, dropout_p):
method forward (line 117) | def forward(self, feat):
FILE: models/mv_utils.py
function euler2mat (line 7) | def euler2mat(angle):
function distribute (line 59) | def distribute(depth, _x, _y, size_x, size_y, image_height, image_width):
function points2depth (line 135) | def points2depth(points, image_height, image_width, size_x=4, size_y=4):
function batched_index_select (line 174) | def batched_index_select(inp, dim, index):
function point_fea_img_fea (line 189) | def point_fea_img_fea(point_fea, point_coo, h, w):
function distribute_img_fea_points (line 212) | def distribute_img_fea_points(img_fea, point_coord):
class PCViews (line 234) | class PCViews:
method __init__ (line 239) | def __init__(self):
method get_img (line 253) | def get_img(self, points):
method point_transform (line 280) | def point_transform(points, rot_mat, translation):
FILE: models/pct.py
class Pct (line 5) | class Pct(nn.Module):
method __init__ (line 7) | def __init__(self, task, dataset):
method forward (line 24) | def forward(self, pc, cls=None):
FILE: models/pointmlp.py
class pointMLP (line 13) | class pointMLP(nn.Module):
method __init__ (line 15) | def __init__(self, task, dataset):
method forward (line 27) | def forward(self, pc, cls=None):
FILE: models/pointmlp2.py
class pointMLP2 (line 12) | class pointMLP2(nn.Module):
method __init__ (line 14) | def __init__(self, task, dataset):
method forward (line 26) | def forward(self, pc, cls=None):
FILE: models/pointnet.py
class PointNet (line 6) | class PointNet(nn.Module):
method __init__ (line 8) | def __init__(self, dataset, task):
method forward (line 17) | def forward(self, pc, cls=None):
FILE: models/pointnet2.py
class PointNet2 (line 13) | class PointNet2(nn.Module):
method __init__ (line 15) | def __init__(self, task, dataset, version_cls):
method forward (line 24) | def forward(self, pc, normal=None, cls=None):
FILE: models/resnet.py
function conv3x3 (line 24) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
function conv1x1 (line 30) | def conv1x1(in_planes, out_planes, stride=1):
class BasicBlock (line 35) | class BasicBlock(nn.Module):
method __init__ (line 38) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 56) | def forward(self, x):
class Bottleneck (line 75) | class Bottleneck(nn.Module):
method __init__ (line 84) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 101) | def forward(self, x):
class ResNet (line 124) | class ResNet(nn.Module):
method __init__ (line 126) | def __init__(self, block, layers, num_classes=1000, zero_init_residual...
method _make_layer (line 177) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
method _forward_impl (line 201) | def _forward_impl(self, x):
method forward (line 219) | def forward(self, x):
function _resnet (line 223) | def _resnet(arch, block, layers, pretrained, progress, **kwargs):
function resnet18 (line 232) | def resnet18(pretrained=False, progress=True, **kwargs):
function resnet34 (line 243) | def resnet34(pretrained=False, progress=True, **kwargs):
function resnet50 (line 254) | def resnet50(pretrained=False, progress=True, **kwargs):
function resnet101 (line 265) | def resnet101(pretrained=False, progress=True, **kwargs):
function resnet152 (line 276) | def resnet152(pretrained=False, progress=True, **kwargs):
function resnext50_32x4d (line 287) | def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
function resnext101_32x8d (line 300) | def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
function wide_resnet50_2 (line 313) | def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
function wide_resnet101_2 (line 329) | def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
FILE: models/rscnn.py
class RSCNN (line 9) | class RSCNN(nn.Module):
method __init__ (line 11) | def __init__(self, task, dataset, ssn_or_msn):
method forward (line 35) | def forward(self, pc, cls=None):
FILE: pc_utils.py
function jitter_point_cloud (line 5) | def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
function rotate_point_cloud (line 19) | def rotate_point_cloud(batch_data):
function translate_pointcloud (line 40) | def translate_pointcloud(pointcloud):
class PointcloudScaleAndTranslate (line 48) | class PointcloudScaleAndTranslate(object):
method __init__ (line 49) | def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_ra...
method __call__ (line 61) | def __call__(self, pc):
FILE: pointMLP/classification_ModelNet40/data.py
function download (line 8) | def download():
function load_data (line 20) | def load_data(partition):
function random_point_dropout (line 38) | def random_point_dropout(pc, max_dropout_ratio=0.875):
function translate_pointcloud (line 49) | def translate_pointcloud(pointcloud):
function jitter_pointcloud (line 56) | def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
class ModelNet40 (line 62) | class ModelNet40(Dataset):
method __init__ (line 63) | def __init__(self, num_points, partition='train'):
method __getitem__ (line 68) | def __getitem__(self, item):
method __len__ (line 77) | def __len__(self):
FILE: pointMLP/classification_ModelNet40/helper.py
function cal_loss (line 4) | def cal_loss(pred, gold, smoothing=True):
FILE: pointMLP/classification_ModelNet40/main.py
function parse_args (line 24) | def parse_args():
function main (line 42) | def main():
function train (line 179) | def train(net, trainloader, optimizer, criterion, device):
function validate (line 219) | def validate(net, testloader, criterion, device):
FILE: pointMLP/classification_ModelNet40/models/pointmlp.py
function get_activation (line 12) | def get_activation(activation):
function square_distance (line 29) | def square_distance(src, dst):
function index_points (line 51) | def index_points(points, idx):
function farthest_point_sample (line 70) | def farthest_point_sample(xyz, npoint):
function query_ball_point (line 93) | def query_ball_point(radius, nsample, xyz, new_xyz):
function knn_point (line 116) | def knn_point(nsample, xyz, new_xyz):
class LocalGrouper (line 130) | class LocalGrouper(nn.Module):
method __init__ (line 131) | def __init__(self, channel, groups, kneighbors, use_xyz=True, normaliz...
method forward (line 154) | def forward(self, xyz, points):
class ConvBNReLU1D (line 185) | class ConvBNReLU1D(nn.Module):
method __init__ (line 186) | def __init__(self, in_channels, out_channels, kernel_size=1, bias=True...
method forward (line 195) | def forward(self, x):
class ConvBNReLURes1D (line 199) | class ConvBNReLURes1D(nn.Module):
method __init__ (line 200) | def __init__(self, channel, kernel_size=1, groups=1, res_expansion=1.0...
method forward (line 226) | def forward(self, x):
class PreExtraction (line 230) | class PreExtraction(nn.Module):
method __init__ (line 231) | def __init__(self, channels, out_channels, blocks=1, groups=1, res_ex...
method forward (line 249) | def forward(self, x):
class PosExtraction (line 261) | class PosExtraction(nn.Module):
method __init__ (line 262) | def __init__(self, channels, blocks=1, groups=1, res_expansion=1, bias...
method forward (line 276) | def forward(self, x): # [b, d, g]
class Model (line 280) | class Model(nn.Module):
method __init__ (line 281) | def __init__(self, points=1024, class_num=40, embed_dim=64, groups=1, ...
method forward (line 332) | def forward(self, x):
function pointMLP (line 349) | def pointMLP(num_classes=40, **kwargs) -> Model:
function pointMLPElite (line 356) | def pointMLPElite(num_classes=40, **kwargs) -> Model:
FILE: pointMLP/classification_ModelNet40/utils/logger.py
function savefig (line 11) | def savefig(fname, dpi=None):
function plot_overlap (line 15) | def plot_overlap(logger, names=None):
class Logger (line 23) | class Logger(object):
method __init__ (line 25) | def __init__(self, fpath, title=None, resume=False):
method set_names (line 47) | def set_names(self, names):
method append (line 61) | def append(self, numbers):
method plot (line 70) | def plot(self, names=None):
method close (line 79) | def close(self):
class LoggerMonitor (line 83) | class LoggerMonitor(object):
method __init__ (line 85) | def __init__ (self, paths):
method plot (line 92) | def plot(self, names=None):
FILE: pointMLP/classification_ModelNet40/utils/misc.py
function get_mean_and_std (line 26) | def get_mean_and_std(dataset):
function init_params (line 41) | def init_params(net):
function mkdir_p (line 56) | def mkdir_p(path):
class AverageMeter (line 66) | class AverageMeter(object):
method __init__ (line 70) | def __init__(self):
method reset (line 73) | def reset(self):
method update (line 79) | def update(self, val, n=1):
function progress_bar (line 90) | def progress_bar(current, total, msg=None):
function format_time (line 134) | def format_time(seconds):
function save_model (line 167) | def save_model(net, epoch, path, acc, is_best, **kwargs):
function save_args (line 182) | def save_args(args):
function set_seed (line 190) | def set_seed(seed=None):
class IOStream (line 205) | class IOStream():
method __init__ (line 206) | def __init__(self, path):
method cprint (line 209) | def cprint(self, text):
method close (line 214) | def close(self):
function cal_loss (line 218) | def cal_loss(pred, gold, smoothing=True):
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/__init__.py
class Infinite (line 27) | class Infinite(object):
method __init__ (line 31) | def __init__(self, *args, **kwargs):
method __getitem__ (line 40) | def __getitem__(self, key):
method elapsed (line 46) | def elapsed(self):
method elapsed_td (line 50) | def elapsed_td(self):
method update_avg (line 53) | def update_avg(self, n, dt):
method update (line 58) | def update(self):
method start (line 61) | def start(self):
method finish (line 64) | def finish(self):
method next (line 67) | def next(self, n=1):
method iter (line 75) | def iter(self, it):
class Progress (line 84) | class Progress(Infinite):
method __init__ (line 85) | def __init__(self, *args, **kwargs):
method eta (line 90) | def eta(self):
method eta_td (line 94) | def eta_td(self):
method percent (line 98) | def percent(self):
method progress (line 102) | def progress(self):
method remaining (line 106) | def remaining(self):
method start (line 109) | def start(self):
method goto (line 112) | def goto(self, index):
method iter (line 116) | def iter(self, it):
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/bar.py
class Bar (line 22) | class Bar(WritelnMixin, Progress):
method update (line 32) | def update(self):
class ChargingBar (line 45) | class ChargingBar(Bar):
class FillingSquaresBar (line 53) | class FillingSquaresBar(ChargingBar):
class FillingCirclesBar (line 58) | class FillingCirclesBar(ChargingBar):
class IncrementalBar (line 63) | class IncrementalBar(Bar):
method update (line 66) | def update(self):
class PixelBar (line 83) | class PixelBar(IncrementalBar):
class ShadyBar (line 87) | class ShadyBar(IncrementalBar):
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/counter.py
class Counter (line 22) | class Counter(WriteMixin, Infinite):
method update (line 26) | def update(self):
class Countdown (line 30) | class Countdown(WriteMixin, Progress):
method update (line 33) | def update(self):
class Stack (line 37) | class Stack(WriteMixin, Progress):
method update (line 41) | def update(self):
class Pie (line 47) | class Pie(Stack):
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/helpers.py
class WriteMixin (line 22) | class WriteMixin(object):
method __init__ (line 25) | def __init__(self, message=None, **kwargs):
method write (line 37) | def write(self, s):
method finish (line 45) | def finish(self):
class WritelnMixin (line 50) | class WritelnMixin(object):
method __init__ (line 53) | def __init__(self, message=None, **kwargs):
method clearln (line 61) | def clearln(self):
method writeln (line 65) | def writeln(self, line):
method finish (line 71) | def finish(self):
class SigIntMixin (line 82) | class SigIntMixin(object):
method __init__ (line 85) | def __init__(self, *args, **kwargs):
method _sigint_handler (line 89) | def _sigint_handler(self, signum, frame):
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/spinner.py
class Spinner (line 22) | class Spinner(WriteMixin, Infinite):
method update (line 27) | def update(self):
class PieSpinner (line 32) | class PieSpinner(Spinner):
class MoonSpinner (line 36) | class MoonSpinner(Spinner):
class LineSpinner (line 40) | class LineSpinner(Spinner):
class PixelSpinner (line 43) | class PixelSpinner(Spinner):
FILE: pointMLP/classification_ModelNet40/utils/progress/test_progress.py
function sleep (line 16) | def sleep():
FILE: pointMLP/classification_ModelNet40/voting.py
function parse_args (line 23) | def parse_args():
class PointcloudScale (line 43) | class PointcloudScale(object): # input random scaling
method __init__ (line 44) | def __init__(self, scale_low=2. / 3., scale_high=3. / 2.):
method __call__ (line 48) | def __call__(self, pc):
function main (line 57) | def main():
function validate (line 113) | def validate(net, testloader, criterion, device):
function voting (line 147) | def voting(net, testloader, device, args):
FILE: pointnet2_pyt/pointnet2/_ext-src/include/cuda_utils.h
function opt_n_threads (line 15) | inline int opt_n_threads(int work_size) {
function dim3 (line 21) | inline dim3 opt_block_config(int x, int y) {
FILE: pointnet2_pyt/pointnet2/_ext-src/src/ball_query.cpp
function ball_query (line 8) | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float ra...
FILE: pointnet2_pyt/pointnet2/_ext-src/src/bindings.cpp
function PYBIND11_MODULE (line 6) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: pointnet2_pyt/pointnet2/_ext-src/src/group_points.cpp
function group_points (line 12) | at::Tensor group_points(at::Tensor points, at::Tensor idx) {
function group_points_grad (line 37) | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const ...
FILE: pointnet2_pyt/pointnet2/_ext-src/src/interpolate.cpp
function three_nn (line 14) | std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
function three_interpolate (line 42) | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
function three_interpolate_grad (line 71) | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
FILE: pointnet2_pyt/pointnet2/_ext-src/src/sampling.cpp
function gather_points (line 15) | at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
function gather_points_grad (line 40) | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
function furthest_point_sampling (line 65) | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
FILE: pointnet2_pyt/pointnet2/data/Indoor3DSemSegLoader.py
function _get_data_files (line 19) | def _get_data_files(list_filename):
function _load_data_file (line 24) | def _load_data_file(name):
class Indoor3DSemSeg (line 31) | class Indoor3DSemSeg(data.Dataset):
method __init__ (line 32) | def __init__(self, num_points, train=True, download=True, data_precent...
method __getitem__ (line 84) | def __getitem__(self, idx):
method __len__ (line 97) | def __len__(self):
method set_num_points (line 100) | def set_num_points(self, pts):
method randomize (line 103) | def randomize(self):
FILE: pointnet2_pyt/pointnet2/data/ModelNet40Loader.py
function _get_data_files (line 19) | def _get_data_files(list_filename):
function _load_data_file (line 24) | def _load_data_file(name):
class ModelNet40Cls (line 31) | class ModelNet40Cls(data.Dataset):
method __init__ (line 32) | def __init__(self, num_points, transforms=None, train=True, download=T...
method __getitem__ (line 69) | def __getitem__(self, idx):
method __len__ (line 81) | def __len__(self):
method set_num_points (line 84) | def set_num_points(self, pts):
method randomize (line 87) | def randomize(self):
FILE: pointnet2_pyt/pointnet2/data/data_utils.py
function angle_axis (line 12) | def angle_axis(angle, axis):
class PointcloudScale (line 45) | class PointcloudScale(object):
method __init__ (line 46) | def __init__(self, lo=0.8, hi=1.25):
method __call__ (line 49) | def __call__(self, points):
class PointcloudRotate (line 55) | class PointcloudRotate(object):
method __init__ (line 56) | def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
method __call__ (line 59) | def __call__(self, points):
class PointcloudRotatePerturbation (line 75) | class PointcloudRotatePerturbation(object):
method __init__ (line 76) | def __init__(self, angle_sigma=0.06, angle_clip=0.18):
method _get_angles (line 79) | def _get_angles(self):
method __call__ (line 86) | def __call__(self, points):
class PointcloudJitter (line 106) | class PointcloudJitter(object):
method __init__ (line 107) | def __init__(self, std=0.01, clip=0.05):
method __call__ (line 110) | def __call__(self, points):
class PointcloudTranslate (line 120) | class PointcloudTranslate(object):
method __init__ (line 121) | def __init__(self, translate_range=0.1):
method __call__ (line 124) | def __call__(self, points):
class PointcloudToTensor (line 130) | class PointcloudToTensor(object):
method __call__ (line 131) | def __call__(self, points):
class PointcloudRandomInputDropout (line 135) | class PointcloudRandomInputDropout(object):
method __init__ (line 136) | def __init__(self, max_dropout_ratio=0.875):
method __call__ (line 140) | def __call__(self, points):
FILE: pointnet2_pyt/pointnet2/models/pointnet2_msg_cls.py
function model_fn_decorator (line 16) | def model_fn_decorator(criterion):
class Pointnet2MSG (line 37) | class Pointnet2MSG(nn.Module):
method __init__ (line 53) | def __init__(self, num_classes, input_channels=3, use_xyz=True, versio...
method _break_up_pc (line 118) | def _break_up_pc(self, pc):
method forward (line 124) | def forward(self, pointcloud):
class Pointnet2MSG5K (line 147) | class Pointnet2MSG5K(nn.Module):
method __init__ (line 163) | def __init__(self, num_classes, input_channels=3, use_xyz=True):
method _break_up_pc (line 208) | def _break_up_pc(self, pc):
method forward (line 214) | def forward(self, pointcloud):
FILE: pointnet2_pyt/pointnet2/models/pointnet2_msg_sem.py
function model_fn_decorator (line 16) | def model_fn_decorator(criterion):
class Pointnet2MSG (line 36) | class Pointnet2MSG(nn.Module):
method __init__ (line 52) | def __init__(self, num_classes, input_channels=6, use_xyz=True):
method _break_up_pc (line 117) | def _break_up_pc(self, pc):
method forward (line 123) | def forward(self, pointcloud):
FILE: pointnet2_pyt/pointnet2/models/pointnet2_ssg_cls.py
function model_fn_decorator (line 16) | def model_fn_decorator(criterion):
class Pointnet2SSG (line 37) | class Pointnet2SSG(nn.Module):
method __init__ (line 53) | def __init__(self, num_classes, input_channels=3, use_xyz=True):
method _break_up_pc (line 88) | def _break_up_pc(self, pc):
method forward (line 94) | def forward(self, pointcloud):
FILE: pointnet2_pyt/pointnet2/models/pointnet2_ssg_sem.py
function model_fn_decorator (line 16) | def model_fn_decorator(criterion):
class Pointnet2SSG (line 36) | class Pointnet2SSG(nn.Module):
method __init__ (line 52) | def __init__(self, num_classes, input_channels=3, use_xyz=True):
method _break_up_pc (line 108) | def _break_up_pc(self, pc):
method forward (line 114) | def forward(self, pointcloud):
FILE: pointnet2_pyt/pointnet2/train/train_cls.py
function parse_args (line 29) | def parse_args():
FILE: pointnet2_pyt/pointnet2/utils/linalg_utils.py
function pdist2 (line 15) | def pdist2(X, Z=None, order=PDist2Order.d_second):
function pdist2_slow (line 66) | def pdist2_slow(X, Z=None):
FILE: pointnet2_pyt/pointnet2/utils/pointnet2_modules.py
class _PointnetSAModuleBase (line 20) | class _PointnetSAModuleBase(nn.Module):
method __init__ (line 21) | def __init__(self):
method forward (line 27) | def forward(self, xyz, features=None):
class PointnetSAModuleMSG (line 74) | class PointnetSAModuleMSG(_PointnetSAModuleBase):
method __init__ (line 91) | def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
class PointnetSAModule (line 115) | class PointnetSAModule(PointnetSAModuleMSG):
method __init__ (line 132) | def __init__(
class PointnetFPModule (line 146) | class PointnetFPModule(nn.Module):
method __init__ (line 157) | def __init__(self, mlp, bn=True):
method forward (line 162) | def forward(self, unknown, known, unknow_feats, known_feats):
FILE: pointnet2_pyt/pointnet2/utils/pointnet2_utils.py
class RandomDropout (line 34) | class RandomDropout(nn.Module):
method __init__ (line 35) | def __init__(self, p=0.5, inplace=False):
method forward (line 40) | def forward(self, X):
class FurthestPointSampling (line 45) | class FurthestPointSampling(Function):
method forward (line 47) | def forward(ctx, xyz, npoint):
method backward (line 68) | def backward(xyz, a=None):
class GatherOperation (line 75) | class GatherOperation(Function):
method forward (line 77) | def forward(ctx, features, idx):
method backward (line 102) | def backward(ctx, grad_out):
class ThreeNN (line 112) | class ThreeNN(Function):
method forward (line 114) | def forward(ctx, unknown, known):
method backward (line 137) | def backward(ctx, a=None, b=None):
class ThreeInterpolate (line 144) | class ThreeInterpolate(Function):
method forward (line 146) | def forward(ctx, features, idx, weight):
method backward (line 172) | def backward(ctx, grad_out):
class GroupingOperation (line 201) | class GroupingOperation(Function):
method forward (line 203) | def forward(ctx, features, idx):
method backward (line 227) | def backward(ctx, grad_out):
class BallQuery (line 252) | class BallQuery(Function):
method forward (line 254) | def forward(ctx, radius, nsample, xyz, new_xyz):
method backward (line 277) | def backward(ctx, a=None):
class QueryAndGroup (line 284) | class QueryAndGroup(nn.Module):
method __init__ (line 296) | def __init__(self, radius, nsample, use_xyz=True):
method forward (line 301) | def forward(self, xyz, new_xyz, features=None):
class GroupAll (line 341) | class GroupAll(nn.Module):
method __init__ (line 349) | def __init__(self, use_xyz=True):
method forward (line 354) | def forward(self, xyz, new_xyz, features=None):
FILE: pointnet2_pyt/tests/conftest.py
function _test_loop (line 15) | def _test_loop(model, model_fn, inputs, labels):
function cls_test_xyz (line 31) | def cls_test_xyz(model, model_fn):
function cls_test_no_xyz (line 41) | def cls_test_no_xyz(model, model_fn):
function semseg_test_xyz (line 51) | def semseg_test_xyz(model, model_fn):
function semseg_test_no_xyz (line 61) | def semseg_test_no_xyz(model, model_fn):
FILE: pointnet2_pyt/tests/test_cls_msg.py
function test_xyz (line 13) | def test_xyz():
function test_no_xyz (line 18) | def test_no_xyz():
FILE: pointnet2_pyt/tests/test_cls_ssg.py
function test_xyz (line 13) | def test_xyz():
function test_no_xyz (line 18) | def test_no_xyz():
FILE: pointnet2_pyt/tests/test_semseg_msg.py
function test_xyz (line 13) | def test_xyz():
function test_no_xyz (line 18) | def test_no_xyz():
FILE: pointnet2_pyt/tests/test_semseg_ssg.py
function test_xyz (line 13) | def test_xyz():
function test_no_xyz (line 18) | def test_no_xyz():
FILE: pointnet2_tf/evaluate.py
function log_string (line 62) | def log_string(out_str):
function evaluate (line 67) | def evaluate(num_votes):
function eval_one_epoch (line 102) | def eval_one_epoch(sess, ops, num_votes=1, topk=1):
FILE: pointnet2_tf/modelnet_dataset.py
function pc_normalize (line 15) | def pc_normalize(pc):
class ModelNetDataset (line 23) | class ModelNetDataset():
method __init__ (line 24) | def __init__(self, root, batch_size = 32, npoints = 1024, split='train...
method _augment_batch_data (line 60) | def _augment_batch_data(self, batch_data):
method _get_item (line 75) | def _get_item(self, index):
method __getitem__ (line 93) | def __getitem__(self, index):
method __len__ (line 96) | def __len__(self):
method num_channel (line 99) | def num_channel(self):
method reset (line 105) | def reset(self):
method has_next_batch (line 112) | def has_next_batch(self):
method next_batch (line 115) | def next_batch(self, augment=False):
FILE: pointnet2_tf/modelnet_h5_dataset.py
function shuffle_data (line 24) | def shuffle_data(data, labels):
function getDataFiles (line 36) | def getDataFiles(list_filename):
function load_h5 (line 39) | def load_h5(h5_filename):
function loadDataFile (line 45) | def loadDataFile(filename):
class ModelNetH5Dataset (line 49) | class ModelNetH5Dataset(object):
method __init__ (line 50) | def __init__(self, list_filename, batch_size = 32, npoints = 1024, shu...
method reset (line 58) | def reset(self):
method _augment_batch_data (line 67) | def _augment_batch_data(self, batch_data):
method _get_data_filename (line 77) | def _get_data_filename(self):
method _load_data_file (line 80) | def _load_data_file(self, filename):
method _has_next_batch_in_file (line 87) | def _has_next_batch_in_file(self):
method num_channel (line 90) | def num_channel(self):
method has_next_batch (line 93) | def has_next_batch(self):
method next_batch (line 103) | def next_batch(self, augment=False):
FILE: pointnet2_tf/models/pointnet2_cls_msg.py
function placeholder_inputs (line 11) | def placeholder_inputs(batch_size, num_point):
function get_model (line 17) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 42) | def get_loss(pred, label, end_points):
FILE: pointnet2_tf/models/pointnet2_cls_ssg.py
function placeholder_inputs (line 15) | def placeholder_inputs(batch_size, num_point):
function get_model (line 20) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 47) | def get_loss(pred, label, end_points):
FILE: pointnet2_tf/models/pointnet2_part_seg.py
function placeholder_inputs (line 11) | def placeholder_inputs(batch_size, num_point):
function get_model (line 17) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 44) | def get_loss(pred, label):
FILE: pointnet2_tf/models/pointnet2_part_seg_msg_one_hot.py
function placeholder_inputs (line 11) | def placeholder_inputs(batch_size, num_point):
function get_model (line 19) | def get_model(point_cloud, cls_label, is_training, bn_decay=None):
function get_loss (line 50) | def get_loss(pred, label):
FILE: pointnet2_tf/models/pointnet2_sem_seg.py
function placeholder_inputs (line 11) | def placeholder_inputs(batch_size, num_point):
function get_model (line 18) | def get_model(point_cloud, is_training, num_class, bn_decay=None):
function get_loss (line 48) | def get_loss(pred, label, smpw):
FILE: pointnet2_tf/models/pointnet_cls_basic.py
function placeholder_inputs (line 15) | def placeholder_inputs(batch_size, num_point):
function get_model (line 21) | def get_model(point_cloud, is_training, bn_decay=None):
function get_loss (line 67) | def get_loss(pred, label, end_points):
FILE: pointnet2_tf/part_seg/evaluate.py
function log_string (line 53) | def log_string(out_str):
function evaluate (line 58) | def evaluate():
function get_batch (line 85) | def get_batch(dataset, idxs, start_idx, end_idx):
function eval_one_epoch (line 96) | def eval_one_epoch(sess, ops):
FILE: pointnet2_tf/part_seg/part_dataset.py
function pc_normalize (line 11) | def pc_normalize(pc):
class PartDataset (line 19) | class PartDataset():
method __init__ (line 20) | def __init__(self, root, npoints = 2500, classification = False, class...
method __getitem__ (line 87) | def __getitem__(self, index):
method __len__ (line 112) | def __len__(self):
FILE: pointnet2_tf/part_seg/part_dataset_all_normal.py
function pc_normalize (line 11) | def pc_normalize(pc):
class PartNormalDataset (line 19) | class PartNormalDataset():
method __init__ (line 20) | def __init__(self, root, npoints = 2500, classification = False, split...
method __getitem__ (line 83) | def __getitem__(self, index):
method __len__ (line 114) | def __len__(self):
FILE: pointnet2_tf/part_seg/test.py
function get_model (line 37) | def get_model(batch_size, num_point):
function inference (line 59) | def inference(sess, ops, pc, batch_size):
FILE: pointnet2_tf/part_seg/train.py
function log_string (line 69) | def log_string(out_str):
function get_learning_rate (line 74) | def get_learning_rate(batch):
function get_bn_decay (line 84) | def get_bn_decay(batch):
function train (line 94) | def train():
function get_batch (line 168) | def get_batch(dataset, idxs, start_idx, end_idx):
function train_one_epoch (line 179) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 223) | def eval_one_epoch(sess, ops, test_writer):
FILE: pointnet2_tf/part_seg/train_one_hot.py
function log_string (line 69) | def log_string(out_str):
function get_learning_rate (line 74) | def get_learning_rate(batch):
function get_bn_decay (line 84) | def get_bn_decay(batch):
function train (line 94) | def train():
function get_batch (line 171) | def get_batch(dataset, idxs, start_idx, end_idx):
function train_one_epoch (line 184) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 229) | def eval_one_epoch(sess, ops, test_writer):
FILE: pointnet2_tf/scannet/pc_util.py
function point_cloud_label_to_surface_voxel_label (line 23) | def point_cloud_label_to_surface_voxel_label(point_cloud, label, res=0.0...
function point_cloud_label_to_surface_voxel_label_fast (line 39) | def point_cloud_label_to_surface_voxel_label_fast(point_cloud, label, re...
function point_cloud_to_volume_batch (line 53) | def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flat...
function point_cloud_to_volume (line 70) | def point_cloud_to_volume(points, vsize, radius=1.0):
function volume_to_point_cloud (line 85) | def volume_to_point_cloud(vol):
function point_cloud_to_volume_v2_batch (line 102) | def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, n...
function point_cloud_to_volume_v2 (line 113) | def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
function point_cloud_to_image_batch (line 155) | def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sa...
function point_cloud_to_image (line 167) | def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
function read_ply (line 205) | def read_ply(filename):
function read_ply_xyz (line 212) | def read_ply_xyz(filename):
function read_ply_xyzrgb (line 224) | def read_ply_xyzrgb(filename):
function write_ply (line 239) | def write_ply(points, filename, text=True):
function draw_point_cloud (line 251) | def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
function point_cloud_three_views (line 310) | def point_cloud_three_views(points):
function point_cloud_three_views_demo (line 324) | def point_cloud_three_views_demo():
function pyplot_draw_point_cloud (line 336) | def pyplot_draw_point_cloud(points, output_filename):
function pyplot_draw_volume (line 347) | def pyplot_draw_volume(vol, output_filename):
function write_ply_color (line 354) | def write_ply_color(points, labels, out_filename, num_classes=None):
function write_ply_rgb (line 371) | def write_ply_rgb(points, colors, out_filename, num_classes=None):
FILE: pointnet2_tf/scannet/preprocessing/collect_scannet_scenes.py
function collect_one_scene_data_label (line 19) | def collect_one_scene_data_label(scene_name, out_filename):
function log_string (line 82) | def log_string(out_str):
FILE: pointnet2_tf/scannet/preprocessing/scannet_util.py
function get_raw2scannet_label_map (line 5) | def get_raw2scannet_label_map():
FILE: pointnet2_tf/scannet/scannet_dataset.py
class ScannetDataset (line 8) | class ScannetDataset():
method __init__ (line 9) | def __init__(self, root, npoints=8192, split='train'):
method __getitem__ (line 27) | def __getitem__(self, index):
method __len__ (line 61) | def __len__(self):
class ScannetDatasetWholeScene (line 64) | class ScannetDatasetWholeScene():
method __init__ (line 65) | def __init__(self, root, npoints=8192, split='train'):
method __getitem__ (line 83) | def __getitem__(self, index):
method __len__ (line 119) | def __len__(self):
class ScannetDatasetVirtualScan (line 122) | class ScannetDatasetVirtualScan():
method __init__ (line 123) | def __init__(self, root, npoints=8192, split='train'):
method __getitem__ (line 141) | def __getitem__(self, index):
method __len__ (line 166) | def __len__(self):
FILE: pointnet2_tf/scannet/scene_util.py
function cart2sph (line 11) | def cart2sph(xyz):
function virtual_scan (line 20) | def virtual_scan(xyz, mode=-1):
FILE: pointnet2_tf/scannet/train.py
function log_string (line 73) | def log_string(out_str):
function get_learning_rate (line 78) | def get_learning_rate(batch):
function get_bn_decay (line 88) | def get_bn_decay(batch):
function train (line 98) | def train():
function get_batch_wdp (line 181) | def get_batch_wdp(dataset, idxs, start_idx, end_idx):
function get_batch (line 199) | def get_batch(dataset, idxs, start_idx, end_idx):
function train_one_epoch (line 211) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 252) | def eval_one_epoch(sess, ops, test_writer):
function eval_whole_scene_one_epoch (line 326) | def eval_whole_scene_one_epoch(sess, ops, test_writer):
FILE: pointnet2_tf/tf_ops/3d_interpolation/interpolate.cpp
function randomf (line 9) | float randomf(){
function get_time (line 12) | static double get_time(){
function threenn_cpu (line 21) | void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xy...
function get_weights_cpu (line 69) | void get_weights_cpu(int b, int n, const float *dist, float *weight) {
function interpolate_cpu (line 84) | void interpolate_cpu(int b, int m, int c, int n, const float *points, co...
function interpolate_grad_cpu (line 108) | void interpolate_grad_cpu(int b, int n, int c, int m, const float *grad_...
function main (line 132) | int main()
FILE: pointnet2_tf/tf_ops/3d_interpolation/tf_interpolate.cpp
function randomf (line 48) | float randomf(){
function get_time (line 51) | static double get_time(){
function threenn_cpu (line 60) | void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xy...
function threeinterpolate_cpu (line 107) | void threeinterpolate_cpu(int b, int m, int c, int n, const float *point...
function threeinterpolate_grad_cpu (line 131) | void threeinterpolate_grad_cpu(int b, int n, int c, int m, const float *...
class ThreeNNOp (line 157) | class ThreeNNOp : public OpKernel {
method ThreeNNOp (line 159) | explicit ThreeNNOp(OpKernelConstruction* context) : OpKernel(context) {}
method Compute (line 161) | void Compute(OpKernelContext* context) override {
class ThreeInterpolateOp (line 191) | class ThreeInterpolateOp: public OpKernel{
method ThreeInterpolateOp (line 193) | explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(c...
method Compute (line 195) | void Compute(OpKernelContext * context) override {
class ThreeInterpolateGradOp (line 225) | class ThreeInterpolateGradOp: public OpKernel{
method ThreeInterpolateGradOp (line 227) | explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKern...
method Compute (line 229) | void Compute(OpKernelContext * context) override {
FILE: pointnet2_tf/tf_ops/3d_interpolation/tf_interpolate.py
function three_nn (line 8) | def three_nn(xyz1, xyz2):
function three_interpolate (line 19) | def three_interpolate(points, idx, weight):
function _three_interpolate_grad (line 30) | def _three_interpolate_grad(op, grad_out):
FILE: pointnet2_tf/tf_ops/3d_interpolation/tf_interpolate_op_test.py
class GroupPointTest (line 5) | class GroupPointTest(tf.test.TestCase):
method test (line 6) | def test(self):
method test_grad (line 9) | def test_grad(self):
FILE: pointnet2_tf/tf_ops/3d_interpolation/visu_interpolation.py
function fun (line 16) | def fun(xyz1,xyz2,pts2):
FILE: pointnet2_tf/tf_ops/grouping/test/query_ball_point.cpp
function randomf (line 9) | float randomf(){
function get_time (line 12) | static double get_time(){
function query_ball_point_cpu (line 19) | void query_ball_point_cpu(int b, int n, int m, float radius, int nsample...
function group_point_cpu (line 52) | void group_point_cpu(int b, int n, int c, int m, int nsample, const floa...
function group_point_grad_cpu (line 70) | void group_point_grad_cpu(int b, int n, int c, int m, int nsample, const...
function main (line 86) | int main()
FILE: pointnet2_tf/tf_ops/grouping/test/selection_sort.cpp
function randomf (line 9) | float randomf(){
function get_time (line 12) | static double get_time(){
function selection_sort_cpu (line 20) | void selection_sort_cpu(int b, int n, int m, int k, const float *dist, i...
function main (line 65) | int main()
FILE: pointnet2_tf/tf_ops/grouping/tf_grouping.cpp
class QueryBallPointGpuOp (line 67) | class QueryBallPointGpuOp : public OpKernel {
method QueryBallPointGpuOp (line 69) | explicit QueryBallPointGpuOp(OpKernelConstruction* context) : OpKernel...
method Compute (line 77) | void Compute(OpKernelContext* context) override {
class SelectionSortGpuOp (line 109) | class SelectionSortGpuOp : public OpKernel {
method SelectionSortGpuOp (line 111) | explicit SelectionSortGpuOp(OpKernelConstruction* context) : OpKernel(...
method Compute (line 116) | void Compute(OpKernelContext* context) override {
class GroupPointGpuOp (line 143) | class GroupPointGpuOp: public OpKernel{
method GroupPointGpuOp (line 145) | explicit GroupPointGpuOp(OpKernelConstruction * context):OpKernel(cont...
method Compute (line 147) | void Compute(OpKernelContext * context) override {
class GroupPointGradGpuOp (line 174) | class GroupPointGradGpuOp: public OpKernel{
method GroupPointGradGpuOp (line 176) | explicit GroupPointGradGpuOp(OpKernelConstruction * context):OpKernel(...
method Compute (line 178) | void Compute(OpKernelContext * context) override {
FILE: pointnet2_tf/tf_ops/grouping/tf_grouping.py
function query_ball_point (line 8) | def query_ball_point(radius, nsample, xyz1, xyz2):
function select_top_k (line 22) | def select_top_k(k, dist):
function group_point (line 33) | def group_point(points, idx):
function _group_point_grad (line 43) | def _group_point_grad(op, grad_out):
function knn_point (line 48) | def knn_point(k, xyz1, xyz2):
FILE: pointnet2_tf/tf_ops/grouping/tf_grouping_op_test.py
class GroupPointTest (line 5) | class GroupPointTest(tf.test.TestCase):
method test (line 6) | def test(self):
method test_grad (line 9) | def test_grad(self):
FILE: pointnet2_tf/tf_ops/sampling/tf_sampling.cpp
class ProbSampleGpuOp (line 66) | class ProbSampleGpuOp: public OpKernel{
method ProbSampleGpuOp (line 68) | explicit ProbSampleGpuOp(OpKernelConstruction* context):OpKernel(conte...
method Compute (line 69) | void Compute(OpKernelContext * context)override{
class FarthestPointSampleGpuOp (line 95) | class FarthestPointSampleGpuOp: public OpKernel{
method FarthestPointSampleGpuOp (line 97) | explicit FarthestPointSampleGpuOp(OpKernelConstruction* context):OpKer...
method Compute (line 101) | void Compute(OpKernelContext * context)override{
class GatherPointGpuOp (line 126) | class GatherPointGpuOp: public OpKernel{
method GatherPointGpuOp (line 128) | explicit GatherPointGpuOp(OpKernelConstruction * context):OpKernel(con...
method Compute (line 129) | void Compute(OpKernelContext * context)override{
class GatherPointGradGpuOp (line 151) | class GatherPointGradGpuOp: public OpKernel{
method GatherPointGradGpuOp (line 153) | explicit GatherPointGradGpuOp(OpKernelConstruction * context):OpKernel...
method Compute (line 154) | void Compute(OpKernelContext * context)override{
FILE: pointnet2_tf/tf_ops/sampling/tf_sampling.py
function prob_sample (line 13) | def prob_sample(inp,inpr):
function gather_point (line 29) | def gather_point(inp,idx):
function _gather_point_grad (line 44) | def _gather_point_grad(op,out_g):
function farthest_point_sample (line 48) | def farthest_point_sample(npoint,inp):
FILE: pointnet2_tf/train.py
function log_string (line 81) | def log_string(out_str):
function get_learning_rate (line 86) | def get_learning_rate(batch):
function get_bn_decay (line 96) | def get_bn_decay(batch):
function train (line 106) | def train():
function train_one_epoch (line 186) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 229) | def eval_one_epoch(sess, ops, test_writer):
FILE: pointnet2_tf/train_multi_gpu.py
function log_string (line 86) | def log_string(out_str):
function average_gradients (line 91) | def average_gradients(tower_grads):
function get_learning_rate (line 129) | def get_learning_rate(batch):
function get_bn_decay (line 139) | def get_bn_decay(batch):
function train (line 149) | def train():
function train_one_epoch (line 260) | def train_one_epoch(sess, ops, train_writer):
function eval_one_epoch (line 303) | def eval_one_epoch(sess, ops, test_writer):
FILE: pointnet2_tf/utils/pc_util.py
function point_cloud_to_volume_batch (line 24) | def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flat...
function point_cloud_to_volume (line 41) | def point_cloud_to_volume(points, vsize, radius=1.0):
function volume_to_point_cloud (line 56) | def volume_to_point_cloud(vol):
function point_cloud_to_volume_v2_batch (line 73) | def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, n...
function point_cloud_to_volume_v2 (line 84) | def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
function point_cloud_to_image_batch (line 126) | def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sa...
function point_cloud_to_image (line 138) | def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
function read_ply (line 176) | def read_ply(filename):
function write_ply (line 184) | def write_ply(points, filename, text=True):
function draw_point_cloud (line 196) | def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
function point_cloud_three_views (line 255) | def point_cloud_three_views(points):
function point_cloud_three_views_demo (line 269) | def point_cloud_three_views_demo():
function pyplot_draw_point_cloud (line 281) | def pyplot_draw_point_cloud(points, output_filename):
function pyplot_draw_volume (line 292) | def pyplot_draw_volume(vol, output_filename):
function write_ply_color (line 299) | def write_ply_color(points, labels, out_filename, num_classes=None):
FILE: pointnet2_tf/utils/pointnet_util.py
function sample_and_group (line 22) | def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, us...
function sample_and_group_all (line 59) | def sample_and_group_all(xyz, points, use_xyz=True):
function pointnet_sa_module (line 87) | def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, ...
function pointnet_sa_module_msg (line 156) | def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_lis...
function pointnet_fp_module (line 199) | def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, b...
FILE: pointnet2_tf/utils/provider.py
function shuffle_data (line 8) | def shuffle_data(data, labels):
function shuffle_points (line 20) | def shuffle_points(batch_data):
function rotate_point_cloud (line 32) | def rotate_point_cloud(batch_data):
function rotate_point_cloud_z (line 52) | def rotate_point_cloud_z(batch_data):
function rotate_point_cloud_with_normal (line 72) | def rotate_point_cloud_with_normal(batch_xyz_normal):
function rotate_perturbation_point_cloud_with_normal (line 92) | def rotate_perturbation_point_cloud_with_normal(batch_data, angle_sigma=...
function rotate_point_cloud_by_angle (line 119) | def rotate_point_cloud_by_angle(batch_data, rotation_angle):
function rotate_point_cloud_by_angle_with_normal (line 138) | def rotate_point_cloud_by_angle_with_normal(batch_data, rotation_angle):
function rotate_perturbation_point_cloud (line 162) | def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_...
function jitter_point_cloud (line 187) | def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
function shift_point_cloud (line 200) | def shift_point_cloud(batch_data, shift_range=0.1):
function random_scale_point_cloud (line 214) | def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
function random_point_dropout (line 227) | def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
function getDataFiles (line 237) | def getDataFiles(list_filename):
function load_h5 (line 240) | def load_h5(h5_filename):
function loadDataFile (line 246) | def loadDataFile(filename):
FILE: pointnet2_tf/utils/render_balls_so.cpp
type PointInfo (line 7) | struct PointInfo{
function render_ball (line 14) | void render_ball(int h,int w,unsigned char * show,int n,int * xyzs,float...
FILE: pointnet2_tf/utils/show3d_balls.py
function onmouse (line 12) | def onmouse(*args):
function showpoints (line 25) | def showpoints(xyz,c_gt=None, c_pred = None ,waittime=0,showrot=False,ma...
FILE: pointnet2_tf/utils/tf_util.py
function _variable_on_cpu (line 10) | def _variable_on_cpu(name, shape, initializer, use_fp16=False):
function _variable_with_weight_decay (line 24) | def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
function conv1d (line 52) | def conv1d(inputs,
function conv2d (line 120) | def conv2d(inputs,
function conv2d_transpose (line 188) | def conv2d_transpose(inputs,
function conv3d (line 268) | def conv3d(inputs,
function fully_connected (line 327) | def fully_connected(inputs,
function max_pool2d (line 366) | def max_pool2d(inputs,
function avg_pool2d (line 391) | def avg_pool2d(inputs,
function max_pool3d (line 417) | def max_pool3d(inputs,
function avg_pool3d (line 442) | def avg_pool3d(inputs,
function batch_norm_template_unused (line 468) | def batch_norm_template_unused(inputs, is_training, scope, moments_dims,...
function batch_norm_template (line 512) | def batch_norm_template(inputs, is_training, scope, moments_dims_unused,...
function batch_norm_for_fc (line 534) | def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
function batch_norm_for_conv1d (line 548) | def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, data_for...
function batch_norm_for_conv2d (line 565) | def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, data_for...
function batch_norm_for_conv3d (line 580) | def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
function dropout (line 594) | def dropout(inputs,
FILE: pointnet_pyt/pointnet/dataset.py
function get_segmentation_classes (line 12) | def get_segmentation_classes(root):
function gen_modelnet_id (line 46) | def gen_modelnet_id(root):
class ShapeNetDataset (line 56) | class ShapeNetDataset(data.Dataset):
method __init__ (line 57) | def __init__(self,
method __getitem__ (line 109) | def __getitem__(self, index):
method __len__ (line 140) | def __len__(self):
class ModelNetDataset (line 143) | class ModelNetDataset(data.Dataset):
method __init__ (line 144) | def __init__(self,
method __getitem__ (line 167) | def __getitem__(self, index):
method __len__ (line 191) | def __len__(self):
FILE: pointnet_pyt/pointnet/model.py
class STN3d (line 11) | class STN3d(nn.Module):
method __init__ (line 12) | def __init__(self):
method forward (line 29) | def forward(self, x):
class STNkd (line 49) | class STNkd(nn.Module):
method __init__ (line 50) | def __init__(self, k=64):
method forward (line 68) | def forward(self, x):
class PointNetfeat (line 87) | class PointNetfeat(nn.Module):
method __init__ (line 88) | def __init__(self, global_feat = True, feature_transform = False):
method forward (line 102) | def forward(self, x):
class PointNetCls (line 129) | class PointNetCls(nn.Module):
method __init__ (line 130) | def __init__(self, k=2, feature_transform=False):
method forward (line 142) | def forward(self, x):
class PointNetDenseCls (line 150) | class PointNetDenseCls(nn.Module):
method __init__ (line 151) | def __init__(self, k = 2, feature_transform=False):
method forward (line 164) | def forward(self, x):
function feature_transform_regularizer (line 177) | def feature_transform_regularizer(trans):
FILE: pointnet_pyt/utils/render_balls_so.cpp
type PointInfo (line 7) | struct PointInfo{
function render_ball (line 14) | void render_ball(int h,int w,unsigned char * show,int n,int * xyzs,float...
FILE: pointnet_pyt/utils/show3d_balls.py
function onmouse (line 10) | def onmouse(*args):
function showpoints (line 24) | def showpoints(xyz,c_gt=None, c_pred = None, waittime=0,
FILE: rs_cnn/data/ModelNet40Loader.py
function _get_data_files (line 9) | def _get_data_files(list_filename):
function _load_data_file (line 13) | def _load_data_file(name):
class ModelNet40Cls (line 19) | class ModelNet40Cls(data.Dataset):
method __init__ (line 20) | def __init__(self, num_points, root, data_file, transforms=None, train...
method __getitem__ (line 47) | def __getitem__(self, idx):
method __len__ (line 60) | def __len__(self):
FILE: rs_cnn/data/ShapeNetPartLoader.py
function pc_normalize (line 12) | def pc_normalize(pc):
class ShapeNetPart (line 20) | class ShapeNetPart():
method __init__ (line 21) | def __init__(self, root, num_points = 2048, split='train', normalize=T...
method __getitem__ (line 87) | def __getitem__(self, index):
method __len__ (line 102) | def __len__(self):
method preload (line 105) | def preload(self):
FILE: rs_cnn/data/data_utils.py
class PointcloudToTensor (line 4) | class PointcloudToTensor(object):
method __call__ (line 5) | def __call__(self, points):
function angle_axis (line 8) | def angle_axis(angle: float, axis: np.ndarray):
class PointcloudRotatebyAngle (line 39) | class PointcloudRotatebyAngle(object):
method __init__ (line 40) | def __init__(self, rotation_angle = 0.0):
method __call__ (line 43) | def __call__(self, pc):
class PointcloudJitter (line 67) | class PointcloudJitter(object):
method __init__ (line 68) | def __init__(self, std=0.01, clip=0.05):
method __call__ (line 71) | def __call__(self, pc):
class PointcloudScaleAndTranslate (line 81) | class PointcloudScaleAndTranslate(object):
method __init__ (line 82) | def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_ra...
method __call__ (line 87) | def __call__(self, pc):
class PointcloudScale (line 97) | class PointcloudScale(object):
method __init__ (line 98) | def __init__(self, scale_low=2. / 3., scale_high=3. / 2.):
method __call__ (line 102) | def __call__(self, pc):
class PointcloudTranslate (line 111) | class PointcloudTranslate(object):
method __init__ (line 112) | def __init__(self, translate_range=0.2):
method __call__ (line 115) | def __call__(self, pc):
class PointcloudRandomInputDropout (line 124) | class PointcloudRandomInputDropout(object):
method __init__ (line 125) | def __init__(self, max_dropout_ratio=0.875):
method __call__ (line 129) | def __call__(self, pc):
FILE: rs_cnn/models/rscnn_msn_seg.py
class RSCNN_MSN (line 12) | class RSCNN_MSN(nn.Module):
method __init__ (line 28) | def __init__(self, num_classes, input_channels=0, relation_prior=1, us...
method _break_up_pc (line 116) | def _break_up_pc(self, pc):
method forward (line 125) | def forward(self, pointcloud: torch.cuda.FloatTensor, cls):
FILE: rs_cnn/models/rscnn_ssn_cls.py
class RSCNN_SSN (line 13) | class RSCNN_SSN(nn.Module):
method __init__ (line 29) | def __init__(self, num_classes, input_channels=0, relation_prior=1, us...
method _break_up_pc (line 74) | def _break_up_pc(self, pc):
method forward (line 82) | def forward(self, pointcloud: torch.cuda.FloatTensor):
FILE: rs_cnn/train_cls.py
function main (line 34) | def main():
function train (line 95) | def train(train_dataloader, test_dataloader, model, criterion, optimizer...
function validate (line 135) | def validate(test_dataloader, model, criterion, args, iter):
FILE: rs_cnn/train_partseg.py
function main (line 33) | def main():
function train (line 95) | def train(train_dataloader, test_dataloader, model, criterion, optimizer...
function validate (line 138) | def validate(test_dataloader, model, criterion, args, iter):
FILE: rs_cnn/utils/_ext/pointnet2/__init__.py
function _import_symbols (line 6) | def _import_symbols(locals):
FILE: rs_cnn/utils/build_ffi.py
function parse_args (line 10) | def parse_args():
function build (line 26) | def build(args):
function clean (line 45) | def clean(args):
FILE: rs_cnn/utils/cinclude/cuda_utils.h
function opt_n_threads (line 8) | inline int opt_n_threads(int work_size) {
function dim3 (line 14) | inline dim3 opt_block_config(int x, int y) {
FILE: rs_cnn/utils/csrc/ball_query.c
function ball_query_wrapper (line 7) | int ball_query_wrapper(int b, int n, int m, float radius, int nsample,
FILE: rs_cnn/utils/csrc/group_points.c
function group_points_wrapper (line 7) | int group_points_wrapper(int b, int c, int n, int npoints, int nsample,
function group_points_grad_wrapper (line 23) | int group_points_grad_wrapper(int b, int c, int n, int npoints, int nsam...
FILE: rs_cnn/utils/csrc/interpolate.c
function three_nn_wrapper (line 10) | void three_nn_wrapper(int b, int n, int m, THCudaTensor *unknown_tensor,
function three_interpolate_wrapper (line 22) | void three_interpolate_wrapper(int b, int c, int m, int n,
function three_interpolate_grad_wrapper (line 38) | void three_interpolate_grad_wrapper(int b, int c, int n, int m,
FILE: rs_cnn/utils/csrc/sampling.c
function gather_points_wrapper (line 7) | int gather_points_wrapper(int b, int c, int n, int npoints,
function gather_points_grad_wrapper (line 22) | int gather_points_grad_wrapper(int b, int c, int n, int npoints,
function furthest_point_sampling_wrapper (line 38) | int furthest_point_sampling_wrapper(int b, int n, int m,
FILE: rs_cnn/utils/linalg_utils.py
function pdist2 (line 7) | def pdist2(
function pdist2_slow (line 61) | def pdist2_slow(X, Z=None):
FILE: rs_cnn/utils/pointnet2_modules.py
class _PointnetSAModuleBase (line 12) | class _PointnetSAModuleBase(nn.Module):
method __init__ (line 14) | def __init__(self):
method forward (line 20) | def forward(self, xyz: torch.Tensor,
class PointnetSAModuleMSG (line 59) | class PointnetSAModuleMSG(_PointnetSAModuleBase):
method __init__ (line 76) | def __init__(
class PointnetSAModule (line 155) | class PointnetSAModule(PointnetSAModuleMSG):
method __init__ (line 172) | def __init__(
class PointnetFPModule (line 190) | class PointnetFPModule(nn.Module):
method __init__ (line 201) | def __init__(self, *, mlp: List[int], bn: bool = True):
method forward (line 205) | def forward(
FILE: rs_cnn/utils/pointnet2_modules_updated.py
class QueryAndGroup (line 13) | class QueryAndGroup(nn.Module):
method __init__ (line 24) | def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
method forward (line 28) | def forward(
class GroupAll (line 73) | class GroupAll(nn.Module):
method __init__ (line 80) | def __init__(self, use_xyz: bool = True):
method forward (line 84) | def forward(
class _PointnetSAModuleBase (line 118) | class _PointnetSAModuleBase(nn.Module):
method __init__ (line 120) | def __init__(self):
method forward (line 126) | def forward(self, xyz: torch.Tensor,
class PointnetSAModuleMSG (line 164) | class PointnetSAModuleMSG(_PointnetSAModuleBase):
method __init__ (line 180) | def __init__(
class PointnetSAModule (line 259) | class PointnetSAModule(PointnetSAModuleMSG):
method __init__ (line 275) | def __init__(
class PointnetFPModule (line 293) | class PointnetFPModule(nn.Module):
method __init__ (line 303) | def __init__(self, *, mlp: List[int], bn: bool = True):
method forward (line 307) | def forward(
FILE: rs_cnn/utils/pointnet2_utils.py
class RandomDropout (line 14) | class RandomDropout(nn.Module):
method __init__ (line 16) | def __init__(self, p=0.5, inplace=False):
method forward (line 21) | def forward(self, X):
class FurthestPointSampling (line 28) | class FurthestPointSampling(Function):
method forward (line 31) | def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
method backward (line 60) | def backward(xyz, a=None):
class GatherOperation (line 67) | class GatherOperation(Function):
method forward (line 70) | def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.T...
method backward (line 103) | def backward(ctx, grad_out):
class ThreeNN (line 119) | class ThreeNN(Function):
method forward (line 122) | def forward(ctx, unknown: torch.Tensor,
method backward (line 153) | def backward(ctx, a=None, b=None):
class ThreeInterpolate (line 160) | class ThreeInterpolate(Function):
method forward (line 163) | def forward(
method backward (line 200) | def backward(ctx, grad_out: torch.Tensor
class GroupingOperation (line 233) | class GroupingOperation(Function):
method forward (line 236) | def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.T...
method backward (line 267) | def backward(ctx,
class BallQuery (line 298) | class BallQuery(Function):
method forward (line 301) | def forward(
method backward (line 337) | def backward(ctx, a=None):
class QueryAndGroup (line 344) | class QueryAndGroup(nn.Module):
method __init__ (line 356) | def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
method forward (line 360) | def forward(
class GroupAll (line 405) | class GroupAll(nn.Module):
method __init__ (line 413) | def __init__(self, use_xyz: bool = True):
method forward (line 417) | def forward(
FILE: rs_cnn/utils/pytorch_utils/pytorch_utils.py
class RSConv (line 15) | class RSConv(nn.Module):
method __init__ (line 20) | def __init__(
method forward (line 46) | def forward(self, input): # input: (B, 3 + 3 + C_in, npoint, centroid ...
class RSConvLayer (line 75) | class RSConvLayer(nn.Sequential):
method __init__ (line 77) | def __init__(
class SharedRSConv (line 100) | class SharedRSConv(nn.Sequential):
method __init__ (line 102) | def __init__(
class GloAvgConv (line 132) | class GloAvgConv(nn.Module):
method __init__ (line 137) | def __init__(
method forward (line 156) | def forward(self, x):
class SharedMLP (line 166) | class SharedMLP(nn.Sequential):
method __init__ (line 168) | def __init__(
class _BNBase (line 194) | class _BNBase(nn.Sequential):
method __init__ (line 196) | def __init__(self, in_size, batch_norm=None, name=""):
class BatchNorm1d (line 204) | class BatchNorm1d(_BNBase):
method __init__ (line 206) | def __init__(self, in_size: int, *, name: str = ""):
class BatchNorm2d (line 210) | class BatchNorm2d(_BNBase):
method __init__ (line 212) | def __init__(self, in_size: int, name: str = ""):
class BatchNorm3d (line 216) | class BatchNorm3d(_BNBase):
method __init__ (line 218) | def __init__(self, in_size: int, name: str = ""):
class _ConvBase (line 222) | class _ConvBase(nn.Sequential):
method __init__ (line 224) | def __init__(
class Conv1d (line 278) | class Conv1d(_ConvBase):
method __init__ (line 280) | def __init__(
class Conv2d (line 312) | class Conv2d(_ConvBase):
method __init__ (line 314) | def __init__(
class Conv3d (line 346) | class Conv3d(_ConvBase):
method __init__ (line 348) | def __init__(
class FC (line 380) | class FC(nn.Sequential):
method __init__ (line 382) | def __init__(
class _DropoutNoScaling (line 418) | class _DropoutNoScaling(InplaceFunction):
method _make_noise (line 421) | def _make_noise(input):
method symbolic (line 425) | def symbolic(g, input, p=0.5, train=False, inplace=False):
method forward (line 437) | def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
method backward (line 465) | def backward(ctx, grad_output):
class _FeatureDropoutNoScaling (line 475) | class _FeatureDropoutNoScaling(_DropoutNoScaling):
method symbolic (line 478) | def symbolic(input, p=0.5, train=False, inplace=False):
method _make_noise (line 482) | def _make_noise(input):
function group_model_params (line 492) | def group_model_params(model: nn.Module):
function checkpoint_state (line 511) | def checkpoint_state(model=None, optimizer=None, best_prec=None, epoch=N...
function save_checkpoint (line 529) | def save_checkpoint(
function load_checkpoint (line 538) | def load_checkpoint(model=None, optimizer=None, filename='checkpoint'):
function variable_size_collate (line 556) | def variable_size_collate(pad_val=0, use_shared_memory=True):
class TrainValSplitter (line 626) | class TrainValSplitter():
method __init__ (line 639) | def __init__(
class CrossValSplitter (line 654) | class CrossValSplitter():
method __init__ (line 670) | def __init__(self, *, numel: int, k_folds: int, shuffled: bool = False):
method __iter__ (line 685) | def __iter__(self):
method __len__ (line 689) | def __len__(self):
method __getitem__ (line 692) | def __getitem__(self, idx):
method __next__ (line 699) | def __next__(self):
method update_metrics (line 706) | def update_metrics(self, to_post: dict):
method print_metrics (line 713) | def print_metrics(self):
function set_bn_momentum_default (line 722) | def set_bn_momentum_default(bn_momentum):
class BNMomentumScheduler (line 731) | class BNMomentumScheduler(object):
method __init__ (line 733) | def __init__(
method step (line 751) | def step(self, epoch=None):
method get_momentum (line 758) | def get_momentum(self, epoch=None):
FILE: rs_cnn/voting_evaluate_cls.py
function main (line 38) | def main():
FILE: rs_cnn/voting_evaluate_partseg.py
function main (line 37) | def main():
FILE: third_party/bn_helper.py
function configure_model (line 4) | def configure_model(model, eps, momentum, reset_stats, no_stats):
FILE: third_party/tent_helper.py
function collect_params (line 6) | def collect_params(model):
function softmax_entropy (line 23) | def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
function configure_model (line 27) | def configure_model(model,eps, momentum):
function forward_and_adapt (line 47) | def forward_and_adapt(x, model, optimizer):
FILE: visualize/examples.py
function build_examples (line 14) | def build_examples(example_ids, severity):
function load_examples (line 30) | def load_examples():
function rotation_matrix (line 36) | def rotation_matrix(pitch, yaw, roll):
function draw_one_example (line 49) | def draw_one_example(example, rotate=[0, 0], scale=1, window_width=1080,...
function draw_one_example_colorful (line 88) | def draw_one_example_colorful(example, save="test.png"):
function draw_examples (line 106) | def draw_examples(tag, examples, colorful=False):
FILE: visualize/main_results.py
function format_data (line 17) | def format_data():
function update_font (line 79) | def update_font(font):
function load_data (line 88) | def load_data():
function draw_train_mode_comparison (line 92) | def draw_train_mode_comparison(df=None, figure_path=None):
function draw_model_comparison (line 129) | def draw_model_comparison(df=None, figure_path=None, mode="train", metri...
function draw_corruption_comparison (line 178) | def draw_corruption_comparison(df=None, figure_path=None):
function get_best_model (line 207) | def get_best_model(df=None):
function get_best_train_mode (line 234) | def get_best_train_mode(df=None):
function get_corruption_tables (line 264) | def get_corruption_tables(df=None, metric="acc"):
function draw_teaser (line 294) | def draw_teaser(df=None):
function get_table_1 (line 334) | def get_table_1(df=None):
function get_table_2 (line 349) | def get_table_2():
function get_table_3 (line 372) | def get_table_3():
function get_table_4 (line 405) | def get_table_4():
function draw_severity_comparison (line 437) | def draw_severity_comparison():
function draw_test_adaptation (line 498) | def draw_test_adaptation():
FILE: visualize/pointflow_fig_colorful.py
function standardize_bbox (line 3) | def standardize_bbox(pcl, points_per_object):
function colormap (line 84) | def colormap(x,y,z):
function colorful_pcd (line 92) | def colorful_pcd(pcd_data, output_file):
Condensed preview — 461 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,960K chars).
[
{
"path": ".gitignore",
"chars": 3348,
"preview": "*__pycache__/\ndata/modelnet40_ply_hdf5_2048\ndata/ModelNet40\ndata/modelnet40_c\nruns/\npretrained/\ncor_exp/\n*.out\n/output\n\n"
},
{
"path": ".gitmodules",
"chars": 190,
"preview": "[submodule \"PyGeM\"]\n\tpath = PyGeM\n\turl = https://github.com/mathLab/PyGeM.git\n[submodule \"visualize/mitsuba2\"]\n\tpath = v"
},
{
"path": "CurveNet/README.md",
"chars": 7748,
"preview": "# CurveNet\nOfficial implementation of \"Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis\", ICCV 2021\n\n["
},
{
"path": "CurveNet/core/data.py",
"chars": 7145,
"preview": "\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: data.py\n@Time: 2018/10/13 6:21 PM\n\nModified by \n@Author: Tiange "
},
{
"path": "CurveNet/core/main_cls.py",
"chars": 9057,
"preview": "\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: main_cls.py\n@Time: 2018/10/13 10:39 PM\n\nModified by \n@Author: Ti"
},
{
"path": "CurveNet/core/main_normal.py",
"chars": 8255,
"preview": "\"\"\"\n@Author: Tiange Xiang\n@Contact: txia7609@uni.sydney.edu.au\n@File: main_normal.py\n@Time: 2021/01/21 3:10 PM\n\"\"\"\n\n\nfro"
},
{
"path": "CurveNet/core/main_partseg.py",
"chars": 15928,
"preview": "\"\"\"\n@Author: An Tao\n@Contact: ta19@mails.tsinghua.edu.cn\n@File: main_partseg.py\n@Time: 2019/12/31 11:17 AM\n\nModified by "
},
{
"path": "CurveNet/core/models/curvenet_cls.py",
"chars": 3178,
"preview": "\"\"\"\n@Author: Tiange Xiang\n@Contact: txia7609@uni.sydney.edu.au\n@File: curvenet_cls.py\n@Time: 2021/01/21 3:10 PM\n\"\"\"\n\nimp"
},
{
"path": "CurveNet/core/models/curvenet_normal.py",
"chars": 4635,
"preview": "\"\"\"\n@Author: Tiange Xiang\n@Contact: txia7609@uni.sydney.edu.au\n@File: curvenet_normal.py\n@Time: 2021/01/21 3:10 PM\n\"\"\"\n\n"
},
{
"path": "CurveNet/core/models/curvenet_seg.py",
"chars": 6239,
"preview": "\"\"\"\n@Author: Tiange Xiang\n@Contact: txia7609@uni.sydney.edu.au\n@File: curvenet_seg.py\n@Time: 2021/01/21 3:10 PM\n\"\"\"\n\nimp"
},
{
"path": "CurveNet/core/models/curvenet_util.py",
"chars": 16637,
"preview": "\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: pointnet_util.py\n@Time: 2018/10/13 10:39 PM\n\nModified by \n@Autho"
},
{
"path": "CurveNet/core/models/walk.py",
"chars": 5859,
"preview": "\"\"\"\n@Author: Tiange Xiang\n@Contact: txia7609@uni.sydney.edu.au\n@File: walk.py\n@Time: 2021/01/21 3:10 PM\n\"\"\"\n\nimport nump"
},
{
"path": "CurveNet/core/util.py",
"chars": 949,
"preview": "\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: util\n@Time: 4/5/19 3:47 PM\n\"\"\"\n\n\nimport numpy as np\nimport torch"
},
{
"path": "GDANet/README.md",
"chars": 3505,
"preview": "# Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud. \nThis reposito"
},
{
"path": "GDANet/model/GDANet_cls.py",
"chars": 4382,
"preview": "import torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom .util.GDANet_util import local_operator, GDM,"
},
{
"path": "GDANet/model/GDANet_ptseg.py",
"chars": 5114,
"preview": "import torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom util.GDANet_util import local_operator_withno"
},
{
"path": "GDANet/model/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "GDANet/model/util/GDANet_util.py",
"chars": 7377,
"preview": "import torch\nfrom torch import nn\n\n\ndef knn(x, k):\n inner = -2*torch.matmul(x.transpose(2, 1), x)\n xx = torch.sum("
},
{
"path": "GDANet/model/util/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "GDANet/model/util/data_util.py",
"chars": 6371,
"preview": "import glob\nimport h5py\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport os\nimport json\n\n\ndef load_data(pa"
},
{
"path": "GDANet/model/util/util.py",
"chars": 2582,
"preview": "import numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef cal_loss(pred, gold, smoothing=True):\n ''' Calc"
},
{
"path": "LICENSE",
"chars": 1530,
"preview": "BSD 3-Clause License\n\nCopyright (c) 2021, University of Michigan\nAll rights reserved.\n\nRedistribution and use in source "
},
{
"path": "PCT_Pytorch/LICENSE",
"chars": 1077,
"preview": "MIT License\n\nCopyright (c) 2021 Strawberry-Eat-Mango\n\nPermission is hereby granted, free of charge, to any person obtain"
},
{
"path": "PCT_Pytorch/README.md",
"chars": 1276,
"preview": "## PCT: Point Cloud Transformer\nThis is a Pytorch implementation of PCT: Point Cloud Transformer.\n\nPaper link: https://a"
},
{
"path": "PCT_Pytorch/data.py",
"chars": 2989,
"preview": "import os\nimport glob\nimport h5py\nimport numpy as np\nfrom torch.utils.data import Dataset\n\ndef download():\n BASE_DIR "
},
{
"path": "PCT_Pytorch/main.py",
"chars": 9136,
"preview": "from __future__ import print_function\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as"
},
{
"path": "PCT_Pytorch/model.py",
"chars": 5170,
"preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .util import sample_and_group \n\nclass Local_op(n"
},
{
"path": "PCT_Pytorch/model_new.py",
"chars": 5177,
"preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom util import sample_and_group \n\nclass Local_op(nn"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/MANIFEST.in",
"chars": 29,
"preview": "graft pointnet2_ops/_ext-src\n"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/__init__.py",
"chars": 123,
"preview": "import pointnet2_ops.pointnet2_modules\nimport pointnet2_ops.pointnet2_utils\nfrom pointnet2_ops._version import __version"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/ball_query.h",
"chars": 163,
"preview": "#pragma once\n#include <torch/extension.h>\n\nat::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/cuda_utils.h",
"chars": 1303,
"preview": "#ifndef _CUDA_UTILS_H\n#define _CUDA_UTILS_H\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n#include <cmath>\n"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/group_points.h",
"chars": 183,
"preview": "#pragma once\n#include <torch/extension.h>\n\nat::Tensor group_points(at::Tensor points, at::Tensor idx);\nat::Tensor group_"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/interpolate.h",
"chars": 386,
"preview": "#pragma once\n\n#include <torch/extension.h>\n#include <vector>\n\nstd::vector<at::Tensor> three_nn(at::Tensor unknowns, at::"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/sampling.h",
"chars": 260,
"preview": "#pragma once\n#include <torch/extension.h>\n\nat::Tensor gather_points(at::Tensor points, at::Tensor idx);\nat::Tensor gathe"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/utils.h",
"chars": 983,
"preview": "#pragma once\n#include <ATen/cuda/CUDAContext.h>\n#include <torch/extension.h>\n\n#define CHECK_CUDA(x) "
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query.cpp",
"chars": 1037,
"preview": "#include \"ball_query.h\"\n#include \"utils.h\"\n\nvoid query_ball_point_kernel_wrapper(int b, int n, int m, float radius,\n "
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query_gpu.cu",
"chars": 1784,
"preview": "#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cuda_utils.h\"\n\n// input: new_xyz(b, m, 3) xyz(b, n, "
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/bindings.cpp",
"chars": 570,
"preview": "#include \"ball_query.h\"\n#include \"group_points.h\"\n#include \"interpolate.h\"\n#include \"sampling.h\"\n\nPYBIND11_MODULE(TORCH_"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points.cpp",
"chars": 1952,
"preview": "#include \"group_points.h\"\n#include \"utils.h\"\n\nvoid group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsa"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points_gpu.cu",
"chars": 2885,
"preview": "#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cuda_utils.h\"\n\n// input: points(b, c, n) idx(b, npoints, nsample)\n// o"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate.cpp",
"chars": 3304,
"preview": "#include \"interpolate.h\"\n#include \"utils.h\"\n\nvoid three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,\n "
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate_gpu.cu",
"chars": 5141,
"preview": "#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cuda_utils.h\"\n\n// input: unknown(b, n, 3) known(b, m"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling.cpp",
"chars": 2894,
"preview": "#include \"sampling.h\"\n#include \"utils.h\"\n\nvoid gather_points_kernel_wrapper(int b, int c, int n, int npoints,\n "
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling_gpu.cu",
"chars": 7019,
"preview": "#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cuda_utils.h\"\n\n// input: points(b, c, n) idx(b, m)\n// output: out(b, c"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_version.py",
"chars": 22,
"preview": "__version__ = \"3.0.0\"\n"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py",
"chars": 6530,
"preview": "from typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pointn"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py",
"chars": 10396,
"preview": "import torch\nimport torch.nn as nn\nimport warnings\nfrom torch.autograd import Function\nfrom typing import *\n\ntry:\n im"
},
{
"path": "PCT_Pytorch/pointnet2_ops_lib/setup.py",
"chars": 1185,
"preview": "import glob\nimport os\nimport os.path as osp\n\nfrom setuptools import find_packages, setup\nfrom torch.utils.cpp_extension "
},
{
"path": "PCT_Pytorch/test.sh",
"chars": 142,
"preview": "python main.py --exp_name=test --num_points=1024 --use_sgd=True --eval=True --model_path=checkpoints/best/models/model.t"
},
{
"path": "PCT_Pytorch/train.sh",
"chars": 132,
"preview": "CUDA_VISIBLE_DEVICES=0 python3.7 main.py --exp_name=train --num_points=1024 --use_sgd=True --batch_size 32 --epochs 250 "
},
{
"path": "PCT_Pytorch/util.py",
"chars": 4557,
"preview": "import torch\nimport torch.nn.functional as F\nfrom pointnet2_ops import pointnet2_utils\n\ndef cal_loss(pred, gold, smoothi"
},
{
"path": "README.md",
"chars": 16612,
"preview": "# Benchmarking Robustness of 3D Point Cloud Recognition against Common Corruptions \n[\nimport emd_module as emd\n"
},
{
"path": "configs/bn/dgcnn.yaml",
"chars": 213,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_dgcnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/bn/pct.yaml",
"chars": 235,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pct_run_1\n LOSS_NAME: smooth\n M"
},
{
"path": "configs/bn/pointnet.yaml",
"chars": 210,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet_run_1 \n LOSS_NAME: smoo"
},
{
"path": "configs/bn/pointnet2.yaml",
"chars": 205,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet2_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/bn/rscnn.yaml",
"chars": 197,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_rscnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/bn/simpleview.yaml",
"chars": 190,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_simpleview_run_1\n LOSS_NAME: smo"
},
{
"path": "configs/corruption/curvenet.yaml",
"chars": 199,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_curvenet_run_1\n LOSS_NAME: smoot"
},
{
"path": "configs/corruption/dgcnn.yaml",
"chars": 193,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_dgcnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/corruption/gdanet.yaml",
"chars": 195,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_gdanet_run_1\n LOSS_NAME: smooth\n"
},
{
"path": "configs/corruption/pct.yaml",
"chars": 216,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pct_run_1\n LOSS_NAME: smooth\n M"
},
{
"path": "configs/corruption/pointMLP.yaml",
"chars": 199,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointMLP_run_1\n LOSS_NAME: smoot"
},
{
"path": "configs/corruption/pointMLP2.yaml",
"chars": 201,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointMLP2_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/corruption/pointnet.yaml",
"chars": 191,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet_run_1 \n LOSS_NAME: smoo"
},
{
"path": "configs/corruption/pointnet2.yaml",
"chars": 186,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet2_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/corruption/rscnn.yaml",
"chars": 178,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_rscnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/corruption/simpleview.yaml",
"chars": 171,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_simpleview_run_1\n LOSS_NAME: smo"
},
{
"path": "configs/cutmix/dgcnn_k.yaml",
"chars": 250,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_dgcnn_run_1\n LOSS_NAM"
},
{
"path": "configs/cutmix/dgcnn_r.yaml",
"chars": 250,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_dgcnn_run_1\n LOSS_NAM"
},
{
"path": "configs/cutmix/pct_k.yaml",
"chars": 272,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_pct_run_1\n LOSS_NAME:"
},
{
"path": "configs/cutmix/pct_r.yaml",
"chars": 273,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_pct_run_1\n LOSS_NAME:"
},
{
"path": "configs/cutmix/pointnet2_k.yaml",
"chars": 242,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_pointnet2_run_1\n LOSS"
},
{
"path": "configs/cutmix/pointnet2_r.yaml",
"chars": 242,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_pointnet2_run_1\n LOSS"
},
{
"path": "configs/cutmix/pointnet_k.yaml",
"chars": 246,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_pointnet_run_1 \n LOSS"
},
{
"path": "configs/cutmix/pointnet_r.yaml",
"chars": 246,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_pointnet_run_1 \n LOSS"
},
{
"path": "configs/cutmix/rscnn_k.yaml",
"chars": 234,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_rscnn_run_1\n LOSS_NAM"
},
{
"path": "configs/cutmix/rscnn_r.yaml",
"chars": 234,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_rscnn_run_1\n LOSS_NAM"
},
{
"path": "configs/cutmix/simpleview_k.yaml",
"chars": 226,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_k_simpleview_run_1\n LOS"
},
{
"path": "configs/cutmix/simpleview_r.yaml",
"chars": 226,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: cutmix_r_simpleview_run_1\n LOS"
},
{
"path": "configs/dgcnn_curvenet_run_1.yaml",
"chars": 192,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_curvenet_run_1\n LOSS_NAM"
},
{
"path": "configs/dgcnn_dgcnn_0.25_run_1.yaml",
"chars": 320,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_dgcnn_0.25_valid_run_1.yaml",
"chars": 458,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_dgcnn_0.5_run_1.yaml",
"chars": 318,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_dgcnn_0.5_valid_run_1.yaml",
"chars": 456,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_dgcnn_ce_run_1.yaml",
"chars": 211,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_dgcnn_ce_run_1\n LOSS_NAM"
},
{
"path": "configs/dgcnn_dgcnn_ce_valid_run_1.yaml",
"chars": 450,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_dgcnn_run_1.yaml",
"chars": 201,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_dgcnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/dgcnn_dgcnn_valid_run_1.yaml",
"chars": 428,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_gdanet_run_1.yaml",
"chars": 188,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_gdanet_run_1\n LOSS_NAME:"
},
{
"path": "configs/dgcnn_pct_run_1.yaml",
"chars": 224,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pct_run_1\n LOSS_NAME: sm"
},
{
"path": "configs/dgcnn_pointMLP2_run_1.yaml",
"chars": 194,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointMLP2_run_1\n LOSS_NA"
},
{
"path": "configs/dgcnn_pointMLP_run_1.yaml",
"chars": 192,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointMLP_run_1\n LOSS_NAM"
},
{
"path": "configs/dgcnn_pointnet2_0.25_run_1.yaml",
"chars": 327,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_pointnet2_0.25_valid_run_1.yaml",
"chars": 465,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_pointnet2_0.5_run_1.yaml",
"chars": 325,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_pointnet2_0.5_valid_run_1.yaml",
"chars": 463,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_pointnet2_ce_run_1.yaml",
"chars": 218,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointnet2_ce_run_1\n LOSS"
},
{
"path": "configs/dgcnn_pointnet2_ce_valid_run_1.yaml",
"chars": 457,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_pointnet2_run_1.yaml",
"chars": 194,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointnet2_run_1\n LOSS_NA"
},
{
"path": "configs/dgcnn_pointnet2_valid_run_1.yaml",
"chars": 433,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_pointnet_0.25_run_1.yaml",
"chars": 317,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_pointnet_0.25_valid_run_1.yaml",
"chars": 455,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_pointnet_0.5_run_1.yaml",
"chars": 315,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_pointnet_0.5_valid_run_1.yaml",
"chars": 453,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_pointnet_ce_run_1.yaml",
"chars": 208,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointnet_ce_run_1\n LOSS_"
},
{
"path": "configs/dgcnn_pointnet_ce_valid_run_1.yaml",
"chars": 447,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_pointnet_run_1.yaml",
"chars": 198,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_pointnet_run_1\n LOSS_NAM"
},
{
"path": "configs/dgcnn_pointnet_valid_run_1.yaml",
"chars": 437,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_rscnn_0.25_run_1.yaml",
"chars": 305,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_rscnn_0.25_valid_run_1.yaml",
"chars": 443,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_rscnn_0.5_run_1.yaml",
"chars": 303,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_rscnn_0.5_valid_run_1.yaml",
"chars": 441,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_rscnn_ce_run_1.yaml",
"chars": 196,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_rscnn_ce_run_1\n LOSS_NAM"
},
{
"path": "configs/dgcnn_rscnn_ce_valid_run_1.yaml",
"chars": 435,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_rscnn_run_1.yaml",
"chars": 186,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_rscnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/dgcnn_rscnn_valid_run_1.yaml",
"chars": 425,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_simpleview_0.25_run_1.yaml",
"chars": 298,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_simpleview_0.25_valid_run_1.yaml",
"chars": 436,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_file"
},
{
"path": "configs/dgcnn_simpleview_0.5_run_1.yaml",
"chars": 296,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_simpleview_0.5_valid_run_1.yaml",
"chars": 434,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files"
},
{
"path": "configs/dgcnn_simpleview_ce_run_1.yaml",
"chars": 189,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_simpleview_ce_run_1\n LOS"
},
{
"path": "configs/dgcnn_simpleview_ce_valid_run_1.yaml",
"chars": 428,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/dgcnn_simpleview_run_1.yaml",
"chars": 179,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: dgcnn_simpleview_run_1\n LOSS_N"
},
{
"path": "configs/dgcnn_simpleview_valid_run_1.yaml",
"chars": 418,
"preview": "DATALOADER:\n MODELNET40_DGCNN:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n val"
},
{
"path": "configs/mixup/dgcnn.yaml",
"chars": 244,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_dgcnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/mixup/pct.yaml",
"chars": 267,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_pct_run_1\n LOSS_NAME: sm"
},
{
"path": "configs/mixup/pointnet.yaml",
"chars": 240,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_pointnet_run_1 \n LOSS_NA"
},
{
"path": "configs/mixup/pointnet2.yaml",
"chars": 236,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_pointnet2_run_1\n LOSS_NA"
},
{
"path": "configs/mixup/rscnn.yaml",
"chars": 228,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_rscnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/mixup/simpleview.yaml",
"chars": 252,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: mixup_simpleview_run_1 \n LOSS_"
},
{
"path": "configs/pgd/dgcnn.yaml",
"chars": 217,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: pgd_dgcnn_run_1\n LOSS_NAME: sm"
},
{
"path": "configs/pgd/pct.yaml",
"chars": 240,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: pgd_pct_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/pgd/pointnet.yaml",
"chars": 213,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: pgd_pointnet_run_1 \n LOSS_NAME"
},
{
"path": "configs/pointnet2_dgcnn_run_1.yaml",
"chars": 198,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_pn2\n EXP_ID: pointnet2_dgcnn_run_1\n LOSS_NAME"
},
{
"path": "configs/pointnet2_dgcnn_valid_run_1.yaml",
"chars": 435,
"preview": "DATALOADER:\n MODELNET40_PN2:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n valid"
},
{
"path": "configs/pointnet2_pointnet2_run_1.yaml",
"chars": 203,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_pn2\n EXP_ID: pointnet2_pointnet2_run_1\n LOSS_"
},
{
"path": "configs/pointnet2_pointnet2_valid_run_1.yaml",
"chars": 440,
"preview": "DATALOADER:\n MODELNET40_PN2:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n valid"
},
{
"path": "configs/pointnet2_pointnet_run_1.yaml",
"chars": 207,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_pn2\n EXP_ID: pointnet2_pointnet_run_1\n LOSS_N"
},
{
"path": "configs/pointnet2_pointnet_valid_run_1.yaml",
"chars": 444,
"preview": "DATALOADER:\n MODELNET40_PN2:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n valid"
},
{
"path": "configs/pointnet2_rscnn_run_1.yaml",
"chars": 195,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_pn2\n EXP_ID: pointnet2_rscnn_run_1\n LOSS_NAME"
},
{
"path": "configs/pointnet2_rscnn_valid_run_1.yaml",
"chars": 432,
"preview": "DATALOADER:\n MODELNET40_PN2:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n valid"
},
{
"path": "configs/pointnet2_simpleview_run_1.yaml",
"chars": 188,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_pn2\n EXP_ID: pointnet2_simpleview_run_1\n LOSS"
},
{
"path": "configs/pointnet2_simpleview_valid_run_1.yaml",
"chars": 425,
"preview": "DATALOADER:\n MODELNET40_PN2:\n train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt\n valid"
},
{
"path": "configs/rscnn_dgcnn_run_1.yaml",
"chars": 208,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_rscnn\n EXP_ID: rscnn_dgcnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/rscnn_pointnet2_run_1.yaml",
"chars": 215,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_rscnn\n EXP_ID: rscnn_pointnet2_run_1\n LOSS_NA"
},
{
"path": "configs/rscnn_pointnet_run_1.yaml",
"chars": 205,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_rscnn\n EXP_ID: rscnn_pointnet_run_1\n LOSS_NAM"
},
{
"path": "configs/rscnn_rscnn_run_1.yaml",
"chars": 193,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_rscnn\n EXP_ID: rscnn_rscnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/rscnn_simpleview_run_1.yaml",
"chars": 186,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_rscnn\n EXP_ID: rscnn_simpleview_run_1\n LOSS_N"
},
{
"path": "configs/rsmix/dgcnn.yaml",
"chars": 242,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_dgcnn_run_1 \n LOSS_NAME:"
},
{
"path": "configs/rsmix/pct.yaml",
"chars": 267,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_pct_run_1\n LOSS_NAME: sm"
},
{
"path": "configs/rsmix/pointnet.yaml",
"chars": 240,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_pointnet_run_1 \n LOSS_NA"
},
{
"path": "configs/rsmix/pointnet2.yaml",
"chars": 236,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_pointnet2_run_1\n LOSS_NA"
},
{
"path": "configs/rsmix/rscnn.yaml",
"chars": 228,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_rscnn_run_1\n LOSS_NAME: "
},
{
"path": "configs/rsmix/simpleview.yaml",
"chars": 252,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_dgcnn\n EXP_ID: rsmix_simpleview_run_1 \n LOSS_"
},
{
"path": "configs/tent/dgcnn.yaml",
"chars": 226,
"preview": "DATALOADER:\n batch_size: 16\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_dgcnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/tent/pct.yaml",
"chars": 248,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pct_run_1\n LOSS_NAME: smooth\n M"
},
{
"path": "configs/tent/pointnet.yaml",
"chars": 223,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet_run_1 \n LOSS_NAME: smoo"
},
{
"path": "configs/tent/pointnet2.yaml",
"chars": 218,
"preview": "DATALOADER:\n batch_size: 16\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet2_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/tent/rscnn.yaml",
"chars": 210,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_rscnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/tent/simpleview.yaml",
"chars": 203,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_simpleview_run_1\n LOSS_NAME: smo"
},
{
"path": "configs/tent_cutmix/dgcnn.yaml",
"chars": 225,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_dgcnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/tent_cutmix/pct.yaml",
"chars": 248,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pct_run_1\n LOSS_NAME: smooth\n M"
},
{
"path": "configs/tent_cutmix/pointnet.yaml",
"chars": 223,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet_run_1 \n LOSS_NAME: smoo"
},
{
"path": "configs/tent_cutmix/pointnet2.yaml",
"chars": 218,
"preview": "DATALOADER:\n batch_size: 16\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_pointnet2_run_1\n LOSS_NAME: smoo"
},
{
"path": "configs/tent_cutmix/rscnn.yaml",
"chars": 210,
"preview": "DATALOADER:\n batch_size: 32\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_rscnn_run_1\n LOSS_NAME: smooth\n "
},
{
"path": "configs/tent_cutmix/simpleview.yaml",
"chars": 203,
"preview": "DATALOADER:\n batch_size: 18\n num_workers: 0\nEXP:\n DATASET: modelnet40_c\n EXP_ID: c_simpleview_run_1\n LOSS_NAME: smo"
},
{
"path": "configs.py",
"chars": 4806,
"preview": "from yacs.config import CfgNode as CN\n\n_C = CN()\n# ---------------------------------------------------------------------"
},
{
"path": "data/convert.py",
"chars": 448,
"preview": "import open3d as o3d\n\n\ndef load_mesh(filepath):\n return o3d.io.read_triangle_mesh(filepath)\n\n\ndef export_mesh(mesh, f"
},
{
"path": "data/create_modelnet40_small.py",
"chars": 1979,
"preview": "#!/usr/bin/env python\nimport os\nimport h5py\nimport numpy as np\n\nnp.random.seed(123)\n\n\ndef main(split_size):\n modelnet"
},
{
"path": "data/create_modelnet40_valid.py",
"chars": 2522,
"preview": "#!/usr/bin/env python\nimport os\nimport h5py\nimport numpy as np\n\nnp.random.seed(123)\ndef main():\n modelnet40_dir = \"./"
},
{
"path": "data/distortion.py",
"chars": 3052,
"preview": "import pygem\nfrom pygem import FFD, RBF, IDW\nimport open3d as o3d\nimport copy\nimport numpy as np\nnp.random.seed(2021)\n\n\n"
},
{
"path": "data/generate_c.py",
"chars": 13614,
"preview": "### Generate Various Common Corruptions ###\nfrom operator import index\nimport os\nimport h5py\nimport json\nimport numpy a"
},
{
"path": "data/occlusion.py",
"chars": 5230,
"preview": "import open3d as o3d\nimport numpy as np\nfrom util import get_points, set_points, normalize, shuffle_data\n\n\n\ndef random_p"
},
{
"path": "data/process.py",
"chars": 1101,
"preview": "import os\n\nSHAPE = [\"airplane\",\n\"bathtub\",\n\"bed\",\n\"bench\",\n\"bookshelf\",\n\"bottle\",\n\"bowl\",\n\"car\",\n\"chair\",\n\"cone\",\n\"cup\","
},
{
"path": "data/util.py",
"chars": 2537,
"preview": "import open3d as o3d\nimport numpy as np\nimport copy\n\n\ndef get_points(data):\n if isinstance(data, o3d.cpu.pybind.geome"
},
{
"path": "dataloader.py",
"chars": 8028,
"preview": "import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimpo"
},
{
"path": "dgcnn/.gitignore",
"chars": 104,
"preview": "data/\nlog/\n*.pyc\n.DS_Store\npytorch/pretrained/\npytorch/checkpoints/\ntensorflow/part_seg/train_results/\n\n"
},
{
"path": "dgcnn/README.md",
"chars": 1802,
"preview": "# Dynamic Graph CNN for Learning on Point Clouds\nWe propose a new neural network module dubbed EdgeConv suitable for CNN"
},
{
"path": "dgcnn/pytorch/README.md",
"chars": 1145,
"preview": "# Dynamic Graph CNN for Learning on Point Clouds (PyTorch)\n\n## Point Cloud Classification\n* Run the training script:\n\n\n`"
},
{
"path": "dgcnn/pytorch/data.py",
"chars": 2829,
"preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: data.py\n@Time: 201"
},
{
"path": "dgcnn/pytorch/main.py",
"chars": 9913,
"preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: main.py\n@Time: 201"
},
{
"path": "dgcnn/pytorch/model.py",
"chars": 5698,
"preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: model.py\n@Time: 20"
},
{
"path": "dgcnn/pytorch/util.py",
"chars": 995,
"preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Yue Wang\n@Contact: yuewangx@mit.edu\n@File: util\n@Time: 4/5/19"
},
{
"path": "dgcnn/tensorflow/README.md",
"chars": 239,
"preview": "# Dynamic Graph CNN for Learning on Point Clouds (TensorFlow)\n\n## Point Cloud Classification\n* Run the training script:\n"
},
{
"path": "dgcnn/tensorflow/evaluate.py",
"chars": 7154,
"preview": "import tensorflow as tf\nimport numpy as np\nimport argparse\nimport socket\nimport importlib\nimport time\nimport os\nimport s"
},
{
"path": "dgcnn/tensorflow/models/dgcnn.py",
"chars": 5127,
"preview": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(_"
},
{
"path": "dgcnn/tensorflow/models/transform_nets.py",
"chars": 2273,
"preview": "import tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsy"
},
{
"path": "dgcnn/tensorflow/part_seg/README.md",
"chars": 584,
"preview": "## Part segmentation\n\n### Dataset \n\nLoad the data for part segmentation.\n\n```\nsh +x download_data.sh\n```\n\n### Train\n\nTra"
},
{
"path": "dgcnn/tensorflow/part_seg/download_data.sh",
"chars": 453,
"preview": "#!/bin/bash\n\n# Download original ShapeNetPart dataset (around 1GB) ['PartAnnotation']\nwget https://shapenet.cs.stanford."
},
{
"path": "dgcnn/tensorflow/part_seg/part_seg_model.py",
"chars": 5669,
"preview": "import tensorflow as tf\nimport numpy as np\nimport math\nimport os\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(_"
},
{
"path": "dgcnn/tensorflow/part_seg/test.py",
"chars": 8955,
"preview": "import argparse\nimport tensorflow as tf\nimport json\nimport numpy as np\nimport os\nimport sys\nBASE_DIR = os.path.dirname(o"
},
{
"path": "dgcnn/tensorflow/part_seg/testing_ply_file_list.txt",
"chars": 393014,
"preview": "03001627/points/355fa0f35b61fdd7aa74a6b5ee13e775.pts 03001627/expert_verified/points_label/355fa0f35b61fdd7aa74a6b5ee13e"
},
{
"path": "dgcnn/tensorflow/part_seg/train_multi_gpu.py",
"chars": 15192,
"preview": "import argparse\nimport subprocess\nimport tensorflow as tf\nimport numpy as np\nfrom datetime import datetime\nimport json\ni"
},
{
"path": "dgcnn/tensorflow/provider.py",
"chars": 5232,
"preview": "import os\nimport sys\nimport numpy as np\nimport h5py\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.appen"
},
{
"path": "dgcnn/tensorflow/sem_seg/README.md",
"chars": 1310,
"preview": "## Semantic segmentation of indoor scenes\n\n### Dataset\n\n1. Donwload prepared HDF5 data for training:\n```\nsh +x download_"
},
{
"path": "dgcnn/tensorflow/sem_seg/batch_inference.py",
"chars": 6758,
"preview": "import argparse\nimport os\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BA"
}
]
// ... and 261 more files (download for full content)
About this extraction
This page contains the full source code of the jiachens/ModelNet40-C GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 461 files (12.8 MB), approximately 595.6k tokens, and a symbol index with 1404 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.