Repository: jiachens/ModelNet40-C
Branch: master
Commit: fad786c430c0
Files: 461
Total size: 12.8 MB
Directory structure:
gitextract_51znd243/
├── .gitignore
├── .gitmodules
├── CurveNet/
│ ├── README.md
│ └── core/
│ ├── data.py
│ ├── main_cls.py
│ ├── main_normal.py
│ ├── main_partseg.py
│ ├── models/
│ │ ├── curvenet_cls.py
│ │ ├── curvenet_normal.py
│ │ ├── curvenet_seg.py
│ │ ├── curvenet_util.py
│ │ └── walk.py
│ └── util.py
├── GDANet/
│ ├── README.md
│ └── model/
│ ├── GDANet_cls.py
│ ├── GDANet_ptseg.py
│ ├── __init__.py
│ └── util/
│ ├── GDANet_util.py
│ ├── __init__.py
│ ├── data_util.py
│ └── util.py
├── LICENSE
├── PCT_Pytorch/
│ ├── LICENSE
│ ├── README.md
│ ├── checkpoints/
│ │ └── best/
│ │ └── models/
│ │ └── model.t7
│ ├── data.py
│ ├── main.py
│ ├── model.py
│ ├── model_new.py
│ ├── pointnet2_ops_lib/
│ │ ├── MANIFEST.in
│ │ ├── pointnet2_ops/
│ │ │ ├── __init__.py
│ │ │ ├── _ext-src/
│ │ │ │ ├── include/
│ │ │ │ │ ├── ball_query.h
│ │ │ │ │ ├── cuda_utils.h
│ │ │ │ │ ├── group_points.h
│ │ │ │ │ ├── interpolate.h
│ │ │ │ │ ├── sampling.h
│ │ │ │ │ └── utils.h
│ │ │ │ └── src/
│ │ │ │ ├── ball_query.cpp
│ │ │ │ ├── ball_query_gpu.cu
│ │ │ │ ├── bindings.cpp
│ │ │ │ ├── group_points.cpp
│ │ │ │ ├── group_points_gpu.cu
│ │ │ │ ├── interpolate.cpp
│ │ │ │ ├── interpolate_gpu.cu
│ │ │ │ ├── sampling.cpp
│ │ │ │ └── sampling_gpu.cu
│ │ │ ├── _version.py
│ │ │ ├── pointnet2_modules.py
│ │ │ └── pointnet2_utils.py
│ │ └── setup.py
│ ├── test.sh
│ ├── train.sh
│ └── util.py
├── README.md
├── all_utils.py
├── aug_utils.py
├── configs/
│ ├── bn/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── corruption/
│ │ ├── curvenet.yaml
│ │ ├── dgcnn.yaml
│ │ ├── gdanet.yaml
│ │ ├── pct.yaml
│ │ ├── pointMLP.yaml
│ │ ├── pointMLP2.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── cutmix/
│ │ ├── dgcnn_k.yaml
│ │ ├── dgcnn_r.yaml
│ │ ├── pct_k.yaml
│ │ ├── pct_r.yaml
│ │ ├── pointnet2_k.yaml
│ │ ├── pointnet2_r.yaml
│ │ ├── pointnet_k.yaml
│ │ ├── pointnet_r.yaml
│ │ ├── rscnn_k.yaml
│ │ ├── rscnn_r.yaml
│ │ ├── simpleview_k.yaml
│ │ └── simpleview_r.yaml
│ ├── dgcnn_curvenet_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_run_1.yaml
│ ├── dgcnn_dgcnn_0.25_valid_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_run_1.yaml
│ ├── dgcnn_dgcnn_0.5_valid_run_1.yaml
│ ├── dgcnn_dgcnn_ce_run_1.yaml
│ ├── dgcnn_dgcnn_ce_valid_run_1.yaml
│ ├── dgcnn_dgcnn_run_1.yaml
│ ├── dgcnn_dgcnn_valid_run_1.yaml
│ ├── dgcnn_gdanet_run_1.yaml
│ ├── dgcnn_pct_run_1.yaml
│ ├── dgcnn_pointMLP2_run_1.yaml
│ ├── dgcnn_pointMLP_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_run_1.yaml
│ ├── dgcnn_pointnet2_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_run_1.yaml
│ ├── dgcnn_pointnet2_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet2_ce_run_1.yaml
│ ├── dgcnn_pointnet2_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet2_run_1.yaml
│ ├── dgcnn_pointnet2_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.25_run_1.yaml
│ ├── dgcnn_pointnet_0.25_valid_run_1.yaml
│ ├── dgcnn_pointnet_0.5_run_1.yaml
│ ├── dgcnn_pointnet_0.5_valid_run_1.yaml
│ ├── dgcnn_pointnet_ce_run_1.yaml
│ ├── dgcnn_pointnet_ce_valid_run_1.yaml
│ ├── dgcnn_pointnet_run_1.yaml
│ ├── dgcnn_pointnet_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.25_run_1.yaml
│ ├── dgcnn_rscnn_0.25_valid_run_1.yaml
│ ├── dgcnn_rscnn_0.5_run_1.yaml
│ ├── dgcnn_rscnn_0.5_valid_run_1.yaml
│ ├── dgcnn_rscnn_ce_run_1.yaml
│ ├── dgcnn_rscnn_ce_valid_run_1.yaml
│ ├── dgcnn_rscnn_run_1.yaml
│ ├── dgcnn_rscnn_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.25_run_1.yaml
│ ├── dgcnn_simpleview_0.25_valid_run_1.yaml
│ ├── dgcnn_simpleview_0.5_run_1.yaml
│ ├── dgcnn_simpleview_0.5_valid_run_1.yaml
│ ├── dgcnn_simpleview_ce_run_1.yaml
│ ├── dgcnn_simpleview_ce_valid_run_1.yaml
│ ├── dgcnn_simpleview_run_1.yaml
│ ├── dgcnn_simpleview_valid_run_1.yaml
│ ├── mixup/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── pgd/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ └── pointnet.yaml
│ ├── pointnet2_dgcnn_run_1.yaml
│ ├── pointnet2_dgcnn_valid_run_1.yaml
│ ├── pointnet2_pointnet2_run_1.yaml
│ ├── pointnet2_pointnet2_valid_run_1.yaml
│ ├── pointnet2_pointnet_run_1.yaml
│ ├── pointnet2_pointnet_valid_run_1.yaml
│ ├── pointnet2_rscnn_run_1.yaml
│ ├── pointnet2_rscnn_valid_run_1.yaml
│ ├── pointnet2_simpleview_run_1.yaml
│ ├── pointnet2_simpleview_valid_run_1.yaml
│ ├── rscnn_dgcnn_run_1.yaml
│ ├── rscnn_pointnet2_run_1.yaml
│ ├── rscnn_pointnet_run_1.yaml
│ ├── rscnn_rscnn_run_1.yaml
│ ├── rscnn_simpleview_run_1.yaml
│ ├── rsmix/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ ├── tent/
│ │ ├── dgcnn.yaml
│ │ ├── pct.yaml
│ │ ├── pointnet.yaml
│ │ ├── pointnet2.yaml
│ │ ├── rscnn.yaml
│ │ └── simpleview.yaml
│ └── tent_cutmix/
│ ├── dgcnn.yaml
│ ├── pct.yaml
│ ├── pointnet.yaml
│ ├── pointnet2.yaml
│ ├── rscnn.yaml
│ └── simpleview.yaml
├── configs.py
├── data/
│ ├── convert.py
│ ├── create_modelnet40_small.py
│ ├── create_modelnet40_valid.py
│ ├── distortion.py
│ ├── generate_c.py
│ ├── occlusion.py
│ ├── process.py
│ └── util.py
├── dataloader.py
├── dgcnn/
│ ├── .gitignore
│ ├── README.md
│ ├── pytorch/
│ │ ├── README.md
│ │ ├── data.py
│ │ ├── main.py
│ │ ├── model.py
│ │ └── util.py
│ └── tensorflow/
│ ├── README.md
│ ├── evaluate.py
│ ├── models/
│ │ ├── dgcnn.py
│ │ └── transform_nets.py
│ ├── part_seg/
│ │ ├── README.md
│ │ ├── download_data.sh
│ │ ├── part_seg_model.py
│ │ ├── test.py
│ │ ├── testing_ply_file_list.txt
│ │ └── train_multi_gpu.py
│ ├── provider.py
│ ├── sem_seg/
│ │ ├── README.md
│ │ ├── batch_inference.py
│ │ ├── collect_indoor3d_data.py
│ │ ├── download_data.sh
│ │ ├── eval_iou_accuracy.py
│ │ ├── indoor3d_util.py
│ │ ├── meta/
│ │ │ ├── all_data_label.txt
│ │ │ ├── anno_paths.txt
│ │ │ ├── area1_data_label.txt
│ │ │ ├── area2_data_label.txt
│ │ │ ├── area3_data_label.txt
│ │ │ ├── area4_data_label.txt
│ │ │ ├── area5_data_label.txt
│ │ │ ├── area6_data_label.txt
│ │ │ └── class_names.txt
│ │ ├── model.py
│ │ ├── test_job.sh
│ │ ├── train.py
│ │ └── train_job.sh
│ ├── train.py
│ └── utils/
│ ├── data_prep_util.py
│ ├── eulerangles.py
│ ├── pc_util.py
│ ├── plyfile.py
│ └── tf_util.py
├── download.sh
├── emd/
│ ├── README.md
│ ├── emd.cpp
│ ├── emd_cuda.cu
│ ├── emd_module.py
│ └── setup.py
├── eval_cor.sh
├── eval_og.sh
├── eval_tent_cutmix.sh
├── gdrivedl.py
├── main.py
├── models/
│ ├── __init__.py
│ ├── curvenet.py
│ ├── dgcnn.py
│ ├── gdanet.py
│ ├── model_utils.py
│ ├── mv.py
│ ├── mv_utils.py
│ ├── pct.py
│ ├── pointmlp.py
│ ├── pointmlp2.py
│ ├── pointnet.py
│ ├── pointnet2.py
│ ├── resnet.py
│ └── rscnn.py
├── pc_utils.py
├── pointMLP/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ └── classification_ModelNet40/
│ ├── data.py
│ ├── helper.py
│ ├── main.py
│ ├── models/
│ │ ├── __init__.py
│ │ └── pointmlp.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── logger.py
│ │ ├── misc.py
│ │ └── progress/
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── MANIFEST.in
│ │ ├── README.rst
│ │ ├── progress/
│ │ │ ├── __init__.py
│ │ │ ├── bar.py
│ │ │ ├── counter.py
│ │ │ ├── helpers.py
│ │ │ └── spinner.py
│ │ ├── setup.py
│ │ └── test_progress.py
│ └── voting.py
├── pointnet2_pyt/
│ ├── .gitignore
│ ├── .pre-commit-config.yaml
│ ├── .travis.yml
│ ├── MANIFEST.in
│ ├── README.rst
│ ├── UNLICENSE
│ ├── __init__.py
│ ├── pointnet2/
│ │ ├── __init__.py
│ │ ├── _ext-src/
│ │ │ ├── include/
│ │ │ │ ├── ball_query.h
│ │ │ │ ├── cuda_utils.h
│ │ │ │ ├── group_points.h
│ │ │ │ ├── interpolate.h
│ │ │ │ ├── sampling.h
│ │ │ │ └── utils.h
│ │ │ └── src/
│ │ │ ├── ball_query.cpp
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── bindings.cpp
│ │ │ ├── group_points.cpp
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.cpp
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.cpp
│ │ │ └── sampling_gpu.cu
│ │ ├── data/
│ │ │ ├── .gitignore
│ │ │ ├── Indoor3DSemSegLoader.py
│ │ │ ├── ModelNet40Loader.py
│ │ │ ├── __init__.py
│ │ │ └── data_utils.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── pointnet2_msg_cls.py
│ │ │ ├── pointnet2_msg_sem.py
│ │ │ ├── pointnet2_ssg_cls.py
│ │ │ └── pointnet2_ssg_sem.py
│ │ ├── train/
│ │ │ ├── __init__.py
│ │ │ ├── train_cls.py
│ │ │ └── train_sem_seg.py
│ │ └── utils/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ └── pointnet2_utils.py
│ ├── setup.py
│ ├── tests/
│ │ ├── conftest.py
│ │ ├── test_cls_msg.py
│ │ ├── test_cls_ssg.py
│ │ ├── test_semseg_msg.py
│ │ └── test_semseg_ssg.py
│ └── tox.ini
├── pointnet2_tf/
│ ├── LICENSE
│ ├── README.md
│ ├── data/
│ │ └── README.md
│ ├── evaluate.py
│ ├── modelnet_dataset.py
│ ├── modelnet_h5_dataset.py
│ ├── models/
│ │ ├── pointnet2_cls_msg.py
│ │ ├── pointnet2_cls_ssg.py
│ │ ├── pointnet2_part_seg.py
│ │ ├── pointnet2_part_seg_msg_one_hot.py
│ │ ├── pointnet2_sem_seg.py
│ │ └── pointnet_cls_basic.py
│ ├── part_seg/
│ │ ├── command.sh
│ │ ├── command_one_hot.sh
│ │ ├── evaluate.py
│ │ ├── part_dataset.py
│ │ ├── part_dataset_all_normal.py
│ │ ├── test.py
│ │ ├── train.py
│ │ └── train_one_hot.py
│ ├── scannet/
│ │ ├── README.md
│ │ ├── pc_util.py
│ │ ├── preprocessing/
│ │ │ ├── collect_scannet_scenes.py
│ │ │ ├── demo.py
│ │ │ ├── fetch_label_names.py
│ │ │ ├── scannet-labels.combined.tsv
│ │ │ └── scannet_util.py
│ │ ├── scannet_dataset.py
│ │ ├── scene_util.py
│ │ └── train.py
│ ├── tf_ops/
│ │ ├── 3d_interpolation/
│ │ │ ├── interpolate.cpp
│ │ │ ├── tf_interpolate.cpp
│ │ │ ├── tf_interpolate.py
│ │ │ ├── tf_interpolate_compile.sh
│ │ │ ├── tf_interpolate_op_test.py
│ │ │ └── visu_interpolation.py
│ │ ├── grouping/
│ │ │ ├── .gitignore
│ │ │ ├── test/
│ │ │ │ ├── compile.sh
│ │ │ │ ├── query_ball_point.cpp
│ │ │ │ ├── query_ball_point.cu
│ │ │ │ ├── query_ball_point_block.cu
│ │ │ │ ├── query_ball_point_grid.cu
│ │ │ │ ├── selection_sort.cpp
│ │ │ │ ├── selection_sort.cu
│ │ │ │ └── selection_sort_const.cu
│ │ │ ├── tf_grouping.cpp
│ │ │ ├── tf_grouping.py
│ │ │ ├── tf_grouping_compile.sh
│ │ │ ├── tf_grouping_g.cu
│ │ │ └── tf_grouping_op_test.py
│ │ └── sampling/
│ │ ├── .gitignore
│ │ ├── tf_sampling.cpp
│ │ ├── tf_sampling.py
│ │ ├── tf_sampling_compile.sh
│ │ └── tf_sampling_g.cu
│ ├── train.py
│ ├── train_multi_gpu.py
│ └── utils/
│ ├── README.md
│ ├── compile_render_balls_so.sh
│ ├── pc_util.py
│ ├── pointnet_util.py
│ ├── provider.py
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ └── tf_util.py
├── pointnet_pyt/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ ├── misc/
│ │ ├── modelnet_id.txt
│ │ └── num_seg_classes.txt
│ ├── pointnet/
│ │ ├── __init__.py
│ │ ├── dataset.py
│ │ └── model.py
│ ├── scripts/
│ │ ├── build.sh
│ │ └── download.sh
│ ├── setup.py
│ └── utils/
│ ├── render_balls_so.cpp
│ ├── show3d_balls.py
│ ├── show_cls.py
│ ├── show_seg.py
│ ├── train_classification.py
│ └── train_segmentation.py
├── requirements.txt
├── rs_cnn/
│ ├── .gitignore
│ ├── CMakeLists.txt
│ ├── LICENSE
│ ├── README.md
│ ├── cfgs/
│ │ ├── config_msn_partseg.yaml
│ │ └── config_ssn_cls.yaml
│ ├── data/
│ │ ├── ModelNet40Loader.py
│ │ ├── ShapeNetPartLoader.py
│ │ ├── __init__.py
│ │ └── data_utils.py
│ ├── docs/
│ │ ├── _config.yml
│ │ └── index.md
│ ├── models/
│ │ ├── __init__.py
│ │ ├── rscnn_msn_seg.py
│ │ └── rscnn_ssn_cls.py
│ ├── train_cls.py
│ ├── train_cls.sh
│ ├── train_partseg.py
│ ├── train_partseg.sh
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── _ext/
│ │ │ ├── __init__.py
│ │ │ └── pointnet2/
│ │ │ └── __init__.py
│ │ ├── build_ffi.py
│ │ ├── cinclude/
│ │ │ ├── ball_query_gpu.h
│ │ │ ├── ball_query_wrapper.h
│ │ │ ├── cuda_utils.h
│ │ │ ├── group_points_gpu.h
│ │ │ ├── group_points_wrapper.h
│ │ │ ├── interpolate_gpu.h
│ │ │ ├── interpolate_wrapper.h
│ │ │ ├── sampling_gpu.h
│ │ │ └── sampling_wrapper.h
│ │ ├── csrc/
│ │ │ ├── ball_query.c
│ │ │ ├── ball_query_gpu.cu
│ │ │ ├── group_points.c
│ │ │ ├── group_points_gpu.cu
│ │ │ ├── interpolate.c
│ │ │ ├── interpolate_gpu.cu
│ │ │ ├── sampling.c
│ │ │ └── sampling_gpu.cu
│ │ ├── linalg_utils.py
│ │ ├── pointnet2_modules.py
│ │ ├── pointnet2_modules_updated.py
│ │ ├── pointnet2_utils.py
│ │ └── pytorch_utils/
│ │ ├── __init__.py
│ │ └── pytorch_utils.py
│ ├── voting_evaluate_cls.py
│ └── voting_evaluate_partseg.py
├── setup.sh
├── third_party/
│ ├── bn_helper.py
│ └── tent_helper.py
└── visualize/
├── README.md
├── config.py
├── confusion_matrix.py
├── examples.py
├── main_results.py
└── pointflow_fig_colorful.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
*__pycache__/
data/modelnet40_ply_hdf5_2048
data/ModelNet40
data/modelnet40_c
runs/
pretrained/
cor_exp/
*.out
/output
# Created by https://www.toptal.com/developers/gitignore/api/python,cuda,zsh,c++
# Edit at https://www.toptal.com/developers/gitignore?templates=python,cuda,zsh,c++
### C++ ###
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
### CUDA ###
*.i
*.ii
*.gpu
*.ptx
*.cubin
*.fatbin
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
### Zsh ###
# Zsh compiled script + zrecompile backup
*.zwc
*.zwc.old
# Zsh completion-optimization dumpfile
*zcompdump*
# Zsh zcalc history
.zcalc_history
# A popular plugin manager's files
._zinit
.zinit_lstupd
# zdharma/zshelldoc tool's files
zsdoc/data
# robbyrussell/oh-my-zsh/plugins/per-directory-history plugin's files
# (when set-up to store the history in the local directory)
.directory_history
# MichaelAquilina/zsh-autoswitch-virtualenv plugin's files
# (for Zsh plugins using Python)
# Zunit tests' output
/tests/_output/*
!/tests/_output/.gitkeep
# End of https://www.toptal.com/developers/gitignore/api/python,cuda,zsh,c++
{"mode":"full","isActive":false}
================================================
FILE: .gitmodules
================================================
[submodule "PyGeM"]
path = PyGeM
url = https://github.com/mathLab/PyGeM.git
[submodule "visualize/mitsuba2"]
path = visualize/mitsuba2
url = https://github.com/mitsuba-renderer/mitsuba2
================================================
FILE: CurveNet/README.md
================================================
# CurveNet
Official implementation of "Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis", ICCV 2021
[Papers with Code: 3D point cloud classification on ModelNet40](https://paperswithcode.com/sota/3d-point-cloud-classification-on-modelnet40?p=walk-in-the-cloud-learning-curves-for-point)
[Papers with Code: 3D part segmentation on ShapeNet-Part](https://paperswithcode.com/sota/3d-part-segmentation-on-shapenet-part?p=walk-in-the-cloud-learning-curves-for-point)
Paper: https://arxiv.org/abs/2105.01288

## Requirements
- Python>=3.7
- PyTorch>=1.2
- Packages: h5py, scikit-learn (```glob``` is part of the Python standard library)
## Contents
- [Point Cloud Classification](#point-cloud-classification)
- [Point Cloud Part Segmentation](#point-cloud-part-segmentation)
- [Point Cloud Normal Estimation](#point-cloud-normal-estimation)
**NOTE:** Please change your current directory to ```core/``` before executing the following commands.
## Point Cloud Classification
### Data
The ModelNet40 dataset is primarily used for the classification experiments. On your first run, the program will automatically download the data if it is not in ```data/```. Alternatively, you can manually download the [official data](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip) and unzip it to ```data/```.
You can also place the downloaded data anywhere you like and point ```DATA_DIR``` in ```core/data.py``` to that path; otherwise, the download will still be triggered automatically.
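For reference, this is the only edit needed; a minimal sketch, where the path below is a placeholder for wherever you keep the dataset:
```
# core/data.py -- point this at the directory containing modelnet40_ply_hdf5_2048/
DATA_DIR = '/path/to/your/data/'
```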
### Train
Train with our default settings (same as in the paper):
```
python3 main_cls.py --exp_name=curvenet_cls_1
```
Train with customized settings with the flags: ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_cls.sh``` and simply run:
```
./start_cls.sh
```
**NOTE:** Our reported model achieves **93.8%/94.2%** accuracy (see the sections below). However, due to randomness, reproducing the best result may require several training runs. Hence, we also provide a benchmark figure averaged over 5 runs with different random seeds: **93.65%** accuracy.
### Evaluation
Evaluate without voting:
```
python3 main_cls.py --exp_name=curvenet_cls_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_cls.sh``` and simply run:
```
./test_cls.sh
```
For voting, we used the ```voting_evaluate_cls.py``` script provided in [RSCNN](https://github.com/Yochengliu/Relation-Shape-CNN). Please refer to their license for usage.
### Evaluation with our pretrained model:
Please download our pretrained model ```cls/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_cls.py --exp_name=curvenet_cls_pretrained --eval=True --model_path=PATH_TO_PRETRAINED/cls/models/model.t7
```
## Point Cloud Part Segmentation
### Data
The ShapeNet Part dataset is primarily used for the part segmentation experiments. On your first run, the program will automatically download the data if it is not in ```data/```. Alternatively, you can manually download the [official data](https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip) and unzip it to ```data/```.
You can also place the downloaded data anywhere you like and point ```DATA_DIR``` in ```core/data.py``` to that path; otherwise, the download will still be triggered automatically.
### Train
Train with our default settings (same as in the paper):
```
python3 main_partseg.py --exp_name=curvenet_seg_1
```
Train with customized settings with the flags: ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_part.sh``` and simply run:
```
./start_part.sh
```
**NOTE:** Our reported model achieves **86.6%/86.8%** mIoU (see the sections below). However, due to randomness, reproducing the best result may require several training runs. Hence, we also provide a benchmark figure averaged over 5 runs with different random seeds: **86.46%** mIoU.
### Evaluation
Evaluate without voting:
```
python3 main_partseg.py --exp_name=curvenet_seg_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_cls.sh``` and simply run:
```
./test_cls.sh
```
For voting, we used the ```voting_evaluate_partseg.py``` script provided in [RSCNN](https://github.com/Yochengliu/Relation-Shape-CNN). Please refer to their license for usage.
### Evaluation with our pretrained model:
Please download our pretrained model ```partseg/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_partseg.py --exp_name=curvenet_seg_pretrained --eval=True --model_path=PATH_TO_PRETRAINED/partseg/models/model.t7
```
## Point Cloud Normal Estimation
### Data
The ModelNet40 dataset is used for the normal estimation experiments. We have preprocessed the raw ModelNet40 dataset into ```.h5``` files. Each point cloud instance contains 2048 randomly sampled points together with per-point ground-truth normals.
Please download our processed data [here](https://drive.google.com/file/d/1j6lB3ZOF0_x_l9bqdchAxIYBi7Devie8/view?usp=sharing) and place it in ```data/```, or specify the data root path in ```core/data.py```.
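As a quick sanity check, the processed files can be inspected with ```h5py```; a minimal sketch, assuming the file sits in ```data/``` as above (adjust the path otherwise). The ```xyz``` and ```normal``` keys match what ```load_data_normal``` in ```core/data.py``` reads:
```
import h5py

# inspect one split; the path is a placeholder for your data root
with h5py.File('data/modelnet40_normal/normal_train.h5', 'r') as f:
    xyz = f['xyz'][:]        # sampled points, expected shape (num_clouds, 2048, 3)
    normal = f['normal'][:]  # per-point ground-truth normals, same shape
print(xyz.shape, normal.shape)
```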
### Train
Train with our default settings (same as in the paper):
```
python3 main_normal.py --exp_name=curvenet_normal_1
```
Train with customized settings with the flags: ```--multiplier```, ```--lr```, ```--scheduler```, ```--batch_size```.
Alternatively, you can directly modify ```core/start_normal.sh``` and simply run:
```
./start_normal.sh
```
### Evaluation
Evaluate without voting:
```
python3 main_normal.py --exp_name=curvenet_normal_1 --eval=True --model_path=PATH_TO_YOUR_MODEL
```
Alternatively, you can directly modify ```core/test_normal.sh``` and simply run:
```
./test_normal.sh
```
### Evaluation with our pretrained model:
Please download our pretrained model ```normal/``` at [google drive](https://drive.google.com/drive/folders/1kX-zIipyzB0iMaopcijzdTRuHeTzfTSz?usp=sharing).
And then run:
```
python3 main_normal.py --exp_name=curvenet_normal_pretrained --eval=True --model_path=PATH_TO_PRETRAINED/normal/models/model.t7
```
## Citation
If you find this repo useful in your work or research, please cite:
```
@InProceedings{Xiang_2021_ICCV,
author = {Xiang, Tiange and Zhang, Chaoyi and Song, Yang and Yu, Jianhui and Cai, Weidong},
title = {Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {915-924}
}
```
## Acknowledgement
Our code borrows a lot from:
- [DGCNN](https://github.com/WangYueFt/dgcnn)
- [DGCNN.pytorch](https://github.com/AnTao97/dgcnn.pytorch)
- [CloserLook3D](https://github.com/zeliu98/CloserLook3D)
================================================
FILE: CurveNet/core/data.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: data.py
@Time: 2018/10/13 6:21 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/1/21 3:10 PM
"""
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
# change this to your data root
DATA_DIR = '../data/'
def download_modelnet40():
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
os.mkdir(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048'))
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def download_shapenetpart():
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
os.mkdir(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data'))
www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
os.system('rm %s' % (zipfile))
def load_data_normal(partition):
f = h5py.File(os.path.join(DATA_DIR, 'modelnet40_normal', 'normal_%s.h5'%partition), 'r+')
data = f['xyz'][:].astype('float32')
label = f['normal'][:].astype('float32')
f.close()
return data, label
def load_data_cls(partition):
download_modelnet40()
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5'%partition)):
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def load_data_partseg(partition):
download_shapenetpart()
all_data = []
all_label = []
all_seg = []
if partition == 'trainval':
file = glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*train*.h5')) \
+ glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*val*.h5'))
else:
file = glob.glob(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data', 'hdf5_data', '*%s*.h5'%partition))
for h5_name in file:
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
seg = f['pid'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_seg.append(seg)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
all_seg = np.concatenate(all_seg, axis=0)
return all_data, all_label, all_seg
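# Training-time augmentation: random anisotropic scale in [2/3, 3/2] plus a per-axis shift in [-0.2, 0.2].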
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
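# Per-point Gaussian jitter (std sigma, clipped to [-clip, clip]); defined here but not used by the Dataset classes below.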
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
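# Random rotation in the x-z plane (about the y axis); also unused below (see the commented-out call in ModelNet40).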
def rotate_pointcloud(pointcloud):
theta = np.pi*2 * np.random.uniform()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix) # random rotation (x,z)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data_cls(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = translate_pointcloud(pointcloud)
#pointcloud = rotate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
class ModelNetNormal(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data_normal(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item][:self.num_points]
if self.partition == 'train':
#pointcloud = translate_pointcloud(pointcloud)
idx = np.arange(0, pointcloud.shape[0], dtype=np.int64)
np.random.shuffle(idx)
pointcloud = self.data[item][idx]
label = self.label[item][idx]
return pointcloud, label
def __len__(self):
return self.data.shape[0]
class ShapeNetPart(Dataset):
def __init__(self, num_points=2048, partition='train', class_choice=None):
self.data, self.label, self.seg = load_data_partseg(partition)
self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
self.num_points = num_points
self.partition = partition
self.class_choice = class_choice
if self.class_choice != None:
id_choice = self.cat2id[self.class_choice]
indices = (self.label == id_choice).squeeze()
self.data = self.data[indices]
self.label = self.label[indices]
self.seg = self.seg[indices]
self.seg_num_all = self.seg_num[id_choice]
self.seg_start_index = self.index_start[id_choice]
else:
self.seg_num_all = 50
self.seg_start_index = 0
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
seg = self.seg[item][:self.num_points]
if self.partition == 'trainval':
pointcloud = translate_pointcloud(pointcloud)
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
return pointcloud, label, seg
def __len__(self):
return self.data.shape[0]
================================================
FILE: CurveNet/core/main_cls.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: main_cls.py
@Time: 2018/10/13 10:39 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from data import ModelNet40
from models.curvenet_cls import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
def _init_():
# fix random seed
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.set_printoptions(10)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# prepare file structures
if not os.path.exists('../checkpoints'):
os.makedirs('../checkpoints')
if not os.path.exists('../checkpoints/'+args.exp_name):
os.makedirs('../checkpoints/'+args.exp_name)
if not os.path.exists('../checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('../checkpoints/'+args.exp_name+'/'+'models')
os.system('cp main_cls.py ../checkpoints/'+args.exp_name+'/main_cls.py.backup')
os.system('cp models/curvenet_cls.py ../checkpoints/'+args.exp_name+'/curvenet_cls.py.backup')
def train(args, io):
train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
batch_size=args.test_batch_size, shuffle=False, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")
# create model
model = CurveNet().to(device)
model = nn.DataParallel(model)
if args.use_sgd:
io.cprint("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
io.cprint("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
if args.scheduler == 'cos':
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
elif args.scheduler == 'step':
scheduler = MultiStepLR(opt, [120, 160], gamma=0.1)
criterion = cal_loss
best_test_acc = 0
for epoch in range(args.epochs):
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
train_pred = []
train_true = []
for data, label in train_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
opt.step()
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
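# The cosine schedule steps every epoch; the step schedule stops stepping once the lr hits the 1e-5 floor enforced below.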
if args.scheduler == 'cos':
scheduler.step()
elif args.scheduler == 'step':
if opt.param_groups[0]['lr'] > 1e-5:
scheduler.step()
if opt.param_groups[0]['lr'] < 1e-5:
for param_group in opt.param_groups:
param_group['lr'] = 1e-5
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f' % (epoch, train_loss*1.0/count,
metrics.accuracy_score(
train_true, train_pred))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
loss = criterion(logits, label)
preds = logits.max(dim=1)[1]
count += batch_size
test_loss += loss.item() * batch_size
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
outstr = 'Test %d, loss: %.6f, test acc: %.6f' % (epoch, test_loss*1.0/count, test_acc)
io.cprint(outstr)
if test_acc >= best_test_acc:
best_test_acc = test_acc
torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
io.cprint('best: %.3f' % best_test_acc)
def test(args, io):
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
batch_size=args.test_batch_size, shuffle=False, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
model = CurveNet().to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_acc = 0.0
count = 0.0
test_true = []
test_pred = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
outstr = 'Test :: test acc: %.6f'%(test_acc)
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Recognition')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
choices=['modelnet40'])
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch')
parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
help='Size of test batch')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train')
parser.add_argument('--use_sgd', type=bool, default=True,
help='Use SGD')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001, 0.1 if using sgd)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--scheduler', type=str, default='cos', metavar='N',
choices=['cos', 'step'],
help='Scheduler to use, [cos, step]')
parser.add_argument('--no_cuda', type=bool, default=False,
help='disables CUDA training')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
args = parser.parse_args()
seed = np.random.randint(1, 10000)
_init_()
if args.eval:
io = IOStream('../checkpoints/' + args.exp_name + '/eval.log')
else:
io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
io.cprint('random seed is: ' + str(seed))
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
with torch.no_grad():
test(args, io)
================================================
FILE: CurveNet/core/main_normal.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: main_normal.py
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from data import ModelNetNormal
from models.curvenet_normal import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import IOStream
def _init_():
# fix random seed
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.set_printoptions(10)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# prepare file structures
if not os.path.exists('../checkpoints'):
os.makedirs('../checkpoints')
if not os.path.exists('../checkpoints/'+args.exp_name):
os.makedirs('../checkpoints/'+args.exp_name)
if not os.path.exists('../checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('../checkpoints/'+args.exp_name+'/'+'models')
os.system('cp main_normal.py ../checkpoints/'+args.exp_name+'/main_normal.py.backup')
os.system('cp models/curvenet_normal.py ../checkpoints/'+args.exp_name+'/curvenet_normal.py.backup')
def train(args, io):
train_loader = DataLoader(ModelNetNormal(args.num_points, partition='train'),
num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNetNormal(args.num_points, partition='test'),
num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
# create model
model = CurveNet(args.multiplier).to(device)
model = nn.DataParallel(model)
io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")
if args.use_sgd:
io.cprint("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
io.cprint("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
if args.scheduler == 'cos':
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
elif args.scheduler == 'step':
scheduler = MultiStepLR(opt, [140, 180], gamma=0.1)
criterion = torch.nn.CosineEmbeddingLoss()
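# CosineEmbeddingLoss with target=1 penalizes 1 - cos(pred, gt), pushing predicted normals toward the ground-truth directions.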
best_test_loss = 99
for epoch in range(args.epochs):
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
for data, seg in train_loader:
data, seg = data.to(device), seg.to(device)
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
seg_pred = model(data)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
#print(seg_pred.shape, seg.shape)
loss = criterion(seg_pred.view(-1, 3), seg.view(-1,3).squeeze(), torch.tensor(1).cuda())
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
opt.step()
count += batch_size
train_loss += loss.item() * batch_size
if args.scheduler == 'cos':
scheduler.step()
elif args.scheduler == 'step':
if opt.param_groups[0]['lr'] > 1e-5:
scheduler.step()
if opt.param_groups[0]['lr'] < 1e-5:
for param_group in opt.param_groups:
param_group['lr'] = 1e-5
outstr = 'Train %d, loss: %.6f' % (epoch, train_loss/count)
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
for data, seg in test_loader:
data, seg = data.to(device), seg.to(device)
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
seg_pred = model(data)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
loss = criterion(seg_pred.view(-1, 3), seg.view(-1,3).squeeze(), torch.tensor(1).cuda())
count += batch_size
test_loss += loss.item() * batch_size
if test_loss*1.0/count <= best_test_loss:
best_test_loss = test_loss*1.0/count
torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
outstr = 'Test %d, loss: %.6f, best loss %.6f' % (epoch, test_loss/count, best_test_loss)
io.cprint(outstr)
def test(args, io):
test_loader = DataLoader(ModelNetNormal(args.num_points, partition='test'),
batch_size=args.test_batch_size, shuffle=False, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
model = CurveNet(args.multiplier).to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
criterion = torch.nn.CosineEmbeddingLoss()
model = model.eval()
test_loss = 0.0
count = 0
for data, seg in test_loader:
data, seg = data.to(device), seg.to(device)
#print(data.shape, seg.shape)
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
seg_pred = model(data)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
loss = criterion(seg_pred.view(-1, 3), seg.view(-1,3).squeeze(), torch.tensor(1).cuda())
count += batch_size
test_loss += loss.item() * batch_size
outstr = 'Test :: test loss: %.6f' % (test_loss*1.0/count)
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Part Segmentation')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch')
parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
help='Size of test batch')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train')
parser.add_argument('--use_sgd', type=bool, default=True,
help='Use SGD')
parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
help='learning rate')
parser.add_argument('--multiplier', type=float, default=2.0, metavar='MP',
help='network expansion multiplier')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--scheduler', type=str, default='cos', metavar='N',
choices=['cos', 'step'],
help='Scheduler to use, [cos, step]')
parser.add_argument('--no_cuda', type=bool, default=False,
help='disables CUDA training')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
args = parser.parse_args()
seed = np.random.randint(1, 10000)
_init_()
io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
io.cprint('random seed is: ' + str(seed))
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
with torch.no_grad():
test(args, io)
================================================
FILE: CurveNet/core/main_partseg.py
================================================
"""
@Author: An Tao
@Contact: ta19@mails.tsinghua.edu.cn
@File: main_partseg.py
@Time: 2019/12/31 11:17 AM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR, MultiStepLR
from data import ShapeNetPart
from models.curvenet_seg import CurveNet
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
def _init_():
# fix random seed
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.set_printoptions(10)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# prepare file structures
if not os.path.exists('../checkpoints'):
os.makedirs('../checkpoints')
if not os.path.exists('../checkpoints/'+args.exp_name):
os.makedirs('../checkpoints/'+args.exp_name)
if not os.path.exists('../checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('../checkpoints/'+args.exp_name+'/'+'models')
os.system('cp main_partseg.py ../checkpoints/'+args.exp_name+'/main_partseg.py.backup')
os.system('cp models/curvenet_seg.py ../checkpoints/'+args.exp_name+'/curvenet_seg.py.backup')
def calculate_shape_IoU(pred_np, seg_np, label, class_choice, eva=False):
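# Per-shape mIoU: average IoU over the part labels of each shape's category; with eva=True, also bucket the per-shape scores by category.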
label = label.squeeze()
shape_ious = []
category = {}
for shape_idx in range(seg_np.shape[0]):
if not class_choice:
start_index = index_start[label[shape_idx]]
num = seg_num[label[shape_idx]]
parts = range(start_index, start_index + num)
else:
parts = range(seg_num[label[0]])
part_ious = []
for part in parts:
I = np.sum(np.logical_and(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
U = np.sum(np.logical_or(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
if U == 0:
iou = 1 # If the union of groundtruth and prediction points is empty, then count part IoU as 1
else:
iou = I / float(U)
part_ious.append(iou)
shape_ious.append(np.mean(part_ious))
if label[shape_idx] not in category:
category[label[shape_idx]] = [shape_ious[-1]]
else:
category[label[shape_idx]].append(shape_ious[-1])
if eva:
return shape_ious, category
else:
return shape_ious
def train(args, io):
train_dataset = ShapeNetPart(partition='trainval', num_points=args.num_points, class_choice=args.class_choice)
if (len(train_dataset) < 100):
drop_last = False
else:
drop_last = True
train_loader = DataLoader(train_dataset, num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=drop_last)
test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
io.cprint("Let's use" + str(torch.cuda.device_count()) + "GPUs!")
seg_num_all = train_loader.dataset.seg_num_all
seg_start_index = train_loader.dataset.seg_start_index
# create model
model = CurveNet().to(device)
model = nn.DataParallel(model)
if args.use_sgd:
print("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
print("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
if args.scheduler == 'cos':
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
elif args.scheduler == 'step':
scheduler = MultiStepLR(opt, [140, 180], gamma=0.1)
criterion = cal_loss
best_test_iou = 0
for epoch in range(args.epochs):
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
train_true_cls = []
train_pred_cls = []
train_true_seg = []
train_pred_seg = []
train_label_seg = []
for data, label, seg in train_loader:
seg = seg - seg_start_index
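# Encode each shape's category as a 16-way one-hot vector; it is fed to the model together with the points.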
label_one_hot = np.zeros((label.shape[0], 16))
for idx in range(label.shape[0]):
label_one_hot[idx, label[idx]] = 1
label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
seg_pred = model(data, label_one_hot)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1,1).squeeze())
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
opt.step()
pred = seg_pred.max(dim=2)[1] # (batch_size, num_points)
count += batch_size
train_loss += loss.item() * batch_size
seg_np = seg.cpu().numpy() # (batch_size, num_points)
pred_np = pred.detach().cpu().numpy() # (batch_size, num_points)
train_true_cls.append(seg_np.reshape(-1)) # (batch_size * num_points)
train_pred_cls.append(pred_np.reshape(-1)) # (batch_size * num_points)
train_true_seg.append(seg_np)
train_pred_seg.append(pred_np)
train_label_seg.append(label.reshape(-1))
if args.scheduler == 'cos':
scheduler.step()
elif args.scheduler == 'step':
if opt.param_groups[0]['lr'] > 1e-5:
scheduler.step()
if opt.param_groups[0]['lr'] < 1e-5:
for param_group in opt.param_groups:
param_group['lr'] = 1e-5
train_true_cls = np.concatenate(train_true_cls)
train_pred_cls = np.concatenate(train_pred_cls)
train_acc = metrics.accuracy_score(train_true_cls, train_pred_cls)
avg_per_class_acc = metrics.balanced_accuracy_score(train_true_cls, train_pred_cls)
train_true_seg = np.concatenate(train_true_seg, axis=0)
train_pred_seg = np.concatenate(train_pred_seg, axis=0)
train_label_seg = np.concatenate(train_label_seg)
train_ious = calculate_shape_IoU(train_pred_seg, train_true_seg, train_label_seg, args.class_choice)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f, train iou: %.6f' % (epoch,
train_loss*1.0/count,
train_acc,
avg_per_class_acc,
np.mean(train_ious))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_true_cls = []
test_pred_cls = []
test_true_seg = []
test_pred_seg = []
test_label_seg = []
for data, label, seg in test_loader:
seg = seg - seg_start_index
label_one_hot = np.zeros((label.shape[0], 16))
for idx in range(label.shape[0]):
label_one_hot[idx, label[idx]] = 1
label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
seg_pred = model(data, label_one_hot)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
loss = criterion(seg_pred.view(-1, seg_num_all), seg.view(-1,1).squeeze())
pred = seg_pred.max(dim=2)[1]
count += batch_size
test_loss += loss.item() * batch_size
seg_np = seg.cpu().numpy()
pred_np = pred.detach().cpu().numpy()
test_true_cls.append(seg_np.reshape(-1))
test_pred_cls.append(pred_np.reshape(-1))
test_true_seg.append(seg_np)
test_pred_seg.append(pred_np)
test_label_seg.append(label.reshape(-1))
test_true_cls = np.concatenate(test_true_cls)
test_pred_cls = np.concatenate(test_pred_cls)
test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
test_true_seg = np.concatenate(test_true_seg, axis=0)
test_pred_seg = np.concatenate(test_pred_seg, axis=0)
test_label_seg = np.concatenate(test_label_seg)
test_ious = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice)
outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f, test iou: %.6f, best iou %.6f' % (epoch,
test_loss*1.0/count,
test_acc,
avg_per_class_acc,
np.mean(test_ious), best_test_iou)
io.cprint(outstr)
if np.mean(test_ious) >= best_test_iou:
best_test_iou = np.mean(test_ious)
torch.save(model.state_dict(), '../checkpoints/%s/models/model.t7' % args.exp_name)
def test(args, io):
test_loader = DataLoader(ShapeNetPart(partition='test', num_points=args.num_points, class_choice=args.class_choice),
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
seg_start_index = test_loader.dataset.seg_start_index
model = CurveNet().to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_acc = 0.0
test_true_cls = []
test_pred_cls = []
test_true_seg = []
test_pred_seg = []
test_label_seg = []
category = {}
for data, label, seg in test_loader:
seg = seg - seg_start_index
label_one_hot = np.zeros((label.shape[0], 16))
for idx in range(label.shape[0]):
label_one_hot[idx, label[idx]] = 1
label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
data = data.permute(0, 2, 1)
seg_pred = model(data, label_one_hot)
seg_pred = seg_pred.permute(0, 2, 1).contiguous()
pred = seg_pred.max(dim=2)[1]
seg_np = seg.cpu().numpy()
pred_np = pred.detach().cpu().numpy()
test_true_cls.append(seg_np.reshape(-1))
test_pred_cls.append(pred_np.reshape(-1))
test_true_seg.append(seg_np)
test_pred_seg.append(pred_np)
test_label_seg.append(label.reshape(-1))
test_true_cls = np.concatenate(test_true_cls)
test_pred_cls = np.concatenate(test_pred_cls)
test_acc = metrics.accuracy_score(test_true_cls, test_pred_cls)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true_cls, test_pred_cls)
test_true_seg = np.concatenate(test_true_seg, axis=0)
test_pred_seg = np.concatenate(test_pred_seg, axis=0)
test_label_seg = np.concatenate(test_label_seg)
test_ious,category = calculate_shape_IoU(test_pred_seg, test_true_seg, test_label_seg, args.class_choice, eva=True)
outstr = 'Test :: test acc: %.6f, test avg acc: %.6f, test iou: %.6f' % (test_acc,
avg_per_class_acc,
np.mean(test_ious))
io.cprint(outstr)
results = []
for key in category.keys():
results.append((int(key), np.mean(category[key]), len(category[key])))
results.sort(key=lambda x:x[0])
for re in results:
io.cprint('idx: %d mIoU: %.3f num: %d' % (re[0], re[1], re[2]))
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Part Segmentation')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset', type=str, default='shapenetpart', metavar='N',
choices=['shapenetpart'])
parser.add_argument('--class_choice', type=str, default=None, metavar='N',
choices=['airplane', 'bag', 'cap', 'car', 'chair',
'earphone', 'guitar', 'knife', 'lamp', 'laptop',
'motor', 'mug', 'pistol', 'rocket', 'skateboard', 'table'])
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch')
parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
help='Size of test batch')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train')
parser.add_argument('--use_sgd', type=bool, default=True,
help='Use SGD')
parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005, scaled x100 when using SGD)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--scheduler', type=str, default='step', metavar='N',
choices=['cos', 'step'],
help='Scheduler to use, [cos, step]')
parser.add_argument('--no_cuda', type=bool, default=False,
help='disables CUDA training')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=2048,
help='num of points to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
args = parser.parse_args()
seed = np.random.randint(1, 10000)
_init_()
if args.eval:
io = IOStream('../checkpoints/' + args.exp_name + '/eval.log')
else:
io = IOStream('../checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
io.cprint('random seed is: ' + str(seed))
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
with torch.no_grad():
test(args, io)
================================================
FILE: CurveNet/core/models/curvenet_cls.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_cls.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None],
'long': [[10, 30], None, None, None]
}
class CurveNet(nn.Module):
def __init__(self, num_classes=40, k=20, setting='default'):
super(CurveNet, self).__init__()
assert setting in curve_config
additional_channel = 32
self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
# encoder
self.cic11 = CIC(npoint=1024, radius=0.05, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][0])
self.cic12 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][0])
self.cic21 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][1])
self.cic22 = CIC(npoint=1024, radius=0.1, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][1])
self.cic31 = CIC(npoint=256, radius=0.1, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][2])
self.cic32 = CIC(npoint=256, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][2])
self.cic41 = CIC(npoint=64, radius=0.2, k=k, in_channels=256, output_channels=512, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][3])
self.cic42 = CIC(npoint=64, radius=0.4, k=k, in_channels=512, output_channels=512, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][3])
self.conv0 = nn.Sequential(
nn.Conv1d(512, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True))
self.conv1 = nn.Linear(1024 * 2, 512, bias=False)
self.conv2 = nn.Linear(512, num_classes)
self.bn1 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=0.5)
def forward(self, xyz):
l0_points = self.lpfa(xyz, xyz)
l1_xyz, l1_points = self.cic11(xyz, l0_points)
l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
x = self.conv0(l4_points)
x_max = F.adaptive_max_pool1d(x, 1)
x_avg = F.adaptive_avg_pool1d(x, 1)
x = torch.cat((x_max, x_avg), dim=1).squeeze(-1)
x = F.relu(self.bn1(self.conv1(x).unsqueeze(-1)), inplace=True).squeeze(-1)
x = self.dp1(x)
x = self.conv2(x)
return x
================================================
FILE: CurveNet/core/models/curvenet_normal.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_normal.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None]
}
class CurveNet(nn.Module):
def __init__(self, num_classes=3, k=20, multiplier=1.0, setting='default'):
super(CurveNet, self).__init__()
assert setting in curve_config
additional_channel = 64
channels = [128, 256, 512, 1024]
channels = [int(c * multiplier) for c in channels]
self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
# encoder
self.cic11 = CIC(npoint=1024, radius=0.1, k=k, in_channels=additional_channel, output_channels=channels[0], bottleneck_ratio=2, curve_config=curve_config[setting][0])
self.cic12 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0], output_channels=channels[0], bottleneck_ratio=4, curve_config=curve_config[setting][0])
self.cic21 = CIC(npoint=256, radius=0.2, k=k, in_channels=channels[0], output_channels=channels[1], bottleneck_ratio=2, curve_config=curve_config[setting][1])
self.cic22 = CIC(npoint=256, radius=0.2, k=k, in_channels=channels[1], output_channels=channels[1], bottleneck_ratio=4, curve_config=curve_config[setting][1])
self.cic31 = CIC(npoint=64, radius=0.4, k=k, in_channels=channels[1], output_channels=channels[2], bottleneck_ratio=2, curve_config=curve_config[setting][2])
self.cic32 = CIC(npoint=64, radius=0.4, k=k, in_channels=channels[2], output_channels=channels[2], bottleneck_ratio=4, curve_config=curve_config[setting][2])
self.cic41 = CIC(npoint=16, radius=0.8, k=15, in_channels=channels[2], output_channels=channels[3], bottleneck_ratio=2, curve_config=curve_config[setting][3])
self.cic42 = CIC(npoint=16, radius=0.8, k=15, in_channels=channels[3], output_channels=channels[3], bottleneck_ratio=4, curve_config=curve_config[setting][3])
#self.cic43 = CIC(npoint=16, radius=0.8, k=15, in_channels=2048, output_channels=2048, bottleneck_ratio=4, curve_config=curve_config[setting][3])
# decoder
self.fp3 = PointNetFeaturePropagation(in_channel=channels[3] + channels[2], mlp=[channels[2], channels[2]], att=[channels[3], channels[3]//2, channels[3]//8])
self.up_cic4 = CIC(npoint=64, radius=0.8, k=k, in_channels=channels[2], output_channels=channels[2], bottleneck_ratio=4)
self.fp2 = PointNetFeaturePropagation(in_channel=channels[2] + channels[1], mlp=[channels[1], channels[1]], att=[channels[2], channels[2]//2, channels[2]//8])
self.up_cic3 = CIC(npoint=256, radius=0.4, k=k, in_channels=channels[1], output_channels=channels[1], bottleneck_ratio=4)
self.fp1 = PointNetFeaturePropagation(in_channel=channels[1] + channels[0], mlp=[channels[0], channels[0]], att=[channels[1], channels[1]//2, channels[1]//8])
self.up_cic2 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0]+3, output_channels=channels[0], bottleneck_ratio=4)
self.up_cic1 = CIC(npoint=1024, radius=0.1, k=k, in_channels=channels[0], output_channels=channels[0], bottleneck_ratio=4)
self.point_conv = nn.Sequential(
nn.Conv2d(9, additional_channel, kernel_size=1, bias=False),
nn.BatchNorm2d(additional_channel),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv1 = nn.Conv1d(channels[0], num_classes, 1)
def forward(self, xyz):
l0_points = self.lpfa(xyz, xyz)
l1_xyz, l1_points = self.cic11(xyz, l0_points)
l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
#l4_xyz, l4_points = self.cic43(l4_xyz, l4_points)
l3_points = self.fp3(l3_xyz, l4_xyz, l3_points, l4_points)
l3_xyz, l3_points = self.up_cic4(l3_xyz, l3_points)
l2_points = self.fp2(l2_xyz, l3_xyz, l2_points, l3_points)
l2_xyz, l2_points = self.up_cic3(l2_xyz, l2_points)
l1_points = self.fp1(l1_xyz, l2_xyz, l1_points, l2_points)
x = torch.cat((l1_xyz, l1_points), dim=1)
xyz, x = self.up_cic2(l1_xyz, x)
xyz, x = self.up_cic1(xyz, x)
x = self.conv1(x)
return x
================================================
FILE: CurveNet/core/models/curvenet_seg.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: curvenet_seg.py
@Time: 2021/01/21 3:10 PM
"""
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None, None]
}
class CurveNet(nn.Module):
def __init__(self, num_classes=50, category=16, k=32, setting='default'):
super(CurveNet, self).__init__()
assert setting in curve_config
additional_channel = 32
self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
# encoder
self.cic11 = CIC(npoint=2048, radius=0.2, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, curve_config=curve_config[setting][0])
self.cic12 = CIC(npoint=2048, radius=0.2, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, curve_config=curve_config[setting][0])
self.cic21 = CIC(npoint=512, radius=0.4, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, curve_config=curve_config[setting][1])
self.cic22 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, curve_config=curve_config[setting][1])
self.cic31 = CIC(npoint=128, radius=0.8, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, curve_config=curve_config[setting][2])
self.cic32 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, curve_config=curve_config[setting][2])
self.cic41 = CIC(npoint=32, radius=1.2, k=31, in_channels=256, output_channels=512, bottleneck_ratio=2, curve_config=curve_config[setting][3])
self.cic42 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4, curve_config=curve_config[setting][3])
self.cic51 = CIC(npoint=8, radius=2.0, k=7, in_channels=512, output_channels=1024, bottleneck_ratio=2, curve_config=curve_config[setting][4])
self.cic52 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
self.cic53 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
# decoder
self.fp4 = PointNetFeaturePropagation(in_channel=1024 + 512, mlp=[512, 512], att=[1024, 512, 256])
self.up_cic5 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4)
self.fp3 = PointNetFeaturePropagation(in_channel=512 + 256, mlp=[256, 256], att=[512, 256, 128])
self.up_cic4 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
self.fp2 = PointNetFeaturePropagation(in_channel=256 + 128, mlp=[128, 128], att=[256, 128, 64])
self.up_cic3 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4)
self.fp1 = PointNetFeaturePropagation(in_channel=128 + 64, mlp=[64, 64], att=[128, 64, 32])
self.up_cic2 = CIC(npoint=2048, radius=0.2, k=k, in_channels=128+64+64+category+3, output_channels=256, bottleneck_ratio=4)
self.up_cic1 = CIC(npoint=2048, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
self.global_conv2 = nn.Sequential(
nn.Conv1d(1024, 128, kernel_size=1, bias=False),
nn.BatchNorm1d(128),
nn.LeakyReLU(negative_slope=0.2))
self.global_conv1 = nn.Sequential(
nn.Conv1d(512, 64, kernel_size=1, bias=False),
nn.BatchNorm1d(64),
nn.LeakyReLU(negative_slope=0.2))
self.conv1 = nn.Conv1d(256, 256, 1, bias=False)
self.bn1 = nn.BatchNorm1d(256)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(256, num_classes, 1)
self.se = nn.Sequential(nn.AdaptiveAvgPool1d(1),
nn.Conv1d(256, 256//8, 1, bias=False),
nn.BatchNorm1d(256//8),
nn.LeakyReLU(negative_slope=0.2),
nn.Conv1d(256//8, 256, 1, bias=False),
nn.Sigmoid())
def forward(self, xyz, l=None):
batch_size = xyz.size(0)
l0_points = self.lpfa(xyz, xyz)
l1_xyz, l1_points = self.cic11(xyz, l0_points)
l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
l5_xyz, l5_points = self.cic51(l4_xyz, l4_points)
l5_xyz, l5_points = self.cic52(l5_xyz, l5_points)
l5_xyz, l5_points = self.cic53(l5_xyz, l5_points)
# global features
emb1 = self.global_conv1(l4_points)
emb1 = emb1.max(dim=-1, keepdim=True)[0] # bs, 64, 1
emb2 = self.global_conv2(l5_points)
emb2 = emb2.max(dim=-1, keepdim=True)[0] # bs, 128, 1
# Feature Propagation layers
l4_points = self.fp4(l4_xyz, l5_xyz, l4_points, l5_points)
l4_xyz, l4_points = self.up_cic5(l4_xyz, l4_points)
l3_points = self.fp3(l3_xyz, l4_xyz, l3_points, l4_points)
l3_xyz, l3_points = self.up_cic4(l3_xyz, l3_points)
l2_points = self.fp2(l2_xyz, l3_xyz, l2_points, l3_points)
l2_xyz, l2_points = self.up_cic3(l2_xyz, l2_points)
l1_points = self.fp1(l1_xyz, l2_xyz, l1_points, l2_points)
if l is not None:
l = l.view(batch_size, -1, 1)
emb = torch.cat((emb1, emb2, l), dim=1) # bs, 128 + 16, 1
l = emb.expand(-1,-1, xyz.size(-1))
x = torch.cat((l1_xyz, l1_points, l), dim=1)
xyz, x = self.up_cic2(l1_xyz, x)
xyz, x = self.up_cic1(xyz, x)
x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True)
se = self.se(x)
x = x * se
x = self.drop1(x)
x = self.conv2(x)
return x
================================================
FILE: CurveNet/core/models/curvenet_util.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: pointnet_util.py
@Time: 2018/10/13 10:39 PM
Modified by
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@Time: 2021/01/21 3:10 PM
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
from .walk import Walk
def knn(x, k):
k = k + 1
inner = -2 * torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
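# Note: k is bumped to k + 1 so the query point's self-match survives the top-k;
# callers that need true neighbours only (e.g. CIC's curve grouping) slice it
# away with idx[:, :, 1:]. Shape sketch: x (B, 3, N) -> idx (B, N, k + 1).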
def normal_knn(x, k):
inner = -2 * torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def square_distance(src, dst):
"""
Calculate the squared Euclidean distance between each pair of points.
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
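# The three terms above expand ||s - d||^2 = ||s||^2 - 2 s.d + ||d||^2.
# A minimal sanity check (hypothetical shapes, comment-only sketch):
#   src, dst = torch.rand(1, 4, 3), torch.rand(1, 5, 3)
#   assert torch.allclose(square_distance(src, dst),
#                         torch.cdist(src, dst) ** 2, atol=1e-5)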
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) * 0
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
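# Usage sketch (shapes illustrative): for xyz of shape (B, N, 3),
#   centroids = farthest_point_sample(xyz, 512)  # (B, 512) long indices
#   new_xyz = index_points(xyz, centroids)       # (B, 512, 3)
# Each iteration greedily adds the point farthest from the chosen set; the
# `* 0` above pins the starting point to index 0, making the result deterministic.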
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
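# Padding trick: out-of-radius points are first labelled N and sorted to the
# end; any surviving N entries are then overwritten with the closest valid
# index, so sparse neighbourhoods are padded by repetition rather than left ragged.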
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
"""
Input:
npoint: number of centroids to sample
radius: ball query radius
nsample: max number of neighbours per centroid
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, npoint, 3]
new_points: grouped points data, [B, npoint, nsample, D]
"""
new_xyz = index_points(xyz, farthest_point_sample(xyz, npoint))
torch.cuda.empty_cache()
idx = query_ball_point(radius, nsample, xyz, new_xyz)
torch.cuda.empty_cache()
new_points = index_points(points, idx)
torch.cuda.empty_cache()
if returnfps:
return new_xyz, new_points, idx
else:
return new_xyz, new_points
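# sample_and_group chains the primitives above: FPS picks npoint centroids,
# query_ball_point gathers up to nsample neighbours within radius of each, and
# index_points collects their features into (B, npoint, nsample, D) patches.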
class Attention_block(nn.Module):
'''
Used in attention U-Net.
'''
def __init__(self,F_g,F_l,F_int):
super(Attention_block,self).__init__()
self.W_g = nn.Sequential(
nn.Conv1d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True),
nn.BatchNorm1d(F_int)
)
self.W_x = nn.Sequential(
nn.Conv1d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True),
nn.BatchNorm1d(F_int)
)
self.psi = nn.Sequential(
nn.Conv1d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True),
nn.BatchNorm1d(1),
nn.Sigmoid()
)
def forward(self,g,x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = F.leaky_relu(g1+x1, negative_slope=0.2)
psi = self.psi(psi)
return psi, 1. - psi
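# forward returns the attention map psi and its complement 1 - psi; in this
# codebase PointNetFeaturePropagation uses only psi, to gate the skip features
# (points1) before they are concatenated with the interpolated ones.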
class LPFA(nn.Module):
def __init__(self, in_channel, out_channel, k, mlp_num=2, initial=False):
super(LPFA, self).__init__()
self.k = k
self.device = torch.device('cuda')
self.initial = initial
if not initial:
self.xyz2feature = nn.Sequential(
nn.Conv2d(9, in_channel, kernel_size=1, bias=False),
nn.BatchNorm2d(in_channel))
self.mlp = []
for _ in range(mlp_num):
self.mlp.append(nn.Sequential(nn.Conv2d(in_channel, out_channel, 1, bias=False),
nn.BatchNorm2d(out_channel),
nn.LeakyReLU(0.2)))
in_channel = out_channel
self.mlp = nn.Sequential(*self.mlp)
def forward(self, x, xyz, idx=None):
x = self.group_feature(x, xyz, idx)
x = self.mlp(x)
if self.initial:
x = x.max(dim=-1, keepdim=False)[0]
else:
x = x.mean(dim=-1, keepdim=False)
return x
def group_feature(self, x, xyz, idx):
batch_size, num_dims, num_points = x.size()
if idx is None:
idx = knn(xyz, k=self.k)[:,:,:self.k] # (batch_size, num_points, k)
idx_base = torch.arange(0, batch_size, device=self.device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
xyz = xyz.transpose(2, 1).contiguous() # bs, n, 3
point_feature = xyz.view(batch_size * num_points, -1)[idx, :]
point_feature = point_feature.view(batch_size, num_points, self.k, -1) # bs, n, k, 3
points = xyz.view(batch_size, num_points, 1, 3).expand(-1, -1, self.k, -1) # bs, n, k, 3
point_feature = torch.cat((points, point_feature, point_feature - points),
dim=3).permute(0, 3, 1, 2).contiguous()
if self.initial:
return point_feature
x = x.transpose(2, 1).contiguous() # bs, n, c
feature = x.view(batch_size * num_points, -1)[idx, :]
feature = feature.view(batch_size, num_points, self.k, num_dims) #bs, n, k, c
x = x.view(batch_size, num_points, 1, num_dims)
feature = feature - x
feature = feature.permute(0, 3, 1, 2).contiguous()
point_feature = self.xyz2feature(point_feature) #bs, c, n, k
feature = F.leaky_relu(feature + point_feature, 0.2)
return feature #bs, c, n, k
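# The 9-channel inputs seen throughout (LPFA(9, ...), Conv2d(9, ...)) come from
# group_feature concatenating centre xyz, neighbour xyz and their difference
# (3 + 3 + 3) for each of the k neighbours.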
class PointNetFeaturePropagation(nn.Module):
def __init__(self, in_channel, mlp, att=None):
super(PointNetFeaturePropagation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
self.att = None
if att is not None:
self.att = Attention_block(F_g=att[0],F_l=att[1],F_int=att[2])
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
last_channel = out_channel
def forward(self, xyz1, xyz2, points1, points2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S], skipped xyz
points1: input points data, [B, D, N]
points2: input points data, [B, D, S], skipped features
Return:
new_points: upsampled points data, [B, D', N]
"""
xyz1 = xyz1.permute(0, 2, 1)
xyz2 = xyz2.permute(0, 2, 1)
points2 = points2.permute(0, 2, 1)
B, N, C = xyz1.shape
_, S, _ = xyz2.shape
if S == 1:
interpolated_points = points2.repeat(1, N, 1)
else:
dists = square_distance(xyz1, xyz2)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dist_recip = 1.0 / (dists + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
# skip attention
if self.att is not None:
psix, psig = self.att(interpolated_points.permute(0, 2, 1), points1)
points1 = points1 * psix
if points1 is not None:
points1 = points1.permute(0, 2, 1)
new_points = torch.cat([points1, interpolated_points], dim=-1)
else:
new_points = interpolated_points
new_points = new_points.permute(0, 2, 1)
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.leaky_relu(bn(conv(new_points)), 0.2)
return new_points
class CIC(nn.Module):
def __init__(self, npoint, radius, k, in_channels, output_channels, bottleneck_ratio=2, mlp_num=2, curve_config=None):
super(CIC, self).__init__()
self.in_channels = in_channels
self.output_channels = output_channels
self.bottleneck_ratio = bottleneck_ratio
self.radius = radius
self.k = k
self.npoint = npoint
planes = in_channels // bottleneck_ratio
self.use_curve = curve_config is not None
if self.use_curve:
self.curveaggregation = CurveAggregation(planes)
self.curvegrouping = CurveGrouping(planes, k, curve_config[0], curve_config[1])
self.conv1 = nn.Sequential(
nn.Conv1d(in_channels,
planes,
kernel_size=1,
bias=False),
nn.BatchNorm1d(in_channels // bottleneck_ratio),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv2 = nn.Sequential(
nn.Conv1d(planes, output_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(output_channels))
if in_channels != output_channels:
self.shortcut = nn.Sequential(
nn.Conv1d(in_channels,
output_channels,
kernel_size=1,
bias=False),
nn.BatchNorm1d(output_channels))
self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.maxpool = MaskedMaxPool(npoint, radius, k)
self.lpfa = LPFA(planes, planes, k, mlp_num=mlp_num, initial=False)
def forward(self, xyz, x):
# max pool
if xyz.size(-1) != self.npoint:
xyz, x = self.maxpool(
xyz.transpose(1, 2).contiguous(), x)
xyz = xyz.transpose(1, 2)
shortcut = x
x = self.conv1(x) # bs, c', n
idx = knn(xyz, self.k)
if self.use_curve:
# curve grouping
curves = self.curvegrouping(x, xyz, idx[:,:,1:]) # avoid self-loop
# curve aggregation
x = self.curveaggregation(x, curves)
x = self.lpfa(x, xyz, idx=idx[:,:,:self.k]) #bs, c', n, k
x = self.conv2(x) # bs, c, n
if self.in_channels != self.output_channels:
shortcut = self.shortcut(shortcut)
x = self.relu(x + shortcut)
return xyz, x
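# CIC is a bottleneck residual block for point sets: conv1 squeezes channels by
# bottleneck_ratio, optional curve grouping/aggregation enriches the squeezed
# features, LPFA aggregates each point's k-NN neighbourhood, conv2 expands the
# channels back, and a (projected, if shapes differ) shortcut is added before
# the final LeakyReLU.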
class CurveAggregation(nn.Module):
def __init__(self, in_channel):
super(CurveAggregation, self).__init__()
self.in_channel = in_channel
mid_feature = in_channel // 2
self.conva = nn.Conv1d(in_channel,
mid_feature,
kernel_size=1,
bias=False)
self.convb = nn.Conv1d(in_channel,
mid_feature,
kernel_size=1,
bias=False)
self.convc = nn.Conv1d(in_channel,
mid_feature,
kernel_size=1,
bias=False)
self.convn = nn.Conv1d(mid_feature,
mid_feature,
kernel_size=1,
bias=False)
self.convl = nn.Conv1d(mid_feature,
mid_feature,
kernel_size=1,
bias=False)
self.convd = nn.Sequential(
nn.Conv1d(mid_feature * 2,
in_channel,
kernel_size=1,
bias=False),
nn.BatchNorm1d(in_channel))
self.line_conv_att = nn.Conv2d(in_channel,
1,
kernel_size=1,
bias=False)
def forward(self, x, curves):
curves_att = self.line_conv_att(curves) # bs, 1, c_n, c_l
curver_inter = torch.sum(curves * F.softmax(curves_att, dim=-1), dim=-1) #bs, c, c_n
curves_intra = torch.sum(curves * F.softmax(curves_att, dim=-2), dim=-2) #bs, c, c_l
curver_inter = self.conva(curver_inter) # bs, mid, n
curves_intra = self.convb(curves_intra) # bs, mid ,n
x_logits = self.convc(x).transpose(1, 2).contiguous()
x_inter = F.softmax(torch.bmm(x_logits, curver_inter), dim=-1) # bs, n, c_n
x_intra = F.softmax(torch.bmm(x_logits, curves_intra), dim=-1) # bs, n, c_l
curver_inter = self.convn(curver_inter).transpose(1, 2).contiguous()
curves_intra = self.convl(curves_intra).transpose(1, 2).contiguous()
x_inter = torch.bmm(x_inter, curver_inter)
x_intra = torch.bmm(x_intra, curves_intra)
curve_features = torch.cat((x_inter, x_intra),dim=-1).transpose(1, 2).contiguous()
x = x + self.convd(curve_features)
return F.leaky_relu(x, negative_slope=0.2)
class CurveGrouping(nn.Module):
def __init__(self, in_channel, k, curve_num, curve_length):
super(CurveGrouping, self).__init__()
self.curve_num = curve_num
self.curve_length = curve_length
self.in_channel = in_channel
self.k = k
self.att = nn.Conv1d(in_channel, 1, kernel_size=1, bias=False)
self.walk = Walk(in_channel, k, curve_num, curve_length)
def forward(self, x, xyz, idx):
# starting point selection in self attention style
x_att = torch.sigmoid(self.att(x))
x = x * x_att
_, start_index = torch.topk(x_att,
self.curve_num,
dim=2,
sorted=False)
start_index = start_index.squeeze().unsqueeze(2)
curves = self.walk(xyz, x, idx, start_index) #bs, c, c_n, c_l
return curves
class MaskedMaxPool(nn.Module):
def __init__(self, npoint, radius, k):
super(MaskedMaxPool, self).__init__()
self.npoint = npoint
self.radius = radius
self.k = k
def forward(self, xyz, features):
sub_xyz, neighborhood_features = sample_and_group(self.npoint, self.radius, self.k, xyz, features.transpose(1,2))
neighborhood_features = neighborhood_features.permute(0, 3, 1, 2).contiguous()
sub_features = F.max_pool2d(
neighborhood_features, kernel_size=[1, neighborhood_features.shape[3]]
) # bs, c, n, 1
sub_features = torch.squeeze(sub_features, -1) # bs, c, n
return sub_xyz, sub_features
================================================
FILE: CurveNet/core/models/walk.py
================================================
"""
@Author: Tiange Xiang
@Contact: txia7609@uni.sydney.edu.au
@File: walk.py
@Time: 2021/01/21 3:10 PM
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def batched_index_select(input, dim, index):
views = [input.shape[0]] + \
[1 if i != dim else -1 for i in range(1, len(input.shape))]
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(input, dim, index)
def gumbel_softmax(logits, dim, temperature=1):
"""
ST-gumple-softmax w/o random gumbel samplings
input: [*, n_class]
return: flatten --> [*, n_class] an one-hot vector
"""
y = F.softmax(logits / temperature, dim=dim)
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
return y_hard
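# The (y_hard - y).detach() + y trick makes this a straight-through estimator:
# the forward pass emits the hard one-hot y_hard while gradients flow through
# the soft softmax y. Sketch (hypothetical shapes):
#   logits = torch.randn(2, 1, 100, 10)
#   choice = gumbel_softmax(logits, dim=-1)  # one-hot over the last axis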
class Walk(nn.Module):
'''
Walk in the cloud
'''
def __init__(self, in_channel, k, curve_num, curve_length):
super(Walk, self).__init__()
self.curve_num = curve_num
self.curve_length = curve_length
self.k = k
self.agent_mlp = nn.Sequential(
nn.Conv2d(in_channel * 2,
1,
kernel_size=1,
bias=False), nn.BatchNorm2d(1))
self.momentum_mlp = nn.Sequential(
nn.Conv1d(in_channel * 2,
2,
kernel_size=1,
bias=False), nn.BatchNorm1d(2))
def crossover_suppression(self, cur, neighbor, bn, n, k):
# cur: bs*n, 3
# neighbor: bs*n, 3, k
neighbor = neighbor.detach()
cur = cur.unsqueeze(-1).detach()
dot = torch.bmm(cur.transpose(1,2), neighbor) # bs*n, 1, k
norm1 = torch.norm(cur, dim=1, keepdim=True)
norm2 = torch.norm(neighbor, dim=1, keepdim=True)
divider = torch.clamp(norm1 * norm2, min=1e-8)
ans = torch.div(dot, divider).squeeze() # bs*n, k
# normalize to [0, 1]
ans = 1. + ans
ans = torch.clamp(ans, 0., 1.0)
return ans.detach()
def forward(self, xyz, x, adj, cur):
bn, c, tot_points = x.size()
# raw point coordinates
xyz = xyz.transpose(1,2).contiguous()  # bs, n, 3
# point features
x = x.transpose(1,2).contiguous() # bs, n, c
flatten_x = x.view(bn * tot_points, -1)
batch_offset = torch.arange(0, bn, device=torch.device('cuda')).detach() * tot_points
# indices of neighbors for the starting points
tmp_adj = (adj + batch_offset.view(-1,1,1)).view(adj.size(0)*adj.size(1),-1) #bs, n, k
# batch-flattened indices for the starting points
flatten_cur = (cur + batch_offset.view(-1,1,1)).view(-1)
curves = []
# one step at a time
for step in range(self.curve_length):
if step == 0:
# get starting point features using flattened indices
starting_points = flatten_x[flatten_cur, :].contiguous()
pre_feature = starting_points.view(bn, self.curve_num, -1, 1).transpose(1,2) # bs, c, n, 1
else:
# dynamic momentum
cat_feature = torch.cat((cur_feature.squeeze(), pre_feature.squeeze()),dim=1)
att_feature = F.softmax(self.momentum_mlp(cat_feature),dim=1).view(bn, 1, self.curve_num, 2) # bs, 1, n, 2
cat_feature = torch.cat((cur_feature, pre_feature),dim=-1) # bs, c, n, 2
# update curve descriptor
pre_feature = torch.sum(cat_feature * att_feature, dim=-1, keepdim=True) # bs, c, n, 1
pre_feature_cos = pre_feature.transpose(1,2).contiguous().view(bn * self.curve_num, -1)
pick_idx = tmp_adj[flatten_cur] # bs*n, k
# get the neighbors of current points
pick_values = flatten_x[pick_idx.view(-1),:]
# reshape to fit crossover suppression below
pick_values_cos = pick_values.view(bn * self.curve_num, self.k, c)
pick_values = pick_values_cos.view(bn, self.curve_num, self.k, c)
pick_values_cos = pick_values_cos.transpose(1,2).contiguous()
pick_values = pick_values.permute(0,3,1,2) # bs, c, n, k
pre_feature_expand = pre_feature.expand_as(pick_values)
# concat current point features with curve descriptors
pre_feature_expand = torch.cat((pick_values, pre_feature_expand),dim=1)
# which node to pick next?
pre_feature_expand = self.agent_mlp(pre_feature_expand) # bs, 1, n, k
if step != 0:
# crossover suppression
d = self.crossover_suppression(cur_feature_cos - pre_feature_cos,
pick_values_cos - cur_feature_cos.unsqueeze(-1),
bn, self.curve_num, self.k)
d = d.view(bn, self.curve_num, self.k).unsqueeze(1) # bs, 1, n, k
pre_feature_expand = torch.mul(pre_feature_expand, d)
pre_feature_expand = gumbel_softmax(pre_feature_expand, -1) #bs, 1, n, k
cur_feature = torch.sum(pick_values * pre_feature_expand, dim=-1, keepdim=True) # bs, c, n, 1
cur_feature_cos = cur_feature.transpose(1,2).contiguous().view(bn * self.curve_num, c)
cur = torch.argmax(pre_feature_expand, dim=-1).view(-1, 1) # bs * n, 1
flatten_cur = batched_index_select(pick_idx, 1, cur).squeeze() # bs * n
# collect curve progress
curves.append(cur_feature)
return torch.cat(curves,dim=-1)
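# In summary, each walk step scores the k neighbours of the current node against
# a momentum-weighted curve descriptor (agent_mlp + momentum_mlp), damps moves
# that would fold the curve back on itself (crossover_suppression), then picks
# the next node with the straight-through gumbel_softmax so the discrete choice
# stays differentiable.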
================================================
FILE: CurveNet/core/util.py
================================================
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: util
@Time: 4/5/19 3:47 PM
"""
import numpy as np
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
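# With smoothing, the target distribution puts 1 - eps = 0.8 on the true class
# and eps / (n_class - 1) on every other class; the loss is the cross entropy
# against that softened distribution instead of a hard one-hot label.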
class IOStream():
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text+'\n')
self.f.flush()
def close(self):
self.f.close()
================================================
FILE: GDANet/README.md
================================================
# Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud.
This repository is built for the paper:
__Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud (_AAAI2021_)__ [[arXiv](https://arxiv.org/abs/2012.10921)]
by [Mutian Xu*](https://mutianxu.github.io/), [Junhao Zhang*](https://junhaozhang98.github.io/), Zhipeng Zhou, Mingye Xu, Xiaojuan Qi and Yu Qiao.
## Overview
Geometry-Disentangled Attention Network for 3D object point cloud classification and segmentation (GDANet).
## Citation
If you find the code or trained models useful, please consider citing:
@misc{xu2021learning,
title={Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud},
author={Mutian Xu and Junhao Zhang and Zhipeng Zhou and Mingye Xu and Xiaojuan Qi and Yu Qiao},
year={2021},
eprint={2012.10921},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
## Installation
### Requirements
* Linux (tested on Ubuntu 14.04/16.04)
* Python 3.5+
* PyTorch 1.0+
### Dataset
* Create the folder to symlink the data later:
`mkdir -p data`
* __Object Classification__:
Download and unzip [ModelNet40](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip) (415M), then symlink the path to it as follows (you can alternatively modify the path [here](https://github.com/mutianxu/GDANet/blob/main/util/data_util.py#L12)):
`ln -s /path/to/modelnet40/modelnet40_ply_hdf5_2048 data`
* __Shape Part Segmentation__:
Download and unzip [ShapeNet Part](https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip) (674M), then symlink the path to it as follows (you can alternatively modify the path [here](https://github.com/mutianxu/GDANet/blob/main/util/data_util.py#L70)):
`ln -s /path/to/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0_normal data`
## Usage
### Object Classification on ModelNet40
* Train:
`python main_cls.py`
* Test:
* Run the voting evaluation script; after voting you should get an accuracy of 93.8% if everything goes right:
`python voting_eval_modelnet.py --model_path 'pretrained/GDANet_ModelNet40_93.4.t7'`
* You can also directly evaluate our pretrained model without voting to get an accuracy of 93.4%:
`python main_cls.py --eval True --model_path 'pretrained/GDANet_ModelNet40_93.4.t7'`
### Shape Part Segmentation on ShapeNet Part
* Train:
* Training from scratch:
`python main_ptseg.py`
* If you want to resume training from a checkpoint, specify `resume` in the args:
`python main_ptseg.py --resume True`
* Test:
You can choose to test the model with the best instance mIoU, class mIoU or accuracy, by specifying `model_type` in the args:
* `python main_ptseg.py --model_type 'ins_iou'` (best instance mIoU, default)
* `python main_ptseg.py --model_type 'cls_iou'` (best class mIoU)
* `python main_ptseg.py --model_type 'acc'` (best accuracy)
## Other information
Please contact Mutian Xu (mino1018@outlook.com) or Junhao Zhang (junhaozhang98@gmail.com) for further discussion.
## Acknowledgement
This code is partially borrowed from [DGCNN](https://github.com/WangYueFt/dgcnn) and [PointNet++](https://github.com/charlesq34/pointnet2).
================================================
FILE: GDANet/model/GDANet_cls.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from .util.GDANet_util import local_operator, GDM, SGCAM
class GDANET(nn.Module):
def __init__(self, number_class=40):
super(GDANET, self).__init__()
self.bn1 = nn.BatchNorm2d(64, momentum=0.1)
self.bn11 = nn.BatchNorm2d(64, momentum=0.1)
self.bn12 = nn.BatchNorm1d(64, momentum=0.1)
self.bn2 = nn.BatchNorm2d(64, momentum=0.1)
self.bn21 = nn.BatchNorm2d(64, momentum=0.1)
self.bn22 = nn.BatchNorm1d(64, momentum=0.1)
self.bn3 = nn.BatchNorm2d(128, momentum=0.1)
self.bn31 = nn.BatchNorm2d(128, momentum=0.1)
self.bn32 = nn.BatchNorm1d(128, momentum=0.1)
self.bn4 = nn.BatchNorm1d(512, momentum=0.1)
self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=True),
self.bn1)
self.conv11 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
self.bn11)
self.conv12 = nn.Sequential(nn.Conv1d(64 * 2, 64, kernel_size=1, bias=True),
self.bn12)
self.conv2 = nn.Sequential(nn.Conv2d(67 * 2, 64, kernel_size=1, bias=True),
self.bn2)
self.conv21 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
self.bn21)
self.conv22 = nn.Sequential(nn.Conv1d(64 * 2, 64, kernel_size=1, bias=True),
self.bn22)
self.conv3 = nn.Sequential(nn.Conv2d(131 * 2, 128, kernel_size=1, bias=True),
self.bn3)
self.conv31 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=1, bias=True),
self.bn31)
self.conv32 = nn.Sequential(nn.Conv1d(128, 128, kernel_size=1, bias=True),
self.bn32)
self.conv4 = nn.Sequential(nn.Conv1d(256, 512, kernel_size=1, bias=True),
self.bn4)
self.SGCAM_1s = SGCAM(64)
self.SGCAM_1g = SGCAM(64)
self.SGCAM_2s = SGCAM(64)
self.SGCAM_2g = SGCAM(64)
self.linear1 = nn.Linear(1024, 512, bias=True)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=0.4)
self.linear2 = nn.Linear(512, 256, bias=True)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=0.4)
self.linear3 = nn.Linear(256, number_class, bias=True)
def forward(self, x):
B, C, N = x.size()
###############
"""block 1"""
# Local operator:
x1 = local_operator(x, k=30)
x1 = F.relu(self.conv1(x1))
x1 = F.relu(self.conv11(x1))
x1 = x1.max(dim=-1, keepdim=False)[0]
# Geometry-Disentangle Module:
x1s, x1g = GDM(x1, M=256)
# Sharp-Gentle Complementary Attention Module:
y1s = self.SGCAM_1s(x1, x1s.transpose(2, 1))
y1g = self.SGCAM_1g(x1, x1g.transpose(2, 1))
z1 = torch.cat([y1s, y1g], 1)
z1 = F.relu(self.conv12(z1))
###############
"""block 2"""
x1t = torch.cat((x, z1), dim=1)
x2 = local_operator(x1t, k=30)
x2 = F.relu(self.conv2(x2))
x2 = F.relu(self.conv21(x2))
x2 = x2.max(dim=-1, keepdim=False)[0]
x2s, x2g = GDM(x2, M=256)
y2s = self.SGCAM_2s(x2, x2s.transpose(2, 1))
y2g = self.SGCAM_2g(x2, x2g.transpose(2, 1))
z2 = torch.cat([y2s, y2g], 1)
z2 = F.relu(self.conv22(z2))
###############
x2t = torch.cat((x1t, z2), dim=1)
x3 = local_operator(x2t, k=30)
x3 = F.relu(self.conv3(x3))
x3 = F.relu(self.conv31(x3))
x3 = x3.max(dim=-1, keepdim=False)[0]
z3 = F.relu(self.conv32(x3))
###############
x = torch.cat((z1, z2, z3), dim=1)
x = F.relu(self.conv4(x))
x11 = F.adaptive_max_pool1d(x, 1).view(B, -1)
x22 = F.adaptive_avg_pool1d(x, 1).view(B, -1)
x = torch.cat((x11, x22), 1)
x = F.relu(self.bn6(self.linear1(x)))
x = self.dp1(x)
x = F.relu(self.bn7(self.linear2(x)))
x = self.dp2(x)
x = self.linear3(x)
return x
================================================
FILE: GDANet/model/GDANet_ptseg.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from util.GDANet_util import local_operator_withnorm, local_operator, GDM, SGCAM
class GDANet(nn.Module):
def __init__(self, num_classes):
super(GDANet, self).__init__()
self.bn1 = nn.BatchNorm2d(64, momentum=0.1)
self.bn11 = nn.BatchNorm2d(64, momentum=0.1)
self.bn12 = nn.BatchNorm1d(64, momentum=0.1)
self.bn2 = nn.BatchNorm2d(64, momentum=0.1)
self.bn21 = nn.BatchNorm2d(64, momentum=0.1)
self.bn22 = nn.BatchNorm1d(64, momentum=0.1)
self.bn3 = nn.BatchNorm2d(128, momentum=0.1)
self.bn31 = nn.BatchNorm2d(128, momentum=0.1)
self.bn32 = nn.BatchNorm1d(128, momentum=0.1)
self.bn4 = nn.BatchNorm1d(512, momentum=0.1)
self.bnc = nn.BatchNorm1d(64, momentum=0.1)
self.bn5 = nn.BatchNorm1d(256, momentum=0.1)
self.bn6 = nn.BatchNorm1d(256, momentum=0.1)
self.bn7 = nn.BatchNorm1d(128, momentum=0.1)
self.conv1 = nn.Sequential(nn.Conv2d(9, 64, kernel_size=1, bias=True),
self.bn1)
self.conv11 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
self.bn11)
self.conv12 = nn.Sequential(nn.Conv1d(64*2, 64, kernel_size=1, bias=True),
self.bn12)
self.conv2 = nn.Sequential(nn.Conv2d(67 * 2, 64, kernel_size=1, bias=True),
self.bn2)
self.conv21 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True),
self.bn21)
self.conv22 = nn.Sequential(nn.Conv1d(64*2, 64, kernel_size=1, bias=True),
self.bn22)
self.conv3 = nn.Sequential(nn.Conv2d(131 * 2, 128, kernel_size=1, bias=True),
self.bn3)
self.conv31 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=1, bias=True),
self.bn31)
self.conv32 = nn.Sequential(nn.Conv1d(128, 128, kernel_size=1, bias=True),
self.bn32)
self.conv4 = nn.Sequential(nn.Conv1d(256, 512, kernel_size=1, bias=True),
self.bn4)
self.convc = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=True),
self.bnc)
self.conv5 = nn.Sequential(nn.Conv1d(256 + 512 + 64, 256, kernel_size=1, bias=True),
self.bn5)
self.dp1 = nn.Dropout(0.4)
self.conv6 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=True),
self.bn6)
self.dp2 = nn.Dropout(0.4)
self.conv7 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=True),
self.bn7)
self.conv8 = nn.Conv1d(128, num_classes, kernel_size=1, bias=True)
self.SGCAM_1s = SGCAM(64)
self.SGCAM_1g = SGCAM(64)
self.SGCAM_2s = SGCAM(64)
self.SGCAM_2g = SGCAM(64)
def forward(self, x, norm_plt, cls_label):
B, C, N = x.size()
###############
"""block 1"""
x1 = local_operator_withnorm(x, norm_plt, k=30)
x1 = F.relu(self.conv1(x1))
x1 = F.relu(self.conv11(x1))
x1 = x1.max(dim=-1, keepdim=False)[0]
x1h, x1l = GDM(x1, M=512)
x1h = self.SGCAM_1s(x1, x1h.transpose(2, 1))
x1l = self.SGCAM_1g(x1, x1l.transpose(2, 1))
x1 = torch.cat([x1h, x1l], 1)
x1 = F.relu(self.conv12(x1))
###############
"""block 1"""
x1t = torch.cat((x, x1), dim=1)
x2 = local_operator(x1t, k=30)
x2 = F.relu(self.conv2(x2))
x2 = F.relu(self.conv21(x2))
x2 = x2.max(dim=-1, keepdim=False)[0]
x2h, x2l = GDM(x2, M=512)
x2h = self.SGCAM_2s(x2, x2h.transpose(2, 1))
x2l = self.SGCAM_2g(x2, x2l.transpose(2, 1))
x2 = torch.cat([x2h, x2l], 1)
x2 = F.relu(self.conv22(x2))
###############
x2t = torch.cat((x1t, x2), dim=1)
x3 = local_operator(x2t, k=30)
x3 = F.relu(self.conv3(x3))
x3 = F.relu(self.conv31(x3))
x3 = x3.max(dim=-1, keepdim=False)[0]
x3 = F.relu(self.conv32(x3))
###############
xx = torch.cat((x1, x2, x3), dim=1)
xc = F.relu(self.conv4(xx))
xc = F.adaptive_max_pool1d(xc, 1).view(B, -1)
cls_label = cls_label.view(B, 16, 1)
cls_label = F.relu(self.convc(cls_label))
cls = torch.cat((xc.view(B, 512, 1), cls_label), dim=1)
cls = cls.repeat(1, 1, N)
x = torch.cat((xx, cls), dim=1)
x = F.relu(self.conv5(x))
x = self.dp1(x)
x = F.relu(self.conv6(x))
x = self.dp2(x)
x = F.relu(self.conv7(x))
x = self.conv8(x)
x = F.log_softmax(x, dim=1)
x = x.permute(0, 2, 1) # b,n,50
return x
================================================
FILE: GDANet/model/__init__.py
================================================
================================================
FILE: GDANet/model/util/GDANet_util.py
================================================
import torch
from torch import nn
def knn(x, k):
inner = -2*torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx, pairwise_distance
def local_operator(x, k):
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
idx, _ = knn(x, k=k)
device = torch.device('cuda')
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous()
neighbor = x.view(batch_size * num_points, -1).contiguous()[idx, :]
neighbor = neighbor.view(batch_size, num_points, k, num_dims).contiguous()
x = x.view(batch_size, num_points, 1, num_dims).contiguous().repeat(1, 1, k, 1)
feature = torch.cat((neighbor-x, neighbor), dim=3).permute(0, 3, 1, 2).contiguous() # local and global all in
return feature
def local_operator_withnorm(x, norm_plt, k):
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
norm_plt = norm_plt.view(batch_size, -1, num_points)
idx, _ = knn(x, k=k) # (batch_size, num_points, k)
device = torch.device('cuda')
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous()
norm_plt = norm_plt.transpose(2, 1).contiguous()
neighbor = x.view(batch_size * num_points, -1)[idx, :]
neighbor_norm = norm_plt.view(batch_size * num_points, -1)[idx, :]
neighbor = neighbor.view(batch_size, num_points, k, num_dims)
neighbor_norm = neighbor_norm.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((neighbor-x, neighbor, neighbor_norm), dim=3).permute(0, 3, 1, 2) # 3c
return feature
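# local_operator doubles the input channels (neighbour - centre, neighbour),
# which is why GDANet_cls starts with Conv2d(6, ...) on raw xyz (2 * 3) and uses
# Conv2d(67 * 2, ...) once the 64-d block-1 features are concatenated with xyz
# (2 * (3 + 64)); local_operator_withnorm appends neighbour normals for 3c channels.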
def GDM(x, M):
"""
Geometry-Disentangle Module
M: number of disentangled points in both sharp and gentle variation components
"""
k = 64 # number of neighbors to decide the range of j in Eq.(5)
tau = 0.2 # threshold in Eq.(2)
sigma = 2 # parameters of f (Gaussian function in Eq.(2))
###############
"""Graph Construction:"""
device = torch.device('cuda')
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
idx, p = knn(x, k=k) # p: -[(x1-x2)^2+...]
# here we add a tau
p1 = torch.abs(p)
p1 = torch.sqrt(p1)
mask = p1 < tau
# here we add a sigma
p = p / (sigma * sigma)
w = torch.exp(p) # b,n,n
w = torch.mul(mask.float(), w)
b = 1/torch.sum(w, dim=1)
b = b.reshape(batch_size, num_points, 1).repeat(1, 1, num_points)
c = torch.eye(num_points, num_points, device=device)
c = c.expand(batch_size, num_points, num_points)
D = b * c # b,n,n
A = torch.matmul(D, w) # normalized adjacency matrix A_hat
# Get Aij in a local area:
idx2 = idx.view(batch_size * num_points, -1)
idx_base2 = torch.arange(0, batch_size * num_points, device=device).view(-1, 1) * num_points
idx2 = idx2 + idx_base2
idx2 = idx2.reshape(batch_size * num_points, k)[:, 1:k]
idx2 = idx2.reshape(batch_size * num_points * (k - 1))
idx2 = idx2.view(-1)
A = A.view(-1).contiguous()
A = A[idx2].reshape(batch_size, num_points, k - 1).contiguous() # Aij: b, n, k-1
###############
"""Disentangling Point Clouds into Sharp(xs) and Gentle(xg) Variation Components:"""
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.reshape(batch_size * num_points, k)[:, 1:k]
idx = idx.reshape(batch_size * num_points * (k - 1))
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous() # b,n,c
neighbor = x.view(batch_size * num_points, -1).contiguous()[idx, :]
neighbor = neighbor.view(batch_size, num_points, k - 1, num_dims).contiguous() # b,n,k,c
A = A.reshape(batch_size, num_points, k - 1, 1).contiguous() # b,n,k,1
n = A.mul(neighbor) # b,n,k,c
n = torch.sum(n, dim=2) # b,n,c
pai = torch.norm(x - n, dim=-1).pow(2) # Eq.(5)
pais = pai.topk(k=M, dim=-1)[1] # first M points as the sharp variation component
paig = (-pai).topk(k=M, dim=-1)[1] # last M points as the gentle variation component
pai_base = torch.arange(0, batch_size, device=device).view(-1, 1) * num_points
indices = (pais + pai_base).view(-1)
indiceg = (paig + pai_base).view(-1)
xs = x.view(batch_size * num_points, -1).contiguous()[indices, :]
xg = x.view(batch_size * num_points, -1).contiguous()[indiceg, :]
xs = xs.view(batch_size, M, -1).contiguous() # b,M,c
xg = xg.view(batch_size, M, -1).contiguous() # b,M,c
return xs, xg
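# In short: GDM builds a distance-thresholded Gaussian affinity graph,
# degree-normalizes it into A_hat, smooths each point by its k - 1 neighbours,
# and ranks points by pai = ||x - n||^2 (Eq.(5)): the M largest form the sharp
# variation component xs, the M smallest the gentle component xg.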
class SGCAM(nn.Module):
"""Sharp-Gentle Complementary Attention Module:"""
def __init__(self, in_channels, inter_channels=None, bn_layer=True):
super(SGCAM, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
conv_nd = nn.Conv1d
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x, x_2):
batch_size = x.size(0)
g_x = self.g(x_2).view(batch_size, self.inter_channels, -1).contiguous()
g_x = g_x.permute(0, 2, 1).contiguous()
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1).contiguous()
theta_x = theta_x.permute(0, 2, 1).contiguous()
phi_x = self.phi(x_2).view(batch_size, self.inter_channels, -1).contiguous()
W = torch.matmul(theta_x, phi_x) # Attention Matrix
N = W.size(-1)
W_div_C = W / N
y = torch.matmul(W_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:]).contiguous()
W_y = self.W(y)
y = W_y + x
return y
================================================
FILE: GDANet/model/util/__init__.py
================================================
================================================
FILE: GDANet/model/util/data_util.py
================================================
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
import os
import json
def load_data(partition):
all_data = []
all_label = []
for h5_name in glob.glob('./data/modelnet40_ply_hdf5_2048/ply_data_%s*.h5' % partition):
f = h5py.File(h5_name, 'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def pc_normalize(pc):
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = pc / m
return pc
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
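# A hypothetical augmentation pipeline combining the two transforms above:
#   pc = translate_pointcloud(pc)  # per-axis scale in [2/3, 3/2], shift in [-0.2, 0.2]
#   pc = jitter_pointcloud(pc)     # clipped Gaussian noise, sigma=0.01, clip=0.02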
# =========== ModelNet40 =================
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data(partition)
self.num_points = num_points
self.partition = partition # the given partition overrides the default 'train'
def __getitem__(self, item): # index of a point cloud / label pair
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
# pointcloud = pc_normalize(pointcloud) # optional normalization; the model trains either way
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud) # shuffle the order of pts
return pointcloud, label
def __len__(self):
return self.data.shape[0]
# =========== ShapeNet Part =================
class PartNormalDataset(Dataset):
def __init__(self, npoints=2500, split='train', normalize=False):
self.npoints = npoints
self.root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal'
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.normalize = normalize
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
self.cat = {k: v for k, v in self.cat.items()}
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item])
fns = sorted(os.listdir(dir_point))
if split == 'trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif split == 'train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif split == 'val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif split == 'test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..' % (split))
exit(-1)
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append(os.path.join(dir_point, token + '.txt'))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn))
self.classes = dict(zip(self.cat, range(len(self.cat))))
# Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
self.cache = {} # from index to (point_set, cls, seg) tuple
self.cache_size = 20000
def __getitem__(self, index):
if index in self.cache:
point_set, normal, seg, cls = self.cache[index]
else:
fn = self.datapath[index]
cat = self.datapath[index][0]
cls = self.classes[cat]
cls = np.array([cls]).astype(np.int32)
data = np.loadtxt(fn[1]).astype(np.float32)
point_set = data[:, 0:3]
normal = data[:, 3:6]
seg = data[:, -1].astype(np.int32)
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, normal, seg, cls)
if self.normalize:
point_set = pc_normalize(point_set)
choice = np.random.choice(len(seg), self.npoints, replace=True)
# resample
# note that some point clouds have fewer than 2048 points, hence random.choice with replacement
# remember to use the same seed during train and test to get a stable result
point_set = point_set[choice, :]
seg = seg[choice]
normal = normal[choice, :]
return point_set, cls, seg, normal
def __len__(self):
return len(self.datapath)
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
for data, label in train:
print(data.shape)
print(label.shape)
================================================
FILE: GDANet/model/util/util.py
================================================
import numpy as np
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1) # gold is the ground-truth label from the dataloader
if smoothing:
eps = 0.2
n_class = pred.size(1) # number of output channels (feature dimension of the prediction)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
# log helper: prints text and appends it to a file
class IOStream():
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text+'\n')
self.f.flush()
def close(self):
self.f.close()
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
if (y.is_cuda):
return new_y.cuda(non_blocking=True)
return new_y
def compute_overall_iou(pred, target, num_classes):
shape_ious = []
pred = pred.max(dim=2)[1] # (batch_size, num_points) the pred_class_idx of each point in each sample
pred_np = pred.cpu().data.numpy()
target_np = target.cpu().data.numpy()
for shape_idx in range(pred.size(0)): # sample_idx
part_ious = []
for part in range(num_classes): # iterate over all 50 part classes, regardless of object category
# target points are likewise labelled with one of the same 50 part classes
# returns 1 only when both prediction and target belong to this class, i.e. a correct point:
I = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))
# returns 1 when either prediction or target belongs to this class:
U = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))
F = np.sum(target_np[shape_idx] == part)
if F != 0:
iou = I / float(U) # iou across all points for this class
part_ious.append(iou) # append the iou of this class
shape_ious.append(np.mean(part_ious)) # each time append an average iou across all classes of this sample (sample_level!)
return shape_ious # [batch_size]
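# compute_overall_iou thus yields one instance-level mIoU per sample: part
# classes absent from the ground truth (F == 0) are skipped, so only parts the
# shape actually has contribute to its average.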
================================================
FILE: LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2021, University of Michigan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: PCT_Pytorch/LICENSE
================================================
MIT License
Copyright (c) 2021 Strawberry-Eat-Mango
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: PCT_Pytorch/README.md
================================================
## PCT: Point Cloud Transformer
This is a PyTorch implementation of PCT: Point Cloud Transformer.
Paper link: https://arxiv.org/pdf/2012.09688.pdf
### Requirements
python >= 3.7
pytorch >= 1.6
h5py
scikit-learn
and the pointnet2 ops library:
```shell script
pip install pointnet2_ops_lib/.
```
The code builds on https://github.com/erikwijmans/Pointnet2_PyTorch, https://github.com/WangYueFt/dgcnn and https://github.com/MenghaoGuo/PCT
### Models
We achieve an accuracy of 93.2% on the ModelNet40 (http://modelnet.cs.princeton.edu/) validation set.
The pretrained model is stored at ./checkpoints/best/models/model.t7
### Example training and testing
```shell script
# train
python main.py --exp_name=train --num_points=1024 --use_sgd=True --batch_size 32 --epochs 250 --lr 0.0001
# test
python main.py --exp_name=test --num_points=1024 --use_sgd=True --eval=True --model_path=checkpoints/best/models/model.t7 --test_batch_size 8
```
### Citation
If it is helpful for your work, please cite this paper:
```latex
@misc{guo2020pct,
title={PCT: Point Cloud Transformer},
author={Meng-Hao Guo and Jun-Xiong Cai and Zheng-Ning Liu and Tai-Jiang Mu and Ralph R. Martin and Shi-Min Hu},
year={2020},
eprint={2012.09688},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
================================================
FILE: PCT_Pytorch/checkpoints/best/models/model.t7
================================================
[File too large to display: 11.0 MB]
================================================
FILE: PCT_Pytorch/data.py
================================================
import os
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
def download():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(partition):
download()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5'%partition)):
        f = h5py.File(h5_name, 'r')  # open read-only; omitting the mode is deprecated in newer h5py
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def random_point_dropout(pc, max_dropout_ratio=0.875):
    ''' pc: Nx3 point cloud; overwrites a random subset of points with the first point '''
    dropout_ratio = np.random.random() * max_dropout_ratio  # uniform in [0, 0.875)
    drop_idx = np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0]
    if len(drop_idx) > 0:
        pc[drop_idx, :] = pc[0, :]  # set dropped points to the first point
return pc
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
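
# Editorial sketch of the train-time pipeline used in __getitem__ below: all
# three augmentations preserve the (N, 3) shape, so a quick check suffices.
def _augmentation_demo(num_points=1024):
    pc = np.random.rand(num_points, 3).astype('float32')
    pc = random_point_dropout(pc)   # dropped points are overwritten by the first point
    pc = translate_pointcloud(pc)   # anisotropic scale in [2/3, 3/2] plus a shift in [-0.2, 0.2]
    pc = jitter_pointcloud(pc)      # clipped Gaussian jitter per coordinate
    assert pc.shape == (num_points, 3)
    return pc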
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
            pointcloud = random_point_dropout(pointcloud)  # enabled for DGCNN training; not applied for all models
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
for data, label in train:
print(data.shape)
print(label.shape)
================================================
FILE: PCT_Pytorch/main.py
================================================
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from data import ModelNet40
from model import Pct  # absolute import so the script can be run directly via `python main.py`
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
import time
def _init_():
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists('checkpoints/'+args.exp_name):
os.makedirs('checkpoints/'+args.exp_name)
if not os.path.exists('checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('checkpoints/'+args.exp_name+'/'+'models')
os.system('cp main.py checkpoints'+'/'+args.exp_name+'/'+'main.py.backup')
os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
os.system('cp util.py checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')
os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
def train(args, io):
train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
model = Pct(args).to(device)
print(str(model))
model = nn.DataParallel(model)
    if args.use_sgd:
        print("Use SGD")
        # the base SGD lr is 100x args.lr; the cosine schedule below anneals it back down to args.lr
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=5e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
criterion = cal_loss
best_test_acc = 0
for epoch in range(args.epochs):
        scheduler.step()  # stepping at epoch start follows the pre-1.1 PyTorch convention; newer versions emit a warning
train_loss = 0.0
count = 0.0
model.train()
train_pred = []
train_true = []
idx = 0
total_time = 0.0
for data, label in (train_loader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
start_time = time.time()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
opt.step()
end_time = time.time()
total_time += (end_time - start_time)
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
idx += 1
print ('train total time is',total_time)
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
train_loss*1.0/count,
metrics.accuracy_score(
train_true, train_pred),
metrics.balanced_accuracy_score(
train_true, train_pred))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
total_time = 0.0
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
start_time = time.time()
logits = model(data)
end_time = time.time()
total_time += (end_time - start_time)
loss = criterion(logits, label)
preds = logits.max(dim=1)[1]
count += batch_size
test_loss += loss.item() * batch_size
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
print ('test total time is', total_time)
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
test_loss*1.0/count,
test_acc,
avg_per_class_acc)
io.cprint(outstr)
if test_acc >= best_test_acc:
best_test_acc = test_acc
torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
def test(args, io):
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
model = Pct(args).to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_true = []
test_pred = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = model(data)
preds = logits.max(dim=1)[1]
if args.test_batch_size == 1:
test_true.append([label.cpu().numpy()])
test_pred.append([preds.detach().cpu().numpy()])
else:
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Recognition')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
choices=['modelnet40'])
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of training batch')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of test batch')
    parser.add_argument('--epochs', type=int, default=250, metavar='N',
                        help='number of epochs to train')
    # note: argparse's type=bool treats any non-empty string as True, so pass no value to get False
    parser.add_argument('--use_sgd', type=bool, default=True,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 0.0001; scaled x100 when using SGD)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda', type=bool, default=False,
                        help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout rate')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
args = parser.parse_args()
_init_()
io = IOStream('checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(args.seed)
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
test(args, io)
================================================
FILE: PCT_Pytorch/model.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from util import sample_and_group  # absolute import, consistent with model_new.py and direct script usage
class Local_op(nn.Module):
def __init__(self, in_channels, out_channels):
super(Local_op, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(out_channels)
self.bn2 = nn.BatchNorm1d(out_channels)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = F.relu(self.bn1(self.conv1(x))) # B, D, N
x = F.relu(self.bn2(self.conv2(x))) # B, D, N
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class Pct(nn.Module):
def __init__(self, args, output_channels=40):
super(Pct, self).__init__()
self.args = args
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
self.pt_last = Point_Transformer_Last(args)
self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2))
self.linear1 = nn.Linear(1024, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
# B, D, N
x = F.relu(self.bn2(self.conv2(x)))
x = x.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=512, radius=0.15, nsample=32, xyz=xyz, points=x)
feature_0 = self.gather_local_0(new_feature)
feature = feature_0.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=256, radius=0.2, nsample=32, xyz=new_xyz, points=feature)
feature_1 = self.gather_local_1(new_feature)
x = self.pt_last(feature_1)
x = torch.cat([x, feature_1], dim=1)
x = self.conv_fuse(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
class Point_Transformer_Last(nn.Module):
def __init__(self, args, channels=256):
super(Point_Transformer_Last, self).__init__()
self.args = args
self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(channels)
self.bn2 = nn.BatchNorm1d(channels)
self.sa1 = SA_Layer(channels)
self.sa2 = SA_Layer(channels)
self.sa3 = SA_Layer(channels)
self.sa4 = SA_Layer(channels)
def forward(self, x):
#
# b, 3, npoint, nsample
# conv2d 3 -> 128 channels 1, 1
# b * npoint, c, nsample
# permute reshape
batch_size, _, N = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x1 = self.sa1(x)
x2 = self.sa2(x1)
x3 = self.sa3(x2)
x4 = self.sa4(x3)
x = torch.cat((x1, x2, x3, x4), dim=1)
return x
class SA_Layer(nn.Module):
def __init__(self, channels):
super(SA_Layer, self).__init__()
self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        # queries and keys intentionally share weights, as in the PCT reference code
        self.q_conv.weight = self.k_conv.weight
        self.q_conv.bias = self.k_conv.bias
self.v_conv = nn.Conv1d(channels, channels, 1)
self.trans_conv = nn.Conv1d(channels, channels, 1)
self.after_norm = nn.BatchNorm1d(channels)
self.act = nn.ReLU()
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
# b, n, c
x_q = self.q_conv(x).permute(0, 2, 1)
# b, c, n
x_k = self.k_conv(x)
x_v = self.v_conv(x)
# b, n, n
energy = torch.bmm(x_q, x_k)
attention = self.softmax(energy)
attention = attention / (1e-9 + attention.sum(dim=1, keepdim=True))
# b, c, n
x_r = torch.bmm(x_v, attention)
x_r = self.act(self.after_norm(self.trans_conv(x - x_r)))
x = x + x_r
return x
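
# Editorial note: unlike standard attention, where softmax alone makes each row
# of the attention matrix sum to 1, SA_Layer applies softmax over the last dim
# and then an L1 re-normalization over dim=1, so each *column* sums to ~1. A
# minimal sketch of that double normalization on random scores:
def _offset_attention_norm_demo(n=5):
    energy = torch.randn(1, n, n)
    attention = F.softmax(energy, dim=-1)
    attention = attention / (1e-9 + attention.sum(dim=1, keepdim=True))
    return attention.sum(dim=1)  # approximately all ones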
================================================
FILE: PCT_Pytorch/model_new.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from util import sample_and_group
class Local_op(nn.Module):
def __init__(self, in_channels, out_channels):
super(Local_op, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(out_channels)
self.bn2 = nn.BatchNorm1d(out_channels)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = F.relu(self.bn1(self.conv1(x))) # B, D, N
x = F.relu(self.bn2(self.conv2(x))) # B, D, N
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class Pct(nn.Module):
def __init__(self, args, output_channels=40):
super(Pct, self).__init__()
self.args = args
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
self.pt_last = Point_Transformer_Last(args)
self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2))
self.linear1 = nn.Linear(1024, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
# B, D, N
x = F.relu(self.bn2(self.conv2(x)))
x = x.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=512, radius=0.15, nsample=32, xyz=xyz, points=x)
feature_0 = self.gather_local_0(new_feature)
feature = feature_0.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=256, radius=0.2, nsample=32, xyz=new_xyz, points=feature)
feature_1 = self.gather_local_1(new_feature)
x = self.pt_last(feature_1, new_xyz)
x = torch.cat([x, feature_1], dim=1)
x = self.conv_fuse(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
class Point_Transformer_Last(nn.Module):
def __init__(self, args, channels=256):
super(Point_Transformer_Last, self).__init__()
self.args = args
self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
self.pos_xyz = nn.Conv1d(3, channels, 1)
self.bn1 = nn.BatchNorm1d(channels)
self.sa1 = SA_Layer(channels)
self.sa2 = SA_Layer(channels)
self.sa3 = SA_Layer(channels)
self.sa4 = SA_Layer(channels)
def forward(self, x, xyz):
#
# b, 3, npoint, nsample
# conv2d 3 -> 128 channels 1, 1
# b * npoint, c, nsample
# permute reshape
batch_size, _, N = x.size()
xyz = xyz.permute(0, 2, 1)
xyz = self.pos_xyz(xyz)
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
x1 = self.sa1(x, xyz)
x2 = self.sa2(x1, xyz)
x3 = self.sa3(x2, xyz)
x4 = self.sa4(x3, xyz)
x = torch.cat((x1, x2, x3, x4), dim=1)
return x
class SA_Layer(nn.Module):
def __init__(self, channels):
super(SA_Layer, self).__init__()
self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
self.q_conv.weight = self.k_conv.weight
self.q_conv.bias = self.k_conv.bias
self.v_conv = nn.Conv1d(channels, channels, 1)
self.trans_conv = nn.Conv1d(channels, channels, 1)
self.after_norm = nn.BatchNorm1d(channels)
self.act = nn.ReLU()
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, xyz):
# b, n, c
x = x + xyz
x_q = self.q_conv(x).permute(0, 2, 1)
# b, c, n
x_k = self.k_conv(x)
x_v = self.v_conv(x)
# b, n, n
energy = torch.bmm(x_q, x_k)
attention = self.softmax(energy)
attention = attention / (1e-9 + attention.sum(dim=1, keepdim=True))
# b, c, n
x_r = torch.bmm(x_v, attention)
x_r = self.act(self.after_norm(self.trans_conv(x - x_r)))
x = x + x_r
return x
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/MANIFEST.in
================================================
graft pointnet2_ops/_ext-src
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/__init__.py
================================================
import pointnet2_ops.pointnet2_modules
import pointnet2_ops.pointnet2_utils
from pointnet2_ops._version import __version__
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/ball_query.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/cuda_utils.h
================================================
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define TOTAL_THREADS 512
inline int opt_n_threads(int work_size) {
  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
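// Example (editorial note): opt_n_threads(100) computes log2(100) ~= 6.64,
// truncates to 6, and returns 1 << 6 = 64 threads; results are clamped to the
// range [1, TOTAL_THREADS].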
inline dim3 opt_block_config(int x, int y) {
const int x_threads = opt_n_threads(x);
const int y_threads =
max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
dim3 block_config(x_threads, y_threads, 1);
return block_config;
}
#define CUDA_CHECK_ERRORS() \
do { \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
__FILE__); \
exit(-1); \
} \
} while (0)
#endif
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/group_points.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor group_points(at::Tensor points, at::Tensor idx);
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/interpolate.h
================================================
#pragma once
#include <torch/extension.h>
#include <vector>
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight);
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/sampling.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor gather_points(at::Tensor points, at::Tensor idx);
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/include/utils.h
================================================
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#define CHECK_CUDA(x) \
do { \
AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
} while (0)
#define CHECK_IS_INT(x) \
do { \
AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \
#x " must be an int tensor"); \
} while (0)
#define CHECK_IS_FLOAT(x) \
do { \
AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
#x " must be a float tensor"); \
} while (0)
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query.cpp
================================================
#include "ball_query.h"
#include "utils.h"
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx);
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample) {
CHECK_CONTIGUOUS(new_xyz);
CHECK_CONTIGUOUS(xyz);
CHECK_IS_FLOAT(new_xyz);
CHECK_IS_FLOAT(xyz);
if (new_xyz.is_cuda()) {
CHECK_CUDA(xyz);
}
at::Tensor idx =
torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
at::device(new_xyz.device()).dtype(at::ScalarType::Int));
if (new_xyz.is_cuda()) {
query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
                                    radius, nsample, new_xyz.data_ptr<float>(),
                                    xyz.data_ptr<float>(), idx.data_ptr<int>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return idx;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/ball_query_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
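// Editorial note on the loop above: when the first in-radius point is found
// (cnt == 0), its index is written to every one of the nsample slots, so
// queries with fewer than nsample neighbors come back padded with the first
// neighbor's index rather than with uninitialized values.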
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/bindings.cpp
================================================
#include "ball_query.h"
#include "group_points.h"
#include "interpolate.h"
#include "sampling.h"
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("gather_points", &gather_points);
m.def("gather_points_grad", &gather_points_grad);
m.def("furthest_point_sampling", &furthest_point_sampling);
m.def("three_nn", &three_nn);
m.def("three_interpolate", &three_interpolate);
m.def("three_interpolate_grad", &three_interpolate_grad);
m.def("ball_query", &ball_query);
m.def("group_points", &group_points);
m.def("group_points_grad", &group_points_grad);
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points.cpp
================================================
#include "group_points.h"
#include "utils.h"
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out);
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points);
at::Tensor group_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.is_cuda()) {
group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
idx.size(1), idx.size(2),
                                points.data_ptr<float>(), idx.data_ptr<int>(),
                                output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.is_cuda()) {
group_points_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
        output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/group_points_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * npoints * nsample * c;
idx += batch_index * npoints * nsample;
grad_points += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
atomicAdd(grad_points + l * n + ii,
grad_out[(l * npoints + j) * nsample + k]);
}
}
}
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate.cpp
================================================
#include "interpolate.h"
#include "utils.h"
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx);
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out);
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points);
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
CHECK_CONTIGUOUS(unknowns);
CHECK_CONTIGUOUS(knows);
CHECK_IS_FLOAT(unknowns);
CHECK_IS_FLOAT(knows);
if (unknowns.is_cuda()) {
CHECK_CUDA(knows);
}
at::Tensor idx =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Int));
at::Tensor dist2 =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Float));
if (unknowns.is_cuda()) {
three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
                            unknowns.data_ptr<float>(), knows.data_ptr<float>(),
                            dist2.data_ptr<float>(), idx.data_ptr<int>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return {dist2, idx};
}
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (points.is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.is_cuda()) {
three_interpolate_kernel_wrapper(
points.size(0), points.size(1), points.size(2), idx.size(1),
        points.data_ptr<float>(), idx.data_ptr<int>(), weight.data_ptr<float>(),
        output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (grad_out.is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), m},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.is_cuda()) {
three_interpolate_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
        weight.data_ptr<float>(), output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/interpolate_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: unknown(b, n, 3) known(b, m, 3)
// output: dist2(b, n, 3), idx(b, n, 3)
__global__ void three_nn_kernel(int b, int n, int m,
const float *__restrict__ unknown,
const float *__restrict__ known,
float *__restrict__ dist2,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
unknown += batch_index * n * 3;
known += batch_index * m * 3;
dist2 += batch_index * n * 3;
idx += batch_index * n * 3;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < n; j += stride) {
float ux = unknown[j * 3 + 0];
float uy = unknown[j * 3 + 1];
float uz = unknown[j * 3 + 2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2;
besti3 = besti2;
best2 = best1;
besti2 = besti1;
best1 = d;
besti1 = k;
} else if (d < best2) {
best3 = best2;
besti3 = besti2;
best2 = d;
besti2 = k;
} else if (d < best3) {
best3 = d;
besti3 = k;
}
}
dist2[j * 3 + 0] = best1;
dist2[j * 3 + 1] = best2;
dist2[j * 3 + 2] = best3;
idx[j * 3 + 0] = besti1;
idx[j * 3 + 1] = besti2;
idx[j * 3 + 2] = besti3;
}
}
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
                                                      dist2, idx);
CUDA_CHECK_ERRORS();
}
// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
// output: out(b, c, n)
__global__ void three_interpolate_kernel(int b, int c, int m, int n,
const float *__restrict__ points,
const int *__restrict__ idx,
const float *__restrict__ weight,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * m * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
out += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
points[l * m + i3] * w3;
}
}
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, m, n, points, idx, weight, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
// output: grad_points(b, c, m)
__global__ void three_interpolate_grad_kernel(
int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * n * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
grad_points += batch_index * m * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
}
}
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, n, m, grad_out, idx, weight, grad_points);
CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling.cpp
================================================
#include "sampling.h"
#include "utils.h"
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out);
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points);
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs);
at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.is_cuda()) {
gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
                                 idx.size(1), points.data_ptr<float>(),
                                 idx.data_ptr<int>(), output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.is_cuda()) {
gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
                                      idx.size(1), grad_out.data_ptr<float>(),
                                      idx.data_ptr<int>(),
                                      output.data_ptr<float>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
CHECK_CONTIGUOUS(points);
CHECK_IS_FLOAT(points);
at::Tensor output =
torch::zeros({points.size(0), nsamples},
at::device(points.device()).dtype(at::ScalarType::Int));
at::Tensor tmp =
torch::full({points.size(0), points.size(1)}, 1e10,
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.is_cuda()) {
furthest_point_sampling_kernel_wrapper(
        points.size(0), points.size(1), nsamples, points.data_ptr<float>(),
        tmp.data_ptr<float>(), output.data_ptr<int>());
} else {
AT_ASSERT(false, "CPU not supported");
}
return output;
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
__global__ void gather_points_kernel(int b, int c, int n, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
}
}
}
}
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out) {
  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
                                                             points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
atomicAdd(grad_points + (i * c + l) * n + a,
grad_out[(i * c + l) * m + j]);
}
}
}
}
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points) {
  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                              at::cuda::getCurrentCUDAStream()>>>(
      b, c, n, npoints, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output idxs (b, m)
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
if (mag <= 1e-3) continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs) {
unsigned int n_threads = opt_n_threads(n);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  switch (n_threads) {
    case 512:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_kernel<256>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_kernel<128>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_kernel<64>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_kernel<32>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_kernel<16>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_kernel<8>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_kernel<4>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_kernel<2>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_kernel<1>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }
CUDA_CHECK_ERRORS();
}
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/_version.py
================================================
__version__ = "3.0.0"
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py
================================================
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
layers = []
for i in range(1, len(mlp_spec)):
layers.append(
nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1, bias=not bn)
)
if bn:
layers.append(nn.BatchNorm2d(mlp_spec[i]))
layers.append(nn.ReLU(True))
return nn.Sequential(*layers)
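
# e.g. build_shared_mlp([3, 64, 128]) yields Conv2d(3->64, 1x1) + BN + ReLU
# followed by Conv2d(64->128, 1x1) + BN + ReLU, applied pointwise.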
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(
self, xyz: torch.Tensor, features: Optional[torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = (
pointnet2_utils.gather_operation(
xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
)
.transpose(1, 2)
.contiguous()
if self.npoint is not None
else None
)
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
# type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
super(PointnetSAModuleMSG, self).__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None
else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(build_shared_mlp(mlp_spec, bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
):
# type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
super(PointnetSAModule, self).__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz,
)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, mlp, bn=True):
# type: (PointnetFPModule, List[int], bool) -> None
super(PointnetFPModule, self).__init__()
self.mlp = build_shared_mlp(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
# type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
            interpolated_feats = known_feats.expand(
                *(known_feats.size()[0:2] + (unknown.size(1),))
            )
if unknow_feats is not None:
new_features = torch.cat(
[interpolated_feats, unknow_feats], dim=1
) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
================================================
import torch
import torch.nn as nn
import warnings
from torch.autograd import Function
from typing import *
try:
import pointnet2_ops._ext as _ext
except ImportError:
from torch.utils.cpp_extension import load
import glob
import os.path as osp
import os
warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.")
_ext_src_root = osp.join(osp.dirname(__file__), "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
_ext = load(
"_ext",
sources=_ext_sources,
extra_include_paths=[osp.join(_ext_src_root, "include")],
extra_cflags=["-O3"],
extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"],
with_cuda=True,
)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
out = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(out)
return out
@staticmethod
def backward(ctx, grad_out):
return ()
furthest_point_sample = FurthestPointSampling.apply
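
# A pure-PyTorch reference of furthest point sampling (editorial sketch; the
# CUDA kernel is the actual implementation, this exists only for readability).
# Note the kernel additionally skips near-origin points (mag <= 1e-3), which
# this sketch omits.
def _furthest_point_sample_reference(xyz, npoint):
    # xyz: (B, N, 3) -> (B, npoint) long indices of the sampled points
    B, N, _ = xyz.shape
    idxs = torch.zeros(B, npoint, dtype=torch.long, device=xyz.device)
    dist = torch.full((B, N), 1e10, device=xyz.device)
    farthest = torch.zeros(B, dtype=torch.long, device=xyz.device)
    batch = torch.arange(B, device=xyz.device)
    for i in range(npoint):
        idxs[:, i] = farthest
        centroid = xyz[batch, farthest].unsqueeze(1)  # (B, 1, 3)
        dist = torch.minimum(dist, ((xyz - centroid) ** 2).sum(-1))
        farthest = dist.argmax(-1)  # point with the largest min-distance so far
    return idxs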
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the query points
        known : torch.Tensor
            (B, m, 3) tensor of the reference points searched for neighbors
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
dist = torch.sqrt(dist2)
ctx.mark_non_differentiable(dist, idx)
return dist, idx
@staticmethod
def backward(ctx, grad_dist, grad_idx):
return ()
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
ctx.save_for_backward(idx, weight, features)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, features = ctx.saved_tensors
m = features.size(2)
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, torch.zeros_like(idx), torch.zeros_like(weight)
three_interpolate = ThreeInterpolate.apply
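# Illustrative usage (assumes CUDA tensors): PointNet++-style feature
# propagation from m known points to n unknown points:
#   dist, idx = three_nn(unknown, known)                   # both (B, n, 3)
#   weight = 1.0 / (dist + 1e-8)
#   weight = weight / weight.sum(dim=2, keepdim=True)      # normalize weights
#   interpolated = three_interpolate(feats, idx, weight)   # (B, c, m) -> (B, c, n)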
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
(B, npoint, nsample) tensor containing the indicies of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, torch.zeros_like(idx)
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
(B, npoint, nsample) tensor with the indicies of the features that form the query balls
"""
output = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_out):
return ()
ball_query = BallQuery.apply
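# Illustrative usage (assumes CUDA tensors): group up to 32 neighbors within
# radius 0.2 around each query center, as QueryAndGroup does below:
#   idx = ball_query(0.2, 32, xyz, new_xyz)                # (B, npoint, 32)
#   grouped = grouping_operation(features, idx)            # (B, C, npoint, 32)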
class QueryAndGroup(nn.Module):
r"""
    Groups with a ball query of radius
    Parameters
    ----------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
            assert (
                self.use_xyz
            ), "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
================================================
FILE: PCT_Pytorch/pointnet2_ops_lib/setup.py
================================================
import glob
import os
import os.path as osp
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = osp.dirname(osp.abspath(__file__))
_ext_src_root = osp.join("pointnet2_ops", "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
requirements = ["torch>=1.4"]
exec(open(osp.join("pointnet2_ops", "_version.py")).read())
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
setup(
name="pointnet2_ops",
version=__version__,
author="Erik Wijmans",
packages=find_packages(),
install_requires=requirements,
ext_modules=[
CUDAExtension(
name="pointnet2_ops._ext",
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[osp.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={"build_ext": BuildExtension},
include_package_data=True,
)
================================================
FILE: PCT_Pytorch/test.sh
================================================
python main.py --exp_name=test --num_points=1024 --use_sgd=True --eval=True --model_path=checkpoints/best/models/model.t7 --test_batch_size 8
================================================
FILE: PCT_Pytorch/train.sh
================================================
CUDA_VISIBLE_DEVICES=0 python3.7 main.py --exp_name=train --num_points=1024 --use_sgd=True --batch_size 32 --epochs 250 --lr 0.0001
================================================
FILE: PCT_Pytorch/util.py
================================================
import torch
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
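# Illustrative arithmetic for the smoothing branch above: with eps = 0.2 and
# n_class = 40, the target distribution puts 1 - eps = 0.8 on the true class
# and eps / (n_class - 1) = 0.2 / 39 ~ 0.0051 on each of the other classes.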
class IOStream():
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text+'\n')
self.f.flush()
def close(self):
self.f.close()
def square_distance(src, dst):
"""
    Calculate squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
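# Worked example for the identity above: src = [[[0, 0, 0]]], dst = [[[1, 2, 2]]]
# gives dist = 0 + (1 + 4 + 4) - 2 * 0 = 9, i.e. the squared distance 3^2.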
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
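# Note on the sentinel trick above: out-of-radius points are assigned index N,
# which sorts to the end; any sentinel left in the top-nsample slice is then
# replaced by the first valid in-radius index, padding under-full balls.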
def knn_point(nsample, xyz, new_xyz):
"""
Input:
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim = -1, largest=False, sorted=False)
return group_idx
def sample_and_group(npoint, radius, nsample, xyz, points):
"""
Input:
npoint:
radius:
nsample:
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, npoint, nsample, 3]
new_points: sampled points data, [B, npoint, nsample, 3+D]
"""
B, N, C = xyz.shape
S = npoint
xyz = xyz.contiguous()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, npoint).long() # [B, npoint]
new_xyz = index_points(xyz, fps_idx)
new_points = index_points(points, fps_idx)
# new_xyz = xyz[:]
# new_points = points[:]
idx = knn_point(nsample, xyz, new_xyz)
#idx = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)
grouped_points = index_points(points, idx)
grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)
new_points = torch.cat([grouped_points_norm, new_points.view(B, S, 1, -1).repeat(1, 1, nsample, 1)], dim=-1)
return new_xyz, new_points
================================================
FILE: README.md
================================================
# Benchmarking Robustness of 3D Point Cloud Recognition against Common Corruptions
[](https://paperswithcode.com/sota/3d-point-cloud-classification-on-modelnet40-c?p=benchmarking-robustness-of-3d-point-cloud)
This repo contains the dataset and code for the paper [Benchmarking Robustness of 3D Point Cloud Recognition against Common Corruptions](https://arxiv.org/abs/2201.12296) by Jiachen Sun et al. This codebase is based on [SimpleView](https://github.com/princeton-vl/SimpleView), and we thank the authors for their great contributions.
## ModelNet40-C


More visualizations can be found [here](https://github.com/jiachens/ModelNet40-C/blob/master/img).
[Download ModelNet40-C from Google Drive.](https://drive.google.com/drive/folders/10YeQRh92r_WdL-Dnog2zQfFr03UW4qXX?usp=sharing)
[Download ModelNet40-C using our provided script.](#download-datasets-including-modelnet40-c-and-pre-trained-models)
[Download ModelNet40-C from Zenodo.](https://zenodo.org/record/6017834#.YgNeKu7MK3J)
## ModelNet40-C Leaderboard
**Architecture+Data Augmentation Leaderboard**
| **Architecture** | **Data Augmentation** | **Corruption Error Rate (%)** | **Clean Error Rate (%)** | **Checkpoint** |
|------------------|---------------|:-------------------------:|:--------------------:|--------------------------------------------------------------------------------------------------|
| PCT | PointCutMix-R | 16.3 | 7.2 | [checkpoint](https://drive.google.com/file/d/1OcH0o7V_RhAOj9pSuS39G43VrBWS-1v3/view?usp=sharing) |
| PCT | PointCutMix-K | 16.5 | 6.9 | [checkpoint](https://drive.google.com/file/d/1T4KwMkgAqAItHZc-Q96H1qGMPNoObJkJ/view?usp=sharing) |
| DGCNN | PointCutMix-R | 17.3 | 6.8 | [checkpoint](https://drive.google.com/file/d/1Z_6D_MmjecDHhY2q-I-aok9nlD9RkAS1/view?usp=sharing) |
| PCT | RSMix | 17.3 | 6.9 | [checkpoint](https://drive.google.com/file/d/18BqbMCpdbEGdyQVdMwYPDrff5bmgeF9B/view?usp=sharing) |
| DGCNN | PointCutMix-K | 17.3 | 7.4 | [checkpoint](https://drive.google.com/file/d/1rUQApmyEJUpv7JzkhJuwEeZOmmZ9vEDU/view?usp=sharing) |
| RSCNN | PointCutMix-R | 17.9 | 7.6 | [checkpoint](https://drive.google.com/file/d/1EggUiFcCgpHOwjgQKRgBxST1utfOAryc/view?usp=sharing) |
| DGCNN | RSMix | 18.1 | 7.1 | [checkpoint](https://drive.google.com/file/d/11tNaF-YsJ6hZNm2pY6LX6Ny-ceGkI0Cr/view?usp=sharing) |
| PCT | PGD Adv Train | 18.4 | 8.9 | [checkpoint](https://drive.google.com/file/d/1Y7JaW-CLPCcqQQGiuL9BfVKfAkm6MxEA/view?usp=sharing) |
| PointNet++ | PointCutMix-R | 19.1 | 7.1 | [checkpoint](https://drive.google.com/file/d/1un_H1oq18MrN604mbR3htBNqdOgnXqwQ/view?usp=sharing) |
| PointNet++ | PointMixup | 19.3 | 7.1 | [checkpoint](https://drive.google.com/file/d/1fzFOeJcenn7a4glsfs7IEcSTjovZoAkB/view?usp=sharing) |
| PCT | PointMixup | 19.5 | 7.4 | [checkpoint](https://drive.google.com/file/d/1OcBm-PCImcW8h1mb9ZY4CcX2nDN_rB8b/view?usp=sharing) |
| SimpleView | PointCutMix-R | 19.7 | 7.9 | [checkpoint](https://drive.google.com/file/d/178LQKtmCeNIbdPXYZXZHRmAQt-YCY_eL/view?usp=sharing) |
| RSCNN | PointMixup | 19.8 | 7.2 | [checkpoint](https://drive.google.com/file/d/1FRPU_QTR3vda1CqPWKkREprIZshv4cYk/view?usp=sharing) |
| PointNet++ | PointCutMix-K | 20.2 | 6.7 | [checkpoint](https://drive.google.com/file/d/1JLL7ym-fMUS4VFisf-AENB5trYJb_0-J/view?usp=sharing) |
We allow users to directly download all pre-trained models with every data augmentation method [here](#download-datasets-including-modelnet40-c-and-pre-trained-models).
**Architecture Leaderboard**
| **Architecture** | **Corruption Error Rate (%)** | **Clean Error Rate (%)** | **Checkpoint** |
|------------------|:-------------------------:|:--------------------:|--------------------------------------------------------------------------------------------------|
| CurveNet | 22.7 | 6.6 | checkpoint |
| PointNet++ | 23.6 | 7.0 | [checkpoint](https://drive.google.com/file/d/18_297KJ8slsJq1rGDsvuQ29VICs-EJTa/view?usp=sharing) |
| PCT | 25.5 | 7.1 | [checkpoint](https://drive.google.com/file/d/1NFAhupQKn-sBLYW1YpUAf4jdqMpFcV7Z/view?usp=sharing) |
| GDANet | 25.6 | 7.5 | checkpoint |
| DGCNN | 25.9 | 7.4 | [checkpoint](https://drive.google.com/file/d/1JMCmujJM4J_OyxuZuDN4befFmtG1_p49/view?usp=sharing) |
| RSCNN | 26.2 | 7.7 | [checkpoint](https://drive.google.com/file/d/1RKhXKjZvKvZM2the2qqFhnytAX2H634U/view?usp=sharing) |
| SimpleView | 27.2 | 6.1 | [checkpoint](https://drive.google.com/file/d/1jscF5p3Q7DHWl-FgGGemQP3CeXITsTyY/view?usp=sharing) |
| PointNet | 28.3 | 9.3 | [checkpoint](https://drive.google.com/file/d/1eW26u0nm6HETwDSiCyCEoLLY3WnOVt73/view?usp=sharing) |
| PointMLP | 31.9 | 6.3 | checkpoint |
| PointMLP-Elite | 32.4 | 7.2 | checkpoint |
More models' results coming soon...
We allow users to directly download all pre-trained models with standard training [here](#download-datasets-including-modelnet40-c-and-pre-trained-models).
## Getting Started
First clone the repository. We would refer to the directory containing the code as `ModelNet40-C`.
```
git clone --recurse-submodules git@github.com:jiachens/ModelNet40-C.git
```
#### Requirements
The code is tested on Linux OS with Python version **3.7.5**, CUDA version **10.0**, CuDNN version **7.6** and GCC version **5.4**. We recommend using these versions especially for installing [pointnet++ custom CUDA modules](https://github.com/erikwijmans/Pointnet2_PyTorch/tree/22e8cf527b696b63b66f3873d80ae5f93744bdef).
[02-23-2022] The updated codes are tested on Python version **3.7.5**, CUDA version **11.4**, CuDNN version **8.2** and GCC version **7.5** with the latest ```torch``` and ```torchvision``` libs, but we still suggest the original setup in case of any unstable bugs.
#### Install Libraries
We recommend you first install [Anaconda](https://anaconda.org/) and create a virtual environment.
```
conda create --name modelnetc python=3.7.5
```
Activate the virtual environment and install the libraries. Make sure you are in `ModelNet40-C`.
```
conda activate modelnetc
pip install -r requirements.txt
conda install sed # for downloading data and pretrained models
```
For PointNet++, we need to install custom CUDA modules. Make sure you have access to a GPU during this step. You might need to set the appropriate `TORCH_CUDA_ARCH_LIST` environment variable depending on your GPU model. The following command should work for most cases `export TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5"`. However, if the install fails, check if `TORCH_CUDA_ARCH_LIST` is correctly set. More details could be found [here](https://en.wikipedia.org/wiki/CUDA#GPUs_supported).
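If you are unsure which values apply to your GPU, one way to check its compute capability (the numbers that make up `TORCH_CUDA_ARCH_LIST`) is through PyTorch itself, for example:
```
python -c "import torch; print(torch.cuda.get_device_capability())"
```
A device that reports `(7, 5)` corresponds to the `7.5` entry.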
Third-party modules `pointnet2_pyt`, `PCT_Pytorch`, `emd`, and `PyGeM` can be installed by the following script.
```
./setup.sh
```
#### Download Datasets Including ModelNet40-C and Pre-trained Models
Make sure you are in `ModelNet40-C`. The `download.sh` script can be used to download all the data and the pretrained models; it also places them at the correct locations.
To download ModelNet40, execute the following command. This will download the ModelNet40 point cloud dataset released with PointNet++ as well as the validation splits used in our work.
```
./download.sh modelnet40
```
To generate the ModelNet40-C dataset, please run:
```
python data/process.py
python data/generate_c.py
```
NOTE that the generation requires a connected monitor, since the Open3D library does not support headless (background) rendering.
We also allow users to download ModelNet40-C directly. Please fill this [Google form](https://docs.google.com/forms/d/e/1FAIpQLSdrzt8EtQdjGMlwIwWAzb39KzzVzijpK6-sPEaps07MjQwGGQ/viewform?usp=sf_link) while downloading our dataset.
```
./download.sh modelnet40_c
```
To download the pretrained models with standard training recipe, execute the following command.
```
./download.sh cor_exp
```
To download the pretrained models using different data augmentation strategies, execute the following command.
```
./download.sh runs
```
#### New Features
\[02-23-2022\]
- We include PointMLP-Elite and GDANet in our benchmark
\[02-18-2022\]
- We include CurveNet and PointMLP in our benchmark
\[01-28-2022\]
- We include Point Cloud Transformer (PCT) in our benchmark
- `ModelNet40-C/configs` contains config files to enable different data augmentations and test-time adaptation methods
- `ModelNet40-C/aug_utils.py` contains the data augmentation codes in our paper
- `ModelNet40-C/third_party` contains the test-time adaptation used in our paper
#### Code Organization In Original SimpleView
- `ModelNet40-C/models`: Code for various models in PyTorch.
- `ModelNet40-C/configs`: Configuration files for various models.
- `ModelNet40-C/main.py`: Training and testing any models.
- `ModelNet40-C/configs.py`: Hyperparameters for different models and dataloader.
- `ModelNet40-C/dataloader.py`: Code for different variants of the dataloader.
- `ModelNet40-C/*_utils.py`: Code for various utility functions.
## Running Experiments
#### Training and Config files
To train or test any model, we use the `main.py` script. The format for running this script is as follows.
```
python main.py --exp-config <path to the config>
```
The config files are named as `<protocol>_<model><_extra>_run_<seed>.yaml` (`<protocol> ∈ [dgcnn, pointnet2, rscnn]`; `<model> ∈ [dgcnn, pointnet2, rscnn, pointnet, simpleview]`; `<_extra>` is an optional tag such as `_0.25` or `_ce`). For example, the config file to run an experiment for PointNet++ in the DGCNN protocol with seed 1 is `dgcnn_pointnet2_run_1.yaml`. To run a new experiment with a different seed, you need to change the `SEED` parameter in the config file. All of our experiments are done based on seed 1.
We additionally provide training-time config files for PointCutMix (`configs/cutmix`), PointMixup (`configs/mixup`), RSMix (`configs/rsmix`), and PGD-based adversarial training (`configs/pgd`).
For example, to train PCT with PointCutMix-R, please use the following command:
```
python main.py --exp-config configs/cutmix/pct_r.yaml
```
#### Evaluate a pretrained model
We provide pretrained models. They can be downloaded using the `./download.sh cor_exp` and `./download.sh runs` commands and are stored in the `ModelNet40-C/runs` (for data augmentation recipes) and `ModelNet40-C/cor_exp` (for standard trained models) folders. To test a pretrained model, the command is of the following format:
```
python main.py --entry test --model-path <cor_exp or runs>/<cfg_name>/<model_name>.pth --exp-config configs/<cfg_name>.yaml
```
Additionally, we provide test-time config files in `configs/bn` and `configs/tent` for the BN and TENT adaptation methods in our paper, which are used with the same commands.
We list all the evaluation commands in the `eval_cor.sh`, `eval_og.sh`, and `eval_tent_cutmix.sh` scripts. Note that in `eval_cor.sh` it is expected that PGD with PointNet++, RSCNN, and SimpleView produces no outputs, since these models do not fit the adversarial training framework. We have mentioned this in our paper.
## Citation
Please cite our paper and SimpleView if you use our benchmark and analysis results. Thank you!
```
@article{sun2022benchmarking,
title={Benchmarking Robustness of 3D Point Cloud Recognition Against Common Corruptions},
author={Jiachen Sun and Qingzhao Zhang and Bhavya Kailkhura and Zhiding Yu and Chaowei Xiao and Z. Morley Mao},
journal={arXiv preprint arXiv:2201.12296},
year={2022}
}
```
```
@article{goyal2021revisiting,
title={Revisiting Point Cloud Shape Classification with a Simple and Effective Baseline},
author={Goyal, Ankit and Law, Hei and Liu, Bowei and Newell, Alejandro and Deng, Jia},
journal={International Conference on Machine Learning},
year={2021}
}
```
## References
[1] [Zhang, Jinlai, et al. "PointCutMix: Regularization Strategy for Point Cloud Classification." arXiv preprint arXiv:2101.01461 (2021).](https://arxiv.org/pdf/2101.01461.pdf)
[2] [Chen, Yunlu, et al. "Pointmixup: Augmentation for point clouds." Computer Vision–ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part III 16. Springer International Publishing, 2020.](https://link.springer.com/content/pdf/10.1007/978-3-030-58580-8_20.pdf)
[3] [Lee, Dogyoon, et al. "Regularization Strategy for Point Cloud via Rigidly Mixed Sample." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2021.](https://openaccess.thecvf.com/content/CVPR2021/papers/Lee_Regularization_Strategy_for_Point_Cloud_via_Rigidly_Mixed_Sample_CVPR_2021_paper.pdf)
[4] [Sun, Jiachen, et al. "Adversarially Robust 3D Point Cloud Recognition Using Self-Supervisions." Advances in Neural Information Processing Systems 34 (2021).](https://proceedings.neurips.cc/paper/2021/file/82cadb0649a3af4968404c9f6031b233-Paper.pdf)
[5] [Schneider, Steffen, et al. "Improving robustness against common corruptions by covariate shift adaptation." arXiv preprint arXiv:2006.16971 (2020).](https://arxiv.org/pdf/2006.16971.pdf)
[6] [Wang, Dequan, et al. "Tent: Fully test-time adaptation by entropy minimization." arXiv preprint arXiv:2006.10726 (2020).](https://arxiv.org/pdf/2006.10726.pdf)
[7] [Qi, Charles R., et al. "Pointnet: Deep learning on point sets for 3d classification and segmentation." Proceedings of the IEEE conference on computer vision and pattern recognition. 2017.](https://openaccess.thecvf.com/content_cvpr_2017/papers/Qi_PointNet_Deep_Learning_CVPR_2017_paper.pdf)
[8] [Qi, Charles R., et al. "Pointnet++: Deep hierarchical feature learning on point sets in a metric space." arXiv preprint arXiv:1706.02413 (2017).](https://arxiv.org/pdf/1706.02413.pdf)
[9] [Liu, Yongcheng, et al. "Relation-shape convolutional neural network for point cloud analysis." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019.](https://openaccess.thecvf.com/content_CVPR_2019/papers/Liu_Relation-Shape_Convolutional_Neural_Network_for_Point_Cloud_Analysis_CVPR_2019_paper.pdf)
[10] [Wang, Yue, et al. "Dynamic graph cnn for learning on point clouds." Acm Transactions On Graphics (tog) 38.5 (2019): 1-12.](https://dl.acm.org/doi/pdf/10.1145/3326362)
[11] [Goyal, Ankit, et al. "Revisiting Point Cloud Shape Classification with a Simple and Effective Baseline." arXiv preprint arXiv:2106.05304 (2021).](https://arxiv.org/pdf/2106.05304.pdf)
[12] [Guo, Meng-Hao, et al. "Pct: Point cloud transformer." Computational Visual Media 7.2 (2021): 187-199.](https://link.springer.com/content/pdf/10.1007/s41095-021-0229-5.pdf)
[13] [Xiang, Tiange, et al. "Walk in the cloud: Learning curves for point clouds shape analysis." Proceedings of the IEEE/CVF International Conference on Computer Vision. 2021.](https://arxiv.org/pdf/2105.01288.pdf)
[14] [Ma, Xu, et al. "Rethinking Network Design and Local Geometry in Point Cloud: A Simple Residual MLP Framework." arXiv preprint arXiv:2202.07123 (2022).](https://arxiv.org/pdf/2202.07123.pdf)
[15] [Learning Geometry-Disentangled Representation for Complementary Understanding of 3D Object Point Cloud.](https://arxiv.org/abs/2012.10921)
================================================
FILE: all_utils.py
================================================
import tensorboardX
import pdb
import sys
from collections.abc import MutableMapping, Hashable
import csv
import os
import torch
import torch.nn.functional as F
import numpy as np
from progressbar import ProgressBar
# Additional information that might be necessary to get the model
DATASET_NUM_CLASS = {
'modelnet40': 40,
'modelnet40_c': 40,
'modelnet40_rscnn': 40,
'modelnet40_pn2': 40,
'modelnet40_dgcnn': 40,
}
class TensorboardManager:
def __init__(self, path):
self.writer = tensorboardX.SummaryWriter(path)
def update(self, split, step, vals):
for k, v in vals.items():
self.writer.add_scalar('%s_%s' % (split, k), v, step)
def close(self):
self.writer.flush()
self.writer.close()
class TrackTrain:
def __init__(self, early_stop_patience):
self.early_stop_patience = early_stop_patience
self.counter = -1
self.best_epoch_val = -1
self.best_epoch_train = -1
self.best_epoch_test = -1
self.best_val = float("-inf")
self.best_test = float("-inf")
self.best_train = float("-inf")
self.test_best_val = float("-inf")
def record_epoch(self, epoch_id, train_metric, val_metric, test_metric):
assert epoch_id == (self.counter + 1)
self.counter += 1
if val_metric >= self.best_val:
self.best_val = val_metric
self.best_epoch_val = epoch_id
self.test_best_val = test_metric
if test_metric >= self.best_test:
self.best_test = test_metric
self.best_epoch_test = epoch_id
if train_metric >= self.best_train:
self.best_train = train_metric
self.best_epoch_train = epoch_id
def save_model(self, epoch_id, split):
"""
Whether to save the current model or not
:param epoch_id:
:param split:
:return:
"""
assert epoch_id == self.counter
if split == 'val':
if self.best_epoch_val == epoch_id:
_save_model = True
else:
_save_model = False
elif split == 'test':
if self.best_epoch_test == epoch_id:
_save_model = True
else:
_save_model = False
elif split == 'train':
if self.best_epoch_train == epoch_id:
_save_model = True
else:
_save_model = False
else:
assert False
return _save_model
def early_stop(self, epoch_id):
assert epoch_id == self.counter
if (epoch_id - self.best_epoch_val) > self.early_stop_patience:
return True
else:
return False
class PerfTrackVal:
"""
Records epoch wise performance for validation
"""
def __init__(self, task, extra_param=None):
self.task = task
if task in ['cls', 'cls_trans']:
assert extra_param is None
self.all = []
self.class_seen = None
self.class_corr = None
else:
assert False
def update(self, data_batch, out):
if self.task in ['cls', 'cls_trans']:
correct = self.get_correct_list(out['logit'], data_batch['label'])
self.all.extend(correct)
self.update_class_see_corr(out['logit'], data_batch['label'])
else:
assert False
def agg(self):
if self.task in ['cls', 'cls_trans']:
perf = {
'acc': self.get_avg_list(self.all),
                'class_acc': np.mean(np.array(self.class_corr) / np.array(self.class_seen, dtype=float))
}
else:
assert False
return perf
def update_class_see_corr(self, logit, label):
if self.class_seen is None:
num_class = logit.shape[1]
self.class_seen = [0] * num_class
self.class_corr = [0] * num_class
pred_label = logit.argmax(axis=1).to('cpu').tolist()
for _pred_label, _label in zip(pred_label, label):
self.class_seen[_label] += 1
if _pred_label == _label:
self.class_corr[_pred_label] += 1
@staticmethod
def get_correct_list(logit, label):
label = label.to(logit.device)
pred_class = logit.argmax(axis=1)
return (label == pred_class).to('cpu').tolist()
@staticmethod
def get_avg_list(all_list):
for x in all_list:
assert isinstance(x, bool)
return sum(all_list) / len(all_list)
class PerfTrackTrain(PerfTrackVal):
"""
Records epoch wise performance during training
"""
def __init__(self, task, extra_param=None):
super().__init__(task, extra_param)
# add a list to track loss
self.all_loss = []
def update_loss(self, loss):
self.all_loss.append(loss.item())
def agg_loss(self):
# print(self.all_loss)
return sum(self.all_loss) / len(self.all_loss)
def update_all(self, data_batch, out, loss):
self.update(data_batch, out)
self.update_loss(loss)
# source: https://github.com/WangYueFt/dgcnn/blob/master/pytorch/util.py
def smooth_loss(pred, gold):
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
return loss
def rscnn_voting_evaluate_cls(loader, model, data_batch_to_points_target,
points_to_inp, out_to_prob, log_file):
"""
:param loader:
:param model:
:param data_batch_to_points_target:
:param points_to_inp: transform the points to input for the particular model
that is evaluated
:param out_to_prob:
:return:
"""
import rs_cnn.data.data_utils as d_utils
import pointnet2.utils.pointnet2_utils as pointnet2_utils
import numpy as np
terminal = sys.stdout
log = open(log_file, "w")
NUM_REPEAT = 300
NUM_VOTE = 10
PointcloudScale = d_utils.PointcloudScale() # initialize random scaling
def data_aug(vote_id, pc):
# furthest point sampling
# (B, npoint)
fps_idx = pointnet2_utils.furthest_point_sample(points, 1200)
new_fps_idx = fps_idx[:, np.random.choice(1200, num_points, False)]
new_points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), new_fps_idx).transpose(1, 2).contiguous()
if vote_id > 0:
pc_out = PointcloudScale(new_points)
else:
pc_out = pc
return pc_out
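    # Note: data_aug closes over `points` and `num_points` from the enclosing
    # scope; vote 0 returns the input unchanged, while later votes return a
    # randomly scaled furthest-point-sampled copy.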
print(f"RSCNN EVALUATE, NUM_REPEAT {NUM_REPEAT}, NUM_VOTE {NUM_VOTE}")
num_points = loader.dataset.num_points
print(f"Number of points {num_points}")
# evaluate
sys.stdout.flush()
PointcloudScale = d_utils.PointcloudScale() # initialize random scaling
model.eval()
global_acc = 0
with torch.no_grad():
for i in range(NUM_REPEAT):
preds = []
labels = []
for j, data in enumerate(loader, 0):
points, target = data_batch_to_points_target(data)
points, target = points.cuda(), target.cuda()
pred = 0
for v in range(NUM_VOTE):
new_points = data_aug(v, points)
inp = points_to_inp(new_points)
out = model(**inp)
prob = out_to_prob(out)
pred += prob
# pred += F.softmax(model(**inp), dim = 1)
pred /= NUM_VOTE
target = target.view(-1)
_, pred_choice = torch.max(pred.data, -1)
preds.append(pred_choice)
labels.append(target.data)
preds = torch.cat(preds, 0)
labels = torch.cat(labels, 0)
acc = (preds == labels).sum().float() / labels.numel()
if acc > global_acc:
global_acc = acc
message1 = 'Repeat %3d \t Acc: %0.6f' % (i + 1, acc)
message2 = '\nBest voting till now, acc: %0.6f' % (global_acc)
message = f'{message1} \n {message2}'
terminal.write(message)
log.write(message)
message = '\nBest voting acc: %0.6f' % (global_acc)
terminal.write(message)
log.write(message)
return global_acc
# https://github.com/charlesq34/pointnet2/blob/master/evaluate.py
# https://github.com/charlesq34/pointnet2/issues/8
# we try to keep the variables names similar to the original implementation
def pn2_vote_evaluate_cls(dataloader, model, log_file, num_votes=[12]):
from pointnet2_tf.utils import provider
model.eval()
terminal = sys.stdout
log = open(log_file, "w")
if isinstance(num_votes, list):
pass
else:
num_votes = [num_votes]
for _num_votes in num_votes:
print(f"num_votes: {_num_votes}")
NUM_CLASSES = DATASET_NUM_CLASS[dataloader.dataset.dataset_name]
SHAPE_NAMES = [line.rstrip() for line in
open('./data/modelnet40_ply_hdf5_2048/shape_names.txt')]
total_correct = 0
total_seen = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
with torch.no_grad():
for _batch_data in dataloader:
# based on https://github.com/charlesq34/pointnet2/blob/master/evaluate.py#L125-L150
batch_data, batch_label = np.array(_batch_data['pc'].cpu()), np.array(_batch_data['label'].cpu())
bsize = batch_data.shape[0]
BATCH_SIZE = batch_data.shape[0]
NUM_POINT = batch_data.shape[1]
batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
for vote_idx in range(_num_votes):
# Shuffle point order to achieve different farthest samplings
shuffled_indices = np.arange(NUM_POINT)
np.random.shuffle(shuffled_indices)
rotated_data = provider.rotate_point_cloud_by_angle(
batch_data[:, shuffled_indices, :], vote_idx/float(_num_votes) * np.pi * 2)
inp = {'pc': torch.tensor(rotated_data)}
out = model(**inp)
pred_val = np.array(out['logit'].cpu())
batch_pred_sum += pred_val
pred_val = np.argmax(batch_pred_sum, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
for i in range(bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
        class_accuracies = np.array(total_correct_class)/np.array(total_seen_class, dtype=float)
message = ""
for i, name in enumerate(SHAPE_NAMES):
message += f"\n {'%10s: %0.3f' % (name, class_accuracies[i])}"
message += f"\n {'eval accuracy: %f'% (total_correct / float(total_seen))}"
message += f"\n {'eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))}"
terminal.write(message)
log.write(message)
================================================
FILE: aug_utils.py
================================================
import numpy as np
import torch
import sys
from main import get_loss
sys.path.append("./emd/")
import emd_module as emd
def cutmix_r(data_batch,cfg):
r = np.random.rand(1)
if cfg.AUG.BETA > 0 and r < cfg.AUG.PROB:
lam = np.random.beta(cfg.AUG.BETA, cfg.AUG.BETA)
B = data_batch['pc'].size()[0]
rand_index = torch.randperm(B).cuda()
target_a = data_batch['label']
target_b = data_batch['label'][rand_index]
point_a = torch.zeros(B, 1024, 3)
point_b = torch.zeros(B, 1024, 3)
point_c = torch.zeros(B, 1024, 3)
point_a = data_batch['pc']
point_b = data_batch['pc'][rand_index]
point_c = data_batch['pc'][rand_index]
# point_a, point_b, point_c = point_a.to(device), point_b.to(device), point_c.to(device)
remd = emd.emdModule()
remd = remd.cuda()
dis, ind = remd(point_a, point_b, 0.005, 300)
for ass in range(B):
point_c[ass, :, :] = point_c[ass, ind[ass].long(), :]
int_lam = int(cfg.DATALOADER.MODELNET40_DGCNN.num_points * lam)
int_lam = max(1, int_lam)
gamma = np.random.choice(cfg.DATALOADER.MODELNET40_DGCNN.num_points, int_lam, replace=False, p=None)
for i2 in range(B):
data_batch['pc'][i2, gamma, :] = point_c[i2, gamma, :]
# adjust lambda to exactly match point ratio
lam = int_lam * 1.0 / cfg.DATALOADER.MODELNET40_DGCNN.num_points
# points = data_batch['pc'].transpose(2, 1)
data_batch['label_2'] = target_b
data_batch['lam'] = lam
return data_batch
# pred, trans_feat = model(points)
# loss = criterion(pred, target_a.long()) * (1. - lam) + criterion(pred, target_b.long()) * lam
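# Note: cutmix_r above is the PointCutMix-R variant: int(num_points * lam)
# randomly chosen point indices are replaced with EMD-aligned points from
# another sample in the batch, and the caller mixes the two labels with the
# adjusted lam (see the commented-out loss above). cutmix_k below is the
# PointCutMix-K variant, which instead replaces the k nearest neighbors of a
# randomly chosen query point.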
def cutmix_k(data_batch,cfg):
r = np.random.rand(1)
if cfg.AUG.BETA > 0 and r < cfg.AUG.PROB:
lam = np.random.beta(cfg.AUG.BETA, cfg.AUG.BETA)
B = data_batch['pc'].size()[0]
rand_index = torch.randperm(B).cuda()
target_a = data_batch['label']
target_b = data_batch['label'][rand_index]
point_a = torch.zeros(B, 1024, 3)
point_b = torch.zeros(B, 1024, 3)
point_c = torch.zeros(B, 1024, 3)
point_a = data_batch['pc']
point_b = data_batch['pc'][rand_index]
point_c = data_batch['pc'][rand_index]
remd = emd.emdModule()
remd = remd.cuda()
dis, ind = remd(point_a, point_b, 0.005, 300)
for ass in range(B):
point_c[ass, :, :] = point_c[ass, ind[ass].long(), :]
int_lam = int(cfg.DATALOADER.MODELNET40_DGCNN.num_points * lam)
int_lam = max(1, int_lam)
random_point = torch.from_numpy(np.random.choice(1024, B, replace=False, p=None))
# kNN
ind1 = torch.tensor(range(B))
query = point_a[ind1, random_point].view(B, 1, 3)
dist = torch.sqrt(torch.sum((point_a - query.repeat(1, cfg.DATALOADER.MODELNET40_DGCNN.num_points, 1)) ** 2, 2))
idxs = dist.topk(int_lam, dim=1, largest=False, sorted=True).indices
for i2 in range(B):
data_batch['pc'][i2, idxs[i2], :] = point_c[i2, idxs[i2], :]
# adjust lambda to exactly match point ratio
lam = int_lam * 1.0 / cfg.DATALOADER.MODELNET40_DGCNN.num_points
# points = points.transpose(2, 1)
# pred, trans_feat = model(points)
# loss = criterion(pred, target_a.long()) * (1. - lam) + criterion(pred, target_b.long()) * lam
data_batch['label_2'] = target_b
data_batch['lam'] = lam
return data_batch
def mixup(data_batch,cfg):
batch_size = data_batch['pc'].size()[0]
idx_minor = torch.randperm(batch_size)
mixrates = (0.5 - np.abs(np.random.beta(cfg.AUG.MIXUPRATE, cfg.AUG.MIXUPRATE, batch_size) - 0.5))
label_main = data_batch['label']
label_minor = data_batch['label'][idx_minor]
label_new = torch.zeros(batch_size, 40)
for i in range(batch_size):
if label_main[i] == label_minor[i]: # same label
label_new[i][label_main[i]] = 1.0
else:
label_new[i][label_main[i]] = 1 - mixrates[i]
label_new[i][label_minor[i]] = mixrates[i]
label = label_new
data_minor = data_batch['pc'][idx_minor]
mix_rate = torch.tensor(mixrates).float()
mix_rate = mix_rate.unsqueeze_(1).unsqueeze_(2)
mix_rate_expand_xyz = mix_rate.expand(data_batch['pc'].shape)
remd = emd.emdModule()
remd = remd.cuda()
_, ass = remd(data_batch['pc'], data_minor, 0.005, 300)
ass = ass.long()
for i in range(batch_size):
data_minor[i] = data_minor[i][ass[i]]
data_batch['pc'] = data_batch['pc'] * (1 - mix_rate_expand_xyz) + data_minor * mix_rate_expand_xyz
data_batch['label_2'] = label_minor
data_batch['lam'] = torch.tensor(mix_rate).squeeze_()
return data_batch
def knn_points(k, xyz, query, nsample=512):
B, N, C = xyz.shape
_, S, _ = query.shape # S=1
tmp_idx = np.arange(N)
group_idx = np.repeat(tmp_idx[np.newaxis,np.newaxis,:], B, axis=0)
    sqrdists = square_distance(query, xyz) # (B, 1, N) squared distances
tmp = np.sort(sqrdists, axis=2)
knn_dist = np.zeros((B,1))
for i in range(B):
knn_dist[i][0] = tmp[i][0][k]
group_idx[i][sqrdists[i]>knn_dist[i][0]]=N
# group_idx[sqrdists > radius ** 2] = N
# print("group idx : \n",group_idx)
# group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample] # for torch.tensor
group_idx = np.sort(group_idx, axis=2)[:, :, :nsample]
# group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
tmp_idx = group_idx[:,:,0]
group_first = np.repeat(tmp_idx[:,np.newaxis,:], nsample, axis=2)
# repeat the first value of the idx in each batch
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def cut_points_knn(data_batch, idx, radius, nsample=512, k=512):
"""
input
points : BxNx3(=6 with normal)
idx : Bx1 one scalar(int) between 0~len(points)
output
idx : Bxn_sample
"""
B, N, C = data_batch.shape
B, S = idx.shape
query_points = np.zeros((B,1,C))
# print("idx : \n",idx)
for i in range(B):
query_points[i][0]=data_batch[i][idx[i][0]] # Bx1x3(=6 with normal)
# B x n_sample
group_idx = knn_points(k=k, xyz=data_batch[:,:,:3], query=query_points[:,:,:3], nsample=nsample)
return group_idx, query_points # group_idx: 16x?x6, query_points: 16x1x6
def cut_points(data_batch, idx, radius, nsample=512):
"""
input
points : BxNx3(=6 with normal)
idx : Bx1 one scalar(int) between 0~len(points)
output
idx : Bxn_sample
"""
B, N, C = data_batch.shape
B, S = idx.shape
query_points = np.zeros((B,1,C))
# print("idx : \n",idx)
for i in range(B):
query_points[i][0]=data_batch[i][idx[i][0]] # Bx1x3(=6 with normal)
# B x n_sample
group_idx = query_ball_point_for_rsmix(radius, nsample, data_batch[:,:,:3], query_points[:,:,:3])
return group_idx, query_points # group_idx: 16x?x6, query_points: 16x1x6
def query_ball_point_for_rsmix(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample], S=1
"""
# device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
# group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
tmp_idx = np.arange(N)
group_idx = np.repeat(tmp_idx[np.newaxis,np.newaxis,:], B, axis=0)
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
# group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample] # for torch.tensor
group_idx = np.sort(group_idx, axis=2)[:, :, :nsample]
# group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
tmp_idx = group_idx[:,:,0]
group_first = np.repeat(tmp_idx[:,np.newaxis,:], nsample, axis=2)
# repeat the first value of the idx in each batch
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def square_distance(src, dst):
"""
    Calculate squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
# dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
# dist += torch.sum(src ** 2, -1).view(B, N, 1)
# dist += torch.sum(dst ** 2, -1).view(B, 1, M)
dist = -2 * np.matmul(src, dst.transpose(0, 2, 1))
dist += np.sum(src ** 2, -1).reshape(B, N, 1)
dist += np.sum(dst ** 2, -1).reshape(B, 1, M)
return dist
def pts_num_ctrl(pts_erase_idx, pts_add_idx):
    '''
    input:  pts_erase_idx - point indices to erase
            pts_add_idx  - point indices to add
    output: pts_add_idx_ctrled - indices to add, resized to match the erased count
    '''
if len(pts_erase_idx)>=len(pts_add_idx):
num_diff = len(pts_erase_idx)-len(pts_add_idx)
if num_diff == 0:
pts_add_idx_ctrled = pts_add_idx
else:
pts_add_idx_ctrled = np.append(pts_add_idx, pts_add_idx[np.random.randint(0, len(pts_add_idx), size=num_diff)])
else:
pts_add_idx_ctrled = np.sort(np.random.choice(pts_add_idx, size=len(pts_erase_idx), replace=False))
return pts_add_idx_ctrled
def rsmix(data, cfg, n_sample=512, KNN=False):
cut_rad = np.random.beta(cfg.AUG.BETA, cfg.AUG.BETA)
data_batch = data['pc'].numpy()
label_batch = data['label'].numpy()
rand_index = np.random.choice(data_batch.shape[0],data_batch.shape[0], replace=False) # label dim : (16,) for model
    if len(label_batch.shape) == 1:
label_batch = np.expand_dims(label_batch, axis=1)
label_a = label_batch[:,0]
label_b = label_batch[rand_index][:,0]
data_batch_rand = data_batch[rand_index] # BxNx3(with normal=6)
rand_idx_1 = np.random.randint(0,data_batch.shape[1], (data_batch.shape[0],1))
rand_idx_2 = np.random.randint(0,data_batch.shape[1], (data_batch.shape[0],1))
if KNN:
knn_para = min(int(np.ceil(cut_rad*n_sample)),n_sample)
pts_erase_idx, query_point_1 = cut_points_knn(data_batch, rand_idx_1, cut_rad, nsample=n_sample, k=knn_para) # B x num_points_in_radius_1 x 3(or 6)
pts_add_idx, query_point_2 = cut_points_knn(data_batch_rand, rand_idx_2, cut_rad, nsample=n_sample, k=knn_para) # B x num_points_in_radius_2 x 3(or 6)
else:
pts_erase_idx, query_point_1 = cut_points(data_batch, rand_idx_1, cut_rad, nsample=n_sample) # B x num_points_in_radius_1 x 3(or 6)
pts_add_idx, query_point_2 = cut_points(data_batch_rand, rand_idx_2, cut_rad, nsample=n_sample) # B x num_points_in_radius_2 x 3(or 6)
query_dist = query_point_1[:,:,:3] - query_point_2[:,:,:3]
pts_replaced = np.zeros((1,data_batch.shape[1],data_batch.shape[2]))
lam = np.zeros(data_batch.shape[0],dtype=float)
for i in range(data_batch.shape[0]):
if pts_erase_idx[i][0][0]==data_batch.shape[1]:
tmp_pts_replaced = np.expand_dims(data_batch[i], axis=0)
lam_tmp = 0
elif pts_add_idx[i][0][0]==data_batch.shape[1]:
pts_erase_idx_tmp = np.unique(pts_erase_idx[i].reshape(n_sample,),axis=0)
tmp_pts_erased = np.delete(data_batch[i], pts_erase_idx_tmp, axis=0) # B x N-num_rad_1 x 3(or 6)
dup_points_idx = np.random.randint(0,len(tmp_pts_erased), size=len(pts_erase_idx_tmp))
tmp_pts_replaced = np.expand_dims(np.concatenate((tmp_pts_erased, data_batch[i][dup_points_idx]), axis=0), axis=0)
lam_tmp = 0
else:
pts_erase_idx_tmp = np.unique(pts_erase_idx[i].reshape(n_sample,),axis=0)
pts_add_idx_tmp = np.unique(pts_add_idx[i].reshape(n_sample,),axis=0)
pts_add_idx_ctrled_tmp = pts_num_ctrl(pts_erase_idx_tmp,pts_add_idx_tmp)
tmp_pts_erased = np.delete(data_batch[i], pts_erase_idx_tmp, axis=0) # B x N-num_rad_1 x 3(or 6)
# input("INPUT : ")
tmp_pts_to_add = np.take(data_batch_rand[i], pts_add_idx_ctrled_tmp, axis=0)
tmp_pts_to_add[:,:3] = query_dist[i]+tmp_pts_to_add[:,:3]
tmp_pts_replaced = np.expand_dims(np.vstack((tmp_pts_erased,tmp_pts_to_add)), axis=0)
lam_tmp = len(pts_add_idx_ctrled_tmp)/(len(pts_add_idx_ctrled_tmp)+len(tmp_pts_erased))
pts_replaced = np.concatenate((pts_replaced, tmp_pts_replaced),axis=0)
lam[i] = lam_tmp
data_batch_mixed = np.delete(pts_replaced, [0], axis=0)
data['pc'] = torch.FloatTensor(data_batch_mixed)
data['label'] = torch.tensor(label_a)
data['label_2'] = torch.tensor(label_b)
data['lam'] = torch.tensor(lam)
return data
def pgd(data_batch, model, task, loss_name, dataset_name, step=7, eps=0.05, alpha=0.01):
    model.eval()
    data = data_batch['pc']
    # start from a uniformly random point inside the L_inf eps-ball
    adv_data = data.clone()
    adv_data = adv_data + (torch.rand_like(adv_data) * eps * 2 - eps)
    adv_data = adv_data.detach()
adv_data_batch = {}
for _ in range(step):
adv_data.requires_grad=True
out = model(**{'pc':adv_data})
adv_data_batch['pc'] = adv_data
adv_data_batch['label'] = data_batch['label']
model.zero_grad()
loss = get_loss(task, loss_name, adv_data_batch, out, dataset_name)
loss.backward()
with torch.no_grad():
adv_data = adv_data + alpha * adv_data.grad.sign()
delta = adv_data-data
# print(delta)
delta = torch.clamp(delta,-eps,eps)
adv_data = (data+delta).detach_()
return adv_data_batch
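# Illustrative call (argument values are assumptions): the loop above is
# untargeted L_inf PGD -- `step` gradient-ascent steps of size `alpha`, with
# the perturbation re-clamped to [-eps, eps] after each step, e.g.:
#   adv_batch = pgd(data_batch, model, 'cls', 'smooth', 'modelnet40_dgcnn')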
================================================
FILE: configs/bn/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
ADAPT:
METHOD: bn
================================================
FILE: configs/bn/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
ADAPT:
METHOD: bn
================================================
FILE: configs/bn/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
ADAPT:
METHOD: bn
================================================
FILE: configs/bn/pointnet2.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: bn
================================================
FILE: configs/bn/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: bn
================================================
FILE: configs/bn/simpleview.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
ADAPT:
METHOD: bn
================================================
FILE: configs/corruption/curvenet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_curvenet_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: curvenet
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/corruption/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/corruption/gdanet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_gdanet_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: gdanet
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/corruption/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
================================================
FILE: configs/corruption/pointMLP.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointMLP_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointMLP
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/corruption/pointMLP2.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointMLP2_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointMLP2
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/corruption/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/corruption/pointnet2.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/corruption/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/corruption/simpleview.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/cutmix/dgcnn_k.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/dgcnn_r.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pct_k.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pct_r.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pointnet2_k.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pointnet2_r.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pointnet_k.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/pointnet_r.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/rscnn_k.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/rscnn_r.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/simpleview_k.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_k_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
AUG:
NAME: cutmix_k
BETA: 1.
PROB: 0.5
================================================
FILE: configs/cutmix/simpleview_r.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: cutmix_r_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
AUG:
NAME: cutmix_r
BETA: 1.
PROB: 0.5
================================================
FILE: configs/dgcnn_curvenet_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_curvenet_run_1
LOSS_NAME: smooth
MODEL_NAME: curvenet
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_dgcnn_0.25_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_0.25_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_0.25_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_0.25_valid_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_0.5_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_0.5_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_0.5_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_0.5_valid_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_ce_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_ce_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_ce_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_ce_valid_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/dgcnn_dgcnn_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_dgcnn_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0001
================================================
FILE: configs/dgcnn_gdanet_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_gdanet_run_1
LOSS_NAME: smooth
MODEL_NAME: gdanet
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pct_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
================================================
FILE: configs/dgcnn_pointMLP2_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointMLP2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointMLP2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointMLP_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointMLP_run_1
LOSS_NAME: smooth
MODEL_NAME: pointMLP
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_0.25_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_0.25_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_0.25_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_0.25_valid_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_0.5_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_0.5_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_0.5_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_0.5_valid_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_ce_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_ce_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_ce_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_ce_valid_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet2_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet2_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_0.25_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_0.25_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_0.25_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_0.25_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_0.5_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_0.5_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_0.5_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_0.5_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_ce_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_ce_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_ce_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_ce_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_pointnet_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_pointnet_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_0.25_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_0.25_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_0.25_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_0.25_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_0.5_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_0.5_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_0.5_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_0.5_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_ce_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_ce_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_ce_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_ce_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: rscnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_rscnn_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_rscnn_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/dgcnn_simpleview_0.25_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_0.25_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/dgcnn_simpleview_0.25_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.3125_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_0.25_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
================================================
FILE: configs/dgcnn_simpleview_0.5_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_0.5_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/dgcnn_simpleview_0.5_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_split_0.625_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_0.5_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
================================================
FILE: configs/dgcnn_simpleview_ce_run_1.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_ce_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/dgcnn_simpleview_ce_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_ce_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: simpleview
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
================================================
FILE: configs/dgcnn_simpleview_run_1.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/dgcnn_simpleview_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_DGCNN:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: dgcnn_simpleview_valid_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
================================================
FILE: configs/mixup/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
================================================
FILE: configs/mixup/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
================================================
FILE: configs/mixup/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
================================================
FILE: configs/mixup/pointnet2.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
================================================
FILE: configs/mixup/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
================================================
FILE: configs/mixup/simpleview.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: mixup_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
METRIC: acc
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: mixup
BETA: 1.
PROB: 0.5
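As with cutmix, the mixup configs share BETA and PROB (applied per batch with probability PROB). A minimal sketch assuming plain pointwise interpolation between two index-aligned clouds; point-cloud mixup variants such as PointMixup first compute a point correspondence, which this sketch omits.

import numpy as np

def mixup(pc_a, pc_b, beta=1.0):
    """Pointwise interpolation of two index-aligned (N, 3) clouds."""
    lam = np.random.beta(beta, beta)
    mixed = lam * pc_a + (1.0 - lam) * pc_b
    return mixed.astype('float32'), lam   # loss: lam*L(y_a) + (1-lam)*L(y_b)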
================================================
FILE: configs/pgd/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: pgd_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
AUG:
NAME: pgd
================================================
FILE: configs/pgd/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: pgd_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
AUG:
NAME: pgd
================================================
FILE: configs/pgd/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: pgd_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
AUG:
NAME: pgd
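The pgd configs name an adversarial-training augmentation. Below is a generic L-inf PGD inner loop for point clouds; the radius, step size, and iteration count are illustrative assumptions, not values taken from this repository.

import torch
import torch.nn.functional as F

def pgd_points(model, pc, label, eps=0.05, alpha=0.01, iters=7):
    """pc: (B, N, 3) tensor; returns an L-inf bounded adversarial copy."""
    adv = pc.clone().detach()
    for _ in range(iters):
        adv.requires_grad_(True)
        loss = F.cross_entropy(model(adv), label)
        grad, = torch.autograd.grad(loss, adv)
        adv = adv.detach() + alpha * grad.sign()   # ascend the loss
        adv = pc + (adv - pc).clamp(-eps, eps)     # project onto the eps ball
    return adv.detach()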
================================================
FILE: configs/pointnet2_dgcnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_dgcnn_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0001
================================================
FILE: configs/pointnet2_dgcnn_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_PN2:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_dgcnn_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0001
================================================
FILE: configs/pointnet2_pointnet2_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_pointnet2_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_pointnet2_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_PN2:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_pointnet2_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_pointnet_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_pointnet_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_pointnet_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_PN2:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_pointnet_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_rscnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_rscnn_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_rscnn_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_PN2:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_rscnn_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: rscnn
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
TRAIN:
l2: 0.0
================================================
FILE: configs/pointnet2_simpleview_run_1.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_simpleview_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/pointnet2_simpleview_valid_run_1.yaml
================================================
DATALOADER:
MODELNET40_PN2:
train_data_path: ./data/modelnet40_ply_hdf5_2048/train_minus_valid_files.txt
valid_data_path: ./data/modelnet40_ply_hdf5_2048/valid_files.txt
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_pn2
EXP_ID: pointnet2_simpleview_valid_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: simpleview
SEED: 1
TASK: cls
EXP_EXTRA:
no_test: true
no_val: false
val_eval_freq: 25
================================================
FILE: configs/rscnn_dgcnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_rscnn
EXP_ID: rscnn_dgcnn_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
================================================
FILE: configs/rscnn_pointnet2_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_rscnn
EXP_ID: rscnn_pointnet2_run_1
LOSS_NAME: cross_entropy
METRIC: acc
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/rscnn_pointnet_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_rscnn
EXP_ID: rscnn_pointnet_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
================================================
FILE: configs/rscnn_rscnn_run_1.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_rscnn
EXP_ID: rscnn_rscnn_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
================================================
FILE: configs/rscnn_simpleview_run_1.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_rscnn
EXP_ID: rscnn_simpleview_run_1
LOSS_NAME: cross_entropy
MODEL_NAME: simpleview
SEED: 1
TASK: cls
================================================
FILE: configs/rsmix/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_dgcnn_run_1
LOSS_NAME: smooth
MODEL_NAME: dgcnn
SEED: 1
METRIC: acc
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/rsmix/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/rsmix/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/rsmix/pointnet2.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/rsmix/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/rsmix/simpleview.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_dgcnn
EXP_ID: rsmix_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
METRIC: acc
TASK: cls
TRAIN:
l2: 0.0
AUG:
NAME: rsmix
BETA: 1.
PROB: 0.5
================================================
FILE: configs/tent/dgcnn.yaml
================================================
DATALOADER:
batch_size: 16
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent/pointnet2.yaml
================================================
DATALOADER:
batch_size: 16
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent/simpleview.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/dgcnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_dgcnn_run_1
LOSS_NAME: smooth
METRIC: acc
MODEL_NAME: dgcnn
SEED: 1
TASK: cls
TRAIN:
l2: 1e-4
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/pct.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pct_run_1
LOSS_NAME: smooth
MODEL_NAME: pct
SEED: 1
TASK: cls
OPTIMIZER: pct
TRAIN:
l2: 1e-4
learning_rate: 0.0001
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/pointnet.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet
SEED: 1
TASK: cls_trans
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/pointnet2.yaml
================================================
DATALOADER:
batch_size: 16
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_pointnet2_run_1
LOSS_NAME: smooth
MODEL_NAME: pointnet2
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/rscnn.yaml
================================================
DATALOADER:
batch_size: 32
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_rscnn_run_1
LOSS_NAME: smooth
MODEL_NAME: rscnn
SEED: 1
TASK: cls
TRAIN:
l2: 0.0
ADAPT:
METHOD: tent
ITER: 10
================================================
FILE: configs/tent_cutmix/simpleview.yaml
================================================
DATALOADER:
batch_size: 18
num_workers: 0
EXP:
DATASET: modelnet40_c
EXP_ID: c_simpleview_run_1
LOSS_NAME: smooth
MODEL_NAME: simpleview
SEED: 1
TASK: cls
ADAPT:
METHOD: tent
ITER: 10
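The tent and tent_cutmix configs run test-time adaptation on modelnet40_c with ADAPT.METHOD: tent and ADAPT.ITER: 10. Tent (Wang et al., ICLR 2021) minimizes prediction entropy on each test batch while updating only the BatchNorm affine parameters; the sketch below assumes a plain Adam optimizer and an illustrative learning rate.

import torch

def tent_adapt(model, batch, steps=10, lr=1e-3):
    """Entropy-minimization adaptation on one test batch."""
    model.train()   # BatchNorm uses the test batch's statistics
    bn_params = [p for m in model.modules()
                 if isinstance(m, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d))
                 for p in m.parameters() if p.requires_grad]
    opt = torch.optim.Adam(bn_params, lr=lr)
    for _ in range(steps):                       # ADAPT.ITER
        probs = torch.softmax(model(batch), dim=1)
        entropy = -(probs * torch.log(probs + 1e-8)).sum(dim=1).mean()
        opt.zero_grad()
        entropy.backward()
        opt.step()
    return model(batch).argmax(dim=1)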
================================================
FILE: configs.py
================================================
from yacs.config import CfgNode as CN
_C = CN()
# -----------------------------------------------------------------------------
# EXPERIMENT
# -----------------------------------------------------------------------------
_C.EXP = CN()
_C.EXP.EXP_ID = ""
_C.EXP.SEED = 0
_C.EXP.TASK = 'cls'
_C.EXP.DATASET = 'modelnet40'
_C.EXP.MODEL_NAME = 'mv'
_C.EXP.LOSS_NAME = 'cross_entropy'
_C.EXP.OPTIMIZER = 'vanilla'
_C.EXP.METRIC = 'acc'
#------------------------------------------------------------------------------
# Extra Experiment Parameters
#------------------------------------------------------------------------------
_C.EXP_EXTRA = CN()
_C.EXP_EXTRA.no_val = True
_C.EXP_EXTRA.no_test = False
_C.EXP_EXTRA.val_eval_freq = 1
_C.EXP_EXTRA.test_eval_freq = 1
_C.EXP_EXTRA.save_ckp = 25
# -----------------------------------------------------------------------------
# DATALOADER (contains things common across the datasets)
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
_C.DATALOADER.batch_size = 60
_C.DATALOADER.num_workers = 0
# -----------------------------------------------------------------------------
# TRAINING DETAILS (contains things common across the training)
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.num_epochs = 300
_C.TRAIN.learning_rate = 1e-3
_C.TRAIN.lr_decay_factor = 0.5
_C.TRAIN.lr_reduce_patience = 10
_C.TRAIN.l2 = 0.0
_C.TRAIN.early_stop = 300
_C.TRAIN.lr_clip = 0.00001
#-----------------------------------------------------------------------------
# MODELNET40_RSCNN
#-----------------------------------------------------------------------------
_C.DATALOADER.MODELNET40_RSCNN = CN()
_C.DATALOADER.MODELNET40_RSCNN.data_path = './data/'
_C.DATALOADER.MODELNET40_RSCNN.train_data_path = 'train_files.txt'
_C.DATALOADER.MODELNET40_RSCNN.valid_data_path = 'train_files.txt'
_C.DATALOADER.MODELNET40_RSCNN.test_data_path = 'test_files.txt'
_C.DATALOADER.MODELNET40_RSCNN.num_points = 1024
#-----------------------------------------------------------------------------
# MODELNET40_PN2
#-----------------------------------------------------------------------------
_C.DATALOADER.MODELNET40_PN2 = CN()
_C.DATALOADER.MODELNET40_PN2.train_data_path = './data/modelnet40_ply_hdf5_2048/train_files.txt'
_C.DATALOADER.MODELNET40_PN2.valid_data_path = './data/modelnet40_ply_hdf5_2048/train_files.txt'
_C.DATALOADER.MODELNET40_PN2.test_data_path = './data/modelnet40_ply_hdf5_2048/test_files.txt'
_C.DATALOADER.MODELNET40_PN2.num_points = 1024
#-----------------------------------------------------------------------------
# MODELNET40_DGCNN
#-----------------------------------------------------------------------------
_C.DATALOADER.MODELNET40_DGCNN = CN()
_C.DATALOADER.MODELNET40_DGCNN.train_data_path = './data/modelnet40_ply_hdf5_2048/train_files.txt'
_C.DATALOADER.MODELNET40_DGCNN.valid_data_path = './data/modelnet40_ply_hdf5_2048/train_files.txt'
_C.DATALOADER.MODELNET40_DGCNN.test_data_path = './data/modelnet40_ply_hdf5_2048/test_files.txt'
_C.DATALOADER.MODELNET40_DGCNN.num_points = 1024
#-----------------------------------------------------------------------------
# MODELNET40_C
#-----------------------------------------------------------------------------
_C.DATALOADER.MODELNET40_C = CN()
_C.DATALOADER.MODELNET40_C.test_data_path = './data/modelnet40_c/'
_C.DATALOADER.MODELNET40_C.corruption = 'uniform'
_C.DATALOADER.MODELNET40_C.severity = 1
# ----------------------------------------------------------------------------
# MODEL
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# -----------------------------------------------------------------------------
# MV MODEL
# -----------------------------------------------------------------------------
_C.MODEL.MV = CN()
_C.MODEL.MV.backbone = 'resnet18'
_C.MODEL.MV.feat_size = 16
# -----------------------------------------------------------------------------
# RSCNN MODEL
# -----------------------------------------------------------------------------
_C.MODEL.RSCNN = CN()
_C.MODEL.RSCNN.ssn_or_msn = True
# -----------------------------------------------------------------------------
# PN2 MODEL
# -----------------------------------------------------------------------------
_C.MODEL.PN2 = CN()
_C.MODEL.PN2.version_cls = 1.0
_C.AUG = CN()
_C.AUG.NAME = 'none'
_C.AUG.BETA = 1.
_C.AUG.PROB = 0.5
_C.AUG.MIXUPRATE = 0.4
_C.ADAPT = CN()
_C.ADAPT.METHOD = 'none'
_C.ADAPT.ITER = 1
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
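The YAML files listed earlier override these defaults through yacs' merge mechanism; a typical consumption pattern looks like the following (the chosen config path is just an example):

from configs import get_cfg_defaults

cfg = get_cfg_defaults()
cfg.merge_from_file("configs/dgcnn_pointnet_run_1.yaml")  # YAML overrides the defaults
cfg.freeze()
print(cfg.EXP.MODEL_NAME, cfg.TRAIN.l2)  # -> pointnet 0.0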
================================================
FILE: data/convert.py
================================================
import open3d as o3d
def load_mesh(filepath):
return o3d.io.read_triangle_mesh(filepath)
def export_mesh(mesh, filepath):
o3d.io.write_triangle_mesh(filepath, mesh)
def load_pcd(filepath):
return o3d.io.read_point_cloud(filepath)
def export_pcd(pcd, filepath):
o3d.io.write_point_cloud(filepath, pcd)
def mesh_to_pcd(mesh, number_of_points=2048):
return mesh.sample_points_uniformly(number_of_points=number_of_points)
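A round-trip usage example for these helpers; the .off path is hypothetical and stands in for any ModelNet40 mesh file:

from convert import load_mesh, mesh_to_pcd, export_pcd

mesh = load_mesh("./data/ModelNet40/chair/test/chair_0890.off")  # hypothetical path
pcd = mesh_to_pcd(mesh, number_of_points=2048)  # uniform surface sampling
export_pcd(pcd, "./chair_0890.pcd")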
================================================
FILE: data/create_modelnet40_small.py
================================================
#!/usr/bin/env python
import os
import h5py
import numpy as np
np.random.seed(123)
def main(split_size):
modelnet40_dir = "./data/modelnet40_ply_hdf5_2048/"
modelnet40_train_file = os.path.join(
modelnet40_dir, "train_minus_valid_files.txt")
modelnet40_train_split_file = os.path.join(
modelnet40_dir, f"train_minus_valid_split_{split_size}_files.txt")
modelnet40_train_split_path = f"ply_data_trainminusval_split_{split_size}.h5"
with open(modelnet40_train_file, "r") as f:
modelnet40_train_paths = [l.strip() for l in f.readlines()]
data = []
labels = []
for modelnet40_train_path in modelnet40_train_paths:
train_h5 = h5py.File(modelnet40_train_path, "r")
data.append(train_h5["data"][:])
labels.append(train_h5["label"][:])
data = np.concatenate(data)
labels = np.concatenate(labels)
train_data = []
train_label = []
for i in range(40):
cls_inds = np.where(labels == i)[0]
num_objs = len(cls_inds)
num_train = int(num_objs * split_size)
cls_data = data[cls_inds]
np.random.shuffle(cls_data)
train_data.append(cls_data[:num_train])
train_label += [i] * num_train
train_data = np.concatenate(train_data)
train_label = np.array(train_label).reshape(-1, 1)
with open(modelnet40_train_split_file, "w") as f:
f.write(os.path.join(modelnet40_dir,
modelnet40_train_split_path) + "\n")
with h5py.File(
os.path.join(modelnet40_dir, modelnet40_train_split_path),
"w") as f:
f.create_dataset("data", data=train_data)
f.create_dataset("label", data=train_label)
print('data: {}'.format(data.shape))
print('train data: {}'.format(train_data.shape))
print('min_label: {}'.format(labels.min()))
print('max_label: {}'.format(labels.max()))
if __name__ == "__main__":
main(0.5 / 0.8)
main(0.25 / 0.8)
================================================
FILE: data/create_modelnet40_valid.py
================================================
#!/usr/bin/env python
import os
import h5py
import numpy as np
np.random.seed(123)
def main():
modelnet40_dir = "./data/modelnet40_ply_hdf5_2048/"
modelnet40_train_minus_valid_path = "ply_data_trainminusval.h5"
modelnet40_valid_path = "ply_data_valid.h5"
modelnet40_train_minus_valid_file = os.path.join(modelnet40_dir, "train_minus_valid_files.txt")
modelnet40_valid_file = os.path.join(modelnet40_dir, "valid_files.txt")
modelnet40_train_file = os.path.join(modelnet40_dir, "train_files.txt")
with open(modelnet40_train_file, "r") as f:
modelnet40_train_paths = [l.strip() for l in f.readlines()]
data = []
labels = []
for modelnet40_train_path in modelnet40_train_paths:
train_h5 = h5py.File(modelnet40_train_path, "r")
data.append(train_h5["data"][:])
labels.append(train_h5["label"][:])
data = np.concatenate(data)
labels = np.concatenate(labels)
train_data = []
train_label = []
valid_data = []
valid_label = []
for i in range(40):
cls_inds = np.where(labels == i)[0]
num_objs = len(cls_inds)
num_train = int(num_objs * 0.8)
num_valid = num_objs - num_train
cls_data = data[cls_inds]
np.random.shuffle(cls_data)
train_data.append(cls_data[:num_train])
valid_data.append(cls_data[num_train:])
train_label += [i] * num_train
valid_label += [i] * num_valid
train_data = np.concatenate(train_data)
valid_data = np.concatenate(valid_data)
train_label = np.array(train_label).reshape(-1, 1)
valid_label = np.array(valid_label).reshape(-1, 1)
with open(modelnet40_train_minus_valid_file, "w") as f:
f.write(os.path.join(modelnet40_dir, modelnet40_train_minus_valid_path) + "\n")
with open(modelnet40_valid_file, "w") as f:
f.write(os.path.join(modelnet40_dir, modelnet40_valid_path) + "\n")
with h5py.File(os.path.join(modelnet40_dir, modelnet40_train_minus_valid_path), "w") as f:
f.create_dataset("data", data=train_data)
f.create_dataset("label", data=train_label)
with h5py.File(os.path.join(modelnet40_dir, modelnet40_valid_path), "w") as f:
f.create_dataset("data", data=valid_data)
f.create_dataset("label", data=valid_label)
print('data: {}'.format(data.shape))
print('min_label: {}'.format(labels.min()))
print('max_label: {}'.format(labels.max()))
if __name__ == "__main__":
main()
================================================
FILE: data/distortion.py
================================================
import pygem
from pygem import FFD, RBF, IDW
import open3d as o3d
import copy
import numpy as np
np.random.seed(2021)
def core_distortion(points, n_control_points=[2,2,2], displacement=None):
"""
Ref: http://mathlab.github.io/PyGeM/tutorial-1-ffd.html
"""
# the size of displacement matrix: 3 * control_points.shape
if displacement is None:
displacement = np.zeros((3,*n_control_points))
ffd = FFD(n_control_points=n_control_points)
ffd.box_length = [2.,2.,2.]
ffd.box_origin = [-1., -1., -1.]
ffd.array_mu_x = displacement[0,:,:,:]
ffd.array_mu_y = displacement[1,:,:,:]
ffd.array_mu_z = displacement[2,:,:,:]
new_points = ffd(points)
return new_points
def distortion(points, direction_mask=np.array([1,1,1]), point_mask=np.ones((5,5,5)), severity=0.5):
n_control_points=[5,5,5]
# random
displacement = np.random.rand(3,*n_control_points) * 2 * severity - np.ones((3,*n_control_points)) * severity
displacement *= np.transpose(np.tile(direction_mask, (5, 5, 5, 1)), (3, 0, 1, 2))
displacement *= np.tile(point_mask, (3, 1, 1, 1))
points = core_distortion(points, n_control_points=n_control_points, displacement=displacement)
# points = denomalize(points, scale, offset)
# set_points(data, points)
return points
def distortion_2(points, severity=(0.4,3), func='gaussian_spline'):
rbf = RBF(func=func)
xv = np.linspace(-1, 1, severity[1])
yv = np.linspace(-1, 1, severity[1])
zv = np.linspace(-1, 1, severity[1])
z, y, x = np.meshgrid(zv, yv, xv)
mesh = np.array([x.ravel(), y.ravel(), z.ravel()]).T
rbf.original_control_points = mesh
alpha = np.random.uniform(-np.pi,np.pi,mesh.shape[0])
gamma = np.random.uniform(-np.pi,np.pi,mesh.shape[0])
distance = np.ones(mesh.shape[0]) * severity[0]
displacement_x = distance * np.cos(alpha) * np.sin(gamma)
displacement_y = distance * np.sin(alpha) * np.sin(gamma)
displacement_z = distance * np.cos(gamma)
displacement = np.array([displacement_x,displacement_y,displacement_z]).T
rbf.deformed_control_points = mesh + displacement
new_points = rbf(points)
return new_points
def distortion_3(points, severity=(0.4,3)):
idw = IDW()
xv = np.linspace(-1, 1, severity[1])
yv = np.linspace(-1, 1, severity[1])
zv = np.linspace(-1, 1, severity[1])
z, y, x = np.meshgrid(zv, yv, xv)
mesh = np.array([x.ravel(), y.ravel(), z.ravel()]).T
idw.original_control_points = mesh
alpha = np.random.uniform(-np.pi,np.pi,mesh.shape[0])
gamma = np.random.uniform(-np.pi,np.pi,mesh.shape[0])
distance = np.ones(mesh.shape[0]) * severity[0]
displacement_x = distance * np.cos(alpha) * np.sin(gamma)
displacement_y = distance * np.sin(alpha) * np.sin(gamma)
displacement_z = distance * np.cos(gamma)
displacement = np.array([displacement_x,displacement_y,displacement_z]).T
idw.deformed_control_points = mesh + displacement
new_points = idw(points)
return new_points
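A minimal usage sketch for these three warps, assuming an input cloud already normalized to the [-1, 1]^3 box that the FFD lattice is anchored to:

import numpy as np
from distortion import distortion, distortion_2, distortion_3

points = np.random.uniform(-1, 1, (1024, 3))            # toy cloud inside the FFD box
warped_ffd = distortion(points, severity=0.3)           # lattice (FFD) warp
warped_rbf = distortion_2(points, severity=(0.05, 3))   # RBF warp on a 3^3 control grid
warped_idw = distortion_3(points, severity=(0.05, 3))   # IDW warp on a 3^3 control grid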
================================================
FILE: data/generate_c.py
================================================
### Generate Various Common Corruptions ###
from operator import index
import os
import h5py
import json
import numpy as np
from numpy import random
from convert import *
import distortion
from occlusion import *
from util import *
np.random.seed(2021)
### Transformation ###
'''
Rotate the point cloud
'''
def rotation(pointcloud,severity):
N, C = pointcloud.shape
c = [2.5, 5, 7.5, 10, 15][severity-1]
theta = np.random.uniform(c-2.5,c+2.5) * np.random.choice([-1,1]) * np.pi / 180.
gamma = np.random.uniform(c-2.5,c+2.5) * np.random.choice([-1,1]) * np.pi / 180.
beta = np.random.uniform(c-2.5,c+2.5) * np.random.choice([-1,1]) * np.pi / 180.
matrix_1 = np.array([[1,0,0],[0,np.cos(theta),-np.sin(theta)],[0,np.sin(theta),np.cos(theta)]])
matrix_2 = np.array([[np.cos(gamma),0,np.sin(gamma)],[0,1,0],[-np.sin(gamma),0,np.cos(gamma)]])
matrix_3 = np.array([[np.cos(beta),-np.sin(beta),0],[np.sin(beta),np.cos(beta),0],[0,0,1]])
new_pc = np.matmul(pointcloud,matrix_1)
new_pc = np.matmul(new_pc,matrix_2)
new_pc = np.matmul(new_pc,matrix_3).astype('float32')
return normalize(new_pc)
'''
Shear the point cloud
'''
def shear(pointcloud,severity):
N, C = pointcloud.shape
c = [0.05, 0.1, 0.15, 0.2, 0.25][severity-1]
a = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
b = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
d = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
e = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
f = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
g = np.random.uniform(c-0.05,c+0.05) * np.random.choice([-1,1])
matrix = np.array([[1,0,b],[d,1,e],[f,0,1]]) # note: a and g are sampled above but unused; only b, d, e, f enter the shear matrix
new_pc = np.matmul(pointcloud,matrix).astype('float32')
return normalize(new_pc)
'''
Scale the point cloud
'''
def scale(pointcloud,severity):
#TODO
N, C = pointcloud.shape
c = [0.1, 0.2, 0.3, 0.4, 0.5][severity-1]
a=b=d=1
r = np.random.randint(0,3)
t = np.random.choice([-1,1])
if r == 0:
a += c * t
b += c * (-t)
elif r == 1:
b += c * t
d += c * (-t)
elif r == 2:
a += c * t
d += c * (-t)
matrix = np.array([[a,0,0],[0,b,0],[0,0,d]])
new_pc = np.matmul(pointcloud,matrix).astype('float32')
return normalize(new_pc)
### Noise ###
'''
Add uniform noise to the point cloud
'''
def uniform_noise(pointcloud, severity):
#TODO
N, C = pointcloud.shape
c = [0.01, 0.02, 0.03, 0.04, 0.05][severity-1]
jitter = np.random.uniform(-c,c,(N, C))
new_pc = (pointcloud + jitter).astype('float32')
return normalize(new_pc)
'''
Add Gaussian noise to the point cloud
'''
def gaussian_noise(pointcloud, severity):
N, C = pointcloud.shape
c = [0.01, 0.015, 0.02, 0.025, 0.03][severity-1]
jitter = np.random.normal(size=(N, C)) * c
new_pc = (pointcloud + jitter).astype('float32')
new_pc = np.clip(new_pc,-1,1)
return new_pc
'''
Add background noise inside the edge-length-2 cube
'''
def background_noise(pointcloud, severity):
N, C = pointcloud.shape
c = [N//45, N//40, N//35, N//30, N//20][severity-1]
jitter = np.random.uniform(-1,1,(c, C))
new_pc = np.concatenate((pointcloud,jitter),axis=0).astype('float32')
return normalize(new_pc)
'''
Upsampling
'''
def upsampling(pointcloud, severity):
N, C = pointcloud.shape
c = [N//5, N//4, N//3, N//2, N][severity-1]
index = np.random.choice(ORIG_NUM, c, replace=False)
add = pointcloud[index] + np.random.uniform(-0.05,0.05,(c, C))
new_pc = np.concatenate((pointcloud,add),axis=0).astype('float32')
return normalize(new_pc)
'''
Add impulse noise
'''
def impulse_noise(pointcloud, severity):
N, C = pointcloud.shape
c = [N//30, N//25, N//20, N//15, N//10][severity-1]
index = np.random.choice(ORIG_NUM, c, replace=False)
pointcloud[index] += np.random.choice([-1,1], size=(c,C)) * 0.1
return normalize(pointcloud)
### Point Number Modification ###
'''
Cut out several local parts of the point cloud
'''
def cutout(pointcloud, severity):
N, C = pointcloud.shape
c = [(2,30), (3,30), (5,30), (7,30), (10,30)][severity-1]
for _ in range(c[0]):
i = np.random.choice(pointcloud.shape[0],1)
picked = pointcloud[i]
dist = np.sum((pointcloud - picked)**2, axis=1, keepdims=True)
idx = np.argpartition(dist, c[1], axis=0)[:c[1]]
# pointcloud[idx.squeeze()] = 0
pointcloud = np.delete(pointcloud, idx.squeeze(), axis=0)
# print(pointcloud.shape)
return pointcloud
'''
Uniformly sub-sample the point cloud
'''
def uniform_sampling(pointcloud, severity):
N, C = pointcloud.shape
c = [N//15, N//10, N//8, N//6, N//2, 3 * N//4][severity-1] # note: the sixth entry is unreachable for severity in 1..5
index = np.random.choice(ORIG_NUM, ORIG_NUM - c, replace=False)
return pointcloud[index]
'''
Density-based up-sampling of the point cloud (increase local density)
'''
def density_inc(pointcloud, severity):
N, C = pointcloud.shape
c = [(1,100), (2,100), (3,100), (4,100), (5,100)][severity-1]
# idx = np.random.choice(N,c[0])
temp = []
for _ in range(c[0]):
i = np.random.choice(pointcloud.shape[0],1)
picked = pointcloud[i]
dist = np.sum((pointcloud - picked)**2, axis=1, keepdims=True)
idx = np.argpartition(dist, c[1], axis=0)[:c[1]]
# idx_2 = np.random.choice(c[1],int((3/4) * c[1]),replace=False)
# idx = idx[idx_2]
temp.append(pointcloud[idx.squeeze()])
pointcloud = np.delete(pointcloud, idx.squeeze(), axis=0)
idx = np.random.choice(pointcloud.shape[0],1024 - c[0] * c[1])
temp.append(pointcloud[idx.squeeze()])
pointcloud = np.concatenate(temp)
# print(pointcloud.shape)
return pointcloud
'''
Density-based down-sampling of the point cloud (thin out local neighborhoods)
'''
def density(pointcloud, severity):
N, C = pointcloud.shape
c = [(1,100), (2,100), (3,100), (4,100), (5,100)][severity-1]
for _ in range(c[0]):
i = np.random.choice(pointcloud.shape[0],1)
picked = pointcloud[i]
dist = np.sum((pointcloud - picked)**2, axis=1, keepdims=True)
idx = np.argpartition(dist, c[1], axis=0)[:c[1]]
idx_2 = np.random.choice(c[1],int((3/4) * c[1]),replace=False)
idx = idx[idx_2]
pointcloud = np.delete(pointcloud, idx.squeeze(), axis=0)
# pointcloud[idx.squeeze()] = 0
# print(pointcloud.shape)
return pointcloud
def occlusion(severity):
## severity here indexes the camera viewpoint rather than a true corruption magnitude ##
pointcloud = []
f_0 = open("./data/modelnet40_ply_hdf5_2048/ply_data_test_0_id2file.json")
f_1 = open("./data/modelnet40_ply_hdf5_2048/ply_data_test_1_id2file.json")
list_0 = json.load(f_0)
list_1 = json.load(f_1)
f_0.close()
f_1.close()
for item in list_0 + list_1:
folder = item.split('/')[0]
mesh = item.split('/')[1][:-3] + 'off'
# print(mesh)
original_data = load_mesh("./data/ModelNet40/" + folder + "/test/" + mesh)
new_pc = occlusion_1(original_data,'occlusion',severity,n_points=1024)
theta = -np.pi / 2.
gamma = 0
beta = np.pi
matrix_1 = np.array([[1,0,0],[0,np.cos(theta),-np.sin(theta)],[0,np.sin(theta),np.cos(theta)]])
matrix_2 = np.array([[np.cos(gamma),0,np.sin(gamma)],[0,1,0],[-np.sin(gamma),0,np.cos(gamma)]])
matrix_3 = np.array([[np.cos(beta),-np.sin(beta),0],[np.sin(beta),np.cos(beta),0],[0,0,1]])
new_pc = np.matmul(new_pc,matrix_1)
new_pc = np.matmul(new_pc,matrix_2)
new_pc = normalize(np.matmul(new_pc,matrix_3).astype('float32'))
pointcloud.append(new_pc)
pointcloud = np.stack(pointcloud,axis=0)
np.save("./data/modelnet40_c/data_occlusion_" + str(severity) + ".npy", pointcloud)
return
def simulate_lidar(pointcloud,pose,severity):
pose = pose.transpose()
#####################################
# simplify the rotation to I matrix #
pose[:3,:3] = 0
pose[0,0] = pose[1,1] = pose[2,2] = 1
# Translate the point cloud #
pose[3,[0,1,2]] = -pose[3,[0,1,2]]
#####################################
pointcloud_new = np.concatenate([pointcloud,np.ones((pointcloud.shape[0],1))],axis=1)
pointcloud_new = np.dot(pointcloud_new,pose)
pointcloud_new = appendSpherical_np(pointcloud_new[:,:3])
delta = 1. * np.pi / 180.
cur = np.min(pointcloud_new[:,4])
new_pc = []
while cur + delta < np.max(pointcloud_new[:,4]): # sweep the angular coordinate (column 4); was pointcloud_new[:4], a slicing bug
pointcloud_new[(pointcloud_new[:,4] >= cur+delta/4) & (pointcloud_new[:,4] < cur + delta*3/4),4] = cur + delta / 2.
new_pc.append(pointcloud_new[(pointcloud_new[:,4] >= cur+delta/4) & (pointcloud_new[:,4] < cur + delta*3/4)])
cur += delta
new_pc = np.concatenate(new_pc,axis=0)
# pointcloud = np.dot(pointcloud,np.linalg.inv(pose))
new_pc = appendCart_np(new_pc[:,3:])
new_pc = np.concatenate([new_pc[:,3:],np.ones((new_pc.shape[0],1))],axis=1)
new_pc = np.dot(new_pc,np.linalg.inv(pose))
index = np.random.choice(new_pc.shape[0],768)
new_pc = new_pc[index]
return new_pc[:,:3]
def lidar(severity):
## severity here indexes the sensor viewpoint rather than a true corruption magnitude ##
pointcloud = []
f_0 = open("./data/modelnet40_ply_hdf5_2048/ply_data_test_0_id2file.json")
f_1 = open("./data/modelnet40_ply_hdf5_2048/ply_data_test_1_id2file.json")
list_0 = json.load(f_0)
list_1 = json.load(f_1)
f_0.close()
f_1.close()
for item in list_0 + list_1:
folder = item.split('/')[0]
mesh = item.split('/')[1][:-3] + 'off'
original_data = load_mesh("./data/ModelNet40/" + folder + "/test/" + mesh)
new_pc,pose = occlusion_1(original_data,'lidar',severity,n_points=1024)
new_pc = simulate_lidar(new_pc,pose,severity)
theta = -np.pi / 2.
gamma = 0
beta = np.pi
matrix_1 = np.array([[1,0,0],[0,np.cos(theta),-np.sin(theta)],[0,np.sin(theta),np.cos(theta)]])
matrix_2 = np.array([[np.cos(gamma),0,np.sin(gamma)],[0,1,0],[-np.sin(gamma),0,np.cos(gamma)]])
matrix_3 = np.array([[np.cos(beta),-np.sin(beta),0],[np.sin(beta),np.cos(beta),0],[0,0,1]])
new_pc = np.matmul(new_pc,matrix_1)
new_pc = np.matmul(new_pc,matrix_2)
new_pc = np.matmul(new_pc,matrix_3).astype('float32')
pointcloud.append(new_pc)
pointcloud = np.stack(pointcloud,axis=0)
np.save("./data/modelnet40_c/data_lidar_" + str(severity) + ".npy", pointcloud)
return
def ffd_distortion(pointcloud, severity):
N, C = pointcloud.shape
c = [0.1,0.2,0.3,0.4,0.5][severity-1]
new_pc = distortion.distortion(pointcloud,severity=c)
return normalize(new_pc)
def rbf_distortion(pointcloud, severity):
N, C = pointcloud.shape
c = [(0.025,5),(0.05,5),(0.075,5),(0.1,5),(0.125,5)][severity-1]
new_pc = distortion.distortion_2(pointcloud,severity=c,func='multi_quadratic_biharmonic_spline')
return normalize(new_pc).astype('float32')
def rbf_distortion_inv(pointcloud, severity):
N, C = pointcloud.shape
c = [(0.025,5),(0.05,5),(0.075,5),(0.1,5),(0.125,5)][severity-1]
new_pc = distortion.distortion_2(pointcloud,severity=c,func='inv_multi_quadratic_biharmonic_spline')
return normalize(new_pc).astype('float32')
def load_data():
os.makedirs("./data/modelnet40_c",exist_ok = True)
modelnet40_dir = "./data/modelnet40_ply_hdf5_2048/"
modelnet40_test_file = os.path.join(modelnet40_dir, "test_files.txt")
with open(modelnet40_test_file, "r") as f:
modelnet40_test_paths = [l.strip() for l in f.readlines()]
data = []
labels = []
for modelnet40_test_path in modelnet40_test_paths:
test_h5 = h5py.File(modelnet40_test_path, "r")
data.append(test_h5["data"][:])
labels.append(test_h5["label"][:])
data = np.concatenate(data)
labels = np.concatenate(labels)
np.save("./data/modelnet40_c/label.npy", labels)
return data, labels
def save_data(data,corruption,severity):
if not MAP[corruption]:
np.save("./data/modelnet40_c/data_" + corruption + ".npy", data)
return
if corruption in ['occlusion', 'lidar']:
# occlusion/lidar rebuild the whole split from the source meshes and, like
# lidar above, write their own .npy files, so invoke them once and return
MAP[corruption](severity)
return
new_data = []
for i in range(data.shape[0]):
new_data.append(MAP[corruption](data[i],severity))
new_data = np.stack(new_data,axis=0)
np.save("./data/modelnet40_c/data_" + corruption + "_" + str(severity) + ".npy", new_data)
MAP = {'uniform': uniform_noise,
'gaussian': gaussian_noise,
'background': background_noise,
'impulse': impulse_noise,
'scale': scale,
'upsampling': upsampling,
'shear': shear,
'rotation': rotation,
'cutout': cutout,
'density': density,
'density_inc': density_inc,
'distortion': ffd_distortion,
'distortion_rbf': rbf_distortion,
'distortion_rbf_inv': rbf_distortion_inv,
'occlusion': occlusion,
'lidar': lidar,
'original': None,
}
ORIG_NUM = 1024
if __name__ == "__main__":
data, labels = load_data()
for cor in MAP.keys():
# if cor in ['occlusion', 'lidar']:
# continue
for sev in [1,2,3,4,5]:
if cor == 'density_inc':
ORIG_NUM = 2048
else:
ORIG_NUM = 1024
index = np.random.choice(data.shape[1],ORIG_NUM,replace=False)
save_data(data[:,index,:], cor, sev)
print("Done with Corruption: {} with Severity: {}".format(cor,sev))
================================================
FILE: data/occlusion.py
================================================
import open3d as o3d
import numpy as np
from util import get_points, set_points, normalize, shuffle_data
def random_pose(severity):
"""generate a random camera pose"""
theta = 2 * np.pi * severity / 5
delta = np.pi / 5
angle_x = np.random.uniform(2./3. * np.pi, 5./6. * np.pi)
angle_y = 0
angle_z = np.random.uniform(theta-delta,theta+delta)
Rx = np.array([[1, 0, 0],
[0, np.cos(angle_x), -np.sin(angle_x)],
[0, np.sin(angle_x), np.cos(angle_x)]])
Ry = np.array([[np.cos(angle_y), 0, np.sin(angle_y)],
[0, 1, 0],
[-np.sin(angle_y), 0, np.cos(angle_y)]])
Rz = np.array([[np.cos(angle_z), -np.sin(angle_z), 0],
[np.sin(angle_z), np.cos(angle_z), 0],
[0, 0, 1]])
R = np.dot(Rz, np.dot(Ry, Rx))
# a rotation matrix with arbitrarily chosen yaw, pitch, roll
# Set the camera pointing at the origin, 3 units away from it
t = np.expand_dims(-R[:, 2] * 3., 1) # camera position: 3 units along the negative viewing axis, shaped (3, 1)
matrix = np.concatenate([np.concatenate([R.T, -np.dot(R.T,t)], 1), [[0, 0, 0, 1]]], 0)
return matrix
def lidar_pose(severity):
"""generate a random LiDAR pose"""
theta = 2 * np.pi * severity / 5
delta = np.pi / 5
angle_x = 5./8. * np.pi
angle_y = 0
angle_z = np.random.uniform(theta-delta,theta+delta)
Rx = np.array([[1, 0, 0],
[0, np.cos(angle_x), -np.sin(angle_x)],
[0, np.sin(angle_x), np.cos(angle_x)]])
Ry = np.array([[np.cos(angle_y), 0, np.sin(angle_y)],
[0, 1, 0],
[-np.sin(angle_y), 0, np.cos(angle_y)]])
Rz = np.array([[np.cos(angle_z), -np.sin(angle_z), 0],
[np.sin(angle_z), np.cos(angle_z), 0],
[0, 0, 1]])
R = np.dot(Rz, np.dot(Ry, Rx))
# a rotation matrix with arbitrarily chosen yaw, pitch, roll
# Set the sensor pointing at the origin, 5 units away from it
t = np.expand_dims(-R[:, 2] * 5, 1) # sensor position: 5 units along the negative viewing axis, shaped (3, 1)
pose = np.concatenate([np.concatenate([R, t], 1), [[0, 0, 0, 1]]], 0)
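# 'pose' is the camera-to-world transform [R | t]; 'matrix' below is its
# inverse, the world-to-camera extrinsic [R^T | -R^T t]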
matrix = np.concatenate([np.concatenate([R.T, -np.dot(R.T,t)], 1), [[0, 0, 0, 1]]], 0)
return matrix, pose
def get_default_camera_extrinsic():
return np.array([[1,0,0,1],
[0,1,0,0],
[0,0,1,2],
[0,0,0,1]])
def get_default_camera_intrinsic(width=1920, height=1080):
return {
"width": width,
"height": height,
"fx": 365,
"fy": 365,
"cx": width / 2 - 0.5,
"cy": height / 2 - 0.5
}
def core_occlusion(mesh, type, camera_extrinsic=None, camera_intrinsic=None, window_width=1080, window_height=720, n_points=None, downsample_ratio=None):
if camera_extrinsic is None:
camera_extrinsic = get_default_camera_extrinsic()
if camera_intrinsic is None:
camera_intrinsic = get_default_camera_intrinsic()
camera_parameters = o3d.camera.PinholeCameraParameters()
camera_parameters.extrinsic = camera_extrinsic
camera_parameters.intrinsic.set_intrinsics(**camera_intrinsic)
viewer = o3d.visualization.Visualizer()
viewer.create_window(width=window_width, height=window_height)
viewer.add_geometry(mesh)
control = viewer.get_view_control()
control.convert_from_pinhole_camera_parameters(camera_parameters)
# viewer.run()
depth = viewer.capture_depth_float_buffer(do_render=True)
viewer.destroy_window()
pcd = o3d.geometry.PointCloud.create_from_depth_image(depth, camera_parameters.intrinsic, extrinsic=camera_parameters.extrinsic)
if downsample_ratio is not None:
ratio = int((1 - downsample_ratio) / downsample_ratio)
pcd = pcd.uniform_down_sample(ratio)
elif n_points is not None:
# print(np.asarray(pcd.points).shape[0])
ratio = int(np.asarray(pcd.points).shape[0] / n_points)
if ratio > 0:
# if type == 'occlusion':
set_points(pcd, shuffle_data(np.asarray(pcd.points)))
pcd = pcd.uniform_down_sample(ratio)
return pcd
def occlusion_1(mesh, type, severity, window_width=1080, window_height=720, n_points=None, downsample_ratio=None):
points = get_points(mesh)
points = normalize(points)
set_points(mesh, points)
if type == 'occlusion':
camera_extrinsic = random_pose(severity)
elif type == 'lidar':
camera_extrinsic,pose = lidar_pose(severity)
camera_intrinsic = get_default_camera_intrinsic(window_width, window_height)
pcd = core_occlusion(mesh, type, camera_extrinsic=camera_extrinsic, camera_intrinsic=camera_intrinsic, window_width=window_width, window_height=window_height, n_points=n_points, downsample_ratio=downsample_ratio)
points = get_points(pcd)
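# if the rendered cloud is too sparse, pad it by resampling with replacement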
if points.shape[0] < n_points:
index = np.random.choice(points.shape[0], n_points)
points = points[index]
# points = normalize(points)
# points = denomalize(points, scale, offset)
if type == 'lidar':
return points[:n_points,:], pose
else:
return points[:n_points,:]
================================================
FILE: data/process.py
================================================
import os
SHAPE = ["airplane",
"bathtub",
"bed",
"bench",
"bookshelf",
"bottle",
"bowl",
"car",
"chair",
"cone",
"cup",
"curtain",
"desk",
"door",
"dresser",
"flower_pot",
"glass_box",
"guitar",
"keyboard",
"lamp",
"laptop",
"mantel",
"monitor",
"night_stand",
"person",
"piano",
"plant",
"radio",
"range_hood",
"sink",
"sofa",
"stairs",
"stool",
"table",
"tent",
"toilet",
"tv_stand",
"vase",
"wardrobe",
"xbox"
]
if __name__ == '__main__':
for shape_name in SHAPE:
g = os.walk("data/ModelNet40/"+shape_name+"/test")
for path,dir_list,file_list in g:
for file in file_list:
# print(file)
with open(os.path.join(path,file), "r") as f:
lines = f.readlines()
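# Some ModelNet40 .off files have a malformed header in which "OFF" is fused
# with the vertex/face counts on one line; a well-formed first line is exactly
# "OFF\n" (length 4), otherwise split the fused header below.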
if len(lines[0]) == 4:
continue
else:
lines.insert(0,'OFF\n')
lines[1] = lines[1][3:]
# print(lines)
with open(os.path.join(path,file), "w") as f:
for line in lines:
f.write(line)
================================================
FILE: data/util.py
================================================
import open3d as o3d
import numpy as np
import copy
def get_points(data):
if isinstance(data, o3d.geometry.TriangleMesh):
return np.asarray(data.vertices)
elif isinstance(data, o3d.geometry.PointCloud):
return np.asarray(data.points)
else:
raise TypeError("Wrong input data format: should be a point cloud or a mesh")
def set_points(data, points):
if isinstance(data, o3d.geometry.TriangleMesh):
data.vertices = o3d.utility.Vector3dVector(points)
return data
elif isinstance(data, o3d.geometry.PointCloud):
data.points = o3d.utility.Vector3dVector(points)
return data
else:
raise TypeError("Wrong input data format: should be a point cloud or a mesh")
def normalize(new_pc):
# center each axis at the midpoint of its extent
new_pc[:,0] -= (np.max(new_pc[:,0]) + np.min(new_pc[:,0])) / 2
new_pc[:,1] -= (np.max(new_pc[:,1]) + np.min(new_pc[:,1])) / 2
new_pc[:,2] -= (np.max(new_pc[:,2]) + np.min(new_pc[:,2])) / 2
leng_x, leng_y, leng_z = np.max(new_pc[:,0]) - np.min(new_pc[:,0]), np.max(new_pc[:,1]) - np.min(new_pc[:,1]), np.max(new_pc[:,2]) - np.min(new_pc[:,2])
# scale so the longest axis spans 2 units
ratio = 2.0 / max(leng_x, leng_y, leng_z)
new_pc *= ratio
return new_pc
def denomalize(points, scale, offset, hard_copy=False):
if hard_copy:
new_points = copy.deepcopy(points)
else:
new_points = points
n_points = new_points.shape[0]
new_points = new_points * np.tile(scale, (n_points,1)) + np.tile(offset, (n_points,1))
return new_points
def shuffle_data(data):
idx = np.arange(data.shape[0])
np.random.shuffle(idx)
return data[idx, ...]
def appendSpherical_np(xyz):
ptsnew = np.hstack((xyz, np.zeros(xyz.shape)))
xy = xyz[:,0]**2 + xyz[:,1]**2
ptsnew[:,3] = np.sqrt(xy + xyz[:,2]**2)
ptsnew[:,4] = np.arctan2(np.sqrt(xy), xyz[:,2]) # for elevation angle defined from Z-axis down
#ptsnew[:,4] = np.arctan2(xyz[:,2], np.sqrt(xy)) # for elevation angle defined from XY-plane up
ptsnew[:,5] = np.arctan2(xyz[:,1], xyz[:,0])
return ptsnew
def appendCart_np(xyz):
ptsnew = np.hstack((xyz, np.zeros(xyz.shape)))
ptsnew[:,3] = ptsnew[:,0] * np.sin(ptsnew[:,1]) * np.cos(ptsnew[:,2])
ptsnew[:,4] = ptsnew[:,0] * np.sin(ptsnew[:,1]) * np.sin(ptsnew[:,2])
ptsnew[:,5] = ptsnew[:,0] * np.cos(ptsnew[:,1])
return ptsnew
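# Sketch: round-trip check for the two helpers above (tolerance arbitrary).
#
#   pts = np.random.randn(16, 3)
#   sph = appendSpherical_np(pts)        # columns 3:6 hold (r, theta, phi)
#   cart = appendCart_np(sph[:, 3:])     # columns 3:6 recover (x, y, z)
#   assert np.allclose(pts, cart[:, 3:], atol=1e-6)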
================================================
FILE: dataloader.py
================================================
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
from pc_utils import (rotate_point_cloud, PointcloudScaleAndTranslate)
import rs_cnn.data.data_utils as rscnn_d_utils
from rs_cnn.data.ModelNet40Loader import ModelNet40Cls as rscnn_ModelNet40Cls
import PCT_Pytorch.pointnet2_ops_lib.pointnet2_ops.pointnet2_utils as pointnet2_utils
from pointnet2_tf.modelnet_h5_dataset import ModelNetH5Dataset as pointnet2_ModelNetH5Dataset
from dgcnn.pytorch.data import ModelNet40 as dgcnn_ModelNet40
# distilled from the following sources:
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/data/ModelNet40Loader.py
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/train_cls.py
class ModelNet40Rscnn(Dataset):
def __init__(self, split, data_path, train_data_path,
valid_data_path, test_data_path, num_points):
self.split = split
self.num_points = num_points
_transforms = transforms.Compose([rscnn_d_utils.PointcloudToTensor()])
rscnn_params = {
'num_points': 1024, # although it does not matter
'root': data_path,
'transforms': _transforms,
'train': (split in ["train", "valid"]),
'data_file': {
'train': train_data_path,
'valid': valid_data_path,
'test': test_data_path
}[self.split]
}
self.rscnn_dataset = rscnn_ModelNet40Cls(**rscnn_params)
self.PointcloudScaleAndTranslate = PointcloudScaleAndTranslate()
def __len__(self):
return self.rscnn_dataset.__len__()
def __getitem__(self, idx):
point, label = self.rscnn_dataset.__getitem__(idx)
# for compatibility with the overall code
point = np.array(point)
label = label[0].item()
return {'pc': point, 'label': label}
def batch_proc(self, data_batch, device):
point = data_batch['pc'].to(device)
if self.split == "train":
# (B, npoint)
fps_idx = pointnet2_utils.furthest_point_sample(point, 1200)
fps_idx = fps_idx[:, np.random.choice(1200, self.num_points,
False)]
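# train-time resampling: FPS down to 1200 candidates, then keep a random
# subset of num_points, followed by scale-and-translate augmentation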
point = pointnet2_utils.gather_operation(
point.transpose(1, 2).contiguous(),
fps_idx).transpose(1, 2).contiguous() # (B, N, 3)
point.data = self.PointcloudScaleAndTranslate(point.data)
else:
fps_idx = pointnet2_utils.furthest_point_sample(
point, self.num_points) # (B, npoint)
point = pointnet2_utils.gather_operation(
point.transpose(1, 2).contiguous(),
fps_idx).transpose(1, 2).contiguous()
# to maintain compatibility
point = point.cpu()
return {'pc': point, 'label': data_batch['label']}
# distilled from the following sources:
# https://github.com/charlesq34/pointnet2/blob/7961e26e31d0ba5a72020635cee03aac5d0e754a/modelnet_h5_dataset.py
# https://github.com/charlesq34/pointnet2/blob/7961e26e31d0ba5a72020635cee03aac5d0e754a/train.py
class ModelNet40PN2(Dataset):
def __init__(self, split, train_data_path,
valid_data_path, test_data_path, num_points):
self.split = split
self.dataset_name = 'modelnet40_pn2'
data_path = {
"train": train_data_path,
"valid": valid_data_path,
"test": test_data_path
}[self.split]
pointnet2_params = {
'list_filename': data_path,
# internal chunk size used while loading; unrelated to the DataLoader batch size
'batch_size': 32,
'npoints': num_points,
'shuffle': False
}
# loading all the pointnet2data
self._dataset = pointnet2_ModelNetH5Dataset(**pointnet2_params)
all_pc = []
all_label = []
while self._dataset.has_next_batch():
# the augment flag here is unrelated to the train-time augmentation applied in batch_proc
pc, label = self._dataset.next_batch(augment=False)
all_pc.append(pc)
all_label.append(label)
self.all_pc = np.concatenate(all_pc)
self.all_label = np.concatenate(all_label)
def __len__(self):
return self.all_pc.shape[0]
def __getitem__(self, idx):
return {'pc': self.all_pc[idx], 'label': np.int64(self.all_label[idx])}
def batch_proc(self, data_batch, device):
if self.split == "train":
point = np.array(data_batch['pc'])
point = self._dataset._augment_batch_data(point)
# converted to tensor to maintain compatibility with the other code
data_batch['pc'] = torch.tensor(point)
else:
pass
return data_batch
class ModelNet40Dgcnn(Dataset):
def __init__(self, split, train_data_path,
valid_data_path, test_data_path, num_points):
self.split = split
self.data_path = {
"train": train_data_path,
"valid": valid_data_path,
"test": test_data_path
}[self.split]
dgcnn_params = {
'partition': 'train' if split in ['train', 'valid'] else 'test',
'num_points': num_points,
"data_path": self.data_path
}
self.dataset = dgcnn_ModelNet40(**dgcnn_params)
def __len__(self):
return self.dataset.__len__()
def __getitem__(self, idx):
pc, label = self.dataset.__getitem__(idx)
return {'pc': pc, 'label': label.item()}
def load_data(data_path,corruption,severity):
DATA_DIR = os.path.join(data_path, 'data_' + corruption + '_' +str(severity) + '.npy')
# if corruption in ['occlusion']:
# LABEL_DIR = os.path.join(data_path, 'label_occlusion.npy')
LABEL_DIR = os.path.join(data_path, 'label.npy')
all_data = np.load(DATA_DIR)
all_label = np.load(LABEL_DIR)
return all_data, all_label
class ModelNet40C(Dataset):
def __init__(self, split, test_data_path,corruption,severity):
assert split == 'test'
self.split = split
self.data_path = {
"test": test_data_path
}[self.split]
self.corruption = corruption
self.severity = severity
self.data, self.label = load_data(self.data_path, self.corruption, self.severity)
# self.num_points = num_points
self.partition = 'test'
def __getitem__(self, item):
pointcloud = self.data[item]#[:self.num_points]
label = self.label[item]
return {'pc': pointcloud, 'label': label.item()}
def __len__(self):
return self.data.shape[0]
def create_dataloader(split, cfg):
num_workers = cfg.DATALOADER.num_workers
batch_size = cfg.DATALOADER.batch_size
dataset_args = {
"split": split
}
if cfg.EXP.DATASET == "modelnet40_rscnn":
dataset_args.update(dict(**cfg.DATALOADER.MODELNET40_RSCNN))
# augmentation directly done in the code so that
# it is as similar to the vanilla code as possible
dataset = ModelNet40Rscnn(**dataset_args)
elif cfg.EXP.DATASET == "modelnet40_pn2":
dataset_args.update(dict(**cfg.DATALOADER.MODELNET40_PN2))
dataset = ModelNet40PN2(**dataset_args)
elif cfg.EXP.DATASET == "modelnet40_dgcnn":
dataset_args.update(dict(**cfg.DATALOADER.MODELNET40_DGCNN))
dataset = ModelNet40Dgcnn(**dataset_args)
elif cfg.EXP.DATASET == "modelnet40_c":
dataset_args.update(dict(**cfg.DATALOADER.MODELNET40_C))
dataset = ModelNet40C(**dataset_args)
else:
assert False
if "batch_proc" not in dir(dataset):
dataset.batch_proc = None
return DataLoader(
dataset,
batch_size,
num_workers=num_workers,
shuffle=(split == "train"),
drop_last=(split == "train"),
pin_memory=(torch.cuda.is_available()) and (not num_workers)
)
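# Sketch (hypothetical cfg values): building a ModelNet40-C test loader.
# The attribute names mirror the accesses in create_dataloader above; the
# actual experiment configs live under configs/.
#
#   cfg.EXP.DATASET = "modelnet40_c"
#   cfg.DATALOADER.batch_size = 32
#   cfg.DATALOADER.num_workers = 4
#   cfg.DATALOADER.MODELNET40_C = {"test_data_path": "./data/modelnet40_c/",
#                                  "corruption": "gaussian", "severity": 3}
#   loader = create_dataloader("test", cfg)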
================================================
FILE: dgcnn/.gitignore
================================================
data/
log/
*.pyc
.DS_Store
pytorch/pretrained/
pytorch/checkpoints/
tensorflow/part_seg/train_results/
================================================
FILE: dgcnn/README.md
================================================
# Dynamic Graph CNN for Learning on Point Clouds
We propose a new neural network module dubbed EdgeConv suitable for CNN-based high-level tasks on point clouds including classification and segmentation. EdgeConv is differentiable and can be plugged into existing architectures.
[[Project]](https://liuziwei7.github.io/projects/DGCNN) [[Paper]](https://arxiv.org/abs/1801.07829)
## Overview
`DGCNN` is the author's re-implementation of Dynamic Graph CNN, which achieves state-of-the-art performance on point-cloud-related high-level tasks including category classification, semantic segmentation and part segmentation.
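Each EdgeConv layer rebuilds a kNN graph over the current features and applies a shared MLP to the edge features `(x_j - x_i, x_i)` before max-pooling over the neighbors. A minimal PyTorch sketch of one layer (mirroring `get_graph_feature` in `pytorch/model.py`; the layer width and `k` are illustrative):
```python
import torch
import torch.nn as nn

def knn(x, k):
    # x: (B, C, N) -> indices of the k nearest neighbors, (B, N, k)
    inner = -2 * torch.matmul(x.transpose(2, 1), x)
    xx = torch.sum(x ** 2, dim=1, keepdim=True)
    return (-xx - inner - xx.transpose(2, 1)).topk(k, dim=-1)[1]

class EdgeConv(nn.Module):
    def __init__(self, in_dim, out_dim, k=20):
        super().__init__()
        self.k = k
        self.mlp = nn.Sequential(
            nn.Conv2d(2 * in_dim, out_dim, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_dim),
            nn.LeakyReLU(negative_slope=0.2))

    def forward(self, x):                       # x: (B, C, N)
        B, C, N = x.shape
        idx = knn(x, self.k)                    # (B, N, k)
        idx = idx + torch.arange(B, device=x.device).view(-1, 1, 1) * N
        pts = x.transpose(2, 1).reshape(B * N, C)
        neighbors = pts[idx.view(-1)].view(B, N, self.k, C)
        center = x.transpose(2, 1).unsqueeze(2).expand(-1, -1, self.k, -1)
        edge = torch.cat([neighbors - center, center], dim=3)
        edge = edge.permute(0, 3, 1, 2)         # (B, 2C, N, k)
        return self.mlp(edge).max(dim=-1)[0]    # (B, out_dim, N)

# e.g. EdgeConv(3, 64)(torch.randn(2, 3, 1024)) has shape (2, 64, 1024)
```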
For further information, please contact [Yue Wang](https://www.csail.mit.edu/person/yue-wang) or [Yongbin Sun](https://autoid.mit.edu/people-2).
## Author's Implementations
The classification experiments in our paper are done with the pytorch implementation.
* [tensorflow-dgcnn](./tensorflow)
* [pytorch-dgcnn](./pytorch)
## Other Implementations
* [pytorch-geometric](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.EdgeConv)
* [pytorch-dgcnn](https://github.com/AnTao97/dgcnn.pytorch) (this implementation achieves significantly better results on S3DIS than our tensorflow implementation)
## Citation
Please cite this paper if you use it in your work:
@article{dgcnn,
title={Dynamic Graph CNN for Learning on Point Clouds},
author={Wang, Yue and Sun, Yongbin and Liu, Ziwei and Sarma, Sanjay E. and Bronstein, Michael M. and Solomon, Justin M.},
journal={ACM Transactions on Graphics (TOG)},
year={2019}
}
## License
MIT License
## Acknowledgement
The structure of this codebase is borrowed from [PointNet](https://github.com/charlesq34/pointnet).
================================================
FILE: dgcnn/pytorch/README.md
================================================
# Dynamic Graph CNN for Learning on Point Clouds (PyTorch)
## Point Cloud Classification
* Run the training script:
``` bash
# 1024 points
python main.py --exp_name=dgcnn_1024 --model=dgcnn --num_points=1024 --k=20 --use_sgd=True
# 2048 points
python main.py --exp_name=dgcnn_2048 --model=dgcnn --num_points=2048 --k=40 --use_sgd=True
```
* Run the evaluation script after training is finished:
``` bash
# 1024 points
python main.py --exp_name=dgcnn_1024_eval --model=dgcnn --num_points=1024 --k=20 --use_sgd=True --eval=True --model_path=checkpoints/dgcnn_1024/models/model.t7
# 2048 points
python main.py --exp_name=dgcnn_2048_eval --model=dgcnn --num_points=2048 --k=40 --use_sgd=True --eval=True --model_path=checkpoints/dgcnn_2048/models/model.t7
```
* Run the evaluation script with pretrained models:
``` bash
# 1024 points
python main.py --exp_name=dgcnn_1024_eval --model=dgcnn --num_points=1024 --k=20 --use_sgd=True --eval=True --model_path=pretrained/model.1024.t7
# 2048 points
python main.py --exp_name=dgcnn_2048_eval --model=dgcnn --num_points=2048 --k=40 --use_sgd=True --eval=True --model_path=pretrained/model.2048.t7
```
================================================
FILE: dgcnn/pytorch/data.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: data.py
@Time: 2018/10/13 6:21 PM
"""
import os
import sys
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
def download():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, '../../data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(data_path):
download()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, '../../data')
all_data = []
all_label = []
with open(data_path, "r") as f:
# for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5'%partition)):
for h5_name in f.readlines():
# h5_name = os.path.join(BASE_DIR, "../../", h5_name.strip())
f = h5py.File(h5_name.strip(), 'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, data_path, partition='train'):
self.data, self.label = load_data(data_path)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
# data_path arguments follow the *_files.txt convention used by load_data above
train = ModelNet40(1024, './data/modelnet40_ply_hdf5_2048/train_files.txt')
test = ModelNet40(1024, './data/modelnet40_ply_hdf5_2048/test_files.txt', partition='test')
for data, label in train:
print(data.shape)
print(label.shape)
================================================
FILE: dgcnn/pytorch/main.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: main.py
@Time: 2018/10/13 10:39 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from data import ModelNet40
from model import PointNet, DGCNN
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
def _init_():
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists('checkpoints/'+args.exp_name):
os.makedirs('checkpoints/'+args.exp_name)
if not os.path.exists('checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('checkpoints/'+args.exp_name+'/'+'models')
os.system('cp main.py checkpoints'+'/'+args.exp_name+'/'+'main.py.backup')
os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
os.system('cp util.py checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')
os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
def train(args, io):
# data_path lists follow the *_files.txt convention used by data.load_data (assumed defaults)
train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points,
data_path='./data/modelnet40_ply_hdf5_2048/train_files.txt'),
num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points,
data_path='./data/modelnet40_ply_hdf5_2048/test_files.txt'),
num_workers=8, batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
if args.model == 'pointnet':
model = PointNet(args).to(device)
elif args.model == 'dgcnn':
model = DGCNN(args).to(device)
else:
raise Exception("Not implemented")
print(str(model))
model = nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
if args.use_sgd:
print("Use SGD")
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
print("Use Adam")
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
print(f"Using the smoothing loss {bool(args.smoothing)}")
criterion = lambda x,y: cal_loss(x, y, bool(args.smoothing))
best_test_acc = 0
for epoch in range(args.epochs):
scheduler.step()
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
train_pred = []
train_true = []
for data, label in train_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
opt.zero_grad()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
opt.step()
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
train_loss*1.0/count,
metrics.accuracy_score(
train_true, train_pred),
metrics.balanced_accuracy_score(
train_true, train_pred))
io.cprint(outstr)
####################
# Test
####################
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
loss = criterion(logits, label)
preds = logits.max(dim=1)[1]
count += batch_size
test_loss += loss.item() * batch_size
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
test_loss*1.0/count,
test_acc,
avg_per_class_acc)
io.cprint(outstr)
if test_acc >= best_test_acc:
best_test_acc = test_acc
torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
def test(args, io):
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points,
data_path='./data/modelnet40_ply_hdf5_2048/test_files.txt'),
batch_size=args.test_batch_size, shuffle=True, drop_last=False)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
model = DGCNN(args).to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_acc = 0.0
count = 0.0
test_true = []
test_pred = []
for data, label in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
logits = model(data)
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Recognition')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--model', type=str, default='dgcnn', metavar='N',
choices=['pointnet', 'dgcnn'],
help='Model to use, [pointnet, dgcnn]')
parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
choices=['modelnet40'])
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch)')
parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
help='Size of batch)')
parser.add_argument('--epochs', type=int, default=250, metavar='N',
help='number of episode to train ')
parser.add_argument('--use_sgd', type=bool, default=True,
help='Use SGD')
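# NB: argparse's type=bool treats any non-empty string as True, so
# '--use_sgd=False' still enables SGD; the same caveat applies to
# --no_cuda and --eval below.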
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001, 0.1 if using sgd)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--no_cuda', type=bool, default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout rate')
parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',
help='Dimension of embeddings')
parser.add_argument('--k', type=int, default=20, metavar='N',
help='Num of nearest neighbors to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
parser.add_argument('--smoothing', type=int, default=1,
help='Whether to use smoothing in the loss')
parser.add_argument('--leaky_relu', type=int, default=1,
help='Whether to use leaky_relu')
args = parser.parse_args()
_init_()
io = IOStream('checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(args.seed)
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
test(args, io)
================================================
FILE: dgcnn/pytorch/model.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: model.py
@Time: 2018/10/13 6:35 PM
"""
import os
import sys
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def knn(x, k):
inner = -2*torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def get_graph_feature(x, k=20, idx=None):
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
if idx is None:
idx = knn(x, k=k) # (batch_size, num_points, k)
device = x.device # use the input's device so this also works on CPU
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims)
feature = x.view(batch_size*num_points, -1)[idx, :] # gather each point's k neighbors via the flattened index
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous()
return feature
class PointNet(nn.Module):
def __init__(self, args, output_channels=40):
super(PointNet, self).__init__()
self.args = args
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
self.bn5 = nn.BatchNorm1d(args.emb_dims)
self.linear1 = nn.Linear(args.emb_dims, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout()
self.linear2 = nn.Linear(512, output_channels)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
x = F.adaptive_max_pool1d(x, 1).squeeze()
x = F.relu(self.bn6(self.linear1(x)))
x = self.dp1(x)
x = self.linear2(x)
return x
class DGCNN(nn.Module):
def __init__(self, args, output_channels=40):
super(DGCNN, self).__init__()
self.args = args
self.k = args.k
self.leaky_relu = bool(args.leaky_relu)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm1d(args.emb_dims)
if self.leaky_relu:
act_mod = nn.LeakyReLU
act_mod_args = {'negative_slope': 0.2}
else:
act_mod = nn.ReLU
act_mod_args = {}
self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
self.bn1,
act_mod(**act_mod_args))
self.conv2 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
self.bn2,
act_mod(**act_mod_args))
self.conv3 = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False),
self.bn3,
act_mod(**act_mod_args))
self.conv4 = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False),
self.bn4,
act_mod(**act_mod_args))
self.conv5 = nn.Sequential(nn.Conv1d(512, args.emb_dims, kernel_size=1, bias=False),
self.bn5,
act_mod(**act_mod_args))
self.linear1 = nn.Linear(args.emb_dims*2, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
batch_size = x.size(0)
x = get_graph_feature(x, k=self.k)
x = self.conv1(x)
x1 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x1, k=self.k)
x = self.conv2(x)
x2 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x2, k=self.k)
x = self.conv3(x)
x3 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x3, k=self.k)
x = self.conv4(x)
x4 = x.max(dim=-1, keepdim=False)[0]
x = torch.cat((x1, x2, x3, x4), dim=1)
x = self.conv5(x)
x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
x = torch.cat((x1, x2), 1)
if self.leaky_relu:
act = lambda y: F.leaky_relu(y, negative_slope=0.2)
else:
act = F.relu
x = act(self.bn6(self.linear1(x)))
x = self.dp1(x)
x = act(self.bn7(self.linear2(x)))
x = self.dp2(x)
x = self.linear3(x)
return x
================================================
FILE: dgcnn/pytorch/util.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: util
@Time: 4/5/19 3:47 PM
"""
import numpy as np
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
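# With smoothing, the soft target places (1 - eps) on the gold class and
# eps / (n_class - 1) on each other class; the loss is the mean
# cross-entropy against that distribution (eps = 0.2 below).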
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
class IOStream():
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text+'\n')
self.f.flush()
def close(self):
self.f.close()
================================================
FILE: dgcnn/tensorflow/README.md
================================================
# Dynamic Graph CNN for Learning on Point Clouds (TensorFlow)
## Point Cloud Classification
* Run the training script:
``` bash
python train.py
```
* Run the evaluation script after training is finished:
``` bash
python evaluate.py
```
================================================
FILE: dgcnn/tensorflow/evaluate.py
================================================
import tensorflow as tf
import numpy as np
import argparse
import socket
import importlib
import time
import os
import scipy.misc
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import pc_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='dgcnn', help='Model name: dgcnn [default: dgcnn]')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
MODEL = importlib.import_module(FLAGS.model) # import network module
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
NUM_CLASSES = 40
SHAPE_NAMES = [line.rstrip() for line in \
open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
HOSTNAME = socket.gethostname()
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate(num_votes):
is_training = False
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# simple model
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
loss = MODEL.get_loss(pred, labels_pl, end_points)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss}
eval_one_epoch(sess, ops, num_votes)
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
error_cnt = 0
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
for fn in range(len(TEST_FILES)):
log_string('----'+str(fn)+'----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
print(current_data.shape)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
print(file_size)
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
cur_batch_size = end_idx - start_idx
# Aggregating BEG
batch_loss_sum = 0 # sum of losses for the batch
batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
for vote_idx in range(num_votes):
rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
vote_idx/float(num_votes) * np.pi * 2)
feed_dict = {ops['pointclouds_pl']: rotated_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
feed_dict=feed_dict)
batch_pred_sum += pred_val
batch_pred_val = np.argmax(pred_val, 1)
for el_idx in range(cur_batch_size):
batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
# pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
# pred_val = np.argmax(batch_pred_classes, 1)
pred_val = np.argmax(batch_pred_sum, 1)
# Aggregating END
correct = np.sum(pred_val == current_label[start_idx:end_idx])
# correct = np.sum(pred_val_topk[:,0:topk] == label_val)
total_correct += correct
total_seen += cur_batch_size
loss_sum += batch_loss_sum
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
SHAPE_NAMES[pred_val[i-start_idx]])
img_filename = os.path.join(DUMP_DIR, img_filename)
output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
scipy.misc.imsave(img_filename, output_img)
error_cnt += 1
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64)
for i, name in enumerate(SHAPE_NAMES):
log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
if __name__=='__main__':
with tf.Graph().as_default():
evaluate(num_votes=12)
LOG_FOUT.close()
================================================
FILE: dgcnn/tensorflow/models/dgcnn.py
================================================
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
sys.path.append(os.path.join(BASE_DIR, '../../utils'))
import tf_util
from transform_nets import input_transform_net
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
k = 20
adj_matrix = tf_util.pairwise_distance(point_cloud)
nn_idx = tf_util.knn(adj_matrix, k=k)
edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
nn_idx = tf_util.knn(adj_matrix, k=k)
edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)
net = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='dgcnn1', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=-2, keep_dims=True)
net1 = net
adj_matrix = tf_util.pairwise_distance(net)
nn_idx = tf_util.knn(adj_matrix, k=k)
edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)
net = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='dgcnn2', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=-2, keep_dims=True)
net2 = net
adj_matrix = tf_util.pairwise_distance(net)
nn_idx = tf_util.knn(adj_matrix, k=k)
edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)
net = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='dgcnn3', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=-2, keep_dims=True)
net3 = net
adj_matrix = tf_util.pairwise_distance(net)
nn_idx = tf_util.knn(adj_matrix, k=k)
edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)
net = tf_util.conv2d(edge_feature, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='dgcnn4', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=-2, keep_dims=True)
net4 = net
net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='agg', bn_decay=bn_decay)
net = tf.reduce_max(net, axis=1, keep_dims=True)
# MLP on global point cloud vector
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
labels = tf.one_hot(indices=label, depth=40)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=pred, label_smoothing=0.2)
classify_loss = tf.reduce_mean(loss)
return classify_loss
if __name__=='__main__':
batch_size = 2
num_pt = 124
pos_dim = 3
input_feed = np.random.rand(batch_size, num_pt, pos_dim)
label_feed = np.random.rand(batch_size)
label_feed[label_feed>=0.5] = 1
label_feed[label_feed<0.5] = 0
label_feed = label_feed.astype(np.int32)
# # np.save('./debug/input_feed.npy', input_feed)
# input_feed = np.load('./debug/input_feed.npy')
# print input_feed
with tf.Graph().as_default():
input_pl, label_pl = placeholder_inputs(batch_size, num_pt)
pos, ftr = get_model(input_pl, tf.constant(True))
# loss = get_loss(logits, label_pl, None)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {input_pl: input_feed, label_pl: label_feed}
res1, res2 = sess.run([pos, ftr], feed_dict=feed_dict)
print(res1.shape)
print(res1)
print(res2.shape)
print(res2)
================================================
FILE: dgcnn/tensorflow/models/transform_nets.py
================================================
import tensorflow as tf
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
def input_transform_net(edge_feature, is_training, bn_decay=None, K=3, is_dist=False):
""" Input (XYZ) Transform Net, input is BxNx3 gray image
Return:
Transformation matrix of size 3xK """
batch_size = edge_feature.get_shape()[0].value
num_point = edge_feature.get_shape()[1].value
# input_image = tf.expand_dims(point_cloud, -1)
net = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv1', bn_decay=bn_decay, is_dist=is_dist)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv2', bn_decay=bn_decay, is_dist=is_dist)
net = tf.reduce_max(net, axis=-2, keep_dims=True)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv3', bn_decay=bn_decay, is_dist=is_dist)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='tmaxpool')
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='tfc1', bn_decay=bn_decay,is_dist=is_dist)
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='tfc2', bn_decay=bn_decay,is_dist=is_dist)
with tf.variable_scope('transform_XYZ') as sc:
# assert(K==3)
with tf.device('/cpu:0'):
weights = tf.get_variable('weights', [256, K*K],
initializer=tf.constant_initializer(0.0),
dtype=tf.float32)
biases = tf.get_variable('biases', [K*K],
initializer=tf.constant_initializer(0.0),
dtype=tf.float32)
biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
transform = tf.matmul(net, weights)
transform = tf.nn.bias_add(transform, biases)
transform = tf.reshape(transform, [batch_size, K, K])
return transform
================================================
FILE: dgcnn/tensorflow/part_seg/README.md
================================================
## Part segmentation
### Dataset
Download the data for part segmentation.
```
sh +x download_data.sh
```
### Train
Train the model on 2 GPUs, each with 12 GB memory.
```
python train_multi_gpu.py
```
Model parameters are saved every 5 epochs in "train_results/trained_models/".
### Evaluation
To evaluate the model saved after epoch n,
```
python test.py --model_path train_results/trained_models/epoch_n.ckpt
```
For example, if we want to test the model saved after 175 epochs (provided),
```
python test.py --model_path train_results/trained_models/epoch_175.ckpt
```
================================================
FILE: dgcnn/tensorflow/part_seg/download_data.sh
================================================
#!/bin/bash
# Download original ShapeNetPart dataset (around 1GB) ['PartAnnotation']
wget https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_v0.zip
unzip shapenetcore_partanno_v0.zip
rm shapenetcore_partanno_v0.zip
# Download HDF5 for ShapeNet Part segmentation (around 346MB) ['hdf5_data']
wget https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip
unzip shapenet_part_seg_hdf5_data.zip
rm shapenet_part_seg_hdf5_data.zip
================================================
FILE: dgcnn/tensorflow/part_seg/part_seg_model.py
================================================
import tensorflow as tf
import numpy as np
import math
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE_DIR))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
sys.path.append(os.path.join(BASE_DIR, '../models'))
sys.path.append(os.path.join(BASE_DIR, '../'))
import tf_util
from transform_nets import input_transform_net
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
batch_size, num_point, weight_decay, bn_decay=None):
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
input_image = tf.expand_dims(point_cloud, -1)
k = 20
adj = tf_util.pairwise_distance(point_cloud)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(edge_feature, is_training, bn_decay, K=3, is_dist=True)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
adj = tf_util.pairwise_distance(point_cloud_transformed)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)
out1 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
out2 = tf_util.conv2d(out1, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv2', bn_decay=bn_decay, is_dist=True)
net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
adj = tf_util.pairwise_distance(net_1)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)
out3 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv3', bn_decay=bn_decay, is_dist=True)
out4 = tf_util.conv2d(out3, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
adj = tf_util.pairwise_distance(net_2)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)
out5 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv5', bn_decay=bn_decay, is_dist=True)
# out6 = tf_util.conv2d(out5, 64, [1,1],
# padding='VALID', stride=[1,1],
# bn=True, is_training=is_training, weight_decay=weight_decay,
# scope='adj_conv6', bn_decay=bn_decay, is_dist=True)
net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)
out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='adj_conv7', bn_decay=bn_decay, is_dist=True)
out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')
one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
expand = tf.tile(out_max, [1, num_point, 1, 1])
concat = tf.concat(axis=3, values=[expand,
net_1,
net_2,
net_3])
net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)
net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')
net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)
net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')
net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)
net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
bn=False, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)
net2 = tf.reshape(net2, [batch_size, num_point, part_num])
return net2
def get_loss(seg_pred, seg):
per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
seg_loss = tf.reduce_mean(per_instance_seg_loss)
per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
return seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
================================================
FILE: dgcnn/tensorflow/part_seg/test.py
================================================
import argparse
import tensorflow as tf
import json
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
import provider
import part_seg_model as model
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default='train_results/trained_models/epoch_160.ckpt', help='Model checkpoint path')
FLAGS = parser.parse_args()
# DEFAULT SETTINGS
pretrained_model_path = FLAGS.model_path
hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data')
ply_data_dir = os.path.join(BASE_DIR, './PartAnnotation')
gpu_to_use = 0
output_dir = os.path.join(BASE_DIR, './test_results')
output_verbose = False
# MAIN SCRIPT
point_num = 3000
batch_size = 1
test_file_list = os.path.join(BASE_DIR, 'testing_ply_file_list.txt')
oid2cpid = json.load(open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r'))
object2setofoid = {}
for idx in range(len(oid2cpid)):
objid, pid = oid2cpid[idx]
if objid not in object2setofoid:
object2setofoid[objid] = []
object2setofoid[objid].append(idx)
all_obj_cat_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt')
fin = open(all_obj_cat_file, 'r')
lines = [line.rstrip() for line in fin.readlines()]
objcats = [line.split()[1] for line in lines]
objnames = [line.split()[0] for line in lines]
on2oid = {objcats[i]:i for i in range(len(objcats))}
fin.close()
color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json')
color_map = json.load(open(color_map_file, 'r'))
NUM_OBJ_CATS = 16
NUM_PART_CATS = 50
cpid2oid = json.load(open(os.path.join(hdf5_data_dir, 'catid_partid_to_overallid.json'), 'r'))

def printout(flog, data):
    print(data)
    flog.write(data + '\n')

def output_color_point_cloud(data, seg, out_file):
    # Write the point cloud as OBJ-style vertex lines, colored by part label.
    with open(out_file, 'w') as f:
        l = len(seg)
        for i in range(l):
            color = color_map[seg[i]]
            f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))

def output_color_point_cloud_red_blue(data, seg, out_file):
    # Binary visualization: label 1 -> blue, label 0 -> red, anything else -> black.
    with open(out_file, 'w') as f:
        l = len(seg)
        for i in range(l):
            if seg[i] == 1:
                color = [0, 0, 1]
            elif seg[i] == 0:
                color = [1, 0, 0]
            else:
                color = [0, 0, 0]
            f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))

def pc_normalize(pc):
    # Center the cloud, then scale it into the unit sphere.
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    pc = pc / m
    return pc

def placeholder_inputs():
    pointclouds_ph = tf.placeholder(tf.float32, shape=(batch_size, point_num, 3))
    input_label_ph = tf.placeholder(tf.float32, shape=(batch_size, NUM_OBJ_CATS))
    return pointclouds_ph, input_label_ph

def load_pts_seg_files(pts_file, seg_file, catid):
    with open(pts_file, 'r') as f:
        pts_str = [item.rstrip() for item in f.readlines()]
        pts = np.array([np.float32(s.split()) for s in pts_str], dtype=np.float32)
    with open(seg_file, 'r') as f:
        part_ids = np.array([int(item.rstrip()) for item in f.readlines()], dtype=np.uint8)
        # Map per-category part ids (1, 2, ...) to global part ids (0..49).
        seg = np.array([cpid2oid[catid + '_' + str(x)] for x in part_ids])
    return pts, seg

def pc_augment_to_point_num(pts, pn):
    # Tile the cloud until it has at least pn points, then truncate to exactly pn.
    assert pts.shape[0] <= pn
    cur_len = pts.shape[0]
    res = np.array(pts)
    while cur_len < pn:
        res = np.concatenate((res, pts))
        cur_len += pts.shape[0]
    return res[:pn, :]
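
# Sanity-check sketch (illustrative): a 3-point cloud padded to 7 points
# repeats its rows cyclically and truncates, i.e.
#
#   pts = np.arange(9, dtype=np.float32).reshape(3, 3)
#   pc_augment_to_point_num(pts, 7).shape  # (7, 3); rows cycle 0, 1, 2, 0, 1, 2, 0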

def convert_label_to_one_hot(labels):
    label_one_hot = np.zeros((labels.shape[0], NUM_OBJ_CATS))
    for idx in range(labels.shape[0]):
        label_one_hot[idx, labels[idx]] = 1
    return label_one_hot

def predict():
    is_training = False

    with tf.device('/gpu:' + str(gpu_to_use)):
        pointclouds_ph, input_label_ph = placeholder_inputs()
        is_training_ph = tf.placeholder(tf.bool, shape=())

        seg_pred = model.get_model(pointclouds_ph, input_label_ph,
                                   cat_num=NUM_OBJ_CATS, part_num=NUM_PART_CATS,
                                   is_training=is_training_ph, batch_size=batch_size,
                                   num_point=point_num, weight_decay=0.0, bn_decay=None)

    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        flog = open(os.path.join(output_dir, 'log.txt'), 'a')

        printout(flog, 'Loading model %s' % pretrained_model_path)
        saver.restore(sess, pretrained_model_path)
        printout(flog, 'Model restored.')

        batch_data = np.zeros([batch_size, point_num, 3]).astype(np.float32)

        total_acc = 0.0
        total_seen = 0
        total_acc_iou = 0.0

        total_per_cat_acc = np.zeros(NUM_OBJ_CATS).astype(np.float32)
        total_per_cat_iou = np.zeros(NUM_OBJ_CATS).astype(np.float32)
        total_per_cat_seen = np.zeros(NUM_OBJ_CATS).astype(np.int32)

        # Each line of the test list holds: <pts file> <seg file> <synset id>.
        ffiles = open(test_file_list, 'r')
        lines = [line.rstrip() for line in ffiles.readlines()]
        pts_files = [line.split()[0] for line in lines]
        seg_files = [line.split()[1] for line in lines]
        labels = [line.split()[2] for line in lines]
        ffiles.close()

        len_pts_files = len(pts_files)
        for shape_idx in range(len_pts_files):
            if shape_idx % 100 == 0:
                printout(flog, '%d/%d ...' % (shape_idx, len_pts_files))

            cur_gt_label = on2oid[labels[shape_idx]]  # 0/1/.../15
            cur_label_one_hot = np.zeros((1, NUM_OBJ_CATS), dtype=np.float32)
            cur_label_one_hot[0, cur_gt_label] = 1

            pts_file_to_load = os.path.join(ply_data_dir, pts_files[shape_idx])
            seg_file_to_load = os.path.join(ply_data_dir, seg_files[shape_idx])

            pts, seg = load_pts_seg_files(pts_file_to_load, seg_file_to_load, objcats[cur_gt_label])
            ori_point_num = len(seg)

            # Normalize, then tile up to the fixed network input size.
            batch_data[0, ...] = pc_augment_to_point_num(pc_normalize(pts), point_num)

            seg_pred_res = sess.run(seg_pred, feed_dict={
                pointclouds_ph: batch_data,
                input_label_ph: cur_label_one_hot,
                is_training_ph: is_training})
            seg_pred_res = seg_pred_res[0, ...]

            # Mask out logits of parts that do not belong to this object
            # category before taking the argmax.
            iou_oids = object2setofoid[objcats[cur_gt_label]]
            non_cat_labels = list(set(np.arange(NUM_PART_CATS)).difference(set(iou_oids)))
            mini = np.min(seg_pred_res)
            seg_pred_res[:, non_cat_labels] = mini - 1000

            # Drop the padded points added by pc_augment_to_point_num.
            seg_pred_val = np.argmax(seg_pred_res, axis=1)[:ori_point_num]

            seg_acc = np.mean(seg_pred_val == seg)
            total_acc += seg_acc
            total_seen += 1
            total_per_cat_seen[cur_gt_label] += 1
            total_per_cat_acc[cur_gt_label] += seg_acc

            # Per-part IoU, averaged over the parts of this category. A part
            # absent from both prediction and ground truth counts as IoU 1.
            mask = np.int32(seg_pred_val == seg)
            total_iou = 0.0
            iou_log = ''
            for oid in iou_oids:
                n_pred = np.sum(seg_pred_val == oid)
                n_gt = np.sum(seg == oid)
                n_intersect = np.sum(np.int32(seg == oid) * mask)
                n_union = n_pred + n_gt - n_intersect
                iou_log += '_' + str(n_pred) + '_' + str(n_gt) + '_' + str(n_intersect) + '_' + str(n_union) + '_'
                if n_union == 0:
                    total_iou += 1
                    iou_log += '_1\n'
                else:
                    total_iou += n_intersect * 1.0 / n_union
                    iou_log += '_' + str(n_intersect * 1.0 / n_union) + '\n'

            avg_iou = total_iou / len(iou_oids)
            total_acc_iou += avg_iou
            total_per_cat_iou[cur_gt_label] += avg_iou

            if output_verbose:
                output_color_point_cloud(pts, seg, os.path.join(output_dir, str(shape_idx) + '_gt.obj'))
                output_color_point_cloud(pts, seg_pred_val, os.path.join(output_dir, str(shape_idx) + '_pred.obj'))
                output_color_point_cloud_red_blue(pts, np.int32(seg == seg_pred_val),
                                                  os.path.join(output_dir, str(shape_idx) + '_diff.obj'))
                with open(os.path.join(output_dir, str(shape_idx) + '.log'), 'w') as fout:
                    fout.write('Total Point: %d\n\n' % ori_point_num)
                    fout.write('Ground Truth: %s\n' % objnames[cur_gt_label])
                    fout.write('Accuracy: %f\n' % seg_acc)
                    fout.write('IoU: %f\n\n' % avg_iou)
                    fout.write('IoU details: %s\n' % iou_log)

        printout(flog, 'Accuracy: %f' % (total_acc / total_seen))
        printout(flog, 'IoU: %f' % (total_acc_iou / total_seen))

        for cat_idx in range(NUM_OBJ_CATS):
            printout(flog, '\t ' + objcats[cat_idx] + ' Total Number: ' + str(total_per_cat_seen[cat_idx]))
            if total_per_cat_seen[cat_idx] > 0:
                printout(flog, '\t ' + objcats[cat_idx] + ' Accuracy: ' +
                         str(total_per_cat_acc[cat_idx] / total_per_cat_seen[cat_idx]))
                printout(flog, '\t ' + objcats[cat_idx] + ' IoU: ' +
                         str(total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx]))

with tf.Graph().as_default():
    predict()
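
# Worked example of the per-part IoU bookkeeping in predict() (illustrative
# numbers): with iou_oids = [0, 1], seg = [0, 0, 1, 1] and
# seg_pred_val = [0, 1, 1, 1], part 0 gives n_pred=1, n_gt=2, n_intersect=1,
# n_union=2 -> IoU 0.5; part 1 gives n_pred=3, n_gt=2, n_intersect=2,
# n_union=3 -> IoU 2/3; so avg_iou = (0.5 + 2/3) / 2 ~ 0.583.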
================================================
FILE: dgcnn/tensorflow/part_seg/testing_ply_file_list.txt
================================================
03001627/points/355fa0f35b61fdd7aa74a6b5ee13e775.pts 03001627/expert_verified/points_label/355fa0f35b61fdd7aa74a6b5ee13e775.seg 03001627
04379243/points/408c3db9b4ee6be2e9f3e9c758fef992.pts 04379243/expert_verified/points_label/408c3db9b4ee6be2e9f3e9c758fef992.seg 04379243
02691156/points/a1708ad923f3b51abbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a1708ad923f3b51abbf3143b1cb6076a.seg 02691156
03001627/points/2783a969fa42cdecbe31379a5751d820.pts 03001627/expert_verified/points_label/2783a969fa42cdecbe31379a5751d820.seg 03001627
03001627/points/ed56af61297594bf1c4300651205adf3.pts 03001627/expert_verified/points_label/ed56af61297594bf1c4300651205adf3.seg 03001627
03001627/points/c0857de5101f704f3c5e1addd9922bf2.pts 03001627/expert_verified/points_label/c0857de5101f704f3c5e1addd9922bf2.seg 03001627
02691156/points/b72804a8bd3dbbaca8607f540cc62ba.pts 02691156/expert_verified/points_label/b72804a8bd3dbbaca8607f540cc62ba.seg 02691156
03001627/points/df609533cd186278398c7598b0d2e5d5.pts 03001627/expert_verified/points_label/df609533cd186278398c7598b0d2e5d5.seg 03001627
04379243/points/c24b7a315dbf2f3178ab7c8b395efbfe.pts 04379243/expert_verified/points_label/c24b7a315dbf2f3178ab7c8b395efbfe.seg 04379243
03636649/points/b8c87ad9d4930983a8d82fc8a3e54728.pts 03636649/expert_verified/points_label/b8c87ad9d4930983a8d82fc8a3e54728.seg 03636649
02691156/points/8add45a11c9fcb446eb5821e78d8898a.pts 02691156/expert_verified/points_label/8add45a11c9fcb446eb5821e78d8898a.seg 02691156
04379243/points/94d6518cf1e00eaac013a7bed5288654.pts 04379243/expert_verified/points_label/94d6518cf1e00eaac013a7bed5288654.seg 04379243
04379243/points/1dbb8fd083f96ad279b3e1be3524f72f.pts 04379243/expert_verified/points_label/1dbb8fd083f96ad279b3e1be3524f72f.seg 04379243
03001627/points/452115e132539be4daaaeef365d8f6e5.pts 03001627/expert_verified/points_label/452115e132539be4daaaeef365d8f6e5.seg 03001627
04379243/points/bd25dfa62c3c2cf772bd03149507655d.pts 04379243/expert_verified/points_label/bd25dfa62c3c2cf772bd03149507655d.seg 04379243
03948459/points/b1bbe535a833635d91f9af3df5b0c8fc.pts 03948459/expert_verified/points_label/b1bbe535a833635d91f9af3df5b0c8fc.seg 03948459
04379243/points/d41c8af82fe98a019fb4103277a6b93.pts 04379243/expert_verified/points_label/d41c8af82fe98a019fb4103277a6b93.seg 04379243
03001627/points/3109a0b9f9bc5fecb4cd1bd556007aed.pts 03001627/expert_verified/points_label/3109a0b9f9bc5fecb4cd1bd556007aed.seg 03001627
03001627/points/d38129a3301d31350b1fc43ca5e85e.pts 03001627/expert_verified/points_label/d38129a3301d31350b1fc43ca5e85e.seg 03001627
03636649/points/495af808806f1727a753b1b88fff4abb.pts 03636649/expert_verified/points_label/495af808806f1727a753b1b88fff4abb.seg 03636649
04379243/points/4d3cc502d4444c848cbb8bac2032149c.pts 04379243/expert_verified/points_label/4d3cc502d4444c848cbb8bac2032149c.seg 04379243
02691156/points/ed7e1a38fe33830b87697d3904b168b.pts 02691156/expert_verified/points_label/ed7e1a38fe33830b87697d3904b168b.seg 02691156
04379243/points/cf076ced8264a480cce90f0d61ed7a70.pts 04379243/expert_verified/points_label/cf076ced8264a480cce90f0d61ed7a70.seg 04379243
04379243/points/c04b363fd824528bd42b9650f19dd425.pts 04379243/expert_verified/points_label/c04b363fd824528bd42b9650f19dd425.seg 04379243
04379243/points/9705c2610980d0fdb2d0500bdfc28f70.pts 04379243/expert_verified/points_label/9705c2610980d0fdb2d0500bdfc28f70.seg 04379243
02691156/points/de29a1335c332a5ef7bc9a344bb7bae5.pts 02691156/expert_verified/points_label/de29a1335c332a5ef7bc9a344bb7bae5.seg 02691156
03001627/points/75d0664363f418efe461a9a9741d9415.pts 03001627/expert_verified/points_label/75d0664363f418efe461a9a9741d9415.seg 03001627
03001627/points/3421ad5a45b85f7a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/3421ad5a45b85f7a4b3c42e318f3affc.seg 03001627
03001627/points/c67a255a26e30abb6b9f3980da0b1dff.pts 03001627/expert_verified/points_label/c67a255a26e30abb6b9f3980da0b1dff.seg 03001627
04379243/points/6791c92944c99c029f1deb04fb8ae481.pts 04379243/expert_verified/points_label/6791c92944c99c029f1deb04fb8ae481.seg 04379243
04379243/points/4b5536d2e9c5b9b7febad4f49b26ec52.pts 04379243/expert_verified/points_label/4b5536d2e9c5b9b7febad4f49b26ec52.seg 04379243
04379243/points/c5fc6c1e0d446d37acce1c6e70b58979.pts 04379243/expert_verified/points_label/c5fc6c1e0d446d37acce1c6e70b58979.seg 04379243
03001627/points/9c8d3c5779871705d22218517e73100.pts 03001627/expert_verified/points_label/9c8d3c5779871705d22218517e73100.seg 03001627
04379243/points/4f70d14dc276a9539a83764a2641fc5c.pts 04379243/expert_verified/points_label/4f70d14dc276a9539a83764a2641fc5c.seg 04379243
04379243/points/9d8f0444a8c09adff0d4c8f4dd125299.pts 04379243/expert_verified/points_label/9d8f0444a8c09adff0d4c8f4dd125299.seg 04379243
04379243/points/57fbb082f660c4f7716b680dedf77108.pts 04379243/expert_verified/points_label/57fbb082f660c4f7716b680dedf77108.seg 04379243
02958343/points/cb19594e73992a3d51008e496c6cfd2e.pts 02958343/expert_verified/points_label/cb19594e73992a3d51008e496c6cfd2e.seg 02958343
03624134/points/9d424831d05d363d870906b5178d97bd.pts 03624134/expert_verified/points_label/9d424831d05d363d870906b5178d97bd.seg 03624134
03001627/points/b884ff155c4117a7508dd48e67ad44bc.pts 03001627/expert_verified/points_label/b884ff155c4117a7508dd48e67ad44bc.seg 03001627
02958343/points/7a5eba46ba4cfac35aa429db266f0c30.pts 02958343/expert_verified/points_label/7a5eba46ba4cfac35aa429db266f0c30.seg 02958343
02691156/points/4def53f149137451b0009f08a96f38a9.pts 02691156/expert_verified/points_label/4def53f149137451b0009f08a96f38a9.seg 02691156
03001627/points/fa8f7c225d3b9f1def4a09e7eb872bd9.pts 03001627/expert_verified/points_label/fa8f7c225d3b9f1def4a09e7eb872bd9.seg 03001627
04225987/points/f5d7698b5a57d61226e0640b67de606.pts 04225987/expert_verified/points_label/f5d7698b5a57d61226e0640b67de606.seg 04225987
03001627/points/9aece6c6436cde6fd9ac1bf1eddffd24.pts 03001627/expert_verified/points_label/9aece6c6436cde6fd9ac1bf1eddffd24.seg 03001627
04099429/points/15474cf9caa757a528eba1f0b7744e9.pts 04099429/expert_verified/points_label/15474cf9caa757a528eba1f0b7744e9.seg 04099429
02691156/points/571cfb1da3d5b3704b5910188444efc8.pts 02691156/expert_verified/points_label/571cfb1da3d5b3704b5910188444efc8.seg 02691156
03636649/points/5d97be0e2414bfe0a8930422448288ea.pts 03636649/expert_verified/points_label/5d97be0e2414bfe0a8930422448288ea.seg 03636649
02958343/points/648ceaad362345518a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/648ceaad362345518a6cf8c6b92417f2.seg 02958343
03001627/points/8a845bb67ee8486d6199d6fe090be061.pts 03001627/expert_verified/points_label/8a845bb67ee8486d6199d6fe090be061.seg 03001627
04379243/points/3645a90e02d16f0584aa8fa8b66ba302.pts 04379243/expert_verified/points_label/3645a90e02d16f0584aa8fa8b66ba302.seg 04379243
04379243/points/ecf3d40b14300d3c0c26b04b6b8e17a.pts 04379243/expert_verified/points_label/ecf3d40b14300d3c0c26b04b6b8e17a.seg 04379243
04379243/points/a860e5edcaec268e615bcf72f8385966.pts 04379243/expert_verified/points_label/a860e5edcaec268e615bcf72f8385966.seg 04379243
03001627/points/5edfec789343e0c3319f1c1eee46f332.pts 03001627/expert_verified/points_label/5edfec789343e0c3319f1c1eee46f332.seg 03001627
02691156/points/92fb0d6a866fe7aca8607f540cc62ba.pts 02691156/expert_verified/points_label/92fb0d6a866fe7aca8607f540cc62ba.seg 02691156
02958343/points/e4886a4d0c6ea960fe21694bd5f519d1.pts 02958343/expert_verified/points_label/e4886a4d0c6ea960fe21694bd5f519d1.seg 02958343
03636649/points/e3ee6b31e54e95b7d42b9650f19dd425.pts 03636649/expert_verified/points_label/e3ee6b31e54e95b7d42b9650f19dd425.seg 03636649
03467517/points/d546e034a6c659a425cd348738a8052a.pts 03467517/expert_verified/points_label/d546e034a6c659a425cd348738a8052a.seg 03467517
03001627/points/26a6ce644504c5fa22963ea1e168015d.pts 03001627/expert_verified/points_label/26a6ce644504c5fa22963ea1e168015d.seg 03001627
02691156/points/b2b1c1d5c757af8a7209009cfb89d4bd.pts 02691156/expert_verified/points_label/b2b1c1d5c757af8a7209009cfb89d4bd.seg 02691156
03467517/points/4bd2492d56d6b8c537b5646da91e9ed0.pts 03467517/expert_verified/points_label/4bd2492d56d6b8c537b5646da91e9ed0.seg 03467517
04379243/points/92ed9344484dd026dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/92ed9344484dd026dfd21203bf8b4b46.seg 04379243
04379243/points/2d1d8a2f976387bd3145205f02ff9fc5.pts 04379243/expert_verified/points_label/2d1d8a2f976387bd3145205f02ff9fc5.seg 04379243
03467517/points/5b7fcd85ce6fd1931377689fa4e4b2d6.pts 03467517/expert_verified/points_label/5b7fcd85ce6fd1931377689fa4e4b2d6.seg 03467517
02691156/points/4cee36a2e8dd3b24b87697d3904b168b.pts 02691156/expert_verified/points_label/4cee36a2e8dd3b24b87697d3904b168b.seg 02691156
03001627/points/f23c1bb951fa8909bc01640b1b5116e7.pts 03001627/expert_verified/points_label/f23c1bb951fa8909bc01640b1b5116e7.seg 03001627
04379243/points/370b45eeeb9b11416f04d49e4de95b59.pts 04379243/expert_verified/points_label/370b45eeeb9b11416f04d49e4de95b59.seg 04379243
03001627/points/3885255ca5d75e69da2260dc4a1fc2c6.pts 03001627/expert_verified/points_label/3885255ca5d75e69da2260dc4a1fc2c6.seg 03001627
02691156/points/452c18f8997c53741adbb4c4e06ad649.pts 02691156/expert_verified/points_label/452c18f8997c53741adbb4c4e06ad649.seg 02691156
03001627/points/8b39b501c9fa4d349b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/8b39b501c9fa4d349b9f2eb77f5e247e.seg 03001627
04379243/points/94966aa8a7a6f540f6807434c358ea12.pts 04379243/expert_verified/points_label/94966aa8a7a6f540f6807434c358ea12.seg 04379243
03001627/points/9b6f17ce2db29c4c9ae35d137ece64f9.pts 03001627/expert_verified/points_label/9b6f17ce2db29c4c9ae35d137ece64f9.seg 03001627
03467517/points/85bef84a26a91bff9ce363b13bdd195d.pts 03467517/expert_verified/points_label/85bef84a26a91bff9ce363b13bdd195d.seg 03467517
03624134/points/e98bc872371c852e15b040d25222e627.pts 03624134/expert_verified/points_label/e98bc872371c852e15b040d25222e627.seg 03624134
04379243/points/5dff67091a2f7ef1ab988fe471b1bd06.pts 04379243/expert_verified/points_label/5dff67091a2f7ef1ab988fe471b1bd06.seg 04379243
03001627/points/e6f37dff25ec4ca4f815ebdb2df45512.pts 03001627/expert_verified/points_label/e6f37dff25ec4ca4f815ebdb2df45512.seg 03001627
02691156/points/85a15c26a6e9921ae008cc4902bfe3cd.pts 02691156/expert_verified/points_label/85a15c26a6e9921ae008cc4902bfe3cd.seg 02691156
03001627/points/94371ddd6d62f7b762ec387b772e9e1.pts 03001627/expert_verified/points_label/94371ddd6d62f7b762ec387b772e9e1.seg 03001627
02691156/points/4374a3b4b98e247b398db3ebdf468ed7.pts 02691156/expert_verified/points_label/4374a3b4b98e247b398db3ebdf468ed7.seg 02691156
03948459/points/8fa02aab7237289667fdfbdf64f19325.pts 03948459/expert_verified/points_label/8fa02aab7237289667fdfbdf64f19325.seg 03948459
04379243/points/9f1fcee83cacf964f4b6538438a0b930.pts 04379243/expert_verified/points_label/9f1fcee83cacf964f4b6538438a0b930.seg 04379243
04225987/points/f5643778dbcd653655a834a7aafb0236.pts 04225987/expert_verified/points_label/f5643778dbcd653655a834a7aafb0236.seg 04225987
03636649/points/cdbe11124dbf418167ac0fa90111fad0.pts 03636649/expert_verified/points_label/cdbe11124dbf418167ac0fa90111fad0.seg 03636649
03001627/points/e3d23dc47ddd9620c9be65dfbd21428b.pts 03001627/expert_verified/points_label/e3d23dc47ddd9620c9be65dfbd21428b.seg 03001627
03001627/points/efd0411eaf2396c4de7ed732f5aeea4.pts 03001627/expert_verified/points_label/efd0411eaf2396c4de7ed732f5aeea4.seg 03001627
03636649/points/7ad15667f654fc08664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/7ad15667f654fc08664b3b9b23ddfcbc.seg 03636649
04379243/points/55d5fce641343449d42b9650f19dd425.pts 04379243/expert_verified/points_label/55d5fce641343449d42b9650f19dd425.seg 04379243
03467517/points/a31ef3a8c70b789b93f0194265a9746c.pts 03467517/expert_verified/points_label/a31ef3a8c70b789b93f0194265a9746c.seg 03467517
03001627/points/ccfc857f35c138ede785b88cc9024b2a.pts 03001627/expert_verified/points_label/ccfc857f35c138ede785b88cc9024b2a.seg 03001627
02691156/points/e3fd510add7b1aa3c19eb6ab3736de88.pts 02691156/expert_verified/points_label/e3fd510add7b1aa3c19eb6ab3736de88.seg 02691156
03636649/points/213d911cc489c352b5db3f95d706a0c9.pts 03636649/expert_verified/points_label/213d911cc489c352b5db3f95d706a0c9.seg 03636649
04225987/points/c171d90db4c4ba56cdb1768065dafd0c.pts 04225987/expert_verified/points_label/c171d90db4c4ba56cdb1768065dafd0c.seg 04225987
03797390/points/10f6e09036350e92b3f21f1137c3c347.pts 03797390/expert_verified/points_label/10f6e09036350e92b3f21f1137c3c347.seg 03797390
02691156/points/a374b0448461438ef3d4cc10d9776c62.pts 02691156/expert_verified/points_label/a374b0448461438ef3d4cc10d9776c62.seg 02691156
03001627/points/b6457a76f24de9f67aa6f8353fce2005.pts 03001627/expert_verified/points_label/b6457a76f24de9f67aa6f8353fce2005.seg 03001627
03001627/points/7fe08cd7a9b76c1dcbde89e0c48a01bf.pts 03001627/expert_verified/points_label/7fe08cd7a9b76c1dcbde89e0c48a01bf.seg 03001627
03001627/points/58867a00409c47c0813a1237d2827540.pts 03001627/expert_verified/points_label/58867a00409c47c0813a1237d2827540.seg 03001627
02958343/points/65e3e2893669a09cc7b48e36e31209b9.pts 02958343/expert_verified/points_label/65e3e2893669a09cc7b48e36e31209b9.seg 02958343
03948459/points/edec08542b9312b712b38b1d99376c0b.pts 03948459/expert_verified/points_label/edec08542b9312b712b38b1d99376c0b.seg 03948459
03636649/points/cd80cc92cf732e8d8a17805dbfb751e2.pts 03636649/expert_verified/points_label/cd80cc92cf732e8d8a17805dbfb751e2.seg 03636649
03467517/points/87650e8ff3d85672381b7fbf79296afb.pts 03467517/expert_verified/points_label/87650e8ff3d85672381b7fbf79296afb.seg 03467517
03636649/points/1e91664763d371937dd73da65dc0e6a7.pts 03636649/expert_verified/points_label/1e91664763d371937dd73da65dc0e6a7.seg 03636649
04379243/points/104c8e90ecf0e5351ed672982b7954af.pts 04379243/expert_verified/points_label/104c8e90ecf0e5351ed672982b7954af.seg 04379243
04379243/points/1834fac2f46a26f91933ffef19678834.pts 04379243/expert_verified/points_label/1834fac2f46a26f91933ffef19678834.seg 04379243
04379243/points/ed0be8928caab4bdab610b0c94236463.pts 04379243/expert_verified/points_label/ed0be8928caab4bdab610b0c94236463.seg 04379243
04379243/points/105f53a6471f3ceb4a420e3c1b966720.pts 04379243/expert_verified/points_label/105f53a6471f3ceb4a420e3c1b966720.seg 04379243
04379243/points/7bf5f689da285153583ff8a5fc7c1869.pts 04379243/expert_verified/points_label/7bf5f689da285153583ff8a5fc7c1869.seg 04379243
02958343/points/eface8341d001e9ceb01ae4a4788bd4f.pts 02958343/expert_verified/points_label/eface8341d001e9ceb01ae4a4788bd4f.seg 02958343
03001627/points/517880899d26080471a782a4379556c7.pts 03001627/expert_verified/points_label/517880899d26080471a782a4379556c7.seg 03001627
03001627/points/5ef3e4abd4386c8871bc6030acc85f1e.pts 03001627/expert_verified/points_label/5ef3e4abd4386c8871bc6030acc85f1e.seg 03001627
03001627/points/3eb60e6679d1df1dde7eedbb2790491b.pts 03001627/expert_verified/points_label/3eb60e6679d1df1dde7eedbb2790491b.seg 03001627
03001627/points/4702e6196503ff84f1c0e03f321d0b20.pts 03001627/expert_verified/points_label/4702e6196503ff84f1c0e03f321d0b20.seg 03001627
02958343/points/b0a7789537663f7ba1ff2929b2f5cf19.pts 02958343/expert_verified/points_label/b0a7789537663f7ba1ff2929b2f5cf19.seg 02958343
03636649/points/2ce7732982343c1d9792f6094a78f8d5.pts 03636649/expert_verified/points_label/2ce7732982343c1d9792f6094a78f8d5.seg 03636649
03467517/points/78a75ce8dc8dc197dc2b574e941c815b.pts 03467517/expert_verified/points_label/78a75ce8dc8dc197dc2b574e941c815b.seg 03467517
03636649/points/348d6ddf9e02cbddf647dc544bb0ab61.pts 03636649/expert_verified/points_label/348d6ddf9e02cbddf647dc544bb0ab61.seg 03636649
03001627/points/e56087cd55cce8b4f41a4361d0ca9bc8.pts 03001627/expert_verified/points_label/e56087cd55cce8b4f41a4361d0ca9bc8.seg 03001627
03642806/points/4d3dde22f529195bc887d5d9a11f3155.pts 03642806/expert_verified/points_label/4d3dde22f529195bc887d5d9a11f3155.seg 03642806
03001627/points/78e1977bc5f0f4041552c6ecbda964b.pts 03001627/expert_verified/points_label/78e1977bc5f0f4041552c6ecbda964b.seg 03001627
04379243/points/44360c91a7e91098d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/44360c91a7e91098d93768e7b9b1eabf.seg 04379243
02691156/points/52ca6970fb09b561f9f7510373841dd9.pts 02691156/expert_verified/points_label/52ca6970fb09b561f9f7510373841dd9.seg 02691156
02958343/points/383f8d508b6f25f565d21723f535417.pts 02958343/expert_verified/points_label/383f8d508b6f25f565d21723f535417.seg 02958343
03001627/points/d6da5457b0682e24696b74614952b2d0.pts 03001627/expert_verified/points_label/d6da5457b0682e24696b74614952b2d0.seg 03001627
02691156/points/9f5dda6f01bbe29bf810506e9ae2dcc2.pts 02691156/expert_verified/points_label/9f5dda6f01bbe29bf810506e9ae2dcc2.seg 02691156
03467517/points/35e77edd3ae6ad4993f0194265a9746c.pts 03467517/expert_verified/points_label/35e77edd3ae6ad4993f0194265a9746c.seg 03467517
03001627/points/590d04438aeffbb58f447453fccbd9d3.pts 03001627/expert_verified/points_label/590d04438aeffbb58f447453fccbd9d3.seg 03001627
03001627/points/cdfa898eadf316122056b4bd5d870b47.pts 03001627/expert_verified/points_label/cdfa898eadf316122056b4bd5d870b47.seg 03001627
03001627/points/8e678a54f2ee4e5e492d9da2668ec34c.pts 03001627/expert_verified/points_label/8e678a54f2ee4e5e492d9da2668ec34c.seg 03001627
04379243/points/1804dd6f5c827c1a4bf8d5f43e57b138.pts 04379243/expert_verified/points_label/1804dd6f5c827c1a4bf8d5f43e57b138.seg 04379243
02691156/points/23eed87ac79f1b152f9c405cf0817830.pts 02691156/expert_verified/points_label/23eed87ac79f1b152f9c405cf0817830.seg 02691156
02691156/points/97bc5fffde64178f43afdb9c81ff2967.pts 02691156/expert_verified/points_label/97bc5fffde64178f43afdb9c81ff2967.seg 02691156
03001627/points/3b1f1913f2bc0dc171dbe96559c7bcae.pts 03001627/expert_verified/points_label/3b1f1913f2bc0dc171dbe96559c7bcae.seg 03001627
04379243/points/82e1c0b874b0a9e035cd53a06b1d2317.pts 04379243/expert_verified/points_label/82e1c0b874b0a9e035cd53a06b1d2317.seg 04379243
03001627/points/e0a0d5c2ba6fdca215b55266697a17be.pts 03001627/expert_verified/points_label/e0a0d5c2ba6fdca215b55266697a17be.seg 03001627
03636649/points/9b558be5e2b60e3eb09f0ca9c143fdfd.pts 03636649/expert_verified/points_label/9b558be5e2b60e3eb09f0ca9c143fdfd.seg 03636649
03001627/points/813be9a8485050571563f0911e3e5fc0.pts 03001627/expert_verified/points_label/813be9a8485050571563f0911e3e5fc0.seg 03001627
02958343/points/6ca9967adcf862a461c6c61410fc904b.pts 02958343/expert_verified/points_label/6ca9967adcf862a461c6c61410fc904b.seg 02958343
03624134/points/5663637633c938d1395331ebe4786cd.pts 03624134/expert_verified/points_label/5663637633c938d1395331ebe4786cd.seg 03624134
03636649/points/ec8dc2311d381a9e3d39d8012919dd25.pts 03636649/expert_verified/points_label/ec8dc2311d381a9e3d39d8012919dd25.seg 03636649
04379243/points/b685208ccf38786a6f1e07a56c129dfc.pts 04379243/expert_verified/points_label/b685208ccf38786a6f1e07a56c129dfc.seg 04379243
03636649/points/ce621e6df1ab9ae35d2cdb96c1afe34.pts 03636649/expert_verified/points_label/ce621e6df1ab9ae35d2cdb96c1afe34.seg 03636649
02691156/points/b092d523bdd320e4ca8607f540cc62ba.pts 02691156/expert_verified/points_label/b092d523bdd320e4ca8607f540cc62ba.seg 02691156
04379243/points/401fe961ec7b0cb5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/401fe961ec7b0cb5dcfcef693e7ec696.seg 04379243
04225987/points/1e5fd1de723cc66cbb1ed6d4d8526a19.pts 04225987/expert_verified/points_label/1e5fd1de723cc66cbb1ed6d4d8526a19.seg 04225987
03001627/points/b987a2ca54c6ddecb74697ced5978572.pts 03001627/expert_verified/points_label/b987a2ca54c6ddecb74697ced5978572.seg 03001627
04379243/points/3e42e3386f4aea9277cf3bb06f394ad.pts 04379243/expert_verified/points_label/3e42e3386f4aea9277cf3bb06f394ad.seg 04379243
02958343/points/1198255e3d20d2f323f3ca54768fe2ee.pts 02958343/expert_verified/points_label/1198255e3d20d2f323f3ca54768fe2ee.seg 02958343
04379243/points/2b564ff0989caf58ab610b0c94236463.pts 04379243/expert_verified/points_label/2b564ff0989caf58ab610b0c94236463.seg 04379243
03636649/points/941271c5d9b192eaccd8f9b9403fd602.pts 03636649/expert_verified/points_label/941271c5d9b192eaccd8f9b9403fd602.seg 03636649
02691156/points/6aeae52e38f892a7e0091ae06332b2d5.pts 02691156/expert_verified/points_label/6aeae52e38f892a7e0091ae06332b2d5.seg 02691156
04379243/points/4cdfd605352adcb0da13974b3533fb59.pts 04379243/expert_verified/points_label/4cdfd605352adcb0da13974b3533fb59.seg 04379243
04379243/points/7c24e4f8778e224799a5e8f6c5baa224.pts 04379243/expert_verified/points_label/7c24e4f8778e224799a5e8f6c5baa224.seg 04379243
03001627/points/6272c21e439e0205c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/6272c21e439e0205c8687ff9b0b4e4ac.seg 03001627
02691156/points/acd8f367c36a3d84fc7a6d75b3d807ff.pts 02691156/expert_verified/points_label/acd8f367c36a3d84fc7a6d75b3d807ff.seg 02691156
04379243/points/d58bdda16e6bba6f796740c80be6053.pts 04379243/expert_verified/points_label/d58bdda16e6bba6f796740c80be6053.seg 04379243
03636649/points/f97506704760741b460fa882e24b7e4a.pts 03636649/expert_verified/points_label/f97506704760741b460fa882e24b7e4a.seg 03636649
03636649/points/9f5c3ea9f8254b8bd42b9650f19dd425.pts 03636649/expert_verified/points_label/9f5c3ea9f8254b8bd42b9650f19dd425.seg 03636649
03797390/points/79e673336e836d1333becb3a9550cbb1.pts 03797390/expert_verified/points_label/79e673336e836d1333becb3a9550cbb1.seg 03797390
03948459/points/2d573d37cce5b48b9f433921788191f3.pts 03948459/expert_verified/points_label/2d573d37cce5b48b9f433921788191f3.seg 03948459
04379243/points/7aaad1c5c2be8c24a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/7aaad1c5c2be8c24a9ed7bb5b55809f8.seg 04379243
04379243/points/c6c412c771ab0ae015a34fa27bdf3d03.pts 04379243/expert_verified/points_label/c6c412c771ab0ae015a34fa27bdf3d03.seg 04379243
03467517/points/819251e11b46438ff6ff9bebca919581.pts 03467517/expert_verified/points_label/819251e11b46438ff6ff9bebca919581.seg 03467517
03001627/points/51f4ea68be319fe8990e5087098e19c.pts 03001627/expert_verified/points_label/51f4ea68be319fe8990e5087098e19c.seg 03001627
03467517/points/66b24797480ba515d57700c05b1862d8.pts 03467517/expert_verified/points_label/66b24797480ba515d57700c05b1862d8.seg 03467517
03790512/points/9d3b07f4475d501e8249f134aca4c817.pts 03790512/expert_verified/points_label/9d3b07f4475d501e8249f134aca4c817.seg 03790512
04379243/points/72cfb60a075369ab7252c133a7e17d94.pts 04379243/expert_verified/points_label/72cfb60a075369ab7252c133a7e17d94.seg 04379243
04379243/points/12a2733fc5f6b31ef8574543281e850f.pts 04379243/expert_verified/points_label/12a2733fc5f6b31ef8574543281e850f.seg 04379243
03636649/points/aed950102f1e9c7a659dda512294c744.pts 03636649/expert_verified/points_label/aed950102f1e9c7a659dda512294c744.seg 03636649
03001627/points/3126c6e9277b775b245ac1812a4e4d0c.pts 03001627/expert_verified/points_label/3126c6e9277b775b245ac1812a4e4d0c.seg 03001627
02958343/points/8decf42b145f98d148d2ba4615e03b21.pts 02958343/expert_verified/points_label/8decf42b145f98d148d2ba4615e03b21.seg 02958343
03467517/points/2f9bd6e61e038d8fd4b4ae2ff4c58b57.pts 03467517/expert_verified/points_label/2f9bd6e61e038d8fd4b4ae2ff4c58b57.seg 03467517
03467517/points/6a983b2ff1b8a42e1285d7bfa3e922e4.pts 03467517/expert_verified/points_label/6a983b2ff1b8a42e1285d7bfa3e922e4.seg 03467517
03261776/points/e33d6e8e39a75268957b6a4f3924d982.pts 03261776/expert_verified/points_label/e33d6e8e39a75268957b6a4f3924d982.seg 03261776
04379243/points/fe2f2b120d84ed909b896cf832106977.pts 04379243/expert_verified/points_label/fe2f2b120d84ed909b896cf832106977.seg 04379243
02958343/points/1328a95d69cefe32f200a72c9245aee7.pts 02958343/expert_verified/points_label/1328a95d69cefe32f200a72c9245aee7.seg 02958343
03001627/points/58409b308683d908ca2bec46a3b47519.pts 03001627/expert_verified/points_label/58409b308683d908ca2bec46a3b47519.seg 03001627
03001627/points/507a5070cde81fd867936ca58e67cec6.pts 03001627/expert_verified/points_label/507a5070cde81fd867936ca58e67cec6.seg 03001627
04379243/points/ec68e1edbb7e9bc7e93cebb6ba9ca43e.pts 04379243/expert_verified/points_label/ec68e1edbb7e9bc7e93cebb6ba9ca43e.seg 04379243
03001627/points/7facccfa81369078a8930422448288ea.pts 03001627/expert_verified/points_label/7facccfa81369078a8930422448288ea.seg 03001627
03001627/points/be0c5a0e91c99e804e1a714ee619465a.pts 03001627/expert_verified/points_label/be0c5a0e91c99e804e1a714ee619465a.seg 03001627
03001627/points/d73e46e07bdb3fe75fe4ecea39e8bd40.pts 03001627/expert_verified/points_label/d73e46e07bdb3fe75fe4ecea39e8bd40.seg 03001627
03636649/points/122fb7bfa09c184ca249f8489bc060dd.pts 03636649/expert_verified/points_label/122fb7bfa09c184ca249f8489bc060dd.seg 03636649
03001627/points/9ef3323c6ced7dfef313a0fb5fd4d79.pts 03001627/expert_verified/points_label/9ef3323c6ced7dfef313a0fb5fd4d79.seg 03001627
02691156/points/d8452d4fe51f2bab3554ccf8c30febe7.pts 02691156/expert_verified/points_label/d8452d4fe51f2bab3554ccf8c30febe7.seg 02691156
02691156/points/d59d75f52ac9b241ae0d772a1c85134a.pts 02691156/expert_verified/points_label/d59d75f52ac9b241ae0d772a1c85134a.seg 02691156
02691156/points/f9e80ce23d9536623fddedb0bf24c68a.pts 02691156/expert_verified/points_label/f9e80ce23d9536623fddedb0bf24c68a.seg 02691156
02691156/points/e69631d34410f99ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/e69631d34410f99ac4f72bf08dc79a6.seg 02691156
04379243/points/f7196ec7d732af5166decb1b3cdc5557.pts 04379243/expert_verified/points_label/f7196ec7d732af5166decb1b3cdc5557.seg 04379243
03261776/points/c5e47b627cb7818f17e22b7299bb7bc6.pts 03261776/expert_verified/points_label/c5e47b627cb7818f17e22b7299bb7bc6.seg 03261776
03001627/points/5a60c649a221293d72ed554eb3baedcc.pts 03001627/expert_verified/points_label/5a60c649a221293d72ed554eb3baedcc.seg 03001627
04379243/points/b117aac2e13630bb5d23c9bbb429abf9.pts 04379243/expert_verified/points_label/b117aac2e13630bb5d23c9bbb429abf9.seg 04379243
03642806/points/e4c34c87ed1bc2191ef7a71d6e01357e.pts 03642806/expert_verified/points_label/e4c34c87ed1bc2191ef7a71d6e01357e.seg 03642806
02691156/points/3fb7ceab42d7b17219ba010ddb4974fe.pts 02691156/expert_verified/points_label/3fb7ceab42d7b17219ba010ddb4974fe.seg 02691156
04379243/points/fc472163ea149f8e19fb4103277a6b93.pts 04379243/expert_verified/points_label/fc472163ea149f8e19fb4103277a6b93.seg 04379243
03001627/points/5ef73c9bee1b4adcd019a8a03d4a2a3.pts 03001627/expert_verified/points_label/5ef73c9bee1b4adcd019a8a03d4a2a3.seg 03001627
02691156/points/384e72f69e6f24404cb288947cda4a2c.pts 02691156/expert_verified/points_label/384e72f69e6f24404cb288947cda4a2c.seg 02691156
03636649/points/3fca250636e2b47a8d0fc77aab7a8d33.pts 03636649/expert_verified/points_label/3fca250636e2b47a8d0fc77aab7a8d33.seg 03636649
04379243/points/46957ba752c3554bd42b9650f19dd425.pts 04379243/expert_verified/points_label/46957ba752c3554bd42b9650f19dd425.seg 04379243
03001627/points/bce7ff621a5440bb34ee5c94ebdf7f1d.pts 03001627/expert_verified/points_label/bce7ff621a5440bb34ee5c94ebdf7f1d.seg 03001627
02691156/points/66ae19841350ac2d4ba2821676102936.pts 02691156/expert_verified/points_label/66ae19841350ac2d4ba2821676102936.seg 02691156
03001627/points/e53b07b648e8d041107a17cfae0b6df6.pts 03001627/expert_verified/points_label/e53b07b648e8d041107a17cfae0b6df6.seg 03001627
03624134/points/d1c757548ead4a4d8d03ca4865da5b6.pts 03624134/expert_verified/points_label/d1c757548ead4a4d8d03ca4865da5b6.seg 03624134
04379243/points/d19b4bde0766723c9b3bb0ef2a08be04.pts 04379243/expert_verified/points_label/d19b4bde0766723c9b3bb0ef2a08be04.seg 04379243
03001627/points/6ecec258a1b6fe2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/6ecec258a1b6fe2a6fee8e2140acec9.seg 03001627
02691156/points/ab95a4e7f2d3cf9ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ab95a4e7f2d3cf9ca8607f540cc62ba.seg 02691156
03624134/points/b61c9b5f29ad581c860a45e027159a9a.pts 03624134/expert_verified/points_label/b61c9b5f29ad581c860a45e027159a9a.seg 03624134
03001627/points/c7da2d72f9927f1881dff5c2e57ad46e.pts 03001627/expert_verified/points_label/c7da2d72f9927f1881dff5c2e57ad46e.seg 03001627
04379243/points/b9886dd3c4a651f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/b9886dd3c4a651f3664b3b9b23ddfcbc.seg 04379243
02691156/points/abc465975af79827dfb86dddee1d6ac3.pts 02691156/expert_verified/points_label/abc465975af79827dfb86dddee1d6ac3.seg 02691156
03636649/points/7be01530bf43f2ed8a83637b92bdc7.pts 03636649/expert_verified/points_label/7be01530bf43f2ed8a83637b92bdc7.seg 03636649
02691156/points/b81339a2f1dbc0de9598ceb95c7f0752.pts 02691156/expert_verified/points_label/b81339a2f1dbc0de9598ceb95c7f0752.seg 02691156
03001627/points/69709cb300ae3784ee72e5c46412e9a7.pts 03001627/expert_verified/points_label/69709cb300ae3784ee72e5c46412e9a7.seg 03001627
03001627/points/ec25a41ca233ed096e5a467428553af2.pts 03001627/expert_verified/points_label/ec25a41ca233ed096e5a467428553af2.seg 03001627
04379243/points/4e9394f9f64859aef4ef86403cccc399.pts 04379243/expert_verified/points_label/4e9394f9f64859aef4ef86403cccc399.seg 04379243
04379243/points/c477235c02413bfc44d2ca62bee212a0.pts 04379243/expert_verified/points_label/c477235c02413bfc44d2ca62bee212a0.seg 04379243
04379243/points/41b0491fdb14d41bd25ca1a27cf9bdec.pts 04379243/expert_verified/points_label/41b0491fdb14d41bd25ca1a27cf9bdec.seg 04379243
02691156/points/59eecc0a983a27a8130cc35407fba74a.pts 02691156/expert_verified/points_label/59eecc0a983a27a8130cc35407fba74a.seg 02691156
03467517/points/22129fab1497437cc3f912172873d52f.pts 03467517/expert_verified/points_label/22129fab1497437cc3f912172873d52f.seg 03467517
04379243/points/6365205d2324234fc8a1efeb4b91d393.pts 04379243/expert_verified/points_label/6365205d2324234fc8a1efeb4b91d393.seg 04379243
03001627/points/2a75b2bb82d7f77c3f9d6e0ade5188b0.pts 03001627/expert_verified/points_label/2a75b2bb82d7f77c3f9d6e0ade5188b0.seg 03001627
03001627/points/8f226d6b3089d3b7bca860dd9b04c52c.pts 03001627/expert_verified/points_label/8f226d6b3089d3b7bca860dd9b04c52c.seg 03001627
03624134/points/5e515b18ed17a418b056c98b2e5e5e4e.pts 03624134/expert_verified/points_label/5e515b18ed17a418b056c98b2e5e5e4e.seg 03624134
02691156/points/5bc41589eba11a4e15477d594f1fbd99.pts 02691156/expert_verified/points_label/5bc41589eba11a4e15477d594f1fbd99.seg 02691156
03001627/points/2bbf00f0c583fd8a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/2bbf00f0c583fd8a4b3c42e318f3affc.seg 03001627
03790512/points/9e9300a6e1caec217395d58407f193ba.pts 03790512/expert_verified/points_label/9e9300a6e1caec217395d58407f193ba.seg 03790512
03636649/points/81894e0739e3fea9d49b2e04785f8492.pts 03636649/expert_verified/points_label/81894e0739e3fea9d49b2e04785f8492.seg 03636649
02958343/points/cdc8453c63ffc13e20f29d4da2b76f7a.pts 02958343/expert_verified/points_label/cdc8453c63ffc13e20f29d4da2b76f7a.seg 02958343
04379243/points/7a0b6685a30298fb8ae8d7de284e7d2.pts 04379243/expert_verified/points_label/7a0b6685a30298fb8ae8d7de284e7d2.seg 04379243
03001627/points/c5ee6b77f9f84adeed52100e321c9f3e.pts 03001627/expert_verified/points_label/c5ee6b77f9f84adeed52100e321c9f3e.seg 03001627
04379243/points/4e87db85d5dab96822339a4b4aacca6b.pts 04379243/expert_verified/points_label/4e87db85d5dab96822339a4b4aacca6b.seg 04379243
02958343/points/6dbae14e481e8fb9333e0bf0b765fa12.pts 02958343/expert_verified/points_label/6dbae14e481e8fb9333e0bf0b765fa12.seg 02958343
03467517/points/bad8978268948ea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/bad8978268948ea3d3eb77b119df6d.seg 03467517
03001627/points/c552529c54b0612e53041c49040be3d5.pts 03001627/expert_verified/points_label/c552529c54b0612e53041c49040be3d5.seg 03001627
02958343/points/dca8ed788347b28c171cf359a50c99bc.pts 02958343/expert_verified/points_label/dca8ed788347b28c171cf359a50c99bc.seg 02958343
04379243/points/99720647e210078beaf288f952624966.pts 04379243/expert_verified/points_label/99720647e210078beaf288f952624966.seg 04379243
03001627/points/b1f4b2c32f8a2fa77ee217c21e683487.pts 03001627/expert_verified/points_label/b1f4b2c32f8a2fa77ee217c21e683487.seg 03001627
04379243/points/41cdb5b619790d5a74eb542502c2205f.pts 04379243/expert_verified/points_label/41cdb5b619790d5a74eb542502c2205f.seg 04379243
04379243/points/a25141a07c77c25467de2aaf749e5256.pts 04379243/expert_verified/points_label/a25141a07c77c25467de2aaf749e5256.seg 04379243
04379243/points/e9c3a3aa2278608bec15b38012222fa8.pts 04379243/expert_verified/points_label/e9c3a3aa2278608bec15b38012222fa8.seg 04379243
03636649/points/8e025c4aa0b0201a81a172d69c52a28a.pts 03636649/expert_verified/points_label/8e025c4aa0b0201a81a172d69c52a28a.seg 03636649
03001627/points/e175bc785390e8f6c05575120a46cd3b.pts 03001627/expert_verified/points_label/e175bc785390e8f6c05575120a46cd3b.seg 03001627
02691156/points/ecb4ae05d7dd135a619550d2af0b6117.pts 02691156/expert_verified/points_label/ecb4ae05d7dd135a619550d2af0b6117.seg 02691156
02691156/points/87069f21b11c180799a771d197c7b487.pts 02691156/expert_verified/points_label/87069f21b11c180799a771d197c7b487.seg 02691156
02691156/points/ca11efc8928c10908b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/ca11efc8928c10908b96ae1a0a8b84ec.seg 02691156
03790512/points/365c1f92a54c8cb52a45a87054fa7272.pts 03790512/expert_verified/points_label/365c1f92a54c8cb52a45a87054fa7272.seg 03790512
03636649/points/23040992da19679aaa7cb30470f3273c.pts 03636649/expert_verified/points_label/23040992da19679aaa7cb30470f3273c.seg 03636649
02691156/points/9441549e323552f2f001dddaf44c449b.pts 02691156/expert_verified/points_label/9441549e323552f2f001dddaf44c449b.seg 02691156
02958343/points/17bfc66c6bc0a99d68c415156b102065.pts 02958343/expert_verified/points_label/17bfc66c6bc0a99d68c415156b102065.seg 02958343
03001627/points/671d34c27cc0f1bf2deeb5ec76cf103b.pts 03001627/expert_verified/points_label/671d34c27cc0f1bf2deeb5ec76cf103b.seg 03001627
03642806/points/464edfe14e9fa45c3394926146371698.pts 03642806/expert_verified/points_label/464edfe14e9fa45c3394926146371698.seg 03642806
04379243/points/279c8601278e827dab610b0c94236463.pts 04379243/expert_verified/points_label/279c8601278e827dab610b0c94236463.seg 04379243
04379243/points/29d9c6d84c6a126917b431cae0dd70ed.pts 04379243/expert_verified/points_label/29d9c6d84c6a126917b431cae0dd70ed.seg 04379243
04379243/points/5d3d902051858e56ed1397afd2317e5b.pts 04379243/expert_verified/points_label/5d3d902051858e56ed1397afd2317e5b.seg 04379243
02958343/points/aa78d4465ae18312711f9e3a79a13dcf.pts 02958343/expert_verified/points_label/aa78d4465ae18312711f9e3a79a13dcf.seg 02958343
03001627/points/d561ff6788ab46517b016084e2ae95e.pts 03001627/expert_verified/points_label/d561ff6788ab46517b016084e2ae95e.seg 03001627
03001627/points/b24ed89d85b74771216fff6094e6695c.pts 03001627/expert_verified/points_label/b24ed89d85b74771216fff6094e6695c.seg 03001627
03636649/points/f6eeb5d67c32616648fda83c10428379.pts 03636649/expert_verified/points_label/f6eeb5d67c32616648fda83c10428379.seg 03636649
03001627/points/3b3a9f4e3aa9f2f4d39a194653571dfc.pts 03001627/expert_verified/points_label/3b3a9f4e3aa9f2f4d39a194653571dfc.seg 03001627
03001627/points/bd0b06e158bcee8ac0d89fc15154c9a2.pts 03001627/expert_verified/points_label/bd0b06e158bcee8ac0d89fc15154c9a2.seg 03001627
04379243/points/89251f322490e7047e38640a31d0bc3.pts 04379243/expert_verified/points_label/89251f322490e7047e38640a31d0bc3.seg 04379243
03001627/points/935f5e58e9e15231febad4f49b26ec52.pts 03001627/expert_verified/points_label/935f5e58e9e15231febad4f49b26ec52.seg 03001627
03467517/points/8f59fee745f1e37ea5c8e9fc8b2242fd.pts 03467517/expert_verified/points_label/8f59fee745f1e37ea5c8e9fc8b2242fd.seg 03467517
02691156/points/fddcb2b3d45ce98e641c309f1fd7e183.pts 02691156/expert_verified/points_label/fddcb2b3d45ce98e641c309f1fd7e183.seg 02691156
03001627/points/d915d2f1664bf76e71a70be9f12ce8b0.pts 03001627/expert_verified/points_label/d915d2f1664bf76e71a70be9f12ce8b0.seg 03001627
02958343/points/1ae9732840a315afab2c2809513f396e.pts 02958343/expert_verified/points_label/1ae9732840a315afab2c2809513f396e.seg 02958343
04379243/points/b658e507c84d6202610c2a68437007d6.pts 04379243/expert_verified/points_label/b658e507c84d6202610c2a68437007d6.seg 04379243
02958343/points/707d1e19b465d075adbfb30d8d1b297e.pts 02958343/expert_verified/points_label/707d1e19b465d075adbfb30d8d1b297e.seg 02958343
04379243/points/5b74412eba257e5182b796aa5845e185.pts 04379243/expert_verified/points_label/5b74412eba257e5182b796aa5845e185.seg 04379243
03636649/points/a801be11157a7f243d39d8012919dd25.pts 03636649/expert_verified/points_label/a801be11157a7f243d39d8012919dd25.seg 03636649
02691156/points/26e10058cf9835aaca8607f540cc62ba.pts 02691156/expert_verified/points_label/26e10058cf9835aaca8607f540cc62ba.seg 02691156
03636649/points/bc704db7b62582e5d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/bc704db7b62582e5d1cbf3e52b9b6237.seg 03636649
02691156/points/d2e2e23f5be557e2d1ab3b031c100cb1.pts 02691156/expert_verified/points_label/d2e2e23f5be557e2d1ab3b031c100cb1.seg 02691156
03001627/points/920af478601258e24762da3a3017ade.pts 03001627/expert_verified/points_label/920af478601258e24762da3a3017ade.seg 03001627
03001627/points/3ffd794e5100258483bc207d8a5912e3.pts 03001627/expert_verified/points_label/3ffd794e5100258483bc207d8a5912e3.seg 03001627
04379243/points/69c536d9e450cb79436e6787c76ef3f0.pts 04379243/expert_verified/points_label/69c536d9e450cb79436e6787c76ef3f0.seg 04379243
04379243/points/6cf6a546e2ecbffe815a7efb12912.pts 04379243/expert_verified/points_label/6cf6a546e2ecbffe815a7efb12912.seg 04379243
03001627/points/815f436a40c28da51f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/815f436a40c28da51f56aa11cd5e0c3e.seg 03001627
03642806/points/4504a4d244d05ddbf5f79806bd65844f.pts 03642806/expert_verified/points_label/4504a4d244d05ddbf5f79806bd65844f.seg 03642806
04379243/points/8ad9868947e7391113625562b56161f0.pts 04379243/expert_verified/points_label/8ad9868947e7391113625562b56161f0.seg 04379243
03001627/points/6b9c3d42724275cf7a5c8cd74a7bc29a.pts 03001627/expert_verified/points_label/6b9c3d42724275cf7a5c8cd74a7bc29a.seg 03001627
04379243/points/67e32538a35a5011a0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/67e32538a35a5011a0ab1d82ef09f78f.seg 04379243
03624134/points/2743e37a65e198d51592d7a04a86fa53.pts 03624134/expert_verified/points_label/2743e37a65e198d51592d7a04a86fa53.seg 03624134
04379243/points/12df5c215f4364b7fe388cf6c4c3705d.pts 04379243/expert_verified/points_label/12df5c215f4364b7fe388cf6c4c3705d.seg 04379243
02958343/points/55e0897c0ac089a6da5cb3be8feeaadc.pts 02958343/expert_verified/points_label/55e0897c0ac089a6da5cb3be8feeaadc.seg 02958343
02773838/points/4e4fcfffec161ecaed13f430b2941481.pts 02773838/expert_verified/points_label/4e4fcfffec161ecaed13f430b2941481.seg 02773838
04379243/points/8ce70dead5119191cc3492a06e9bd850.pts 04379243/expert_verified/points_label/8ce70dead5119191cc3492a06e9bd850.seg 04379243
02691156/points/e033b6ad34586a86cc1c9e8218bfe7fc.pts 02691156/expert_verified/points_label/e033b6ad34586a86cc1c9e8218bfe7fc.seg 02691156
03636649/points/600b2f00113ad714e2367b9e27f16a71.pts 03636649/expert_verified/points_label/600b2f00113ad714e2367b9e27f16a71.seg 03636649
04379243/points/a74cad1781afed87dcfcef693e7ec696.pts 04379243/expert_verified/points_label/a74cad1781afed87dcfcef693e7ec696.seg 04379243
03001627/points/5402eecc67e489502fa77440dcb93214.pts 03001627/expert_verified/points_label/5402eecc67e489502fa77440dcb93214.seg 03001627
03001627/points/d5bd6ea417eba6ce456cbf78e1e89022.pts 03001627/expert_verified/points_label/d5bd6ea417eba6ce456cbf78e1e89022.seg 03001627
03001627/points/d4edd167061dac5f52a3901fa1436b1a.pts 03001627/expert_verified/points_label/d4edd167061dac5f52a3901fa1436b1a.seg 03001627
03636649/points/9fc3ddc511f4ef62dced62abd38a02b0.pts 03636649/expert_verified/points_label/9fc3ddc511f4ef62dced62abd38a02b0.seg 03636649
02691156/points/92a83ecaa10e8d3f78e919a72d9a39e7.pts 02691156/expert_verified/points_label/92a83ecaa10e8d3f78e919a72d9a39e7.seg 02691156
03001627/points/fee36ec8c8ae503fc68456e8da5b9a30.pts 03001627/expert_verified/points_label/fee36ec8c8ae503fc68456e8da5b9a30.seg 03001627
04379243/points/1df409cfefbb51658b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/1df409cfefbb51658b9b51ae4415d5aa.seg 04379243
03001627/points/76283716a2c6586e266d673a6188bf4c.pts 03001627/expert_verified/points_label/76283716a2c6586e266d673a6188bf4c.seg 03001627
04379243/points/29b2aaca87d19a3c5759f4335ff2e408.pts 04379243/expert_verified/points_label/29b2aaca87d19a3c5759f4335ff2e408.seg 04379243
04379243/points/21ca4d36a0f6fa69b937d98d58545fa.pts 04379243/expert_verified/points_label/21ca4d36a0f6fa69b937d98d58545fa.seg 04379243
02691156/points/da1acb401541235be4d2773f0358b43b.pts 02691156/expert_verified/points_label/da1acb401541235be4d2773f0358b43b.seg 02691156
04379243/points/553c416f33c5e5e18b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/553c416f33c5e5e18b9b51ae4415d5aa.seg 04379243
04379243/points/174832b73cd6d91c9856fa70a578baeb.pts 04379243/expert_verified/points_label/174832b73cd6d91c9856fa70a578baeb.seg 04379243
02691156/points/1c2e9dedbcf511e616a077c4c0fc1181.pts 02691156/expert_verified/points_label/1c2e9dedbcf511e616a077c4c0fc1181.seg 02691156
03001627/points/893c689b192bbe33ebadcdfba7971b71.pts 03001627/expert_verified/points_label/893c689b192bbe33ebadcdfba7971b71.seg 03001627
04379243/points/52037005fbff92d08fa35606145b47dc.pts 04379243/expert_verified/points_label/52037005fbff92d08fa35606145b47dc.seg 04379243
04225987/points/e38a4e6fb32b51a1bebb1fbb949ea955.pts 04225987/expert_verified/points_label/e38a4e6fb32b51a1bebb1fbb949ea955.seg 04225987
03636649/points/42bc0dce81734d892610e2a20d7c4b61.pts 03636649/expert_verified/points_label/42bc0dce81734d892610e2a20d7c4b61.seg 03636649
04379243/points/cb7ebc943b1b424988386fe1512ed26f.pts 04379243/expert_verified/points_label/cb7ebc943b1b424988386fe1512ed26f.seg 04379243
03624134/points/2d6e9b23e171760c3e332fb3cb6ebe50.pts 03624134/expert_verified/points_label/2d6e9b23e171760c3e332fb3cb6ebe50.seg 03624134
04379243/points/d05ff7b47febe58a656db3f863b4b796.pts 04379243/expert_verified/points_label/d05ff7b47febe58a656db3f863b4b796.seg 04379243
03636649/points/e178ab3b967c7fddc901d9dddb735c9f.pts 03636649/expert_verified/points_label/e178ab3b967c7fddc901d9dddb735c9f.seg 03636649
04379243/points/527b2d1e964f056383be1aa5a5ab0c80.pts 04379243/expert_verified/points_label/527b2d1e964f056383be1aa5a5ab0c80.seg 04379243
03001627/points/f1a1bb6ad29d703078d928ba1c4a6f75.pts 03001627/expert_verified/points_label/f1a1bb6ad29d703078d928ba1c4a6f75.seg 03001627
04379243/points/ed9dc0937009dc031311158f08f2982a.pts 04379243/expert_verified/points_label/ed9dc0937009dc031311158f08f2982a.seg 04379243
02691156/points/e41c5719ad09055f1b880c747ee1f83.pts 02691156/expert_verified/points_label/e41c5719ad09055f1b880c747ee1f83.seg 02691156
04379243/points/34bbe284f7499df071a782a4379556c7.pts 04379243/expert_verified/points_label/34bbe284f7499df071a782a4379556c7.seg 04379243
02691156/points/973df01cea43c7f690b1d6deb98feec6.pts 02691156/expert_verified/points_label/973df01cea43c7f690b1d6deb98feec6.seg 02691156
03001627/points/ed97d1c954fca49851ceffe90913a32.pts 03001627/expert_verified/points_label/ed97d1c954fca49851ceffe90913a32.seg 03001627
03001627/points/3a74e3d5172ee94fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/3a74e3d5172ee94fdef1c01cbd4ae0c.seg 03001627
04379243/points/194b279c7e892a2d15fa8082e5524f79.pts 04379243/expert_verified/points_label/194b279c7e892a2d15fa8082e5524f79.seg 04379243
04379243/points/23ece3bf871619366ff454af1e8947f3.pts 04379243/expert_verified/points_label/23ece3bf871619366ff454af1e8947f3.seg 04379243
02691156/points/7de379891610f5feaf7dd1bfd65143a9.pts 02691156/expert_verified/points_label/7de379891610f5feaf7dd1bfd65143a9.seg 02691156
04379243/points/54ba7e77a2bf5fe3158b7df020486ff2.pts 04379243/expert_verified/points_label/54ba7e77a2bf5fe3158b7df020486ff2.seg 04379243
03001627/points/39825fb4341ebd1ccb002c1e2b5fc68b.pts 03001627/expert_verified/points_label/39825fb4341ebd1ccb002c1e2b5fc68b.seg 03001627
03001627/points/a32febea4a0ac30171a782a4379556c7.pts 03001627/expert_verified/points_label/a32febea4a0ac30171a782a4379556c7.seg 03001627
02691156/points/b9ba988dd9a6cf426e8b6dd39a855b69.pts 02691156/expert_verified/points_label/b9ba988dd9a6cf426e8b6dd39a855b69.seg 02691156
02691156/points/37b1f7f02c4b87dbca8607f540cc62ba.pts 02691156/expert_verified/points_label/37b1f7f02c4b87dbca8607f540cc62ba.seg 02691156
04379243/points/8ce538a671c6e684d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/8ce538a671c6e684d93768e7b9b1eabf.seg 04379243
04225987/points/48bf45bffab55d7cf14c37b285d25cdf.pts 04225987/expert_verified/points_label/48bf45bffab55d7cf14c37b285d25cdf.seg 04225987
02691156/points/820ba20e5da8325f19ba010ddb4974fe.pts 02691156/expert_verified/points_label/820ba20e5da8325f19ba010ddb4974fe.seg 02691156
02691156/points/ff52c059efaca3c1ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ff52c059efaca3c1ca8607f540cc62ba.seg 02691156
04379243/points/99737ff619cae25d6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/99737ff619cae25d6effbd64ad6b71b8.seg 04379243
04379243/points/e3b7fbed310c2c397c8d78b9aede742.pts 04379243/expert_verified/points_label/e3b7fbed310c2c397c8d78b9aede742.seg 04379243
03001627/points/e8eedd37cb054e37b59d74a7c956bd18.pts 03001627/expert_verified/points_label/e8eedd37cb054e37b59d74a7c956bd18.seg 03001627
03790512/points/8134a965cc0b134bb37378f3c85478b4.pts 03790512/expert_verified/points_label/8134a965cc0b134bb37378f3c85478b4.seg 03790512
03636649/points/da5f13f4048dbd72fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/da5f13f4048dbd72fcb8d8c6d4df8143.seg 03636649
03001627/points/f5d8dd0309401ebac47a35332c17cce2.pts 03001627/expert_verified/points_label/f5d8dd0309401ebac47a35332c17cce2.seg 03001627
02691156/points/521eab9363fdc2a07209009cfb89d4bd.pts 02691156/expert_verified/points_label/521eab9363fdc2a07209009cfb89d4bd.seg 02691156
03636649/points/b1e552b454366a9d7787152e5befb05b.pts 03636649/expert_verified/points_label/b1e552b454366a9d7787152e5befb05b.seg 03636649
02958343/points/8590a6c8270375e34b5a812ecf553410.pts 02958343/expert_verified/points_label/8590a6c8270375e34b5a812ecf553410.seg 02958343
04379243/points/d46537f513283d6cdcfcef693e7ec696.pts 04379243/expert_verified/points_label/d46537f513283d6cdcfcef693e7ec696.seg 04379243
03001627/points/60a5795c905f3bb157f5033576317e1.pts 03001627/expert_verified/points_label/60a5795c905f3bb157f5033576317e1.seg 03001627
02691156/points/8996445c6d2407c0fb5c1b0f759e2bc1.pts 02691156/expert_verified/points_label/8996445c6d2407c0fb5c1b0f759e2bc1.seg 02691156
03624134/points/5e15d63317014f30ceea8802f71596b5.pts 03624134/expert_verified/points_label/5e15d63317014f30ceea8802f71596b5.seg 03624134
03642806/points/9d48ab8c41174e60888cad7f6c0e6001.pts 03642806/expert_verified/points_label/9d48ab8c41174e60888cad7f6c0e6001.seg 03642806
04379243/points/4cd35d6ec155d39633207e4c3ac155a4.pts 04379243/expert_verified/points_label/4cd35d6ec155d39633207e4c3ac155a4.seg 04379243
04379243/points/884d2cc0d3aa8a72640e544a5d67c33a.pts 04379243/expert_verified/points_label/884d2cc0d3aa8a72640e544a5d67c33a.seg 04379243
03001627/points/8191bad981637a71b356ab8b24c147.pts 03001627/expert_verified/points_label/8191bad981637a71b356ab8b24c147.seg 03001627
03261776/points/de3b9b253e8f1aaf8b15c58b209760b5.pts 03261776/expert_verified/points_label/de3b9b253e8f1aaf8b15c58b209760b5.seg 03261776
03636649/points/5b744ac897fe8bc557f40ff86fe708ff.pts 03636649/expert_verified/points_label/5b744ac897fe8bc557f40ff86fe708ff.seg 03636649
04379243/points/6cd84ff61583805c85e2af9bf984f0b5.pts 04379243/expert_verified/points_label/6cd84ff61583805c85e2af9bf984f0b5.seg 04379243
04379243/points/e65066d6b0b83719c3bd24f986301745.pts 04379243/expert_verified/points_label/e65066d6b0b83719c3bd24f986301745.seg 04379243
04379243/points/f3efcbd9745da90619fb4103277a6b93.pts 04379243/expert_verified/points_label/f3efcbd9745da90619fb4103277a6b93.seg 04379243
04379243/points/8ac4d93e65b9d58d9b937d98d58545fa.pts 04379243/expert_verified/points_label/8ac4d93e65b9d58d9b937d98d58545fa.seg 04379243
03636649/points/b69c3a0a46b932e3d3c1fbbc2200e255.pts 03636649/expert_verified/points_label/b69c3a0a46b932e3d3c1fbbc2200e255.seg 03636649
03636649/points/5c7965b0835a1a241de9bf5a9c22fde.pts 03636649/expert_verified/points_label/5c7965b0835a1a241de9bf5a9c22fde.seg 03636649
03001627/points/27ea798c55699b6d2c528d33bca1ac2.pts 03001627/expert_verified/points_label/27ea798c55699b6d2c528d33bca1ac2.seg 03001627
03467517/points/dc623742d6d1518e19959b248340fafd.pts 03467517/expert_verified/points_label/dc623742d6d1518e19959b248340fafd.seg 03467517
03001627/points/c6cb59e7645dd14d661ff085a0f14b7.pts 03001627/expert_verified/points_label/c6cb59e7645dd14d661ff085a0f14b7.seg 03001627
03948459/points/a3679104af613021912d826efe946a9f.pts 03948459/expert_verified/points_label/a3679104af613021912d826efe946a9f.seg 03948459
03467517/points/b6d2d35747549a5b93f0194265a9746c.pts 03467517/expert_verified/points_label/b6d2d35747549a5b93f0194265a9746c.seg 03467517
02691156/points/2c1fff0653854166e7a636089598229.pts 02691156/expert_verified/points_label/2c1fff0653854166e7a636089598229.seg 02691156
04379243/points/1040cd764facf6981190e285a2cbc9c.pts 04379243/expert_verified/points_label/1040cd764facf6981190e285a2cbc9c.seg 04379243
03001627/points/485831d92925bf03f3d7c13662c10792.pts 03001627/expert_verified/points_label/485831d92925bf03f3d7c13662c10792.seg 03001627
03636649/points/284986b4c72d624abd73284bc3c3cbac.pts 03636649/expert_verified/points_label/284986b4c72d624abd73284bc3c3cbac.seg 03636649
02691156/points/4c008f39378be18bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/4c008f39378be18bc0909d98a1ff2b4.seg 02691156
04379243/points/9611888ee0db1ecaf7d4d3ced798ad90.pts 04379243/expert_verified/points_label/9611888ee0db1ecaf7d4d3ced798ad90.seg 04379243
03467517/points/12e30808350dd945f4b498e11fb60a4b.pts 03467517/expert_verified/points_label/12e30808350dd945f4b498e11fb60a4b.seg 03467517
03467517/points/3243edb05f5e8803ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3243edb05f5e8803ac61a2f8346a8f.seg 03467517
04379243/points/ec4675f62f6946118cbb8bac2032149c.pts 04379243/expert_verified/points_label/ec4675f62f6946118cbb8bac2032149c.seg 04379243
04379243/points/eb00a4e8b33d257cad16260d4d73b56.pts 04379243/expert_verified/points_label/eb00a4e8b33d257cad16260d4d73b56.seg 04379243
03001627/points/5607b02869c1f8a019fb4103277a6b93.pts 03001627/expert_verified/points_label/5607b02869c1f8a019fb4103277a6b93.seg 03001627
03636649/points/d456beea1501f278f70220cd6be776f7.pts 03636649/expert_verified/points_label/d456beea1501f278f70220cd6be776f7.seg 03636649
02691156/points/3feeb5f8ecbfcb4ba8f0518e94fcfb22.pts 02691156/expert_verified/points_label/3feeb5f8ecbfcb4ba8f0518e94fcfb22.seg 02691156
04379243/points/fe130356df1977499c2a886f3b75f1ff.pts 04379243/expert_verified/points_label/fe130356df1977499c2a886f3b75f1ff.seg 04379243
02958343/points/aa7f127bb8cd9db73755eb267a6f3b6b.pts 02958343/expert_verified/points_label/aa7f127bb8cd9db73755eb267a6f3b6b.seg 02958343
04379243/points/84a3c87bba5a472af51f77a6d7299806.pts 04379243/expert_verified/points_label/84a3c87bba5a472af51f77a6d7299806.seg 04379243
04099429/points/2de8ee55ff69502863098049d14fe32f.pts 04099429/expert_verified/points_label/2de8ee55ff69502863098049d14fe32f.seg 04099429
03624134/points/539ff9b2a7a0329e759e4c424bcdaafe.pts 03624134/expert_verified/points_label/539ff9b2a7a0329e759e4c424bcdaafe.seg 03624134
03948459/points/f3f6678898938575575e33965575974.pts 03948459/expert_verified/points_label/f3f6678898938575575e33965575974.seg 03948459
04379243/points/c26dfd3453d81bf7788eb1f5e7ba6e7b.pts 04379243/expert_verified/points_label/c26dfd3453d81bf7788eb1f5e7ba6e7b.seg 04379243
03001627/points/8117c55b8bbdbbc54c5c5c89015f1980.pts 03001627/expert_verified/points_label/8117c55b8bbdbbc54c5c5c89015f1980.seg 03001627
03624134/points/40ccb8ac250e0ea5880595487ba7a30b.pts 03624134/expert_verified/points_label/40ccb8ac250e0ea5880595487ba7a30b.seg 03624134
04379243/points/a0d2754011acdcc9d8a0e410093d6619.pts 04379243/expert_verified/points_label/a0d2754011acdcc9d8a0e410093d6619.seg 04379243
03790512/points/5bd41c7d3e158ac93ff4d2f5a7608a24.pts 03790512/expert_verified/points_label/5bd41c7d3e158ac93ff4d2f5a7608a24.seg 03790512
04379243/points/8f440a7c0e2af79f3ed0ffd59feeec00.pts 04379243/expert_verified/points_label/8f440a7c0e2af79f3ed0ffd59feeec00.seg 04379243
03001627/points/734ac9809aada180d18df440db206fb1.pts 03001627/expert_verified/points_label/734ac9809aada180d18df440db206fb1.seg 03001627
03001627/points/54f33a7cb3621d5ced98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/54f33a7cb3621d5ced98cca8f0ccd5f7.seg 03001627
03001627/points/d274fc14092387c1e17e1cb731e2fa4f.pts 03001627/expert_verified/points_label/d274fc14092387c1e17e1cb731e2fa4f.seg 03001627
03636649/points/6ccb43088eda061dbfc838749f053cf9.pts 03636649/expert_verified/points_label/6ccb43088eda061dbfc838749f053cf9.seg 03636649
02773838/points/1b9ef45fefefa35ed13f430b2941481.pts 02773838/expert_verified/points_label/1b9ef45fefefa35ed13f430b2941481.seg 02773838
03001627/points/35053caa62eea36c116cc4e115d5fd2.pts 03001627/expert_verified/points_label/35053caa62eea36c116cc4e115d5fd2.seg 03001627
04379243/points/b893c20bfb5d718371a782a4379556c7.pts 04379243/expert_verified/points_label/b893c20bfb5d718371a782a4379556c7.seg 04379243
04379243/points/1a5062241d7903076f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/1a5062241d7903076f88aa1b7f7cc6c6.seg 04379243
02958343/points/add26d8f4f91ba04c84b95bddf75b22d.pts 02958343/expert_verified/points_label/add26d8f4f91ba04c84b95bddf75b22d.seg 02958343
03636649/points/f85f26c5a807b22312bea13341a54c3f.pts 03636649/expert_verified/points_label/f85f26c5a807b22312bea13341a54c3f.seg 03636649
03001627/points/8a232028c2b2cfad43649af30eba8304.pts 03001627/expert_verified/points_label/8a232028c2b2cfad43649af30eba8304.seg 03001627
03636649/points/3a5a0f4c78e17b284f0c4075db76b7c.pts 03636649/expert_verified/points_label/3a5a0f4c78e17b284f0c4075db76b7c.seg 03636649
04379243/points/df811f7a858750875634c21965ee6bab.pts 04379243/expert_verified/points_label/df811f7a858750875634c21965ee6bab.seg 04379243
02691156/points/48706d323b9041d5438a95791ca4064d.pts 02691156/expert_verified/points_label/48706d323b9041d5438a95791ca4064d.seg 02691156
03790512/points/170cfc531a4fd09fe6905ba5363784c3.pts 03790512/expert_verified/points_label/170cfc531a4fd09fe6905ba5363784c3.seg 03790512
03467517/points/d4b2ddb52e8dcd3593f0194265a9746c.pts 03467517/expert_verified/points_label/d4b2ddb52e8dcd3593f0194265a9746c.seg 03467517
03636649/points/2af78c0b040634e5881cd5e2fd8f0f3b.pts 03636649/expert_verified/points_label/2af78c0b040634e5881cd5e2fd8f0f3b.seg 03636649
04379243/points/90cd6a48cf2789a9b430d97a45d5824.pts 04379243/expert_verified/points_label/90cd6a48cf2789a9b430d97a45d5824.seg 04379243
03001627/points/43290694390ad1adfc735c9ceab0161a.pts 03001627/expert_verified/points_label/43290694390ad1adfc735c9ceab0161a.seg 03001627
03636649/points/ed57181b9e7644a3f51f77a6d7299806.pts 03636649/expert_verified/points_label/ed57181b9e7644a3f51f77a6d7299806.seg 03636649
03261776/points/a9661a8bb610d902957b6a4f3924d982.pts 03261776/expert_verified/points_label/a9661a8bb610d902957b6a4f3924d982.seg 03261776
02691156/points/b31bbc50a0d3a4366cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/b31bbc50a0d3a4366cf1b4a8fc3914e.seg 02691156
03001627/points/cd5ad4afabaed0d3e762624dc3c8fa2a.pts 03001627/expert_verified/points_label/cd5ad4afabaed0d3e762624dc3c8fa2a.seg 03001627
02958343/points/d2e1dc21db9b45df6436916a86a90ed7.pts 02958343/expert_verified/points_label/d2e1dc21db9b45df6436916a86a90ed7.seg 02958343
02691156/points/de9e093bb17848c3b2bd4a92202f8700.pts 02691156/expert_verified/points_label/de9e093bb17848c3b2bd4a92202f8700.seg 02691156
03467517/points/40cd2cafde62ff7ca24eeca91f583600.pts 03467517/expert_verified/points_label/40cd2cafde62ff7ca24eeca91f583600.seg 03467517
02958343/points/56e0fef0632aed0f1d27be7764701cfe.pts 02958343/expert_verified/points_label/56e0fef0632aed0f1d27be7764701cfe.seg 02958343
04379243/points/a4d149a48607de3d92f4c88fd91c6b1b.pts 04379243/expert_verified/points_label/a4d149a48607de3d92f4c88fd91c6b1b.seg 04379243
03636649/points/45f11cb4099c9c87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/45f11cb4099c9c87bbc7a6acbd8f058b.seg 03636649
04379243/points/3558aeeb9698722acf19858fd1963d10.pts 04379243/expert_verified/points_label/3558aeeb9698722acf19858fd1963d10.seg 04379243
03636649/points/2a52bd01472ec7e1589ec67c01f5c1a7.pts 03636649/expert_verified/points_label/2a52bd01472ec7e1589ec67c01f5c1a7.seg 03636649
03467517/points/58bb21c325f021088f01c8e793a6e062.pts 03467517/expert_verified/points_label/58bb21c325f021088f01c8e793a6e062.seg 03467517
04379243/points/3997cdee934a9b238eb3bc6c6d15f9bf.pts 04379243/expert_verified/points_label/3997cdee934a9b238eb3bc6c6d15f9bf.seg 04379243
03001627/points/c4cab2a416a4537e2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/c4cab2a416a4537e2871cc0b3cc1a485.seg 03001627
04379243/points/6aaa78b81528f4846674ff79eed6185a.pts 04379243/expert_verified/points_label/6aaa78b81528f4846674ff79eed6185a.seg 04379243
03636649/points/fd5f6ab819910a66dc7f95a5a82e36f7.pts 03636649/expert_verified/points_label/fd5f6ab819910a66dc7f95a5a82e36f7.seg 03636649
04379243/points/8e3303cae6cc104bad4f8ccb153c24e.pts 04379243/expert_verified/points_label/8e3303cae6cc104bad4f8ccb153c24e.seg 04379243
03001627/points/2f0318b23d899a84493f17f4fe9b9eb2.pts 03001627/expert_verified/points_label/2f0318b23d899a84493f17f4fe9b9eb2.seg 03001627
04379243/points/2406cdcd4c60c84132884c4c87a2e061.pts 04379243/expert_verified/points_label/2406cdcd4c60c84132884c4c87a2e061.seg 04379243
03790512/points/55caf44a43f2c04d468bac13e007a6e9.pts 03790512/expert_verified/points_label/55caf44a43f2c04d468bac13e007a6e9.seg 03790512
03001627/points/ee665ce6679ac8cfb502ac2eb9128f9a.pts 03001627/expert_verified/points_label/ee665ce6679ac8cfb502ac2eb9128f9a.seg 03001627
02691156/points/32edb6ba5788dc12d8ff6111270336a9.pts 02691156/expert_verified/points_label/32edb6ba5788dc12d8ff6111270336a9.seg 02691156
03636649/points/d0fde1daedab10365240248232b90795.pts 03636649/expert_verified/points_label/d0fde1daedab10365240248232b90795.seg 03636649
04379243/points/61b88b501933ebae8f7068c66465c4d6.pts 04379243/expert_verified/points_label/61b88b501933ebae8f7068c66465c4d6.seg 04379243
03001627/points/93556cf01e19f638bf80985a99195eb8.pts 03001627/expert_verified/points_label/93556cf01e19f638bf80985a99195eb8.seg 03001627
04379243/points/f3b8c91c5dd1cb6b8722573b29f0d6d8.pts 04379243/expert_verified/points_label/f3b8c91c5dd1cb6b8722573b29f0d6d8.seg 04379243
04379243/points/eae36b396f6b5f97664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/eae36b396f6b5f97664b3b9b23ddfcbc.seg 04379243
03624134/points/8bd5c4f395695ebdf40d02cc9d84a93a.pts 03624134/expert_verified/points_label/8bd5c4f395695ebdf40d02cc9d84a93a.seg 03624134
03001627/points/8c81ff18e04584547f409062bafc8e2.pts 03001627/expert_verified/points_label/8c81ff18e04584547f409062bafc8e2.seg 03001627
03001627/points/77e7660d71c6f3befebad4f49b26ec52.pts 03001627/expert_verified/points_label/77e7660d71c6f3befebad4f49b26ec52.seg 03001627
03261776/points/bc404e52bfcd2038538cf6df9faa9b65.pts 03261776/expert_verified/points_label/bc404e52bfcd2038538cf6df9faa9b65.seg 03261776
03001627/points/f09af71bebd4bea8a2651abaf391628e.pts 03001627/expert_verified/points_label/f09af71bebd4bea8a2651abaf391628e.seg 03001627
03001627/points/8c8efbe62a1547942b90a0fb76278f6f.pts 03001627/expert_verified/points_label/8c8efbe62a1547942b90a0fb76278f6f.seg 03001627
04379243/points/aed5697ff59e3d3035478a6869a3602d.pts 04379243/expert_verified/points_label/aed5697ff59e3d3035478a6869a3602d.seg 04379243
02691156/points/5ac00867c7d78b1690b1d6deb98feec6.pts 02691156/expert_verified/points_label/5ac00867c7d78b1690b1d6deb98feec6.seg 02691156
03001627/points/c709aa613431c0538a653a9f65a410f6.pts 03001627/expert_verified/points_label/c709aa613431c0538a653a9f65a410f6.seg 03001627
03624134/points/8facbe9d9f4da233d15a5887ec2183c9.pts 03624134/expert_verified/points_label/8facbe9d9f4da233d15a5887ec2183c9.seg 03624134
03642806/points/dbcd5a88a9d4f1d7579cfe4420588034.pts 03642806/expert_verified/points_label/dbcd5a88a9d4f1d7579cfe4420588034.seg 03642806
03636649/points/f29a94f969dd55ffc35131da26f8061a.pts 03636649/expert_verified/points_label/f29a94f969dd55ffc35131da26f8061a.seg 03636649
02958343/points/5e014eb2bd03daab9fbe97de4a41d527.pts 02958343/expert_verified/points_label/5e014eb2bd03daab9fbe97de4a41d527.seg 02958343
04379243/points/7105bd044f464358beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/7105bd044f464358beedb4c8fd29e2d1.seg 04379243
04379243/points/c827c0d4ef212f2b30cb1fe6fdc7d605.pts 04379243/expert_verified/points_label/c827c0d4ef212f2b30cb1fe6fdc7d605.seg 04379243
04379243/points/19bc9c781df1da46824080f516909671.pts 04379243/expert_verified/points_label/19bc9c781df1da46824080f516909671.seg 04379243
03001627/points/71b53a5f441d45b742b7e4c0136bdb7e.pts 03001627/expert_verified/points_label/71b53a5f441d45b742b7e4c0136bdb7e.seg 03001627
02958343/points/e7e94f8dbbe8c1e9784da3853aae78cd.pts 02958343/expert_verified/points_label/e7e94f8dbbe8c1e9784da3853aae78cd.seg 02958343
03790512/points/832c4a316c419228b37378f3c85478b4.pts 03790512/expert_verified/points_label/832c4a316c419228b37378f3c85478b4.seg 03790512
02954340/points/c7122c44495a5ac6aceb0fa31f18f016.pts 02954340/expert_verified/points_label/c7122c44495a5ac6aceb0fa31f18f016.seg 02954340
03001627/points/6b32d3a9198f8b03d1dcc55e36186e4e.pts 03001627/expert_verified/points_label/6b32d3a9198f8b03d1dcc55e36186e4e.seg 03001627
03636649/points/7893d0b50a7b6a768ec45924afa4ac91.pts 03636649/expert_verified/points_label/7893d0b50a7b6a768ec45924afa4ac91.seg 03636649
02691156/points/befcb95d80e0e49119ba010ddb4974fe.pts 02691156/expert_verified/points_label/befcb95d80e0e49119ba010ddb4974fe.seg 02691156
03001627/points/b70600293bab55c0593ebeeedbff73b.pts 03001627/expert_verified/points_label/b70600293bab55c0593ebeeedbff73b.seg 03001627
02691156/points/7fedb48b457ee9f31629b98cc1b1b992.pts 02691156/expert_verified/points_label/7fedb48b457ee9f31629b98cc1b1b992.seg 02691156
04099429/points/e04bda8655d9e606ebcdf982796b4fa.pts 04099429/expert_verified/points_label/e04bda8655d9e606ebcdf982796b4fa.seg 04099429
04379243/points/25bcea593e4314c3436e6787c76ef3f0.pts 04379243/expert_verified/points_label/25bcea593e4314c3436e6787c76ef3f0.seg 04379243
03636649/points/f3a9cc3060fd6b0e6e4f8fc909e0d34e.pts 03636649/expert_verified/points_label/f3a9cc3060fd6b0e6e4f8fc909e0d34e.seg 03636649
04379243/points/516928532093f765bababe11fcea8796.pts 04379243/expert_verified/points_label/516928532093f765bababe11fcea8796.seg 04379243
03001627/points/31569815c88e79de4458bae25a4e518a.pts 03001627/expert_verified/points_label/31569815c88e79de4458bae25a4e518a.seg 03001627
03001627/points/a08ad49c281128ea53615647c93fc704.pts 03001627/expert_verified/points_label/a08ad49c281128ea53615647c93fc704.seg 03001627
03642806/points/f5fc954736b06be15fd06491ae919ea3.pts 03642806/expert_verified/points_label/f5fc954736b06be15fd06491ae919ea3.seg 03642806
04379243/points/15b495c101881d96e2367b9e27f16a71.pts 04379243/expert_verified/points_label/15b495c101881d96e2367b9e27f16a71.seg 04379243
02691156/points/ebd991666f177f8f575bf8a4b14be4f4.pts 02691156/expert_verified/points_label/ebd991666f177f8f575bf8a4b14be4f4.seg 02691156
02691156/points/f7739764eb1c78a053f370d353cea84.pts 02691156/expert_verified/points_label/f7739764eb1c78a053f370d353cea84.seg 02691156
03636649/points/8a6d770e6b4942c5ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/8a6d770e6b4942c5ef3a2c64cef919d0.seg 03636649
04379243/points/2fcc875b28c5557dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2fcc875b28c5557dcfcef693e7ec696.seg 04379243
03636649/points/896abd405c79547086485c798787f66b.pts 03636649/expert_verified/points_label/896abd405c79547086485c798787f66b.seg 03636649
02691156/points/356a633ea047c549ca8607f540cc62ba.pts 02691156/expert_verified/points_label/356a633ea047c549ca8607f540cc62ba.seg 02691156
03001627/points/c983108db7fcfa3619fb4103277a6b93.pts 03001627/expert_verified/points_label/c983108db7fcfa3619fb4103277a6b93.seg 03001627
04225987/points/97f85bc59f09a9f455c660e6cd8e92b.pts 04225987/expert_verified/points_label/97f85bc59f09a9f455c660e6cd8e92b.seg 04225987
03636649/points/69a708be7245f4c9786e8e92cc08146.pts 03636649/expert_verified/points_label/69a708be7245f4c9786e8e92cc08146.seg 03636649
04379243/points/f71296c0a7e93ec282db9fca4b68095.pts 04379243/expert_verified/points_label/f71296c0a7e93ec282db9fca4b68095.seg 04379243
02691156/points/33faf711ed54a4d3db22b838c125a50b.pts 02691156/expert_verified/points_label/33faf711ed54a4d3db22b838c125a50b.seg 02691156
03642806/points/5d544ee4b094c6606436916a86a90ed7.pts 03642806/expert_verified/points_label/5d544ee4b094c6606436916a86a90ed7.seg 03642806
02691156/points/a0d63ee7fd87f93619ba010ddb4974fe.pts 02691156/expert_verified/points_label/a0d63ee7fd87f93619ba010ddb4974fe.seg 02691156
03001627/points/e30b412be565a1026efe57da6d3d385e.pts 03001627/expert_verified/points_label/e30b412be565a1026efe57da6d3d385e.seg 03001627
04379243/points/fe5e1df0653804d6ce4670b160b81e9.pts 04379243/expert_verified/points_label/fe5e1df0653804d6ce4670b160b81e9.seg 04379243
02691156/points/fd41d04f1aabbaea3fddedb0bf24c68a.pts 02691156/expert_verified/points_label/fd41d04f1aabbaea3fddedb0bf24c68a.seg 02691156
03624134/points/e79481b2fde3a3ab340fbf70397ab69a.pts 03624134/expert_verified/points_label/e79481b2fde3a3ab340fbf70397ab69a.seg 03624134
04379243/points/d06d27bc9ad1faabd7bf6fb68df7f786.pts 04379243/expert_verified/points_label/d06d27bc9ad1faabd7bf6fb68df7f786.seg 04379243
03001627/points/e4931ffa06d7b05cb04cb542e2c50eb4.pts 03001627/expert_verified/points_label/e4931ffa06d7b05cb04cb542e2c50eb4.seg 03001627
03001627/points/d4b5f8edc72b4676f4175ee3a177350a.pts 03001627/expert_verified/points_label/d4b5f8edc72b4676f4175ee3a177350a.seg 03001627
03636649/points/4f16fffbe480b835276206fae5d3c473.pts 03636649/expert_verified/points_label/4f16fffbe480b835276206fae5d3c473.seg 03636649
03001627/points/8ade914cd21b6e49656f29b05c68d39f.pts 03001627/expert_verified/points_label/8ade914cd21b6e49656f29b05c68d39f.seg 03001627
03001627/points/1e304b967d5253d5dd079f8cece51712.pts 03001627/expert_verified/points_label/1e304b967d5253d5dd079f8cece51712.seg 03001627
04379243/points/6d0ef6312f8af87a53e946fb2184f0c4.pts 04379243/expert_verified/points_label/6d0ef6312f8af87a53e946fb2184f0c4.seg 04379243
03948459/points/79c0cac016998c7cf7ba4a82f8032357.pts 03948459/expert_verified/points_label/79c0cac016998c7cf7ba4a82f8032357.seg 03948459
03642806/points/b51683c6285fa0f69067ac5c9d4ee692.pts 03642806/expert_verified/points_label/b51683c6285fa0f69067ac5c9d4ee692.seg 03642806
04379243/points/93cdfd14889492dd91a4fd87fee47737.pts 04379243/expert_verified/points_label/93cdfd14889492dd91a4fd87fee47737.seg 04379243
03636649/points/da8141b45da808199a06a7de97b096dc.pts 03636649/expert_verified/points_label/da8141b45da808199a06a7de97b096dc.seg 03636649
04379243/points/7d22cd72bf2762b19a4b266ed4d507c9.pts 04379243/expert_verified/points_label/7d22cd72bf2762b19a4b266ed4d507c9.seg 04379243
04225987/points/aa886bed91a13113d5498a74ca9ca78b.pts 04225987/expert_verified/points_label/aa886bed91a13113d5498a74ca9ca78b.seg 04225987
04379243/points/55547d2fae0e3dc21705bfd3afcd10e.pts 04379243/expert_verified/points_label/55547d2fae0e3dc21705bfd3afcd10e.seg 04379243
04379243/points/222c56ff9cddbaf4139eb23f7c8036f.pts 04379243/expert_verified/points_label/222c56ff9cddbaf4139eb23f7c8036f.seg 04379243
03636649/points/292f1f97a543d735dedf3c967c85981a.pts 03636649/expert_verified/points_label/292f1f97a543d735dedf3c967c85981a.seg 03636649
04379243/points/9e2318099f77d3df3527ecfeb345775f.pts 04379243/expert_verified/points_label/9e2318099f77d3df3527ecfeb345775f.seg 04379243
04379243/points/6ace903899706a5819fb4103277a6b93.pts 04379243/expert_verified/points_label/6ace903899706a5819fb4103277a6b93.seg 04379243
03636649/points/c080aefc6cbff8c81185ac82ed4da80d.pts 03636649/expert_verified/points_label/c080aefc6cbff8c81185ac82ed4da80d.seg 03636649
03790512/points/9dd4ae1c34af4766b4f2746c8140d6d6.pts 03790512/expert_verified/points_label/9dd4ae1c34af4766b4f2746c8140d6d6.seg 03790512
03001627/points/e199b1f6a70c9f56df44d20a516c07b3.pts 03001627/expert_verified/points_label/e199b1f6a70c9f56df44d20a516c07b3.seg 03001627
04379243/points/8129d4c51abc3356bababe11fcea8796.pts 04379243/expert_verified/points_label/8129d4c51abc3356bababe11fcea8796.seg 04379243
03001627/points/c9d8573a048c0e959c0ca344f487323e.pts 03001627/expert_verified/points_label/c9d8573a048c0e959c0ca344f487323e.seg 03001627
04379243/points/25eefc5a3c7b30e1f103d473de33521a.pts 04379243/expert_verified/points_label/25eefc5a3c7b30e1f103d473de33521a.seg 04379243
03624134/points/c20cca071ea58e3ef2c542131520d62e.pts 03624134/expert_verified/points_label/c20cca071ea58e3ef2c542131520d62e.seg 03624134
03001627/points/c86cfe147872280463626070a93463cf.pts 03001627/expert_verified/points_label/c86cfe147872280463626070a93463cf.seg 03001627
03001627/points/3853339519aca1bdfcd4910413c446d9.pts 03001627/expert_verified/points_label/3853339519aca1bdfcd4910413c446d9.seg 03001627
03001627/points/8cb44a50906b827615e7ec87bf4cc5ab.pts 03001627/expert_verified/points_label/8cb44a50906b827615e7ec87bf4cc5ab.seg 03001627
02691156/points/fd9f1cdaa381599bca8607f540cc62ba.pts 02691156/expert_verified/points_label/fd9f1cdaa381599bca8607f540cc62ba.seg 02691156
03001627/points/80dabf9ddbdc92f681806e3880250dff.pts 03001627/expert_verified/points_label/80dabf9ddbdc92f681806e3880250dff.seg 03001627
04379243/points/5919dea71f3bcb071d54ab02e78bef2.pts 04379243/expert_verified/points_label/5919dea71f3bcb071d54ab02e78bef2.seg 04379243
03636649/points/292ba732e002629e68c2f5eb1dd4dfaa.pts 03636649/expert_verified/points_label/292ba732e002629e68c2f5eb1dd4dfaa.seg 03636649
04379243/points/5d77e8f6ad3741a0c30ab36bf7b0552.pts 04379243/expert_verified/points_label/5d77e8f6ad3741a0c30ab36bf7b0552.seg 04379243
03467517/points/21a517abc4729e6e352e5d4d2615db5b.pts 03467517/expert_verified/points_label/21a517abc4729e6e352e5d4d2615db5b.seg 03467517
03467517/points/6554f6429eb7b67585e3c97721f726e4.pts 03467517/expert_verified/points_label/6554f6429eb7b67585e3c97721f726e4.seg 03467517
02958343/points/f84ba2039d0a4ec5afe717997470b28d.pts 02958343/expert_verified/points_label/f84ba2039d0a4ec5afe717997470b28d.seg 02958343
02691156/points/29fd29045703ff18b4a8b7176ed97248.pts 02691156/expert_verified/points_label/29fd29045703ff18b4a8b7176ed97248.seg 02691156
03467517/points/a7f449a1f2cd1f1693f0194265a9746c.pts 03467517/expert_verified/points_label/a7f449a1f2cd1f1693f0194265a9746c.seg 03467517
03790512/points/7fcee59a33976221a88e8cb97b773125.pts 03790512/expert_verified/points_label/7fcee59a33976221a88e8cb97b773125.seg 03790512
04099429/points/2407c2684ee757e89c4176ab56cb612.pts 04099429/expert_verified/points_label/2407c2684ee757e89c4176ab56cb612.seg 04099429
04379243/points/f621e2ad900ad48535836c728d324152.pts 04379243/expert_verified/points_label/f621e2ad900ad48535836c728d324152.seg 04379243
03001627/points/9a54daea9071a536bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a54daea9071a536bf80985a99195eb8.seg 03001627
03001627/points/fd9e909b082d8175d319c38340319ae4.pts 03001627/expert_verified/points_label/fd9e909b082d8175d319c38340319ae4.seg 03001627
03001627/points/a8dd9990ecd74c45435897641a7ee684.pts 03001627/expert_verified/points_label/a8dd9990ecd74c45435897641a7ee684.seg 03001627
03636649/points/c6424950ca9447627d8864caa856253b.pts 03636649/expert_verified/points_label/c6424950ca9447627d8864caa856253b.seg 03636649
03948459/points/7f3ec97cfaea31137504cc74f24f0eee.pts 03948459/expert_verified/points_label/7f3ec97cfaea31137504cc74f24f0eee.seg 03948459
02691156/points/43abe330362164e99be82ec29531a70f.pts 02691156/expert_verified/points_label/43abe330362164e99be82ec29531a70f.seg 02691156
03001627/points/499c4b519c708ae84cd08aa7c510fb85.pts 03001627/expert_verified/points_label/499c4b519c708ae84cd08aa7c510fb85.seg 03001627
04379243/points/4c7931492b41f960d50eef20e0914a48.pts 04379243/expert_verified/points_label/4c7931492b41f960d50eef20e0914a48.seg 04379243
03001627/points/3f36e261cc87648ac3bd24f986301745.pts 03001627/expert_verified/points_label/3f36e261cc87648ac3bd24f986301745.seg 03001627
03001627/points/a09a88c11d0b27368821ad3452f1c8c9.pts 03001627/expert_verified/points_label/a09a88c11d0b27368821ad3452f1c8c9.seg 03001627
04379243/points/89cc879f005dcf50f1f50f6a678fb494.pts 04379243/expert_verified/points_label/89cc879f005dcf50f1f50f6a678fb494.seg 04379243
02958343/points/d34b0494fc4d756ab927782fc69a1fbb.pts 02958343/expert_verified/points_label/d34b0494fc4d756ab927782fc69a1fbb.seg 02958343
02958343/points/705840df46a582e2ac826a3c82da491.pts 02958343/expert_verified/points_label/705840df46a582e2ac826a3c82da491.seg 02958343
02691156/points/74a5f937c22aa08a3e70653c1b3170b5.pts 02691156/expert_verified/points_label/74a5f937c22aa08a3e70653c1b3170b5.seg 02691156
03948459/points/a0a1633186261a031274aa253a241db2.pts 03948459/expert_verified/points_label/a0a1633186261a031274aa253a241db2.seg 03948459
03001627/points/2de04227fae28e70b6eb6f056d511fe1.pts 03001627/expert_verified/points_label/2de04227fae28e70b6eb6f056d511fe1.seg 03001627
02691156/points/1e9ef313876bfba7d02c6d35cc802839.pts 02691156/expert_verified/points_label/1e9ef313876bfba7d02c6d35cc802839.seg 02691156
03636649/points/e99793b871d27333d42b9650f19dd425.pts 03636649/expert_verified/points_label/e99793b871d27333d42b9650f19dd425.seg 03636649
03001627/points/7228d43e00af4c1e2746490e2236e9a8.pts 03001627/expert_verified/points_label/7228d43e00af4c1e2746490e2236e9a8.seg 03001627
03636649/points/66111d2c7a23b0feb404555b84577afb.pts 03636649/expert_verified/points_label/66111d2c7a23b0feb404555b84577afb.seg 03636649
03001627/points/2499541ace317cbb8cb5d9909aeb1309.pts 03001627/expert_verified/points_label/2499541ace317cbb8cb5d9909aeb1309.seg 03001627
04379243/points/d151d9f45d8b14536cd661fb5fd95741.pts 04379243/expert_verified/points_label/d151d9f45d8b14536cd661fb5fd95741.seg 04379243
03001627/points/ea7be2b97e78d5b35a4480134e0cdd21.pts 03001627/expert_verified/points_label/ea7be2b97e78d5b35a4480134e0cdd21.seg 03001627
02958343/points/9c35f00f81110738783854950b26f0d3.pts 02958343/expert_verified/points_label/9c35f00f81110738783854950b26f0d3.seg 02958343
03001627/points/e30bd575bbd6c68c9710e093c764abec.pts 03001627/expert_verified/points_label/e30bd575bbd6c68c9710e093c764abec.seg 03001627
03790512/points/61b17f12bec91d057395d58407f193ba.pts 03790512/expert_verified/points_label/61b17f12bec91d057395d58407f193ba.seg 03790512
04379243/points/cd895c35fff495cdd0b93fa304cfa755.pts 04379243/expert_verified/points_label/cd895c35fff495cdd0b93fa304cfa755.seg 04379243
02958343/points/b70d970f8020c25dd141480e2c154d3.pts 02958343/expert_verified/points_label/b70d970f8020c25dd141480e2c154d3.seg 02958343
04379243/points/2642d805c53e243d629f73b53bd7a234.pts 04379243/expert_verified/points_label/2642d805c53e243d629f73b53bd7a234.seg 04379243
04379243/points/1bce2f4937d36446a32c566d71fa585c.pts 04379243/expert_verified/points_label/1bce2f4937d36446a32c566d71fa585c.seg 04379243
04379243/points/7c1bcea89b0037a2d67bd369ec608dad.pts 04379243/expert_verified/points_label/7c1bcea89b0037a2d67bd369ec608dad.seg 04379243
04379243/points/3154c61c595bd600e56ddd87eb888f65.pts 04379243/expert_verified/points_label/3154c61c595bd600e56ddd87eb888f65.seg 04379243
03001627/points/7a1de77ca204eaf28a514cac7cb18507.pts 03001627/expert_verified/points_label/7a1de77ca204eaf28a514cac7cb18507.seg 03001627
04379243/points/77ecc55547840f06d42b9650f19dd425.pts 04379243/expert_verified/points_label/77ecc55547840f06d42b9650f19dd425.seg 04379243
02691156/points/9a8aecab136ce50db7ef47444625afb2.pts 02691156/expert_verified/points_label/9a8aecab136ce50db7ef47444625afb2.seg 02691156
02958343/points/24866846d728484e1d1a964dea8a7aab.pts 02958343/expert_verified/points_label/24866846d728484e1d1a964dea8a7aab.seg 02958343
04099429/points/9b75297c580ff937b61ce5beb9f92726.pts 04099429/expert_verified/points_label/9b75297c580ff937b61ce5beb9f92726.seg 04099429
04225987/points/90dbe261a4d56dcf1082f2ea630bf69e.pts 04225987/expert_verified/points_label/90dbe261a4d56dcf1082f2ea630bf69e.seg 04225987
03001627/points/81b27636162e148bb3fb065fa3089331.pts 03001627/expert_verified/points_label/81b27636162e148bb3fb065fa3089331.seg 03001627
03642806/points/66d47a84a3d522dc9311bf79d4774e73.pts 03642806/expert_verified/points_label/66d47a84a3d522dc9311bf79d4774e73.seg 03642806
03001627/points/2a05ae00b701fda36567137a59cb1a56.pts 03001627/expert_verified/points_label/2a05ae00b701fda36567137a59cb1a56.seg 03001627
04379243/points/79df23303a3192c1cdf1dfd78f33901b.pts 04379243/expert_verified/points_label/79df23303a3192c1cdf1dfd78f33901b.seg 04379243
04379243/points/bf17779bec6abccf161bc5243aab8ea4.pts 04379243/expert_verified/points_label/bf17779bec6abccf161bc5243aab8ea4.seg 04379243
03001627/points/ece1a921c1bfd44947f5e245ee376525.pts 03001627/expert_verified/points_label/ece1a921c1bfd44947f5e245ee376525.seg 03001627
03636649/points/15c51ecb58bf304fef3a2c64cef919d0.pts 03636649/expert_verified/points_label/15c51ecb58bf304fef3a2c64cef919d0.seg 03636649
04379243/points/5d93e285b2006520ab610b0c94236463.pts 04379243/expert_verified/points_label/5d93e285b2006520ab610b0c94236463.seg 04379243
03636649/points/b2d5929e66044aeac7db9c21ccfbc4a1.pts 03636649/expert_verified/points_label/b2d5929e66044aeac7db9c21ccfbc4a1.seg 03636649
04379243/points/f3164e1781a296597f6f00dc967c386.pts 04379243/expert_verified/points_label/f3164e1781a296597f6f00dc967c386.seg 04379243
04379243/points/798a07e42d76013582695d8aaeacccc5.pts 04379243/expert_verified/points_label/798a07e42d76013582695d8aaeacccc5.seg 04379243
03948459/points/cc014e78b5cd9e7ed957eaf7f4edb205.pts 03948459/expert_verified/points_label/cc014e78b5cd9e7ed957eaf7f4edb205.seg 03948459
03636649/points/b3a98808fb1ccd892a5041fadf25a502.pts 03636649/expert_verified/points_label/b3a98808fb1ccd892a5041fadf25a502.seg 03636649
04379243/points/9472c006a5d35b9ab606ece4189242ff.pts 04379243/expert_verified/points_label/9472c006a5d35b9ab606ece4189242ff.seg 04379243
03001627/points/3f04adffb69b5ebee95cd0dc8c2f0e83.pts 03001627/expert_verified/points_label/3f04adffb69b5ebee95cd0dc8c2f0e83.seg 03001627
03001627/points/26aa22bd1da8b8c5b1a5c6ecbc81953c.pts 03001627/expert_verified/points_label/26aa22bd1da8b8c5b1a5c6ecbc81953c.seg 03001627
03001627/points/f68ecc9ec512915f36d8dd30a594b2af.pts 03001627/expert_verified/points_label/f68ecc9ec512915f36d8dd30a594b2af.seg 03001627
03642806/points/6489453e322cdb53f9f3c6290096f50f.pts 03642806/expert_verified/points_label/6489453e322cdb53f9f3c6290096f50f.seg 03642806
03001627/points/c53fa6829ec9a947d13b7d13ee32497.pts 03001627/expert_verified/points_label/c53fa6829ec9a947d13b7d13ee32497.seg 03001627
04379243/points/7f1bd688960e2c1b97f2016c3d6097c9.pts 04379243/expert_verified/points_label/7f1bd688960e2c1b97f2016c3d6097c9.seg 04379243
02958343/points/edb2ab8a1d7e20f36436916a86a90ed7.pts 02958343/expert_verified/points_label/edb2ab8a1d7e20f36436916a86a90ed7.seg 02958343
04379243/points/159a2a760327ca5bababe11fcea8796.pts 04379243/expert_verified/points_label/159a2a760327ca5bababe11fcea8796.seg 04379243
02958343/points/988108a7536d686824065b218dc1b5b9.pts 02958343/expert_verified/points_label/988108a7536d686824065b218dc1b5b9.seg 02958343
03636649/points/c695408a86062c4d242ea50288b3f64.pts 03636649/expert_verified/points_label/c695408a86062c4d242ea50288b3f64.seg 03636649
04379243/points/2e7cb2cbfbbb4d002ee19ebe356c2dcb.pts 04379243/expert_verified/points_label/2e7cb2cbfbbb4d002ee19ebe356c2dcb.seg 04379243
02691156/points/3d23703a618ce7df1e569ed4e4cfe84.pts 02691156/expert_verified/points_label/3d23703a618ce7df1e569ed4e4cfe84.seg 02691156
03636649/points/97b7d9aabe38f91df11c97be803c47d.pts 03636649/expert_verified/points_label/97b7d9aabe38f91df11c97be803c47d.seg 03636649
04379243/points/5be1589df948b227c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/5be1589df948b227c955e5ed03ef3a2f.seg 04379243
04379243/points/8ea7ca2c8b48eb68ab610b0c94236463.pts 04379243/expert_verified/points_label/8ea7ca2c8b48eb68ab610b0c94236463.seg 04379243
02958343/points/eb56379e243b0e2090da6b3e2ed8b49d.pts 02958343/expert_verified/points_label/eb56379e243b0e2090da6b3e2ed8b49d.seg 02958343
03001627/points/cc30a723aeba69a139e0f39f5249b0ba.pts 03001627/expert_verified/points_label/cc30a723aeba69a139e0f39f5249b0ba.seg 03001627
03001627/points/ff8efd10f5e6c5c7c6c0380e62f2644.pts 03001627/expert_verified/points_label/ff8efd10f5e6c5c7c6c0380e62f2644.seg 03001627
02691156/points/d0ee4253d406b3f05e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/d0ee4253d406b3f05e9e2656aff7dd5b.seg 02691156
02691156/points/9afe827a622d8ca28699933784576e73.pts 02691156/expert_verified/points_label/9afe827a622d8ca28699933784576e73.seg 02691156
03467517/points/d82fc6db200cdf6ea24eeca91f583600.pts 03467517/expert_verified/points_label/d82fc6db200cdf6ea24eeca91f583600.seg 03467517
03642806/points/6123321e3af0b6328204b359ccd3949e.pts 03642806/expert_verified/points_label/6123321e3af0b6328204b359ccd3949e.seg 03642806
03636649/points/e15defcb3dd448094fffb007974c9976.pts 03636649/expert_verified/points_label/e15defcb3dd448094fffb007974c9976.seg 03636649
03001627/points/c7fe45610d10cb108ad3a7d07aac2767.pts 03001627/expert_verified/points_label/c7fe45610d10cb108ad3a7d07aac2767.seg 03001627
04379243/points/bfaa1c23d2622422ad16260d4d73b56.pts 04379243/expert_verified/points_label/bfaa1c23d2622422ad16260d4d73b56.seg 04379243
04379243/points/8e3fc5f1f8e9658ce8b2b8dc0c816caf.pts 04379243/expert_verified/points_label/8e3fc5f1f8e9658ce8b2b8dc0c816caf.seg 04379243
03467517/points/1a96f73d0929bd4793f0194265a9746c.pts 03467517/expert_verified/points_label/1a96f73d0929bd4793f0194265a9746c.seg 03467517
02691156/points/86b11ae736659136ca8607f540cc62ba.pts 02691156/expert_verified/points_label/86b11ae736659136ca8607f540cc62ba.seg 02691156
04379243/points/4c4c719ac4b61d8f812c9aaa38f9a422.pts 04379243/expert_verified/points_label/4c4c719ac4b61d8f812c9aaa38f9a422.seg 04379243
04379243/points/443eca86041e57ab1e99b149cff6a230.pts 04379243/expert_verified/points_label/443eca86041e57ab1e99b149cff6a230.seg 04379243
03948459/points/6b2d89a7f2b173f0d9deb3f829cc2475.pts 03948459/expert_verified/points_label/6b2d89a7f2b173f0d9deb3f829cc2475.seg 03948459
04379243/points/8d84471c4af977d917271868b642acd3.pts 04379243/expert_verified/points_label/8d84471c4af977d917271868b642acd3.seg 04379243
03636649/points/b78bef16d4f44844931e98da3a93e73e.pts 03636649/expert_verified/points_label/b78bef16d4f44844931e98da3a93e73e.seg 03636649
03636649/points/29985e44b73051d923500a5b036df62e.pts 03636649/expert_verified/points_label/29985e44b73051d923500a5b036df62e.seg 03636649
03642806/points/4f3575df3821e08c466909b3e9553909.pts 03642806/expert_verified/points_label/4f3575df3821e08c466909b3e9553909.seg 03642806
03001627/points/3774a2b8c71e70b9f18a36d57b7cced0.pts 03001627/expert_verified/points_label/3774a2b8c71e70b9f18a36d57b7cced0.seg 03001627
03001627/points/3ea40a75f22515557dcf230d8b7d162e.pts 03001627/expert_verified/points_label/3ea40a75f22515557dcf230d8b7d162e.seg 03001627
03001627/points/33c4f94e97c3fefd19fb4103277a6b93.pts 03001627/expert_verified/points_label/33c4f94e97c3fefd19fb4103277a6b93.seg 03001627
03636649/points/d7760d5f9e1e6a622cd2160e449d45ae.pts 03636649/expert_verified/points_label/d7760d5f9e1e6a622cd2160e449d45ae.seg 03636649
02954340/points/7f9ddfff396634f17790cd6f6e8952aa.pts 02954340/expert_verified/points_label/7f9ddfff396634f17790cd6f6e8952aa.seg 02954340
03001627/points/5e706e87ca60bd19ecb01bc908e8cea6.pts 03001627/expert_verified/points_label/5e706e87ca60bd19ecb01bc908e8cea6.seg 03001627
04379243/points/90c19c729cabdb864b8710a3469971b1.pts 04379243/expert_verified/points_label/90c19c729cabdb864b8710a3469971b1.seg 04379243
02691156/points/d08471df3e76602427743256ca3834f.pts 02691156/expert_verified/points_label/d08471df3e76602427743256ca3834f.seg 02691156
02958343/points/67c229c70e64a25e69c2e0a91b39f742.pts 02958343/expert_verified/points_label/67c229c70e64a25e69c2e0a91b39f742.seg 02958343
04379243/points/1011e1c9812b84d2a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/1011e1c9812b84d2a9ed7bb5b55809f8.seg 04379243
03636649/points/3e2d51c40b37c9c086052e834fbd2c4a.pts 03636649/expert_verified/points_label/3e2d51c40b37c9c086052e834fbd2c4a.seg 03636649
03001627/points/6b385a32489bab4abbc7a6acbd8f058b.pts 03001627/expert_verified/points_label/6b385a32489bab4abbc7a6acbd8f058b.seg 03001627
03001627/points/61d29e8133da0b58d1fd43e2bf80195.pts 03001627/expert_verified/points_label/61d29e8133da0b58d1fd43e2bf80195.seg 03001627
04379243/points/d5f2968e4b7254ccf4104961857ca9c.pts 04379243/expert_verified/points_label/d5f2968e4b7254ccf4104961857ca9c.seg 04379243
04379243/points/30c9865cfc4294a7ad16260d4d73b56.pts 04379243/expert_verified/points_label/30c9865cfc4294a7ad16260d4d73b56.seg 04379243
03001627/points/76919a456a23b9779368d1198f406e7.pts 03001627/expert_verified/points_label/76919a456a23b9779368d1198f406e7.seg 03001627
03001627/points/c12da8acb2c7973597e755dddca14449.pts 03001627/expert_verified/points_label/c12da8acb2c7973597e755dddca14449.seg 03001627
02958343/points/a5dcd1196a1ffa9739f20966eb25504f.pts 02958343/expert_verified/points_label/a5dcd1196a1ffa9739f20966eb25504f.seg 02958343
02691156/points/1deb997079e0b3cd6c1cd53dbc9f7b8e.pts 02691156/expert_verified/points_label/1deb997079e0b3cd6c1cd53dbc9f7b8e.seg 02691156
03636649/points/afb7cc3bbc3595a4e9b3dff83c7ff715.pts 03636649/expert_verified/points_label/afb7cc3bbc3595a4e9b3dff83c7ff715.seg 03636649
03636649/points/b4aee889d5e2a826f6747912091f1965.pts 03636649/expert_verified/points_label/b4aee889d5e2a826f6747912091f1965.seg 03636649
03636649/points/ea71ba1d8d8c8e5888a1de3dc61bfeef.pts 03636649/expert_verified/points_label/ea71ba1d8d8c8e5888a1de3dc61bfeef.seg 03636649
02958343/points/b0c2225ab347e28f1a48cf85d161a723.pts 02958343/expert_verified/points_label/b0c2225ab347e28f1a48cf85d161a723.seg 02958343
03001627/points/1ab8a3b55c14a7b27eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/1ab8a3b55c14a7b27eaeab1f0c9120b7.seg 03001627
03261776/points/c6d19db35f69bae7b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/c6d19db35f69bae7b6d9c2cee7f2f72b.seg 03261776
03001627/points/6d6e634ff34bd350c511e6b9b3b344f3.pts 03001627/expert_verified/points_label/6d6e634ff34bd350c511e6b9b3b344f3.seg 03001627
02691156/points/ce682d7a2bbf77b6fc4b92d3d335214a.pts 02691156/expert_verified/points_label/ce682d7a2bbf77b6fc4b92d3d335214a.seg 02691156
03261776/points/943048e64cc2bc980a070963925e308.pts 03261776/expert_verified/points_label/943048e64cc2bc980a070963925e308.seg 03261776
03642806/points/5a63c5f29f0bc0eb12d8efb2f101da03.pts 03642806/expert_verified/points_label/5a63c5f29f0bc0eb12d8efb2f101da03.seg 03642806
04379243/points/19678fdb9bc926505e4b35ff1ea95f37.pts 04379243/expert_verified/points_label/19678fdb9bc926505e4b35ff1ea95f37.seg 04379243
02958343/points/52f2a2472411fe2e6b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/52f2a2472411fe2e6b418c7d9fedcaa9.seg 02958343
03001627/points/1ee92a9d78cccbda98d2e7dbe701ca48.pts 03001627/expert_verified/points_label/1ee92a9d78cccbda98d2e7dbe701ca48.seg 03001627
03001627/points/795f38ce5d8519938077cafed2bb8242.pts 03001627/expert_verified/points_label/795f38ce5d8519938077cafed2bb8242.seg 03001627
03001627/points/5e5121cc58c4fea78ce66f12ba927a2b.pts 03001627/expert_verified/points_label/5e5121cc58c4fea78ce66f12ba927a2b.seg 03001627
03001627/points/b998016472e9dd7a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/b998016472e9dd7a9b9f2eb77f5e247e.seg 03001627
04379243/points/30b506e5e1fc282afdfcfddf24fb29ec.pts 04379243/expert_verified/points_label/30b506e5e1fc282afdfcfddf24fb29ec.seg 04379243
03624134/points/bcd7ed830358dbd6d58ea69ee1ced10e.pts 03624134/expert_verified/points_label/bcd7ed830358dbd6d58ea69ee1ced10e.seg 03624134
03001627/points/40d202afdcc49c6d35836c728d324152.pts 03001627/expert_verified/points_label/40d202afdcc49c6d35836c728d324152.seg 03001627
03467517/points/fdb74c27462dfd837c481698bd5233b4.pts 03467517/expert_verified/points_label/fdb74c27462dfd837c481698bd5233b4.seg 03467517
02691156/points/dc7c5d12854b9467b96212c8f6cd06e.pts 02691156/expert_verified/points_label/dc7c5d12854b9467b96212c8f6cd06e.seg 02691156
02691156/points/48e9c61de4db838d84b83051fa0ae5d2.pts 02691156/expert_verified/points_label/48e9c61de4db838d84b83051fa0ae5d2.seg 02691156
04379243/points/d187561a6b0cbd0acaed5ce7390f30b7.pts 04379243/expert_verified/points_label/d187561a6b0cbd0acaed5ce7390f30b7.seg 04379243
04379243/points/ae9e04d050f5cba1492d9da2668ec34c.pts 04379243/expert_verified/points_label/ae9e04d050f5cba1492d9da2668ec34c.seg 04379243
04379243/points/72c884f3b9b9119966f379f51753f72b.pts 04379243/expert_verified/points_label/72c884f3b9b9119966f379f51753f72b.seg 04379243
02691156/points/917694a71164f2148e8405d6c51a908.pts 02691156/expert_verified/points_label/917694a71164f2148e8405d6c51a908.seg 02691156
03001627/points/a2441f03fed7c13def31f91fe6afc8fa.pts 03001627/expert_verified/points_label/a2441f03fed7c13def31f91fe6afc8fa.seg 03001627
03001627/points/49c955a80749d2e1a5ffdf44ff86b795.pts 03001627/expert_verified/points_label/49c955a80749d2e1a5ffdf44ff86b795.seg 03001627
03636649/points/c43c89d862e10552b24ecc319936dfe2.pts 03636649/expert_verified/points_label/c43c89d862e10552b24ecc319936dfe2.seg 03636649
03636649/points/e5ff9311bee487f5ca4aaad7dc0e3a16.pts 03636649/expert_verified/points_label/e5ff9311bee487f5ca4aaad7dc0e3a16.seg 03636649
02958343/points/ba0ac1d1e25d3fad63f2c3a55558a78f.pts 02958343/expert_verified/points_label/ba0ac1d1e25d3fad63f2c3a55558a78f.seg 02958343
04379243/points/2f58b1ca8634a6b48b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/2f58b1ca8634a6b48b9b51ae4415d5aa.seg 04379243
03001627/points/c585ee093bfd52af6512b7b24f3d84.pts 03001627/expert_verified/points_label/c585ee093bfd52af6512b7b24f3d84.seg 03001627
03001627/points/46f6a6e0f239282fc8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/46f6a6e0f239282fc8687ff9b0b4e4ac.seg 03001627
03642806/points/f72dc1ffeae0168aadcfd37206a0d18b.pts 03642806/expert_verified/points_label/f72dc1ffeae0168aadcfd37206a0d18b.seg 03642806
03948459/points/1e83ef6ed5d0b78b7efb854782e23566.pts 03948459/expert_verified/points_label/1e83ef6ed5d0b78b7efb854782e23566.seg 03948459
03001627/points/95e5f6e550761aefe65b629e4a22f51e.pts 03001627/expert_verified/points_label/95e5f6e550761aefe65b629e4a22f51e.seg 03001627
03001627/points/b38d05caee69c7ac8fc6229eb64e56a.pts 03001627/expert_verified/points_label/b38d05caee69c7ac8fc6229eb64e56a.seg 03001627
02691156/points/4ff50b9f815c58acca8607f540cc62ba.pts 02691156/expert_verified/points_label/4ff50b9f815c58acca8607f540cc62ba.seg 02691156
03636649/points/78a11c0b8e964c9b41657e31b569b105.pts 03636649/expert_verified/points_label/78a11c0b8e964c9b41657e31b569b105.seg 03636649
02958343/points/b1f75a8e8b9e921a8a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/b1f75a8e8b9e921a8a6cf8c6b92417f2.seg 02958343
02958343/points/a836fc66c01eccca58c27e607f6e2d4c.pts 02958343/expert_verified/points_label/a836fc66c01eccca58c27e607f6e2d4c.seg 02958343
02691156/points/fac4af109beb0108b4f192eea1889928.pts 02691156/expert_verified/points_label/fac4af109beb0108b4f192eea1889928.seg 02691156
03467517/points/b9c10bf6fc2095f93f0194265a9746c.pts 03467517/expert_verified/points_label/b9c10bf6fc2095f93f0194265a9746c.seg 03467517
02691156/points/b976a48c015d6ced5e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/b976a48c015d6ced5e9e2656aff7dd5b.seg 02691156
04379243/points/889f48aa85accd2ee73947fdf756a329.pts 04379243/expert_verified/points_label/889f48aa85accd2ee73947fdf756a329.seg 04379243
02691156/points/b6d61068ef2bf2d46059aeb39e538eb2.pts 02691156/expert_verified/points_label/b6d61068ef2bf2d46059aeb39e538eb2.seg 02691156
04379243/points/d94de64641651a2079b3e1be3524f72f.pts 04379243/expert_verified/points_label/d94de64641651a2079b3e1be3524f72f.seg 04379243
03001627/points/117bd6da01905949a81116f5456ee312.pts 03001627/expert_verified/points_label/117bd6da01905949a81116f5456ee312.seg 03001627
03636649/points/845542d0f578a9db1ec48bc3c478566d.pts 03636649/expert_verified/points_label/845542d0f578a9db1ec48bc3c478566d.seg 03636649
04379243/points/9391dcc782fa7f6bfdad344760a9dafd.pts 04379243/expert_verified/points_label/9391dcc782fa7f6bfdad344760a9dafd.seg 04379243
04379243/points/fe99a1127734f7852b70eac6546e93fd.pts 04379243/expert_verified/points_label/fe99a1127734f7852b70eac6546e93fd.seg 04379243
03001627/points/4e358c2dc0513971f98c0761af40e04.pts 03001627/expert_verified/points_label/4e358c2dc0513971f98c0761af40e04.seg 03001627
03636649/points/53afad2e573b26b141657e31b569b105.pts 03636649/expert_verified/points_label/53afad2e573b26b141657e31b569b105.seg 03636649
04379243/points/3e51742cb382aa1f79b3e1be3524f72f.pts 04379243/expert_verified/points_label/3e51742cb382aa1f79b3e1be3524f72f.seg 04379243
02958343/points/4f17af1ca7ae689d409b2c4484d833cc.pts 02958343/expert_verified/points_label/4f17af1ca7ae689d409b2c4484d833cc.seg 02958343
03467517/points/c739664436ac5237aa0c867d5b070a5d.pts 03467517/expert_verified/points_label/c739664436ac5237aa0c867d5b070a5d.seg 03467517
03797390/points/61c10dccfa8e508e2d66cbf6a91063.pts 03797390/expert_verified/points_label/61c10dccfa8e508e2d66cbf6a91063.seg 03797390
03467517/points/aa86d20d03b2303593f0194265a9746c.pts 03467517/expert_verified/points_label/aa86d20d03b2303593f0194265a9746c.seg 03467517
04379243/points/2f98d5e721e84debaa8081a7009091db.pts 04379243/expert_verified/points_label/2f98d5e721e84debaa8081a7009091db.seg 04379243
04379243/points/2a0f853dadd841f96f1e07a56c129dfc.pts 04379243/expert_verified/points_label/2a0f853dadd841f96f1e07a56c129dfc.seg 04379243
03001627/points/8031478c3fe31ddcc337647acafe65f0.pts 03001627/expert_verified/points_label/8031478c3fe31ddcc337647acafe65f0.seg 03001627
03636649/points/a53112591be182b9d93768e7b9b1eabf.pts 03636649/expert_verified/points_label/a53112591be182b9d93768e7b9b1eabf.seg 03636649
03001627/points/5bc916f8b9d0a7c6b40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/5bc916f8b9d0a7c6b40f0ac0fb9a650d.seg 03001627
02691156/points/f2d4b8440d4bde5330afbcb38d77d0c3.pts 02691156/expert_verified/points_label/f2d4b8440d4bde5330afbcb38d77d0c3.seg 02691156
03001627/points/e4274fc2b9e4a5511882515d09f3979e.pts 03001627/expert_verified/points_label/e4274fc2b9e4a5511882515d09f3979e.seg 03001627
03001627/points/9ab18a33335373b2659dda512294c744.pts 03001627/expert_verified/points_label/9ab18a33335373b2659dda512294c744.seg 03001627
04379243/points/32ea6609eb659a2cec3367bccf60e518.pts 04379243/expert_verified/points_label/32ea6609eb659a2cec3367bccf60e518.seg 04379243
04379243/points/759cb93134fd5efde76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/759cb93134fd5efde76bc197b3a3ffc0.seg 04379243
03001627/points/b8b5e172ee58899df2d9e72ba502035.pts 03001627/expert_verified/points_label/b8b5e172ee58899df2d9e72ba502035.seg 03001627
03001627/points/1886b3e3f3d4af3ace522e6dda26fb51.pts 03001627/expert_verified/points_label/1886b3e3f3d4af3ace522e6dda26fb51.seg 03001627
03948459/points/3f5f657bec9a21814ce6ac98dc4781fe.pts 03948459/expert_verified/points_label/3f5f657bec9a21814ce6ac98dc4781fe.seg 03948459
04379243/points/5adf5a7173e588ad76e9713f57a5fcb6.pts 04379243/expert_verified/points_label/5adf5a7173e588ad76e9713f57a5fcb6.seg 04379243
03001627/points/f33b6f791e9d64387d01b77e04a0bc7b.pts 03001627/expert_verified/points_label/f33b6f791e9d64387d01b77e04a0bc7b.seg 03001627
04379243/points/4e928377ae98ed8d99e8bf807e902261.pts 04379243/expert_verified/points_label/4e928377ae98ed8d99e8bf807e902261.seg 04379243
03001627/points/d7867d215f52107ba5e8cf3aa1686d66.pts 03001627/expert_verified/points_label/d7867d215f52107ba5e8cf3aa1686d66.seg 03001627
02691156/points/bddc2c1a4fae008947a1dbf5fd48a4dd.pts 02691156/expert_verified/points_label/bddc2c1a4fae008947a1dbf5fd48a4dd.seg 02691156
02958343/points/bafacc7f28509d4157abc6fa0d632bc7.pts 02958343/expert_verified/points_label/bafacc7f28509d4157abc6fa0d632bc7.seg 02958343
02691156/points/a14b262838529c2c81e1d9f6b27f1a92.pts 02691156/expert_verified/points_label/a14b262838529c2c81e1d9f6b27f1a92.seg 02691156
03001627/points/38afa26a419ea3abed040525648fc6d7.pts 03001627/expert_verified/points_label/38afa26a419ea3abed040525648fc6d7.seg 03001627
04379243/points/79f63a1564928af071a782a4379556c7.pts 04379243/expert_verified/points_label/79f63a1564928af071a782a4379556c7.seg 04379243
04379243/points/cbd1cd9b5423f890beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/cbd1cd9b5423f890beedb4c8fd29e2d1.seg 04379243
02691156/points/d74767519393a937f73e5bc170b7e2be.pts 02691156/expert_verified/points_label/d74767519393a937f73e5bc170b7e2be.seg 02691156
03001627/points/9a82269e56737217e16571f1d370cad9.pts 03001627/expert_verified/points_label/9a82269e56737217e16571f1d370cad9.seg 03001627
03001627/points/6e1e73e14637a28da1c367d7a459a9b7.pts 03001627/expert_verified/points_label/6e1e73e14637a28da1c367d7a459a9b7.seg 03001627
03797390/points/eecb13f61a93b4048f58d8b19de93f99.pts 03797390/expert_verified/points_label/eecb13f61a93b4048f58d8b19de93f99.seg 03797390
03001627/points/4f7523a3d276bfae4b3c42e318f3affc.pts 03001627/expert_verified/points_label/4f7523a3d276bfae4b3c42e318f3affc.seg 03001627
03624134/points/f19fe19693937db1cb03b57fca000b1f.pts 03624134/expert_verified/points_label/f19fe19693937db1cb03b57fca000b1f.seg 03624134
02958343/points/c3858a8b73dcb137e3bdba9430565083.pts 02958343/expert_verified/points_label/c3858a8b73dcb137e3bdba9430565083.seg 02958343
04379243/points/3ce930bb150aef8a69fb38085fbc320c.pts 04379243/expert_verified/points_label/3ce930bb150aef8a69fb38085fbc320c.seg 04379243
04379243/points/75e3cbf4b1ef0df971a782a4379556c7.pts 04379243/expert_verified/points_label/75e3cbf4b1ef0df971a782a4379556c7.seg 04379243
04379243/points/5040f8f3e2293db448e116352760c52d.pts 04379243/expert_verified/points_label/5040f8f3e2293db448e116352760c52d.seg 04379243
04379243/points/edaf24be15738ea2c5d1c45cadcaa3eb.pts 04379243/expert_verified/points_label/edaf24be15738ea2c5d1c45cadcaa3eb.seg 04379243
04379243/points/6fb52c296531dc17beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/6fb52c296531dc17beedb4c8fd29e2d1.seg 04379243
04379243/points/e777df6ffb40e3a1853d412328e7e7a6.pts 04379243/expert_verified/points_label/e777df6ffb40e3a1853d412328e7e7a6.seg 04379243
03001627/points/9c103621101bcf9919fb4103277a6b93.pts 03001627/expert_verified/points_label/9c103621101bcf9919fb4103277a6b93.seg 03001627
03001627/points/5d20adaf6d8f89fa2f1c10544d7d6f.pts 03001627/expert_verified/points_label/5d20adaf6d8f89fa2f1c10544d7d6f.seg 03001627
02691156/points/b80bd34ab330babbc8727b27ee96a4b7.pts 02691156/expert_verified/points_label/b80bd34ab330babbc8727b27ee96a4b7.seg 02691156
04379243/points/50d898f6d1c05cee2d99129afd32edf4.pts 04379243/expert_verified/points_label/50d898f6d1c05cee2d99129afd32edf4.seg 04379243
04379243/points/c0c836c630cdb4bb664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/c0c836c630cdb4bb664b3b9b23ddfcbc.seg 04379243
03790512/points/a1553e0bb7897a7ace0bf41e5f45753d.pts 03790512/expert_verified/points_label/a1553e0bb7897a7ace0bf41e5f45753d.seg 03790512
03467517/points/7701180906a0aa156a7ae841f1f88f87.pts 03467517/expert_verified/points_label/7701180906a0aa156a7ae841f1f88f87.seg 03467517
03467517/points/3ef569c13f4ab5f83ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3ef569c13f4ab5f83ac61a2f8346a8f.seg 03467517
03636649/points/3834d7f376879c03eca29403b7226aa1.pts 03636649/expert_verified/points_label/3834d7f376879c03eca29403b7226aa1.seg 03636649
02958343/points/34ab29cea66952f16f48edd113a40fce.pts 02958343/expert_verified/points_label/34ab29cea66952f16f48edd113a40fce.seg 02958343
02958343/points/e24f388736f4e6fd2cdd250493632937.pts 02958343/expert_verified/points_label/e24f388736f4e6fd2cdd250493632937.seg 02958343
03001627/points/3ae022522800685c610195e4fb10d1de.pts 03001627/expert_verified/points_label/3ae022522800685c610195e4fb10d1de.seg 03001627
02691156/points/49660fd24e5c2fbab87697d3904b168b.pts 02691156/expert_verified/points_label/49660fd24e5c2fbab87697d3904b168b.seg 02691156
03642806/points/2d5d4d79cd464298566636e42679cc7f.pts 03642806/expert_verified/points_label/2d5d4d79cd464298566636e42679cc7f.seg 03642806
04379243/points/7988dedacce42552ab610b0c94236463.pts 04379243/expert_verified/points_label/7988dedacce42552ab610b0c94236463.seg 04379243
04379243/points/91ed62f2b3fd5919f12d7184a2ad3430.pts 04379243/expert_verified/points_label/91ed62f2b3fd5919f12d7184a2ad3430.seg 04379243
03001627/points/a5898fefb1733333a82b0d8d157287f5.pts 03001627/expert_verified/points_label/a5898fefb1733333a82b0d8d157287f5.seg 03001627
04379243/points/b4ef1de99422b08768661782af60b711.pts 04379243/expert_verified/points_label/b4ef1de99422b08768661782af60b711.seg 04379243
03001627/points/df2b7e697ab6ca0f155d75bbf62b80.pts 03001627/expert_verified/points_label/df2b7e697ab6ca0f155d75bbf62b80.seg 03001627
03467517/points/408a8e1b51266b9ccc34b900bb2492e.pts 03467517/expert_verified/points_label/408a8e1b51266b9ccc34b900bb2492e.seg 03467517
03001627/points/597f2b2153af0c544aabcf2a7cb640f9.pts 03001627/expert_verified/points_label/597f2b2153af0c544aabcf2a7cb640f9.seg 03001627
03001627/points/6870fbd4a7b733b0674f1c30a8cad95a.pts 03001627/expert_verified/points_label/6870fbd4a7b733b0674f1c30a8cad95a.seg 03001627
03001627/points/e35d7d19dcdc9e5c30e06a011e63236a.pts 03001627/expert_verified/points_label/e35d7d19dcdc9e5c30e06a011e63236a.seg 03001627
04225987/points/58ade10f7f87edc6e860048d7ced02e3.pts 04225987/expert_verified/points_label/58ade10f7f87edc6e860048d7ced02e3.seg 04225987
04379243/points/39cf5ae2b497715a84253b2030fab070.pts 04379243/expert_verified/points_label/39cf5ae2b497715a84253b2030fab070.seg 04379243
04379243/points/ab7b0db92f96381f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/ab7b0db92f96381f8cbb8bac2032149c.seg 04379243
03001627/points/b117b01ab380362db8134b0fbf68257d.pts 03001627/expert_verified/points_label/b117b01ab380362db8134b0fbf68257d.seg 03001627
03467517/points/913f3c90f5b78256e98e318d424a4bb9.pts 03467517/expert_verified/points_label/913f3c90f5b78256e98e318d424a4bb9.seg 03467517
04379243/points/831985fb385a5b2a9ae2d75b4fc35b7.pts 04379243/expert_verified/points_label/831985fb385a5b2a9ae2d75b4fc35b7.seg 04379243
03467517/points/482b8b9a225b6ca1d57700c05b1862d8.pts 03467517/expert_verified/points_label/482b8b9a225b6ca1d57700c05b1862d8.seg 03467517
03001627/points/93a6876247c7a015d84b8ba651dfb8ac.pts 03001627/expert_verified/points_label/93a6876247c7a015d84b8ba651dfb8ac.seg 03001627
04379243/points/a78273aa10b2dfb0bc8d334f99e7f52.pts 04379243/expert_verified/points_label/a78273aa10b2dfb0bc8d334f99e7f52.seg 04379243
04379243/points/3c686ac317c496f9a71c812e027f94d9.pts 04379243/expert_verified/points_label/3c686ac317c496f9a71c812e027f94d9.seg 04379243
02691156/points/50755e616df58fe566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/50755e616df58fe566cf1b4a8fc3914e.seg 02691156
03001627/points/8cedc8e684d60ff42a06d8c81262ef96.pts 03001627/expert_verified/points_label/8cedc8e684d60ff42a06d8c81262ef96.seg 03001627
04379243/points/f74c321042dbc8e684d78f017ff73fd6.pts 04379243/expert_verified/points_label/f74c321042dbc8e684d78f017ff73fd6.seg 04379243
02958343/points/5130947e5f18e73a8321b7d65a99d2a.pts 02958343/expert_verified/points_label/5130947e5f18e73a8321b7d65a99d2a.seg 02958343
03261776/points/f5d210ff14ca9d29b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/f5d210ff14ca9d29b6d9c2cee7f2f72b.seg 03261776
03001627/points/d36de0f850783d8fd6b3090036b71698.pts 03001627/expert_verified/points_label/d36de0f850783d8fd6b3090036b71698.seg 03001627
03001627/points/6897c2665267cca39eea64ae4d2b4158.pts 03001627/expert_verified/points_label/6897c2665267cca39eea64ae4d2b4158.seg 03001627
03001627/points/6e98c5d61e008b4c2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/6e98c5d61e008b4c2871cc0b3cc1a485.seg 03001627
02958343/points/92f697d036addb55ed576c2966428f.pts 02958343/expert_verified/points_label/92f697d036addb55ed576c2966428f.seg 02958343
04379243/points/f3fd419f725aa894ba5342d638d0c267.pts 04379243/expert_verified/points_label/f3fd419f725aa894ba5342d638d0c267.seg 04379243
04379243/points/62eff79cf2e75bc2765ee729adbdf968.pts 04379243/expert_verified/points_label/62eff79cf2e75bc2765ee729adbdf968.seg 04379243
03001627/points/98a1f8651c962402492d9da2668ec34c.pts 03001627/expert_verified/points_label/98a1f8651c962402492d9da2668ec34c.seg 03001627
03636649/points/d90639e69c82f864eb2d9895648d1206.pts 03636649/expert_verified/points_label/d90639e69c82f864eb2d9895648d1206.seg 03636649
02954340/points/a1494210f6774b87b3e0e60b857dde8f.pts 02954340/expert_verified/points_label/a1494210f6774b87b3e0e60b857dde8f.seg 02954340
03467517/points/d528407fe43b5df193f0194265a9746c.pts 03467517/expert_verified/points_label/d528407fe43b5df193f0194265a9746c.seg 03467517
03636649/points/776e4b38023091002cd2160e449d45ae.pts 03636649/expert_verified/points_label/776e4b38023091002cd2160e449d45ae.seg 03636649
04379243/points/91df49ec00f2c5ce73f1ca2ca101a20d.pts 04379243/expert_verified/points_label/91df49ec00f2c5ce73f1ca2ca101a20d.seg 04379243
04379243/points/47f25d5b367326ceaaf15b62af6b513f.pts 04379243/expert_verified/points_label/47f25d5b367326ceaaf15b62af6b513f.seg 04379243
04379243/points/f5d6579b3a1f5a879d2be74cfb51ade1.pts 04379243/expert_verified/points_label/f5d6579b3a1f5a879d2be74cfb51ade1.seg 04379243
02691156/points/f6ea6663b48bf78261f1ef59130c405d.pts 02691156/expert_verified/points_label/f6ea6663b48bf78261f1ef59130c405d.seg 02691156
03001627/points/63da17eda9d415b5319c5e90e9cc9126.pts 03001627/expert_verified/points_label/63da17eda9d415b5319c5e90e9cc9126.seg 03001627
02691156/points/9fb60716f0f5a2b84408eb298433d643.pts 02691156/expert_verified/points_label/9fb60716f0f5a2b84408eb298433d643.seg 02691156
02773838/points/5161d9adede671d6edc32c5c9ec9f827.pts 02773838/expert_verified/points_label/5161d9adede671d6edc32c5c9ec9f827.seg 02773838
04379243/points/696beb1883be838cc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/696beb1883be838cc955e5ed03ef3a2f.seg 04379243
03001627/points/bc184c3cbe3349b19fb4103277a6b93.pts 03001627/expert_verified/points_label/bc184c3cbe3349b19fb4103277a6b93.seg 03001627
03642806/points/28fbfd8b8c9c6f16e1e44e2fc05361d9.pts 03642806/expert_verified/points_label/28fbfd8b8c9c6f16e1e44e2fc05361d9.seg 03642806
04379243/points/506e4e67efe1794c1dacbc3d67b5a11a.pts 04379243/expert_verified/points_label/506e4e67efe1794c1dacbc3d67b5a11a.seg 04379243
02691156/points/a48676cfe44fd9bee40acb87a6be88b3.pts 02691156/expert_verified/points_label/a48676cfe44fd9bee40acb87a6be88b3.seg 02691156
04379243/points/9e5926bfdc7f01749e65a3d2929a9516.pts 04379243/expert_verified/points_label/9e5926bfdc7f01749e65a3d2929a9516.seg 04379243
04379243/points/dc47d49db6ac670635d498476a30ff0e.pts 04379243/expert_verified/points_label/dc47d49db6ac670635d498476a30ff0e.seg 04379243
04379243/points/33c6e3b21a67b750e78d7b497732dce1.pts 04379243/expert_verified/points_label/33c6e3b21a67b750e78d7b497732dce1.seg 04379243
04379243/points/27295a6f585b7817febad4f49b26ec52.pts 04379243/expert_verified/points_label/27295a6f585b7817febad4f49b26ec52.seg 04379243
03624134/points/6f8b660661269406504c6b6d62466c67.pts 03624134/expert_verified/points_label/6f8b660661269406504c6b6d62466c67.seg 03624134
03642806/points/dbc61cbed5f7f2b33c1abb78f1519c49.pts 03642806/expert_verified/points_label/dbc61cbed5f7f2b33c1abb78f1519c49.seg 03642806
03001627/points/374bec02e71fe06528b4c5ec471dc963.pts 03001627/expert_verified/points_label/374bec02e71fe06528b4c5ec471dc963.seg 03001627
03001627/points/b41aaea5754adae0444b41d6d7f557fa.pts 03001627/expert_verified/points_label/b41aaea5754adae0444b41d6d7f557fa.seg 03001627
03001627/points/7f4f73ad1b3f882ba14472becb07b261.pts 03001627/expert_verified/points_label/7f4f73ad1b3f882ba14472becb07b261.seg 03001627
03001627/points/b80122c3a0543a7b7eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/b80122c3a0543a7b7eaeab1f0c9120b7.seg 03001627
04379243/points/2e4fbab46e264616d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/2e4fbab46e264616d93768e7b9b1eabf.seg 04379243
03001627/points/4a12589099b05c51e13b3410f3683610.pts 03001627/expert_verified/points_label/4a12589099b05c51e13b3410f3683610.seg 03001627
03001627/points/bc523df998d94c7223ac0bd64c9cb255.pts 03001627/expert_verified/points_label/bc523df998d94c7223ac0bd64c9cb255.seg 03001627
02691156/points/218caa58819e10d1fe40308d822f996c.pts 02691156/expert_verified/points_label/218caa58819e10d1fe40308d822f996c.seg 02691156
04379243/points/a5e951c9d7a9a93f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/a5e951c9d7a9a93f8cbb8bac2032149c.seg 04379243
03636649/points/f228f6cd86162beb659dda512294c744.pts 03636649/expert_verified/points_label/f228f6cd86162beb659dda512294c744.seg 03636649
03467517/points/648a820e550bdfd093f0194265a9746c.pts 03467517/expert_verified/points_label/648a820e550bdfd093f0194265a9746c.seg 03467517
03624134/points/8f61777bf6b57fedc13545c5b1a2e607.pts 03624134/expert_verified/points_label/8f61777bf6b57fedc13545c5b1a2e607.seg 03624134
03001627/points/bb9efb4912a018b3c329e2758ab09ecb.pts 03001627/expert_verified/points_label/bb9efb4912a018b3c329e2758ab09ecb.seg 03001627
03001627/points/fdac1f9c0b030841c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/fdac1f9c0b030841c8687ff9b0b4e4ac.seg 03001627
02691156/points/8ac8c21b63ff535fca8607f540cc62ba.pts 02691156/expert_verified/points_label/8ac8c21b63ff535fca8607f540cc62ba.seg 02691156
03467517/points/4e4d180e78d8b52a93f0194265a9746c.pts 03467517/expert_verified/points_label/4e4d180e78d8b52a93f0194265a9746c.seg 03467517
03636649/points/7bc1b202ebf000625949e084b65603cf.pts 03636649/expert_verified/points_label/7bc1b202ebf000625949e084b65603cf.seg 03636649
03001627/points/3c8362c1e57c30d7e6c5cd45aa112726.pts 03001627/expert_verified/points_label/3c8362c1e57c30d7e6c5cd45aa112726.seg 03001627
03001627/points/5510d5af1ab5714b3c42e318f3affc.pts 03001627/expert_verified/points_label/5510d5af1ab5714b3c42e318f3affc.seg 03001627
04379243/points/4d393b562df7cfad9a16b095d67f7209.pts 04379243/expert_verified/points_label/4d393b562df7cfad9a16b095d67f7209.seg 04379243
03797390/points/e984fd7e97c2be347eaeab1f0c9120b7.pts 03797390/expert_verified/points_label/e984fd7e97c2be347eaeab1f0c9120b7.seg 03797390
03001627/points/483d22dbbee32ee54e5c7d89bdfc49a3.pts 03001627/expert_verified/points_label/483d22dbbee32ee54e5c7d89bdfc49a3.seg 03001627
02691156/points/a5cd14be786fc8175e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/a5cd14be786fc8175e9e2656aff7dd5b.seg 02691156
03636649/points/d4bbd93c0d85e77d7934a0d24a61231.pts 03636649/expert_verified/points_label/d4bbd93c0d85e77d7934a0d24a61231.seg 03636649
03467517/points/7027bc171baae1d663e148e250c0340d.pts 03467517/expert_verified/points_label/7027bc171baae1d663e148e250c0340d.seg 03467517
03636649/points/1a44dd6ee873d443da13974b3533fb59.pts 03636649/expert_verified/points_label/1a44dd6ee873d443da13974b3533fb59.seg 03636649
04379243/points/2e3037a285fd8b5c1be2a853ec4f9e8.pts 04379243/expert_verified/points_label/2e3037a285fd8b5c1be2a853ec4f9e8.seg 04379243
04379243/points/e3b585b15506fa7113f96345312df593.pts 04379243/expert_verified/points_label/e3b585b15506fa7113f96345312df593.seg 04379243
02958343/points/ee1d28a50a2b71e129348d14ca881f7d.pts 02958343/expert_verified/points_label/ee1d28a50a2b71e129348d14ca881f7d.seg 02958343
03001627/points/22af872ac796ed26ff8d7c1096fae070.pts 03001627/expert_verified/points_label/22af872ac796ed26ff8d7c1096fae070.seg 03001627
03642806/points/9b4ab67eb448c49c11ced4a54f2e6229.pts 03642806/expert_verified/points_label/9b4ab67eb448c49c11ced4a54f2e6229.seg 03642806
03624134/points/1640911b9dc0ef0da95c6095f89cd899.pts 03624134/expert_verified/points_label/1640911b9dc0ef0da95c6095f89cd899.seg 03624134
03001627/points/f6810de4042cc5ce57bd4bc6eae9b341.pts 03001627/expert_verified/points_label/f6810de4042cc5ce57bd4bc6eae9b341.seg 03001627
03001627/points/c46eb7460be602b6bf80985a99195eb8.pts 03001627/expert_verified/points_label/c46eb7460be602b6bf80985a99195eb8.seg 03001627
03624134/points/debbbf239d59d8724662dc124dd336ed.pts 03624134/expert_verified/points_label/debbbf239d59d8724662dc124dd336ed.seg 03624134
04379243/points/5b51e63726f21bb6a75d03186a0409e2.pts 04379243/expert_verified/points_label/5b51e63726f21bb6a75d03186a0409e2.seg 04379243
02691156/points/b59a7cab8e95f6eaf3a7414a84b5637.pts 02691156/expert_verified/points_label/b59a7cab8e95f6eaf3a7414a84b5637.seg 02691156
03001627/points/52c32b187590e8f3bba5aaac798c64af.pts 03001627/expert_verified/points_label/52c32b187590e8f3bba5aaac798c64af.seg 03001627
03001627/points/1c173d970e21e9a8be95ff480950e9ef.pts 03001627/expert_verified/points_label/1c173d970e21e9a8be95ff480950e9ef.seg 03001627
03624134/points/7238d0009faeacb5fd770de1635caa0.pts 03624134/expert_verified/points_label/7238d0009faeacb5fd770de1635caa0.seg 03624134
04379243/points/cc554812025dc498e7ed5b5b11f935c9.pts 04379243/expert_verified/points_label/cc554812025dc498e7ed5b5b11f935c9.seg 04379243
04379243/points/fff492e352c8cb336240c88cd4684446.pts 04379243/expert_verified/points_label/fff492e352c8cb336240c88cd4684446.seg 04379243
03636649/points/e0a2948797cc33b2e19a0cc107ada7cd.pts 03636649/expert_verified/points_label/e0a2948797cc33b2e19a0cc107ada7cd.seg 03636649
03636649/points/fe02f6594ed8b96ae85a3dc26b76b2ae.pts 03636649/expert_verified/points_label/fe02f6594ed8b96ae85a3dc26b76b2ae.seg 03636649
04379243/points/d4a7a1dc0f1a51986f15d61c214769af.pts 04379243/expert_verified/points_label/d4a7a1dc0f1a51986f15d61c214769af.seg 04379243
03624134/points/3dbda789bc59a5f99246ea0301684d80.pts 03624134/expert_verified/points_label/3dbda789bc59a5f99246ea0301684d80.seg 03624134
04379243/points/b82e068c2c18cd67b09f0ca9c143fdfd.pts 04379243/expert_verified/points_label/b82e068c2c18cd67b09f0ca9c143fdfd.seg 04379243
03001627/points/b360f2264526521f1dee989d1177ef4e.pts 03001627/expert_verified/points_label/b360f2264526521f1dee989d1177ef4e.seg 03001627
02691156/points/8ff8f3c845e7ae8443afdb9c81ff2967.pts 02691156/expert_verified/points_label/8ff8f3c845e7ae8443afdb9c81ff2967.seg 02691156
03001627/points/ea87765cf9dbe2fe55f46d55537192b6.pts 03001627/expert_verified/points_label/ea87765cf9dbe2fe55f46d55537192b6.seg 03001627
03001627/points/df23ca11080bb439676c272956dad3c2.pts 03001627/expert_verified/points_label/df23ca11080bb439676c272956dad3c2.seg 03001627
03790512/points/a3dfeae5bced3533b37378f3c85478b4.pts 03790512/expert_verified/points_label/a3dfeae5bced3533b37378f3c85478b4.seg 03790512
04379243/points/9af7a071bbd432baa5526f91aecc0c37.pts 04379243/expert_verified/points_label/9af7a071bbd432baa5526f91aecc0c37.seg 04379243
03001627/points/a8b5f5b6bf0cb2d6876b399a99a15c0f.pts 03001627/expert_verified/points_label/a8b5f5b6bf0cb2d6876b399a99a15c0f.seg 03001627
03001627/points/c7e590c0390e8d5debe67d9b32c3ddf8.pts 03001627/expert_verified/points_label/c7e590c0390e8d5debe67d9b32c3ddf8.seg 03001627
03790512/points/4f30742005b7c20e883158c0007ed9ba.pts 03790512/expert_verified/points_label/4f30742005b7c20e883158c0007ed9ba.seg 03790512
04379243/points/40b632472f8e69a7664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/40b632472f8e69a7664b3b9b23ddfcbc.seg 04379243
03467517/points/d71c17b4d1ffa131f10a27cbb87f3a5.pts 03467517/expert_verified/points_label/d71c17b4d1ffa131f10a27cbb87f3a5.seg 03467517
04379243/points/f563e9cd92a0dbe5a07b1c1d0ca9cf45.pts 04379243/expert_verified/points_label/f563e9cd92a0dbe5a07b1c1d0ca9cf45.seg 04379243
03797390/points/1a97f3c83016abca21d0de04f408950f.pts 03797390/expert_verified/points_label/1a97f3c83016abca21d0de04f408950f.seg 03797390
04379243/points/c3135e3b21b42e132449009b96f8a6ed.pts 04379243/expert_verified/points_label/c3135e3b21b42e132449009b96f8a6ed.seg 04379243
03636649/points/89b168160388c29da996f5a90dae9cac.pts 03636649/expert_verified/points_label/89b168160388c29da996f5a90dae9cac.seg 03636649
02958343/points/8bbbfdbec9251733ace5721ccacba16.pts 02958343/expert_verified/points_label/8bbbfdbec9251733ace5721ccacba16.seg 02958343
04379243/points/db5a895ae7358c97b66213207f46bee7.pts 04379243/expert_verified/points_label/db5a895ae7358c97b66213207f46bee7.seg 04379243
03001627/points/6a28919186eb55ecf69d0cf4fdc89b12.pts 03001627/expert_verified/points_label/6a28919186eb55ecf69d0cf4fdc89b12.seg 03001627
04379243/points/e7169243daef074dc82dc2efb3363de1.pts 04379243/expert_verified/points_label/e7169243daef074dc82dc2efb3363de1.seg 04379243
03467517/points/4ae5a491c3ffb473462c6cdd250c26bb.pts 03467517/expert_verified/points_label/4ae5a491c3ffb473462c6cdd250c26bb.seg 03467517
04379243/points/e1a8e9e2059f4792fbb8cbddab1c2002.pts 04379243/expert_verified/points_label/e1a8e9e2059f4792fbb8cbddab1c2002.seg 04379243
03467517/points/364f85832427992343820c03f9f59458.pts 03467517/expert_verified/points_label/364f85832427992343820c03f9f59458.seg 03467517
02958343/points/4822076e48b366371f0d59cde6139796.pts 02958343/expert_verified/points_label/4822076e48b366371f0d59cde6139796.seg 02958343
03636649/points/d34a10201a5448a253cf897b7fc1d12.pts 03636649/expert_verified/points_label/d34a10201a5448a253cf897b7fc1d12.seg 03636649
03467517/points/77095861248c816693f0194265a9746c.pts 03467517/expert_verified/points_label/77095861248c816693f0194265a9746c.seg 03467517
04379243/points/dacde6546ca2e07f66dc6ea1ac82d91f.pts 04379243/expert_verified/points_label/dacde6546ca2e07f66dc6ea1ac82d91f.seg 04379243
03636649/points/670ad2964ad5a98c9f1a71e46bbde97c.pts 03636649/expert_verified/points_label/670ad2964ad5a98c9f1a71e46bbde97c.seg 03636649
02691156/points/77c9fd0f0c6b0e9fca8607f540cc62ba.pts 02691156/expert_verified/points_label/77c9fd0f0c6b0e9fca8607f540cc62ba.seg 02691156
03001627/points/5fc6b04623ae6a9963ed57e35c972b4b.pts 03001627/expert_verified/points_label/5fc6b04623ae6a9963ed57e35c972b4b.seg 03001627
02958343/points/f18093ac0242d439f500cc506a763c18.pts 02958343/expert_verified/points_label/f18093ac0242d439f500cc506a763c18.seg 02958343
03001627/points/2fed64c67552aa689c1db271ad9472a7.pts 03001627/expert_verified/points_label/2fed64c67552aa689c1db271ad9472a7.seg 03001627
03001627/points/bf7e8e0dc4f4038cc2567be77cb7ab45.pts 03001627/expert_verified/points_label/bf7e8e0dc4f4038cc2567be77cb7ab45.seg 03001627
04379243/points/690e073a4000c7ae540e292bd26f307a.pts 04379243/expert_verified/points_label/690e073a4000c7ae540e292bd26f307a.seg 04379243
03467517/points/5fc56e6d220d775e381b7fbf79296afb.pts 03467517/expert_verified/points_label/5fc56e6d220d775e381b7fbf79296afb.seg 03467517
04379243/points/8af3fd230ea7ac6518101790733ed6b2.pts 04379243/expert_verified/points_label/8af3fd230ea7ac6518101790733ed6b2.seg 04379243
03636649/points/80436dff2a30721849655ac7c771b113.pts 03636649/expert_verified/points_label/80436dff2a30721849655ac7c771b113.seg 03636649
03790512/points/b767982d38b5171e429f1c522640e6f0.pts 03790512/expert_verified/points_label/b767982d38b5171e429f1c522640e6f0.seg 03790512
03001627/points/40e6fb27aeb9c9ab44f999802029a79a.pts 03001627/expert_verified/points_label/40e6fb27aeb9c9ab44f999802029a79a.seg 03001627
04379243/points/59e1afdec89de9442b70eac6546e93fd.pts 04379243/expert_verified/points_label/59e1afdec89de9442b70eac6546e93fd.seg 04379243
02691156/points/43d8125d940bb2ae850f318836ee7512.pts 02691156/expert_verified/points_label/43d8125d940bb2ae850f318836ee7512.seg 02691156
02691156/points/cbc9d6ae9d22fcc57f3efc94c2d31dc5.pts 02691156/expert_verified/points_label/cbc9d6ae9d22fcc57f3efc94c2d31dc5.seg 02691156
04379243/points/f585560965413925d706ecb3379aa341.pts 04379243/expert_verified/points_label/f585560965413925d706ecb3379aa341.seg 04379243
04379243/points/adee49b8f5251efeaade78cbbf8fad3b.pts 04379243/expert_verified/points_label/adee49b8f5251efeaade78cbbf8fad3b.seg 04379243
03261776/points/ccf84f2cbd3ebeb247ba1bc05b9a0f37.pts 03261776/expert_verified/points_label/ccf84f2cbd3ebeb247ba1bc05b9a0f37.seg 03261776
03001627/points/2343e2c4fa69f33a2ff834514c92e8fd.pts 03001627/expert_verified/points_label/2343e2c4fa69f33a2ff834514c92e8fd.seg 03001627
03636649/points/1d89da4ac1538ada9c949ae6274aa016.pts 03636649/expert_verified/points_label/1d89da4ac1538ada9c949ae6274aa016.seg 03636649
03001627/points/51e14c516e45ec3b18ed59365c9648a7.pts 03001627/expert_verified/points_label/51e14c516e45ec3b18ed59365c9648a7.seg 03001627
03001627/points/1e276a016b664e424d678187b8261d95.pts 03001627/expert_verified/points_label/1e276a016b664e424d678187b8261d95.seg 03001627
03636649/points/4deef34d95367b58c0d95250e682f6ee.pts 03636649/expert_verified/points_label/4deef34d95367b58c0d95250e682f6ee.seg 03636649
03001627/points/5d3eff6a1b9a119da011ccf7cbabf68e.pts 03001627/expert_verified/points_label/5d3eff6a1b9a119da011ccf7cbabf68e.seg 03001627
04379243/points/9afaf5ab87a889f67acae9ce58893de5.pts 04379243/expert_verified/points_label/9afaf5ab87a889f67acae9ce58893de5.seg 04379243
04379243/points/5431993203dfcf797ec12e029bc725db.pts 04379243/expert_verified/points_label/5431993203dfcf797ec12e029bc725db.seg 04379243
03001627/points/6a01eed3a575987211e48e4bcdc4a2a3.pts 03001627/expert_verified/points_label/6a01eed3a575987211e48e4bcdc4a2a3.seg 03001627
02958343/points/a8f2c3adc0671c15c64e95fc6a597455.pts 02958343/expert_verified/points_label/a8f2c3adc0671c15c64e95fc6a597455.seg 02958343
04379243/points/f60960ae4dc8e293c8ce22a41ea48e48.pts 04379243/expert_verified/points_label/f60960ae4dc8e293c8ce22a41ea48e48.seg 04379243
03624134/points/3a4f0118a57093cbf7c4ed45ce654123.pts 03624134/expert_verified/points_label/3a4f0118a57093cbf7c4ed45ce654123.seg 03624134
03636649/points/52783aa89adf06f3250c527721570ba0.pts 03636649/expert_verified/points_label/52783aa89adf06f3250c527721570ba0.seg 03636649
03001627/points/b13a4df698183bf9afb6676a5cd782b6.pts 03001627/expert_verified/points_label/b13a4df698183bf9afb6676a5cd782b6.seg 03001627
03636649/points/26f725bb6578936cd247b9308cd5c441.pts 03636649/expert_verified/points_label/26f725bb6578936cd247b9308cd5c441.seg 03636649
03001627/points/6df1ecffaa0abdbf327289c00b6dc9ca.pts 03001627/expert_verified/points_label/6df1ecffaa0abdbf327289c00b6dc9ca.seg 03001627
04379243/points/3c475d9f0433a7eaad2650d014e970a5.pts 04379243/expert_verified/points_label/3c475d9f0433a7eaad2650d014e970a5.seg 04379243
02958343/points/fee1c13922c07e8711b978ff9450f61b.pts 02958343/expert_verified/points_label/fee1c13922c07e8711b978ff9450f61b.seg 02958343
04379243/points/6bc941dbd290c7f21acdac000802e11c.pts 04379243/expert_verified/points_label/6bc941dbd290c7f21acdac000802e11c.seg 04379243
02958343/points/6333b9c777384ad14362be10a3fc8255.pts 02958343/expert_verified/points_label/6333b9c777384ad14362be10a3fc8255.seg 02958343
03001627/points/9a35f15e924e19db637adadafee6f182.pts 03001627/expert_verified/points_label/9a35f15e924e19db637adadafee6f182.seg 03001627
03001627/points/b0531a0d44fc22144224ee0743294f79.pts 03001627/expert_verified/points_label/b0531a0d44fc22144224ee0743294f79.seg 03001627
03636649/points/913ff6452d0ea43c9d62807daf4a2134.pts 03636649/expert_verified/points_label/913ff6452d0ea43c9d62807daf4a2134.seg 03636649
03467517/points/e45f323ce7ecab8393f0194265a9746c.pts 03467517/expert_verified/points_label/e45f323ce7ecab8393f0194265a9746c.seg 03467517
02691156/points/aa2af754642256c08699933784576e73.pts 02691156/expert_verified/points_label/aa2af754642256c08699933784576e73.seg 02691156
04379243/points/75b308ba45762ad499e8bf807e902261.pts 04379243/expert_verified/points_label/75b308ba45762ad499e8bf807e902261.seg 04379243
03001627/points/3622d983fd6d7b98e3a73d090627e9ba.pts 03001627/expert_verified/points_label/3622d983fd6d7b98e3a73d090627e9ba.seg 03001627
04225987/points/db4c8bf323465e4c537d393009a79347.pts 04225987/expert_verified/points_label/db4c8bf323465e4c537d393009a79347.seg 04225987
04379243/points/132bfde1fabe9ab771a782a4379556c7.pts 04379243/expert_verified/points_label/132bfde1fabe9ab771a782a4379556c7.seg 04379243
03001627/points/3dc8243b17bc790620768660cf080d12.pts 03001627/expert_verified/points_label/3dc8243b17bc790620768660cf080d12.seg 03001627
04379243/points/ccb96ea5f047c97f278d386bfa54545.pts 04379243/expert_verified/points_label/ccb96ea5f047c97f278d386bfa54545.seg 04379243
04379243/points/14ae5631e7dfa10430bbd4cddd04c77b.pts 04379243/expert_verified/points_label/14ae5631e7dfa10430bbd4cddd04c77b.seg 04379243
04379243/points/78a81cbd2a5720d93a938fdd57fac3b4.pts 04379243/expert_verified/points_label/78a81cbd2a5720d93a938fdd57fac3b4.seg 04379243
04379243/points/307bdd2a06137694a10ff7fd5e43a633.pts 04379243/expert_verified/points_label/307bdd2a06137694a10ff7fd5e43a633.seg 04379243
03001627/points/f3573756e64259f2b29d280b4e59c527.pts 03001627/expert_verified/points_label/f3573756e64259f2b29d280b4e59c527.seg 03001627
04379243/points/1815c6431b06dfb4f008d8a3590fb522.pts 04379243/expert_verified/points_label/1815c6431b06dfb4f008d8a3590fb522.seg 04379243
04379243/points/7fda06ada2d897baadab4c26397edfab.pts 04379243/expert_verified/points_label/7fda06ada2d897baadab4c26397edfab.seg 04379243
04379243/points/86b48365b2bd587e61830bc1b4d6c5ea.pts 04379243/expert_verified/points_label/86b48365b2bd587e61830bc1b4d6c5ea.seg 04379243
03948459/points/6aae44dd39fb9476f059c10da31213ea.pts 03948459/expert_verified/points_label/6aae44dd39fb9476f059c10da31213ea.seg 03948459
04379243/points/424c77a1f39ac41620dd2dd4d7d7656c.pts 04379243/expert_verified/points_label/424c77a1f39ac41620dd2dd4d7d7656c.seg 04379243
03001627/points/8778c23fd21bdebf8a80d99ff4e76c20.pts 03001627/expert_verified/points_label/8778c23fd21bdebf8a80d99ff4e76c20.seg 03001627
03001627/points/257deb231ce652169f2349486c570dd4.pts 03001627/expert_verified/points_label/257deb231ce652169f2349486c570dd4.seg 03001627
03642806/points/e5559cd005d5c4942a7b0c74c5f22fc4.pts 03642806/expert_verified/points_label/e5559cd005d5c4942a7b0c74c5f22fc4.seg 03642806
03001627/points/986e49bd8314d7424addf6a5f8726274.pts 03001627/expert_verified/points_label/986e49bd8314d7424addf6a5f8726274.seg 03001627
04379243/points/b3fc5247186936f1dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b3fc5247186936f1dcfcef693e7ec696.seg 04379243
02691156/points/da9d111e1175d318bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/da9d111e1175d318bbf3143b1cb6076a.seg 02691156
04379243/points/54b26954e478b1a34ea8d5f5f27d7ce3.pts 04379243/expert_verified/points_label/54b26954e478b1a34ea8d5f5f27d7ce3.seg 04379243
03001627/points/2d44744a7ea0bf724b3c42e318f3affc.pts 03001627/expert_verified/points_label/2d44744a7ea0bf724b3c42e318f3affc.seg 03001627
04379243/points/9dd63148e5b0a4f79eaa55bb236fb6e1.pts 04379243/expert_verified/points_label/9dd63148e5b0a4f79eaa55bb236fb6e1.seg 04379243
04379243/points/6ab7ebf9b94176456f1e07a56c129dfc.pts 04379243/expert_verified/points_label/6ab7ebf9b94176456f1e07a56c129dfc.seg 04379243
03001627/points/6aaa9bd6e835eb0f9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6aaa9bd6e835eb0f9b9f2eb77f5e247e.seg 03001627
03636649/points/34020466b4342812218c9f1216abefd.pts 03636649/expert_verified/points_label/34020466b4342812218c9f1216abefd.seg 03636649
03001627/points/df7735e2bce09a511f98c0761af40e04.pts 03001627/expert_verified/points_label/df7735e2bce09a511f98c0761af40e04.seg 03001627
03636649/points/1d963d5c54613202b0aa15078ea6f391.pts 03636649/expert_verified/points_label/1d963d5c54613202b0aa15078ea6f391.seg 03636649
03636649/points/5a9e0dd068e2436bd7ebac63aa51083.pts 03636649/expert_verified/points_label/5a9e0dd068e2436bd7ebac63aa51083.seg 03636649
03001627/points/b1f50d8d41a8c53b6197fd390b16d14d.pts 03001627/expert_verified/points_label/b1f50d8d41a8c53b6197fd390b16d14d.seg 03001627
03001627/points/285931af369b12c2ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/285931af369b12c2ccd42a2d6eea63ed.seg 03001627
03636649/points/69429d8ffb5009a82060e7309fc3fc6.pts 03636649/expert_verified/points_label/69429d8ffb5009a82060e7309fc3fc6.seg 03636649
04379243/points/63b53646b3562677d395837145ded71.pts 04379243/expert_verified/points_label/63b53646b3562677d395837145ded71.seg 04379243
03001627/points/ee5ee3f6759aabacf2f43e6f841bd32b.pts 03001627/expert_verified/points_label/ee5ee3f6759aabacf2f43e6f841bd32b.seg 03001627
02691156/points/bdfbf1c555dacd9d325212819caa597d.pts 02691156/expert_verified/points_label/bdfbf1c555dacd9d325212819caa597d.seg 02691156
04379243/points/9f321f05a7808719ab610b0c94236463.pts 04379243/expert_verified/points_label/9f321f05a7808719ab610b0c94236463.seg 04379243
03624134/points/fb1f385d487d13d7aa0079d6fb0f853c.pts 03624134/expert_verified/points_label/fb1f385d487d13d7aa0079d6fb0f853c.seg 03624134
04379243/points/109738784a0a6129a02c88fe01f2b9c1.pts 04379243/expert_verified/points_label/109738784a0a6129a02c88fe01f2b9c1.seg 04379243
03467517/points/65e3bdc247b3ce3d4de904d1abbce016.pts 03467517/expert_verified/points_label/65e3bdc247b3ce3d4de904d1abbce016.seg 03467517
02691156/points/94ce3a5ad2576e73a5cac89017eae8d1.pts 02691156/expert_verified/points_label/94ce3a5ad2576e73a5cac89017eae8d1.seg 02691156
03001627/points/80fab0c55a60abb7dafb0be26f6b45d5.pts 03001627/expert_verified/points_label/80fab0c55a60abb7dafb0be26f6b45d5.seg 03001627
04379243/points/e6ee101d3cb13bdd16a2b5862518c93.pts 04379243/expert_verified/points_label/e6ee101d3cb13bdd16a2b5862518c93.seg 04379243
04379243/points/6f2ffe8c014a6a458af30108ea9ccb6c.pts 04379243/expert_verified/points_label/6f2ffe8c014a6a458af30108ea9ccb6c.seg 04379243
02958343/points/504793ed2da6cf7eba3e2415e22cd45c.pts 02958343/expert_verified/points_label/504793ed2da6cf7eba3e2415e22cd45c.seg 02958343
03467517/points/9e26dcbac33f056c343b0b12983b9982.pts 03467517/expert_verified/points_label/9e26dcbac33f056c343b0b12983b9982.seg 03467517
03467517/points/a92cd0b5d559075daa9518d76daaca23.pts 03467517/expert_verified/points_label/a92cd0b5d559075daa9518d76daaca23.seg 03467517
03636649/points/b6989c99bba1226539b3360f500ac52a.pts 03636649/expert_verified/points_label/b6989c99bba1226539b3360f500ac52a.seg 03636649
03624134/points/cc38f97557029b2a2b5fd8277662be97.pts 03624134/expert_verified/points_label/cc38f97557029b2a2b5fd8277662be97.seg 03624134
03790512/points/41cc9674e700c3fdb37378f3c85478b4.pts 03790512/expert_verified/points_label/41cc9674e700c3fdb37378f3c85478b4.seg 03790512
03001627/points/56b171b1f1521d27291d12adef12641b.pts 03001627/expert_verified/points_label/56b171b1f1521d27291d12adef12641b.seg 03001627
03636649/points/ddc2d39dac6e84506c5b8009db95f66f.pts 03636649/expert_verified/points_label/ddc2d39dac6e84506c5b8009db95f66f.seg 03636649
02691156/points/edc185566c1df89c35fc197bbabcd5bd.pts 02691156/expert_verified/points_label/edc185566c1df89c35fc197bbabcd5bd.seg 02691156
04379243/points/fb5e8a6361262c26acf7920879052e93.pts 04379243/expert_verified/points_label/fb5e8a6361262c26acf7920879052e93.seg 04379243
04379243/points/8862cddf90fddb3119fb4103277a6b93.pts 04379243/expert_verified/points_label/8862cddf90fddb3119fb4103277a6b93.seg 04379243
02691156/points/d5a94c9f09d238c4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/d5a94c9f09d238c4c3a35cee92bb95b.seg 02691156
03636649/points/1682d4404196cf127588e2ca59b15f8.pts 03636649/expert_verified/points_label/1682d4404196cf127588e2ca59b15f8.seg 03636649
04379243/points/2f33abdfe147813e44949d7685cb63ea.pts 04379243/expert_verified/points_label/2f33abdfe147813e44949d7685cb63ea.seg 04379243
03001627/points/e158f7ba6828db5c654ea6737b0d3597.pts 03001627/expert_verified/points_label/e158f7ba6828db5c654ea6737b0d3597.seg 03001627
04379243/points/564474f25a4400c5dc20930e6fc85682.pts 04379243/expert_verified/points_label/564474f25a4400c5dc20930e6fc85682.seg 04379243
04379243/points/eb379b2b95e76502e258d1c3e7302e7b.pts 04379243/expert_verified/points_label/eb379b2b95e76502e258d1c3e7302e7b.seg 04379243
03001627/points/3a1b54325b3565e72ca4b544d68c52.pts 03001627/expert_verified/points_label/3a1b54325b3565e72ca4b544d68c52.seg 03001627
04225987/points/393ca71bd734f3071082f2ea630bf69e.pts 04225987/expert_verified/points_label/393ca71bd734f3071082f2ea630bf69e.seg 04225987
03636649/points/bd1cbcb990375022b45fed2806c331ab.pts 03636649/expert_verified/points_label/bd1cbcb990375022b45fed2806c331ab.seg 03636649
03001627/points/6a9dce6566cd61652b339ec555ba3bfc.pts 03001627/expert_verified/points_label/6a9dce6566cd61652b339ec555ba3bfc.seg 03001627
02691156/points/94379090010cd6bb874c9ce092a813ef.pts 02691156/expert_verified/points_label/94379090010cd6bb874c9ce092a813ef.seg 02691156
02773838/points/d3bd250ca3cb8e29976855a35549333.pts 02773838/expert_verified/points_label/d3bd250ca3cb8e29976855a35549333.seg 02773838
03001627/points/36cb782fbc164ac312591a3ac05fadf1.pts 03001627/expert_verified/points_label/36cb782fbc164ac312591a3ac05fadf1.seg 03001627
03642806/points/2211a40cc77a085362c091e763f81d3.pts 03642806/expert_verified/points_label/2211a40cc77a085362c091e763f81d3.seg 03642806
04379243/points/5cbd726c3ffd8fc49b458816be7a3962.pts 04379243/expert_verified/points_label/5cbd726c3ffd8fc49b458816be7a3962.seg 04379243
02691156/points/72aee7d0e998a68aca8607f540cc62ba.pts 02691156/expert_verified/points_label/72aee7d0e998a68aca8607f540cc62ba.seg 02691156
04379243/points/1c3310f4c05ce1f6a192483aa282f8e5.pts 04379243/expert_verified/points_label/1c3310f4c05ce1f6a192483aa282f8e5.seg 04379243
04379243/points/4ced745f960f7439b91767277279ac70.pts 04379243/expert_verified/points_label/4ced745f960f7439b91767277279ac70.seg 04379243
03642806/points/8d70fb6adc63e21eb7e0383b9609fa5.pts 03642806/expert_verified/points_label/8d70fb6adc63e21eb7e0383b9609fa5.seg 03642806
03001627/points/2bd6800d64c01d677721fafb59ea099.pts 03001627/expert_verified/points_label/2bd6800d64c01d677721fafb59ea099.seg 03001627
03467517/points/1abe78447898821e93f0194265a9746c.pts 03467517/expert_verified/points_label/1abe78447898821e93f0194265a9746c.seg 03467517
02691156/points/9bf3c126d5918c41f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/9bf3c126d5918c41f5c7319b71bdce6e.seg 02691156
03642806/points/1312ea502b4e9b51701c1f58e22b85e8.pts 03642806/expert_verified/points_label/1312ea502b4e9b51701c1f58e22b85e8.seg 03642806
04379243/points/a9cc8112fb8c4ed5dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/a9cc8112fb8c4ed5dfd21203bf8b4b46.seg 04379243
03642806/points/62b25a5e3119b8409023147b38c03c9f.pts 03642806/expert_verified/points_label/62b25a5e3119b8409023147b38c03c9f.seg 03642806
04379243/points/a4fcd8afe8b6de585beaf00da5b709c2.pts 04379243/expert_verified/points_label/a4fcd8afe8b6de585beaf00da5b709c2.seg 04379243
03636649/points/907fd296708ae71dd5fab5deb286066.pts 03636649/expert_verified/points_label/907fd296708ae71dd5fab5deb286066.seg 03636649
04379243/points/c5ae96124c15c734e6c5cd45aa112726.pts 04379243/expert_verified/points_label/c5ae96124c15c734e6c5cd45aa112726.seg 04379243
03642806/points/ef6d43add46d0cae4e07b09c086cc5c4.pts 03642806/expert_verified/points_label/ef6d43add46d0cae4e07b09c086cc5c4.seg 03642806
04379243/points/8d07df2bf706cda58c5591114064d173.pts 04379243/expert_verified/points_label/8d07df2bf706cda58c5591114064d173.seg 04379243
02958343/points/5316fab78a6732f0428df271ebc70bc0.pts 02958343/expert_verified/points_label/5316fab78a6732f0428df271ebc70bc0.seg 02958343
03467517/points/7946e354e342f560c5a468097fc791e4.pts 03467517/expert_verified/points_label/7946e354e342f560c5a468097fc791e4.seg 03467517
03467517/points/d3684d071dcb6bffd3193ed047bef161.pts 03467517/expert_verified/points_label/d3684d071dcb6bffd3193ed047bef161.seg 03467517
04379243/points/33b081062b2195e71771ee930e861b13.pts 04379243/expert_verified/points_label/33b081062b2195e71771ee930e861b13.seg 04379243
02958343/points/511962626501e4abf500cc506a763c18.pts 02958343/expert_verified/points_label/511962626501e4abf500cc506a763c18.seg 02958343
03797390/points/c82b9f1b98f044fc15cf6e5ad80f2da.pts 03797390/expert_verified/points_label/c82b9f1b98f044fc15cf6e5ad80f2da.seg 03797390
04379243/points/49f625856c796254d249abd69334079c.pts 04379243/expert_verified/points_label/49f625856c796254d249abd69334079c.seg 04379243
03001627/points/ca4900c42b8016ef8397cd720acaa508.pts 03001627/expert_verified/points_label/ca4900c42b8016ef8397cd720acaa508.seg 03001627
03636649/points/31a15957bd4f32f87eedf2c7d21f7cfa.pts 03636649/expert_verified/points_label/31a15957bd4f32f87eedf2c7d21f7cfa.seg 03636649
03797390/points/928a383f79698c3fb6d9bc28c8d8a2c4.pts 03797390/expert_verified/points_label/928a383f79698c3fb6d9bc28c8d8a2c4.seg 03797390
04379243/points/17e5a64889ca085fa5526f91aecc0c37.pts 04379243/expert_verified/points_label/17e5a64889ca085fa5526f91aecc0c37.seg 04379243
02958343/points/cbe2dc469c47bb80425b2c354eccabaf.pts 02958343/expert_verified/points_label/cbe2dc469c47bb80425b2c354eccabaf.seg 02958343
03001627/points/19c8189116dd7cd3e95c611687989498.pts 03001627/expert_verified/points_label/19c8189116dd7cd3e95c611687989498.seg 03001627
03636649/points/7f518fe982aae1b5940c8a2639c8747.pts 03636649/expert_verified/points_label/7f518fe982aae1b5940c8a2639c8747.seg 03636649
03636649/points/7b1fef0071908d4bd93768e7b9b1eabf.pts 03636649/expert_verified/points_label/7b1fef0071908d4bd93768e7b9b1eabf.seg 03636649
03001627/points/475e2c8f7a2c1bbd9acf9a86c283d1a2.pts 03001627/expert_verified/points_label/475e2c8f7a2c1bbd9acf9a86c283d1a2.seg 03001627
03467517/points/5c805aca7aa8bdd3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/5c805aca7aa8bdd3ac61a2f8346a8f.seg 03467517
03790512/points/8032295bd3851d75468bac13e007a6e9.pts 03790512/expert_verified/points_label/8032295bd3851d75468bac13e007a6e9.seg 03790512
02691156/points/3e0561d70c7fd4f51c6e4e20f2b76086.pts 02691156/expert_verified/points_label/3e0561d70c7fd4f51c6e4e20f2b76086.seg 02691156
02691156/points/e5610bbacaf098508b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/e5610bbacaf098508b96ae1a0a8b84ec.seg 02691156
03467517/points/97e8ee1b6df404bd57700c05b1862d8.pts 03467517/expert_verified/points_label/97e8ee1b6df404bd57700c05b1862d8.seg 03467517
03636649/points/981b55897cee64403c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/981b55897cee64403c8d0fdfb1cc2535.seg 03636649
04379243/points/204d9ecc196990ebe8479ad2eabcbab4.pts 04379243/expert_verified/points_label/204d9ecc196990ebe8479ad2eabcbab4.seg 04379243
04379243/points/9d039675f4d51869f3edd695842c6d58.pts 04379243/expert_verified/points_label/9d039675f4d51869f3edd695842c6d58.seg 04379243
03467517/points/cb5b2e3f499e4fdecc571cd3cf8f17a1.pts 03467517/expert_verified/points_label/cb5b2e3f499e4fdecc571cd3cf8f17a1.seg 03467517
04379243/points/5243b5491a4f8a16a2b5862518c93.pts 04379243/expert_verified/points_label/5243b5491a4f8a16a2b5862518c93.seg 04379243
04379243/points/efbf0d75648b7c7d5792b99b8245d225.pts 04379243/expert_verified/points_label/efbf0d75648b7c7d5792b99b8245d225.seg 04379243
03001627/points/c8265e04c94bcb5a1346e336f65f96f6.pts 03001627/expert_verified/points_label/c8265e04c94bcb5a1346e336f65f96f6.seg 03001627
02958343/points/94cfcfb74e246f938acb0ff76f4aec7d.pts 02958343/expert_verified/points_label/94cfcfb74e246f938acb0ff76f4aec7d.seg 02958343
03467517/points/a0b6f040538d26e3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/a0b6f040538d26e3ac61a2f8346a8f.seg 03467517
03001627/points/70f1f85d47c970bb78dd615a59de5f05.pts 03001627/expert_verified/points_label/70f1f85d47c970bb78dd615a59de5f05.seg 03001627
04379243/points/f4976e80b8533bcf85518f8659f21d56.pts 04379243/expert_verified/points_label/f4976e80b8533bcf85518f8659f21d56.seg 04379243
03636649/points/9fdaafde365beafc37f7ce56c66316ea.pts 03636649/expert_verified/points_label/9fdaafde365beafc37f7ce56c66316ea.seg 03636649
03467517/points/22033c6d7e5a90f193f0194265a9746c.pts 03467517/expert_verified/points_label/22033c6d7e5a90f193f0194265a9746c.seg 03467517
02691156/points/c1b5dc92221bcdad5fc84bf2b9ef981.pts 02691156/expert_verified/points_label/c1b5dc92221bcdad5fc84bf2b9ef981.seg 02691156
04379243/points/79d0985603f7ff3be6c5cd45aa112726.pts 04379243/expert_verified/points_label/79d0985603f7ff3be6c5cd45aa112726.seg 04379243
03467517/points/5d6c1516b83dec8663e148e250c0340d.pts 03467517/expert_verified/points_label/5d6c1516b83dec8663e148e250c0340d.seg 03467517
04379243/points/79c5df613523a462d42b9650f19dd425.pts 04379243/expert_verified/points_label/79c5df613523a462d42b9650f19dd425.seg 04379243
03001627/points/f19e8da9d8f369c531e63f1270e2b445.pts 03001627/expert_verified/points_label/f19e8da9d8f369c531e63f1270e2b445.seg 03001627
03001627/points/9a711bb7070ae88de948e3d64826c640.pts 03001627/expert_verified/points_label/9a711bb7070ae88de948e3d64826c640.seg 03001627
03467517/points/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.pts 03467517/expert_verified/points_label/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.seg 03467517
03001627/points/837ba605a4ab4a4f19fb4103277a6b93.pts 03001627/expert_verified/points_label/837ba605a4ab4a4f19fb4103277a6b93.seg 03001627
03001627/points/807f08096308af5e28c0cecb7de2397a.pts 03001627/expert_verified/points_label/807f08096308af5e28c0cecb7de2397a.seg 03001627
03467517/points/275c4f98ef07f2b393f0194265a9746c.pts 03467517/expert_verified/points_label/275c4f98ef07f2b393f0194265a9746c.seg 03467517
04379243/points/57afaabf994feb305512673aa47c7e3d.pts 04379243/expert_verified/points_label/57afaabf994feb305512673aa47c7e3d.seg 04379243
03001627/points/d9156f5552178de2713decb1a0563b12.pts 03001627/expert_verified/points_label/d9156f5552178de2713decb1a0563b12.seg 03001627
03948459/points/fe62130ce6fcd9b77754fed890b42399.pts 03948459/expert_verified/points_label/fe62130ce6fcd9b77754fed890b42399.seg 03948459
03261776/points/1757fe64e76a9630fc176230c2f2d294.pts 03261776/expert_verified/points_label/1757fe64e76a9630fc176230c2f2d294.seg 03261776
03790512/points/3fd1bff496b369f71765540024eb9fef.pts 03790512/expert_verified/points_label/3fd1bff496b369f71765540024eb9fef.seg 03790512
02958343/points/a6d494af391a97686436916a86a90ed7.pts 02958343/expert_verified/points_label/a6d494af391a97686436916a86a90ed7.seg 02958343
04099429/points/59389aac7b1ea9b09b28f5f9cf8893b5.pts 04099429/expert_verified/points_label/59389aac7b1ea9b09b28f5f9cf8893b5.seg 04099429
04379243/points/c399ed276ed35cb9a6ce08f0d82ba063.pts 04379243/expert_verified/points_label/c399ed276ed35cb9a6ce08f0d82ba063.seg 04379243
03624134/points/e4f610f36ba3c6f69246ea0301684d80.pts 03624134/expert_verified/points_label/e4f610f36ba3c6f69246ea0301684d80.seg 03624134
03636649/points/90b0f9a1ac2e54ecbc7f58784fda27b5.pts 03636649/expert_verified/points_label/90b0f9a1ac2e54ecbc7f58784fda27b5.seg 03636649
03636649/points/e5e9ff118631c2a3ee088de33038f12a.pts 03636649/expert_verified/points_label/e5e9ff118631c2a3ee088de33038f12a.seg 03636649
04099429/points/4936716925b1cd6428eba1f0b7744e9.pts 04099429/expert_verified/points_label/4936716925b1cd6428eba1f0b7744e9.seg 04099429
04379243/points/6e446bb5adf14b0b6121178eafd002fd.pts 04379243/expert_verified/points_label/6e446bb5adf14b0b6121178eafd002fd.seg 04379243
03001627/points/7ea38c936513f5df3772b104757a4809.pts 03001627/expert_verified/points_label/7ea38c936513f5df3772b104757a4809.seg 03001627
04379243/points/23d68e01b77089ae76ad4f5e7c7020eb.pts 04379243/expert_verified/points_label/23d68e01b77089ae76ad4f5e7c7020eb.seg 04379243
03636649/points/4d6bced89943df73b4edf02c99e16daa.pts 03636649/expert_verified/points_label/4d6bced89943df73b4edf02c99e16daa.seg 03636649
04379243/points/3459eec8eb56fa312bac236fe109e385.pts 04379243/expert_verified/points_label/3459eec8eb56fa312bac236fe109e385.seg 04379243
03261776/points/1a5e2a7cddc8e46aa681aea7976a4565.pts 03261776/expert_verified/points_label/1a5e2a7cddc8e46aa681aea7976a4565.seg 03261776
03001627/points/ed0d65c68a1fa5c485e2f8b1d3a373fe.pts 03001627/expert_verified/points_label/ed0d65c68a1fa5c485e2f8b1d3a373fe.seg 03001627
03636649/points/7b005e23eae2768eb08c032bedc99529.pts 03636649/expert_verified/points_label/7b005e23eae2768eb08c032bedc99529.seg 03636649
04379243/points/3f2e9c14ab1d26a0ebead06af665220.pts 04379243/expert_verified/points_label/3f2e9c14ab1d26a0ebead06af665220.seg 04379243
03001627/points/383ab6330284af461fc4ae93e00c18e5.pts 03001627/expert_verified/points_label/383ab6330284af461fc4ae93e00c18e5.seg 03001627
02691156/points/fc7387d630c84bb9c863ab010b80d9ed.pts 02691156/expert_verified/points_label/fc7387d630c84bb9c863ab010b80d9ed.seg 02691156
04225987/points/344e9402d06bd94031145076011658c5.pts 04225987/expert_verified/points_label/344e9402d06bd94031145076011658c5.seg 04225987
04379243/points/745a2b060d0f692bf4b6538438a0b930.pts 04379243/expert_verified/points_label/745a2b060d0f692bf4b6538438a0b930.seg 04379243
04379243/points/928ea87878a7bbe26cf876b69450cd4e.pts 04379243/expert_verified/points_label/928ea87878a7bbe26cf876b69450cd4e.seg 04379243
03001627/points/5fe56a4a9d5508c3b2373df00b89e5d.pts 03001627/expert_verified/points_label/5fe56a4a9d5508c3b2373df00b89e5d.seg 03001627
02691156/points/6a75658fb8242b9c590874dcd9dc8481.pts 02691156/expert_verified/points_label/6a75658fb8242b9c590874dcd9dc8481.seg 02691156
03948459/points/f377665c5b17d0ce61b636d79e46a7e9.pts 03948459/expert_verified/points_label/f377665c5b17d0ce61b636d79e46a7e9.seg 03948459
03642806/points/ab21f75b97d6b1054f22ce0a3592d5.pts 03642806/expert_verified/points_label/ab21f75b97d6b1054f22ce0a3592d5.seg 03642806
04379243/points/a2baf45f001e118e2c79f7f31759bfa7.pts 04379243/expert_verified/points_label/a2baf45f001e118e2c79f7f31759bfa7.seg 04379243
02691156/points/19ff8fce1658f864ca8607f540cc62ba.pts 02691156/expert_verified/points_label/19ff8fce1658f864ca8607f540cc62ba.seg 02691156
04379243/points/8bb3a7e1cb24fe6febad4f49b26ec52.pts 04379243/expert_verified/points_label/8bb3a7e1cb24fe6febad4f49b26ec52.seg 04379243
04379243/points/dbc5a4d1dc3a6e8271a782a4379556c7.pts 04379243/expert_verified/points_label/dbc5a4d1dc3a6e8271a782a4379556c7.seg 04379243
03001627/points/e6c11fed9469141ace8fba09dd640742.pts 03001627/expert_verified/points_label/e6c11fed9469141ace8fba09dd640742.seg 03001627
03797390/points/f99e19b8c4a729353deb88581ea8417a.pts 03797390/expert_verified/points_label/f99e19b8c4a729353deb88581ea8417a.seg 03797390
03001627/points/d454f99b99248bf337c99625b0c170be.pts 03001627/expert_verified/points_label/d454f99b99248bf337c99625b0c170be.seg 03001627
03636649/points/7c23362b39f318cbb18d6f615cb18bdd.pts 03636649/expert_verified/points_label/7c23362b39f318cbb18d6f615cb18bdd.seg 03636649
03001627/points/d8e2e2a923b372731cf97e154cc62f43.pts 03001627/expert_verified/points_label/d8e2e2a923b372731cf97e154cc62f43.seg 03001627
03642806/points/621882a4afd2a126369873c1090720a1.pts 03642806/expert_verified/points_label/621882a4afd2a126369873c1090720a1.seg 03642806
04379243/points/d5d1e750bb492dd5391e4d6c585a697a.pts 04379243/expert_verified/points_label/d5d1e750bb492dd5391e4d6c585a697a.seg 04379243
03467517/points/42f3172b8770d2fd2200c35bfa7099ee.pts 03467517/expert_verified/points_label/42f3172b8770d2fd2200c35bfa7099ee.seg 03467517
03624134/points/a2288d5f3a44233bc40c6b891c4913bd.pts 03624134/expert_verified/points_label/a2288d5f3a44233bc40c6b891c4913bd.seg 03624134
02691156/points/90612205109d7458e84aab2e1d454e3c.pts 02691156/expert_verified/points_label/90612205109d7458e84aab2e1d454e3c.seg 02691156
03001627/points/2c03bcb2a133ce28bb6caad47eee6580.pts 03001627/expert_verified/points_label/2c03bcb2a133ce28bb6caad47eee6580.seg 03001627
03001627/points/f23d3a85baabd7ae32d9baba75737e72.pts 03001627/expert_verified/points_label/f23d3a85baabd7ae32d9baba75737e72.seg 03001627
04379243/points/90be5de0faef91ef3f7e27638e63d848.pts 04379243/expert_verified/points_label/90be5de0faef91ef3f7e27638e63d848.seg 04379243
02691156/points/d5f01e2aa54bbf28ca8607f540cc62ba.pts 02691156/expert_verified/points_label/d5f01e2aa54bbf28ca8607f540cc62ba.seg 02691156
02691156/points/4f0bf26c62bb7c8b7e1c97634acf0214.pts 02691156/expert_verified/points_label/4f0bf26c62bb7c8b7e1c97634acf0214.seg 02691156
03001627/points/4246c8c293c56ea34b3c42e318f3affc.pts 03001627/expert_verified/points_label/4246c8c293c56ea34b3c42e318f3affc.seg 03001627
04379243/points/9b42cb91ccead6d42f6d10c5d1d56320.pts 04379243/expert_verified/points_label/9b42cb91ccead6d42f6d10c5d1d56320.seg 04379243
03001627/points/c67b7b62e529295dfc30525e763ef5eb.pts 03001627/expert_verified/points_label/c67b7b62e529295dfc30525e763ef5eb.seg 03001627
04379243/points/394c63a5658ef759b515d1675be6b5d3.pts 04379243/expert_verified/points_label/394c63a5658ef759b515d1675be6b5d3.seg 04379243
03636649/points/13ba3fbe8fbc53f3ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/13ba3fbe8fbc53f3ef3a2c64cef919d0.seg 03636649
04379243/points/cb860d60db8f3d18febad4f49b26ec52.pts 04379243/expert_verified/points_label/cb860d60db8f3d18febad4f49b26ec52.seg 04379243
04379243/points/657aad273d665f5dd9823f45c4411583.pts 04379243/expert_verified/points_label/657aad273d665f5dd9823f45c4411583.seg 04379243
03001627/points/64fcd1ba0df5d54d79b3e1be3524f72f.pts 03001627/expert_verified/points_label/64fcd1ba0df5d54d79b3e1be3524f72f.seg 03001627
03642806/points/8489cb783d249651b674654e7bbe623d.pts 03642806/expert_verified/points_label/8489cb783d249651b674654e7bbe623d.seg 03642806
03467517/points/3824a2336972d144a24eeca91f583600.pts 03467517/expert_verified/points_label/3824a2336972d144a24eeca91f583600.seg 03467517
03797390/points/99eaa69cf6fe8811dec712af445786fe.pts 03797390/expert_verified/points_label/99eaa69cf6fe8811dec712af445786fe.seg 03797390
03001627/points/e31d71ed32273fede42ac999db581f5e.pts 03001627/expert_verified/points_label/e31d71ed32273fede42ac999db581f5e.seg 03001627
03001627/points/9a42cff883cbd358106f706dac6c58f0.pts 03001627/expert_verified/points_label/9a42cff883cbd358106f706dac6c58f0.seg 03001627
04379243/points/b515a107aa3a3fd0e3dff0d5ebb43915.pts 04379243/expert_verified/points_label/b515a107aa3a3fd0e3dff0d5ebb43915.seg 04379243
03001627/points/bd6a8b133fa4d269491d6cee03fef2a9.pts 03001627/expert_verified/points_label/bd6a8b133fa4d269491d6cee03fef2a9.seg 03001627
03001627/points/51c8f249e778e84a5bae8923b29985ad.pts 03001627/expert_verified/points_label/51c8f249e778e84a5bae8923b29985ad.seg 03001627
02691156/points/f12eefbbefabe566ca8607f540cc62ba.pts 02691156/expert_verified/points_label/f12eefbbefabe566ca8607f540cc62ba.seg 02691156
02691156/points/ad6e93a1db3e1da5977e4bb19a62128e.pts 02691156/expert_verified/points_label/ad6e93a1db3e1da5977e4bb19a62128e.seg 02691156
03001627/points/efa83c67ce47bfca304edcf7c4314468.pts 03001627/expert_verified/points_label/efa83c67ce47bfca304edcf7c4314468.seg 03001627
03624134/points/d6e9e4e07bafca0fa37f3fc191551700.pts 03624134/expert_verified/points_label/d6e9e4e07bafca0fa37f3fc191551700.seg 03624134
03642806/points/e083105e9c2a28bb0c3a03d0a1f182f.pts 03642806/expert_verified/points_label/e083105e9c2a28bb0c3a03d0a1f182f.seg 03642806
03001627/points/d2992fd5e6715bad3bbf93f83cbaf271.pts 03001627/expert_verified/points_label/d2992fd5e6715bad3bbf93f83cbaf271.seg 03001627
04379243/points/4a27cb9384782ce33e95c55cb020b7e6.pts 04379243/expert_verified/points_label/4a27cb9384782ce33e95c55cb020b7e6.seg 04379243
04379243/points/cf046edeff204b81cdf7280ff8af6720.pts 04379243/expert_verified/points_label/cf046edeff204b81cdf7280ff8af6720.seg 04379243
03001627/points/6534f04a1c349a3c8c6540fe6bc16d6f.pts 03001627/expert_verified/points_label/6534f04a1c349a3c8c6540fe6bc16d6f.seg 03001627
03636649/points/1917888a2b6901091735ea0e092a805a.pts 03636649/expert_verified/points_label/1917888a2b6901091735ea0e092a805a.seg 03636649
03636649/points/b37e07ac31fa4f311735ea0e092a805a.pts 03636649/expert_verified/points_label/b37e07ac31fa4f311735ea0e092a805a.seg 03636649
03636649/points/2f6f1fe66631572c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/2f6f1fe66631572c6c5b8009db95f66f.seg 03636649
03467517/points/feab270427cee00a24eeca91f583600.pts 03467517/expert_verified/points_label/feab270427cee00a24eeca91f583600.seg 03467517
02691156/points/e30e25fe047ce1ea10b08ceced9a0113.pts 02691156/expert_verified/points_label/e30e25fe047ce1ea10b08ceced9a0113.seg 02691156
03636649/points/b2347fe81bd2db6a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/b2347fe81bd2db6a4b3c42e318f3affc.seg 03636649
03001627/points/bb7755090f984ba85dd1bba5b1310523.pts 03001627/expert_verified/points_label/bb7755090f984ba85dd1bba5b1310523.seg 03001627
02691156/points/bc7ead8b45952ab8822054a0a020bf4a.pts 02691156/expert_verified/points_label/bc7ead8b45952ab8822054a0a020bf4a.seg 02691156
02691156/points/5a1d4af1f417d28566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/5a1d4af1f417d28566cf1b4a8fc3914e.seg 02691156
02691156/points/a6cbada42d1a30d0f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/a6cbada42d1a30d0f5c7319b71bdce6e.seg 02691156
02691156/points/b785b39d10c33b5de9f07d25f575b2d4.pts 02691156/expert_verified/points_label/b785b39d10c33b5de9f07d25f575b2d4.seg 02691156
03001627/points/2df8d2af1bc4b9972056b4bd5d870b47.pts 03001627/expert_verified/points_label/2df8d2af1bc4b9972056b4bd5d870b47.seg 03001627
03797390/points/d46b98f63a017578ea456f4bbbc96af9.pts 03797390/expert_verified/points_label/d46b98f63a017578ea456f4bbbc96af9.seg 03797390
04379243/points/1adf96850963550f19fb4103277a6b93.pts 04379243/expert_verified/points_label/1adf96850963550f19fb4103277a6b93.seg 04379243
03001627/points/cb7a4324fdfa690e96dd43aa0ec847c9.pts 03001627/expert_verified/points_label/cb7a4324fdfa690e96dd43aa0ec847c9.seg 03001627
03624134/points/c19088b4c32c0f1d22b38218e60be05.pts 03624134/expert_verified/points_label/c19088b4c32c0f1d22b38218e60be05.seg 03624134
04379243/points/1acf7b0939f3eea2eafdf94e5032b200.pts 04379243/expert_verified/points_label/1acf7b0939f3eea2eafdf94e5032b200.seg 04379243
03467517/points/d50d06b159363b1693f0194265a9746c.pts 03467517/expert_verified/points_label/d50d06b159363b1693f0194265a9746c.seg 03467517
02691156/points/dacb447d7820e7f7ca8607f540cc62ba.pts 02691156/expert_verified/points_label/dacb447d7820e7f7ca8607f540cc62ba.seg 02691156
04379243/points/c3a9dc47c5bf10aac3bd24f986301745.pts 04379243/expert_verified/points_label/c3a9dc47c5bf10aac3bd24f986301745.seg 04379243
04379243/points/4791914b3bcaf57efebad4f49b26ec52.pts 04379243/expert_verified/points_label/4791914b3bcaf57efebad4f49b26ec52.seg 04379243
03001627/points/bf3f14225e8f899db62f9fb4b7f0626.pts 03001627/expert_verified/points_label/bf3f14225e8f899db62f9fb4b7f0626.seg 03001627
04379243/points/4f5c111a89b3fd27aa29e9f0529e8ef7.pts 04379243/expert_verified/points_label/4f5c111a89b3fd27aa29e9f0529e8ef7.seg 04379243
03001627/points/6af8d7bfa508b8d23759750e8db40476.pts 03001627/expert_verified/points_label/6af8d7bfa508b8d23759750e8db40476.seg 03001627
02691156/points/427030abcc0f11a8947bbeb9022263b8.pts 02691156/expert_verified/points_label/427030abcc0f11a8947bbeb9022263b8.seg 02691156
03642806/points/367fbaea8743ec1cc98452c8fce6b43.pts 03642806/expert_verified/points_label/367fbaea8743ec1cc98452c8fce6b43.seg 03642806
04379243/points/419412b927d11c7d8312881285c04cb3.pts 04379243/expert_verified/points_label/419412b927d11c7d8312881285c04cb3.seg 04379243
03001627/points/56cc047440e7c999a23949c21eddef76.pts 03001627/expert_verified/points_label/56cc047440e7c999a23949c21eddef76.seg 03001627
03790512/points/fdb6223c286cb653cc9e7530f9d8e186.pts 03790512/expert_verified/points_label/fdb6223c286cb653cc9e7530f9d8e186.seg 03790512
03636649/points/6b2a590446ad5794b10e111f2d30684d.pts 03636649/expert_verified/points_label/6b2a590446ad5794b10e111f2d30684d.seg 03636649
03001627/points/a3ce9ba74ab50352e6fe3612af521500.pts 03001627/expert_verified/points_label/a3ce9ba74ab50352e6fe3612af521500.seg 03001627
02958343/points/9986dd19b2c459152470de2774d6099.pts 02958343/expert_verified/points_label/9986dd19b2c459152470de2774d6099.seg 02958343
03642806/points/b806daf849a5dba289c212008d2a390e.pts 03642806/expert_verified/points_label/b806daf849a5dba289c212008d2a390e.seg 03642806
04379243/points/2eb503dde3cc027d86c701087a194026.pts 04379243/expert_verified/points_label/2eb503dde3cc027d86c701087a194026.seg 04379243
03001627/points/c4a4710012ee39bd19f4b416b31c46e0.pts 03001627/expert_verified/points_label/c4a4710012ee39bd19f4b416b31c46e0.seg 03001627
02958343/points/bd8654fbca233e41ddb8f37b1865d989.pts 02958343/expert_verified/points_label/bd8654fbca233e41ddb8f37b1865d989.seg 02958343
03001627/points/6fd485a2345c3dd69233bf560301e53.pts 03001627/expert_verified/points_label/6fd485a2345c3dd69233bf560301e53.seg 03001627
02691156/points/aebc4c46b3cb7c3bca8607f540cc62ba.pts 02691156/expert_verified/points_label/aebc4c46b3cb7c3bca8607f540cc62ba.seg 02691156
03001627/points/9343df9a7ed6cbba1923501fcdd899bb.pts 03001627/expert_verified/points_label/9343df9a7ed6cbba1923501fcdd899bb.seg 03001627
04379243/points/7fadae39394c5622c3bd24f986301745.pts 04379243/expert_verified/points_label/7fadae39394c5622c3bd24f986301745.seg 04379243
03001627/points/d619fd50c4d0fb46dea83bbf303af433.pts 03001627/expert_verified/points_label/d619fd50c4d0fb46dea83bbf303af433.seg 03001627
04379243/points/ef02c88a34b3888a1b1a00a31bfed97b.pts 04379243/expert_verified/points_label/ef02c88a34b3888a1b1a00a31bfed97b.seg 04379243
03467517/points/71d0016078dea05a94ca7929d4ba6d2d.pts 03467517/expert_verified/points_label/71d0016078dea05a94ca7929d4ba6d2d.seg 03467517
03001627/points/5623d0ec9efedbc9d4da89766e80607a.pts 03001627/expert_verified/points_label/5623d0ec9efedbc9d4da89766e80607a.seg 03001627
04379243/points/21486e6d0bd896ad5cca18918d24f6cd.pts 04379243/expert_verified/points_label/21486e6d0bd896ad5cca18918d24f6cd.seg 04379243
03636649/points/978df83c1cee012729a60d6ab40898d.pts 03636649/expert_verified/points_label/978df83c1cee012729a60d6ab40898d.seg 03636649
02691156/points/350d12f5290908c7f446f92b52bbd82a.pts 02691156/expert_verified/points_label/350d12f5290908c7f446f92b52bbd82a.seg 02691156
03636649/points/86d7a728dc35d634f800b597bc1c1eb5.pts 03636649/expert_verified/points_label/86d7a728dc35d634f800b597bc1c1eb5.seg 03636649
03001627/points/3b4292989394ba62f51f77a6d7299806.pts 03001627/expert_verified/points_label/3b4292989394ba62f51f77a6d7299806.seg 03001627
03001627/points/f5f18fccf9e16800dbd185de408ea209.pts 03001627/expert_verified/points_label/f5f18fccf9e16800dbd185de408ea209.seg 03001627
04379243/points/4d873bf1a658dcd523eb3ad3d378722a.pts 04379243/expert_verified/points_label/4d873bf1a658dcd523eb3ad3d378722a.seg 04379243
03001627/points/a3e4639ff201f69b22a3043dcd383f68.pts 03001627/expert_verified/points_label/a3e4639ff201f69b22a3043dcd383f68.seg 03001627
04379243/points/8d247c6f6aaf805a2530bfb25087f2b0.pts 04379243/expert_verified/points_label/8d247c6f6aaf805a2530bfb25087f2b0.seg 04379243
03467517/points/511fc5ccf4f1c857a24eeca91f583600.pts 03467517/expert_verified/points_label/511fc5ccf4f1c857a24eeca91f583600.seg 03467517
02691156/points/4635326bc4fdc3e9297cd7e2ef7dfa80.pts 02691156/expert_verified/points_label/4635326bc4fdc3e9297cd7e2ef7dfa80.seg 02691156
03001627/points/525776b59266140381dff5c2e57ad46e.pts 03001627/expert_verified/points_label/525776b59266140381dff5c2e57ad46e.seg 03001627
03001627/points/f1d6552ca66b2e37713decb1a0563b12.pts 03001627/expert_verified/points_label/f1d6552ca66b2e37713decb1a0563b12.seg 03001627
04379243/points/40ff8ae39ad13d014a873bbe35452b88.pts 04379243/expert_verified/points_label/40ff8ae39ad13d014a873bbe35452b88.seg 04379243
02691156/points/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.pts 02691156/expert_verified/points_label/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.seg 02691156
04379243/points/63aa14915f59ed8671a782a4379556c7.pts 04379243/expert_verified/points_label/63aa14915f59ed8671a782a4379556c7.seg 04379243
02691156/points/e16f9cc7dedcacdb9b0435532743fd43.pts 02691156/expert_verified/points_label/e16f9cc7dedcacdb9b0435532743fd43.seg 02691156
04379243/points/c5b83c681c085f2195493ccf8f26ab2c.pts 04379243/expert_verified/points_label/c5b83c681c085f2195493ccf8f26ab2c.seg 04379243
03001627/points/b2ba1569509cdb439451566a8c6563ed.pts 03001627/expert_verified/points_label/b2ba1569509cdb439451566a8c6563ed.seg 03001627
02691156/points/265f5348ab2320b2148672750a1a335.pts 02691156/expert_verified/points_label/265f5348ab2320b2148672750a1a335.seg 02691156
03001627/points/47da08d9c7cd7e104b3c42e318f3affc.pts 03001627/expert_verified/points_label/47da08d9c7cd7e104b3c42e318f3affc.seg 03001627
03001627/points/458356b9c5a8d7bd7cc86734cb2f5062.pts 03001627/expert_verified/points_label/458356b9c5a8d7bd7cc86734cb2f5062.seg 03001627
02691156/points/d20e3ed9b3430672bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d20e3ed9b3430672bbf3143b1cb6076a.seg 02691156
04379243/points/c45e6ceae72c7a97be8908669c476d49.pts 04379243/expert_verified/points_label/c45e6ceae72c7a97be8908669c476d49.seg 04379243
03001627/points/d9bbd1a1eaf6d2259d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/d9bbd1a1eaf6d2259d3ea1c6b57a0095.seg 03001627
02958343/points/8242b114695b68286f522b2bb8ded829.pts 02958343/expert_verified/points_label/8242b114695b68286f522b2bb8ded829.seg 02958343
03001627/points/e4b40369894a16ce6821a1e68ba5ebab.pts 03001627/expert_verified/points_label/e4b40369894a16ce6821a1e68ba5ebab.seg 03001627
03636649/points/dfe800d8d8642e9647bc3701b998a7d5.pts 03636649/expert_verified/points_label/dfe800d8d8642e9647bc3701b998a7d5.seg 03636649
04379243/points/bdf7606e8d493149664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/bdf7606e8d493149664b3b9b23ddfcbc.seg 04379243
03001627/points/6015aaa9ef170d9bfdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/6015aaa9ef170d9bfdef1c01cbd4ae0c.seg 03001627
03624134/points/df7a65224f295122ed9c5b25fef60d04.pts 03624134/expert_verified/points_label/df7a65224f295122ed9c5b25fef60d04.seg 03624134
03467517/points/df959f68bb22e402a24eeca91f583600.pts 03467517/expert_verified/points_label/df959f68bb22e402a24eeca91f583600.seg 03467517
04379243/points/69604fc24b7976d69ccce4c6d5bb195f.pts 04379243/expert_verified/points_label/69604fc24b7976d69ccce4c6d5bb195f.seg 04379243
04379243/points/23aca164c7b2e2d4ad8af6714b643432.pts 04379243/expert_verified/points_label/23aca164c7b2e2d4ad8af6714b643432.seg 04379243
03636649/points/e37796d40348fa5fd8013bb984303089.pts 03636649/expert_verified/points_label/e37796d40348fa5fd8013bb984303089.seg 03636649
04379243/points/8cb6a2e9ba365c94593ebeeedbff73b.pts 04379243/expert_verified/points_label/8cb6a2e9ba365c94593ebeeedbff73b.seg 04379243
03001627/points/d6f2d44c693d2e857062f2d72cde5c95.pts 03001627/expert_verified/points_label/d6f2d44c693d2e857062f2d72cde5c95.seg 03001627
03948459/points/ed29dd43ad28f042d1987c07c912c6e1.pts 03948459/expert_verified/points_label/ed29dd43ad28f042d1987c07c912c6e1.seg 03948459
03001627/points/ca01fd0de2534323c594a0e804f37c1a.pts 03001627/expert_verified/points_label/ca01fd0de2534323c594a0e804f37c1a.seg 03001627
03636649/points/e7b719516449701362525a4d857f099d.pts 03636649/expert_verified/points_label/e7b719516449701362525a4d857f099d.seg 03636649
02691156/points/bd48d0beb5d1acf1d2106c9042f1bde9.pts 02691156/expert_verified/points_label/bd48d0beb5d1acf1d2106c9042f1bde9.seg 02691156
03636649/points/7cb828eb3b8e424b1e88064118b89a3e.pts 03636649/expert_verified/points_label/7cb828eb3b8e424b1e88064118b89a3e.seg 03636649
03001627/points/fdd21f7f2ca9f0bcbdcbca499b446e89.pts 03001627/expert_verified/points_label/fdd21f7f2ca9f0bcbdcbca499b446e89.seg 03001627
03636649/points/d779977c2417752b815c6de5374a8dd2.pts 03636649/expert_verified/points_label/d779977c2417752b815c6de5374a8dd2.seg 03636649
02691156/points/f3e2df468c15795872517bb0a6b4d3ef.pts 02691156/expert_verified/points_label/f3e2df468c15795872517bb0a6b4d3ef.seg 02691156
04379243/points/e3cc0b06be2c972cab610b0c94236463.pts 04379243/expert_verified/points_label/e3cc0b06be2c972cab610b0c94236463.seg 04379243
03261776/points/ca1c1c9aba8f4491a656de49935d2359.pts 03261776/expert_verified/points_label/ca1c1c9aba8f4491a656de49935d2359.seg 03261776
03001627/points/c535629f9661293dc16ef5c633c71b56.pts 03001627/expert_verified/points_label/c535629f9661293dc16ef5c633c71b56.seg 03001627
03636649/points/699fcda4f4e9166ec5eb7aae719027b2.pts 03636649/expert_verified/points_label/699fcda4f4e9166ec5eb7aae719027b2.seg 03636649
03001627/points/8a5d60067de905336c183a120a388982.pts 03001627/expert_verified/points_label/8a5d60067de905336c183a120a388982.seg 03001627
02691156/points/4ad92be763c2ded8fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/4ad92be763c2ded8fca1f1143bb6bc17.seg 02691156
04379243/points/14d6b4b09dfc54e9d679a95896f75103.pts 04379243/expert_verified/points_label/14d6b4b09dfc54e9d679a95896f75103.seg 04379243
02691156/points/5e9129782c45b26992e39b8eae3e6b15.pts 02691156/expert_verified/points_label/5e9129782c45b26992e39b8eae3e6b15.seg 02691156
02691156/points/2aec6e6096e640add00d52e62bf14ee9.pts 02691156/expert_verified/points_label/2aec6e6096e640add00d52e62bf14ee9.seg 02691156
03642806/points/7b4260884a1dfd76b080af510dd640b.pts 03642806/expert_verified/points_label/7b4260884a1dfd76b080af510dd640b.seg 03642806
03636649/points/3a0edfd418e020b97f32712aef0efc5a.pts 03636649/expert_verified/points_label/3a0edfd418e020b97f32712aef0efc5a.seg 03636649
03467517/points/1c374a198daaddc493f0194265a9746c.pts 03467517/expert_verified/points_label/1c374a198daaddc493f0194265a9746c.seg 03467517
04379243/points/9d90a58677e619f94b8710a3469971b1.pts 04379243/expert_verified/points_label/9d90a58677e619f94b8710a3469971b1.seg 04379243
02691156/points/26f8a11864fd6bf7b68211fcc7956ac6.pts 02691156/expert_verified/points_label/26f8a11864fd6bf7b68211fcc7956ac6.seg 02691156
02773838/points/f5108ede5ca11f041f6736765dee4fa9.pts 02773838/expert_verified/points_label/f5108ede5ca11f041f6736765dee4fa9.seg 02773838
03001627/points/41ce60d5443c203eb31c248b8665b2e7.pts 03001627/expert_verified/points_label/41ce60d5443c203eb31c248b8665b2e7.seg 03001627
03797390/points/a637500654ca8d16c97cfc3e8a6b1d16.pts 03797390/expert_verified/points_label/a637500654ca8d16c97cfc3e8a6b1d16.seg 03797390
03001627/points/9ee4b9c97bcf4b3715dec43ae6a12831.pts 03001627/expert_verified/points_label/9ee4b9c97bcf4b3715dec43ae6a12831.seg 03001627
03001627/points/e2dbad7996e7e13430c589758b4b5646.pts 03001627/expert_verified/points_label/e2dbad7996e7e13430c589758b4b5646.seg 03001627
03001627/points/ec9f1fc13f2e4ae2c3bd24f986301745.pts 03001627/expert_verified/points_label/ec9f1fc13f2e4ae2c3bd24f986301745.seg 03001627
03624134/points/172b9a77462dcdeaed90ead9558ee6cb.pts 03624134/expert_verified/points_label/172b9a77462dcdeaed90ead9558ee6cb.seg 03624134
04379243/points/713a4be770bb19b9586b2526565371c0.pts 04379243/expert_verified/points_label/713a4be770bb19b9586b2526565371c0.seg 04379243
04379243/points/f2e6820ca69d9b7719fb4103277a6b93.pts 04379243/expert_verified/points_label/f2e6820ca69d9b7719fb4103277a6b93.seg 04379243
03001627/points/11a06e6f68b1d99c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/11a06e6f68b1d99c8687ff9b0b4e4ac.seg 03001627
04379243/points/cfd7e354a5ae982aa0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/cfd7e354a5ae982aa0ab1d82ef09f78f.seg 04379243
03797390/points/8012f52dd0a4d2f718a93a45bf780820.pts 03797390/expert_verified/points_label/8012f52dd0a4d2f718a93a45bf780820.seg 03797390
03636649/points/57c1bc69df779d87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/57c1bc69df779d87bbc7a6acbd8f058b.seg 03636649
03948459/points/664579680dc09267e1f2a1daf140ac9f.pts 03948459/expert_verified/points_label/664579680dc09267e1f2a1daf140ac9f.seg 03948459
03001627/points/ca032d3b6dcbe1cea3056fa1e8da3997.pts 03001627/expert_verified/points_label/ca032d3b6dcbe1cea3056fa1e8da3997.seg 03001627
02691156/points/4a837740b388aa45d8ff6111270336a9.pts 02691156/expert_verified/points_label/4a837740b388aa45d8ff6111270336a9.seg 02691156
04099429/points/64803bab9799d0e698d2d2b2ae2563b0.pts 04099429/expert_verified/points_label/64803bab9799d0e698d2d2b2ae2563b0.seg 04099429
04379243/points/c2c36909e461e10adaaaeef365d8f6e5.pts 04379243/expert_verified/points_label/c2c36909e461e10adaaaeef365d8f6e5.seg 04379243
04379243/points/bc842e548e68a3cbb48513409ae7c51d.pts 04379243/expert_verified/points_label/bc842e548e68a3cbb48513409ae7c51d.seg 04379243
03467517/points/4709e55a82a63f64d57700c05b1862d8.pts 03467517/expert_verified/points_label/4709e55a82a63f64d57700c05b1862d8.seg 03467517
04379243/points/dc6f030d9ee566a5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/dc6f030d9ee566a5dcfcef693e7ec696.seg 04379243
03001627/points/8be8093e99b94bd9cf320c31965db5a1.pts 03001627/expert_verified/points_label/8be8093e99b94bd9cf320c31965db5a1.seg 03001627
02958343/points/a0a1b0377d72e86bab3dd76bf33b0f5e.pts 02958343/expert_verified/points_label/a0a1b0377d72e86bab3dd76bf33b0f5e.seg 02958343
03001627/points/efc684ff4dc6ff49ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/efc684ff4dc6ff49ccd42a2d6eea63ed.seg 03001627
03001627/points/ff2223a085d32243696b74614952b2d0.pts 03001627/expert_verified/points_label/ff2223a085d32243696b74614952b2d0.seg 03001627
02954340/points/8b2951e32e0906bb5f6cb4951755315c.pts 02954340/expert_verified/points_label/8b2951e32e0906bb5f6cb4951755315c.seg 02954340
04379243/points/82b69c9b72a5159ce76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/82b69c9b72a5159ce76bc197b3a3ffc0.seg 04379243
03642806/points/5b5247b13d5b21bdad2954b86711abbd.pts 03642806/expert_verified/points_label/5b5247b13d5b21bdad2954b86711abbd.seg 03642806
03636649/points/44e442591f82cd4cab0ac374f450cdc.pts 03636649/expert_verified/points_label/44e442591f82cd4cab0ac374f450cdc.seg 03636649
03001627/points/2a1184b04dd8f30e3e92f39ce48d644.pts 03001627/expert_verified/points_label/2a1184b04dd8f30e3e92f39ce48d644.seg 03001627
03636649/points/bc49fe3559e18fcb7d910d51d878f708.pts 03636649/expert_verified/points_label/bc49fe3559e18fcb7d910d51d878f708.seg 03636649
03624134/points/c50af8af50613e822bf26da672b84220.pts 03624134/expert_verified/points_label/c50af8af50613e822bf26da672b84220.seg 03624134
04225987/points/c0280aaad5473e8398c63cb68f11df34.pts 04225987/expert_verified/points_label/c0280aaad5473e8398c63cb68f11df34.seg 04225987
03636649/points/5849d1a237cb493c659dda512294c744.pts 03636649/expert_verified/points_label/5849d1a237cb493c659dda512294c744.seg 03636649
02958343/points/fcd90d547fdeb629f200a72c9245aee7.pts 02958343/expert_verified/points_label/fcd90d547fdeb629f200a72c9245aee7.seg 02958343
03001627/points/34898c36e711fbde713decb1a0563b12.pts 03001627/expert_verified/points_label/34898c36e711fbde713decb1a0563b12.seg 03001627
02691156/points/af696fc30a96a0c8bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/af696fc30a96a0c8bc0909d98a1ff2b4.seg 02691156
04379243/points/f28e030e715b9d3e318462aca9e62b6b.pts 04379243/expert_verified/points_label/f28e030e715b9d3e318462aca9e62b6b.seg 04379243
02691156/points/3c7e4628a9ea201bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/3c7e4628a9ea201bbf3143b1cb6076a.seg 02691156
03636649/points/f092117adb1e9254d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/f092117adb1e9254d1cbf3e52b9b6237.seg 03636649
04379243/points/7dd881a26eea656d193afeeca14e3baa.pts 04379243/expert_verified/points_label/7dd881a26eea656d193afeeca14e3baa.seg 04379243
03001627/points/79a3115a6f96eef7c151419181ef256.pts 03001627/expert_verified/points_label/79a3115a6f96eef7c151419181ef256.seg 03001627
04379243/points/fc51355d4d03ff4ae6c5cd45aa112726.pts 04379243/expert_verified/points_label/fc51355d4d03ff4ae6c5cd45aa112726.seg 04379243
04379243/points/34121f5cc12135148c1cf3f7d7f0373.pts 04379243/expert_verified/points_label/34121f5cc12135148c1cf3f7d7f0373.seg 04379243
03624134/points/d5167211e757e79f012465c621a63e3.pts 03624134/expert_verified/points_label/d5167211e757e79f012465c621a63e3.seg 03624134
04379243/points/5b375eacdbe49cfaaa539cd22945e538.pts 04379243/expert_verified/points_label/5b375eacdbe49cfaaa539cd22945e538.seg 04379243
02691156/points/d3d788c1fb35227619ba010ddb4974fe.pts 02691156/expert_verified/points_label/d3d788c1fb35227619ba010ddb4974fe.seg 02691156
02691156/points/f26ea1a00455f44fb88e2a19106395c2.pts 02691156/expert_verified/points_label/f26ea1a00455f44fb88e2a19106395c2.seg 02691156
03001627/points/798a46965d9e0edfcea003eff0268278.pts 03001627/expert_verified/points_label/798a46965d9e0edfcea003eff0268278.seg 03001627
02691156/points/3069d990d52051eb3a34c2907e8f3f1f.pts 02691156/expert_verified/points_label/3069d990d52051eb3a34c2907e8f3f1f.seg 02691156
02691156/points/8c42e3042a4beaa7d5c40787c7bb7824.pts 02691156/expert_verified/points_label/8c42e3042a4beaa7d5c40787c7bb7824.seg 02691156
04379243/points/45c5ee611c73b90a509330ce00eb0b20.pts 04379243/expert_verified/points_label/45c5ee611c73b90a509330ce00eb0b20.seg 04379243
03001627/points/22ada577361ed0374b3c42e318f3affc.pts 03001627/expert_verified/points_label/22ada577361ed0374b3c42e318f3affc.seg 03001627
04379243/points/b6ad7be371729438dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b6ad7be371729438dcfcef693e7ec696.seg 04379243
03636649/points/4c266f2b866c59e761fef32872c6fa53.pts 03636649/expert_verified/points_label/4c266f2b866c59e761fef32872c6fa53.seg 03636649
04379243/points/812dd06fc99f174e9f2349486c570dd4.pts 04379243/expert_verified/points_label/812dd06fc99f174e9f2349486c570dd4.seg 04379243
02691156/points/36a5bd4ca6a0b191532d23702363f9a5.pts 02691156/expert_verified/points_label/36a5bd4ca6a0b191532d23702363f9a5.seg 02691156
03001627/points/be0890a6a0f3fcf841f91bc9e1dece3b.pts 03001627/expert_verified/points_label/be0890a6a0f3fcf841f91bc9e1dece3b.seg 03001627
03642806/points/6008f256f3beafd9988abef1fd117e7.pts 03642806/expert_verified/points_label/6008f256f3beafd9988abef1fd117e7.seg 03642806
03001627/points/490941bf4a532b62492d9da2668ec34c.pts 03001627/expert_verified/points_label/490941bf4a532b62492d9da2668ec34c.seg 03001627
03636649/points/94940283714fdff6244ba644cf33cb2e.pts 03636649/expert_verified/points_label/94940283714fdff6244ba644cf33cb2e.seg 03636649
03642806/points/6227e7dd1a391e8d54f22ce0a3592d5.pts 03642806/expert_verified/points_label/6227e7dd1a391e8d54f22ce0a3592d5.seg 03642806
02691156/points/b2ceeee3c5b75962ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/b2ceeee3c5b75962ac4f72bf08dc79a6.seg 02691156
03642806/points/55a05b33f34e7211f71cb38553f14917.pts 03642806/expert_verified/points_label/55a05b33f34e7211f71cb38553f14917.seg 03642806
02773838/points/74c548ef3ca7b1987515e7bb7dba4019.pts 02773838/expert_verified/points_label/74c548ef3ca7b1987515e7bb7dba4019.seg 02773838
03467517/points/defcf80fcef4b51b3f431ca2c1260d62.pts 03467517/expert_verified/points_label/defcf80fcef4b51b3f431ca2c1260d62.seg 03467517
04379243/points/eaea1cf98b61abd043383304411cc9ec.pts 04379243/expert_verified/points_label/eaea1cf98b61abd043383304411cc9ec.seg 04379243
03001627/points/7f6858bd9d4af9df97316612e1a4343a.pts 03001627/expert_verified/points_label/7f6858bd9d4af9df97316612e1a4343a.seg 03001627
03001627/points/3c27660aacbcf99886327adaa986dff.pts 03001627/expert_verified/points_label/3c27660aacbcf99886327adaa986dff.seg 03001627
04379243/points/229d510bace435811572ee5ddf1b55b.pts 04379243/expert_verified/points_label/229d510bace435811572ee5ddf1b55b.seg 04379243
03636649/points/83c0ad378b5802b73d39d8012919dd25.pts 03636649/expert_verified/points_label/83c0ad378b5802b73d39d8012919dd25.seg 03636649
02691156/points/f009f3112625ee00b8cf782e8c539948.pts 02691156/expert_verified/points_label/f009f3112625ee00b8cf782e8c539948.seg 02691156
02691156/points/f13827d156628467b4cdad9a5bf52dd5.pts 02691156/expert_verified/points_label/f13827d156628467b4cdad9a5bf52dd5.seg 02691156
03636649/points/526251a7530426a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/526251a7530426a4b3c42e318f3affc.seg 03636649
03001627/points/a1133464132d65fcfce0ccdae30f97db.pts 03001627/expert_verified/points_label/a1133464132d65fcfce0ccdae30f97db.seg 03001627
02691156/points/d844094b073a0452b04b2d1c5ce9783b.pts 02691156/expert_verified/points_label/d844094b073a0452b04b2d1c5ce9783b.seg 02691156
03948459/points/2f5b4bcb8d4dd901609e2d916fa0da27.pts 03948459/expert_verified/points_label/2f5b4bcb8d4dd901609e2d916fa0da27.seg 03948459
03636649/points/a4c06cd5032733af543df75232f6ff2b.pts 03636649/expert_verified/points_label/a4c06cd5032733af543df75232f6ff2b.seg 03636649
03636649/points/64eaa45bd2e01db8991ff09eca5b27a8.pts 03636649/expert_verified/points_label/64eaa45bd2e01db8991ff09eca5b27a8.seg 03636649
03636649/points/5bc478e9c4e0bb8180936c51aa7ffcf5.pts 03636649/expert_verified/points_label/5bc478e9c4e0bb8180936c51aa7ffcf5.seg 03636649
03636649/points/b02bd8e5ef9cfe354b3c42e318f3affc.pts 03636649/expert_verified/points_label/b02bd8e5ef9cfe354b3c42e318f3affc.seg 03636649
03636649/points/cf6c082b9534049494db33559ec0df30.pts 03636649/expert_verified/points_label/cf6c082b9534049494db33559ec0df30.seg 03636649
04225987/points/af4343c5b78b70b11082f2ea630bf69e.pts 04225987/expert_verified/points_label/af4343c5b78b70b11082f2ea630bf69e.seg 04225987
03467517/points/c084022f2ddbf95493f0194265a9746c.pts 03467517/expert_verified/points_label/c084022f2ddbf95493f0194265a9746c.seg 03467517
03001627/points/550dd11407c28f9f3bd04286517a8395.pts 03001627/expert_verified/points_label/550dd11407c28f9f3bd04286517a8395.seg 03001627
04379243/points/702cebffa33a19f019f079d1b712f46f.pts 04379243/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 04379243
04379243/points/388d9e7b2b8a8f909492fbce0bd54e2e.pts 04379243/expert_verified/points_label/388d9e7b2b8a8f909492fbce0bd54e2e.seg 04379243
03636649/points/7634fbdcaa6b304d62c83ac1e3a4ebaa.pts 03636649/expert_verified/points_label/7634fbdcaa6b304d62c83ac1e3a4ebaa.seg 03636649
03636649/points/14d3d2418165ec86bba785994a529f86.pts 03636649/expert_verified/points_label/14d3d2418165ec86bba785994a529f86.seg 03636649
04379243/points/13e19274b358ec867aa3000697a75d55.pts 04379243/expert_verified/points_label/13e19274b358ec867aa3000697a75d55.seg 04379243
03467517/points/727fcc85add981325e683993f34d42f2.pts 03467517/expert_verified/points_label/727fcc85add981325e683993f34d42f2.seg 03467517
02691156/points/947d6b9cd1966e2e719b5362fe06bbb.pts 02691156/expert_verified/points_label/947d6b9cd1966e2e719b5362fe06bbb.seg 02691156
04379243/points/ee5f85db427865e63e5399147a5b4763.pts 04379243/expert_verified/points_label/ee5f85db427865e63e5399147a5b4763.seg 04379243
02691156/points/1678946724380812de689e373096b0e3.pts 02691156/expert_verified/points_label/1678946724380812de689e373096b0e3.seg 02691156
03001627/points/3fdef0a7606c397331ad067823a3f0ce.pts 03001627/expert_verified/points_label/3fdef0a7606c397331ad067823a3f0ce.seg 03001627
03636649/points/1bb465b8f22315d1116f219d90a571c2.pts 03636649/expert_verified/points_label/1bb465b8f22315d1116f219d90a571c2.seg 03636649
04379243/points/9dd5b7e6f90ee322b56d92c5d7b06038.pts 04379243/expert_verified/points_label/9dd5b7e6f90ee322b56d92c5d7b06038.seg 04379243
03467517/points/7eee3b79e053759143891ae68a82472e.pts 03467517/expert_verified/points_label/7eee3b79e053759143891ae68a82472e.seg 03467517
03001627/points/f4b6bf9253918b52944d8f8e13d63fde.pts 03001627/expert_verified/points_label/f4b6bf9253918b52944d8f8e13d63fde.seg 03001627
03636649/points/92e0f64c08f0c8ac3c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/92e0f64c08f0c8ac3c8d0fdfb1cc2535.seg 03636649
03624134/points/d63521a0dfac9c1f342494fa6f09f376.pts 03624134/expert_verified/points_label/d63521a0dfac9c1f342494fa6f09f376.seg 03624134
04379243/points/c7ff0afab4b7885a52160ba64fb535b2.pts 04379243/expert_verified/points_label/c7ff0afab4b7885a52160ba64fb535b2.seg 04379243
02958343/points/89765af115d9a4955591fcdffe729c55.pts 02958343/expert_verified/points_label/89765af115d9a4955591fcdffe729c55.seg 02958343
03636649/points/70bf2aaedbf9499ec889c00efdaf9928.pts 03636649/expert_verified/points_label/70bf2aaedbf9499ec889c00efdaf9928.seg 03636649
02958343/points/ef15b938dcfa9893c4d922e8a1141322.pts 02958343/expert_verified/points_label/ef15b938dcfa9893c4d922e8a1141322.seg 02958343
03636649/points/4bb676c497969016de98d10ab5975b59.pts 03636649/expert_verified/points_label/4bb676c497969016de98d10ab5975b59.seg 03636649
04379243/points/1c8121e1ad6cd6fc7a480f3f1d55ed3f.pts 04379243/expert_verified/points_label/1c8121e1ad6cd6fc7a480f3f1d55ed3f.seg 04379243
04379243/points/83b8e64089968ae8fd3feb4581507302.pts 04379243/expert_verified/points_label/83b8e64089968ae8fd3feb4581507302.seg 04379243
03636649/points/a4c0f3aed58f0e092fdae21c212bf119.pts 03636649/expert_verified/points_label/a4c0f3aed58f0e092fdae21c212bf119.seg 03636649
04379243/points/e02925509615eb5a4eaf5bbf36d243d4.pts 04379243/expert_verified/points_label/e02925509615eb5a4eaf5bbf36d243d4.seg 04379243
04379243/points/c5087fce38b009ae30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/c5087fce38b009ae30bbd4cddd04c77b.seg 04379243
03001627/points/5107542cfbf142f36209799e55a657c.pts 03001627/expert_verified/points_label/5107542cfbf142f36209799e55a657c.seg 03001627
04379243/points/94a62cfdb84e88ca9a3528690d225ee1.pts 04379243/expert_verified/points_label/94a62cfdb84e88ca9a3528690d225ee1.seg 04379243
04379243/points/80ad1f839582d183fbf6f493308acc40.pts 04379243/expert_verified/points_label/80ad1f839582d183fbf6f493308acc40.seg 04379243
03001627/points/91819d15c2c044ebd47ffa500636d198.pts 03001627/expert_verified/points_label/91819d15c2c044ebd47ffa500636d198.seg 03001627
03636649/points/77a5a12147a6624d786810c22b062a88.pts 03636649/expert_verified/points_label/77a5a12147a6624d786810c22b062a88.seg 03636649
03001627/points/beb4c42cfa1c3b282811d30bba54859.pts 03001627/expert_verified/points_label/beb4c42cfa1c3b282811d30bba54859.seg 03001627
03636649/points/e529fc190753cc9df647dc544bb0ab61.pts 03636649/expert_verified/points_label/e529fc190753cc9df647dc544bb0ab61.seg 03636649
04379243/points/680d4a8b5a30601a4b3c42e318f3affc.pts 04379243/expert_verified/points_label/680d4a8b5a30601a4b3c42e318f3affc.seg 04379243
03001627/points/1d6f4020cab4ec1962d6a66a1a314d66.pts 03001627/expert_verified/points_label/1d6f4020cab4ec1962d6a66a1a314d66.seg 03001627
03001627/points/5b3fd3199d1bc950c1ae25a29e9d46d3.pts 03001627/expert_verified/points_label/5b3fd3199d1bc950c1ae25a29e9d46d3.seg 03001627
03001627/points/17e916fc863540ee3def89b32cef8e45.pts 03001627/expert_verified/points_label/17e916fc863540ee3def89b32cef8e45.seg 03001627
04379243/points/a5d5fc6b0bb7881419fb4103277a6b93.pts 04379243/expert_verified/points_label/a5d5fc6b0bb7881419fb4103277a6b93.seg 04379243
03001627/points/eafec1b145972dcd815b2b467e8e2eac.pts 03001627/expert_verified/points_label/eafec1b145972dcd815b2b467e8e2eac.seg 03001627
04379243/points/1fb2be490f45ec6e19fb4103277a6b93.pts 04379243/expert_verified/points_label/1fb2be490f45ec6e19fb4103277a6b93.seg 04379243
02691156/points/8b61ba80d9e487deca8607f540cc62ba.pts 02691156/expert_verified/points_label/8b61ba80d9e487deca8607f540cc62ba.seg 02691156
03467517/points/2d767b3fbb8a3053b8836869016d1afd.pts 03467517/expert_verified/points_label/2d767b3fbb8a3053b8836869016d1afd.seg 03467517
04379243/points/e0940f2229e42007d98e761e6d91dfc8.pts 04379243/expert_verified/points_label/e0940f2229e42007d98e761e6d91dfc8.seg 04379243
03001627/points/bb90094030f369e4305a3b2fd9173d6f.pts 03001627/expert_verified/points_label/bb90094030f369e4305a3b2fd9173d6f.seg 03001627
02958343/points/c6e3d9cf26016b5752aa494042b7c9db.pts 02958343/expert_verified/points_label/c6e3d9cf26016b5752aa494042b7c9db.seg 02958343
03001627/points/bd0fab2e72b445bd1e722bceee6e83aa.pts 03001627/expert_verified/points_label/bd0fab2e72b445bd1e722bceee6e83aa.seg 03001627
02691156/points/e86fd13a49f0ee0a62b600da24e0965.pts 02691156/expert_verified/points_label/e86fd13a49f0ee0a62b600da24e0965.seg 02691156
03001627/points/eeebe3fe14ee4d3aebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/eeebe3fe14ee4d3aebefe6b1d594ad2e.seg 03001627
04379243/points/398dbb0a34ca527871a782a4379556c7.pts 04379243/expert_verified/points_label/398dbb0a34ca527871a782a4379556c7.seg 04379243
04379243/points/737cc2beda4a023619fb4103277a6b93.pts 04379243/expert_verified/points_label/737cc2beda4a023619fb4103277a6b93.seg 04379243
03001627/points/3895b96949fd81c5f07fee5fc5c45ee2.pts 03001627/expert_verified/points_label/3895b96949fd81c5f07fee5fc5c45ee2.seg 03001627
04379243/points/bba5ce8555c8fa89ba18ade30e563d37.pts 04379243/expert_verified/points_label/bba5ce8555c8fa89ba18ade30e563d37.seg 04379243
04379243/points/cab027dd0162c5b7f1426260885dd0ef.pts 04379243/expert_verified/points_label/cab027dd0162c5b7f1426260885dd0ef.seg 04379243
04379243/points/75f2bc98aecf198974984b9cd0997a52.pts 04379243/expert_verified/points_label/75f2bc98aecf198974984b9cd0997a52.seg 04379243
04379243/points/8d4fe49d942ec85ff4b6538438a0b930.pts 04379243/expert_verified/points_label/8d4fe49d942ec85ff4b6538438a0b930.seg 04379243
03001627/points/89dd53d0377c28207f7114254c4286d2.pts 03001627/expert_verified/points_label/89dd53d0377c28207f7114254c4286d2.seg 03001627
03636649/points/a37695d83a39adb52866fbd701f50f71.pts 03636649/expert_verified/points_label/a37695d83a39adb52866fbd701f50f71.seg 03636649
04379243/points/f99ebf0f053140525a0e5699b3040a35.pts 04379243/expert_verified/points_label/f99ebf0f053140525a0e5699b3040a35.seg 04379243
03624134/points/bbfd2df3edce576e1e652fa812161367.pts 03624134/expert_verified/points_label/bbfd2df3edce576e1e652fa812161367.seg 03624134
04379243/points/f0d8620b49ea76db83130614d8020b3.pts 04379243/expert_verified/points_label/f0d8620b49ea76db83130614d8020b3.seg 04379243
04379243/points/d01a6b35a54c8f77dd986a55e273fa14.pts 04379243/expert_verified/points_label/d01a6b35a54c8f77dd986a55e273fa14.seg 04379243
03001627/points/2f6b0ddf12d1311795bea7c29e873d16.pts 03001627/expert_verified/points_label/2f6b0ddf12d1311795bea7c29e873d16.seg 03001627
03001627/points/5695fd37d1e673cebf964fc57f6a7d6d.pts 03001627/expert_verified/points_label/5695fd37d1e673cebf964fc57f6a7d6d.seg 03001627
03636649/points/746b82746c6a02cca5f600ed2cf472ac.pts 03636649/expert_verified/points_label/746b82746c6a02cca5f600ed2cf472ac.seg 03636649
03001627/points/bcc4ea0133864bfe4d4c0769270d8651.pts 03001627/expert_verified/points_label/bcc4ea0133864bfe4d4c0769270d8651.seg 03001627
03624134/points/81ba3f06ec38eaa46016d22b1dfacd4b.pts 03624134/expert_verified/points_label/81ba3f06ec38eaa46016d22b1dfacd4b.seg 03624134
04379243/points/2a2d6560f14a01c6afac72146bbc9d59.pts 04379243/expert_verified/points_label/2a2d6560f14a01c6afac72146bbc9d59.seg 04379243
04379243/points/856e86709df98497dcfcef693e7ec696.pts 04379243/expert_verified/points_label/856e86709df98497dcfcef693e7ec696.seg 04379243
03948459/points/7418810de4b13e8430b6ca3ac82edfa3.pts 03948459/expert_verified/points_label/7418810de4b13e8430b6ca3ac82edfa3.seg 03948459
03001627/points/11e0f0dfd3d0b22130ddb6ead95f49cc.pts 03001627/expert_verified/points_label/11e0f0dfd3d0b22130ddb6ead95f49cc.seg 03001627
04379243/points/5c6748b094725d9af008d8a3590fb522.pts 04379243/expert_verified/points_label/5c6748b094725d9af008d8a3590fb522.seg 04379243
04379243/points/17f3a2945d6550cbf7628281ecb18112.pts 04379243/expert_verified/points_label/17f3a2945d6550cbf7628281ecb18112.seg 04379243
04379243/points/889c9aedc4ba47592fb02b79d375eea5.pts 04379243/expert_verified/points_label/889c9aedc4ba47592fb02b79d375eea5.seg 04379243
04379243/points/c0b74c61865b563067dc358060e3c47b.pts 04379243/expert_verified/points_label/c0b74c61865b563067dc358060e3c47b.seg 04379243
03636649/points/783b81aa54a69a26d42b9650f19dd425.pts 03636649/expert_verified/points_label/783b81aa54a69a26d42b9650f19dd425.seg 03636649
03467517/points/8b8b084109eef6d81082f2ea630bf69e.pts 03467517/expert_verified/points_label/8b8b084109eef6d81082f2ea630bf69e.seg 03467517
03001627/points/8a9af7d8a83d90fcd53e36731300f5b4.pts 03001627/expert_verified/points_label/8a9af7d8a83d90fcd53e36731300f5b4.seg 03001627
03001627/points/47aca56ff3a7b8a71a782a4379556c7.pts 03001627/expert_verified/points_label/47aca56ff3a7b8a71a782a4379556c7.seg 03001627
03001627/points/9fae8d94a028e9ec2818b21315fe1bde.pts 03001627/expert_verified/points_label/9fae8d94a028e9ec2818b21315fe1bde.seg 03001627
03001627/points/9a41550ba7dd31e3bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a41550ba7dd31e3bf80985a99195eb8.seg 03001627
03001627/points/184b4797cea77beb5ca1c42bb8ac17a.pts 03001627/expert_verified/points_label/184b4797cea77beb5ca1c42bb8ac17a.seg 03001627
04379243/points/bc1ff7fc750617d690f7bef12e52ac08.pts 04379243/expert_verified/points_label/bc1ff7fc750617d690f7bef12e52ac08.seg 04379243
02691156/points/5fb64e3fc0abe449ca8607f540cc62ba.pts 02691156/expert_verified/points_label/5fb64e3fc0abe449ca8607f540cc62ba.seg 02691156
03001627/points/2e0beb3b6927a2b7e45ef4135c266a12.pts 03001627/expert_verified/points_label/2e0beb3b6927a2b7e45ef4135c266a12.seg 03001627
03467517/points/a38684b166ce2c77c155f88004a92bc8.pts 03467517/expert_verified/points_label/a38684b166ce2c77c155f88004a92bc8.seg 03467517
02691156/points/b590adb6d3486f6e90b1d6deb98feec6.pts 02691156/expert_verified/points_label/b590adb6d3486f6e90b1d6deb98feec6.seg 02691156
03636649/points/9d41e23f00d11d153033d35b49a20c8.pts 03636649/expert_verified/points_label/9d41e23f00d11d153033d35b49a20c8.seg 03636649
03001627/points/f4b141ab64a6c4e771a782a4379556c7.pts 03001627/expert_verified/points_label/f4b141ab64a6c4e771a782a4379556c7.seg 03001627
03948459/points/19e45672a3109f18be4927dbd39f74e9.pts 03948459/expert_verified/points_label/19e45672a3109f18be4927dbd39f74e9.seg 03948459
04379243/points/58475b1b20ece0c5eeb8d422649e5f2b.pts 04379243/expert_verified/points_label/58475b1b20ece0c5eeb8d422649e5f2b.seg 04379243
04379243/points/400393a56fc243c442c39a4fb8d01418.pts 04379243/expert_verified/points_label/400393a56fc243c442c39a4fb8d01418.seg 04379243
03001627/points/a128eda00983dd01fb7d9615be5ab4b0.pts 03001627/expert_verified/points_label/a128eda00983dd01fb7d9615be5ab4b0.seg 03001627
04379243/points/6af9a593129b028eb67e68783d58425a.pts 04379243/expert_verified/points_label/6af9a593129b028eb67e68783d58425a.seg 04379243
03001627/points/40f188600cf8362b654ea6737b0d3597.pts 03001627/expert_verified/points_label/40f188600cf8362b654ea6737b0d3597.seg 03001627
04379243/points/a4af8f822fa8d95456c08464b83f209e.pts 04379243/expert_verified/points_label/a4af8f822fa8d95456c08464b83f209e.seg 04379243
03001627/points/d9558dccfe8e3381e45ef4135c266a12.pts 03001627/expert_verified/points_label/d9558dccfe8e3381e45ef4135c266a12.seg 03001627
04379243/points/631028ddb76eed4dbb0085d0daabdaea.pts 04379243/expert_verified/points_label/631028ddb76eed4dbb0085d0daabdaea.seg 04379243
03001627/points/8967e65c1541d1874aa7f42ef07f614e.pts 03001627/expert_verified/points_label/8967e65c1541d1874aa7f42ef07f614e.seg 03001627
04379243/points/38feb6b209579f6faadbf8208284c675.pts 04379243/expert_verified/points_label/38feb6b209579f6faadbf8208284c675.seg 04379243
03624134/points/60277f4060b8703e4e18d7136dc2dc80.pts 03624134/expert_verified/points_label/60277f4060b8703e4e18d7136dc2dc80.seg 03624134
03467517/points/a78c3356a5dca4e7670b811945485012.pts 03467517/expert_verified/points_label/a78c3356a5dca4e7670b811945485012.seg 03467517
03797390/points/645b0e2ef3b95979204df312eabf367f.pts 03797390/expert_verified/points_label/645b0e2ef3b95979204df312eabf367f.seg 03797390
03467517/points/bd6057c7ac1ef31193f0194265a9746c.pts 03467517/expert_verified/points_label/bd6057c7ac1ef31193f0194265a9746c.seg 03467517
03790512/points/bcbcfdad5e0e1d9ba88e8cb97b773125.pts 03790512/expert_verified/points_label/bcbcfdad5e0e1d9ba88e8cb97b773125.seg 03790512
03636649/points/761fb0822bb05bc8ee0cd746086d989.pts 03636649/expert_verified/points_label/761fb0822bb05bc8ee0cd746086d989.seg 03636649
03636649/points/be13324c84d2a9d72b151d8b52c53b90.pts 03636649/expert_verified/points_label/be13324c84d2a9d72b151d8b52c53b90.seg 03636649
04379243/points/7b3dfbd70333485d219a1300d9489f4e.pts 04379243/expert_verified/points_label/7b3dfbd70333485d219a1300d9489f4e.seg 04379243
04379243/points/22c5cbe6271736bffebad4f49b26ec52.pts 04379243/expert_verified/points_label/22c5cbe6271736bffebad4f49b26ec52.seg 04379243
02958343/points/4b7b3b54dc04df53c19f1e8ed99ac2fa.pts 02958343/expert_verified/points_label/4b7b3b54dc04df53c19f1e8ed99ac2fa.seg 02958343
03636649/points/947c6753d77d8082290e2f84c414e6be.pts 03636649/expert_verified/points_label/947c6753d77d8082290e2f84c414e6be.seg 03636649
02958343/points/36c2770d00fdd0bdf1ee968c9039cc3.pts 02958343/expert_verified/points_label/36c2770d00fdd0bdf1ee968c9039cc3.seg 02958343
03001627/points/4ac17ecd78880859e302b6082b0ffc09.pts 03001627/expert_verified/points_label/4ac17ecd78880859e302b6082b0ffc09.seg 03001627
03636649/points/70b78b9439a9de7530f6e0ede20c4525.pts 03636649/expert_verified/points_label/70b78b9439a9de7530f6e0ede20c4525.seg 03636649
04379243/points/d8be4b45afb21cf1616fb9ab42452112.pts 04379243/expert_verified/points_label/d8be4b45afb21cf1616fb9ab42452112.seg 04379243
02691156/points/fe266c740580c102ff9ce0c50c2cd25a.pts 02691156/expert_verified/points_label/fe266c740580c102ff9ce0c50c2cd25a.seg 02691156
02958343/points/30f4617775480bcce27281f3b76d1f5.pts 02958343/expert_verified/points_label/30f4617775480bcce27281f3b76d1f5.seg 02958343
03467517/points/34874708b51c7ed493f0194265a9746c.pts 03467517/expert_verified/points_label/34874708b51c7ed493f0194265a9746c.seg 03467517
04225987/points/abdc4a823b1f78c397f47f3057557cbe.pts 04225987/expert_verified/points_label/abdc4a823b1f78c397f47f3057557cbe.seg 04225987
03948459/points/14fe99eb0c105a90fc9c56fb43681c11.pts 03948459/expert_verified/points_label/14fe99eb0c105a90fc9c56fb43681c11.seg 03948459
04379243/points/f5aecb6607876495e03eb69820d1aaf2.pts 04379243/expert_verified/points_label/f5aecb6607876495e03eb69820d1aaf2.seg 04379243
03001627/points/3c81fab5678a3872327289c00b6dc9ca.pts 03001627/expert_verified/points_label/3c81fab5678a3872327289c00b6dc9ca.seg 03001627
04379243/points/fe3351c94fbab8ce3002761e7a3ba3bd.pts 04379243/expert_verified/points_label/fe3351c94fbab8ce3002761e7a3ba3bd.seg 04379243
04379243/points/5f0c33039269b7a9f0e84b9d9ad447e2.pts 04379243/expert_verified/points_label/5f0c33039269b7a9f0e84b9d9ad447e2.seg 04379243
03001627/points/fa7347547e290732bf65e1af50b5b7d4.pts 03001627/expert_verified/points_label/fa7347547e290732bf65e1af50b5b7d4.seg 03001627
04379243/points/9c33336af33fd905776d8bc79b9caa2c.pts 04379243/expert_verified/points_label/9c33336af33fd905776d8bc79b9caa2c.seg 04379243
03001627/points/1d828c69106609f8cd783766d090e665.pts 03001627/expert_verified/points_label/1d828c69106609f8cd783766d090e665.seg 03001627
04379243/points/5fbb7a5f01f646ca5830980abc1c717a.pts 04379243/expert_verified/points_label/5fbb7a5f01f646ca5830980abc1c717a.seg 04379243
03636649/points/777a686890d74b350359b4e03cfdfa.pts 03636649/expert_verified/points_label/777a686890d74b350359b4e03cfdfa.seg 03636649
02773838/points/3077a9b76724b6d35de21284bb515a83.pts 02773838/expert_verified/points_label/3077a9b76724b6d35de21284bb515a83.seg 02773838
03642806/points/b233163860361eda8cfacef5204026d6.pts 03642806/expert_verified/points_label/b233163860361eda8cfacef5204026d6.seg 03642806
02958343/points/f10f279643fbb3276a78cd0552215cff.pts 02958343/expert_verified/points_label/f10f279643fbb3276a78cd0552215cff.seg 02958343
02691156/points/2c64c521c114df40e51f766854841067.pts 02691156/expert_verified/points_label/2c64c521c114df40e51f766854841067.seg 02691156
03001627/points/3b8f2b955ee9a904b3c42e318f3affc.pts 03001627/expert_verified/points_label/3b8f2b955ee9a904b3c42e318f3affc.seg 03001627
04379243/points/2a64bd38a4e42f33dc43fde5155b3946.pts 04379243/expert_verified/points_label/2a64bd38a4e42f33dc43fde5155b3946.seg 04379243
03001627/points/52310bca00e6a3671201d487ecde379e.pts 03001627/expert_verified/points_label/52310bca00e6a3671201d487ecde379e.seg 03001627
03001627/points/5346017af72c1843169d299c5f567c18.pts 03001627/expert_verified/points_label/5346017af72c1843169d299c5f567c18.seg 03001627
02954340/points/c1436c38beba0005284432ce2f42f498.pts 02954340/expert_verified/points_label/c1436c38beba0005284432ce2f42f498.seg 02954340
03636649/points/34ce1de178694f87e76bc197b3a3ffc0.pts 03636649/expert_verified/points_label/34ce1de178694f87e76bc197b3a3ffc0.seg 03636649
03001627/points/8e7714615a4b1e6f82390c5f604e0d9b.pts 03001627/expert_verified/points_label/8e7714615a4b1e6f82390c5f604e0d9b.seg 03001627
03948459/points/a3e6dcfc074489fd8ec2966c0323533e.pts 03948459/expert_verified/points_label/a3e6dcfc074489fd8ec2966c0323533e.seg 03948459
02691156/points/3ad337dcef167024fe6302fece358e4a.pts 02691156/expert_verified/points_label/3ad337dcef167024fe6302fece358e4a.seg 02691156
04379243/points/124cc3b92266c2767156f312cf4e035e.pts 04379243/expert_verified/points_label/124cc3b92266c2767156f312cf4e035e.seg 04379243
04379243/points/ee5f0411fcff59951105a3fc18779f13.pts 04379243/expert_verified/points_label/ee5f0411fcff59951105a3fc18779f13.seg 04379243
04379243/points/b1117a83ebf5a4c9c337a931444a5063.pts 04379243/expert_verified/points_label/b1117a83ebf5a4c9c337a931444a5063.seg 04379243
03001627/points/fb847cd696ec711197f2016c3d6097c9.pts 03001627/expert_verified/points_label/fb847cd696ec711197f2016c3d6097c9.seg 03001627
02691156/points/50da48c8e7644508fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/50da48c8e7644508fca1f1143bb6bc17.seg 02691156
02958343/points/78c0bec338fa1c01d6b98bf27ff43caf.pts 02958343/expert_verified/points_label/78c0bec338fa1c01d6b98bf27ff43caf.seg 02958343
02691156/points/37fbd275a734ec1b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/37fbd275a734ec1b66cf1b4a8fc3914e.seg 02691156
03636649/points/e053e531fc4341b5fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e053e531fc4341b5fcb8d8c6d4df8143.seg 03636649
02691156/points/3db61220251b3c9de719b5362fe06bbb.pts 02691156/expert_verified/points_label/3db61220251b3c9de719b5362fe06bbb.seg 02691156
03642806/points/a7f983f1d0642745135a402b573354e4.pts 03642806/expert_verified/points_label/a7f983f1d0642745135a402b573354e4.seg 03642806
03001627/points/4e26eab28703c12bdd5f3f2440a93d21.pts 03001627/expert_verified/points_label/4e26eab28703c12bdd5f3f2440a93d21.seg 03001627
04225987/points/24e46e195f4907887a70e5e6aa241c88.pts 04225987/expert_verified/points_label/24e46e195f4907887a70e5e6aa241c88.seg 04225987
02691156/points/3ab1e94b6c3a1730c56cc5a87f567365.pts 02691156/expert_verified/points_label/3ab1e94b6c3a1730c56cc5a87f567365.seg 02691156
03001627/points/61b984febe54b752d61420a53a0cb96d.pts 03001627/expert_verified/points_label/61b984febe54b752d61420a53a0cb96d.seg 03001627
04379243/points/adf574f947f00bdd548b2639ebc3e759.pts 04379243/expert_verified/points_label/adf574f947f00bdd548b2639ebc3e759.seg 04379243
03001627/points/ef76b9cbf76bad40586ef70b3cee4240.pts 03001627/expert_verified/points_label/ef76b9cbf76bad40586ef70b3cee4240.seg 03001627
04379243/points/abef0c609ad3e9c2edea4b985280bcc1.pts 04379243/expert_verified/points_label/abef0c609ad3e9c2edea4b985280bcc1.seg 04379243
02773838/points/1b84dededd445058e44a5473032f38f.pts 02773838/expert_verified/points_label/1b84dededd445058e44a5473032f38f.seg 02773838
04379243/points/cd09a9641ea97d873823cce3247aa03b.pts 04379243/expert_verified/points_label/cd09a9641ea97d873823cce3247aa03b.seg 04379243
03636649/points/6aa1ce4e245001589f1a71e46bbde97c.pts 03636649/expert_verified/points_label/6aa1ce4e245001589f1a71e46bbde97c.seg 03636649
04379243/points/bb1aa2cdf216d348e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/bb1aa2cdf216d348e76bc197b3a3ffc0.seg 04379243
04379243/points/da1e75a8647bfd919778416969ddad32.pts 04379243/expert_verified/points_label/da1e75a8647bfd919778416969ddad32.seg 04379243
02958343/points/3d0308da43d52e3ef56f8ea3d9016e55.pts 02958343/expert_verified/points_label/3d0308da43d52e3ef56f8ea3d9016e55.seg 02958343
04379243/points/1ca75076bcebfac76c3484ac7eef025f.pts 04379243/expert_verified/points_label/1ca75076bcebfac76c3484ac7eef025f.seg 04379243
02691156/points/97ec5b82d9757b639cb1b92881e8e76.pts 02691156/expert_verified/points_label/97ec5b82d9757b639cb1b92881e8e76.seg 02691156
02691156/points/75db11c354c6342aad01ec966c80ac91.pts 02691156/expert_verified/points_label/75db11c354c6342aad01ec966c80ac91.seg 02691156
02691156/points/caf80ecbad22a7384e1799d9d4d697c3.pts 02691156/expert_verified/points_label/caf80ecbad22a7384e1799d9d4d697c3.seg 02691156
03001627/points/d6e0a95f00c7af6fbae0ffb97058b7cc.pts 03001627/expert_verified/points_label/d6e0a95f00c7af6fbae0ffb97058b7cc.seg 03001627
04379243/points/fa72e9cf7308066b1c072ac0b83fe07a.pts 04379243/expert_verified/points_label/fa72e9cf7308066b1c072ac0b83fe07a.seg 04379243
03790512/points/455485399ab75f93429f1c522640e6f0.pts 03790512/expert_verified/points_label/455485399ab75f93429f1c522640e6f0.seg 03790512
03642806/points/241ec8a746dd1cfc78f71a335ebabfa5.pts 03642806/expert_verified/points_label/241ec8a746dd1cfc78f71a335ebabfa5.seg 03642806
04379243/points/c6575b4c39a341c698d5fc0473d00a1c.pts 04379243/expert_verified/points_label/c6575b4c39a341c698d5fc0473d00a1c.seg 04379243
02958343/points/219a0021526791d18bb5c0bf5eec83fc.pts 02958343/expert_verified/points_label/219a0021526791d18bb5c0bf5eec83fc.seg 02958343
02691156/points/49917fb82beca4beca8607f540cc62ba.pts 02691156/expert_verified/points_label/49917fb82beca4beca8607f540cc62ba.seg 02691156
03636649/points/dac278ab197b5efefaa6996ece0d86f4.pts 03636649/expert_verified/points_label/dac278ab197b5efefaa6996ece0d86f4.seg 03636649
03467517/points/f146c58eaa06f5e4d57700c05b1862d8.pts 03467517/expert_verified/points_label/f146c58eaa06f5e4d57700c05b1862d8.seg 03467517
04379243/points/aaf6be1d92a8c61fdcfcef693e7ec696.pts 04379243/expert_verified/points_label/aaf6be1d92a8c61fdcfcef693e7ec696.seg 04379243
03001627/points/46789c1fb150dfaf51f77a6d7299806.pts 03001627/expert_verified/points_label/46789c1fb150dfaf51f77a6d7299806.seg 03001627
03790512/points/4a2f0b20ef680347395d58407f193ba.pts 03790512/expert_verified/points_label/4a2f0b20ef680347395d58407f193ba.seg 03790512
04379243/points/28ce06aa6f25b39f2d19175e7d19b7cb.pts 04379243/expert_verified/points_label/28ce06aa6f25b39f2d19175e7d19b7cb.seg 04379243
02958343/points/1710ff46ca275e171df27141dea8c9a.pts 02958343/expert_verified/points_label/1710ff46ca275e171df27141dea8c9a.seg 02958343
03636649/points/b57bcdb88c669663ec2a7a1f5fe7365d.pts 03636649/expert_verified/points_label/b57bcdb88c669663ec2a7a1f5fe7365d.seg 03636649
04379243/points/c348d279fd22730a9741b7ee128375de.pts 04379243/expert_verified/points_label/c348d279fd22730a9741b7ee128375de.seg 04379243
03001627/points/76fe7cf10c5dbf1edcb466b6f48b5810.pts 03001627/expert_verified/points_label/76fe7cf10c5dbf1edcb466b6f48b5810.seg 03001627
04379243/points/7727cc0cb47705632dfc2f8d5d30193c.pts 04379243/expert_verified/points_label/7727cc0cb47705632dfc2f8d5d30193c.seg 04379243
03797390/points/586e67c53f181dc22adf8abaa25e0215.pts 03797390/expert_verified/points_label/586e67c53f181dc22adf8abaa25e0215.seg 03797390
04379243/points/d9b418e6ec14dbf50efffb055ed6bd1.pts 04379243/expert_verified/points_label/d9b418e6ec14dbf50efffb055ed6bd1.seg 04379243
04379243/points/f52e52094d8240b2dcfcef693e7ec696.pts 04379243/expert_verified/points_label/f52e52094d8240b2dcfcef693e7ec696.seg 04379243
02691156/points/821309c2037b49135fab3f99161dc2c2.pts 02691156/expert_verified/points_label/821309c2037b49135fab3f99161dc2c2.seg 02691156
02954340/points/254e230d31a62470a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/254e230d31a62470a52821bf1aa3b19a.seg 02954340
02691156/points/e8de6c58f4a772d771d03b466c72ce41.pts 02691156/expert_verified/points_label/e8de6c58f4a772d771d03b466c72ce41.seg 02691156
03642806/points/f1c6801e84c85a07bfb149497503af.pts 03642806/expert_verified/points_label/f1c6801e84c85a07bfb149497503af.seg 03642806
02691156/points/a04d10b24ede5e9a3de778e85611513b.pts 02691156/expert_verified/points_label/a04d10b24ede5e9a3de778e85611513b.seg 02691156
03467517/points/c8acdfaec5008118343b0b12983b9982.pts 03467517/expert_verified/points_label/c8acdfaec5008118343b0b12983b9982.seg 03467517
03001627/points/9c3e53d9d1e653c0bf80985a99195eb8.pts 03001627/expert_verified/points_label/9c3e53d9d1e653c0bf80985a99195eb8.seg 03001627
02691156/points/123bd9e948881939c38a1d3458dafa1b.pts 02691156/expert_verified/points_label/123bd9e948881939c38a1d3458dafa1b.seg 02691156
03948459/points/abc7a1373f4b30291adcc40d88daf7c8.pts 03948459/expert_verified/points_label/abc7a1373f4b30291adcc40d88daf7c8.seg 03948459
03636649/points/c906a9c7ae536a0c7fb7f79251dd7727.pts 03636649/expert_verified/points_label/c906a9c7ae536a0c7fb7f79251dd7727.seg 03636649
03797390/points/e71102b6da1d63f3a363b55cbd344baa.pts 03797390/expert_verified/points_label/e71102b6da1d63f3a363b55cbd344baa.seg 03797390
03642806/points/22389f9c3c049ce757c29983a611b1c6.pts 03642806/expert_verified/points_label/22389f9c3c049ce757c29983a611b1c6.seg 03642806
04379243/points/5c2c29fd07c365afe5c65540d3456093.pts 04379243/expert_verified/points_label/5c2c29fd07c365afe5c65540d3456093.seg 04379243
03001627/points/9a8dfc7a6831749f504721639e19f609.pts 03001627/expert_verified/points_label/9a8dfc7a6831749f504721639e19f609.seg 03001627
03001627/points/d49ce87d43cf4c8f1679065e1c457f94.pts 03001627/expert_verified/points_label/d49ce87d43cf4c8f1679065e1c457f94.seg 03001627
02691156/points/dfa36bffe436a98ee0534173b9189765.pts 02691156/expert_verified/points_label/dfa36bffe436a98ee0534173b9189765.seg 02691156
04379243/points/987b7b49a1435a4b1b17743c18fb63dc.pts 04379243/expert_verified/points_label/987b7b49a1435a4b1b17743c18fb63dc.seg 04379243
04379243/points/8d0d7787f4babee7e66285d36ebb986.pts 04379243/expert_verified/points_label/8d0d7787f4babee7e66285d36ebb986.seg 04379243
04379243/points/4f06092100d0164013d2510999d0f1d2.pts 04379243/expert_verified/points_label/4f06092100d0164013d2510999d0f1d2.seg 04379243
02958343/points/fce2b933f93d132f4f45033b2f001552.pts 02958343/expert_verified/points_label/fce2b933f93d132f4f45033b2f001552.seg 02958343
04379243/points/3817a222e96acc4ca78510b72d2281ea.pts 04379243/expert_verified/points_label/3817a222e96acc4ca78510b72d2281ea.seg 04379243
03001627/points/7ee09fdece7d9142afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/7ee09fdece7d9142afdb9a672b7d3b8a.seg 03001627
04379243/points/676d05aaaeecb8a04b3c42e318f3affc.pts 04379243/expert_verified/points_label/676d05aaaeecb8a04b3c42e318f3affc.seg 04379243
03624134/points/6813197ad5e7011fcc34b900bb2492e.pts 03624134/expert_verified/points_label/6813197ad5e7011fcc34b900bb2492e.seg 03624134
04379243/points/ea367e390741fc38dcfcef693e7ec696.pts 04379243/expert_verified/points_label/ea367e390741fc38dcfcef693e7ec696.seg 04379243
04379243/points/2e5ac0552fa296c43bbab77a66bc3671.pts 04379243/expert_verified/points_label/2e5ac0552fa296c43bbab77a66bc3671.seg 04379243
03467517/points/32a337387527f39193f0194265a9746c.pts 03467517/expert_verified/points_label/32a337387527f39193f0194265a9746c.seg 03467517
03001627/points/97cd4ed02e022ce7174150bd56e389a8.pts 03001627/expert_verified/points_label/97cd4ed02e022ce7174150bd56e389a8.seg 03001627
04379243/points/88e06a85e2a0f99fa7e7cb173e141227.pts 04379243/expert_verified/points_label/88e06a85e2a0f99fa7e7cb173e141227.seg 04379243
04379243/points/c5a02d586ea431a1e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/c5a02d586ea431a1e76bc197b3a3ffc0.seg 04379243
03001627/points/bcdcb4928e07e4174a623eb2e3317415.pts 03001627/expert_verified/points_label/bcdcb4928e07e4174a623eb2e3317415.seg 03001627
02691156/points/934dd5529c22cd05bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/934dd5529c22cd05bc0909d98a1ff2b4.seg 02691156
03001627/points/e696f4c7cd88b8b52ff834514c92e8fd.pts 03001627/expert_verified/points_label/e696f4c7cd88b8b52ff834514c92e8fd.seg 03001627
02691156/points/93ba822e84586999e3375a6b96a1d765.pts 02691156/expert_verified/points_label/93ba822e84586999e3375a6b96a1d765.seg 02691156
02958343/points/3ac664a7486a0bdff200a72c9245aee7.pts 02958343/expert_verified/points_label/3ac664a7486a0bdff200a72c9245aee7.seg 02958343
02691156/points/545cadae487b55bbc46ba5100bcdc520.pts 02691156/expert_verified/points_label/545cadae487b55bbc46ba5100bcdc520.seg 02691156
03001627/points/c47f71319ead4eb8a4fb72f4f3b0e317.pts 03001627/expert_verified/points_label/c47f71319ead4eb8a4fb72f4f3b0e317.seg 03001627
04379243/points/39bb09201e0cd201c17e7f250c5222bd.pts 04379243/expert_verified/points_label/39bb09201e0cd201c17e7f250c5222bd.seg 04379243
04379243/points/13782b95eeefcedacf004563556ddb36.pts 04379243/expert_verified/points_label/13782b95eeefcedacf004563556ddb36.seg 04379243
03001627/points/3cc90d903e0ec7aa61e11d707ecb7fa0.pts 03001627/expert_verified/points_label/3cc90d903e0ec7aa61e11d707ecb7fa0.seg 03001627
04379243/points/4079aaabaa6451a2765ca89770f206ec.pts 04379243/expert_verified/points_label/4079aaabaa6451a2765ca89770f206ec.seg 04379243
04379243/points/4bbf789edb243cafc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/4bbf789edb243cafc955e5ed03ef3a2f.seg 04379243
02773838/points/6187bd900c3bc002ed13f430b2941481.pts 02773838/expert_verified/points_label/6187bd900c3bc002ed13f430b2941481.seg 02773838
04379243/points/6dc6bb97c387b2f3af4e8812cf1b9e1.pts 04379243/expert_verified/points_label/6dc6bb97c387b2f3af4e8812cf1b9e1.seg 04379243
03467517/points/9c260623916034b6f7d037d5768b173f.pts 03467517/expert_verified/points_label/9c260623916034b6f7d037d5768b173f.seg 03467517
02691156/points/8d5c3d38de9c3685f2e77d54f4da142.pts 02691156/expert_verified/points_label/8d5c3d38de9c3685f2e77d54f4da142.seg 02691156
04379243/points/6152e14b042aa17546f41dc2aaef556b.pts 04379243/expert_verified/points_label/6152e14b042aa17546f41dc2aaef556b.seg 04379243
03467517/points/68a8bf89972cd337a77e8142614cdaae.pts 03467517/expert_verified/points_label/68a8bf89972cd337a77e8142614cdaae.seg 03467517
02691156/points/3d5354863690ac7eca27bba175814d1.pts 02691156/expert_verified/points_label/3d5354863690ac7eca27bba175814d1.seg 02691156
04379243/points/3411daa955306811d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/3411daa955306811d93768e7b9b1eabf.seg 04379243
04379243/points/8594658920d6ea7b23656ce81843.pts 04379243/expert_verified/points_label/8594658920d6ea7b23656ce81843.seg 04379243
02691156/points/a074750e28ed3818203936772104a82d.pts 02691156/expert_verified/points_label/a074750e28ed3818203936772104a82d.seg 02691156
04379243/points/fcd4d0e1777f4841dcfcef693e7ec696.pts 04379243/expert_verified/points_label/fcd4d0e1777f4841dcfcef693e7ec696.seg 04379243
03948459/points/708e38e7b733fd22bfae4699de9cb91a.pts 03948459/expert_verified/points_label/708e38e7b733fd22bfae4699de9cb91a.seg 03948459
04379243/points/3c4e1361b066ea3b8ca998f0f87d0c84.pts 04379243/expert_verified/points_label/3c4e1361b066ea3b8ca998f0f87d0c84.seg 04379243
03624134/points/38798b7013607bbf1e0b76f10c6e38af.pts 03624134/expert_verified/points_label/38798b7013607bbf1e0b76f10c6e38af.seg 03624134
02691156/points/2176fa9f69e5e1dcca8607f540cc62ba.pts 02691156/expert_verified/points_label/2176fa9f69e5e1dcca8607f540cc62ba.seg 02691156
03467517/points/8dd7df733a5ba17acae98171fea031ef.pts 03467517/expert_verified/points_label/8dd7df733a5ba17acae98171fea031ef.seg 03467517
03001627/points/d3f31fd0fc99f45e8b3f6b4a44a70e52.pts 03001627/expert_verified/points_label/d3f31fd0fc99f45e8b3f6b4a44a70e52.seg 03001627
02691156/points/118e8142a8cb1fe19a4a28ef635593ce.pts 02691156/expert_verified/points_label/118e8142a8cb1fe19a4a28ef635593ce.seg 02691156
03624134/points/de62211649b4cced49384f9741ad64d8.pts 03624134/expert_verified/points_label/de62211649b4cced49384f9741ad64d8.seg 03624134
03642806/points/7a4342f61ed7b153341aafe10fd0cbd4.pts 03642806/expert_verified/points_label/7a4342f61ed7b153341aafe10fd0cbd4.seg 03642806
03001627/points/ba56f02dee485974c242632b2a8c3129.pts 03001627/expert_verified/points_label/ba56f02dee485974c242632b2a8c3129.seg 03001627
04379243/points/97b7baeb8a172de42f56f09e5bc67bee.pts 04379243/expert_verified/points_label/97b7baeb8a172de42f56f09e5bc67bee.seg 04379243
04379243/points/7b2af227264af938d42b9650f19dd425.pts 04379243/expert_verified/points_label/7b2af227264af938d42b9650f19dd425.seg 04379243
04379243/points/e25fdb977fb867fdc3bd24f986301745.pts 04379243/expert_verified/points_label/e25fdb977fb867fdc3bd24f986301745.seg 04379243
03467517/points/33da9c54f43be3e17693a84bff425e3.pts 03467517/expert_verified/points_label/33da9c54f43be3e17693a84bff425e3.seg 03467517
02691156/points/e1e5cfcabcbe26a03087f84b199fd297.pts 02691156/expert_verified/points_label/e1e5cfcabcbe26a03087f84b199fd297.seg 02691156
03636649/points/ba05811f301cdd791735ea0e092a805a.pts 03636649/expert_verified/points_label/ba05811f301cdd791735ea0e092a805a.seg 03636649
03001627/points/6678f63c9b584a549d9e5580ae9f8738.pts 03001627/expert_verified/points_label/6678f63c9b584a549d9e5580ae9f8738.seg 03001627
04379243/points/b6b8ede77085c0a95bea7c29e873d16.pts 04379243/expert_verified/points_label/b6b8ede77085c0a95bea7c29e873d16.seg 04379243
02691156/points/d81042a53dd1cc5bd90bfc986bc4c94d.pts 02691156/expert_verified/points_label/d81042a53dd1cc5bd90bfc986bc4c94d.seg 02691156
03001627/points/37b432326fecc8a1327289c00b6dc9ca.pts 03001627/expert_verified/points_label/37b432326fecc8a1327289c00b6dc9ca.seg 03001627
03636649/points/c898f9b1dddbb8801735ea0e092a805a.pts 03636649/expert_verified/points_label/c898f9b1dddbb8801735ea0e092a805a.seg 03636649
03001627/points/5d02aed0e9c93e829b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/5d02aed0e9c93e829b9f2eb77f5e247e.seg 03001627
03001627/points/9a864d5de972a8c7cb686b8b855fed61.pts 03001627/expert_verified/points_label/9a864d5de972a8c7cb686b8b855fed61.seg 03001627
04379243/points/b14a14cc2f3c38c9e3def9c422df2282.pts 04379243/expert_verified/points_label/b14a14cc2f3c38c9e3def9c422df2282.seg 04379243
04379243/points/f2893a87ec37f8b3781cb4570305e329.pts 04379243/expert_verified/points_label/f2893a87ec37f8b3781cb4570305e329.seg 04379243
02691156/points/3fa511e1882e41eeca8607f540cc62ba.pts 02691156/expert_verified/points_label/3fa511e1882e41eeca8607f540cc62ba.seg 02691156
02691156/points/444d67950ff9a4cc1139bebb00fe5be8.pts 02691156/expert_verified/points_label/444d67950ff9a4cc1139bebb00fe5be8.seg 02691156
03001627/points/3d3b7f63f5525b1ae37f5a622d383617.pts 03001627/expert_verified/points_label/3d3b7f63f5525b1ae37f5a622d383617.seg 03001627
03001627/points/30beaf15d2d2beb1febad4f49b26ec52.pts 03001627/expert_verified/points_label/30beaf15d2d2beb1febad4f49b26ec52.seg 03001627
04379243/points/59f04ddbd896f4f5430644dfe647c381.pts 04379243/expert_verified/points_label/59f04ddbd896f4f5430644dfe647c381.seg 04379243
04379243/points/eb9b9b8d186a974a7afee304cce81d6f.pts 04379243/expert_verified/points_label/eb9b9b8d186a974a7afee304cce81d6f.seg 04379243
03790512/points/7c4fc3a05d5fc8b1d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/7c4fc3a05d5fc8b1d0f568c31c1cd62a.seg 03790512
04379243/points/68142013a4f5e7c2febad4f49b26ec52.pts 04379243/expert_verified/points_label/68142013a4f5e7c2febad4f49b26ec52.seg 04379243
02958343/points/8053e014516531ddc3f500d7b182f6.pts 02958343/expert_verified/points_label/8053e014516531ddc3f500d7b182f6.seg 02958343
02958343/points/1a3782ae4bd711b66b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/1a3782ae4bd711b66b418c7d9fedcaa9.seg 02958343
04379243/points/cc58de930acd321fac242c3aebc81b2f.pts 04379243/expert_verified/points_label/cc58de930acd321fac242c3aebc81b2f.seg 04379243
02691156/points/d4dac019726e980e203936772104a82d.pts 02691156/expert_verified/points_label/d4dac019726e980e203936772104a82d.seg 02691156
02954340/points/6e983d20e0bf80296829cd4082fbdbdf.pts 02954340/expert_verified/points_label/6e983d20e0bf80296829cd4082fbdbdf.seg 02954340
03636649/points/fad026744a6abb1937cf479d4bb58d.pts 03636649/expert_verified/points_label/fad026744a6abb1937cf479d4bb58d.seg 03636649
02958343/points/4d2d4e26349be1f3be2cbcda9b6dc9b2.pts 02958343/expert_verified/points_label/4d2d4e26349be1f3be2cbcda9b6dc9b2.seg 02958343
03636649/points/280fa01686e780ba3501c961e91ff6d7.pts 03636649/expert_verified/points_label/280fa01686e780ba3501c961e91ff6d7.seg 03636649
04379243/points/f02907c5c42e1e766f1e07a56c129dfc.pts 04379243/expert_verified/points_label/f02907c5c42e1e766f1e07a56c129dfc.seg 04379243
04379243/points/5f100571ffd90f8252b4875f731f71cd.pts 04379243/expert_verified/points_label/5f100571ffd90f8252b4875f731f71cd.seg 04379243
04379243/points/f718cb5d6202341dc183308b9aafe2ca.pts 04379243/expert_verified/points_label/f718cb5d6202341dc183308b9aafe2ca.seg 04379243
03642806/points/b436271050d647052f8d6d501b18a4b5.pts 03642806/expert_verified/points_label/b436271050d647052f8d6d501b18a4b5.seg 03642806
03001627/points/6dddf2b95ca09bf5febad4f49b26ec52.pts 03001627/expert_verified/points_label/6dddf2b95ca09bf5febad4f49b26ec52.seg 03001627
02691156/points/b812c2df636aa0218b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/b812c2df636aa0218b96ae1a0a8b84ec.seg 02691156
02958343/points/89edb3d434f4c983afe1d4530f4c6e24.pts 02958343/expert_verified/points_label/89edb3d434f4c983afe1d4530f4c6e24.seg 02958343
02958343/points/80ac9cc0d4c9dde3b7a7bc444c2d756b.pts 02958343/expert_verified/points_label/80ac9cc0d4c9dde3b7a7bc444c2d756b.seg 02958343
04379243/points/b62d45745434ac46c4cfe384be4426c3.pts 04379243/expert_verified/points_label/b62d45745434ac46c4cfe384be4426c3.seg 04379243
04379243/points/9c4afb731e910d3723500a5b036df62e.pts 04379243/expert_verified/points_label/9c4afb731e910d3723500a5b036df62e.seg 04379243
04379243/points/43fcddd5232a6021a56e8b79ca4e2911.pts 04379243/expert_verified/points_label/43fcddd5232a6021a56e8b79ca4e2911.seg 04379243
04379243/points/6724ae69c0bde4c09b7dad6c9c46bcf1.pts 04379243/expert_verified/points_label/6724ae69c0bde4c09b7dad6c9c46bcf1.seg 04379243
03001627/points/323fc7b1d2b44cb7ff2b8acf844d34d2.pts 03001627/expert_verified/points_label/323fc7b1d2b44cb7ff2b8acf844d34d2.seg 03001627
03001627/points/434cee44934612a81f98c0761af40e04.pts 03001627/expert_verified/points_label/434cee44934612a81f98c0761af40e04.seg 03001627
03636649/points/31dee666120727b0be78c8b300d2a963.pts 03636649/expert_verified/points_label/31dee666120727b0be78c8b300d2a963.seg 03636649
02958343/points/48f5446e6ac9c1b51f1446551412bde4.pts 02958343/expert_verified/points_label/48f5446e6ac9c1b51f1446551412bde4.seg 02958343
04379243/points/aa3eb180a4f6d8d42de421c2ab5cfb52.pts 04379243/expert_verified/points_label/aa3eb180a4f6d8d42de421c2ab5cfb52.seg 04379243
04379243/points/14e5e4db3246dacff12d7184a2ad3430.pts 04379243/expert_verified/points_label/14e5e4db3246dacff12d7184a2ad3430.seg 04379243
03001627/points/96c0ecd1ef80e818c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/96c0ecd1ef80e818c8687ff9b0b4e4ac.seg 03001627
04225987/points/d4c042d11f29dffa1082f2ea630bf69e.pts 04225987/expert_verified/points_label/d4c042d11f29dffa1082f2ea630bf69e.seg 04225987
03642806/points/7ebff305b2e93504239603972bcd2e7b.pts 03642806/expert_verified/points_label/7ebff305b2e93504239603972bcd2e7b.seg 03642806
03467517/points/369fc7f8d880e1b793f0194265a9746c.pts 03467517/expert_verified/points_label/369fc7f8d880e1b793f0194265a9746c.seg 03467517
04379243/points/25f69a74efbff4d071a782a4379556c7.pts 04379243/expert_verified/points_label/25f69a74efbff4d071a782a4379556c7.seg 04379243
04379243/points/7cd4844def36a9f5bc7589eefbdbc3c5.pts 04379243/expert_verified/points_label/7cd4844def36a9f5bc7589eefbdbc3c5.seg 04379243
03467517/points/5852a24dde24a8ef93f0194265a9746c.pts 03467517/expert_verified/points_label/5852a24dde24a8ef93f0194265a9746c.seg 03467517
03001627/points/df8440d8678f3a91c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/df8440d8678f3a91c8687ff9b0b4e4ac.seg 03001627
04379243/points/49bf25ff4401946524c10ba1eb690638.pts 04379243/expert_verified/points_label/49bf25ff4401946524c10ba1eb690638.seg 04379243
03001627/points/7eedcb6d76b8c23a9cdb421f6af95e5f.pts 03001627/expert_verified/points_label/7eedcb6d76b8c23a9cdb421f6af95e5f.seg 03001627
03797390/points/ff1a44e1c1785d618bca309f2c51966a.pts 03797390/expert_verified/points_label/ff1a44e1c1785d618bca309f2c51966a.seg 03797390
02958343/points/85f3dc3318f5200c8672c9b355cd2075.pts 02958343/expert_verified/points_label/85f3dc3318f5200c8672c9b355cd2075.seg 02958343
02691156/points/c9be9f07f5ae7c375d7629390efe0a2.pts 02691156/expert_verified/points_label/c9be9f07f5ae7c375d7629390efe0a2.seg 02691156
02691156/points/14cd2f1de7f68bf3ab550998f901c8e1.pts 02691156/expert_verified/points_label/14cd2f1de7f68bf3ab550998f901c8e1.seg 02691156
02958343/points/81fad64b8fd8f010b17445a1c29f6d34.pts 02958343/expert_verified/points_label/81fad64b8fd8f010b17445a1c29f6d34.seg 02958343
02958343/points/fe2ce22107693354f1cc1cb691702a23.pts 02958343/expert_verified/points_label/fe2ce22107693354f1cc1cb691702a23.seg 02958343
02691156/points/74cbf170c5f2fb587d9c9c8a8ba32919.pts 02691156/expert_verified/points_label/74cbf170c5f2fb587d9c9c8a8ba32919.seg 02691156
02691156/points/67dbb0de722cf5cd7a734abc5ba1db0f.pts 02691156/expert_verified/points_label/67dbb0de722cf5cd7a734abc5ba1db0f.seg 02691156
04379243/points/fa345f8f107d93b9ba70f71694a4b74c.pts 04379243/expert_verified/points_label/fa345f8f107d93b9ba70f71694a4b74c.seg 04379243
04379243/points/a45a7ba9a2842a55634c21965ee6bab.pts 04379243/expert_verified/points_label/a45a7ba9a2842a55634c21965ee6bab.seg 04379243
04379243/points/8d7ac6078989980fad16260d4d73b56.pts 04379243/expert_verified/points_label/8d7ac6078989980fad16260d4d73b56.seg 04379243
03001627/points/e803b31e2185d0405784b22e1081a3e1.pts 03001627/expert_verified/points_label/e803b31e2185d0405784b22e1081a3e1.seg 03001627
04379243/points/aaf3aeda0f848344b87028a4b477349f.pts 04379243/expert_verified/points_label/aaf3aeda0f848344b87028a4b477349f.seg 04379243
03636649/points/e94aab17400945413225afab722d9fd2.pts 03636649/expert_verified/points_label/e94aab17400945413225afab722d9fd2.seg 03636649
03001627/points/d2c465e85d2e8f1fcea003eff0268278.pts 03001627/expert_verified/points_label/d2c465e85d2e8f1fcea003eff0268278.seg 03001627
03001627/points/88376e3d3a23d263de29d28278a34a18.pts 03001627/expert_verified/points_label/88376e3d3a23d263de29d28278a34a18.seg 03001627
04379243/points/4775e71d37374444febad4f49b26ec52.pts 04379243/expert_verified/points_label/4775e71d37374444febad4f49b26ec52.seg 04379243
03636649/points/f12822778713f5e35b36bbc16e99b441.pts 03636649/expert_verified/points_label/f12822778713f5e35b36bbc16e99b441.seg 03636649
03636649/points/963e6743370d5c5c9b5d51fa8cce1753.pts 03636649/expert_verified/points_label/963e6743370d5c5c9b5d51fa8cce1753.seg 03636649
04379243/points/13c51c08c3695a09eda47978b73f5994.pts 04379243/expert_verified/points_label/13c51c08c3695a09eda47978b73f5994.seg 04379243
04379243/points/89827ac677337629ab610b0c94236463.pts 04379243/expert_verified/points_label/89827ac677337629ab610b0c94236463.seg 04379243
04379243/points/89b478643e53d3d6285c99063fc6fcf8.pts 04379243/expert_verified/points_label/89b478643e53d3d6285c99063fc6fcf8.seg 04379243
04379243/points/401cd99ace3b92fadf6cfab91d65bb91.pts 04379243/expert_verified/points_label/401cd99ace3b92fadf6cfab91d65bb91.seg 04379243
04379243/points/74c3d551e32a1cca664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/74c3d551e32a1cca664b3b9b23ddfcbc.seg 04379243
04379243/points/db64db160fd13a514e1a714ee619465a.pts 04379243/expert_verified/points_label/db64db160fd13a514e1a714ee619465a.seg 04379243
03001627/points/8e664a0bcaf9d2a45ca1aaa0789db621.pts 03001627/expert_verified/points_label/8e664a0bcaf9d2a45ca1aaa0789db621.seg 03001627
03001627/points/43897195d7f893d759c257be4c612509.pts 03001627/expert_verified/points_label/43897195d7f893d759c257be4c612509.seg 03001627
04379243/points/e6d8569c0957e7453002761e7a3ba3bd.pts 04379243/expert_verified/points_label/e6d8569c0957e7453002761e7a3ba3bd.seg 04379243
03636649/points/ead77648c9c7dbf8d42b9650f19dd425.pts 03636649/expert_verified/points_label/ead77648c9c7dbf8d42b9650f19dd425.seg 03636649
03636649/points/c54d3a5a9c8a655e46407779dbd69b2d.pts 03636649/expert_verified/points_label/c54d3a5a9c8a655e46407779dbd69b2d.seg 03636649
03001627/points/379f0efc898d7a7e9fe74a48bbc553d7.pts 03001627/expert_verified/points_label/379f0efc898d7a7e9fe74a48bbc553d7.seg 03001627
04379243/points/c1d44782ac45d6fe3671949e4f99cc76.pts 04379243/expert_verified/points_label/c1d44782ac45d6fe3671949e4f99cc76.seg 04379243
04379243/points/7b3b160dafe7e122d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/7b3b160dafe7e122d93768e7b9b1eabf.seg 04379243
03001627/points/7f271ecbdeb7610d637adadafee6f182.pts 03001627/expert_verified/points_label/7f271ecbdeb7610d637adadafee6f182.seg 03001627
02958343/points/df34c25a1e1abe9428044fe9244db50a.pts 02958343/expert_verified/points_label/df34c25a1e1abe9428044fe9244db50a.seg 02958343
03948459/points/98c0bd351e275b3c96893524e607761d.pts 03948459/expert_verified/points_label/98c0bd351e275b3c96893524e607761d.seg 03948459
03636649/points/b96c8cc6529167bfcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/b96c8cc6529167bfcb8d8c6d4df8143.seg 03636649
03624134/points/a33847e9c32c1afc93ac017b81605788.pts 03624134/expert_verified/points_label/a33847e9c32c1afc93ac017b81605788.seg 03624134
03001627/points/594d5b7f3e705a1ab3234e0da44b11e4.pts 03001627/expert_verified/points_label/594d5b7f3e705a1ab3234e0da44b11e4.seg 03001627
03001627/points/f0f04644e071d9348ca588a3264b9f86.pts 03001627/expert_verified/points_label/f0f04644e071d9348ca588a3264b9f86.seg 03001627
02691156/points/4bdb2c4fc6701174ca8607f540cc62ba.pts 02691156/expert_verified/points_label/4bdb2c4fc6701174ca8607f540cc62ba.seg 02691156
03001627/points/fc2a1c4c332f7731e45ef4135c266a12.pts 03001627/expert_verified/points_label/fc2a1c4c332f7731e45ef4135c266a12.seg 03001627
02691156/points/df68b8fb9f4531b42e690fa6dfd5d610.pts 02691156/expert_verified/points_label/df68b8fb9f4531b42e690fa6dfd5d610.seg 02691156
03642806/points/517de75577ac6e8a42b9615216f9a30d.pts 03642806/expert_verified/points_label/517de75577ac6e8a42b9615216f9a30d.seg 03642806
03001627/points/74cc57ea0e2e06dbe4106b1d06dc89b3.pts 03001627/expert_verified/points_label/74cc57ea0e2e06dbe4106b1d06dc89b3.seg 03001627
02691156/points/d72a483cf8a0cf2bbbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d72a483cf8a0cf2bbbf3143b1cb6076a.seg 02691156
03001627/points/9c7b2ed3770d1a6ea6fee8e2140acec9.pts 03001627/expert_verified/points_label/9c7b2ed3770d1a6ea6fee8e2140acec9.seg 03001627
04379243/points/28fb9a81898f88c4ae8375def5e736d8.pts 04379243/expert_verified/points_label/28fb9a81898f88c4ae8375def5e736d8.seg 04379243
03636649/points/c0b0d7e15d3dfab1733c22d8b8e1c33d.pts 03636649/expert_verified/points_label/c0b0d7e15d3dfab1733c22d8b8e1c33d.seg 03636649
03001627/points/bb04dc0b336abf4b263915c09bc4854f.pts 03001627/expert_verified/points_label/bb04dc0b336abf4b263915c09bc4854f.seg 03001627
03001627/points/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.pts 03001627/expert_verified/points_label/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.seg 03001627
04379243/points/86ad91ef08c53dd77189b31b3e8c8ef3.pts 04379243/expert_verified/points_label/86ad91ef08c53dd77189b31b3e8c8ef3.seg 04379243
03790512/points/80e717f07645a4a0b37378f3c85478b4.pts 03790512/expert_verified/points_label/80e717f07645a4a0b37378f3c85478b4.seg 03790512
02691156/points/7d226c520a29c7705e28caa3b26a73fd.pts 02691156/expert_verified/points_label/7d226c520a29c7705e28caa3b26a73fd.seg 02691156
04379243/points/89c095a52766ecb05d2ac47f638a4ea4.pts 04379243/expert_verified/points_label/89c095a52766ecb05d2ac47f638a4ea4.seg 04379243
04379243/points/7b92f6facc2a27bc84cc0348a73b80c3.pts 04379243/expert_verified/points_label/7b92f6facc2a27bc84cc0348a73b80c3.seg 04379243
04379243/points/d578287c4a9452efa9af104529ef47c3.pts 04379243/expert_verified/points_label/d578287c4a9452efa9af104529ef47c3.seg 04379243
03636649/points/1475fe59961fc726f096eadaad23f93d.pts 03636649/expert_verified/points_label/1475fe59961fc726f096eadaad23f93d.seg 03636649
03790512/points/7d75e8200565ffa7b37378f3c85478b4.pts 03790512/expert_verified/points_label/7d75e8200565ffa7b37378f3c85478b4.seg 03790512
04379243/points/852826a94cce36ea9f1deb04fb8ae481.pts 04379243/expert_verified/points_label/852826a94cce36ea9f1deb04fb8ae481.seg 04379243
03001627/points/9c50878c91aeb8126bb6bc0db07c71e8.pts 03001627/expert_verified/points_label/9c50878c91aeb8126bb6bc0db07c71e8.seg 03001627
02691156/points/ce827e4c857d553f71d03b466c72ce41.pts 02691156/expert_verified/points_label/ce827e4c857d553f71d03b466c72ce41.seg 02691156
03001627/points/3aab16309520fb21dc0a8cba62d9a78a.pts 03001627/expert_verified/points_label/3aab16309520fb21dc0a8cba62d9a78a.seg 03001627
03001627/points/697cfbe6e043136b737a00f007529fbf.pts 03001627/expert_verified/points_label/697cfbe6e043136b737a00f007529fbf.seg 03001627
04379243/points/fd7769d0eba554c53def89b32cef8e45.pts 04379243/expert_verified/points_label/fd7769d0eba554c53def89b32cef8e45.seg 04379243
03948459/points/d7e86e0e5b1982d4bf0ab4d7096d87f2.pts 03948459/expert_verified/points_label/d7e86e0e5b1982d4bf0ab4d7096d87f2.seg 03948459
03001627/points/70cb8d70d961ca48b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/70cb8d70d961ca48b04cb542e2c50eb4.seg 03001627
03636649/points/c26b7862f2afb7ee4b3c42e318f3affc.pts 03636649/expert_verified/points_label/c26b7862f2afb7ee4b3c42e318f3affc.seg 03636649
03624134/points/906b20dc0a5a5022714112b147c95c8b.pts 03624134/expert_verified/points_label/906b20dc0a5a5022714112b147c95c8b.seg 03624134
03001627/points/f5caa9b5ada31a8b3cf15c77de45986.pts 03001627/expert_verified/points_label/f5caa9b5ada31a8b3cf15c77de45986.seg 03001627
04379243/points/6110d87def4fa88c154c6bbaeb7d331f.pts 04379243/expert_verified/points_label/6110d87def4fa88c154c6bbaeb7d331f.seg 04379243
03642806/points/b5f6fd84a3f44ddb1aa47689117a61e1.pts 03642806/expert_verified/points_label/b5f6fd84a3f44ddb1aa47689117a61e1.seg 03642806
03001627/points/95317d46812e4ed4df5aea2392d894b4.pts 03001627/expert_verified/points_label/95317d46812e4ed4df5aea2392d894b4.seg 03001627
02691156/points/471ca950dbdf0c6c5f80f808704d6409.pts 02691156/expert_verified/points_label/471ca950dbdf0c6c5f80f808704d6409.seg 02691156
04379243/points/c9f85a671d551086d61f9b2773e1d72a.pts 04379243/expert_verified/points_label/c9f85a671d551086d61f9b2773e1d72a.seg 04379243
04379243/points/70f1b5f74faa9bda664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/70f1b5f74faa9bda664b3b9b23ddfcbc.seg 04379243
02691156/points/9a266b3a734e374687bf26680c510802.pts 02691156/expert_verified/points_label/9a266b3a734e374687bf26680c510802.seg 02691156
03001627/points/4c0983329afcd06f730e89ca0d2d13c3.pts 03001627/expert_verified/points_label/4c0983329afcd06f730e89ca0d2d13c3.seg 03001627
04379243/points/a7172fa4177661f4858699aaad4acee4.pts 04379243/expert_verified/points_label/a7172fa4177661f4858699aaad4acee4.seg 04379243
04379243/points/504d908a55f3e0c764810cc21086da42.pts 04379243/expert_verified/points_label/504d908a55f3e0c764810cc21086da42.seg 04379243
03948459/points/7ba9f65e926d5e3e6fe695987d47043.pts 03948459/expert_verified/points_label/7ba9f65e926d5e3e6fe695987d47043.seg 03948459
04379243/points/5b546ef5de5d10f3ecc9201d3d846bc1.pts 04379243/expert_verified/points_label/5b546ef5de5d10f3ecc9201d3d846bc1.seg 04379243
04379243/points/80f986ae572fce791429f9a19502375a.pts 04379243/expert_verified/points_label/80f986ae572fce791429f9a19502375a.seg 04379243
04379243/points/fd7a579772b195532de421c2ab5cfb52.pts 04379243/expert_verified/points_label/fd7a579772b195532de421c2ab5cfb52.seg 04379243
03001627/points/e09466e9c122dbfdf51f77a6d7299806.pts 03001627/expert_verified/points_label/e09466e9c122dbfdf51f77a6d7299806.seg 03001627
04379243/points/2a80c95b4bbcb73d87ed2480ebb0f3d2.pts 04379243/expert_verified/points_label/2a80c95b4bbcb73d87ed2480ebb0f3d2.seg 04379243
03467517/points/e0d74618e316b0f16d9376f644442e99.pts 03467517/expert_verified/points_label/e0d74618e316b0f16d9376f644442e99.seg 03467517
03001627/points/587ebb2aa71acfe644dd3aaee16d3f4c.pts 03001627/expert_verified/points_label/587ebb2aa71acfe644dd3aaee16d3f4c.seg 03001627
03467517/points/10d2c216c70b788485b61f146daff2fb.pts 03467517/expert_verified/points_label/10d2c216c70b788485b61f146daff2fb.seg 03467517
04379243/points/3c72ddd0dca19bbedcfcef693e7ec696.pts 04379243/expert_verified/points_label/3c72ddd0dca19bbedcfcef693e7ec696.seg 04379243
03001627/points/2742c0a5e984d92fa0dcc52ca811e565.pts 03001627/expert_verified/points_label/2742c0a5e984d92fa0dcc52ca811e565.seg 03001627
03624134/points/792f252dcb06f042dd56c1edf3f6e336.pts 03624134/expert_verified/points_label/792f252dcb06f042dd56c1edf3f6e336.seg 03624134
02691156/points/8fa9e2e8dbed43911f32208e53f871eb.pts 02691156/expert_verified/points_label/8fa9e2e8dbed43911f32208e53f871eb.seg 02691156
03001627/points/d4f5c3e3eab52d0a3334fb6668ccd834.pts 03001627/expert_verified/points_label/d4f5c3e3eab52d0a3334fb6668ccd834.seg 03001627
03642806/points/520d98e360cf44ec8139dd63d55edc44.pts 03642806/expert_verified/points_label/520d98e360cf44ec8139dd63d55edc44.seg 03642806
03467517/points/2eba922263fc1580cc010a80df5d3c87.pts 03467517/expert_verified/points_label/2eba922263fc1580cc010a80df5d3c87.seg 03467517
04379243/points/53c11596c3fc36a8a5094cb6d104b35.pts 04379243/expert_verified/points_label/53c11596c3fc36a8a5094cb6d104b35.seg 04379243
03467517/points/265009e163bf5c6f69da8e7f9a803d12.pts 03467517/expert_verified/points_label/265009e163bf5c6f69da8e7f9a803d12.seg 03467517
04379243/points/fbdf9bffeb353474c3a767747b75e56.pts 04379243/expert_verified/points_label/fbdf9bffeb353474c3a767747b75e56.seg 04379243
03636649/points/b4af7e9a7338a9a3225afab722d9fd2.pts 03636649/expert_verified/points_label/b4af7e9a7338a9a3225afab722d9fd2.seg 03636649
03001627/points/55eeb952519ceb87c3bd24f986301745.pts 03001627/expert_verified/points_label/55eeb952519ceb87c3bd24f986301745.seg 03001627
04379243/points/2259e09ebd0ed2befebad4f49b26ec52.pts 04379243/expert_verified/points_label/2259e09ebd0ed2befebad4f49b26ec52.seg 04379243
04379243/points/63fedc0334f5552dbec3a71604e140e3.pts 04379243/expert_verified/points_label/63fedc0334f5552dbec3a71604e140e3.seg 04379243
03001627/points/70ac5cb405df84575e62305d14755686.pts 03001627/expert_verified/points_label/70ac5cb405df84575e62305d14755686.seg 03001627
03001627/points/3f41b4339ebd59c1c397356311cbeea4.pts 03001627/expert_verified/points_label/3f41b4339ebd59c1c397356311cbeea4.seg 03001627
04379243/points/10bb44a54a12a74e4719088c8e42c6ab.pts 04379243/expert_verified/points_label/10bb44a54a12a74e4719088c8e42c6ab.seg 04379243
04379243/points/a83cda80e5c5a0fc3719086e0b4ab8be.pts 04379243/expert_verified/points_label/a83cda80e5c5a0fc3719086e0b4ab8be.seg 04379243
04379243/points/74983e99e7606eb114708467db3d00e2.pts 04379243/expert_verified/points_label/74983e99e7606eb114708467db3d00e2.seg 04379243
03001627/points/e052eaa1d5bbe795ded10515704c9720.pts 03001627/expert_verified/points_label/e052eaa1d5bbe795ded10515704c9720.seg 03001627
02691156/points/35892510dcd7cebb87bf26680c510802.pts 02691156/expert_verified/points_label/35892510dcd7cebb87bf26680c510802.seg 02691156
03001627/points/7f73cc6c1c9121a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/7f73cc6c1c9121a9b9f2eb77f5e247e.seg 03001627
03001627/points/2a8554af80cfa5e719fb4103277a6b93.pts 03001627/expert_verified/points_label/2a8554af80cfa5e719fb4103277a6b93.seg 03001627
04379243/points/f82a5f3c2a57655d825da2b9ec9c8c29.pts 04379243/expert_verified/points_label/f82a5f3c2a57655d825da2b9ec9c8c29.seg 04379243
02691156/points/319cf93077118d19f64801ad2940cdd5.pts 02691156/expert_verified/points_label/319cf93077118d19f64801ad2940cdd5.seg 02691156
03790512/points/5bb3597d49c58017b37378f3c85478b4.pts 03790512/expert_verified/points_label/5bb3597d49c58017b37378f3c85478b4.seg 03790512
02958343/points/17926c1ef484b73e6758a098566bc94e.pts 02958343/expert_verified/points_label/17926c1ef484b73e6758a098566bc94e.seg 02958343
04379243/points/345c1bb95b12ff8c013a7bed5288654.pts 04379243/expert_verified/points_label/345c1bb95b12ff8c013a7bed5288654.seg 04379243
03001627/points/3b788994cd578990c35131da26f8061a.pts 03001627/expert_verified/points_label/3b788994cd578990c35131da26f8061a.seg 03001627
03636649/points/c25cc72cd06852e75bbea6ee257e41cc.pts 03636649/expert_verified/points_label/c25cc72cd06852e75bbea6ee257e41cc.seg 03636649
03001627/points/4e4570768f981ca7b95617254e8005c0.pts 03001627/expert_verified/points_label/4e4570768f981ca7b95617254e8005c0.seg 03001627
03642806/points/ef6d92c90aeabf5becae27d182a3e41c.pts 03642806/expert_verified/points_label/ef6d92c90aeabf5becae27d182a3e41c.seg 03642806
04379243/points/97718e2651d22b3a74740f837351e7eb.pts 04379243/expert_verified/points_label/97718e2651d22b3a74740f837351e7eb.seg 04379243
03948459/points/1f646ff59cabdddcd810dcd63f342aca.pts 03948459/expert_verified/points_label/1f646ff59cabdddcd810dcd63f342aca.seg 03948459
02958343/points/74f7b559d6af926012f2e446484bbaf7.pts 02958343/expert_verified/points_label/74f7b559d6af926012f2e446484bbaf7.seg 02958343
03001627/points/8b3619396de4df10db8860d0872e9c55.pts 03001627/expert_verified/points_label/8b3619396de4df10db8860d0872e9c55.seg 03001627
03001627/points/44ddb3d46266bb0ffebad4f49b26ec52.pts 03001627/expert_verified/points_label/44ddb3d46266bb0ffebad4f49b26ec52.seg 03001627
03001627/points/a5f300f3975497fa9dcf2183c858e6e5.pts 03001627/expert_verified/points_label/a5f300f3975497fa9dcf2183c858e6e5.seg 03001627
03467517/points/113b65f0e68314737c481698bd5233b4.pts 03467517/expert_verified/points_label/113b65f0e68314737c481698bd5233b4.seg 03467517
03001627/points/49795a9ebd9a9c6d2c697f0a1454869.pts 03001627/expert_verified/points_label/49795a9ebd9a9c6d2c697f0a1454869.seg 03001627
03001627/points/5822ae77b06bea3091da37ff8bdd2524.pts 03001627/expert_verified/points_label/5822ae77b06bea3091da37ff8bdd2524.seg 03001627
03467517/points/15222c5926c7058cc6df7dab8e567ef6.pts 03467517/expert_verified/points_label/15222c5926c7058cc6df7dab8e567ef6.seg 03467517
02691156/points/14d9c576d06622198f52dc705c3109b9.pts 02691156/expert_verified/points_label/14d9c576d06622198f52dc705c3109b9.seg 02691156
04379243/points/62ae9ded861138be9d2be74cfb51ade1.pts 04379243/expert_verified/points_label/62ae9ded861138be9d2be74cfb51ade1.seg 04379243
02958343/points/7b067be3aa39b1a124853ec273f6c1d2.pts 02958343/expert_verified/points_label/7b067be3aa39b1a124853ec273f6c1d2.seg 02958343
03636649/points/66cf69a98ff895e2b55fde51a411949f.pts 03636649/expert_verified/points_label/66cf69a98ff895e2b55fde51a411949f.seg 03636649
04379243/points/3253f2c59e6bd2a119fb4103277a6b93.pts 04379243/expert_verified/points_label/3253f2c59e6bd2a119fb4103277a6b93.seg 04379243
02691156/points/fe0c4db38fb6399990b1d6deb98feec6.pts 02691156/expert_verified/points_label/fe0c4db38fb6399990b1d6deb98feec6.seg 02691156
02691156/points/6d93492543d1087eb87697d3904b168b.pts 02691156/expert_verified/points_label/6d93492543d1087eb87697d3904b168b.seg 02691156
03636649/points/402f7ce2b87e7d1ac066b9622c005c53.pts 03636649/expert_verified/points_label/402f7ce2b87e7d1ac066b9622c005c53.seg 03636649
04379243/points/272a4cf3cfff3eb1e173cee47fbaa88.pts 04379243/expert_verified/points_label/272a4cf3cfff3eb1e173cee47fbaa88.seg 04379243
02691156/points/6420a3ff5e526d59e16519c843f95ce0.pts 02691156/expert_verified/points_label/6420a3ff5e526d59e16519c843f95ce0.seg 02691156
03001627/points/487040c5fdc68fdfe6cfc789522bfbab.pts 03001627/expert_verified/points_label/487040c5fdc68fdfe6cfc789522bfbab.seg 03001627
04379243/points/8f48ccd17a15baf5ce01c07526cf2aa4.pts 04379243/expert_verified/points_label/8f48ccd17a15baf5ce01c07526cf2aa4.seg 04379243
03001627/points/40e5d8e71ee3902a31358207d42bcb21.pts 03001627/expert_verified/points_label/40e5d8e71ee3902a31358207d42bcb21.seg 03001627
03636649/points/68491d576b5d35aade8e7376ce4e111f.pts 03636649/expert_verified/points_label/68491d576b5d35aade8e7376ce4e111f.seg 03636649
03467517/points/80aa2f0d66100844925eded29d6897b9.pts 03467517/expert_verified/points_label/80aa2f0d66100844925eded29d6897b9.seg 03467517
03001627/points/7929676e756dcd41577b5d737869717e.pts 03001627/expert_verified/points_label/7929676e756dcd41577b5d737869717e.seg 03001627
03001627/points/2cf7ccf97b09187fcb7547c95fbdff26.pts 03001627/expert_verified/points_label/2cf7ccf97b09187fcb7547c95fbdff26.seg 03001627
02691156/points/e8409b544c626028a9b2becd26dc2fc1.pts 02691156/expert_verified/points_label/e8409b544c626028a9b2becd26dc2fc1.seg 02691156
02691156/points/1e2de00cf19a0a33554ccf8c30febe7.pts 02691156/expert_verified/points_label/1e2de00cf19a0a33554ccf8c30febe7.seg 02691156
02691156/points/8f40518bd30467151e5ae32cb9e3711f.pts 02691156/expert_verified/points_label/8f40518bd30467151e5ae32cb9e3711f.seg 02691156
02958343/points/4f0147c8a158087a4c19dab9f2c7c52d.pts 02958343/expert_verified/points_label/4f0147c8a158087a4c19dab9f2c7c52d.seg 02958343
03624134/points/954fb0819736737a1b9c8e2fdbfc1118.pts 03624134/expert_verified/points_label/954fb0819736737a1b9c8e2fdbfc1118.seg 03624134
04379243/points/415a08a66b8527519f803a8da27dd9a9.pts 04379243/expert_verified/points_label/415a08a66b8527519f803a8da27dd9a9.seg 04379243
03001627/points/4bdbecfbc925219157915a20ae9ec6b6.pts 03001627/expert_verified/points_label/4bdbecfbc925219157915a20ae9ec6b6.seg 03001627
03624134/points/2f74196bd5cb462727c767f081f1365a.pts 03624134/expert_verified/points_label/2f74196bd5cb462727c767f081f1365a.seg 03624134
02958343/points/b5b6b09711cbee6daa44bfa127abe4bb.pts 02958343/expert_verified/points_label/b5b6b09711cbee6daa44bfa127abe4bb.seg 02958343
03001627/points/43e74f15a986eb626a90f735365ac29e.pts 03001627/expert_verified/points_label/43e74f15a986eb626a90f735365ac29e.seg 03001627
03624134/points/385bb539629cd6991dd89e5fcd05911a.pts 03624134/expert_verified/points_label/385bb539629cd6991dd89e5fcd05911a.seg 03624134
03642806/points/fdec2b8af5dd988cef56c22fd326c67.pts 03642806/expert_verified/points_label/fdec2b8af5dd988cef56c22fd326c67.seg 03642806
02958343/points/244a8476648bd073834daea73aa18748.pts 02958343/expert_verified/points_label/244a8476648bd073834daea73aa18748.seg 02958343
03467517/points/d91b0745e57f6508dc6782957fd2f5d2.pts 03467517/expert_verified/points_label/d91b0745e57f6508dc6782957fd2f5d2.seg 03467517
04379243/points/83f1ff21744e71ad2690c0a5b39562ad.pts 04379243/expert_verified/points_label/83f1ff21744e71ad2690c0a5b39562ad.seg 04379243
03001627/points/49aa713bec70ee1f1104b8f54582c707.pts 03001627/expert_verified/points_label/49aa713bec70ee1f1104b8f54582c707.seg 03001627
03001627/points/9231ef07326eae09b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/9231ef07326eae09b04cb542e2c50eb4.seg 03001627
03642806/points/b211cfb105e9f97e6436916a86a90ed7.pts 03642806/expert_verified/points_label/b211cfb105e9f97e6436916a86a90ed7.seg 03642806
03001627/points/fdfedb5bb8cd35374233148ffd345970.pts 03001627/expert_verified/points_label/fdfedb5bb8cd35374233148ffd345970.seg 03001627
04379243/points/3037fac5bc67207e23fa92d98173c06f.pts 04379243/expert_verified/points_label/3037fac5bc67207e23fa92d98173c06f.seg 04379243
04379243/points/40d0dd3fe786e120d75c27ddd792e41a.pts 04379243/expert_verified/points_label/40d0dd3fe786e120d75c27ddd792e41a.seg 04379243
03001627/points/e6ea5e70c2f29d881e8fd793667dc14f.pts 03001627/expert_verified/points_label/e6ea5e70c2f29d881e8fd793667dc14f.seg 03001627
04379243/points/9502eecc3a057115b129901f80d24b7b.pts 04379243/expert_verified/points_label/9502eecc3a057115b129901f80d24b7b.seg 04379243
03001627/points/e68bb6f55e2454fac7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e68bb6f55e2454fac7f1f7c0570e288d.seg 03001627
02691156/points/9bd8d0fa75bc21c5e3375a6b96a1d765.pts 02691156/expert_verified/points_label/9bd8d0fa75bc21c5e3375a6b96a1d765.seg 02691156
02958343/points/1714b6e57c8c4983fb1aad5dae793ff4.pts 02958343/expert_verified/points_label/1714b6e57c8c4983fb1aad5dae793ff4.seg 02958343
02691156/points/8a84a26158da1db7668586dcfb752ad.pts 02691156/expert_verified/points_label/8a84a26158da1db7668586dcfb752ad.seg 02691156
02691156/points/36d8c865f766e3e097872638b21438e3.pts 02691156/expert_verified/points_label/36d8c865f766e3e097872638b21438e3.seg 02691156
03001627/points/96e8a51b1680b756e99481ddc3bbddfb.pts 03001627/expert_verified/points_label/96e8a51b1680b756e99481ddc3bbddfb.seg 03001627
02958343/points/37ad66d0433beb633df8f4ac45647158.pts 02958343/expert_verified/points_label/37ad66d0433beb633df8f4ac45647158.seg 02958343
04379243/points/56a57ef7c3385c9f2f38c0d2792fb5e.pts 04379243/expert_verified/points_label/56a57ef7c3385c9f2f38c0d2792fb5e.seg 04379243
03467517/points/dbdf45cab0adbded1f260c1b356c52ce.pts 03467517/expert_verified/points_label/dbdf45cab0adbded1f260c1b356c52ce.seg 03467517
04379243/points/868bab5194e93577858699aaad4acee4.pts 04379243/expert_verified/points_label/868bab5194e93577858699aaad4acee4.seg 04379243
04379243/points/2bbd62449b56abee659dda512294c744.pts 04379243/expert_verified/points_label/2bbd62449b56abee659dda512294c744.seg 04379243
04379243/points/a18aa2d20d516333daf1f22b6daf05ed.pts 04379243/expert_verified/points_label/a18aa2d20d516333daf1f22b6daf05ed.seg 04379243
03636649/points/7a2362fbddbee9a4d197f67767b32741.pts 03636649/expert_verified/points_label/7a2362fbddbee9a4d197f67767b32741.seg 03636649
03636649/points/f9259d31df38bd5decd204cd7180226d.pts 03636649/expert_verified/points_label/f9259d31df38bd5decd204cd7180226d.seg 03636649
04379243/points/54e85b248576c4eb57cd80d4b17e7e11.pts 04379243/expert_verified/points_label/54e85b248576c4eb57cd80d4b17e7e11.seg 04379243
04379243/points/1299579419252fa954b02959579aa6bb.pts 04379243/expert_verified/points_label/1299579419252fa954b02959579aa6bb.seg 04379243
04379243/points/49ad167497a2af8c9672e39f89e4622e.pts 04379243/expert_verified/points_label/49ad167497a2af8c9672e39f89e4622e.seg 04379243
04379243/points/55221b101eec29dc656a19d1d18fdbac.pts 04379243/expert_verified/points_label/55221b101eec29dc656a19d1d18fdbac.seg 04379243
04379243/points/e8870f3190f6b8d4bd1025bd755a15aa.pts 04379243/expert_verified/points_label/e8870f3190f6b8d4bd1025bd755a15aa.seg 04379243
02691156/points/9818f0b88fed05b24b0a1bcf2fb497ec.pts 02691156/expert_verified/points_label/9818f0b88fed05b24b0a1bcf2fb497ec.seg 02691156
02691156/points/9ba460913d86466f62347b4731688b0f.pts 02691156/expert_verified/points_label/9ba460913d86466f62347b4731688b0f.seg 02691156
04379243/points/574447022c4473d455f46d55537192b6.pts 04379243/expert_verified/points_label/574447022c4473d455f46d55537192b6.seg 04379243
04379243/points/7b5b7bfa8580e913e2580b23e60e4674.pts 04379243/expert_verified/points_label/7b5b7bfa8580e913e2580b23e60e4674.seg 04379243
04225987/points/48f26ddc704fec2f379c6a1d59ef7283.pts 04225987/expert_verified/points_label/48f26ddc704fec2f379c6a1d59ef7283.seg 04225987
04379243/points/b7821e69687d767aab610b0c94236463.pts 04379243/expert_verified/points_label/b7821e69687d767aab610b0c94236463.seg 04379243
02691156/points/e42443669339a6c1a5a118bd15e6e34f.pts 02691156/expert_verified/points_label/e42443669339a6c1a5a118bd15e6e34f.seg 02691156
04379243/points/2444551d00693a0fab610b0c94236463.pts 04379243/expert_verified/points_label/2444551d00693a0fab610b0c94236463.seg 04379243
03467517/points/5e452914684ea7fc398707f20de9db08.pts 03467517/expert_verified/points_label/5e452914684ea7fc398707f20de9db08.seg 03467517
03001627/points/cc6840207c0cf55db30e42459dcb06f.pts 03001627/expert_verified/points_label/cc6840207c0cf55db30e42459dcb06f.seg 03001627
04379243/points/9046b2e610065fe5a5d95e73eecd308a.pts 04379243/expert_verified/points_label/9046b2e610065fe5a5d95e73eecd308a.seg 04379243
03467517/points/c651a91562b86ed8edb9371445f615ae.pts 03467517/expert_verified/points_label/c651a91562b86ed8edb9371445f615ae.seg 03467517
03001627/points/9bb6d3d76d4f5ba94b3c42e318f3affc.pts 03001627/expert_verified/points_label/9bb6d3d76d4f5ba94b3c42e318f3affc.seg 03001627
03001627/points/7fb336186da77367962800be79c6e52.pts 03001627/expert_verified/points_label/7fb336186da77367962800be79c6e52.seg 03001627
04379243/points/b69b2ff85d0ec661d8f9dd7647048a0c.pts 04379243/expert_verified/points_label/b69b2ff85d0ec661d8f9dd7647048a0c.seg 04379243
03001627/points/d2815e678f173616e6cfc789522bfbab.pts 03001627/expert_verified/points_label/d2815e678f173616e6cfc789522bfbab.seg 03001627
03636649/points/b8350fcf08ff0b2ca950bf8f33cff658.pts 03636649/expert_verified/points_label/b8350fcf08ff0b2ca950bf8f33cff658.seg 03636649
04379243/points/202e7b5c3ec079e299e8bf807e902261.pts 04379243/expert_verified/points_label/202e7b5c3ec079e299e8bf807e902261.seg 04379243
03001627/points/c8938f54fecab41e77cd061c90fcdb44.pts 03001627/expert_verified/points_label/c8938f54fecab41e77cd061c90fcdb44.seg 03001627
04379243/points/894e095c7036c8411933ffef19678834.pts 04379243/expert_verified/points_label/894e095c7036c8411933ffef19678834.seg 04379243
03001627/points/4362e715455f42ba9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/4362e715455f42ba9b9f2eb77f5e247e.seg 03001627
04379243/points/8963760f8bec0fee7f807d3c406ee.pts 04379243/expert_verified/points_label/8963760f8bec0fee7f807d3c406ee.seg 04379243
03948459/points/4acb6494e3aaeb39998978df244b5bd.pts 03948459/expert_verified/points_label/4acb6494e3aaeb39998978df244b5bd.seg 03948459
03636649/points/c1b939cc403a0662664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/c1b939cc403a0662664b3b9b23ddfcbc.seg 03636649
04379243/points/e64876f5590e6fb7c3bd24f986301745.pts 04379243/expert_verified/points_label/e64876f5590e6fb7c3bd24f986301745.seg 04379243
02691156/points/b8ce3803485b620b2c674305897e1782.pts 02691156/expert_verified/points_label/b8ce3803485b620b2c674305897e1782.seg 02691156
03636649/points/a60c6cf7d4893f2ba26bf7a8fd4719ad.pts 03636649/expert_verified/points_label/a60c6cf7d4893f2ba26bf7a8fd4719ad.seg 03636649
04379243/points/6ca66a443e651c1423500a5b036df62e.pts 04379243/expert_verified/points_label/6ca66a443e651c1423500a5b036df62e.seg 04379243
04379243/points/51930b149cf6125373fa072a624ce947.pts 04379243/expert_verified/points_label/51930b149cf6125373fa072a624ce947.seg 04379243
02691156/points/eb658ff31f0becea1d0f8853f6d023e3.pts 02691156/expert_verified/points_label/eb658ff31f0becea1d0f8853f6d023e3.seg 02691156
03642806/points/3f45cde6f7a13138e256fb3794905772.pts 03642806/expert_verified/points_label/3f45cde6f7a13138e256fb3794905772.seg 03642806
03001627/points/ea572cc193b804399c66df0f068d2a36.pts 03001627/expert_verified/points_label/ea572cc193b804399c66df0f068d2a36.seg 03001627
03001627/points/9e0a0ad80be6df7789d2595edb5088ee.pts 03001627/expert_verified/points_label/9e0a0ad80be6df7789d2595edb5088ee.seg 03001627
04379243/points/8eed35fd5b777acf58316b27df6c8e87.pts 04379243/expert_verified/points_label/8eed35fd5b777acf58316b27df6c8e87.seg 04379243
03642806/points/5baaa726f51cd09b507f3bf1d3472684.pts 03642806/expert_verified/points_label/5baaa726f51cd09b507f3bf1d3472684.seg 03642806
02691156/points/789f032dccc6092977b7d0d4764c121d.pts 02691156/expert_verified/points_label/789f032dccc6092977b7d0d4764c121d.seg 02691156
03001627/points/9682d28e03acd2e3735013f3db728e20.pts 03001627/expert_verified/points_label/9682d28e03acd2e3735013f3db728e20.seg 03001627
02958343/points/b50f9931670e25ef44ccce632b473b8c.pts 02958343/expert_verified/points_label/b50f9931670e25ef44ccce632b473b8c.seg 02958343
03467517/points/d3972d599036251369da8e7f9a803d12.pts 03467517/expert_verified/points_label/d3972d599036251369da8e7f9a803d12.seg 03467517
02691156/points/329987191cce68bfe64acd170567d820.pts 02691156/expert_verified/points_label/329987191cce68bfe64acd170567d820.seg 02691156
03636649/points/ab3e153cd23e992b576a354bb9319732.pts 03636649/expert_verified/points_label/ab3e153cd23e992b576a354bb9319732.seg 03636649
04379243/points/f850a69b0d308fbc19fb4103277a6b93.pts 04379243/expert_verified/points_label/f850a69b0d308fbc19fb4103277a6b93.seg 04379243
04379243/points/1645b28322131b6258c407efcf93be6b.pts 04379243/expert_verified/points_label/1645b28322131b6258c407efcf93be6b.seg 04379243
03001627/points/195464ae11f6bfe1cba091e036bf65ed.pts 03001627/expert_verified/points_label/195464ae11f6bfe1cba091e036bf65ed.seg 03001627
02691156/points/edd9583988b62c90328f15e6c60d0e90.pts 02691156/expert_verified/points_label/edd9583988b62c90328f15e6c60d0e90.seg 02691156
04225987/points/36aaae334d636ec28043db94fbc8c982.pts 04225987/expert_verified/points_label/36aaae334d636ec28043db94fbc8c982.seg 04225987
04379243/points/c3c467718eb9b2a313f96345312df593.pts 04379243/expert_verified/points_label/c3c467718eb9b2a313f96345312df593.seg 04379243
02691156/points/a1848a4a69b14704ca8607f540cc62ba.pts 02691156/expert_verified/points_label/a1848a4a69b14704ca8607f540cc62ba.seg 02691156
02958343/points/c8bd4d0ac34266ffaaa232d0915adae9.pts 02958343/expert_verified/points_label/c8bd4d0ac34266ffaaa232d0915adae9.seg 02958343
04379243/points/ad61a5bc7cba29b88cc413950b617e8f.pts 04379243/expert_verified/points_label/ad61a5bc7cba29b88cc413950b617e8f.seg 04379243
03642806/points/466ea85bb4653ba3a715ae636b111d77.pts 03642806/expert_verified/points_label/466ea85bb4653ba3a715ae636b111d77.seg 03642806
03001627/points/e93714e5553f63619215045784774049.pts 03001627/expert_verified/points_label/e93714e5553f63619215045784774049.seg 03001627
03636649/points/b88c9a7aaab268fb42b08fbc749346d6.pts 03636649/expert_verified/points_label/b88c9a7aaab268fb42b08fbc749346d6.seg 03636649
03636649/points/6ba931adfa36c7965208aab875b932bc.pts 03636649/expert_verified/points_label/6ba931adfa36c7965208aab875b932bc.seg 03636649
03001627/points/e3479f55f5894bb3c7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e3479f55f5894bb3c7f1f7c0570e288d.seg 03001627
03467517/points/4c5288cc18896f8f352e5d4d2615db5b.pts 03467517/expert_verified/points_label/4c5288cc18896f8f352e5d4d2615db5b.seg 03467517
03001627/points/631e102e9a689339b0ec386df15ab64f.pts 03001627/expert_verified/points_label/631e102e9a689339b0ec386df15ab64f.seg 03001627
04379243/points/6daed91ae491c9cbe22ea6d770699e4b.pts 04379243/expert_verified/points_label/6daed91ae491c9cbe22ea6d770699e4b.seg 04379243
03001627/points/40e73a326cf95d0361c93c4994c91bd1.pts 03001627/expert_verified/points_label/40e73a326cf95d0361c93c4994c91bd1.seg 03001627
03467517/points/dc7708c870000008a24eeca91f583600.pts 03467517/expert_verified/points_label/dc7708c870000008a24eeca91f583600.seg 03467517
03001627/points/1ac6531a337de85f2f7628d6bf38bcc4.pts 03001627/expert_verified/points_label/1ac6531a337de85f2f7628d6bf38bcc4.seg 03001627
04379243/points/5191d64e9a1b9664bfdcc70dcc16baa1.pts 04379243/expert_verified/points_label/5191d64e9a1b9664bfdcc70dcc16baa1.seg 04379243
03636649/points/c4dc0ac169c91ff29f8c3d2002c77ddb.pts 03636649/expert_verified/points_label/c4dc0ac169c91ff29f8c3d2002c77ddb.seg 03636649
03624134/points/b8648ae17fb9937949f73a97204d432b.pts 03624134/expert_verified/points_label/b8648ae17fb9937949f73a97204d432b.seg 03624134
04379243/points/a465210c23b0136d7afee304cce81d6f.pts 04379243/expert_verified/points_label/a465210c23b0136d7afee304cce81d6f.seg 04379243
03001627/points/513686d6d63a1d8e577b5d737869717e.pts 03001627/expert_verified/points_label/513686d6d63a1d8e577b5d737869717e.seg 03001627
03624134/points/bee1a473472639e25ca3862a7efa6401.pts 03624134/expert_verified/points_label/bee1a473472639e25ca3862a7efa6401.seg 03624134
02691156/points/adb3ea03d7b954255e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/adb3ea03d7b954255e9e2656aff7dd5b.seg 02691156
02691156/points/959f28c6724979ef9a6e43b878d5b335.pts 02691156/expert_verified/points_label/959f28c6724979ef9a6e43b878d5b335.seg 02691156
04379243/points/dec1d2cf8a4563d36cb02543e4df83bf.pts 04379243/expert_verified/points_label/dec1d2cf8a4563d36cb02543e4df83bf.seg 04379243
03790512/points/a9c432d1dc4034762a45a87054fa7272.pts 03790512/expert_verified/points_label/a9c432d1dc4034762a45a87054fa7272.seg 03790512
03001627/points/1b5e876f3559c231532a8e162f399205.pts 03001627/expert_verified/points_label/1b5e876f3559c231532a8e162f399205.seg 03001627
04379243/points/82e5309809e455d5f15fed2243deb166.pts 04379243/expert_verified/points_label/82e5309809e455d5f15fed2243deb166.seg 04379243
03467517/points/8f1f54d337bf6ccac782e6226a4f593e.pts 03467517/expert_verified/points_label/8f1f54d337bf6ccac782e6226a4f593e.seg 03467517
04379243/points/67d97102f9c54cc95512673aa47c7e3d.pts 04379243/expert_verified/points_label/67d97102f9c54cc95512673aa47c7e3d.seg 04379243
02691156/points/e0cc4f538a8da2d65d3bbd70fc7759b7.pts 02691156/expert_verified/points_label/e0cc4f538a8da2d65d3bbd70fc7759b7.seg 02691156
04379243/points/d0008b042256fb5f7ab911835312d4f1.pts 04379243/expert_verified/points_label/d0008b042256fb5f7ab911835312d4f1.seg 04379243
03467517/points/44c05e219618a6395b3335548350bdee.pts 03467517/expert_verified/points_label/44c05e219618a6395b3335548350bdee.seg 03467517
03001627/points/3f7808c221b01668b4d174e5c61f344.pts 03001627/expert_verified/points_label/3f7808c221b01668b4d174e5c61f344.seg 03001627
03467517/points/51abcb617b2faf3a24eeca91f583600.pts 03467517/expert_verified/points_label/51abcb617b2faf3a24eeca91f583600.seg 03467517
03636649/points/f38370fc4c112017a6e7138fdd58748.pts 03636649/expert_verified/points_label/f38370fc4c112017a6e7138fdd58748.seg 03636649
03001627/points/37607ea19e352af4fffc97a61124b1a9.pts 03001627/expert_verified/points_label/37607ea19e352af4fffc97a61124b1a9.seg 03001627
02958343/points/2cb6de89f5b6e702b626f6a649199824.pts 02958343/expert_verified/points_label/2cb6de89f5b6e702b626f6a649199824.seg 02958343
04099429/points/d781243cc1d1d2e91a0ec553feb1c2c3.pts 04099429/expert_verified/points_label/d781243cc1d1d2e91a0ec553feb1c2c3.seg 04099429
04379243/points/900afcc9f0f5fbfd858699aaad4acee4.pts 04379243/expert_verified/points_label/900afcc9f0f5fbfd858699aaad4acee4.seg 04379243
03001627/points/d13eb19745344ae5fb0eb7e753c06942.pts 03001627/expert_verified/points_label/d13eb19745344ae5fb0eb7e753c06942.seg 03001627
02958343/points/5785192c95cdd67b704715417c0f83c1.pts 02958343/expert_verified/points_label/5785192c95cdd67b704715417c0f83c1.seg 02958343
03001627/points/5bb5b15807158f71504721639e19f609.pts 03001627/expert_verified/points_label/5bb5b15807158f71504721639e19f609.seg 03001627
03636649/points/ba05f660341b7b7b70be09f44cb2fef5.pts 03636649/expert_verified/points_label/ba05f660341b7b7b70be09f44cb2fef5.seg 03636649
02691156/points/97066012fbca5983c74417871493eae8.pts 02691156/expert_verified/points_label/97066012fbca5983c74417871493eae8.seg 02691156
03001627/points/4499729e53c858ae71a782a4379556c7.pts 03001627/expert_verified/points_label/4499729e53c858ae71a782a4379556c7.seg 03001627
04379243/points/41d280b7db61ebddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/41d280b7db61ebddfebad4f49b26ec52.seg 04379243
02773838/points/30bf69aa24dbb3fc9de193e488fc4dce.pts 02773838/expert_verified/points_label/30bf69aa24dbb3fc9de193e488fc4dce.seg 02773838
03467517/points/6c9a9c0e2af9d5b35f713e773d664ec2.pts 03467517/expert_verified/points_label/6c9a9c0e2af9d5b35f713e773d664ec2.seg 03467517
04379243/points/f979c7a650d29ea819fb4103277a6b93.pts 04379243/expert_verified/points_label/f979c7a650d29ea819fb4103277a6b93.seg 04379243
03001627/points/b631b78c2dcc748cba5342d638d0c267.pts 03001627/expert_verified/points_label/b631b78c2dcc748cba5342d638d0c267.seg 03001627
03467517/points/d2ad57f36e00c602baba3b7560fe62f4.pts 03467517/expert_verified/points_label/d2ad57f36e00c602baba3b7560fe62f4.seg 03467517
04379243/points/5771d5a3084b3ca3a2d7b309863cb1b.pts 04379243/expert_verified/points_label/5771d5a3084b3ca3a2d7b309863cb1b.seg 04379243
03636649/points/2d638c6b6b2feb9248da169d95204ce2.pts 03636649/expert_verified/points_label/2d638c6b6b2feb9248da169d95204ce2.seg 03636649
02958343/points/63a4e46bbbd855fc2b63d3b2a8c4e8b.pts 02958343/expert_verified/points_label/63a4e46bbbd855fc2b63d3b2a8c4e8b.seg 02958343
04379243/points/8c67fd5a15e8d9defebad4f49b26ec52.pts 04379243/expert_verified/points_label/8c67fd5a15e8d9defebad4f49b26ec52.seg 04379243
03467517/points/28c3903b29f6b38363e148e250c0340d.pts 03467517/expert_verified/points_label/28c3903b29f6b38363e148e250c0340d.seg 03467517
04379243/points/ab2967188299bea54cb0654f4cfa9684.pts 04379243/expert_verified/points_label/ab2967188299bea54cb0654f4cfa9684.seg 04379243
02691156/points/a9a7f21271b3efbaf446f92b52bbd82a.pts 02691156/expert_verified/points_label/a9a7f21271b3efbaf446f92b52bbd82a.seg 02691156
04379243/points/c3e43144fd61c56f19fb4103277a6b93.pts 04379243/expert_verified/points_label/c3e43144fd61c56f19fb4103277a6b93.seg 04379243
03001627/points/7fcde5fc8e023dd2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/7fcde5fc8e023dd2a6fee8e2140acec9.seg 03001627
03790512/points/70d9cc5115bfedeeab548456bc75847f.pts 03790512/expert_verified/points_label/70d9cc5115bfedeeab548456bc75847f.seg 03790512
03001627/points/3c0dd3719baecf3319fb4103277a6b93.pts 03001627/expert_verified/points_label/3c0dd3719baecf3319fb4103277a6b93.seg 03001627
03636649/points/55077c2175d97b8889ab11a408196888.pts 03636649/expert_verified/points_label/55077c2175d97b8889ab11a408196888.seg 03636649
04379243/points/71fc8c7cdb48978282fa4d4f2c19b2ce.pts 04379243/expert_verified/points_label/71fc8c7cdb48978282fa4d4f2c19b2ce.seg 04379243
04379243/points/f0d5eefef970fa4b9f2349486c570dd4.pts 04379243/expert_verified/points_label/f0d5eefef970fa4b9f2349486c570dd4.seg 04379243
03642806/points/90c01fd78513bb99c9b20aa1b8066c46.pts 03642806/expert_verified/points_label/90c01fd78513bb99c9b20aa1b8066c46.seg 03642806
04379243/points/ca6c07357ba5125b8e2adb29857f8a1.pts 04379243/expert_verified/points_label/ca6c07357ba5125b8e2adb29857f8a1.seg 04379243
04379243/points/634bcd3197e337aafe4e4de1adda2150.pts 04379243/expert_verified/points_label/634bcd3197e337aafe4e4de1adda2150.seg 04379243
04379243/points/7b411de42d4960eb6e25f3efedf6785f.pts 04379243/expert_verified/points_label/7b411de42d4960eb6e25f3efedf6785f.seg 04379243
04379243/points/878414eb6e86494d9a8ef44e1d2c5b75.pts 04379243/expert_verified/points_label/878414eb6e86494d9a8ef44e1d2c5b75.seg 04379243
03001627/points/f3fa7bd00b76f6a87a8a6b9421844d96.pts 03001627/expert_verified/points_label/f3fa7bd00b76f6a87a8a6b9421844d96.seg 03001627
03467517/points/a2c1ee6a7ddb50a493f0194265a9746c.pts 03467517/expert_verified/points_label/a2c1ee6a7ddb50a493f0194265a9746c.seg 03467517
04379243/points/25bc205f6de491f4ccde40b1205ec7ff.pts 04379243/expert_verified/points_label/25bc205f6de491f4ccde40b1205ec7ff.seg 04379243
03636649/points/771d4def2e44bc169eb34048e600e1ea.pts 03636649/expert_verified/points_label/771d4def2e44bc169eb34048e600e1ea.seg 03636649
03624134/points/6ebe2a22b8d9d70862a95b942081dfee.pts 03624134/expert_verified/points_label/6ebe2a22b8d9d70862a95b942081dfee.seg 03624134
02691156/points/9b1fc3881a5335cb44012f72ba1e15a8.pts 02691156/expert_verified/points_label/9b1fc3881a5335cb44012f72ba1e15a8.seg 02691156
03001627/points/3dc252fd90d82b18c9be65dfbd21428b.pts 03001627/expert_verified/points_label/3dc252fd90d82b18c9be65dfbd21428b.seg 03001627
04379243/points/f6f180c3e72caacb5077539b37310c29.pts 04379243/expert_verified/points_label/f6f180c3e72caacb5077539b37310c29.seg 04379243
03642806/points/25bc168b214b54799e28e9cf32e5157.pts 03642806/expert_verified/points_label/25bc168b214b54799e28e9cf32e5157.seg 03642806
04379243/points/ac9fae8af57729945eee45c00c4de9d3.pts 04379243/expert_verified/points_label/ac9fae8af57729945eee45c00c4de9d3.seg 04379243
03001627/points/e8126f9e2d106620d2f33aaf794b5932.pts 03001627/expert_verified/points_label/e8126f9e2d106620d2f33aaf794b5932.seg 03001627
03624134/points/3dc5a6d79ed591bda709dec9a148b2fe.pts 03624134/expert_verified/points_label/3dc5a6d79ed591bda709dec9a148b2fe.seg 03624134
04379243/points/8f73278956fecb80327289c00b6dc9ca.pts 04379243/expert_verified/points_label/8f73278956fecb80327289c00b6dc9ca.seg 04379243
03948459/points/5f46578efd2c65e5d4ac2f5fcaa742ac.pts 03948459/expert_verified/points_label/5f46578efd2c65e5d4ac2f5fcaa742ac.seg 03948459
03624134/points/a05ea45d396c86784e52b614e584a543.pts 03624134/expert_verified/points_label/a05ea45d396c86784e52b614e584a543.seg 03624134
03001627/points/cd939609247df917d9d3572bbd9cf789.pts 03001627/expert_verified/points_label/cd939609247df917d9d3572bbd9cf789.seg 03001627
03261776/points/17c9866b42ae1831df4cfe396cee719e.pts 03261776/expert_verified/points_label/17c9866b42ae1831df4cfe396cee719e.seg 03261776
03797390/points/3d3e993f7baa4d7ef1ff24a8b1564a36.pts 03797390/expert_verified/points_label/3d3e993f7baa4d7ef1ff24a8b1564a36.seg 03797390
03467517/points/36b49aff54f6d7e893f0194265a9746c.pts 03467517/expert_verified/points_label/36b49aff54f6d7e893f0194265a9746c.seg 03467517
02691156/points/48df2496242053da4ee0fb6a51564c3.pts 02691156/expert_verified/points_label/48df2496242053da4ee0fb6a51564c3.seg 02691156
04379243/points/7ad23def902ea4f37b7a2c2624e46d0a.pts 04379243/expert_verified/points_label/7ad23def902ea4f37b7a2c2624e46d0a.seg 04379243
04379243/points/1a8fe5baa2d4b5f7ee84261b3d20656.pts 04379243/expert_verified/points_label/1a8fe5baa2d4b5f7ee84261b3d20656.seg 04379243
03467517/points/d685415d4fcd3205a24eeca91f583600.pts 03467517/expert_verified/points_label/d685415d4fcd3205a24eeca91f583600.seg 03467517
02958343/points/8e308d28d463427f43f0e92e826556b8.pts 02958343/expert_verified/points_label/8e308d28d463427f43f0e92e826556b8.seg 02958343
04379243/points/dc68436ab1a576f6573d2c9ac4b23fdf.pts 04379243/expert_verified/points_label/dc68436ab1a576f6573d2c9ac4b23fdf.seg 04379243
04379243/points/1a153612bcdab3e23cc149415a408229.pts 04379243/expert_verified/points_label/1a153612bcdab3e23cc149415a408229.seg 04379243
03001627/points/19ce953da9aa8065d747a43c11e738e9.pts 03001627/expert_verified/points_label/19ce953da9aa8065d747a43c11e738e9.seg 03001627
04379243/points/db2d4f781756e687d8864caa856253b.pts 04379243/expert_verified/points_label/db2d4f781756e687d8864caa856253b.seg 04379243
04379243/points/d8f851bbc98dccc23fa92d98173c06f.pts 04379243/expert_verified/points_label/d8f851bbc98dccc23fa92d98173c06f.seg 04379243
03467517/points/e585e31db7568c4cf0e1c0df18936d05.pts 03467517/expert_verified/points_label/e585e31db7568c4cf0e1c0df18936d05.seg 03467517
03001627/points/98ac0106ad244505e04fc3fcc1c852e0.pts 03001627/expert_verified/points_label/98ac0106ad244505e04fc3fcc1c852e0.seg 03001627
03001627/points/1b81441b7e597235d61420a53a0cb96d.pts 03001627/expert_verified/points_label/1b81441b7e597235d61420a53a0cb96d.seg 03001627
03001627/points/918145be863f7aeaf050758b903e6054.pts 03001627/expert_verified/points_label/918145be863f7aeaf050758b903e6054.seg 03001627
02691156/points/1af4b32eafffb0f7ee60c37cbf99c1c.pts 02691156/expert_verified/points_label/1af4b32eafffb0f7ee60c37cbf99c1c.seg 02691156
03636649/points/f4e1a4032b1686cec35131da26f8061a.pts 03636649/expert_verified/points_label/f4e1a4032b1686cec35131da26f8061a.seg 03636649
04379243/points/9c4dfafdbd7f9b76c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/9c4dfafdbd7f9b76c955e5ed03ef3a2f.seg 04379243
02691156/points/80b8f4da6b77eb66d208f79049825a82.pts 02691156/expert_verified/points_label/80b8f4da6b77eb66d208f79049825a82.seg 02691156
03642806/points/de2e95eac460c361e862e3cac45aa769.pts 03642806/expert_verified/points_label/de2e95eac460c361e862e3cac45aa769.seg 03642806
04379243/points/e2571e4eba2d9f5eab610b0c94236463.pts 04379243/expert_verified/points_label/e2571e4eba2d9f5eab610b0c94236463.seg 04379243
04379243/points/a0445e4888d56666b9d7c2fc41e80228.pts 04379243/expert_verified/points_label/a0445e4888d56666b9d7c2fc41e80228.seg 04379243
03001627/points/873c017f35957717b56a13a4b2372aa4.pts 03001627/expert_verified/points_label/873c017f35957717b56a13a4b2372aa4.seg 03001627
03001627/points/3af90da238ac4ddbf91663a74ccd2338.pts 03001627/expert_verified/points_label/3af90da238ac4ddbf91663a74ccd2338.seg 03001627
02958343/points/9698be0fd3516f01fbeda5389ab05f5f.pts 02958343/expert_verified/points_label/9698be0fd3516f01fbeda5389ab05f5f.seg 02958343
03790512/points/655b9dd9425cc3a12a45a87054fa7272.pts 03790512/expert_verified/points_label/655b9dd9425cc3a12a45a87054fa7272.seg 03790512
04379243/points/ec1c92efffb9ee78beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/ec1c92efffb9ee78beedb4c8fd29e2d1.seg 04379243
04379243/points/3b7fc97192e483ebb0bf045ee98272fc.pts 04379243/expert_verified/points_label/3b7fc97192e483ebb0bf045ee98272fc.seg 04379243
03467517/points/8c3d3e69d03d3443e84e459fb01822f.pts 03467517/expert_verified/points_label/8c3d3e69d03d3443e84e459fb01822f.seg 03467517
02691156/points/e0058b4948f87d3b87697d3904b168b.pts 02691156/expert_verified/points_label/e0058b4948f87d3b87697d3904b168b.seg 02691156
03001627/points/4428b7dc4b6696812905b6e26038a78.pts 03001627/expert_verified/points_label/4428b7dc4b6696812905b6e26038a78.seg 03001627
03636649/points/f7093dd024fd09fc7219d6d5c4afbaff.pts 03636649/expert_verified/points_label/f7093dd024fd09fc7219d6d5c4afbaff.seg 03636649
04379243/points/7d0c5e28089c2b7bd99e852ee772dfa4.pts 04379243/expert_verified/points_label/7d0c5e28089c2b7bd99e852ee772dfa4.seg 04379243
03636649/points/4916f793d87dd184d42b9650f19dd425.pts 03636649/expert_verified/points_label/4916f793d87dd184d42b9650f19dd425.seg 03636649
04379243/points/1ffcbc064f473b7de7c13848b2d8f5ec.pts 04379243/expert_verified/points_label/1ffcbc064f473b7de7c13848b2d8f5ec.seg 04379243
03636649/points/e180510d07b65fff571108a6d1e94edd.pts 03636649/expert_verified/points_label/e180510d07b65fff571108a6d1e94edd.seg 03636649
03636649/points/d9f6bd064c9fd456fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/d9f6bd064c9fd456fcb8d8c6d4df8143.seg 03636649
04379243/points/ec81c49ee12e8a70fd06de9ba37d44bd.pts 04379243/expert_verified/points_label/ec81c49ee12e8a70fd06de9ba37d44bd.seg 04379243
03636649/points/4a868756ae6404a5c0bc57897eddf6f.pts 03636649/expert_verified/points_label/4a868756ae6404a5c0bc57897eddf6f.seg 03636649
02958343/points/9c827e532de4967285089a13cc567dbd.pts 02958343/expert_verified/points_label/9c827e532de4967285089a13cc567dbd.seg 02958343
03797390/points/1c9f9e25c654cbca3c71bf3f4dd78475.pts 03797390/expert_verified/points_label/1c9f9e25c654cbca3c71bf3f4dd78475.seg 03797390
03001627/points/ca3670f77268f899febad4f49b26ec52.pts 03001627/expert_verified/points_label/ca3670f77268f899febad4f49b26ec52.seg 03001627
04379243/points/9b8e6eb835f0c8bcf37af16b2893f1d4.pts 04379243/expert_verified/points_label/9b8e6eb835f0c8bcf37af16b2893f1d4.seg 04379243
03001627/points/5c9d582488732ee0d7f7a4c4609b0913.pts 03001627/expert_verified/points_label/5c9d582488732ee0d7f7a4c4609b0913.seg 03001627
04379243/points/684ccc0f629ee45cab610b0c94236463.pts 04379243/expert_verified/points_label/684ccc0f629ee45cab610b0c94236463.seg 04379243
03001627/points/4913388a4c94547a81806e3880250dff.pts 03001627/expert_verified/points_label/4913388a4c94547a81806e3880250dff.seg 03001627
03636649/points/73378b714c5bfed2b922d818b19db1e.pts 03636649/expert_verified/points_label/73378b714c5bfed2b922d818b19db1e.seg 03636649
03001627/points/4a89a789f817ab5414038d588fd1342f.pts 03001627/expert_verified/points_label/4a89a789f817ab5414038d588fd1342f.seg 03001627
04379243/points/df7761a3b4ac638c9eaceb124b71b7be.pts 04379243/expert_verified/points_label/df7761a3b4ac638c9eaceb124b71b7be.seg 04379243
03001627/points/46557f689f4cf5dd2acd2bb6205825cb.pts 03001627/expert_verified/points_label/46557f689f4cf5dd2acd2bb6205825cb.seg 03001627
04379243/points/2db1f557e247ded7e907b6d9dc1d71b7.pts 04379243/expert_verified/points_label/2db1f557e247ded7e907b6d9dc1d71b7.seg 04379243
04379243/points/b69d9e876e7a80a29f2349486c570dd4.pts 04379243/expert_verified/points_label/b69d9e876e7a80a29f2349486c570dd4.seg 04379243
04379243/points/a94ea7183f27073248c0c0980e363341.pts 04379243/expert_verified/points_label/a94ea7183f27073248c0c0980e363341.seg 04379243
03636649/points/8f85c2195890ccf671f0940f5ed452dc.pts 03636649/expert_verified/points_label/8f85c2195890ccf671f0940f5ed452dc.seg 03636649
02691156/points/cc80380c511ec8e2c91a9d486db717.pts 02691156/expert_verified/points_label/cc80380c511ec8e2c91a9d486db717.seg 02691156
03642806/points/6b61ef17b4f45050b598e8984f11eb0c.pts 03642806/expert_verified/points_label/6b61ef17b4f45050b598e8984f11eb0c.seg 03642806
04379243/points/d9ce0b512e0420f8be95ff480950e9ef.pts 04379243/expert_verified/points_label/d9ce0b512e0420f8be95ff480950e9ef.seg 04379243
04379243/points/c27a1c6a26642c907ecc778b34d42f32.pts 04379243/expert_verified/points_label/c27a1c6a26642c907ecc778b34d42f32.seg 04379243
04379243/points/debd06d3176a5b728cbb8bac2032149c.pts 04379243/expert_verified/points_label/debd06d3176a5b728cbb8bac2032149c.seg 04379243
04099429/points/fa07813a89527d195d1df55cbe0874aa.pts 04099429/expert_verified/points_label/fa07813a89527d195d1df55cbe0874aa.seg 04099429
03001627/points/2a98a638f675f46e7d44dc16af152638.pts 03001627/expert_verified/points_label/2a98a638f675f46e7d44dc16af152638.seg 03001627
03624134/points/ec1eb959cc203f1de5a365227cfe63ec.pts 03624134/expert_verified/points_label/ec1eb959cc203f1de5a365227cfe63ec.seg 03624134
04379243/points/db0c430a51ac45c19d2be74cfb51ade1.pts 04379243/expert_verified/points_label/db0c430a51ac45c19d2be74cfb51ade1.seg 04379243
04379243/points/26b2a15646f6a3a06f1e07a56c129dfc.pts 04379243/expert_verified/points_label/26b2a15646f6a3a06f1e07a56c129dfc.seg 04379243
04379243/points/90343e416528b576f41d9ea5f63b1b05.pts 04379243/expert_verified/points_label/90343e416528b576f41d9ea5f63b1b05.seg 04379243
03001627/points/43d38ad2f5d103adf9b9977a2406713a.pts 03001627/expert_verified/points_label/43d38ad2f5d103adf9b9977a2406713a.seg 03001627
03001627/points/e279758e8a5b6a8d492d9da2668ec34c.pts 03001627/expert_verified/points_label/e279758e8a5b6a8d492d9da2668ec34c.seg 03001627
03642806/points/71907a4a567dce3bb0de1e7a6809fd90.pts 03642806/expert_verified/points_label/71907a4a567dce3bb0de1e7a6809fd90.seg 03642806
03636649/points/2958cd9fd799bf02cfbcbf340cec6da1.pts 03636649/expert_verified/points_label/2958cd9fd799bf02cfbcbf340cec6da1.seg 03636649
04379243/points/bd7c71ca15b0d4e56c252f74b6220e29.pts 04379243/expert_verified/points_label/bd7c71ca15b0d4e56c252f74b6220e29.seg 04379243
04379243/points/51c6a7298408c3f19730cb37c9a5f63b.pts 04379243/expert_verified/points_label/51c6a7298408c3f19730cb37c9a5f63b.seg 04379243
02691156/points/e3de366a0cfb59ed38294c37c250d7cd.pts 02691156/expert_verified/points_label/e3de366a0cfb59ed38294c37c250d7cd.seg 02691156
03467517/points/f288cd2146b8f4c1f0e1c0df18936d05.pts 03467517/expert_verified/points_label/f288cd2146b8f4c1f0e1c0df18936d05.seg 03467517
04379243/points/270430ab9efb9d85c0f947750540fb22.pts 04379243/expert_verified/points_label/270430ab9efb9d85c0f947750540fb22.seg 04379243
04379243/points/f5ad10e6a938aa80e85c7a030ebdf69a.pts 04379243/expert_verified/points_label/f5ad10e6a938aa80e85c7a030ebdf69a.seg 04379243
04379243/points/8343d98e3710f5bee1b32bbe69d5bc15.pts 04379243/expert_verified/points_label/8343d98e3710f5bee1b32bbe69d5bc15.seg 04379243
03790512/points/40b7a63fd9ede0cf48272812609617e2.pts 03790512/expert_verified/points_label/40b7a63fd9ede0cf48272812609617e2.seg 03790512
03467517/points/16bc13ee237ebeb38460585fe283a1c9.pts 03467517/expert_verified/points_label/16bc13ee237ebeb38460585fe283a1c9.seg 03467517
02691156/points/a56143efe74ee89ebbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a56143efe74ee89ebbf3143b1cb6076a.seg 02691156
04379243/points/9a6ab25d91c92a5a35acfdef2ece21c0.pts 04379243/expert_verified/points_label/9a6ab25d91c92a5a35acfdef2ece21c0.seg 04379243
03467517/points/c9b60abdc17708fb78ad94b294a9faa6.pts 03467517/expert_verified/points_label/c9b60abdc17708fb78ad94b294a9faa6.seg 03467517
04379243/points/cde67434193a2a6f19fb4103277a6b93.pts 04379243/expert_verified/points_label/cde67434193a2a6f19fb4103277a6b93.seg 04379243
04379243/points/6b62c85b16e300557005dacb6907e37d.pts 04379243/expert_verified/points_label/6b62c85b16e300557005dacb6907e37d.seg 04379243
04379243/points/7956ac7aba6295d1c2fd07f66cbad0f7.pts 04379243/expert_verified/points_label/7956ac7aba6295d1c2fd07f66cbad0f7.seg 04379243
04379243/points/dcda90e411cb4e35506d1e1cc84da713.pts 04379243/expert_verified/points_label/dcda90e411cb4e35506d1e1cc84da713.seg 04379243
02691156/points/c494f446954523a8a32748a9f843a0bf.pts 02691156/expert_verified/points_label/c494f446954523a8a32748a9f843a0bf.seg 02691156
02691156/points/18e6f319062ccb49ca8607f540cc62ba.pts 02691156/expert_verified/points_label/18e6f319062ccb49ca8607f540cc62ba.seg 02691156
04379243/points/b7cead95e18b570d2c97486f63c12d76.pts 04379243/expert_verified/points_label/b7cead95e18b570d2c97486f63c12d76.seg 04379243
03948459/points/f6d52684720d52a01ab78426351eea4a.pts 03948459/expert_verified/points_label/f6d52684720d52a01ab78426351eea4a.seg 03948459
04379243/points/7eeceefed2b3aa2794f3bda96cf548cc.pts 04379243/expert_verified/points_label/7eeceefed2b3aa2794f3bda96cf548cc.seg 04379243
03001627/points/5eaa2730f10054d0f6cabe1df6f4c9d9.pts 03001627/expert_verified/points_label/5eaa2730f10054d0f6cabe1df6f4c9d9.seg 03001627
03001627/points/92f79b8e45269847f0efa341b439d741.pts 03001627/expert_verified/points_label/92f79b8e45269847f0efa341b439d741.seg 03001627
03001627/points/cbaca6a6edfa2d512b520984c067934c.pts 03001627/expert_verified/points_label/cbaca6a6edfa2d512b520984c067934c.seg 03001627
04379243/points/390e0db80fe12ef65fa6da97b9eb4a2f.pts 04379243/expert_verified/points_label/390e0db80fe12ef65fa6da97b9eb4a2f.seg 04379243
04379243/points/2ec33e8b457ac0fa278d386bfa54545.pts 04379243/expert_verified/points_label/2ec33e8b457ac0fa278d386bfa54545.seg 04379243
04225987/points/ac2b6924a60a7a87aa4f69d519551495.pts 04225987/expert_verified/points_label/ac2b6924a60a7a87aa4f69d519551495.seg 04225987
02958343/points/468780ef4ace9a422e877e82c90c24d.pts 02958343/expert_verified/points_label/468780ef4ace9a422e877e82c90c24d.seg 02958343
03001627/points/78c9204b2eac432b65b77a565916c7f.pts 03001627/expert_verified/points_label/78c9204b2eac432b65b77a565916c7f.seg 03001627
04379243/points/b278b58e294a7d2bac242c3aebc81b2f.pts 04379243/expert_verified/points_label/b278b58e294a7d2bac242c3aebc81b2f.seg 04379243
04379243/points/fc95d34ab1afb92b9118eee0b123125f.pts 04379243/expert_verified/points_label/fc95d34ab1afb92b9118eee0b123125f.seg 04379243
03790512/points/54f016b47a5864cd5dde04c96fd8146.pts 03790512/expert_verified/points_label/54f016b47a5864cd5dde04c96fd8146.seg 03790512
04379243/points/9afa121e3aec8bd7c387f328a37d8ece.pts 04379243/expert_verified/points_label/9afa121e3aec8bd7c387f328a37d8ece.seg 04379243
04379243/points/382889dbc86b5dd919fb4103277a6b93.pts 04379243/expert_verified/points_label/382889dbc86b5dd919fb4103277a6b93.seg 04379243
03467517/points/b83a81b2476ec59e59610f6f40382499.pts 03467517/expert_verified/points_label/b83a81b2476ec59e59610f6f40382499.seg 03467517
03001627/points/5d959b0f79a22e8c67c9124d122355ab.pts 03001627/expert_verified/points_label/5d959b0f79a22e8c67c9124d122355ab.seg 03001627
02691156/points/c4111dbb21e1f17043afdb9c81ff2967.pts 02691156/expert_verified/points_label/c4111dbb21e1f17043afdb9c81ff2967.seg 02691156
02691156/points/46829981c5c25285bfc0a2c490b4c222.pts 02691156/expert_verified/points_label/46829981c5c25285bfc0a2c490b4c222.seg 02691156
04379243/points/497659c4723fbc4fe90ff84c89de437.pts 04379243/expert_verified/points_label/497659c4723fbc4fe90ff84c89de437.seg 04379243
02691156/points/a805c30d4b09f11f62347b4731688b0f.pts 02691156/expert_verified/points_label/a805c30d4b09f11f62347b4731688b0f.seg 02691156
03636649/points/e485053f3e0d18252cd2160e449d45ae.pts 03636649/expert_verified/points_label/e485053f3e0d18252cd2160e449d45ae.seg 03636649
02958343/points/2fb5fe84c28b8b35cc02882a83047172.pts 02958343/expert_verified/points_label/2fb5fe84c28b8b35cc02882a83047172.seg 02958343
03636649/points/f7a4590c54e2ac7ce62fad6b4f42c880.pts 03636649/expert_verified/points_label/f7a4590c54e2ac7ce62fad6b4f42c880.seg 03636649
03642806/points/9fc5b76d363ca64ed03066fc8168e9c6.pts 03642806/expert_verified/points_label/9fc5b76d363ca64ed03066fc8168e9c6.seg 03642806
02691156/points/be080a797406422843afdb9c81ff2967.pts 02691156/expert_verified/points_label/be080a797406422843afdb9c81ff2967.seg 02691156
04379243/points/81a84fcb2b247a3348eaa510713cb074.pts 04379243/expert_verified/points_label/81a84fcb2b247a3348eaa510713cb074.seg 04379243
03001627/points/47c540c2e9c3483ce79a6b87656a120a.pts 03001627/expert_verified/points_label/47c540c2e9c3483ce79a6b87656a120a.seg 03001627
03001627/points/5073d7a546b9a4d0e810eba61b778ebb.pts 03001627/expert_verified/points_label/5073d7a546b9a4d0e810eba61b778ebb.seg 03001627
03001627/points/e4a890f2330ebd7e4a11872aa986426d.pts 03001627/expert_verified/points_label/e4a890f2330ebd7e4a11872aa986426d.seg 03001627
03001627/points/a7200578bd7bea065dc3653f8341633a.pts 03001627/expert_verified/points_label/a7200578bd7bea065dc3653f8341633a.seg 03001627
03467517/points/b004331ee5cc39caa24eeca91f583600.pts 03467517/expert_verified/points_label/b004331ee5cc39caa24eeca91f583600.seg 03467517
04379243/points/f01768b8b8ba025ee45ef4135c266a12.pts 04379243/expert_verified/points_label/f01768b8b8ba025ee45ef4135c266a12.seg 04379243
03642806/points/5173aa7f75ff3cf1b55fde51a411949f.pts 03642806/expert_verified/points_label/5173aa7f75ff3cf1b55fde51a411949f.seg 03642806
03636649/points/e7e45a8f0b0ab311c754474f0ac106.pts 03636649/expert_verified/points_label/e7e45a8f0b0ab311c754474f0ac106.seg 03636649
03642806/points/1b67b4bfed6688ba5b22feddf58c05e1.pts 03642806/expert_verified/points_label/1b67b4bfed6688ba5b22feddf58c05e1.seg 03642806
03797390/points/f1e439307b834015770a0ff1161fa15a.pts 03797390/expert_verified/points_label/f1e439307b834015770a0ff1161fa15a.seg 03797390
03001627/points/b6c9495629c00419940806ade53ef2f.pts 03001627/expert_verified/points_label/b6c9495629c00419940806ade53ef2f.seg 03001627
03001627/points/8e19d2ec95c45186a6fd617b2ff5d2d.pts 03001627/expert_verified/points_label/8e19d2ec95c45186a6fd617b2ff5d2d.seg 03001627
03001627/points/d7b8189fe69cebedc41b07b1627c4b43.pts 03001627/expert_verified/points_label/d7b8189fe69cebedc41b07b1627c4b43.seg 03001627
02691156/points/a7a0e7eddf4ffb8c19378fd691582500.pts 02691156/expert_verified/points_label/a7a0e7eddf4ffb8c19378fd691582500.seg 02691156
03001627/points/2b6cbad4ba1e9a0645881d7eab1353ba.pts 03001627/expert_verified/points_label/2b6cbad4ba1e9a0645881d7eab1353ba.seg 03001627
04379243/points/dade0594e68e2250be6c545952e7fa4a.pts 04379243/expert_verified/points_label/dade0594e68e2250be6c545952e7fa4a.seg 04379243
03001627/points/9850d225049f987e9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9850d225049f987e9b9f2eb77f5e247e.seg 03001627
03948459/points/e9e6426605eb6d5952d52701459b1f0.pts 03948459/expert_verified/points_label/e9e6426605eb6d5952d52701459b1f0.seg 03948459
03636649/points/e507bc77c03a1b3afcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e507bc77c03a1b3afcb8d8c6d4df8143.seg 03636649
03797390/points/a6d9f9ae39728831808951ff5fb582ac.pts 03797390/expert_verified/points_label/a6d9f9ae39728831808951ff5fb582ac.seg 03797390
04379243/points/3144ba0c286cc61f490ad276cd2af3a4.pts 04379243/expert_verified/points_label/3144ba0c286cc61f490ad276cd2af3a4.seg 04379243
04379243/points/9be565678aab11cba0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/9be565678aab11cba0ab1d82ef09f78f.seg 04379243
04379243/points/a4b2870ce7a54b8eec11c6b035aac769.pts 04379243/expert_verified/points_label/a4b2870ce7a54b8eec11c6b035aac769.seg 04379243
03636649/points/78b95abd1d1158ffef3a2c64cef919d0.pts 03636649/expert_verified/points_label/78b95abd1d1158ffef3a2c64cef919d0.seg 03636649
04379243/points/2182028f013e7eb530bbd4cddd04c77b.pts 04379243/expert_verified/points_label/2182028f013e7eb530bbd4cddd04c77b.seg 04379243
02691156/points/e00b89bc338348caa42c49797afd1f5c.pts 02691156/expert_verified/points_label/e00b89bc338348caa42c49797afd1f5c.seg 02691156
03001627/points/9d28a066df22319cca2e16d6cd76503c.pts 03001627/expert_verified/points_label/9d28a066df22319cca2e16d6cd76503c.seg 03001627
03636649/points/3c4d8c4ebe9dedbc2cd2160e449d45ae.pts 03636649/expert_verified/points_label/3c4d8c4ebe9dedbc2cd2160e449d45ae.seg 03636649
02691156/points/97d662e5e6345b46bd46d022fd7d80aa.pts 02691156/expert_verified/points_label/97d662e5e6345b46bd46d022fd7d80aa.seg 02691156
03001627/points/9dac39c51680daa2f71e06115e9c3b3e.pts 03001627/expert_verified/points_label/9dac39c51680daa2f71e06115e9c3b3e.seg 03001627
03624134/points/1ecb37ea8f0c4abc20fc54d2500eb7f1.pts 03624134/expert_verified/points_label/1ecb37ea8f0c4abc20fc54d2500eb7f1.seg 03624134
03624134/points/3a0f48139bfd3a4ea152d2e823b9fe06.pts 03624134/expert_verified/points_label/3a0f48139bfd3a4ea152d2e823b9fe06.seg 03624134
04379243/points/1264d88ae599df3fbeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/1264d88ae599df3fbeedb4c8fd29e2d1.seg 04379243
03001627/points/97bbc8970b05c4a3fcde6bcb709edd9a.pts 03001627/expert_verified/points_label/97bbc8970b05c4a3fcde6bcb709edd9a.seg 03001627
03636649/points/1f58b59a1b6b06df766fc93a239bada0.pts 03636649/expert_verified/points_label/1f58b59a1b6b06df766fc93a239bada0.seg 03636649
03001627/points/eb51e814c3f44a07914ced7dab3536b9.pts 03001627/expert_verified/points_label/eb51e814c3f44a07914ced7dab3536b9.seg 03001627
03636649/points/a138582b1d0b9cbb137af984a9f45d65.pts 03636649/expert_verified/points_label/a138582b1d0b9cbb137af984a9f45d65.seg 03636649
03790512/points/9f9de88a95b56660b37378f3c85478b4.pts 03790512/expert_verified/points_label/9f9de88a95b56660b37378f3c85478b4.seg 03790512
03001627/points/a521fba02ca7f9aa822215026d1e8d82.pts 03001627/expert_verified/points_label/a521fba02ca7f9aa822215026d1e8d82.seg 03001627
04225987/points/d303055e96cd59949da15808191f1405.pts 04225987/expert_verified/points_label/d303055e96cd59949da15808191f1405.seg 04225987
04379243/points/7e3022a7bd00eb4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/7e3022a7bd00eb4195b8ea6a366e14d.seg 04379243
02691156/points/d83300deab42c100eb9db4e832a6dd82.pts 02691156/expert_verified/points_label/d83300deab42c100eb9db4e832a6dd82.seg 02691156
03642806/points/a4b410734514306ac401e233323032d6.pts 03642806/expert_verified/points_label/a4b410734514306ac401e233323032d6.seg 03642806
03790512/points/532e6f88a9975a27b37378f3c85478b4.pts 03790512/expert_verified/points_label/532e6f88a9975a27b37378f3c85478b4.seg 03790512
03642806/points/cc691d9e8e189ce47a381a112bfd785.pts 03642806/expert_verified/points_label/cc691d9e8e189ce47a381a112bfd785.seg 03642806
02691156/points/aa07239e9397cf189601fb40d0d298b9.pts 02691156/expert_verified/points_label/aa07239e9397cf189601fb40d0d298b9.seg 02691156
03642806/points/cc0535a34cdc7d676bf98d15712168f.pts 03642806/expert_verified/points_label/cc0535a34cdc7d676bf98d15712168f.seg 03642806
02691156/points/ddec69970cbc4d29112a90660b187a10.pts 02691156/expert_verified/points_label/ddec69970cbc4d29112a90660b187a10.seg 02691156
04379243/points/268e68f1819a225c1b4b790955c17432.pts 04379243/expert_verified/points_label/268e68f1819a225c1b4b790955c17432.seg 04379243
03624134/points/1943c87f92ac76e112cad8be168fe72d.pts 03624134/expert_verified/points_label/1943c87f92ac76e112cad8be168fe72d.seg 03624134
04379243/points/b9fc2f624533bb8119fb4103277a6b93.pts 04379243/expert_verified/points_label/b9fc2f624533bb8119fb4103277a6b93.seg 04379243
03001627/points/1c45b266d3c879dab36dcc661f3905d.pts 03001627/expert_verified/points_label/1c45b266d3c879dab36dcc661f3905d.seg 03001627
03948459/points/1660ef4b3f20b1e2a94b922b533051b7.pts 03948459/expert_verified/points_label/1660ef4b3f20b1e2a94b922b533051b7.seg 03948459
02691156/points/167250e2014c72dbb87697d3904b168b.pts 02691156/expert_verified/points_label/167250e2014c72dbb87697d3904b168b.seg 02691156
02691156/points/dfe65f8a20df11c5d1df55cbe0874aa.pts 02691156/expert_verified/points_label/dfe65f8a20df11c5d1df55cbe0874aa.seg 02691156
03001627/points/44a2a3952ea2315ff51f77a6d7299806.pts 03001627/expert_verified/points_label/44a2a3952ea2315ff51f77a6d7299806.seg 03001627
04379243/points/a1896691fe875eccb9968f25875bdef4.pts 04379243/expert_verified/points_label/a1896691fe875eccb9968f25875bdef4.seg 04379243
04379243/points/6f3506c9c5202101c4e8952b27b5f370.pts 04379243/expert_verified/points_label/6f3506c9c5202101c4e8952b27b5f370.seg 04379243
04379243/points/fead7e0c30a347b1710801cae5dc529.pts 04379243/expert_verified/points_label/fead7e0c30a347b1710801cae5dc529.seg 04379243
04379243/points/384bf53e12744e2019fb4103277a6b93.pts 04379243/expert_verified/points_label/384bf53e12744e2019fb4103277a6b93.seg 04379243
03001627/points/30378faa6bf5b245fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/30378faa6bf5b245fdef1c01cbd4ae0c.seg 03001627
04379243/points/5690d17b330f73adfeb8ceb93793cb5.pts 04379243/expert_verified/points_label/5690d17b330f73adfeb8ceb93793cb5.seg 04379243
03467517/points/2e4ec0874ea34a50812ca0ac90db1c07.pts 03467517/expert_verified/points_label/2e4ec0874ea34a50812ca0ac90db1c07.seg 03467517
03001627/points/a007a3cd5b8ca7fb19fb4103277a6b93.pts 03001627/expert_verified/points_label/a007a3cd5b8ca7fb19fb4103277a6b93.seg 03001627
03001627/points/bc21c95f766502a78b03575bb54dfd4.pts 03001627/expert_verified/points_label/bc21c95f766502a78b03575bb54dfd4.seg 03001627
04379243/points/6a3ee73d42228f8581654cb17c02fd.pts 04379243/expert_verified/points_label/6a3ee73d42228f8581654cb17c02fd.seg 04379243
04379243/points/4b399cdce8337c29285e0e27752e54a8.pts 04379243/expert_verified/points_label/4b399cdce8337c29285e0e27752e54a8.seg 04379243
04379243/points/7f9d2da43d6aba67afb6676a5cd782b6.pts 04379243/expert_verified/points_label/7f9d2da43d6aba67afb6676a5cd782b6.seg 04379243
03001627/points/72669be1815b2bb81e4fe86c4ad3ec90.pts 03001627/expert_verified/points_label/72669be1815b2bb81e4fe86c4ad3ec90.seg 03001627
04379243/points/223fbcc813831d8c6e526771d2f7444e.pts 04379243/expert_verified/points_label/223fbcc813831d8c6e526771d2f7444e.seg 04379243
02691156/points/adeb5d68e8d65cc419ba010ddb4974fe.pts 02691156/expert_verified/points_label/adeb5d68e8d65cc419ba010ddb4974fe.seg 02691156
03001627/points/8a9d8dad6800d55ff37af16b2893f1d4.pts 03001627/expert_verified/points_label/8a9d8dad6800d55ff37af16b2893f1d4.seg 03001627
04379243/points/db406d9b2a94bce5622d7484764b58f.pts 04379243/expert_verified/points_label/db406d9b2a94bce5622d7484764b58f.seg 04379243
03001627/points/68b88c0be088c21d5e0096fb2d3266a.pts 03001627/expert_verified/points_label/68b88c0be088c21d5e0096fb2d3266a.seg 03001627
03790512/points/973d75ed9c12836f3d033e6cf82ec72c.pts 03790512/expert_verified/points_label/973d75ed9c12836f3d033e6cf82ec72c.seg 03790512
04379243/points/20292fba71362950c59c53f7df509858.pts 04379243/expert_verified/points_label/20292fba71362950c59c53f7df509858.seg 04379243
03001627/points/21fb308ca737174e22f2f93459bd863e.pts 03001627/expert_verified/points_label/21fb308ca737174e22f2f93459bd863e.seg 03001627
03001627/points/be9d5105e48ae27e713decb1a0563b12.pts 03001627/expert_verified/points_label/be9d5105e48ae27e713decb1a0563b12.seg 03001627
02958343/points/c6441f127d51e478f0fb72d24c42a39.pts 02958343/expert_verified/points_label/c6441f127d51e478f0fb72d24c42a39.seg 02958343
03001627/points/f29cbdb2c7bb10f9953d950bcd7de7a.pts 03001627/expert_verified/points_label/f29cbdb2c7bb10f9953d950bcd7de7a.seg 03001627
02691156/points/65654b5c4e488e0c961fa14fc879444e.pts 02691156/expert_verified/points_label/65654b5c4e488e0c961fa14fc879444e.seg 02691156
04379243/points/8654b644c766dd23d1dcc55e36186e4e.pts 04379243/expert_verified/points_label/8654b644c766dd23d1dcc55e36186e4e.seg 04379243
04379243/points/56bb7376dfa9cb5c8cf069d506f8b5ac.pts 04379243/expert_verified/points_label/56bb7376dfa9cb5c8cf069d506f8b5ac.seg 04379243
04379243/points/d291243cfb51ea7dcb25d116843b43a4.pts 04379243/expert_verified/points_label/d291243cfb51ea7dcb25d116843b43a4.seg 04379243
03790512/points/49edb54e97458de8d373c34785838ee4.pts 03790512/expert_verified/points_label/49edb54e97458de8d373c34785838ee4.seg 03790512
04379243/points/216da8313bc7b192ab610b0c94236463.pts 04379243/expert_verified/points_label/216da8313bc7b192ab610b0c94236463.seg 04379243
03001627/points/5ac8b44ff77e5490c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/5ac8b44ff77e5490c8687ff9b0b4e4ac.seg 03001627
03001627/points/956063d67b939431f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/956063d67b939431f56aa11cd5e0c3e.seg 03001627
04379243/points/8dd8370dcaa8d770ea5682a3b818969a.pts 04379243/expert_verified/points_label/8dd8370dcaa8d770ea5682a3b818969a.seg 04379243
03636649/points/3b64d5033c580d2ef76898f881b76a.pts 03636649/expert_verified/points_label/3b64d5033c580d2ef76898f881b76a.seg 03636649
03001627/points/3d9dce1953180fe6f9c9f9697d1ec60.pts 03001627/expert_verified/points_label/3d9dce1953180fe6f9c9f9697d1ec60.seg 03001627
03001627/points/d1b03eeb33fd441d8189e5e3786f2290.pts 03001627/expert_verified/points_label/d1b03eeb33fd441d8189e5e3786f2290.seg 03001627
02691156/points/5294c39d2a57bd7e5cad6226edb8e82.pts 02691156/expert_verified/points_label/5294c39d2a57bd7e5cad6226edb8e82.seg 02691156
04379243/points/7bc93a4cc26fab5c8c12b667670a35f2.pts 04379243/expert_verified/points_label/7bc93a4cc26fab5c8c12b667670a35f2.seg 04379243
04379243/points/813d34995b5c4406b65b71636c46ae49.pts 04379243/expert_verified/points_label/813d34995b5c4406b65b71636c46ae49.seg 04379243
03001627/points/6782b941de7b2199a344c33f76676fbd.pts 03001627/expert_verified/points_label/6782b941de7b2199a344c33f76676fbd.seg 03001627
03636649/points/ea5ae3cfd142c3b923f93f957094a824.pts 03636649/expert_verified/points_label/ea5ae3cfd142c3b923f93f957094a824.seg 03636649
03001627/points/47caca00f993bc4e4b3c42e318f3affc.pts 03001627/expert_verified/points_label/47caca00f993bc4e4b3c42e318f3affc.seg 03001627
02691156/points/b702e35f4a59e81f64801ad2940cdd5.pts 02691156/expert_verified/points_label/b702e35f4a59e81f64801ad2940cdd5.seg 02691156
03636649/points/3b5f0c01c2b914fc6f16f167d27a7dab.pts 03636649/expert_verified/points_label/3b5f0c01c2b914fc6f16f167d27a7dab.seg 03636649
04379243/points/ad63116007d98a6d19758238d4c7aff2.pts 04379243/expert_verified/points_label/ad63116007d98a6d19758238d4c7aff2.seg 04379243
03797390/points/8f6c86feaa74698d5c91ee20ade72edc.pts 03797390/expert_verified/points_label/8f6c86feaa74698d5c91ee20ade72edc.seg 03797390
04379243/points/48baef3ab18d2d43d2afe8d5254a0d04.pts 04379243/expert_verified/points_label/48baef3ab18d2d43d2afe8d5254a0d04.seg 04379243
03001627/points/fe5310a3457bf0e5c4e8952b27b5f370.pts 03001627/expert_verified/points_label/fe5310a3457bf0e5c4e8952b27b5f370.seg 03001627
04379243/points/d4c330d27bbef3808f6610bf672cd686.pts 04379243/expert_verified/points_label/d4c330d27bbef3808f6610bf672cd686.seg 04379243
04379243/points/adcb67b58024afb99910b7ec4c4e599b.pts 04379243/expert_verified/points_label/adcb67b58024afb99910b7ec4c4e599b.seg 04379243
02958343/points/65d6433043c40046b82c0841410a924f.pts 02958343/expert_verified/points_label/65d6433043c40046b82c0841410a924f.seg 02958343
04379243/points/1a00aa6b75362cc5b324368d54a7416f.pts 04379243/expert_verified/points_label/1a00aa6b75362cc5b324368d54a7416f.seg 04379243
04379243/points/7982e2f2984978c6f4b6538438a0b930.pts 04379243/expert_verified/points_label/7982e2f2984978c6f4b6538438a0b930.seg 04379243
03467517/points/26e1801ea747f72f14fe0da28e4f8384.pts 03467517/expert_verified/points_label/26e1801ea747f72f14fe0da28e4f8384.seg 03467517
04379243/points/c8ee4a8b703180992985858e6f5832da.pts 04379243/expert_verified/points_label/c8ee4a8b703180992985858e6f5832da.seg 04379243
02691156/points/f24daae76836e249f0878b58b4e887bf.pts 02691156/expert_verified/points_label/f24daae76836e249f0878b58b4e887bf.seg 02691156
04379243/points/f29863d2fe8863d4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/f29863d2fe8863d4195b8ea6a366e14d.seg 04379243
04379243/points/babb0963a0e17bb59cd0aef0207ac8c6.pts 04379243/expert_verified/points_label/babb0963a0e17bb59cd0aef0207ac8c6.seg 04379243
03001627/points/39911f927331db1c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/39911f927331db1c8687ff9b0b4e4ac.seg 03001627
03001627/points/4a9d3ce54c09a2da696b74614952b2d0.pts 03001627/expert_verified/points_label/4a9d3ce54c09a2da696b74614952b2d0.seg 03001627
03642806/points/caa4afd404f24d21275c1147a304ed86.pts 03642806/expert_verified/points_label/caa4afd404f24d21275c1147a304ed86.seg 03642806
02691156/points/ff6e377e8e5b3757cc34b900bb2492e.pts 02691156/expert_verified/points_label/ff6e377e8e5b3757cc34b900bb2492e.seg 02691156
03001627/points/483cfed0659965ed73c478529c40c4e6.pts 03001627/expert_verified/points_label/483cfed0659965ed73c478529c40c4e6.seg 03001627
03797390/points/4b7888feea81219ab5f4a9188bfa0ef6.pts 03797390/expert_verified/points_label/4b7888feea81219ab5f4a9188bfa0ef6.seg 03797390
03790512/points/40d84e407c46e8d8b31e74d456742c7.pts 03790512/expert_verified/points_label/40d84e407c46e8d8b31e74d456742c7.seg 03790512
04379243/points/176e3b32d749ac94d79f2fc0b8d8ffad.pts 04379243/expert_verified/points_label/176e3b32d749ac94d79f2fc0b8d8ffad.seg 04379243
03001627/points/657790bc7fd16326c132086242d50af2.pts 03001627/expert_verified/points_label/657790bc7fd16326c132086242d50af2.seg 03001627
04379243/points/94c0ab5650ea392ddcfcef693e7ec696.pts 04379243/expert_verified/points_label/94c0ab5650ea392ddcfcef693e7ec696.seg 04379243
03624134/points/bf5cae3922d3cb2bca7250d90eb506cf.pts 03624134/expert_verified/points_label/bf5cae3922d3cb2bca7250d90eb506cf.seg 03624134
03001627/points/49a3b0242c13f92da6fee8e2140acec9.pts 03001627/expert_verified/points_label/49a3b0242c13f92da6fee8e2140acec9.seg 03001627
03636649/points/e4c9bb21fe5bfeb3e21f078602e2eda8.pts 03636649/expert_verified/points_label/e4c9bb21fe5bfeb3e21f078602e2eda8.seg 03636649
03636649/points/6595ee36783d261ed3281970e2c44dbe.pts 03636649/expert_verified/points_label/6595ee36783d261ed3281970e2c44dbe.seg 03636649
02958343/points/9a152b11907b11074549b3c52ae0632e.pts 02958343/expert_verified/points_label/9a152b11907b11074549b3c52ae0632e.seg 02958343
04379243/points/68a7bad2b06bc1a9d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/68a7bad2b06bc1a9d93768e7b9b1eabf.seg 04379243
04379243/points/b9c756b2ff5d66ddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/b9c756b2ff5d66ddfebad4f49b26ec52.seg 04379243
03797390/points/2d10421716b16580e45ef4135c266a12.pts 03797390/expert_verified/points_label/2d10421716b16580e45ef4135c266a12.seg 03797390
03001627/points/2c76aaa00e55c26836c07750784b6bc6.pts 03001627/expert_verified/points_label/2c76aaa00e55c26836c07750784b6bc6.seg 03001627
03636649/points/5cca570916f420e64b3c42e318f3affc.pts 03636649/expert_verified/points_label/5cca570916f420e64b3c42e318f3affc.seg 03636649
03001627/points/9225e57e34334ee019cb07ecb5b4102.pts 03001627/expert_verified/points_label/9225e57e34334ee019cb07ecb5b4102.seg 03001627
03001627/points/17aeeadccf0e560e274b862d3a151946.pts 03001627/expert_verified/points_label/17aeeadccf0e560e274b862d3a151946.seg 03001627
03636649/points/427806f30c61059c22e05b5d2ce39e3b.pts 03636649/expert_verified/points_label/427806f30c61059c22e05b5d2ce39e3b.seg 03636649
03636649/points/17349d6d35aac0685ed28d6c8a1bdfe5.pts 03636649/expert_verified/points_label/17349d6d35aac0685ed28d6c8a1bdfe5.seg 03636649
04379243/points/5ee4cbe45bdc4cd571a782a4379556c7.pts 04379243/expert_verified/points_label/5ee4cbe45bdc4cd571a782a4379556c7.seg 04379243
03636649/points/5eda619e5f36499fc1537287b5c50d9d.pts 03636649/expert_verified/points_label/5eda619e5f36499fc1537287b5c50d9d.seg 03636649
02691156/points/f57c74e194cd2b2bc8727b27ee96a4b7.pts 02691156/expert_verified/points_label/f57c74e194cd2b2bc8727b27ee96a4b7.seg 02691156
02958343/points/27d42437168ccd7ddd75f724c0ccbe00.pts 02958343/expert_verified/points_label/27d42437168ccd7ddd75f724c0ccbe00.seg 02958343
04379243/points/c8cf1c77bbb79d214719088c8e42c6ab.pts 04379243/expert_verified/points_label/c8cf1c77bbb79d214719088c8e42c6ab.seg 04379243
04379243/points/40b48121d1879be2ee0605a41c3320d6.pts 04379243/expert_verified/points_label/40b48121d1879be2ee0605a41c3320d6.seg 04379243
02691156/points/4f9b12d07dce21ac9d93a50cb0355558.pts 02691156/expert_verified/points_label/4f9b12d07dce21ac9d93a50cb0355558.seg 02691156
02691156/points/25bd1569261bc545e8323edc0fe816a8.pts 02691156/expert_verified/points_label/25bd1569261bc545e8323edc0fe816a8.seg 02691156
02691156/points/fbc429365ab7136be1a9c234926c21e2.pts 02691156/expert_verified/points_label/fbc429365ab7136be1a9c234926c21e2.seg 02691156
04379243/points/798c315f86d8f02f931e98da3a93e73e.pts 04379243/expert_verified/points_label/798c315f86d8f02f931e98da3a93e73e.seg 04379243
03790512/points/a0a40a9d5aabd6a7d5dde04c96fd8146.pts 03790512/expert_verified/points_label/a0a40a9d5aabd6a7d5dde04c96fd8146.seg 03790512
04379243/points/884f15cfc6a3eea3dcfcef693e7ec696.pts 04379243/expert_verified/points_label/884f15cfc6a3eea3dcfcef693e7ec696.seg 04379243
04379243/points/f16f939baeb7722e664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/f16f939baeb7722e664b3b9b23ddfcbc.seg 04379243
03001627/points/1e0580f443a9e6d2593ebeeedbff73b.pts 03001627/expert_verified/points_label/1e0580f443a9e6d2593ebeeedbff73b.seg 03001627
03636649/points/927e0654427c4d0b82241d99b4e87f38.pts 03636649/expert_verified/points_label/927e0654427c4d0b82241d99b4e87f38.seg 03636649
03001627/points/bdd29e651e5f6fb2b079317292bdc5d4.pts 03001627/expert_verified/points_label/bdd29e651e5f6fb2b079317292bdc5d4.seg 03001627
03642806/points/cb1e3a990782678b4b6682da890df381.pts 03642806/expert_verified/points_label/cb1e3a990782678b4b6682da890df381.seg 03642806
03001627/points/fd5ac9b342fe518b9d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/fd5ac9b342fe518b9d3ea1c6b57a0095.seg 03001627
02958343/points/6bbcd5608ddf871a4cdd04162f008888.pts 02958343/expert_verified/points_label/6bbcd5608ddf871a4cdd04162f008888.seg 02958343
04379243/points/76338ed3326689b249524cfd5973a145.pts 04379243/expert_verified/points_label/76338ed3326689b249524cfd5973a145.seg 04379243
03001627/points/9a0571ae6169a6ebfebad4f49b26ec52.pts 03001627/expert_verified/points_label/9a0571ae6169a6ebfebad4f49b26ec52.seg 03001627
03948459/points/49429e1d1e90c1ca202be79d8b285c1e.pts 03948459/expert_verified/points_label/49429e1d1e90c1ca202be79d8b285c1e.seg 03948459
02691156/points/45a4ec99ed13ed773c2498c4c2f13ca.pts 02691156/expert_verified/points_label/45a4ec99ed13ed773c2498c4c2f13ca.seg 02691156
04379243/points/70995336d06fc07ae9f3e9c758fef992.pts 04379243/expert_verified/points_label/70995336d06fc07ae9f3e9c758fef992.seg 04379243
03001627/points/6fd76577d0df60669b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6fd76577d0df60669b9f2eb77f5e247e.seg 03001627
03001627/points/66f18d05d960ffe0bcd12732b5a4b789.pts 03001627/expert_verified/points_label/66f18d05d960ffe0bcd12732b5a4b789.seg 03001627
03001627/points/e401be99c5a51d8bef8e9284f76f3024.pts 03001627/expert_verified/points_label/e401be99c5a51d8bef8e9284f76f3024.seg 03001627
03001627/points/4a0b61d33846824ab1f04c301b6ccc90.pts 03001627/expert_verified/points_label/4a0b61d33846824ab1f04c301b6ccc90.seg 03001627
04379243/points/9a5cb4122d518111b339f790b1757e92.pts 04379243/expert_verified/points_label/9a5cb4122d518111b339f790b1757e92.seg 04379243
04379243/points/6281381ce38aa988de98d10ab5975b59.pts 04379243/expert_verified/points_label/6281381ce38aa988de98d10ab5975b59.seg 04379243
04379243/points/d382d9e34f365544278d386bfa54545.pts 04379243/expert_verified/points_label/d382d9e34f365544278d386bfa54545.seg 04379243
03948459/points/6de6e56c6f7d43692866658c90231a1a.pts 03948459/expert_verified/points_label/6de6e56c6f7d43692866658c90231a1a.seg 03948459
02691156/points/494a1698eb82572c3df325aac2f73830.pts 02691156/expert_verified/points_label/494a1698eb82572c3df325aac2f73830.seg 02691156
02691156/points/c581942f40cbb60819ba010ddb4974fe.pts 02691156/expert_verified/points_label/c581942f40cbb60819ba010ddb4974fe.seg 02691156
04379243/points/e9038664b7d35e6b436e6787c76ef3f0.pts 04379243/expert_verified/points_label/e9038664b7d35e6b436e6787c76ef3f0.seg 04379243
04099429/points/56c13d294f8afb1ffb88d148e845f82e.pts 04099429/expert_verified/points_label/56c13d294f8afb1ffb88d148e845f82e.seg 04099429
02958343/points/86fa16c6da908e6b44221994b043fd86.pts 02958343/expert_verified/points_label/86fa16c6da908e6b44221994b043fd86.seg 02958343
04379243/points/3249c3ad90085a9e98d5fc0473d00a1c.pts 04379243/expert_verified/points_label/3249c3ad90085a9e98d5fc0473d00a1c.seg 04379243
03636649/points/8581a3ae1f77319ac066b9622c005c53.pts 03636649/expert_verified/points_label/8581a3ae1f77319ac066b9622c005c53.seg 03636649
03790512/points/6e1397773a4d15db429f1c522640e6f0.pts 03790512/expert_verified/points_label/6e1397773a4d15db429f1c522640e6f0.seg 03790512
03624134/points/c1ab7029de67351cf97a65c35ea619f0.pts 03624134/expert_verified/points_label/c1ab7029de67351cf97a65c35ea619f0.seg 03624134
04379243/points/16e874e6165e836b30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/16e874e6165e836b30bbd4cddd04c77b.seg 04379243
03636649/points/ff08713d837d87edf2098a9f7fc86999.pts 03636649/expert_verified/points_label/ff08713d837d87edf2098a9f7fc86999.seg 03636649
03790512/points/b649be9c09e2b332429f1c522640e6f0.pts 03790512/expert_verified/points_label/b649be9c09e2b332429f1c522640e6f0.seg 03790512
03001627/points/85b16941984902f8facfa12c7d71c89f.pts 03001627/expert_verified/points_label/85b16941984902f8facfa12c7d71c89f.seg 03001627
04379243/points/cf1a7653c10aaa0eab610b0c94236463.pts 04379243/expert_verified/points_label/cf1a7653c10aaa0eab610b0c94236463.seg 04379243
03001627/points/a42aa59fa23b4a4d9c0ca344f487323e.pts 03001627/expert_verified/points_label/a42aa59fa23b4a4d9c0ca344f487323e.seg 03001627
03001627/points/3f4f1d18c61a07f134b707eb14b2a4a5.pts 03001627/expert_verified/points_label/3f4f1d18c61a07f134b707eb14b2a4a5.seg 03001627
03001627/points/d2b9e98373e96afec8d65ca96e6b18ef.pts 03001627/expert_verified/points_label/d2b9e98373e96afec8d65ca96e6b18ef.seg 03001627
03636649/points/71dffdee89efe07cdff00b2637ddcbde.pts 03636649/expert_verified/points_label/71dffdee89efe07cdff00b2637ddcbde.seg 03636649
02691156/points/5ac0cd21410b2a6a341877ff7a6c751f.pts 02691156/expert_verified/points_label/5ac0cd21410b2a6a341877ff7a6c751f.seg 02691156
03636649/points/76eb7436c40e083384d184bdc625781a.pts 03636649/expert_verified/points_label/76eb7436c40e083384d184bdc625781a.seg 03636649
03642806/points/13330d1e7b199dd82530b9c2b65d3f86.pts 03642806/expert_verified/points_label/13330d1e7b199dd82530b9c2b65d3f86.seg 03642806
02691156/points/e726c8e6897130439a6e43b878d5b335.pts 02691156/expert_verified/points_label/e726c8e6897130439a6e43b878d5b335.seg 02691156
04379243/points/40a402e1d949364a104ceb84075e40d6.pts 04379243/expert_verified/points_label/40a402e1d949364a104ceb84075e40d6.seg 04379243
03001627/points/42140baad25c8598baa1a4ff2c45ffc9.pts 03001627/expert_verified/points_label/42140baad25c8598baa1a4ff2c45ffc9.seg 03001627
03001627/points/5283a98b5c693e64ebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/5283a98b5c693e64ebefe6b1d594ad2e.seg 03001627
02691156/points/15898fef6fec88c53ada73811bb576de.pts 02691156/expert_verified/points_label/15898fef6fec88c53ada73811bb576de.seg 02691156
03001627/points/3f8d0d53e2bd74124b3c42e318f3affc.pts 03001627/expert_verified/points_label/3f8d0d53e2bd74124b3c42e318f3affc.seg 03001627
04379243/points/cd106955d3bdf8e751c4deb11af7079e.pts 04379243/expert_verified/points_label/cd106955d3bdf8e751c4deb11af7079e.seg 04379243
03001627/points/11506b96d41f7d3dd7c4a943f33e0384.pts 03001627/expert_verified/points_label/11506b96d41f7d3dd7c4a943f33e0384.seg 03001627
03001627/points/f51ab8433184dfd2c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/f51ab8433184dfd2c8687ff9b0b4e4ac.seg 03001627
02691156/points/c9a6dcf87d1f15bca8607f540cc62ba.pts 02691156/expert_verified/points_label/c9a6dcf87d1f15bca8607f540cc62ba.seg 02691156
04379243/points/d9c75799ff9ff74664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/d9c75799ff9ff74664b3b9b23ddfcbc.seg 04379243
04379243/points/93e81005c19a74b8664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/93e81005c19a74b8664b3b9b23ddfcbc.seg 04379243
02958343/points/5057c9dbf72e0352728fa2df514c65d4.pts 02958343/expert_verified/points_label/5057c9dbf72e0352728fa2df514c65d4.seg 02958343
04379243/points/8ad88ee4442fd0fd8a6ba7ebad3985bb.pts 04379243/expert_verified/points_label/8ad88ee4442fd0fd8a6ba7ebad3985bb.seg 04379243
04379243/points/a2554ec7e2331a8fab610b0c94236463.pts 04379243/expert_verified/points_label/a2554ec7e2331a8fab610b0c94236463.seg 04379243
04379243/points/482a76d14781e55e25374da32e705c.pts 04379243/expert_verified/points_label/482a76d14781e55e25374da32e705c.seg 04379243
02691156/points/d06105ee2a2ae27c51008e496c6cfd2e.pts 02691156/expert_verified/points_label/d06105ee2a2ae27c51008e496c6cfd2e.seg 02691156
04379243/points/45a09b1ce3111e4f22f4fabdf1ee0670.pts 04379243/expert_verified/points_label/45a09b1ce3111e4f22f4fabdf1ee0670.seg 04379243
03467517/points/9aaad035af7e6ab1ed724609df3eb104.pts 03467517/expert_verified/points_label/9aaad035af7e6ab1ed724609df3eb104.seg 03467517
02691156/points/cf0cdaa94220ee3f4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cf0cdaa94220ee3f4c3a35cee92bb95b.seg 02691156
02691156/points/48cb2de06f46cde25ed29e0a9f14425.pts 02691156/expert_verified/points_label/48cb2de06f46cde25ed29e0a9f14425.seg 02691156
03001627/points/2f0a94efe6d1da7f8616812464c86290.pts 03001627/expert_verified/points_label/2f0a94efe6d1da7f8616812464c86290.seg 03001627
02691156/points/e0385af10bddc6a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/e0385af10bddc6a0ca8607f540cc62ba.seg 02691156
03467517/points/71139bd2ff6c4257280ec2e5049bb369.pts 03467517/expert_verified/points_label/71139bd2ff6c4257280ec2e5049bb369.seg 03467517
03001627/points/6251b398004a02fffebad4f49b26ec52.pts 03001627/expert_verified/points_label/6251b398004a02fffebad4f49b26ec52.seg 03001627
03467517/points/7eba657565cc69e913f86abea5e4b9e0.pts 03467517/expert_verified/points_label/7eba657565cc69e913f86abea5e4b9e0.seg 03467517
03001627/points/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.pts 03001627/expert_verified/points_label/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.seg 03001627
03001627/points/ffa1e25f499e586694e98ee4fdfd7464.pts 03001627/expert_verified/points_label/ffa1e25f499e586694e98ee4fdfd7464.seg 03001627
03797390/points/9af98540f45411467246665d3d3724c.pts 03797390/expert_verified/points_label/9af98540f45411467246665d3d3724c.seg 03797390
02691156/points/b9fabfa6d5fedbc3a8e091cb544689d5.pts 02691156/expert_verified/points_label/b9fabfa6d5fedbc3a8e091cb544689d5.seg 02691156
04379243/points/a2561614d015f2fdfebad4f49b26ec52.pts 04379243/expert_verified/points_label/a2561614d015f2fdfebad4f49b26ec52.seg 04379243
03642806/points/2134ad3fc25a6284193a4c984002ed32.pts 03642806/expert_verified/points_label/2134ad3fc25a6284193a4c984002ed32.seg 03642806
03001627/points/d3302b7fa6504cab1a461b43b8f257f.pts 03001627/expert_verified/points_label/d3302b7fa6504cab1a461b43b8f257f.seg 03001627
03467517/points/bf7026f9814230414269db3f92b7aa5e.pts 03467517/expert_verified/points_label/bf7026f9814230414269db3f92b7aa5e.seg 03467517
03636649/points/9aff9fdad0e3555c7eecb4e0df212ad9.pts 03636649/expert_verified/points_label/9aff9fdad0e3555c7eecb4e0df212ad9.seg 03636649
03797390/points/a3cd44bbd3ba5b019a4cbf5d3b79df06.pts 03797390/expert_verified/points_label/a3cd44bbd3ba5b019a4cbf5d3b79df06.seg 03797390
04099429/points/eff3a27a085e02e5146be45f8a3c1ff8.pts 04099429/expert_verified/points_label/eff3a27a085e02e5146be45f8a3c1ff8.seg 04099429
02958343/points/1e3f494626a24badf35b4953d8add91f.pts 02958343/expert_verified/points_label/1e3f494626a24badf35b4953d8add91f.seg 02958343
04379243/points/1f3e217cbc871152d7465eca206fda6f.pts 04379243/expert_verified/points_label/1f3e217cbc871152d7465eca206fda6f.seg 04379243
03636649/points/cef6757831b4d9738c8f019f17f4687c.pts 03636649/expert_verified/points_label/cef6757831b4d9738c8f019f17f4687c.seg 03636649
04379243/points/e8689b8b1610bf2841bb8a7ba579a58.pts 04379243/expert_verified/points_label/e8689b8b1610bf2841bb8a7ba579a58.seg 04379243
03001627/points/40168f46019eb867be7e1d42d63ca9f0.pts 03001627/expert_verified/points_label/40168f46019eb867be7e1d42d63ca9f0.seg 03001627
03624134/points/7aed22a7074f16431cf05d6e4dbb95af.pts 03624134/expert_verified/points_label/7aed22a7074f16431cf05d6e4dbb95af.seg 03624134
04379243/points/5d53ed3005f4dc6856786b90799c4fdb.pts 04379243/expert_verified/points_label/5d53ed3005f4dc6856786b90799c4fdb.seg 04379243
04379243/points/beebc267ea0c16a5c7f6a57f6f73d8a6.pts 04379243/expert_verified/points_label/beebc267ea0c16a5c7f6a57f6f73d8a6.seg 04379243
04379243/points/943d786e2df9251ec76aead7da70af41.pts 04379243/expert_verified/points_label/943d786e2df9251ec76aead7da70af41.seg 04379243
04379243/points/90d87b4d9a5a1e78f4b6538438a0b930.pts 04379243/expert_verified/points_label/90d87b4d9a5a1e78f4b6538438a0b930.seg 04379243
02958343/points/d47353fc60390df85d918097f81825e3.pts 02958343/expert_verified/points_label/d47353fc60390df85d918097f81825e3.seg 02958343
03624134/points/90021da7c71f6bcbf02ee453ff283e26.pts 03624134/expert_verified/points_label/90021da7c71f6bcbf02ee453ff283e26.seg 03624134
02958343/points/d1acd4916d3d3b57c48db2ed8f5e994c.pts 02958343/expert_verified/points_label/d1acd4916d3d3b57c48db2ed8f5e994c.seg 02958343
03001627/points/1d1c829a54f0ae426cdb122727dd360f.pts 03001627/expert_verified/points_label/1d1c829a54f0ae426cdb122727dd360f.seg 03001627
04379243/points/c35a14f84985f92a9856fa70a578baeb.pts 04379243/expert_verified/points_label/c35a14f84985f92a9856fa70a578baeb.seg 04379243
03636649/points/5c5119a226e1ce9934804d261199e1bf.pts 03636649/expert_verified/points_label/5c5119a226e1ce9934804d261199e1bf.seg 03636649
03636649/points/6bb8020fa82b27dde11a3e838aa2c287.pts 03636649/expert_verified/points_label/6bb8020fa82b27dde11a3e838aa2c287.seg 03636649
03797390/points/fad118b32085f3f2c2c72e575af174cd.pts 03797390/expert_verified/points_label/fad118b32085f3f2c2c72e575af174cd.seg 03797390
04379243/points/a82387cf9d9d253aa06f94abffad1304.pts 04379243/expert_verified/points_label/a82387cf9d9d253aa06f94abffad1304.seg 04379243
03948459/points/a7a340a901d63486260a770f90456bf7.pts 03948459/expert_verified/points_label/a7a340a901d63486260a770f90456bf7.seg 03948459
03624134/points/60e7b05ddeeb48eb37fa2c3ecb75f337.pts 03624134/expert_verified/points_label/60e7b05ddeeb48eb37fa2c3ecb75f337.seg 03624134
02958343/points/3e2c3cb4f4c65b9cde9d4070fcdfa604.pts 02958343/expert_verified/points_label/3e2c3cb4f4c65b9cde9d4070fcdfa604.seg 02958343
03001627/points/d58df0968070bf3b4b3c42e318f3affc.pts 03001627/expert_verified/points_label/d58df0968070bf3b4b3c42e318f3affc.seg 03001627
04379243/points/4a3641784a9ecca04fa8d6439169bda4.pts 04379243/expert_verified/points_label/4a3641784a9ecca04fa8d6439169bda4.seg 04379243
04225987/points/d31aaca67fd8ef1827d17dabad15093.pts 04225987/expert_verified/points_label/d31aaca67fd8ef1827d17dabad15093.seg 04225987
03001627/points/c51937167dd0db45f7628281ecb18112.pts 03001627/expert_verified/points_label/c51937167dd0db45f7628281ecb18112.seg 03001627
04379243/points/768cb2332a16fd63855931d119219022.pts 04379243/expert_verified/points_label/768cb2332a16fd63855931d119219022.seg 04379243
03001627/points/8c76176c82e3e42d283b00891f680579.pts 03001627/expert_verified/points_label/8c76176c82e3e42d283b00891f680579.seg 03001627
03001627/points/d4d9b991ff7d31e8c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/d4d9b991ff7d31e8c8687ff9b0b4e4ac.seg 03001627
03797390/points/162201dfe14b73f0281365259d1cf342.pts 03797390/expert_verified/points_label/162201dfe14b73f0281365259d1cf342.seg 03797390
04379243/points/ed1e06e886b5514fe8f49d7c9e73ab9.pts 04379243/expert_verified/points_label/ed1e06e886b5514fe8f49d7c9e73ab9.seg 04379243
03636649/points/90651b3febfc3afe15226aa76eb7c3e.pts 03636649/expert_verified/points_label/90651b3febfc3afe15226aa76eb7c3e.seg 03636649
04379243/points/24b208dd138d8af36210db75a4cd581b.pts 04379243/expert_verified/points_label/24b208dd138d8af36210db75a4cd581b.seg 04379243
03001627/points/439418b35f600f4bb10dc0fca58d0b2c.pts 03001627/expert_verified/points_label/439418b35f600f4bb10dc0fca58d0b2c.seg 03001627
03636649/points/88257c5a48d94b1e2b151d8b52c53b90.pts 03636649/expert_verified/points_label/88257c5a48d94b1e2b151d8b52c53b90.seg 03636649
02691156/points/ad546b049b2246bd609e2d916fa0da27.pts 02691156/expert_verified/points_label/ad546b049b2246bd609e2d916fa0da27.seg 02691156
03001627/points/7efeece3b5cf2853d706779c93538ee1.pts 03001627/expert_verified/points_label/7efeece3b5cf2853d706779c93538ee1.seg 03001627
04379243/points/30dd74f09af6b1c2fe5c8ffd0f5eba47.pts 04379243/expert_verified/points_label/30dd74f09af6b1c2fe5c8ffd0f5eba47.seg 04379243
02691156/points/752d9a010346862551cfdb4c9f126c12.pts 02691156/expert_verified/points_label/752d9a010346862551cfdb4c9f126c12.seg 02691156
03001627/points/d1237422881f4d22ff25b0c2db862d19.pts 03001627/expert_verified/points_label/d1237422881f4d22ff25b0c2db862d19.seg 03001627
04379243/points/95af60aa8cb9be066a76e23e6f966dea.pts 04379243/expert_verified/points_label/95af60aa8cb9be066a76e23e6f966dea.seg 04379243
02691156/points/556d2b99469e62e623a346a784afd6ba.pts 02691156/expert_verified/points_label/556d2b99469e62e623a346a784afd6ba.seg 02691156
04379243/points/6e23179a3559775a65eacc25f128a1c5.pts 04379243/expert_verified/points_label/6e23179a3559775a65eacc25f128a1c5.seg 04379243
02691156/points/3b82e575165383903c83f6e156ad107a.pts 02691156/expert_verified/points_label/3b82e575165383903c83f6e156ad107a.seg 02691156
02773838/points/71ead7f072106c63ed13f430b2941481.pts 02773838/expert_verified/points_label/71ead7f072106c63ed13f430b2941481.seg 02773838
03001627/points/c9d68e1e5309ac25ac57e7d566628472.pts 03001627/expert_verified/points_label/c9d68e1e5309ac25ac57e7d566628472.seg 03001627
02691156/points/b3a59a941500e76535592b447835a16e.pts 02691156/expert_verified/points_label/b3a59a941500e76535592b447835a16e.seg 02691156
03797390/points/4d9764afa3fbeb1b6c69dceb67157a66.pts 03797390/expert_verified/points_label/4d9764afa3fbeb1b6c69dceb67157a66.seg 03797390
04379243/points/68ea1f319a9d724ec3bd24f986301745.pts 04379243/expert_verified/points_label/68ea1f319a9d724ec3bd24f986301745.seg 04379243
03001627/points/30363681727c804095937f6e581cbd41.pts 03001627/expert_verified/points_label/30363681727c804095937f6e581cbd41.seg 03001627
03001627/points/f4f1aba65ebe48eb70930286c914896b.pts 03001627/expert_verified/points_label/f4f1aba65ebe48eb70930286c914896b.seg 03001627
02691156/points/a3fc9ef9f611a783525e60273896d30a.pts 02691156/expert_verified/points_label/a3fc9ef9f611a783525e60273896d30a.seg 02691156
03636649/points/b0871c4ac8505d9c3d39d8012919dd25.pts 03636649/expert_verified/points_label/b0871c4ac8505d9c3d39d8012919dd25.seg 03636649
03001627/points/d7e26a070ee3b35cdf6cfab91d65bb91.pts 03001627/expert_verified/points_label/d7e26a070ee3b35cdf6cfab91d65bb91.seg 03001627
04379243/points/9012c6ca245c1bf4e6c5cd45aa112726.pts 04379243/expert_verified/points_label/9012c6ca245c1bf4e6c5cd45aa112726.seg 04379243
03636649/points/3ab9e4300cee0259f72e8839e840c146.pts 03636649/expert_verified/points_label/3ab9e4300cee0259f72e8839e840c146.seg 03636649
04379243/points/6e0fed54fcae8a62edccc47bf0dcf5d3.pts 04379243/expert_verified/points_label/6e0fed54fcae8a62edccc47bf0dcf5d3.seg 04379243
04379243/points/aafc579804cc095cbababe11fcea8796.pts 04379243/expert_verified/points_label/aafc579804cc095cbababe11fcea8796.seg 04379243
03636649/points/9adee08c737c7c134c6deb9ede0648df.pts 03636649/expert_verified/points_label/9adee08c737c7c134c6deb9ede0648df.seg 03636649
02691156/points/f39985959d394f8c863ab010b80d9ed.pts 02691156/expert_verified/points_label/f39985959d394f8c863ab010b80d9ed.seg 02691156
04379243/points/23d4170c7a0a2a014b3c42e318f3affc.pts 04379243/expert_verified/points_label/23d4170c7a0a2a014b3c42e318f3affc.seg 04379243
04379243/points/a1593fbe3a78c7858795000a72749c36.pts 04379243/expert_verified/points_label/a1593fbe3a78c7858795000a72749c36.seg 04379243
03001627/points/4b2ede169dcc83ce4591019e9d133858.pts 03001627/expert_verified/points_label/4b2ede169dcc83ce4591019e9d133858.seg 03001627
03001627/points/3fa1eeed2e8e2534febad4f49b26ec52.pts 03001627/expert_verified/points_label/3fa1eeed2e8e2534febad4f49b26ec52.seg 03001627
04379243/points/e8ba9621aef9395a3019620286259e2c.pts 04379243/expert_verified/points_label/e8ba9621aef9395a3019620286259e2c.seg 04379243
03001627/points/875925d42780159ffebad4f49b26ec52.pts 03001627/expert_verified/points_label/875925d42780159ffebad4f49b26ec52.seg 03001627
03001627/points/548ab6b6e8b2dc505ff61a3a2a0e2484.pts 03001627/expert_verified/points_label/548ab6b6e8b2dc505ff61a3a2a0e2484.seg 03001627
03467517/points/4f401d78068a9d348ee96618ee16ca27.pts 03467517/expert_verified/points_label/4f401d78068a9d348ee96618ee16ca27.seg 03467517
04379243/points/f7600660924857c0d31d0d81bfe9c743.pts 04379243/expert_verified/points_label/f7600660924857c0d31d0d81bfe9c743.seg 04379243
04379243/points/edba7eb533ae3578ece232edf44331c7.pts 04379243/expert_verified/points_label/edba7eb533ae3578ece232edf44331c7.seg 04379243
03001627/points/8b8fa92f9c677b0713decb1a0563b12.pts 03001627/expert_verified/points_label/8b8fa92f9c677b0713decb1a0563b12.seg 03001627
02691156/points/81e6b629264dad5daf2c6c19cc41708a.pts 02691156/expert_verified/points_label/81e6b629264dad5daf2c6c19cc41708a.seg 02691156
02691156/points/a0a7e673a1e1bca78699933784576e73.pts 02691156/expert_verified/points_label/a0a7e673a1e1bca78699933784576e73.seg 02691156
03636649/points/f01358d4f45cae23ce670f026edf07e5.pts 03636649/expert_verified/points_label/f01358d4f45cae23ce670f026edf07e5.seg 03636649
03001627/points/808fa82fe9ad86d9f1cc184b6fa3e1f9.pts 03001627/expert_verified/points_label/808fa82fe9ad86d9f1cc184b6fa3e1f9.seg 03001627
02691156/points/57937c7ab42260ebf119374ee5d5f944.pts 02691156/expert_verified/points_label/57937c7ab42260ebf119374ee5d5f944.seg 02691156
03001627/points/fbddac94cfa74a7b5c0228148b88226c.pts 03001627/expert_verified/points_label/fbddac94cfa74a7b5c0228148b88226c.seg 03001627
04379243/points/ad92bfc65465091c48d90eef8384210.pts 04379243/expert_verified/points_label/ad92bfc65465091c48d90eef8384210.seg 04379243
03467517/points/6ce23c82af30b629e8f705eb96ba3376.pts 03467517/expert_verified/points_label/6ce23c82af30b629e8f705eb96ba3376.seg 03467517
03001627/points/bd1787066323c7a64424fc4d3c9cb157.pts 03001627/expert_verified/points_label/bd1787066323c7a64424fc4d3c9cb157.seg 03001627
03001627/points/uca24feec-f0c0-454c-baaf-561530686f40.pts 03001627/expert_verified/points_label/uca24feec-f0c0-454c-baaf-561530686f40.seg 03001627
03001627/points/226704c72560008421ceb39dc3069834.pts 03001627/expert_verified/points_label/226704c72560008421ceb39dc3069834.seg 03001627
02691156/points/2c49289098e4492bca8607f540cc62ba.pts 02691156/expert_verified/points_label/2c49289098e4492bca8607f540cc62ba.seg 02691156
03001627/points/cff9a523a9e20eaeb40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/cff9a523a9e20eaeb40f0ac0fb9a650d.seg 03001627
04379243/points/38e90183c838f443b43753a53e4593db.pts 04379243/expert_verified/points_label/38e90183c838f443b43753a53e4593db.seg 04379243
04379243/points/8b4ec70a3c1283b1fb5f8baea920e189.pts 04379243/expert_verified/points_label/8b4ec70a3c1283b1fb5f8baea920e189.seg 04379243
04379243/points/59a1703cb9320c018f49a52c8d710d0f.pts 04379243/expert_verified/points_label/59a1703cb9320c018f49a52c8d710d0f.seg 04379243
03636649/points/4ba237c2c40313f373b3ec02b97cb0f.pts 03636649/expert_verified/points_label/4ba237c2c40313f373b3ec02b97cb0f.seg 03636649
04379243/points/bb027ed892722b1f3399de188dc5ee56.pts 04379243/expert_verified/points_label/bb027ed892722b1f3399de188dc5ee56.seg 04379243
03467517/points/8b1d0f73e54ef59c93f0194265a9746c.pts 03467517/expert_verified/points_label/8b1d0f73e54ef59c93f0194265a9746c.seg 03467517
03467517/points/1300e8bafb819c8e1887f40a4f62df44.pts 03467517/expert_verified/points_label/1300e8bafb819c8e1887f40a4f62df44.seg 03467517
03642806/points/9fa387d7f442b96e75e60c00fabe2744.pts 03642806/expert_verified/points_label/9fa387d7f442b96e75e60c00fabe2744.seg 03642806
04379243/points/e153f757330a4ea3cdd1f51ef2b8f2ed.pts 04379243/expert_verified/points_label/e153f757330a4ea3cdd1f51ef2b8f2ed.seg 04379243
03636649/points/d00157a022079bdef3655a2ce983ab1f.pts 03636649/expert_verified/points_label/d00157a022079bdef3655a2ce983ab1f.seg 03636649
04379243/points/9eeea5f7b030ff6ac155f88004a92bc8.pts 04379243/expert_verified/points_label/9eeea5f7b030ff6ac155f88004a92bc8.seg 04379243
04379243/points/10ed64b4c7eb6d9311ee7ca4f000feba.pts 04379243/expert_verified/points_label/10ed64b4c7eb6d9311ee7ca4f000feba.seg 04379243
03001627/points/6db2255a51caf84e823e7e244bf84209.pts 03001627/expert_verified/points_label/6db2255a51caf84e823e7e244bf84209.seg 03001627
03001627/points/8ddaa112e6ba36b5b1e23c7675c49239.pts 03001627/expert_verified/points_label/8ddaa112e6ba36b5b1e23c7675c49239.seg 03001627
04379243/points/7813f4e4c0a58118cbb8bac2032149c.pts 04379243/expert_verified/points_label/7813f4e4c0a58118cbb8bac2032149c.seg 04379243
03797390/points/336122c3105440d193e42e2720468bf0.pts 03797390/expert_verified/points_label/336122c3105440d193e42e2720468bf0.seg 03797390
03001627/points/f2e2993abf4c952b2e69a7e134f91051.pts 03001627/expert_verified/points_label/f2e2993abf4c952b2e69a7e134f91051.seg 03001627
04379243/points/627248fa64c1db5fab610b0c94236463.pts 04379243/expert_verified/points_label/627248fa64c1db5fab610b0c94236463.seg 04379243
04379243/points/3b465822b34ed20ca05d3424fd8d541a.pts 04379243/expert_verified/points_label/3b465822b34ed20ca05d3424fd8d541a.seg 04379243
03467517/points/a7ddf2e5b9dc278293f0194265a9746c.pts 03467517/expert_verified/points_label/a7ddf2e5b9dc278293f0194265a9746c.seg 03467517
03636649/points/b36bfbbc98cb45431735ea0e092a805a.pts 03636649/expert_verified/points_label/b36bfbbc98cb45431735ea0e092a805a.seg 03636649
04379243/points/7d14ae7d0b7338bda0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/7d14ae7d0b7338bda0ab1d82ef09f78f.seg 04379243
03467517/points/f7645b3c690d954682c2412261cb8600.pts 03467517/expert_verified/points_label/f7645b3c690d954682c2412261cb8600.seg 03467517
02958343/points/41a6deadd39b4c754d0f9a1ef5f184fe.pts 02958343/expert_verified/points_label/41a6deadd39b4c754d0f9a1ef5f184fe.seg 02958343
02691156/points/f74cbd91e6fb40dfce5965228d7e8c9f.pts 02691156/expert_verified/points_label/f74cbd91e6fb40dfce5965228d7e8c9f.seg 02691156
04379243/points/6c4c3bfe275e66b1b75e606711562bfc.pts 04379243/expert_verified/points_label/6c4c3bfe275e66b1b75e606711562bfc.seg 04379243
04379243/points/7d358a01c9467815a9505c473725122e.pts 04379243/expert_verified/points_label/7d358a01c9467815a9505c473725122e.seg 04379243
04379243/points/5fe3476df92392e1397aad305ec14786.pts 04379243/expert_verified/points_label/5fe3476df92392e1397aad305ec14786.seg 04379243
03001627/points/34d3960d35d8d5219b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/34d3960d35d8d5219b9f2eb77f5e247e.seg 03001627
03001627/points/1b67a3a1101a9acb905477d2a8504646.pts 03001627/expert_verified/points_label/1b67a3a1101a9acb905477d2a8504646.seg 03001627
03001627/points/ee4858f78dc33591100e9bd5c4b0af54.pts 03001627/expert_verified/points_label/ee4858f78dc33591100e9bd5c4b0af54.seg 03001627
03001627/points/a578b0027e7d9ec7b2ca3ea77e53abe.pts 03001627/expert_verified/points_label/a578b0027e7d9ec7b2ca3ea77e53abe.seg 03001627
02691156/points/916950e40ca7aabc8b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/916950e40ca7aabc8b96ae1a0a8b84ec.seg 02691156
04379243/points/1abfb0c03c81fc2219fb4103277a6b93.pts 04379243/expert_verified/points_label/1abfb0c03c81fc2219fb4103277a6b93.seg 04379243
02691156/points/a702da03d770f5096e2738fc9da60e6f.pts 02691156/expert_verified/points_label/a702da03d770f5096e2738fc9da60e6f.seg 02691156
04379243/points/2e2894138df855b26f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/2e2894138df855b26f88aa1b7f7cc6c6.seg 04379243
03001627/points/589cd6a1f4367fd834b707eb14b2a4a5.pts 03001627/expert_verified/points_label/589cd6a1f4367fd834b707eb14b2a4a5.seg 03001627
03636649/points/f8534299ecce5c16eaf14273fa406ffc.pts 03636649/expert_verified/points_label/f8534299ecce5c16eaf14273fa406ffc.seg 03636649
04379243/points/ea96b8a866121d1abed1bd9593e318c.pts 04379243/expert_verified/points_label/ea96b8a866121d1abed1bd9593e318c.seg 04379243
03624134/points/9746101f20473d346bbd83c2bc4c3b2e.pts 03624134/expert_verified/points_label/9746101f20473d346bbd83c2bc4c3b2e.seg 03624134
02958343/points/9c4a3879c71df693af0f25977186b501.pts 02958343/expert_verified/points_label/9c4a3879c71df693af0f25977186b501.seg 02958343
03001627/points/6621723f7af35f2dcd344c2b2cefcda6.pts 03001627/expert_verified/points_label/6621723f7af35f2dcd344c2b2cefcda6.seg 03001627
03948459/points/8c9e592c95f95e7c9a6e43b878d5b335.pts 03948459/expert_verified/points_label/8c9e592c95f95e7c9a6e43b878d5b335.seg 03948459
04379243/points/36a6d851dbe02410ad16260d4d73b56.pts 04379243/expert_verified/points_label/36a6d851dbe02410ad16260d4d73b56.seg 04379243
04379243/points/b1ca280d9567270ade98d10ab5975b59.pts 04379243/expert_verified/points_label/b1ca280d9567270ade98d10ab5975b59.seg 04379243
03467517/points/5ed99a0b793e1f5ee52744498b9b3051.pts 03467517/expert_verified/points_label/5ed99a0b793e1f5ee52744498b9b3051.seg 03467517
03001627/points/18fd8342fa5d1d4f5268b70948af88b2.pts 03001627/expert_verified/points_label/18fd8342fa5d1d4f5268b70948af88b2.seg 03001627
02691156/points/cc60baa1a796f5c14c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cc60baa1a796f5c14c3a35cee92bb95b.seg 02691156
03642806/points/3237f5cd4bca555955357c338ec9641.pts 03642806/expert_verified/points_label/3237f5cd4bca555955357c338ec9641.seg 03642806
03001627/points/fee248777c9c4807f8bc1f8036e08e44.pts 03001627/expert_verified/points_label/fee248777c9c4807f8bc1f8036e08e44.seg 03001627
04379243/points/2d90a1998eca8778dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2d90a1998eca8778dcfcef693e7ec696.seg 04379243
02958343/points/3ef7cfbc172840b2393bf61b30c528bb.pts 02958343/expert_verified/points_label/3ef7cfbc172840b2393bf61b30c528bb.seg 02958343
02691156/points/240fd3c1fd804ec1b8cf782e8c539948.pts 02691156/expert_verified/points_label/240fd3c1fd804ec1b8cf782e8c539948.seg 02691156
04379243/points/60c931dcc6d0982944bda2555d37e46.pts 04379243/expert_verified/points_label/60c931dcc6d0982944bda2555d37e46.seg 04379243
04379243/points/93040a14fad5588ed889130b88839a0c.pts 04379243/expert_verified/points_label/93040a14fad5588ed889130b88839a0c.seg 04379243
02958343/points/a75ff576da012340468bac13e007a6e9.pts 02958343/expert_verified/points_label/a75ff576da012340468bac13e007a6e9.seg 02958343
03467517/points/57286d92604c9ebea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/57286d92604c9ebea3d3eb77b119df6d.seg 03467517
03636649/points/913ba6b6ac6aea3356c82fefb25b338b.pts 03636649/expert_verified/points_label/913ba6b6ac6aea3356c82fefb25b338b.seg 03636649
03001627/points/cce9ffdcc7ca8ddea300840c9d7bfa74.pts 03001627/expert_verified/points_label/cce9ffdcc7ca8ddea300840c9d7bfa74.seg 03001627
04379243/points/913c0ff011ad0658dcfcef693e7ec696.pts 04379243/expert_verified/points_label/913c0ff011ad0658dcfcef693e7ec696.seg 04379243
03001627/points/9d0b25421c13008e35836c728d324152.pts 03001627/expert_verified/points_label/9d0b25421c13008e35836c728d324152.seg 03001627
03797390/points/a8f7a0edd3edc3299e54b4084dc33544.pts 03797390/expert_verified/points_label/a8f7a0edd3edc3299e54b4084dc33544.seg 03797390
04379243/points/5b9a7b7952996844d802aa676be38da2.pts 04379243/expert_verified/points_label/5b9a7b7952996844d802aa676be38da2.seg 04379243
02954340/points/4bd0b6df02772d8f59c9250a427b57f.pts 02954340/expert_verified/points_label/4bd0b6df02772d8f59c9250a427b57f.seg 02954340
02958343/points/a72134cd499fd1c4f79e091fa09130a.pts 02958343/expert_verified/points_label/a72134cd499fd1c4f79e091fa09130a.seg 02958343
04379243/points/cc6fbdc6f2aa5ea3d889130b88839a0c.pts 04379243/expert_verified/points_label/cc6fbdc6f2aa5ea3d889130b88839a0c.seg 04379243
03624134/points/85ced924eedc6ff566b5b592ed1ddee0.pts 03624134/expert_verified/points_label/85ced924eedc6ff566b5b592ed1ddee0.seg 03624134
03001627/points/60622d74c0712934a5817f81a1efa3cc.pts 03001627/expert_verified/points_label/60622d74c0712934a5817f81a1efa3cc.seg 03001627
04379243/points/2633f011b236a8979070b65ce7b4b532.pts 04379243/expert_verified/points_label/2633f011b236a8979070b65ce7b4b532.seg 04379243
03001627/points/9d9d69e5f2bc80a867903707764646db.pts 03001627/expert_verified/points_label/9d9d69e5f2bc80a867903707764646db.seg 03001627
03001627/points/ce463d63d8771c5ccf19858fd1963d10.pts 03001627/expert_verified/points_label/ce463d63d8771c5ccf19858fd1963d10.seg 03001627
04379243/points/ad17445446e4fd3adcfcef693e7ec696.pts 04379243/expert_verified/points_label/ad17445446e4fd3adcfcef693e7ec696.seg 04379243
03001627/points/71372c1f20b6a04c43c40c5aa3d5c5b7.pts 03001627/expert_verified/points_label/71372c1f20b6a04c43c40c5aa3d5c5b7.seg 03001627
02691156/points/9436273fc1a5e3ca7af159eaf7625abf.pts 02691156/expert_verified/points_label/9436273fc1a5e3ca7af159eaf7625abf.seg 02691156
03797390/points/b98fa11a567f644344b25d683fe71de.pts 03797390/expert_verified/points_label/b98fa11a567f644344b25d683fe71de.seg 03797390
02691156/points/53eee66291c47a91bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/53eee66291c47a91bc0909d98a1ff2b4.seg 02691156
03642806/points/e55ececde88255b93e73f3893a7337bb.pts 03642806/expert_verified/points_label/e55ececde88255b93e73f3893a7337bb.seg 03642806
02958343/points/1079efee042629d4ce28f0f1b509eda.pts 02958343/expert_verified/points_label/1079efee042629d4ce28f0f1b509eda.seg 02958343
03001627/points/c826c65111c867ab45a1df43bcd9e471.pts 03001627/expert_verified/points_label/c826c65111c867ab45a1df43bcd9e471.seg 03001627
02958343/points/39201299cf83ec2577763486d77d1cb.pts 02958343/expert_verified/points_label/39201299cf83ec2577763486d77d1cb.seg 02958343
04379243/points/e8c01f71fd941af11190e285a2cbc9c.pts 04379243/expert_verified/points_label/e8c01f71fd941af11190e285a2cbc9c.seg 04379243
03001627/points/948f1555282e27da190c615a2115d2f7.pts 03001627/expert_verified/points_label/948f1555282e27da190c615a2115d2f7.seg 03001627
02691156/points/ca4ec545363b3b8e8c2814a4ead9cb90.pts 02691156/expert_verified/points_label/ca4ec545363b3b8e8c2814a4ead9cb90.seg 02691156
03001627/points/b8f4ce34b44620cc9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/b8f4ce34b44620cc9b9f2eb77f5e247e.seg 03001627
02958343/points/188621bbfc7d9477ce27281f3b76d1f5.pts 02958343/expert_verified/points_label/188621bbfc7d9477ce27281f3b76d1f5.seg 02958343
04379243/points/9a71b92445cd3f023a9bc242c86fb7a0.pts 04379243/expert_verified/points_label/9a71b92445cd3f023a9bc242c86fb7a0.seg 04379243
03001627/points/4372b33dfc84c2f56a9ab6fc87e1604e.pts 03001627/expert_verified/points_label/4372b33dfc84c2f56a9ab6fc87e1604e.seg 03001627
03001627/points/b16f1858c1a7c0a65001cb19c4a0eee4.pts 03001627/expert_verified/points_label/b16f1858c1a7c0a65001cb19c4a0eee4.seg 03001627
03467517/points/5238adec0790595930c206f77b5cb4d0.pts 03467517/expert_verified/points_label/5238adec0790595930c206f77b5cb4d0.seg 03467517
02958343/points/3ec7f0347638f7a891eea2fc80d4a25f.pts 02958343/expert_verified/points_label/3ec7f0347638f7a891eea2fc80d4a25f.seg 02958343
02691156/points/32e7224d196e5866bd564bd76cf3cbec.pts 02691156/expert_verified/points_label/32e7224d196e5866bd564bd76cf3cbec.seg 02691156
04379243/points/f9beeefdebf70350f4b6538438a0b930.pts 04379243/expert_verified/points_label/f9beeefdebf70350f4b6538438a0b930.seg 04379243
04379243/points/acbc99e153b9d4d419fb4103277a6b93.pts 04379243/expert_verified/points_label/acbc99e153b9d4d419fb4103277a6b93.seg 04379243
03467517/points/8ebc3d48afeceec752561cc0fb924c36.pts 03467517/expert_verified/points_label/8ebc3d48afeceec752561cc0fb924c36.seg 03467517
04379243/points/966cef675324e416cd415550f639925.pts 04379243/expert_verified/points_label/966cef675324e416cd415550f639925.seg 04379243
03636649/points/85f71a4724fa37c33d39d8012919dd25.pts 03636649/expert_verified/points_label/85f71a4724fa37c33d39d8012919dd25.seg 03636649
03636649/points/370623095c9773e42ce7d46577f8a9bd.pts 03636649/expert_verified/points_label/370623095c9773e42ce7d46577f8a9bd.seg 03636649
03624134/points/bbe934c9cdca9c1839ec49305bb07d3d.pts 03624134/expert_verified/points_label/bbe934c9cdca9c1839ec49305bb07d3d.seg 03624134
02958343/points/d22a2d20acbdca70c972ff3f74d38438.pts 02958343/expert_verified/points_label/d22a2d20acbdca70c972ff3f74d38438.seg 02958343
02958343/points/ff3c8e21a48ed17cc1bcae9def1986da.pts 02958343/expert_verified/points_label/ff3c8e21a48ed17cc1bcae9def1986da.seg 02958343
03001627/points/fd5ca05b59b30241d838ae16242881dc.pts 03001627/expert_verified/points_label/fd5ca05b59b30241d838ae16242881dc.seg 03001627
02691156/points/e3aff5ae3e8f2a7c4c2c88971423d0be.pts 02691156/expert_verified/points_label/e3aff5ae3e8f2a7c4c2c88971423d0be.seg 02691156
02691156/points/b4575e5e6161fd497b164268a44f7712.pts 02691156/expert_verified/points_label/b4575e5e6161fd497b164268a44f7712.seg 02691156
03467517/points/153e7883f6cf0e66d57700c05b1862d8.pts 03467517/expert_verified/points_label/153e7883f6cf0e66d57700c05b1862d8.seg 03467517
03642806/points/4fc3d56243d2d8801ef1ccfaf50f2048.pts 03642806/expert_verified/points_label/4fc3d56243d2d8801ef1ccfaf50f2048.seg 03642806
04379243/points/ec9861c234daf6bc915f51b5f5e95ffa.pts 04379243/expert_verified/points_label/ec9861c234daf6bc915f51b5f5e95ffa.seg 04379243
03001627/points/7114ef00fe68d053cccbd142483bf2e7.pts 03001627/expert_verified/points_label/7114ef00fe68d053cccbd142483bf2e7.seg 03001627
02691156/points/e812f54386acd072d44f37c9e0fb10d0.pts 02691156/expert_verified/points_label/e812f54386acd072d44f37c9e0fb10d0.seg 02691156
03001627/points/5490efbdadce792f524f4eb395a8604.pts 03001627/expert_verified/points_label/5490efbdadce792f524f4eb395a8604.seg 03001627
03948459/points/42740af029297f1d9874fa4c7b1a4298.pts 03948459/expert_verified/points_label/42740af029297f1d9874fa4c7b1a4298.seg 03948459
03001627/points/d1ec6e9b8063b7efd7f7a4c4609b0913.pts 03001627/expert_verified/points_label/d1ec6e9b8063b7efd7f7a4c4609b0913.seg 03001627
04379243/points/4b11be42b0c0482dd94faaee2b20e2bf.pts 04379243/expert_verified/points_label/4b11be42b0c0482dd94faaee2b20e2bf.seg 04379243
03001627/points/d29971cef754cc91cd8c5d1ba690a2c3.pts 03001627/expert_verified/points_label/d29971cef754cc91cd8c5d1ba690a2c3.seg 03001627
04379243/points/8cc8485f249a37f595b25bd3accf45b5.pts 04379243/expert_verified/points_label/8cc8485f249a37f595b25bd3accf45b5.seg 04379243
04379243/points/bb5dbf708d5eb7f82099f9e22ca45b04.pts 04379243/expert_verified/points_label/bb5dbf708d5eb7f82099f9e22ca45b04.seg 04379243
03001627/points/c1b64fef5f3efa0a129905ebfd12d5cd.pts 03001627/expert_verified/points_label/c1b64fef5f3efa0a129905ebfd12d5cd.seg 03001627
04379243/points/e58e958428584b2b79972b30518c97e2.pts 04379243/expert_verified/points_label/e58e958428584b2b79972b30518c97e2.seg 04379243
03790512/points/90a521e0def2631fd5dde04c96fd8146.pts 03790512/expert_verified/points_label/90a521e0def2631fd5dde04c96fd8146.seg 03790512
03467517/points/fcab134da044e5fc77f469126771fc30.pts 03467517/expert_verified/points_label/fcab134da044e5fc77f469126771fc30.seg 03467517
03001627/points/1d6faeb6d77d1f2cf95cd8df6bebbc3a.pts 03001627/expert_verified/points_label/1d6faeb6d77d1f2cf95cd8df6bebbc3a.seg 03001627
04379243/points/e993ddaf6d03003071a782a4379556c7.pts 04379243/expert_verified/points_label/e993ddaf6d03003071a782a4379556c7.seg 04379243
03001627/points/702cebffa33a19f019f079d1b712f46f.pts 03001627/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 03001627
03790512/points/7b4eb8cbc470d0d6d5dde04c96fd8146.pts 03790512/expert_verified/points_label/7b4eb8cbc470d0d6d5dde04c96fd8146.seg 03790512
03001627/points/9515e377c1ec86529b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9515e377c1ec86529b9f2eb77f5e247e.seg 03001627
03001627/points/9c3d7b65c739a618285330f26226f8fb.pts 03001627/expert_verified/points_label/9c3d7b65c739a618285330f26226f8fb.seg 03001627
03790512/points/8ed4bdaf0c8b88ea8b31e74d456742c7.pts 03790512/expert_verified/points_label/8ed4bdaf0c8b88ea8b31e74d456742c7.seg 03790512
02958343/points/6ed2957beeb7940a9fbaa69916aaebda.pts 02958343/expert_verified/points_label/6ed2957beeb7940a9fbaa69916aaebda.seg 02958343
03001627/points/37e2b82d5e9dde21cbde89e0c48a01bf.pts 03001627/expert_verified/points_label/37e2b82d5e9dde21cbde89e0c48a01bf.seg 03001627
04379243/points/1b6bd64fda74bdc4d6983f351200ac6a.pts 04379243/expert_verified/points_label/1b6bd64fda74bdc4d6983f351200ac6a.seg 04379243
04379243/points/531381f5bbc69e485769b3af36a2ff9f.pts 04379243/expert_verified/points_label/531381f5bbc69e485769b3af36a2ff9f.seg 04379243
03790512/points/992fbae5178edcbc4e31d0cb4d7568.pts 03790512/expert_verified/points_label/992fbae5178edcbc4e31d0cb4d7568.seg 03790512
04379243/points/65e7fd8d158658106a76e23e6f966dea.pts 04379243/expert_verified/points_label/65e7fd8d158658106a76e23e6f966dea.seg 04379243
02691156/points/2229bc4e646f506679f56e78e8640bfb.pts 02691156/expert_verified/points_label/2229bc4e646f506679f56e78e8640bfb.seg 02691156
02954340/points/f40b47fcbf83b962f0d11ae402ef940e.pts 02954340/expert_verified/points_label/f40b47fcbf83b962f0d11ae402ef940e.seg 02954340
02773838/points/cbc2328cadf8dc573394926146371698.pts 02773838/expert_verified/points_label/cbc2328cadf8dc573394926146371698.seg 02773838
02958343/points/3c6d7c6ce950917b3a93df79ef2b80ef.pts 02958343/expert_verified/points_label/3c6d7c6ce950917b3a93df79ef2b80ef.seg 02958343
02958343/points/2ccaaa66525d7f095473e57e894e0ef5.pts 02958343/expert_verified/points_label/2ccaaa66525d7f095473e57e894e0ef5.seg 02958343
02691156/points/70d9304de59792a9515d73fcb34092fc.pts 02691156/expert_verified/points_label/70d9304de59792a9515d73fcb34092fc.seg 02691156
03001627/points/2ed8d45343a442097869557127addfc0.pts 03001627/expert_verified/points_label/2ed8d45343a442097869557127addfc0.seg 03001627
04379243/points/84f5e52756fc84f86df14337f24e49f4.pts 04379243/expert_verified/points_label/84f5e52756fc84f86df14337f24e49f4.seg 04379243
03001627/points/b33a3b1627ad61eb8ca4809dcf42fe1.pts 03001627/expert_verified/points_label/b33a3b1627ad61eb8ca4809dcf42fe1.seg 03001627
04379243/points/369c19c0971221f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/369c19c0971221f3664b3b9b23ddfcbc.seg 04379243
03642806/points/5a13f7551c20eb29f3ebfe51dc60263e.pts 03642806/expert_verified/points_label/5a13f7551c20eb29f3ebfe51dc60263e.seg 03642806
04379243/points/1b01ef65920c342323bdffac38e6b250.pts 04379243/expert_verified/points_label/1b01ef65920c342323bdffac38e6b250.seg 04379243
02691156/points/9b687f9cff46d43d89c2da356f872ebc.pts 02691156/expert_verified/points_label/9b687f9cff46d43d89c2da356f872ebc.seg 02691156
04379243/points/746ceaf694d85eb5d5192f88466da1dc.pts 04379243/expert_verified/points_label/746ceaf694d85eb5d5192f88466da1dc.seg 04379243
04379243/points/9f4eb0d734a2b7a4ab610b0c94236463.pts 04379243/expert_verified/points_label/9f4eb0d734a2b7a4ab610b0c94236463.seg 04379243
03001627/points/a1213da0e7efffcafebad4f49b26ec52.pts 03001627/expert_verified/points_label/a1213da0e7efffcafebad4f49b26ec52.seg 03001627
02958343/points/71b00ea32b1810ac373af83f3f2fe606.pts 02958343/expert_verified/points_label/71b00ea32b1810ac373af83f3f2fe606.seg 02958343
02691156/points/52a84fea7c314f4c3dfc741b4df74043.pts 02691156/expert_verified/points_label/52a84fea7c314f4c3dfc741b4df74043.seg 02691156
02958343/points/9f3c463272d13d39eb7780cdb3ece367.pts 02958343/expert_verified/points_label/9f3c463272d13d39eb7780cdb3ece367.seg 02958343
03001627/points/def03f645b3fbd665bb93149cc0adf0.pts 03001627/expert_verified/points_label/def03f645b3fbd665bb93149cc0adf0.seg 03001627
03001627/points/f9e386d968653602d68fb8f5d99affa0.pts 03001627/expert_verified/points_label/f9e386d968653602d68fb8f5d99affa0.seg 03001627
03467517/points/9c399ebc617349dcd016bd20f13ab302.pts 03467517/expert_verified/points_label/9c399ebc617349dcd016bd20f13ab302.seg 03467517
04379243/points/aaaba1bbe037d3b1e406974af41e8842.pts 04379243/expert_verified/points_label/aaaba1bbe037d3b1e406974af41e8842.seg 04379243
03001627/points/4030ea84b560b857febad4f49b26ec52.pts 03001627/expert_verified/points_label/4030ea84b560b857febad4f49b26ec52.seg 03001627
04379243/points/a38405108fb416d8356ca1f9220b9968.pts 04379243/expert_verified/points_label/a38405108fb416d8356ca1f9220b9968.seg 04379243
04379243/points/f864677894410315ab610b0c94236463.pts 04379243/expert_verified/points_label/f864677894410315ab610b0c94236463.seg 04379243
02954340/points/da5e5ec4c486d6c03baa6271927f050e.pts 02954340/expert_verified/points_label/da5e5ec4c486d6c03baa6271927f050e.seg 02954340
02691156/points/eed299b690be51ffbd931fcaa69140.pts 02691156/expert_verified/points_label/eed299b690be51ffbd931fcaa69140.seg 02691156
03797390/points/b4ae56d6638d5338de671f28c83d2dcb.pts 03797390/expert_verified/points_label/b4ae56d6638d5338de671f28c83d2dcb.seg 03797390
04379243/points/10cc8c941fc8aeaa71a782a4379556c7.pts 04379243/expert_verified/points_label/10cc8c941fc8aeaa71a782a4379556c7.seg 04379243
03636649/points/61b57e8b5da8fb13d527a9a6f5a872b9.pts 03636649/expert_verified/points_label/61b57e8b5da8fb13d527a9a6f5a872b9.seg 03636649
02691156/points/ae4a9574248395b671d03b466c72ce41.pts 02691156/expert_verified/points_label/ae4a9574248395b671d03b466c72ce41.seg 02691156
04379243/points/8cfe3ff92244310534506cc3910614fe.pts 04379243/expert_verified/points_label/8cfe3ff92244310534506cc3910614fe.seg 04379243
03001627/points/597cb92a5bfb580eed98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/597cb92a5bfb580eed98cca8f0ccd5f7.seg 03001627
03001627/points/4231883e92a3c1a21c62d11641ffbd35.pts 03001627/expert_verified/points_label/4231883e92a3c1a21c62d11641ffbd35.seg 03001627
03636649/points/28793511c46b4fa030f6e0ede20c4525.pts 03636649/expert_verified/points_label/28793511c46b4fa030f6e0ede20c4525.seg 03636649
02958343/points/4c60f32b6efdc7217dfb1ee6a4b12bf8.pts 02958343/expert_verified/points_label/4c60f32b6efdc7217dfb1ee6a4b12bf8.seg 02958343
04379243/points/397c56f15e547fad1bb088904f7cb154.pts 04379243/expert_verified/points_label/397c56f15e547fad1bb088904f7cb154.seg 04379243
04379243/points/9bb816d6a3517a5ca74c2333655a11dd.pts 04379243/expert_verified/points_label/9bb816d6a3517a5ca74c2333655a11dd.seg 04379243
03790512/points/bae59e64a50d3aa2f68f798d07e007b6.pts 03790512/expert_verified/points_label/bae59e64a50d3aa2f68f798d07e007b6.seg 03790512
04379243/points/8b094873d775f6e21130871dbfe24c18.pts 04379243/expert_verified/points_label/8b094873d775f6e21130871dbfe24c18.seg 04379243
04379243/points/4d2f7c689e77df6b6dc1766995c17a41.pts 04379243/expert_verified/points_label/4d2f7c689e77df6b6dc1766995c17a41.seg 04379243
03467517/points/16916a50a064304bf6ed0b697979412e.pts 03467517/expert_verified/points_label/16916a50a064304bf6ed0b697979412e.seg 03467517
03636649/points/c802fa4c82498450af6016f34c89d087.pts 03636649/expert_verified/points_label/c802fa4c82498450af6016f34c89d087.seg 03636649
03001627/points/1ec5a88141aefca9cf6e4dd7ee69d71f.pts 03001627/expert_verified/points_label/1ec5a88141aefca9cf6e4dd7ee69d71f.seg 03001627
04379243/points/bdefbb1f281434e39961e1085a81acc5.pts 04379243/expert_verified/points_label/bdefbb1f281434e39961e1085a81acc5.seg 04379243
04379243/points/acf57dbafe8966f577fb15a8d7923976.pts 04379243/expert_verified/points_label/acf57dbafe8966f577fb15a8d7923976.seg 04379243
03642806/points/cc67f6608c41743ec1830f8ca7a3cbed.pts 03642806/expert_verified/points_label/cc67f6608c41743ec1830f8ca7a3cbed.seg 03642806
03001627/points/95e1571acdd75922afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/95e1571acdd75922afdb9a672b7d3b8a.seg 03001627
04379243/points/2ebe5dfb7bd9a50c6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/2ebe5dfb7bd9a50c6effbd64ad6b71b8.seg 04379243
03001627/points/a6420c4ed13cf628945a77b945b7b70f.pts 03001627/expert_verified/points_label/a6420c4ed13cf628945a77b945b7b70f.seg 03001627
04379243/points/1de679dd26d8c69cae44c65a6d0f0732.pts 04379243/expert_verified/points_label/1de679dd26d8c69cae44c65a6d0f0732.seg 04379243
03001627/points/271012d5de261d08101accd22c701b9.pts 03001627/expert_verified/points_label/271012d5de261d08101accd22c701b9.seg 03001627
04379243/points/5e409a2627f7cd7d63ecd64ef0e6814c.pts 04379243/expert_verified/points_label/5e409a2627f7cd7d63ecd64ef0e6814c.seg 04379243
02691156/points/c9aeb20d7cd1b3b45e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/c9aeb20d7cd1b3b45e9e2656aff7dd5b.seg 02691156
04379243/points/45b23ac79688170893ba1eeaf62819a2.pts 04379243/expert_verified/points_label/45b23ac79688170893ba1eeaf62819a2.seg 04379243
02691156/points/9ac292686a2fcebbe719b5362fe06bbb.pts 02691156/expert_verified/points_label/9ac292686a2fcebbe719b5362fe06bbb.seg 02691156
04379243/points/3b0c62bde7b24de85ce578b5b4bfae3c.pts 04379243/expert_verified/points_label/3b0c62bde7b24de85ce578b5b4bfae3c.seg 04379243
02958343/points/c487e9850891e1ec2d15396b7bcc6366.pts 02958343/expert_verified/points_label/c487e9850891e1ec2d15396b7bcc6366.seg 02958343
03636649/points/b8e25e0825cb5db7765609a3f435fe9d.pts 03636649/expert_verified/points_label/b8e25e0825cb5db7765609a3f435fe9d.seg 03636649
03001627/points/9fd6bb18dc21c70766ef9dd2f3ef27d3.pts 03001627/expert_verified/points_label/9fd6bb18dc21c70766ef9dd2f3ef27d3.seg 03001627
02958343/points/bf37249fc8e16fd8f9a88cc63b910f3.pts 02958343/expert_verified/points_label/bf37249fc8e16fd8f9a88cc63b910f3.seg 02958343
04225987/points/58ae991bd0350810b9ac379f661f5c75.pts 04225987/expert_verified/points_label/58ae991bd0350810b9ac379f661f5c75.seg 04225987
03001627/points/508306f8ddf1b54c41cc9e8c39b4e399.pts 03001627/expert_verified/points_label/508306f8ddf1b54c41cc9e8c39b4e399.seg 03001627
03642806/points/ef5b312fc20f1b20aab089a6db538ba7.pts 03642806/expert_verified/points_label/ef5b312fc20f1b20aab089a6db538ba7.seg 03642806
03001627/points/d97c5945e9449a58737e4e0df09d751.pts 03001627/expert_verified/points_label/d97c5945e9449a58737e4e0df09d751.seg 03001627
03001627/points/e1897a4391784bc2e8b2b8dc0c816caf.pts 03001627/expert_verified/points_label/e1897a4391784bc2e8b2b8dc0c816caf.seg 03001627
04379243/points/a624ebf0bf0451a8d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/a624ebf0bf0451a8d93768e7b9b1eabf.seg 04379243
03636649/points/1e5e1ff56c27c0d2adc5f5aafedb1c38.pts 03636649/expert_verified/points_label/1e5e1ff56c27c0d2adc5f5aafedb1c38.seg 03636649
03642806/points/2ce3a50ca6087f30d8e007cc6755cce9.pts 03642806/expert_verified/points_label/2ce3a50ca6087f30d8e007cc6755cce9.seg 03642806
02691156/points/d615a8217b70af06bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/d615a8217b70af06bc0909d98a1ff2b4.seg 02691156
02691156/points/6f72a0d86494b551a834b9c8bfc8647a.pts 02691156/expert_verified/points_label/6f72a0d86494b551a834b9c8bfc8647a.seg 02691156
03001627/points/20fbab2b8770a1cbf51f77a6d7299806.pts 03001627/expert_verified/points_label/20fbab2b8770a1cbf51f77a6d7299806.seg 03001627
03001627/points/d239d38424429a9a4626612b5d655dc.pts 03001627/expert_verified/points_label/d239d38424429a9a4626612b5d655dc.seg 03001627
03001627/points/4c97f421c4ea4396d8ac5d7ad0953104.pts 03001627/expert_verified/points_label/4c97f421c4ea4396d8ac5d7ad0953104.seg 03001627
03001627/points/5b68a6c2baf0ad61d0de9c949c366777.pts 03001627/expert_verified/points_label/5b68a6c2baf0ad61d0de9c949c366777.seg 03001627
04379243/points/9bd1c242bd66d2fbb63c01786992bd2f.pts 04379243/expert_verified/points_label/9bd1c242bd66d2fbb63c01786992bd2f.seg 04379243
03001627/points/e2dbe84030167f1ca5aad165050e534c.pts 03001627/expert_verified/points_label/e2dbe84030167f1ca5aad165050e534c.seg 03001627
03001627/points/1c17cc67b8c747c3febad4f49b26ec52.pts 03001627/expert_verified/points_label/1c17cc67b8c747c3febad4f49b26ec52.seg 03001627
04379243/points/2766a883126503cac3bd24f986301745.pts 04379243/expert_verified/points_label/2766a883126503cac3bd24f986301745.seg 04379243
04225987/points/755dc44dae7791761082f2ea630bf69e.pts 04225987/expert_verified/points_label/755dc44dae7791761082f2ea630bf69e.seg 04225987
04379243/points/c38ba6c06d2b813230c589758b4b5646.pts 04379243/expert_verified/points_label/c38ba6c06d2b813230c589758b4b5646.seg 04379243
02691156/points/44c0cb6571f6f000ca8607f540cc62ba.pts 02691156/expert_verified/points_label/44c0cb6571f6f000ca8607f540cc62ba.seg 02691156
03636649/points/522bc10920249e67141c66e2b49d221.pts 03636649/expert_verified/points_label/522bc10920249e67141c66e2b49d221.seg 03636649
03790512/points/4548d86cf7f1c11ad373c34785838ee4.pts 03790512/expert_verified/points_label/4548d86cf7f1c11ad373c34785838ee4.seg 03790512
02958343/points/37c5ac3d5b34761add75f724c0ccbe00.pts 02958343/expert_verified/points_label/37c5ac3d5b34761add75f724c0ccbe00.seg 02958343
04379243/points/a15f31e2302f6ae5d67a73ffd62ba73f.pts 04379243/expert_verified/points_label/a15f31e2302f6ae5d67a73ffd62ba73f.seg 04379243
02958343/points/6d714f7b7170a581da8e502a3c6cb4fb.pts 02958343/expert_verified/points_label/6d714f7b7170a581da8e502a3c6cb4fb.seg 02958343
03624134/points/17c4163247e9237d4b7644126b1d71e0.pts 03624134/expert_verified/points_label/17c4163247e9237d4b7644126b1d71e0.seg 03624134
03636649/points/7972fd0fe5755b4ad42b9650f19dd425.pts 03636649/expert_verified/points_label/7972fd0fe5755b4ad42b9650f19dd425.seg 03636649
03001627/points/8ff4ba87d700054546992ce9fde1b2c2.pts 03001627/expert_verified/points_label/8ff4ba87d700054546992ce9fde1b2c2.seg 03001627
03636649/points/a654df55875a2104d663817442d5278.pts 03636649/expert_verified/points_label/a654df55875a2104d663817442d5278.seg 03636649
04379243/points/9c12fada31224bdf58c4e7e56d799d97.pts 04379243/expert_verified/points_label/9c12fada31224bdf58c4e7e56d799d97.seg 04379243
03636649/points/9dad7ce60aa168d72cd2160e449d45ae.pts 03636649/expert_verified/points_label/9dad7ce60aa168d72cd2160e449d45ae.seg 03636649
02691156/points/cfb555a4d82a600aca8607f540cc62ba.pts 02691156/expert_verified/points_label/cfb555a4d82a600aca8607f540cc62ba.seg 02691156
04379243/points/415c174ecdc612fb6f5c30e29039b12d.pts 04379243/expert_verified/points_label/415c174ecdc612fb6f5c30e29039b12d.seg 04379243
03467517/points/a5e2f05386e4ba55a894e1aba5d3799a.pts 03467517/expert_verified/points_label/a5e2f05386e4ba55a894e1aba5d3799a.seg 03467517
03001627/points/a91b2c89e543a4b3aa3d970c5602cd4a.pts 03001627/expert_verified/points_label/a91b2c89e543a4b3aa3d970c5602cd4a.seg 03001627
03624134/points/97ed13011e2d85e16029317225a75a9f.pts 03624134/expert_verified/points_label/97ed13011e2d85e16029317225a75a9f.seg 03624134
04379243/points/388ea3f8ba27da8b777b6246417c94ff.pts 04379243/expert_verified/points_label/388ea3f8ba27da8b777b6246417c94ff.seg 04379243
04379243/points/983cd9caf65adf1ddf6cfab91d65bb91.pts 04379243/expert_verified/points_label/983cd9caf65adf1ddf6cfab91d65bb91.seg 04379243
03001627/points/e65d2f0ed75a786a37b2bb75885cfc44.pts 03001627/expert_verified/points_label/e65d2f0ed75a786a37b2bb75885cfc44.seg 03001627
03624134/points/dce941899bcb752dfe474f09e3f3ac9a.pts 03624134/expert_verified/points_label/dce941899bcb752dfe474f09e3f3ac9a.seg 03624134
04379243/points/ea3bcd9e6c4205031964126395b17c2a.pts 04379243/expert_verified/points_label/ea3bcd9e6c4205031964126395b17c2a.seg 04379243
02691156/points/d13d131a649c5df38b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/d13d131a649c5df38b96ae1a0a8b84ec.seg 02691156
04379243/points/f917474a20558aa33bbab77a66bc3671.pts 04379243/expert_verified/points_label/f917474a20558aa33bbab77a66bc3671.seg 04379243
03001627/points/4a24652fbf2bed7e93583c67df8faf1.pts 03001627/expert_verified/points_label/4a24652fbf2bed7e93583c67df8faf1.seg 03001627
02691156/points/5dd2324cd6ebf52e293fdbda4e7beec9.pts 02691156/expert_verified/points_label/5dd2324cd6ebf52e293fdbda4e7beec9.seg 02691156
03642806/points/a59d3d87068d313c2656684d670220c2.pts 03642806/expert_verified/points_label/a59d3d87068d313c2656684d670220c2.seg 03642806
04379243/points/5354ecb0e3aa1da074a16879fb3ac81f.pts 04379243/expert_verified/points_label/5354ecb0e3aa1da074a16879fb3ac81f.seg 04379243
03642806/points/6c6a96e4486cc02cda66ecbb2c411f37.pts 03642806/expert_verified/points_label/6c6a96e4486cc02cda66ecbb2c411f37.seg 03642806
04225987/points/fd3627deb2476b0f1f942c57ac0e8959.pts 04225987/expert_verified/points_label/fd3627deb2476b0f1f942c57ac0e8959.seg 04225987
04379243/points/91bf48934d3b52ea36658c6705d0c08.pts 04379243/expert_verified/points_label/91bf48934d3b52ea36658c6705d0c08.seg 04379243
04379243/points/18be1556eb4da5af7ccf848ce05c84be.pts 04379243/expert_verified/points_label/18be1556eb4da5af7ccf848ce05c84be.seg 04379243
02958343/points/33211aabfefa14603b05c2ad25b4380f.pts 02958343/expert_verified/points_label/33211aabfefa14603b05c2ad25b4380f.seg 02958343
04379243/points/3243ddb2aa4d1659beb83c64f2162734.pts 04379243/expert_verified/points_label/3243ddb2aa4d1659beb83c64f2162734.seg 04379243
04379243/points/4ce90fe70faf4c3e255bc16374754e69.pts 04379243/expert_verified/points_label/4ce90fe70faf4c3e255bc16374754e69.seg 04379243
04379243/points/15be511a2433482aa192483aa282f8e5.pts 04379243/expert_verified/points_label/15be511a2433482aa192483aa282f8e5.seg 04379243
03624134/points/70b6b3ba6a27fd6f782db73f915dfbb8.pts 03624134/expert_verified/points_label/70b6b3ba6a27fd6f782db73f915dfbb8.seg 03624134
03001627/points/519d19f3adebd20aba49014d9a3afe99.pts 03001627/expert_verified/points_label/519d19f3adebd20aba49014d9a3afe99.seg 03001627
03467517/points/ca9720d793355dd693f0194265a9746c.pts 03467517/expert_verified/points_label/ca9720d793355dd693f0194265a9746c.seg 03467517
03001627/points/e19214cabca496a3f7b54e04c7238d7.pts 03001627/expert_verified/points_label/e19214cabca496a3f7b54e04c7238d7.seg 03001627
03001627/points/ea1bfe81b88395fcaa29e9f0529e8ef7.pts 03001627/expert_verified/points_label/ea1bfe81b88395fcaa29e9f0529e8ef7.seg 03001627
03001627/points/2b110b833111b38c420adf24e49f74c8.pts 03001627/expert_verified/points_label/2b110b833111b38c420adf24e49f74c8.seg 03001627
03001627/points/7b405c1d6d2dbea9f91663a74ccd2338.pts 03001627/expert_verified/points_label/7b405c1d6d2dbea9f91663a74ccd2338.seg 03001627
02691156/points/489d3e4cc3d790a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/489d3e4cc3d790a0ca8607f540cc62ba.seg 02691156
04379243/points/79eeee790ed5a5aac242632b2a8c3129.pts 04379243/expert_verified/points_label/79eeee790ed5a5aac242632b2a8c3129.seg 04379243
03624134/points/665bf5d30d342d64adee73efb2c043f8.pts 03624134/expert_verified/points_label/665bf5d30d342d64adee73efb2c043f8.seg 03624134
03467517/points/7f3f5c9953fb7e0a6cbec6f3d994a573.pts 03467517/expert_verified/points_label/7f3f5c9953fb7e0a6cbec6f3d994a573.seg 03467517
03001627/points/d2597d18fdc3594e1dc59d2adbe5297d.pts 03001627/expert_verified/points_label/d2597d18fdc3594e1dc59d2adbe5297d.seg 03001627
03001627/points/a9a1147eae9936f76f1e07a56c129dfc.pts 03001627/expert_verified/points_label/a9a1147eae9936f76f1e07a56c129dfc.seg 03001627
02691156/points/64cb683afd5e9e559db1d21b460eacef.pts 02691156/expert_verified/points_label/64cb683afd5e9e559db1d21b460eacef.seg 02691156
03624134/points/e0a78d771cfde145a5cea7e40e4d21ff.pts 03624134/expert_verified/points_label/e0a78d771cfde145a5cea7e40e4d21ff.seg 03624134
02691156/points/e59c4f290d8585a862b600da24e0965.pts 02691156/expert_verified/points_label/e59c4f290d8585a862b600da24e0965.seg 02691156
04379243/points/523ac3575244c7f3a130bbab7337a0cf.pts 04379243/expert_verified/points_label/523ac3575244c7f3a130bbab7337a0cf.seg 04379243
03001627/points/96e83c79e8d76d4519fb4103277a6b93.pts 03001627/expert_verified/points_label/96e83c79e8d76d4519fb4103277a6b93.seg 03001627
04379243/points/a2781622b5941ff2a886fe6408aa7382.pts 04379243/expert_verified/points_label/a2781622b5941ff2a886fe6408aa7382.seg 04379243
04379243/points/5d24567426a614ecfd726e98b98fb36f.pts 04379243/expert_verified/points_label/5d24567426a614ecfd726e98b98fb36f.seg 04379243
03001627/points/a5a2d09e5384237869513d0907f19c8f.pts 03001627/expert_verified/points_label/a5a2d09e5384237869513d0907f19c8f.seg 03001627
02691156/points/e02485f093835f45c1b64d86df61366a.pts 02691156/expert_verified/points_label/e02485f093835f45c1b64d86df61366a.seg 02691156
04379243/points/58f8fd169c9578e62f81cb887dc35578.pts 04379243/expert_verified/points_label/58f8fd169c9578e62f81cb887dc35578.seg 04379243
04379243/points/c755eeaa4a588fcba9126dd5adc92c1e.pts 04379243/expert_verified/points_label/c755eeaa4a588fcba9126dd5adc92c1e.seg 04379243
03001627/points/704179dd47a2282e676de9b6e111da8b.pts 03001627/expert_verified/points_label/704179dd47a2282e676de9b6e111da8b.seg 03001627
03001627/points/9253f198c06794cdc7689830acac6e59.pts 03001627/expert_verified/points_label/9253f198c06794cdc7689830acac6e59.seg 03001627
04379243/points/2ba8eb5ec0a05694593ebeeedbff73b.pts 04379243/expert_verified/points_label/2ba8eb5ec0a05694593ebeeedbff73b.seg 04379243
03467517/points/133ebdf2ca7bf4b81d4e8021f58beea0.pts 03467517/expert_verified/points_label/133ebdf2ca7bf4b81d4e8021f58beea0.seg 03467517
03467517/points/ba6d3dcff42ea7bba32c4b8efb0131e.pts 03467517/expert_verified/points_label/ba6d3dcff42ea7bba32c4b8efb0131e.seg 03467517
03467517/points/222b705a80d75a4343b0b12983b9982.pts 03467517/expert_verified/points_label/222b705a80d75a4343b0b12983b9982.seg 03467517
04379243/points/47317755c82114d5c3bd24f986301745.pts 04379243/expert_verified/points_label/47317755c82114d5c3bd24f986301745.seg 04379243
04379243/points/175c0be26d0f2e916cb0bd372b0960ba.pts 04379243/expert_verified/points_label/175c0be26d0f2e916cb0bd372b0960ba.seg 04379243
03636649/points/19388898dd69dd9fddc8e6d1ec6242c3.pts 03636649/expert_verified/points_label/19388898dd69dd9fddc8e6d1ec6242c3.seg 03636649
04379243/points/3cec584145ee513d635418e95eea8a17.pts 04379243/expert_verified/points_label/3cec584145ee513d635418e95eea8a17.seg 04379243
03001627/points/3a5c8d46fdc6793b956abdbfba57903a.pts 03001627/expert_verified/points_label/3a5c8d46fdc6793b956abdbfba57903a.seg 03001627
03001627/points/3d32d89db2286377e63c6421b71f17c8.pts 03001627/expert_verified/points_label/3d32d89db2286377e63c6421b71f17c8.seg 03001627
03001627/points/47a45ce9fb219083411e8b42940aba04.pts 03001627/expert_verified/points_label/47a45ce9fb219083411e8b42940aba04.seg 03001627
03467517/points/214f6a08b78670de2cb522418d5742a0.pts 03467517/expert_verified/points_label/214f6a08b78670de2cb522418d5742a0.seg 03467517
04379243/points/1b4bc147baf68d4ff008d8a3590fb522.pts 04379243/expert_verified/points_label/1b4bc147baf68d4ff008d8a3590fb522.seg 04379243
03467517/points/83b2ecf5caced214e313875ff213ee10.pts 03467517/expert_verified/points_label/83b2ecf5caced214e313875ff213ee10.seg 03467517
02691156/points/57fe8ad460bcb4929a4a28ef635593ce.pts 02691156/expert_verified/points_label/57fe8ad460bcb4929a4a28ef635593ce.seg 02691156
03624134/points/e8a6915bd0bcf1bebaa284808a1567a8.pts 03624134/expert_verified/points_label/e8a6915bd0bcf1bebaa284808a1567a8.seg 03624134
03001627/points/1da29597f89c2b004b3c42e318f3affc.pts 03001627/expert_verified/points_label/1da29597f89c2b004b3c42e318f3affc.seg 03001627
04379243/points/2ef899e67eecef65190a91fd9a6f7d55.pts 04379243/expert_verified/points_label/2ef899e67eecef65190a91fd9a6f7d55.seg 04379243
04379243/points/811a7be3be14bd2b62103e4bff47b4cd.pts 04379243/expert_verified/points_label/811a7be3be14bd2b62103e4bff47b4cd.seg 04379243
03948459/points/592017db407391c68e7e947594effe19.pts 03948459/expert_verified/points_label/592017db407391c68e7e947594effe19.seg 03948459
03636649/points/eb311e6232cb7011bb5bd941c6665c21.pts 03636649/expert_verified/points_label/eb311e6232cb7011bb5bd941c6665c21.seg 03636649
02691156/points/caa7e70beee4543f42c20743f866e1a6.pts 02691156/expert_verified/points_label/caa7e70beee4543f42c20743f866e1a6.seg 02691156
03001627/points/3aaa59b19eebcb5f41552c6ecbda964b.pts 03001627/expert_verified/points_label/3aaa59b19eebcb5f41552c6ecbda964b.seg 03001627
03001627/points/a93aac9ad86008e69fc01fb65ca37d30.pts 03001627/expert_verified/points_label/a93aac9ad86008e69fc01fb65ca37d30.seg 03001627
03624134/points/ceeb38ab7929361e76ec14627bf6bbcb.pts 03624134/expert_verified/points_label/ceeb38ab7929361e76ec14627bf6bbcb.seg 03624134
03001627/points/93dc91115a9002e1663fcfd6703c85f3.pts 03001627/expert_verified/points_label/93dc91115a9002e1663fcfd6703c85f3.seg 03001627
04379243/points/b08310a1d75702eda09ce9c1262c7237.pts 04379243/expert_verified/points_label/b08310a1d75702eda09ce9c1262c7237.seg 04379243
03797390/points/e9bd4ee553eb35c1d5ccc40b510e4bd.pts 03797390/expert_verified/points_label/e9bd4ee553eb35c1d5ccc40b510e4bd.seg 03797390
03001627/points/bdd57499bf64fab6bf80985a99195eb8.pts 03001627/expert_verified/points_label/bdd57499bf64fab6bf80985a99195eb8.seg 03001627
04379243/points/48af84a5600ad5bc19fb4103277a6b93.pts 04379243/expert_verified/points_label/48af84a5600ad5bc19fb4103277a6b93.seg 04379243
03001627/points/738395f54b301d80b1f5d603f931c1aa.pts 03001627/expert_verified/points_label/738395f54b301d80b1f5d603f931c1aa.seg 03001627
03790512/points/6819949f5625ca12d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/6819949f5625ca12d0f568c31c1cd62a.seg 03790512
03467517/points/70d9a5d0330abd9df4b498e11fb60a4b.pts 03467517/expert_verified/points_label/70d9a5d0330abd9df4b498e11fb60a4b.seg 03467517
02958343/points/174f1a421f652029d577c0ac53e96823.pts 02958343/expert_verified/points_label/174f1a421f652029d577c0ac53e96823.seg 02958343
03001627/points/d764960666572084b1ea4e06e88051f3.pts 03001627/expert_verified/points_label/d764960666572084b1ea4e06e88051f3.seg 03001627
02691156/points/ba662ec78231c493252b4f9439ef95a6.pts 02691156/expert_verified/points_label/ba662ec78231c493252b4f9439ef95a6.seg 02691156
03636649/points/8a9f2e5b726ea37f60ad823977adaa23.pts 03636649/expert_verified/points_label/8a9f2e5b726ea37f60ad823977adaa23.seg 03636649
04379243/points/80af0f92ecf69f69f5ff054d67d5fe35.pts 04379243/expert_verified/points_label/80af0f92ecf69f69f5ff054d67d5fe35.seg 04379243
04379243/points/ce4e075487aa05ecdcfcef693e7ec696.pts 04379243/expert_verified/points_label/ce4e075487aa05ecdcfcef693e7ec696.seg 04379243
03001627/points/564f5f96bc718194166420d06689fcf.pts 03001627/expert_verified/points_label/564f5f96bc718194166420d06689fcf.seg 03001627
03636649/points/88d29e1350eda810c066b9622c005c53.pts 03636649/expert_verified/points_label/88d29e1350eda810c066b9622c005c53.seg 03636649
04379243/points/346db24c1279e8d273fdbe4b39ff4036.pts 04379243/expert_verified/points_label/346db24c1279e8d273fdbe4b39ff4036.seg 04379243
04379243/points/7062f5b229674ab7b0b54dd2cf2a35d4.pts 04379243/expert_verified/points_label/7062f5b229674ab7b0b54dd2cf2a35d4.seg 04379243
03636649/points/923097cec128ae77469cbaa3d6420fb4.pts 03636649/expert_verified/points_label/923097cec128ae77469cbaa3d6420fb4.seg 03636649
04379243/points/3fb5033b5ddaaf365f7afad12924b3b5.pts 04379243/expert_verified/points_label/3fb5033b5ddaaf365f7afad12924b3b5.seg 04379243
03636649/points/32e9d8a4b5a141a2615efc34c3b36ef0.pts 03636649/expert_verified/points_label/32e9d8a4b5a141a2615efc34c3b36ef0.seg 03636649
02691156/points/997cb29f544d6f2726360e1e29a956c7.pts 02691156/expert_verified/points_label/997cb29f544d6f2726360e1e29a956c7.seg 02691156
04379243/points/7df9115b511668bdde98d10ab5975b59.pts 04379243/expert_verified/points_label/7df9115b511668bdde98d10ab5975b59.seg 04379243
03636649/points/5580b95ab8e7806c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/5580b95ab8e7806c6c5b8009db95f66f.seg 03636649
04379243/points/6862bebc1f59a5caac7bed72580dc30f.pts 04379243/expert_verified/points_label/6862bebc1f59a5caac7bed72580dc30f.seg 04379243
02691156/points/56ba815f883279b462b600da24e0965.pts 02691156/expert_verified/points_label/56ba815f883279b462b600da24e0965.seg 02691156
03797390/points/5c48d471200d2bf16e8a121e6886e18d.pts 03797390/expert_verified/points_label/5c48d471200d2bf16e8a121e6886e18d.seg 03797390
04379243/points/b48d04600e7cf2bebeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/b48d04600e7cf2bebeedb4c8fd29e2d1.seg 04379243
02958343/points/323c9dc2a8911e146f2f07de403e98d8.pts 02958343/expert_verified/points_label/323c9dc2a8911e146f2f07de403e98d8.seg 02958343
04225987/points/d3ff56062272f3e6346e65609be6d72f.pts 04225987/expert_verified/points_label/d3ff56062272f3e6346e65609be6d72f.seg 04225987
03001627/points/af28dbdce6ed8cea19fb4103277a6b93.pts 03001627/expert_verified/points_label/af28dbdce6ed8cea19fb4103277a6b93.seg 03001627
02958343/points/dfa6c32dec07727ee9d8921ebe6d5b8e.pts 02958343/expert_verified/points_label/dfa6c32dec07727ee9d8921ebe6d5b8e.seg 02958343
03001627/points/c2b898dd5601454d626d7e3d07da8352.pts 03001627/expert_verified/points_label/c2b898dd5601454d626d7e3d07da8352.seg 03001627
04379243/points/a7ef45d86ae5b496a97f238e46bc2221.pts 04379243/expert_verified/points_label/a7ef45d86ae5b496a97f238e46bc2221.seg 04379243
04379243/points/1bd138c3e54a75d32f38c0d2792fb5e.pts 04379243/expert_verified/points_label/1bd138c3e54a75d32f38c0d2792fb5e.seg 04379243
02958343/points/cd67376cac9f989151008e496c6cfd2e.pts 02958343/expert_verified/points_label/cd67376cac9f989151008e496c6cfd2e.seg 02958343
03948459/points/af9eaed1d9574387ab2c2809513f396e.pts 03948459/expert_verified/points_label/af9eaed1d9574387ab2c2809513f396e.seg 03948459
04379243/points/c418195771c7625945821c000807c3b1.pts 04379243/expert_verified/points_label/c418195771c7625945821c000807c3b1.seg 04379243
04379243/points/88b227c5fb3906ce47c638c0eee4a2b3.pts 04379243/expert_verified/points_label/88b227c5fb3906ce47c638c0eee4a2b3.seg 04379243
03467517/points/81bd0c7a35a147988cc3ae4061da3bb0.pts 03467517/expert_verified/points_label/81bd0c7a35a147988cc3ae4061da3bb0.seg 03467517
04379243/points/5292f2930f188e0a7ff6ace05b36a5.pts 04379243/expert_verified/points_label/5292f2930f188e0a7ff6ace05b36a5.seg 04379243
03636649/points/5f0a23ce527d0be52f38c0d2792fb5e.pts 03636649/expert_verified/points_label/5f0a23ce527d0be52f38c0d2792fb5e.seg 03636649
03636649/points/98cdb45ca9925feb194eb328dc97c7e2.pts 03636649/expert_verified/points_label/98cdb45ca9925feb194eb328dc97c7e2.seg 03636649
03790512/points/47054c1839830834a88e8cb97b773125.pts 03790512/expert_verified/points_label/47054c1839830834a88e8cb97b773125.seg 03790512
03001627/points/b058cc77e628ac01c433ba3e0e025e8c.pts 03001627/expert_verified/points_label/b058cc77e628ac01c433ba3e0e025e8c.seg 03001627
04225987/points/f74a5dfc0094e2d5561dce3fe08634b7.pts 04225987/expert_verified/points_label/f74a5dfc0094e2d5561dce3fe08634b7.seg 04225987
02958343/points/e20b8a9c388eeb012c8b6ee41d7d5d62.pts 02958343/expert_verified/points_label/e20b8a9c388eeb012c8b6ee41d7d5d62.seg 02958343
02958343/points/7203130a35ab20a4b1bb46d2556ba67d.pts 02958343/expert_verified/points_label/7203130a35ab20a4b1bb46d2556ba67d.seg 02958343
03261776/points/2c6f04001afcce7ded85c3dc02bada79.pts 03261776/expert_verified/points_label/2c6f04001afcce7ded85c3dc02bada79.seg 03261776
03001627/points/951fb0d7ad8ab2bec5b5bea66ef4576d.pts 03001627/expert_verified/points_label/951fb0d7ad8ab2bec5b5bea66ef4576d.seg 03001627
02691156/points/54e926e12382808b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/54e926e12382808b66cf1b4a8fc3914e.seg 02691156
03001627/points/4c513ea0804fc008c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/4c513ea0804fc008c8687ff9b0b4e4ac.seg 03001627
03001627/points/748957972cae6b03c56be62b05937331.pts 03001627/expert_verified/points_label/748957972cae6b03c56be62b05937331.seg 03001627
03001627/points/cc2639f8c584001a922dfe32810651d0.pts 03001627/expert_verified/points_label/cc2639f8c584001a922dfe32810651d0.seg 03001627
04379243/points/d2f811bc37858425a63ceecddc308b25.pts 04379243/expert_verified/points_label/d2f811bc37858425a63ceecddc308b25.seg 04379243
03001627/points/d48dac046436a29ec3bd24f986301745.pts 03001627/expert_verified/points_label/d48dac046436a29ec3bd24f986301745.seg 03001627
03001627/points/30fafef5c734f926781ba0fdb47276df.pts 03001627/expert_verified/points_label/30fafef5c734f926781ba0fdb47276df.seg 03001627
03001627/points/7293291b3fe8233fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/7293291b3fe8233fdef1c01cbd4ae0c.seg 03001627
03636649/points/3deedc86a83bbf23f647dc544bb0ab61.pts 03636649/expert_verified/points_label/3deedc86a83bbf23f647dc544bb0ab61.seg 03636649
03467517/points/bb4a5712da8f63330d758421dd01f45.pts 03467517/expert_verified/points_label/bb4a5712da8f63330d758421dd01f45.seg 03467517
03636649/points/39af776c1435a3374b59758e9336ca87.pts 03636649/expert_verified/points_label/39af776c1435a3374b59758e9336ca87.seg 03636649
04379243/points/ef9f3af9b8453613febad4f49b26ec52.pts 04379243/expert_verified/points_label/ef9f3af9b8453613febad4f49b26ec52.seg 04379243
02691156/points/29192f8c96264e3435fc197bbabcd5bd.pts 02691156/expert_verified/points_label/29192f8c96264e3435fc197bbabcd5bd.seg 02691156
02691156/points/75d162523d703917b87697d3904b168b.pts 02691156/expert_verified/points_label/75d162523d703917b87697d3904b168b.seg 02691156
04379243/points/3c04f4e0d183976a7e7cb173e141227.pts 04379243/expert_verified/points_label/3c04f4e0d183976a7e7cb173e141227.seg 04379243
03790512/points/80011e85cd42668ad373c34785838ee4.pts 03790512/expert_verified/points_label/80011e85cd42668ad373c34785838ee4.seg 03790512
04379243/points/994e524d70043c3496e349c87c588bf2.pts 04379243/expert_verified/points_label/994e524d70043c3496e349c87c588bf2.seg 04379243
02691156/points/b1f08c51a098c43696d224195a988f09.pts 02691156/expert_verified/points_label/b1f08c51a098c43696d224195a988f09.seg 02691156
04379243/points/cb31b6293506eb639a3528690d225ee1.pts 04379243/expert_verified/points_label/cb31b6293506eb639a3528690d225ee1.seg 04379243
02691156/points/d70d648947c65b1eca8607f540cc62ba.pts 02691156/expert_verified/points_label/d70d648947c65b1eca8607f540cc62ba.seg 02691156
03636649/points/7bebdd742342ba93febad4f49b26ec52.pts 03636649/expert_verified/points_label/7bebdd742342ba93febad4f49b26ec52.seg 03636649
02691156/points/2a2caad9e540dcc687bf26680c510802.pts 02691156/expert_verified/points_label/2a2caad9e540dcc687bf26680c510802.seg 02691156
03790512/points/73fd19410ce60b83d5dde04c96fd8146.pts 03790512/expert_verified/points_label/73fd19410ce60b83d5dde04c96fd8146.seg 03790512
04379243/points/ccb8c52ff9e7a01819fb4103277a6b93.pts 04379243/expert_verified/points_label/ccb8c52ff9e7a01819fb4103277a6b93.seg 04379243
03467517/points/cc9e9ef3e1326c5363e148e250c0340d.pts 03467517/expert_verified/points_label/cc9e9ef3e1326c5363e148e250c0340d.seg 03467517
03001627/points/d5360f2b0b0299c29b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/d5360f2b0b0299c29b9f2eb77f5e247e.seg 03001627
02691156/points/6b69e4c1cceb6e0681fa1ee3c368532e.pts 02691156/expert_verified/points_label/6b69e4c1cceb6e0681fa1ee3c368532e.seg 02691156
02691156/points/3ae96a1e1bb488942296d88107d065f6.pts 02691156/expert_verified/points_label/3ae96a1e1bb488942296d88107d065f6.seg 02691156
04379243/points/5e4351c4525fae6d6fa63795f94c4d8c.pts 04379243/expert_verified/points_label/5e4351c4525fae6d6fa63795f94c4d8c.seg 04379243
04225987/points/5c55e6b6708f730d758f6def7204bd6b.pts 04225987/expert_verified/points_label/5c55e6b6708f730d758f6def7204bd6b.seg 04225987
03001627/points/a48e359faed3da88d3519c62a8100783.pts 03001627/expert_verified/points_label/a48e359faed3da88d3519c62a8100783.seg 03001627
03467517/points/a4170135b1055cb8982c503992eaf09.pts 03467517/expert_verified/points_label/a4170135b1055cb8982c503992eaf09.seg 03467517
02958343/points/b3f1ad55fa401c35e8c505ac322336cc.pts 02958343/expert_verified/points_label/b3f1ad55fa401c35e8c505ac322336cc.seg 02958343
02691156/points/c7c5bb658cafcc7c67711f7c205c5b63.pts 02691156/expert_verified/points_label/c7c5bb658cafcc7c67711f7c205c5b63.seg 02691156
02691156/points/914c308ac4a9156842c20743f866e1a6.pts 02691156/expert_verified/points_label/914c308ac4a9156842c20743f866e1a6.seg 02691156
04379243/points/23acbe1f91d445f91ca1c7e576bee6b9.pts 04379243/expert_verified/points_label/23acbe1f91d445f91ca1c7e576bee6b9.seg 04379243
04379243/points/8eb366f4f602219b490ad276cd2af3a4.pts 04379243/expert_verified/points_label/8eb366f4f602219b490ad276cd2af3a4.seg 04379243
03624134/points/508ca8fa00e0cbb3e168961dc7b88f65.pts 03624134/expert_verified/points_label/508ca8fa00e0cbb3e168961dc7b88f65.seg 03624134
04379243/points/be045fca16562f6764c85287e21825c4.pts 04379243/expert_verified/points_label/be045fca16562f6764c85287e21825c4.seg 04379243
03001627/points/70f57047512c2eb84104b1c5cb7f9280.pts 03001627/expert_verified/points_label/70f57047512c2eb84104b1c5cb7f9280.seg 03001627
03001627/points/975ea4be01c7488611bc8e8361bc5303.pts 03001627/expert_verified/points_label/975ea4be01c7488611bc8e8361bc5303.seg 03001627
04379243/points/3c7cf00cd78adaef4b3c42e318f3affc.pts 04379243/expert_verified/points_label/3c7cf00cd78adaef4b3c42e318f3affc.seg 04379243
02773838/points/220f08ff0c1d2a4542282fc88db7886b.pts 02773838/expert_verified/points_label/220f08ff0c1d2a4542282fc88db7886b.seg 02773838
03636649/points/e35c4fadbf8d0426c26e81144f3196d5.pts 03636649/expert_verified/points_label/e35c4fadbf8d0426c26e81144f3196d5.seg 03636649
03642806/points/93958423b98be8b538ff1b6d120c56aa.pts 03642806/expert_verified/points_label/93958423b98be8b538ff1b6d120c56aa.seg 03642806
04379243/points/cf24f0128755080569080f7eaa8f3e1d.pts 04379243/expert_verified/points_label/cf24f0128755080569080f7eaa8f3e1d.seg 04379243
04379243/points/f5cbbe04afdc4697562b835b63cfd09c.pts 04379243/expert_verified/points_label/f5cbbe04afdc4697562b835b63cfd09c.seg 04379243
04379243/points/7a7590d19cf8274dab610b0c94236463.pts 04379243/expert_verified/points_label/7a7590d19cf8274dab610b0c94236463.seg 04379243
03001627/points/bdfc3a43eccaac7e908cb3a44391b80.pts 03001627/expert_verified/points_label/bdfc3a43eccaac7e908cb3a44391b80.seg 03001627
03636649/points/90d70f0a6b1cf72d79f0be73913de469.pts 03636649/expert_verified/points_label/90d70f0a6b1cf72d79f0be73913de469.seg 03636649
03642806/points/17069b6604fc28bfa2f5beb253216d5b.pts 03642806/expert_verified/points_label/17069b6604fc28bfa2f5beb253216d5b.seg 03642806
04379243/points/3b0625a3d623a7decfbec6fc6446a041.pts 04379243/expert_verified/points_label/3b0625a3d623a7decfbec6fc6446a041.seg 04379243
04379243/points/9482c5f0a38a73c0fa16d3c3138134ae.pts 04379243/expert_verified/points_label/9482c5f0a38a73c0fa16d3c3138134ae.seg 04379243
04379243/points/ed73c41dcfe9170119cc3eaf35cd388f.pts 04379243/expert_verified/points_label/ed73c41dcfe9170119cc3eaf35cd388f.seg 04379243
04379243/points/1abed35643d34f60afed86cbd9fd5335.pts 04379243/expert_verified/points_label/1abed35643d34f60afed86cbd9fd5335.seg 04379243
03001627/points/98e1936d3f25389bc3c6a889ee0bd115.pts 03001627/expert_verified/points_label/98e1936d3f25389bc3c6a889ee0bd115.seg 03001627
03797390/points/ef24c302911bcde6ea6ff2182dd34668.pts 03797390/expert_verified/points_label/ef24c302911bcde6ea6ff2182dd34668.seg 03797390
02773838/points/22b7d6fa819d62aefc69b7db9c6d5ad9.pts 02773838/expert_verified/points_label/22b7d6fa819d62aefc69b7db9c6d5ad9.seg 02773838
03001627/points/19666f52289092a3394a3bbfc81460.pts 03001627/expert_verified/points_label/19666f52289092a3394a3bbfc81460.seg 03001627
03001627/points/49b38e22f104005ecbde89e0c48a01bf.pts 03001627/expert_verified/points_label/49b38e22f104005ecbde89e0c48a01bf.seg 03001627
04379243/points/de077e0bd6932baef12d7184a2ad3430.pts 04379243/expert_verified/points_label/de077e0bd6932baef12d7184a2ad3430.seg 04379243
03001627/points/fe99f16c2532cdd07ba99ad16fdc05cd.pts 03001627/expert_verified/points_label/fe99f16c2532cdd07ba99ad16fdc05cd.seg 03001627
03642806/points/a17cf326705a6443a09a37cf78d1b866.pts 03642806/expert_verified/points_label/a17cf326705a6443a09a37cf78d1b866.seg 03642806
04379243/points/890940359fdfa036569c11df1aea8ca4.pts 04379243/expert_verified/points_label/890940359fdfa036569c11df1aea8ca4.seg 04379243
03642806/points/7f75b94bd59d649958dd315c54df0c15.pts 03642806/expert_verified/points_label/7f75b94bd59d649958dd315c54df0c15.seg 03642806
04379243/points/d0ef9d431a16e70de6c5cd45aa112726.pts 04379243/expert_verified/points_label/d0ef9d431a16e70de6c5cd45aa112726.seg 04379243
03001627/points/2dc5055b8d900ec7db4b0ee93cf61ed1.pts 03001627/expert_verified/points_label/2dc5055b8d900ec7db4b0ee93cf61ed1.seg 03001627
03001627/points/9e6b834449ed2db86199d6fe090be061.pts 03001627/expert_verified/points_label/9e6b834449ed2db86199d6fe090be061.seg 03001627
04379243/points/9e3f1901ea14aca753315facdf531a34.pts 04379243/expert_verified/points_label/9e3f1901ea14aca753315facdf531a34.seg 04379243
03001627/points/c4ebef05a72fc4f39d62eb3fdc2d3f8a.pts 03001627/expert_verified/points_label/c4ebef05a72fc4f39d62eb3fdc2d3f8a.seg 03001627
03001627/points/428b77d0ffe6ab456e06155d245f15d6.pts 03001627/expert_verified/points_label/428b77d0ffe6ab456e06155d245f15d6.seg 03001627
04225987/points/591971ce679ca4b93ad38b993d9e745f.pts 04225987/expert_verified/points_label/591971ce679ca4b93ad38b993d9e745f.seg 04225987
03790512/points/bcabe20e46e5126ed5dde04c96fd8146.pts 03790512/expert_verified/points_label/bcabe20e46e5126ed5dde04c96fd8146.seg 03790512
04379243/points/3ed500a12dfa511ba6040757a0125a99.pts 04379243/expert_verified/points_label/3ed500a12dfa511ba6040757a0125a99.seg 04379243
04379243/points/1581d2682187764730bbd4cddd04c77b.pts 04379243/expert_verified/points_label/1581d2682187764730bbd4cddd04c77b.seg 04379243
02691156/points/bb7d526405e9347b8f6810e1a2b6aa04.pts 02691156/expert_verified/points_label/bb7d526405e9347b8f6810e1a2b6aa04.seg 02691156
02691156/points/fb9deec3a422b06b609e2d916fa0da27.pts 02691156/expert_verified/points_label/fb9deec3a422b06b609e2d916fa0da27.seg 02691156
03636649/points/5e6abfc7d93fa5f1dc0efee4b442070.pts 03636649/expert_verified/points_label/5e6abfc7d93fa5f1dc0efee4b442070.seg 03636649
03467517/points/2dbc73ad4ce7950163e148e250c0340d.pts 03467517/expert_verified/points_label/2dbc73ad4ce7950163e148e250c0340d.seg 03467517
02958343/points/eea7f5d02088d49dfdb3c05088c091ae.pts 02958343/expert_verified/points_label/eea7f5d02088d49dfdb3c05088c091ae.seg 02958343
04379243/points/83c24aad3914e61a73376642dd664bfd.pts 04379243/expert_verified/points_label/83c24aad3914e61a73376642dd664bfd.seg 04379243
04379243/points/51874066ba946c58aaf15b62af6b513f.pts 04379243/expert_verified/points_label/51874066ba946c58aaf15b62af6b513f.seg 04379243
03636649/points/5be8cdad3b218e373d39d8012919dd25.pts 03636649/expert_verified/points_label/5be8cdad3b218e373d39d8012919dd25.seg 03636649
03636649/points/49cd0dd4d1c008edbbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/49cd0dd4d1c008edbbc7a6acbd8f058b.seg 03636649
03642806/points/d7e7e6651a23afc68ba4e518219eb66a.pts 03642806/expert_verified/points_label/d7e7e6651a23afc68ba4e518219eb66a.seg 03642806
02958343/points/6026684ab31d567328044fe9244db50a.pts 02958343/expert_verified/points_label/6026684ab31d567328044fe9244db50a.seg 02958343
04379243/points/c177762c0445d57ab20aa91e9e90c311.pts 04379243/expert_verified/points_label/c177762c0445d57ab20aa91e9e90c311.seg 04379243
02691156/points/7bad9d15c0f0d3c03554ccf8c30febe7.pts 02691156/expert_verified/points_label/7bad9d15c0f0d3c03554ccf8c30febe7.seg 02691156
03636649/points/dd818b0269b1aa15fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/dd818b0269b1aa15fcb8d8c6d4df8143.seg 03636649
03624134/points/c4851aee1af7d874cc34b900bb2492e.pts 03624134/expert_verified/points_label/c4851aee1af7d874cc34b900bb2492e.seg 03624134
03001627/points/e2ced471afce616454bfa32aa0766acb.pts 03001627/expert_verified/points_label/e2ced471afce616454bfa32aa0766acb.seg 03001627
03797390/points/896f1d494bac0ebcdec712af445786fe.pts 03797390/expert_verified/points_label/896f1d494bac0ebcdec712af445786fe.seg 03797390
04379243/points/481e00e4559705c616a2b5862518c93.pts 04379243/expert_verified/points_label/481e00e4559705c616a2b5862518c93.seg 04379243
04379243/points/2ca883ba6a9dc6f68985be89a0ee21a.pts 04379243/expert_verified/points_label/2ca883ba6a9dc6f68985be89a0ee21a.seg 04379243
04379243/points/ebc82e7df36f6e9a33963916b86d221f.pts 04379243/expert_verified/points_label/ebc82e7df36f6e9a33963916b86d221f.seg 04379243
03001627/points/cdea84a63ad8c44febad4f49b26ec52.pts 03001627/expert_verified/points_label/cdea84a63ad8c44febad4f49b26ec52.seg 03001627
03624134/points/c71280ea272fbfed4b7644126b1d71e0.pts 03624134/expert_verified/points_label/c71280ea272fbfed4b7644126b1d71e0.seg 03624134
02958343/points/974c3d82f8726f086b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/974c3d82f8726f086b418c7d9fedcaa9.seg 02958343
02958343/points/4dbf4e0654d0c234e811106a82796d20.pts 02958343/expert_verified/points_label/4dbf4e0654d0c234e811106a82796d20.seg 02958343
03467517/points/de9ca0c3e32f907dcb61cf5d9c47c2c7.pts 03467517/expert_verified/points_label/de9ca0c3e32f907dcb61cf5d9c47c2c7.seg 03467517
02958343/points/9f4bbcf9f51fe1e42957c02bdefc95c8.pts 02958343/expert_verified/points_label/9f4bbcf9f51fe1e42957c02bdefc95c8.seg 02958343
03467517/points/173e4f1824f7b9fa93f0194265a9746c.pts 03467517/expert_verified/points_label/173e4f1824f7b9fa93f0194265a9746c.seg 03467517
03636649/points/b4f166440439171741657e31b569b105.pts 03636649/expert_verified/points_label/b4f166440439171741657e31b569b105.seg 03636649
03948459/points/d1ba405fef56efa0fa29682ba98e856d.pts 03948459/expert_verified/points_label/d1ba405fef56efa0fa29682ba98e856d.seg 03948459
03467517/points/a39dcefa599a76dd93f0194265a9746c.pts 03467517/expert_verified/points_label/a39dcefa599a76dd93f0194265a9746c.seg 03467517
02958343/points/e213d976734431773a3afd30f2e86bd7.pts 02958343/expert_verified/points_label/e213d976734431773a3afd30f2e86bd7.seg 02958343
04379243/points/b1335d826d7d60726e066e11deddab75.pts 04379243/expert_verified/points_label/b1335d826d7d60726e066e11deddab75.seg 04379243
04379243/points/e37262abd76852ac00ee852f6d8aa3c.pts 04379243/expert_verified/points_label/e37262abd76852ac00ee852f6d8aa3c.seg 04379243
03001627/points/5d346bdb7db27accf3588493d5c284.pts 03001627/expert_verified/points_label/5d346bdb7db27accf3588493d5c284.seg 03001627
04379243/points/198ff59a42a147eb8ac5948d70801389.pts 04379243/expert_verified/points_label/198ff59a42a147eb8ac5948d70801389.seg 04379243
03001627/points/b3fd987b330d0d2acda56795a6fbde1f.pts 03001627/expert_verified/points_label/b3fd987b330d0d2acda56795a6fbde1f.seg 03001627
02691156/points/1cb757280b862ae52c7575c9089791ff.pts 02691156/expert_verified/points_label/1cb757280b862ae52c7575c9089791ff.seg 02691156
03636649/points/4631e756666a8a208ca4aeb5e3b33af7.pts 03636649/expert_verified/points_label/4631e756666a8a208ca4aeb5e3b33af7.seg 03636649
04379243/points/b82c6769c98e877d24d29f1dedd03a57.pts 04379243/expert_verified/points_label/b82c6769c98e877d24d29f1dedd03a57.seg 04379243
03636649/points/2b194d6bed8daa82c0b2dda5ff15ea28.pts 03636649/expert_verified/points_label/2b194d6bed8daa82c0b2dda5ff15ea28.seg 03636649
03001627/points/7e6b4a7b4dd60c40cc8bd7a04c9659f1.pts 03001627/expert_verified/points_label/7e6b4a7b4dd60c40cc8bd7a04c9659f1.seg 03001627
03948459/points/d1cc54762432fd058a2c998c0df41abe.pts 03948459/expert_verified/points_label/d1cc54762432fd058a2c998c0df41abe.seg 03948459
04225987/points/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.pts 04225987/expert_verified/points_label/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.seg 04225987
04379243/points/6ce30b0327db26f340b4c5428883e585.pts 04379243/expert_verified/points_label/6ce30b0327db26f340b4c5428883e585.seg 04379243
04379243/points/c5230678204a1bb8dcfcef693e7ec696.pts 04379243/expert_verified/points_label/c5230678204a1bb8dcfcef693e7ec696.seg 04379243
02691156/points/563cef4df464ddb1e153dd90dac45a6d.pts 02691156/expert_verified/points_label/563cef4df464ddb1e153dd90dac45a6d.seg 02691156
02958343/points/42e6ce03b361102ab86e0633bb69faea.pts 02958343/expert_verified/points_label/42e6ce03b361102ab86e0633bb69faea.seg 02958343
03001627/points/26e8033e59a3adf6bb53a6a5f5051240.pts 03001627/expert_verified/points_label/26e8033e59a3adf6bb53a6a5f5051240.seg 03001627
04379243/points/731b983cb313634fd018082a1777a5f8.pts 04379243/expert_verified/points_label/731b983cb313634fd018082a1777a5f8.seg 04379243
02691156/points/10aa040f470500c6a66ef8df4909ded9.pts 02691156/expert_verified/points_label/10aa040f470500c6a66ef8df4909ded9.seg 02691156
03467517/points/bb895a87931f51c893f0194265a9746c.pts 03467517/expert_verified/points_label/bb895a87931f51c893f0194265a9746c.seg 03467517
03624134/points/a105080ce4564145aeb54153795ede63.pts 03624134/expert_verified/points_label/a105080ce4564145aeb54153795ede63.seg 03624134
04379243/points/c12147db9b29ef9ee0480c954dcd56d1.pts 04379243/expert_verified/points_label/c12147db9b29ef9ee0480c954dcd56d1.seg 04379243
04379243/points/21cdc417e398378e40f3ac0af6b7e700.pts 04379243/expert_verified/points_label/21cdc417e398378e40f3ac0af6b7e700.seg 04379243
04379243/points/b11e0feb428f61edf008d8a3590fb522.pts 04379243/expert_verified/points_label/b11e0feb428f61edf008d8a3590fb522.seg 04379243
04379243/points/2700f6693447c32d66c64744a4252d3.pts 04379243/expert_verified/points_label/2700f6693447c32d66c64744a4252d3.seg 04379243
03467517/points/b6d0cf333c7e013993f0194265a9746c.pts 03467517/expert_verified/points_label/b6d0cf333c7e013993f0194265a9746c.seg 03467517
03001627/points/ece627bd883d9bbfb0eb7e753c06942.pts 03001627/expert_verified/points_label/ece627bd883d9bbfb0eb7e753c06942.seg 03001627
03636649/points/26f0f37f0f2623c4a3fa46ae73c48b4.pts 03636649/expert_verified/points_label/26f0f37f0f2623c4a3fa46ae73c48b4.seg 03636649
04379243/points/8b07d458499d63f36d96c6cb347d6a90.pts 04379243/expert_verified/points_label/8b07d458499d63f36d96c6cb347d6a90.seg 04379243
04379243/points/eb363770ee36b0309a79b01b89f55c86.pts 04379243/expert_verified/points_label/eb363770ee36b0309a79b01b89f55c86.seg 04379243
04379243/points/ccf36a20b7ef3bd128071d61462a212d.pts 04379243/expert_verified/points_label/ccf36a20b7ef3bd128071d61462a212d.seg 04379243
03001627/points/cf24fc2d10f8da31283b00891f680579.pts 03001627/expert_verified/points_label/cf24fc2d10f8da31283b00891f680579.seg 03001627
02958343/points/8b4879617bd256391738f25e3015f92e.pts 02958343/expert_verified/points_label/8b4879617bd256391738f25e3015f92e.seg 02958343
03001627/points/55e1cde05a99f6c7d1d34366ca81fb3b.pts 03001627/expert_verified/points_label/55e1cde05a99f6c7d1d34366ca81fb3b.seg 03001627
03001627/points/6c25ec1178e9bab6e545858398955dd1.pts 03001627/expert_verified/points_label/6c25ec1178e9bab6e545858398955dd1.seg 03001627
03001627/points/862f70e73fa70c9b1a719e2a845bdada.pts 03001627/expert_verified/points_label/862f70e73fa70c9b1a719e2a845bdada.seg 03001627
04379243/points/fa5dce1043f44c06ab88e3acae6e8bc5.pts 04379243/expert_verified/points_label/fa5dce1043f44c06ab88e3acae6e8bc5.seg 04379243
03467517/points/6f9d1467eb39f8abfae47f572c17b9cb.pts 03467517/expert_verified/points_label/6f9d1467eb39f8abfae47f572c17b9cb.seg 03467517
04379243/points/60ef2830979fd08ec72d4ae978770752.pts 04379243/expert_verified/points_label/60ef2830979fd08ec72d4ae978770752.seg 04379243
03624134/points/d69e028056c9291069654277b747a908.pts 03624134/expert_verified/points_label/d69e028056c9291069654277b747a908.seg 03624134
04379243/points/8e7c894039ae2cfe99e8bf807e902261.pts 04379243/expert_verified/points_label/8e7c894039ae2cfe99e8bf807e902261.seg 04379243
02958343/points/4e2ca20091449636599389919f6522e6.pts 02958343/expert_verified/points_label/4e2ca20091449636599389919f6522e6.seg 02958343
04379243/points/b10d84b3a04085b17618b16b281bdf56.pts 04379243/expert_verified/points_label/b10d84b3a04085b17618b16b281bdf56.seg 04379243
03948459/points/d13986cc2403a2034b4b3d2a28039009.pts 03948459/expert_verified/points_label/d13986cc2403a2034b4b3d2a28039009.seg 03948459
03636649/points/d97a86cea650ae0baf5b49ad7809302.pts 03636649/expert_verified/points_label/d97a86cea650ae0baf5b49ad7809302.seg 03636649
03797390/points/ca198dc3f7dc0cacec6338171298c66b.pts 03797390/expert_verified/points_label/ca198dc3f7dc0cacec6338171298c66b.seg 03797390
03636649/points/3f968096c74ee3a3b04a2e6a78ff6c49.pts 03636649/expert_verified/points_label/3f968096c74ee3a3b04a2e6a78ff6c49.seg 03636649
02691156/points/4d6ec762d1583ded46555ee25941a22e.pts 02691156/expert_verified/points_label/4d6ec762d1583ded46555ee25941a22e.seg 02691156
03467517/points/401ff6021157dee293f0194265a9746c.pts 03467517/expert_verified/points_label/401ff6021157dee293f0194265a9746c.seg 03467517
04379243/points/c1d808c75cc5e7ab4da5bb83ec125010.pts 04379243/expert_verified/points_label/c1d808c75cc5e7ab4da5bb83ec125010.seg 04379243
03790512/points/3d37db1d974499287395d58407f193ba.pts 03790512/expert_verified/points_label/3d37db1d974499287395d58407f193ba.seg 03790512
03624134/points/65892e0f7f93129d14cb807a24b99e1e.pts 03624134/expert_verified/points_label/65892e0f7f93129d14cb807a24b99e1e.seg 03624134
03624134/points/854e7bb73afaff7591ea3afb2749822f.pts 03624134/expert_verified/points_label/854e7bb73afaff7591ea3afb2749822f.seg 03624134
03624134/points/7b492f2baa1dc710cc34b900bb2492e.pts 03624134/expert_verified/points_label/7b492f2baa1dc710cc34b900bb2492e.seg 03624134
03636649/points/b4b15a84b9067f94a75d03186a0409e2.pts 03636649/expert_verified/points_label/b4b15a84b9067f94a75d03186a0409e2.seg 03636649
03636649/points/9db87bf898efd448cbde89e0c48a01bf.pts 03636649/expert_verified/points_label/9db87bf898efd448cbde89e0c48a01bf.seg 03636649
02954340/points/9bd54e0123d3cd70a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/9bd54e0123d3cd70a52821bf1aa3b19a.seg 02954340
================================================
FILE: dgcnn/tensorflow/part_seg/train_multi_gpu.py
================================================
import argparse
import subprocess
import tensorflow as tf
import numpy as np
from datetime import datetime
import json
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
import provider
import part_seg_model as model
TOWER_NAME = 'tower'
# DEFAULT SETTINGS
parser = argparse.ArgumentParser()
parser.add_argument('--num_gpu', type=int, default=2, help='The number of GPUs to use [default: 2]')
parser.add_argument('--batch', type=int, default=16, help='Batch Size per GPU during training [default: 16]')
parser.add_argument('--epoch', type=int, default=201, help='Epoch to run [default: 201]')
parser.add_argument('--point_num', type=int, default=2048, help='Point Number [256/512/1024/2048]')
parser.add_argument('--output_dir', type=str, default='train_results', help='Directory that stores all training logs and trained models')
parser.add_argument('--wd', type=float, default=0, help='Weight Decay [Default: 0.0]')
FLAGS = parser.parse_args()
hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data')
# MAIN SCRIPT
point_num = FLAGS.point_num
batch_size = FLAGS.batch
output_dir = FLAGS.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json')
# color_map = json.load(open(color_map_file, 'r'))
all_obj_cats_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt')
fin = open(all_obj_cats_file, 'r')
lines = [line.rstrip() for line in fin.readlines()]
all_obj_cats = [(line.split()[0], line.split()[1]) for line in lines]
fin.close()
all_cats = json.load(open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r'))
NUM_CATEGORIES = 16
NUM_PART_CATS = len(all_cats)
print('#### Batch Size Per GPU: {0}'.format(batch_size))
print('#### Point Number: {0}'.format(point_num))
print('#### Using GPUs: {0}'.format(FLAGS.num_gpu))
DECAY_STEP = 16881 * 20
DECAY_RATE = 0.5
LEARNING_RATE_CLIP = 1e-5
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_CLIP = 0.99
BASE_LEARNING_RATE = 0.003
MOMENTUM = 0.9
TRAINING_EPOCHES = FLAGS.epoch
print('### Training epoch: {0}'.format(TRAINING_EPOCHES))
TRAINING_FILE_LIST = os.path.join(hdf5_data_dir, 'train_hdf5_file_list.txt')
TESTING_FILE_LIST = os.path.join(hdf5_data_dir, 'val_hdf5_file_list.txt')
MODEL_STORAGE_PATH = os.path.join(output_dir, 'trained_models')
if not os.path.exists(MODEL_STORAGE_PATH):
os.mkdir(MODEL_STORAGE_PATH)
LOG_STORAGE_PATH = os.path.join(output_dir, 'logs')
if not os.path.exists(LOG_STORAGE_PATH):
os.mkdir(LOG_STORAGE_PATH)
SUMMARIES_FOLDER = os.path.join(output_dir, 'summaries')
if not os.path.exists(SUMMARIES_FOLDER):
os.mkdir(SUMMARIES_FOLDER)
def printout(flog, data):
print(data)
flog.write(data + '\n')
def convert_label_to_one_hot(labels):
label_one_hot = np.zeros((labels.shape[0], NUM_CATEGORIES))
for idx in range(labels.shape[0]):
label_one_hot[idx, labels[idx]] = 1
return label_one_hot
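# Equivalent vectorized sketch (not wired in, to keep the original loop):
# np.eye(NUM_CATEGORIES)[labels] builds the same one-hot matrix in a single
# indexing operation.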
def average_gradients(tower_grads):
"""Calculate average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
if g is None:
continue
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
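# Illustration of the regrouping done by zip(*tower_grads): with two towers
# and two variables,
#   tower_grads = [[(g0_gpu0, v0), (g1_gpu0, v1)],
#                  [(g0_gpu1, v0), (g1_gpu1, v1)]]
# zip(*tower_grads) yields ((g0_gpu0, v0), (g0_gpu1, v0)) and then
# ((g1_gpu0, v1), (g1_gpu1, v1)), i.e. one group per variable, which the loop
# above stacks and averages.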
def train():
with tf.Graph().as_default(), tf.device('/cpu:0'):
batch = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # base learning rate
batch * batch_size, # global step scaled to the number of examples seen
DECAY_STEP, # step size
DECAY_RATE, # decay rate
staircase=True # use discrete (staircase) decay rather than continuous
)
learning_rate = tf.maximum(learning_rate, LEARNING_RATE_CLIP)
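# With staircase=True the schedule is a step function:
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** ((batch * batch_size) // DECAY_STEP),
#            LEARNING_RATE_CLIP)
# i.e. with the constants above, the rate halves after every
# DECAY_STEP = 16881 * 20 training examples.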
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*batch_size,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
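# bn_momentum decays from BN_INIT_DECAY (0.5) by BN_DECAY_DECAY_RATE, so
# bn_decay = min(0.99, 1 - bn_momentum) ramps from 0.5 toward the 0.99 cap as
# training progresses.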
lr_op = tf.summary.scalar('learning_rate', learning_rate)
batch_op = tf.summary.scalar('batch_number', batch)
bn_decay_op = tf.summary.scalar('bn_decay', bn_decay)
trainer = tf.train.AdamOptimizer(learning_rate)
# store tensors for different gpus
tower_grads = []
pointclouds_phs = []
input_label_phs = []
seg_phs =[]
is_training_phs =[]
with tf.variable_scope(tf.get_variable_scope()):
for i in range(FLAGS.num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
pointclouds_phs.append(tf.placeholder(tf.float32, shape=(batch_size, point_num, 3))) # for points
input_label_phs.append(tf.placeholder(tf.float32, shape=(batch_size, NUM_CATEGORIES))) # for one-hot category label
seg_phs.append(tf.placeholder(tf.int32, shape=(batch_size, point_num))) # for part labels
is_training_phs.append(tf.placeholder(tf.bool, shape=()))
seg_pred = model.get_model(pointclouds_phs[-1], input_label_phs[-1], \
is_training=is_training_phs[-1], bn_decay=bn_decay, cat_num=NUM_CATEGORIES, \
part_num=NUM_PART_CATS, batch_size=batch_size, num_point=point_num, weight_decay=FLAGS.wd)
loss, per_instance_seg_loss, per_instance_seg_pred_res \
= model.get_loss(seg_pred, seg_phs[-1])
total_training_loss_ph = tf.placeholder(tf.float32, shape=())
total_testing_loss_ph = tf.placeholder(tf.float32, shape=())
seg_training_acc_ph = tf.placeholder(tf.float32, shape=())
seg_testing_acc_ph = tf.placeholder(tf.float32, shape=())
seg_testing_acc_avg_cat_ph = tf.placeholder(tf.float32, shape=())
total_train_loss_sum_op = tf.summary.scalar('total_training_loss', total_training_loss_ph)
total_test_loss_sum_op = tf.summary.scalar('total_testing_loss', total_testing_loss_ph)
seg_train_acc_sum_op = tf.summary.scalar('seg_training_acc', seg_training_acc_ph)
seg_test_acc_sum_op = tf.summary.scalar('seg_testing_acc', seg_testing_acc_ph)
seg_test_acc_avg_cat_op = tf.summary.scalar('seg_testing_acc_avg_cat', seg_testing_acc_avg_cat_ph)
tf.get_variable_scope().reuse_variables()
grads = trainer.compute_gradients(loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
train_op = trainer.apply_gradients(grads, global_step=batch)
saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=20)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init)
train_writer = tf.summary.FileWriter(SUMMARIES_FOLDER + '/train', sess.graph)
test_writer = tf.summary.FileWriter(SUMMARIES_FOLDER + '/test')
train_file_list = provider.getDataFiles(TRAINING_FILE_LIST)
num_train_file = len(train_file_list)
test_file_list = provider.getDataFiles(TESTING_FILE_LIST)
num_test_file = len(test_file_list)
fcmd = open(os.path.join(LOG_STORAGE_PATH, 'cmd.txt'), 'w')
fcmd.write(str(FLAGS))
fcmd.close()
# write logs to the disk
flog = open(os.path.join(LOG_STORAGE_PATH, 'log.txt'), 'w')
def train_one_epoch(train_file_idx, epoch_num):
is_training = True
for i in range(num_train_file):
cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[train_file_idx[i]])
printout(flog, 'Loading train file ' + cur_train_filename)
cur_data, cur_labels, cur_seg = provider.load_h5_data_label_seg(cur_train_filename)
cur_data, cur_labels, order = provider.shuffle_data(cur_data, np.squeeze(cur_labels))
cur_seg = cur_seg[order, ...]
cur_labels_one_hot = convert_label_to_one_hot(cur_labels)
num_data = len(cur_labels)
num_batch = num_data // (FLAGS.num_gpu * batch_size) # For all working gpus
total_loss = 0.0
total_seg_acc = 0.0
for j in range(num_batch):
begidx_0 = j * batch_size
endidx_0 = (j + 1) * batch_size
begidx_1 = (j + 1) * batch_size
endidx_1 = (j + 2) * batch_size
feed_dict = {
# For the first gpu
pointclouds_phs[0]: cur_data[begidx_0: endidx_0, ...],
input_label_phs[0]: cur_labels_one_hot[begidx_0: endidx_0, ...],
seg_phs[0]: cur_seg[begidx_0: endidx_0, ...],
is_training_phs[0]: is_training,
# For the second gpu
pointclouds_phs[1]: cur_data[begidx_1: endidx_1, ...],
input_label_phs[1]: cur_labels_one_hot[begidx_1: endidx_1, ...],
seg_phs[1]: cur_seg[begidx_1: endidx_1, ...],
is_training_phs[1]: is_training,
}
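# Note: although --num_gpu is configurable, this feed_dict hardwires exactly
# two towers; and since j advances by one while each tower consumes a
# batch_size slice, consecutive iterations overlap by batch_size examples.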
# train_op is for both gpus, and the others are for gpu_1
_, loss_val, per_instance_seg_loss_val, seg_pred_val, pred_seg_res \
= sess.run([train_op, loss, per_instance_seg_loss, seg_pred, per_instance_seg_pred_res], \
feed_dict=feed_dict)
per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx_1: endidx_1, ...], axis=1)
average_part_acc = np.mean(per_instance_part_acc)
total_loss += loss_val
total_seg_acc += average_part_acc
total_loss = total_loss * 1.0 / num_batch
total_seg_acc = total_seg_acc * 1.0 / num_batch
lr_sum, bn_decay_sum, batch_sum, train_loss_sum, train_seg_acc_sum = sess.run(\
[lr_op, bn_decay_op, batch_op, total_train_loss_sum_op, seg_train_acc_sum_op], \
feed_dict={total_training_loss_ph: total_loss, seg_training_acc_ph: total_seg_acc})
train_writer.add_summary(train_loss_sum, i + epoch_num * num_train_file)
train_writer.add_summary(lr_sum, i + epoch_num * num_train_file)
train_writer.add_summary(bn_decay_sum, i + epoch_num * num_train_file)
train_writer.add_summary(train_seg_acc_sum, i + epoch_num * num_train_file)
train_writer.add_summary(batch_sum, i + epoch_num * num_train_file)
printout(flog, '\tTraining Total Mean_loss: %f' % total_loss)
printout(flog, '\t\tTraining Seg Accuracy: %f' % total_seg_acc)
def eval_one_epoch(epoch_num):
is_training = False
total_loss = 0.0
total_seg_acc = 0.0
total_seen = 0
total_seg_acc_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.float32)
total_seen_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.int32)
for i in range(num_test_file):
cur_test_filename = os.path.join(hdf5_data_dir, test_file_list[i])
printout(flog, 'Loading test file ' + cur_test_filename)
cur_data, cur_labels, cur_seg = provider.load_h5_data_label_seg(cur_test_filename)
cur_labels = np.squeeze(cur_labels)
cur_labels_one_hot = convert_label_to_one_hot(cur_labels)
num_data = len(cur_labels)
num_batch = num_data // batch_size
# Run on gpu_1, since the tensors used for evaluation are defined on gpu_1
for j in range(num_batch):
begidx = j * batch_size
endidx = (j + 1) * batch_size
feed_dict = {
pointclouds_phs[1]: cur_data[begidx: endidx, ...],
input_label_phs[1]: cur_labels_one_hot[begidx: endidx, ...],
seg_phs[1]: cur_seg[begidx: endidx, ...],
is_training_phs[1]: is_training}
loss_val, per_instance_seg_loss_val, seg_pred_val, pred_seg_res \
= sess.run([loss, per_instance_seg_loss, seg_pred, per_instance_seg_pred_res], \
feed_dict=feed_dict)
per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx: endidx, ...], axis=1)
average_part_acc = np.mean(per_instance_part_acc)
total_seen += 1
total_loss += loss_val
total_seg_acc += average_part_acc
for shape_idx in range(begidx, endidx):
total_seen_per_cat[cur_labels[shape_idx]] += 1
total_seg_acc_per_cat[cur_labels[shape_idx]] += per_instance_part_acc[shape_idx - begidx]
total_loss = total_loss * 1.0 / total_seen
total_seg_acc = total_seg_acc * 1.0 / total_seen
test_loss_sum, test_seg_acc_sum = sess.run(\
[total_test_loss_sum_op, seg_test_acc_sum_op], \
feed_dict={total_testing_loss_ph: total_loss, \
seg_testing_acc_ph: total_seg_acc})
test_writer.add_summary(test_loss_sum, (epoch_num+1) * num_train_file-1)
test_writer.add_summary(test_seg_acc_sum, (epoch_num+1) * num_train_file-1)
printout(flog, '\tTesting Total Mean_loss: %f' % total_loss)
printout(flog, '\t\tTesting Seg Accuracy: %f' % total_seg_acc)
for cat_idx in range(NUM_CATEGORIES):
if total_seen_per_cat[cat_idx] > 0:
printout(flog, '\n\t\tCategory %s Object Number: %d' % (all_obj_cats[cat_idx][0], total_seen_per_cat[cat_idx]))
printout(flog, '\t\tCategory %s Seg Accuracy: %f' % (all_obj_cats[cat_idx][0], total_seg_acc_per_cat[cat_idx]/total_seen_per_cat[cat_idx]))
if not os.path.exists(MODEL_STORAGE_PATH):
os.mkdir(MODEL_STORAGE_PATH)
for epoch in range(TRAINING_EPOCHES):
printout(flog, '\n<<< Testing on the test dataset ...')
eval_one_epoch(epoch)
printout(flog, '\n>>> Training for the epoch %d/%d ...' % (epoch, TRAINING_EPOCHES))
train_file_idx = np.arange(0, len(train_file_list))
np.random.shuffle(train_file_idx)
train_one_epoch(train_file_idx, epoch)
if epoch % 5 == 0:
cp_filename = saver.save(sess, os.path.join(MODEL_STORAGE_PATH, 'epoch_' + str(epoch)+'.ckpt'))
printout(flog, 'Successfully stored the checkpoint model into ' + cp_filename)
flog.flush()
flog.close()
if __name__=='__main__':
train()
================================================
FILE: dgcnn/tensorflow/provider.py
================================================
import os
import sys
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Download dataset for point cloud classification
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
def rotate_point_cloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
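# The matrix above is a rotation about the +Y (up) axis; applied to row
# vectors as p' = p @ R, it leaves each point's Y coordinate unchanged.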
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
""" Rotate the point cloud along up direction with certain angle.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
#rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18):
""" Randomly perturb the point clouds by small rotations
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip)
Rx = np.array([[1,0,0],
[0,np.cos(angles[0]),-np.sin(angles[0])],
[0,np.sin(angles[0]),np.cos(angles[0])]])
Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])],
[0,1,0],
[-np.sin(angles[1]),0,np.cos(angles[1])]])
Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0],
[np.sin(angles[2]),np.cos(angles[2]),0],
[0,0,1]])
R = np.dot(Rz, np.dot(Ry,Rx))
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), R)
return rotated_data
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
B, N, C = batch_data.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)
jittered_data += batch_data
return jittered_data
def shift_point_cloud(batch_data, shift_range=0.1):
""" Randomly shift point cloud. Shift is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, shifted batch of point clouds
"""
B, N, C = batch_data.shape
shifts = np.random.uniform(-shift_range, shift_range, (B,3))
for batch_index in range(B):
batch_data[batch_index,:,:] += shifts[batch_index,:]
return batch_data
def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
""" Randomly scale the point cloud. Scale is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
B, N, C = batch_data.shape
scales = np.random.uniform(scale_low, scale_high, B)
for batch_index in range(B):
batch_data[batch_index,:,:] *= scales[batch_index]
return batch_data
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
return (data, label)
def loadDataFile(filename):
return load_h5(filename)
def load_h5_data_label_seg(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:] # (2048, 2048, 3)
label = f['label'][:] # (2048, 1)
seg = f['pid'][:] # (2048, 2048)
return (data, label, seg)
================================================
FILE: dgcnn/tensorflow/sem_seg/README.md
================================================
## Semantic segmentation of indoor scenes
### Dataset
1. Download prepared HDF5 data for training:
```
sh +x download_data.sh
```
2. Download the 3D indoor parsing dataset (S3DIS) for testing and visualization. The "Stanford3dDataset_v1.2_Aligned_Version.zip" version of the dataset is used. Unzip the downloaded file into "dgcnn/data/", and then run
```
python collect_indoor3d_data.py
```
to generate "dgcnn/data/stanford_indoor3d"
### Train
We use 6-fold training: six models are trained, each holding out one of the six areas as its testing area. As above, 2 GPUs are used for distributed training. To train the 6 models sequentially, run
```
sh +x train_job.sh
```
### Evaluation
1. To generate predicted results for all 6 areas, run
```
sh +x test_job.sh
```
The model parameters are saved every 10 epochs; the checkpoint used to generate predicted results can be changed by setting "--model_path" in "test_job.sh". For example, to use the model saved after 70 epochs, set "--model_path" to "log*n*/epoch_70.ckpt" for *n* = 1, 2, ..., 6. To visualize the results, add the "--visu" flag at the end of each line in "test_job.sh".
2. To obtain overall quantitative evaluation results, run
```
python eval_iou_accuracy.py
```
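For reference, `eval_iou_accuracy.py` derives each per-class IoU from point counts as

```
IoU_c = TP_c / (GT_c + Pred_c - TP_c)
```

where `GT_c`, `Pred_c`, and `TP_c` are the ground-truth, predicted, and correctly predicted point counts for class `c`, and the reported average IoU is the unweighted mean over the 13 classes.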
================================================
FILE: dgcnn/tensorflow/sem_seg/batch_inference.py
================================================
import argparse
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from model import *
import indoor3d_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_argument('--dump_dir', required=True, help='dump folder path')
parser.add_argument('--output_filelist', required=True, help='TXT filename, filelist, each line is an output for a room')
parser.add_argument('--room_data_filelist', required=True, help='TXT filename, filelist, each line is a test room data label file.')
parser.add_argument('--no_clutter', action='store_true', help='If set, do not count the clutter class')
parser.add_argument('--visu', action='store_true', help='Whether to output OBJ file for prediction visualization.')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(FLAGS.room_data_filelist)]
NUM_CLASSES = 13
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate():
is_training = False
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
pred = get_model(pointclouds_pl, is_training_pl)
loss = get_loss(pred, labels_pl)
pred_softmax = tf.nn.softmax(pred)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'pred_softmax': pred_softmax,
'loss': loss}
total_correct = 0
total_seen = 0
fout_out_filelist = open(FLAGS.output_filelist, 'w')
for room_path in ROOM_PATH_LIST:
out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt'
out_data_label_filename = os.path.join(DUMP_DIR, out_data_label_filename)
out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename)
print(room_path, out_data_label_filename)
# Evaluate room one by one.
a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename)
total_correct += a
total_seen += b
fout_out_filelist.write(out_data_label_filename+'\n')
fout_out_filelist.close()
log_string('all room eval accuracy: %f'% (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename):
error_cnt = 0
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
if FLAGS.visu:
fout = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_pred.obj'), 'w')
fout_gt = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_gt.obj'), 'w')
fout_real_color = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_real_color.obj'), 'w')
fout_data_label = open(out_data_label_filename, 'w')
fout_gt_label = open(out_gt_label_filename, 'w')
current_data, current_label = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT)
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
# Get the room dimensions.
data_label = np.load(room_path)
data = data_label[:,0:6]
max_room_x = max(data[:,0])
max_room_y = max(data[:,1])
max_room_z = max(data[:,2])
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
print(file_size)
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
cur_batch_size = end_idx - start_idx
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']],
feed_dict=feed_dict)
if FLAGS.no_clutter:
pred_label = np.argmax(pred_val[:,:,0:12], 2) # BxN
else:
pred_label = np.argmax(pred_val, 2) # BxN
# Save prediction labels to OBJ file
for b in range(BATCH_SIZE):
pts = current_data[start_idx+b, :, :]
l = current_label[start_idx+b,:]
pts[:,6] *= max_room_x
pts[:,7] *= max_room_y
pts[:,8] *= max_room_z
pts[:,3:6] *= 255.0
pred = pred_label[b, :]
for i in range(NUM_POINT):
color = indoor3d_util.g_label2color[pred[i]]
color_gt = indoor3d_util.g_label2color[current_label[start_idx+b, i]]
if FLAGS.visu:
fout.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color[0], color[1], color[2]))
fout_gt.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color_gt[0], color_gt[1], color_gt[2]))
fout_data_label.write('%f %f %f %d %d %d %f %d\n' % (pts[i,6], pts[i,7], pts[i,8], pts[i,3], pts[i,4], pts[i,5], pred_val[b,i,pred[i]], pred[i]))
fout_gt_label.write('%d\n' % (l[i]))
correct = np.sum(pred_label == current_label[start_idx:end_idx,:])
total_correct += correct
total_seen += (cur_batch_size*NUM_POINT)
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
for j in range(NUM_POINT):
l = current_label[i, j]
total_seen_class[l] += 1
total_correct_class[l] += (pred_label[i-start_idx, j] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
fout_data_label.close()
fout_gt_label.close()
if FLAGS.visu:
fout.close()
fout_gt.close()
return total_correct, total_seen
if __name__=='__main__':
with tf.Graph().as_default():
evaluate()
LOG_FOUT.close()
================================================
FILE: dgcnn/tensorflow/sem_seg/collect_indoor3d_data.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
import indoor3d_util
anno_paths = [line.rstrip() for line in open(os.path.join(BASE_DIR, 'meta/anno_paths.txt'))]
anno_paths = [os.path.join(indoor3d_util.DATA_PATH, p) for p in anno_paths]
output_folder = os.path.join(ROOT_DIR, 'data/stanford_indoor3d')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually.
for anno_path in anno_paths:
print(anno_path)
try:
elements = anno_path.split('/')
out_filename = elements[-3]+'_'+elements[-2]+'.npy'
indoor3d_util.collect_point_label(anno_path, os.path.join(output_folder, out_filename), 'numpy')
except Exception:
print(anno_path, 'ERROR!!')
================================================
FILE: dgcnn/tensorflow/sem_seg/download_data.sh
================================================
#!/bin/bash
# Download HDF5 for indoor 3d semantic segmentation (around 1.6GB) -> 'indoor3d_sem_seg_hdf5_data'
wget https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip
unzip indoor3d_sem_seg_hdf5_data.zip
rm indoor3d_sem_seg_hdf5_data.zip
================================================
FILE: dgcnn/tensorflow/sem_seg/eval_iou_accuracy.py
================================================
import numpy as np
pred_data_label_filenames = []
for i in range(1,7):
file_name = 'log{}/output_filelist.txt'.format(i)
pred_data_label_filenames += [line.rstrip() for line in open(file_name)]
# str.rstrip strips a trailing *character set*, not a suffix, so the original
# rstrip('_pred\.txt') call could eat legitimate filename characters; replace
# the '_pred.txt' suffix explicitly instead.
gt_label_filenames = [f[:-len('_pred.txt')] + '_gt.txt' for f in pred_data_label_filenames]
num_room = len(gt_label_filenames)
gt_classes = [0 for _ in range(13)]
positive_classes = [0 for _ in range(13)]
true_positive_classes = [0 for _ in range(13)]
for i in range(num_room):
print(i)
data_label = np.loadtxt(pred_data_label_filenames[i])
pred_label = data_label[:,-1]
gt_label = np.loadtxt(gt_label_filenames[i])
print(gt_label.shape)
for j in range(gt_label.shape[0]):
gt_l = int(gt_label[j])
pred_l = int(pred_label[j])
gt_classes[gt_l] += 1
positive_classes[pred_l] += 1
true_positive_classes[gt_l] += int(gt_l==pred_l)
print(gt_classes)
print(positive_classes)
print(true_positive_classes)
print('Overall accuracy: {0}'.format(sum(true_positive_classes)/float(sum(positive_classes))))
print('IoU:')
iou_list = []
for i in range(13):
iou = true_positive_classes[i]/float(gt_classes[i]+positive_classes[i]-true_positive_classes[i])
print(iou)
iou_list.append(iou)
print('avg IoU:')
print(sum(iou_list)/13.0)
================================================
FILE: dgcnn/tensorflow/sem_seg/indoor3d_util.py
================================================
import numpy as np
import glob
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
# -----------------------------------------------------------------------------
# CONSTANTS
# -----------------------------------------------------------------------------
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'Stanford3dDataset_v1.2_Aligned_Version')
g_classes = [x.rstrip() for x in open(os.path.join(BASE_DIR, 'meta/class_names.txt'))]
g_class2label = {cls: i for i,cls in enumerate(g_classes)}
g_class2color = {'ceiling': [0,255,0],
'floor': [0,0,255],
'wall': [0,255,255],
'beam': [255,255,0],
'column': [255,0,255],
'window': [100,100,255],
'door': [200,200,100],
'table': [170,120,200],
'chair': [255,0,0],
'sofa': [200,100,100],
'bookcase': [10,200,100],
'board': [200,200,200],
'clutter': [50,50,50]}
g_easy_view_labels = [7,8,9,10,11,1]
g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes}
# -----------------------------------------------------------------------------
# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES
# -----------------------------------------------------------------------------
def collect_point_label(anno_path, out_filename, file_format='txt'):
""" Convert original dataset files to data_label file (each line is XYZRGBL).
We aggregate all the points from each instance in the room.
Args:
anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
out_filename: path to save collected points and labels (each line is XYZRGBL)
file_format: txt or numpy, determines what file format to save.
Returns:
None
Note:
the points are shifted before saving; the most negative point is now at the origin.
"""
points_list = []
for f in glob.glob(os.path.join(anno_path, '*.txt')):
cls = os.path.basename(f).split('_')[0]
if cls not in g_classes: # some rooms contain extra classes (e.g. 'stairs'); map them to clutter
cls = 'clutter'
points = np.loadtxt(f)
labels = np.ones((points.shape[0],1)) * g_class2label[cls]
points_list.append(np.concatenate([points, labels], 1)) # Nx7
data_label = np.concatenate(points_list, 0)
xyz_min = np.amin(data_label, axis=0)[0:3]
data_label[:, 0:3] -= xyz_min
if file_format=='txt':
fout = open(out_filename, 'w')
for i in range(data_label.shape[0]):
fout.write('%f %f %f %d %d %d %d\n' % \
(data_label[i,0], data_label[i,1], data_label[i,2],
data_label[i,3], data_label[i,4], data_label[i,5],
data_label[i,6]))
fout.close()
elif file_format=='numpy':
np.save(out_filename, data_label)
else:
print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
(file_format))
exit()
def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False):
""" For visualization of a room from data_label file,
input_filename: each line is X Y Z R G B L
out_filename: OBJ filename,
visualize input file by coloring point with label color
easy_view: only visualize furniture and floor
"""
data_label = np.loadtxt(input_filename)
data = data_label[:, 0:6]
label = data_label[:, -1].astype(int)
fout = open(out_filename, 'w')
for i in range(data.shape[0]):
color = g_label2color[label[i]]
if easy_view and (label[i] not in g_easy_view_labels):
continue
if no_wall and ((label[i] == 2) or (label[i]==0)):
continue
if label_color:
fout.write('v %f %f %f %d %d %d\n' % \
(data[i,0], data[i,1], data[i,2], color[0], color[1], color[2]))
else:
fout.write('v %f %f %f %d %d %d\n' % \
(data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], data[i,5]))
fout.close()
# -----------------------------------------------------------------------------
# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING
# -----------------------------------------------------------------------------
def sample_data(data, num_sample):
""" data is in N x ...
we want to keep num_samplexC of them.
if N > num_sample, we will randomly keep num_sample of them.
if N < num_sample, we will randomly duplicate samples.
"""
N = data.shape[0]
if (N == num_sample):
return data, range(N)
elif (N > num_sample):
sample = np.random.choice(N, num_sample)
return data[sample, ...], sample
else:
sample = np.random.choice(N, num_sample-N)
dup_data = data[sample, ...]
return np.concatenate([data, dup_data], 0), list(range(N)) + list(sample)
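# Note: np.random.choice above is used with its default replace=True, so both
# the subsampling and the duplication branches may draw the same index more
# than once.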
def sample_data_label(data, label, num_sample):
new_data, sample_indices = sample_data(data, num_sample)
new_label = label[sample_indices]
return new_data, new_label
def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
random_sample=False, sample_num=None, sample_aug=1):
""" Prepare block training data.
Args:
data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
assumes the data is shifted (min point is origin) and aligned
(aligned with XYZ axis)
label: N size uint8 numpy array from 0-12
num_point: int, how many points to sample in each block
block_size: float, physical size of the block in meters
stride: float, stride for block sweeping
random_sample: bool, if True, we will randomly sample blocks in the room
sample_num: int, if random sample, how many blocks to sample
[default: room area]
sample_aug: if random sample, how much aug
Returns:
block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]
block_labels: K x num_point x 1 np array of uint8 labels
TODO: for this version, blocking is a fixed, non-overlapping pattern.
"""
assert(stride<=block_size)
limit = np.amax(data, 0)[0:3]
# Get the corner location for our sampling blocks
xbeg_list = []
ybeg_list = []
if not random_sample:
num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1
num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1
for i in range(num_block_x):
for j in range(num_block_y):
xbeg_list.append(i*stride)
ybeg_list.append(j*stride)
else:
num_block_x = int(np.ceil(limit[0] / block_size))
num_block_y = int(np.ceil(limit[1] / block_size))
if sample_num is None:
sample_num = num_block_x * num_block_y * sample_aug
for _ in range(sample_num):
xbeg = np.random.uniform(-block_size, limit[0])
ybeg = np.random.uniform(-block_size, limit[1])
xbeg_list.append(xbeg)
ybeg_list.append(ybeg)
# Collect blocks
block_data_list = []
block_label_list = []
idx = 0
for idx in range(len(xbeg_list)):
xbeg = xbeg_list[idx]
ybeg = ybeg_list[idx]
xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg)
ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg)
cond = xcond & ycond
if np.sum(cond) < 100: # discard block if it contains fewer than 100 points
continue
block_data = data[cond, :]
block_label = label[cond]
# randomly subsample data
block_data_sampled, block_label_sampled = \
sample_data_label(block_data, block_label, num_point)
block_data_list.append(np.expand_dims(block_data_sampled, 0))
block_label_list.append(np.expand_dims(block_label_sampled, 0))
return np.concatenate(block_data_list, 0), \
np.concatenate(block_label_list, 0)
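# Worked example (hypothetical room size): for a 5m x 4m footprint with
# block_size=1.0 and stride=1.0, the sweep places
# num_block_x = ceil((5-1)/1)+1 = 5 and num_block_y = ceil((4-1)/1)+1 = 4
# candidate corners, i.e. 20 blocks, after which blocks holding fewer than
# 100 points are discarded.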
def room2blocks_plus(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug):
""" room2block with input filename and RGB preprocessing.
"""
data = data_label[:,0:6]
data[:,3:6] /= 255.0
label = data_label[:,-1].astype(np.uint8)
return room2blocks(data, label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0,
random_sample=False, sample_num=None, sample_aug=1):
if data_label_filename[-3:] == 'txt':
data_label = np.loadtxt(data_label_filename)
elif data_label_filename[-3:] == 'npy':
data_label = np.load(data_label_filename)
else:
print('Unknown file type! exiting.')
exit()
return room2blocks_plus(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug):
""" room2block, with input filename and RGB preprocessing.
for each block centralize XYZ, add normalized XYZ as 678 channels
"""
data = data_label[:,0:6]
data[:,3:6] /= 255.0
label = data_label[:,-1].astype(np.uint8)
max_room_x = max(data[:,0])
max_room_y = max(data[:,1])
max_room_z = max(data[:,2])
data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
for b in range(data_batch.shape[0]):
new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
minx = min(data_batch[b, :, 0])
miny = min(data_batch[b, :, 1])
data_batch[b, :, 0] -= (minx+block_size/2)
data_batch[b, :, 1] -= (miny+block_size/2)
new_data_batch[:, :, 0:6] = data_batch
return new_data_batch, label_batch
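# Resulting 9-channel layout per point: columns 0-2 are block-local XYZ (XY
# centered on the block center), columns 3-5 are RGB scaled to [0,1], and
# columns 6-8 are XYZ normalized by the room extents.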
def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,
random_sample=False, sample_num=None, sample_aug=1):
if data_label_filename[-3:] == 'txt':
data_label = np.loadtxt(data_label_filename)
elif data_label_filename[-3:] == 'npy':
data_label = np.load(data_label_filename)
else:
print('Unknown file type! exiting.')
exit()
return room2blocks_plus_normalized(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
def room2samples(data, label, sample_num_point):
""" Prepare whole room samples.
Args:
data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
assumes the data is shifted (min point is origin) and
aligned (aligned with XYZ axis)
label: N size uint8 numpy array from 0-12
sample_num_point: int, how many points to sample in each sample
Returns:
sample_datas: K x sample_num_point x 9
numpy array of XYZRGBX'Y'Z', RGB is in [0,1]
sample_labels: K x sample_num_point x 1 np array of uint8 labels
"""
N = data.shape[0]
order = np.arange(N)
np.random.shuffle(order)
data = data[order, :]
label = label[order]
batch_num = int(np.ceil(N / float(sample_num_point)))
sample_datas = np.zeros((batch_num, sample_num_point, 6))
sample_labels = np.zeros((batch_num, sample_num_point, 1))
for i in range(batch_num):
beg_idx = i*sample_num_point
end_idx = min((i+1)*sample_num_point, N)
num = end_idx - beg_idx
sample_datas[i,0:num,:] = data[beg_idx:end_idx, :]
sample_labels[i,0:num,0] = label[beg_idx:end_idx]
if num < sample_num_point:
makeup_indices = np.random.choice(N, sample_num_point - num)
sample_datas[i,num:,:] = data[makeup_indices, :]
sample_labels[i,num:,0] = label[makeup_indices]
return sample_datas, sample_labels
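# Note: the final chunk is padded with points re-drawn uniformly from the
# whole room, so every returned sample holds exactly sample_num_point points.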
def room2samples_plus_normalized(data_label, num_point):
""" room2sample, with input filename and RGB preprocessing.
for each block centralize XYZ, add normalized XYZ as 678 channels
"""
data = data_label[:,0:6]
data[:,3:6] /= 255.0
label = data_label[:,-1].astype(np.uint8)
max_room_x = max(data[:,0])
max_room_y = max(data[:,1])
max_room_z = max(data[:,2])
#print(max_room_x, max_room_y, max_room_z)
data_batch, label_batch = room2samples(data, label, num_point)
new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
for b in range(data_batch.shape[0]):
new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
#minx = min(data_batch[b, :, 0])
#miny = min(data_batch[b, :, 1])
#data_batch[b, :, 0] -= (minx+block_size/2)
#data_batch[b, :, 1] -= (miny+block_size/2)
new_data_batch[:, :, 0:6] = data_batch
return new_data_batch, label_batch
def room2samples_wrapper_normalized(data_label_filename, num_point):
if data_label_filename[-3:] == 'txt':
data_label = np.loadtxt(data_label_filename)
elif data_label_filename[-3:] == 'npy':
data_label = np.load(data_label_filename)
else:
print('Unknown file type! exiting.')
exit()
return room2samples_plus_normalized(data_label, num_point)
# -----------------------------------------------------------------------------
# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation)
# -----------------------------------------------------------------------------
def collect_bounding_box(anno_path, out_filename):
""" Compute bounding boxes from each instance in original dataset files on
one room. **We assume the bbox is aligned with XYZ coordinate.**
Args:
anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
out_filename: path to save instance bounding boxes for that room.
each line is x1 y1 z1 x2 y2 z2 label,
where (x1,y1,z1) is the point on the diagonal closer to origin
Returns:
None
Note:
room points are shifted, the most negative point is now at origin.
"""
bbox_label_list = []
for f in glob.glob(os.path.join(anno_path, '*.txt')):
cls = os.path.basename(f).split('_')[0]
if cls not in g_classes: # some rooms contain extra classes (e.g. 'stairs'); map them to clutter
cls = 'clutter'
points = np.loadtxt(f)
label = g_class2label[cls]
# Compute tightest axis aligned bounding box
xyz_min = np.amin(points[:, 0:3], axis=0)
xyz_max = np.amax(points[:, 0:3], axis=0)
ins_bbox_label = np.expand_dims(
np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
bbox_label_list.append(ins_bbox_label)
bbox_label = np.concatenate(bbox_label_list, 0)
room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
bbox_label[:, 0:3] -= room_xyz_min
bbox_label[:, 3:6] -= room_xyz_min
fout = open(out_filename, 'w')
for i in range(bbox_label.shape[0]):
fout.write('%f %f %f %f %f %f %d\n' % \
(bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],
bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],
bbox_label[i,6]))
fout.close()
def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False):
""" Visualization of bounding boxes.
Args:
input_filename: each line is x1 y1 z1 x2 y2 z2 label
out_filename_prefix: OBJ filename prefix,
visualize object by g_label2color
easy_view: if True, only visualize furniture and floor
Returns:
output a list of OBJ file and MTL files with the same prefix
"""
bbox_label = np.loadtxt(input_filename)
bbox = bbox_label[:, 0:6]
label = bbox_label[:, -1].astype(int)
v_cnt = 0 # count vertex
ins_cnt = 0 # count instance
for i in range(bbox.shape[0]):
if easy_view and (label[i] not in g_easy_view_labels):
continue
obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj'
mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl'
fout_obj = open(obj_filename, 'w')
fout_mtl = open(mtl_filename, 'w')
fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))
length = bbox[i, 3:6] - bbox[i, 0:3]
a = length[0]
b = length[1]
c = length[2]
x = bbox[i, 0]
y = bbox[i, 1]
z = bbox[i, 2]
color = np.array(g_label2color[label[i]], dtype=float) / 255.0
material = 'material%d' % (ins_cnt)
fout_obj.write('usemtl %s\n' % (material))
fout_obj.write('v %f %f %f\n' % (x,y,z+c))
fout_obj.write('v %f %f %f\n' % (x,y+b,z+c))
fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c))
fout_obj.write('v %f %f %f\n' % (x+a,y,z+c))
fout_obj.write('v %f %f %f\n' % (x,y,z))
fout_obj.write('v %f %f %f\n' % (x,y+b,z))
fout_obj.write('v %f %f %f\n' % (x+a,y+b,z))
fout_obj.write('v %f %f %f\n' % (x+a,y,z))
fout_obj.write('g default\n')
v_cnt = 0 # for individual box
fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))
fout_obj.write('\n')
fout_mtl.write('newmtl %s\n' % (material))
fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
fout_mtl.write('\n')
fout_obj.close()
fout_mtl.close()
v_cnt += 8
ins_cnt += 1
def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False):
""" Visualization of bounding boxes.
Args:
input_filename: each line is x1 y1 z1 x2 y2 z2 label
out_filename_prefix: OBJ filename prefix,
visualize object by g_label2color
easy_view: if True, only visualize furniture and floor
permute: if not None, permute XYZ for rendering, e.g. [0 2 1]
center: if True, move obj to have zero origin
Returns:
output a list of OBJ file and MTL files with the same prefix
"""
bbox_label = np.loadtxt(input_filename)
bbox = bbox_label[:, 0:6]
if permute is not None:
assert(len(permute)==3)
permute = np.array(permute)
bbox[:,0:3] = bbox[:,permute]
bbox[:,3:6] = bbox[:,permute+3]
if center:
xyz_max = np.amax(bbox[:,3:6], 0)
bbox[:,0:3] -= (xyz_max/2.0)
bbox[:,3:6] -= (xyz_max/2.0)
bbox /= np.max(xyz_max/2.0)
label = bbox_label[:, -1].astype(int)
obj_filename = out_filename_prefix+'.obj'
mtl_filename = out_filename_prefix+'.mtl'
fout_obj = open(obj_filename, 'w')
fout_mtl = open(mtl_filename, 'w')
fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))
v_cnt = 0 # count vertex
ins_cnt = 0 # count instance
for i in range(bbox.shape[0]):
if easy_view and (label[i] not in g_easy_view_labels):
continue
if exclude_table and label[i] == g_classes.index('table'):
continue
length = bbox[i, 3:6] - bbox[i, 0:3]
a = length[0]
b = length[1]
c = length[2]
x = bbox[i, 0]
y = bbox[i, 1]
z = bbox[i, 2]
color = np.array(g_label2color[label[i]], dtype=float) / 255.0
material = 'material%d' % (ins_cnt)
fout_obj.write('usemtl %s\n' % (material))
fout_obj.write('v %f %f %f\n' % (x,y,z+c))
fout_obj.write('v %f %f %f\n' % (x,y+b,z+c))
fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c))
fout_obj.write('v %f %f %f\n' % (x+a,y,z+c))
fout_obj.write('v %f %f %f\n' % (x,y,z))
fout_obj.write('v %f %f %f\n' % (x,y+b,z))
fout_obj.write('v %f %f %f\n' % (x+a,y+b,z))
fout_obj.write('v %f %f %f\n' % (x+a,y,z))
fout_obj.write('g default\n')
fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))
fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))
fout_obj.write('\n')
fout_mtl.write('newmtl %s\n' % (material))
fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
fout_mtl.write('\n')
v_cnt += 8
ins_cnt += 1
fout_obj.close()
fout_mtl.close()
def collect_point_bounding_box(anno_path, out_filename, file_format):
""" Compute bounding boxes from each instance in original dataset files on
one room. **We assume the bbox is aligned with XYZ coordinate.**
Save both the point XYZRGB and the bounding box for the point's
parent element.
Args:
anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
out_filename: path to save instance bounding boxes for each point,
plus the point's XYZRGBL
each line is XYZRGBL offsetX offsetY offsetZ a b c,
where cx = X+offsetX, cy = Y+offsetY, cz = Z+offsetZ,
where (cx,cy,cz) is center of the box, a,b,c are distances from center
to the surfaces of the box, i.e. x1 = cx-a, x2 = cx+a, y1=cy-b etc.
file_format: output file format, txt or numpy
Returns:
None
Note:
room points are shifted, the most negative point is now at origin.
"""
point_bbox_list = []
for f in glob.glob(os.path.join(anno_path, '*.txt')):
cls = os.path.basename(f).split('_')[0]
if cls not in g_classes: # some rooms contain extra classes (e.g. 'stairs'); map them to clutter
cls = 'clutter'
points = np.loadtxt(f) # Nx6
label = g_class2label[cls] # N,
# Compute tightest axis aligned bounding box
xyz_min = np.amin(points[:, 0:3], axis=0) # 3,
xyz_max = np.amax(points[:, 0:3], axis=0) # 3,
xyz_center = (xyz_min + xyz_max) / 2
dimension = (xyz_max - xyz_min) / 2
xyz_offsets = xyz_center - points[:,0:3] # Nx3
dimensions = np.ones((points.shape[0],3)) * dimension # Nx3
labels = np.ones((points.shape[0],1)) * label # N
point_bbox_list.append(np.concatenate([points, labels,
xyz_offsets, dimensions], 1)) # Nx13
point_bbox = np.concatenate(point_bbox_list, 0) # KxNx13
room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0)
point_bbox[:, 0:3] -= room_xyz_min
if file_format == 'txt':
fout = open(out_filename, 'w')
for i in range(point_bbox.shape[0]):
fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' % \
(point_bbox[i,0], point_bbox[i,1], point_bbox[i,2],
point_bbox[i,3], point_bbox[i,4], point_bbox[i,5],
point_bbox[i,6],
point_bbox[i,7], point_bbox[i,8], point_bbox[i,9],
point_bbox[i,10], point_bbox[i,11], point_bbox[i,12]))
fout.close()
elif file_format == 'numpy':
np.save(out_filename, point_bbox)
else:
print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
(file_format))
exit()
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/all_data_label.txt
================================================
Area_1_conferenceRoom_1.npy
Area_1_conferenceRoom_2.npy
Area_1_copyRoom_1.npy
Area_1_hallway_1.npy
Area_1_hallway_2.npy
Area_1_hallway_3.npy
Area_1_hallway_4.npy
Area_1_hallway_5.npy
Area_1_hallway_6.npy
Area_1_hallway_7.npy
Area_1_hallway_8.npy
Area_1_office_10.npy
Area_1_office_11.npy
Area_1_office_12.npy
Area_1_office_13.npy
Area_1_office_14.npy
Area_1_office_15.npy
Area_1_office_16.npy
Area_1_office_17.npy
Area_1_office_18.npy
Area_1_office_19.npy
Area_1_office_1.npy
Area_1_office_20.npy
Area_1_office_21.npy
Area_1_office_22.npy
Area_1_office_23.npy
Area_1_office_24.npy
Area_1_office_25.npy
Area_1_office_26.npy
Area_1_office_27.npy
Area_1_office_28.npy
Area_1_office_29.npy
Area_1_office_2.npy
Area_1_office_30.npy
Area_1_office_31.npy
Area_1_office_3.npy
Area_1_office_4.npy
Area_1_office_5.npy
Area_1_office_6.npy
Area_1_office_7.npy
Area_1_office_8.npy
Area_1_office_9.npy
Area_1_pantry_1.npy
Area_1_WC_1.npy
Area_2_auditorium_1.npy
Area_2_auditorium_2.npy
Area_2_conferenceRoom_1.npy
Area_2_hallway_10.npy
Area_2_hallway_11.npy
Area_2_hallway_12.npy
Area_2_hallway_1.npy
Area_2_hallway_2.npy
Area_2_hallway_3.npy
Area_2_hallway_4.npy
Area_2_hallway_5.npy
Area_2_hallway_6.npy
Area_2_hallway_7.npy
Area_2_hallway_8.npy
Area_2_hallway_9.npy
Area_2_office_10.npy
Area_2_office_11.npy
Area_2_office_12.npy
Area_2_office_13.npy
Area_2_office_14.npy
Area_2_office_1.npy
Area_2_office_2.npy
Area_2_office_3.npy
Area_2_office_4.npy
Area_2_office_5.npy
Area_2_office_6.npy
Area_2_office_7.npy
Area_2_office_8.npy
Area_2_office_9.npy
Area_2_storage_1.npy
Area_2_storage_2.npy
Area_2_storage_3.npy
Area_2_storage_4.npy
Area_2_storage_5.npy
Area_2_storage_6.npy
Area_2_storage_7.npy
Area_2_storage_8.npy
Area_2_storage_9.npy
Area_2_WC_1.npy
Area_2_WC_2.npy
Area_3_conferenceRoom_1.npy
Area_3_hallway_1.npy
Area_3_hallway_2.npy
Area_3_hallway_3.npy
Area_3_hallway_4.npy
Area_3_hallway_5.npy
Area_3_hallway_6.npy
Area_3_lounge_1.npy
Area_3_lounge_2.npy
Area_3_office_10.npy
Area_3_office_1.npy
Area_3_office_2.npy
Area_3_office_3.npy
Area_3_office_4.npy
Area_3_office_5.npy
Area_3_office_6.npy
Area_3_office_7.npy
Area_3_office_8.npy
Area_3_office_9.npy
Area_3_storage_1.npy
Area_3_storage_2.npy
Area_3_WC_1.npy
Area_3_WC_2.npy
Area_4_conferenceRoom_1.npy
Area_4_conferenceRoom_2.npy
Area_4_conferenceRoom_3.npy
Area_4_hallway_10.npy
Area_4_hallway_11.npy
Area_4_hallway_12.npy
Area_4_hallway_13.npy
Area_4_hallway_14.npy
Area_4_hallway_1.npy
Area_4_hallway_2.npy
Area_4_hallway_3.npy
Area_4_hallway_4.npy
Area_4_hallway_5.npy
Area_4_hallway_6.npy
Area_4_hallway_7.npy
Area_4_hallway_8.npy
Area_4_hallway_9.npy
Area_4_lobby_1.npy
Area_4_lobby_2.npy
Area_4_office_10.npy
Area_4_office_11.npy
Area_4_office_12.npy
Area_4_office_13.npy
Area_4_office_14.npy
Area_4_office_15.npy
Area_4_office_16.npy
Area_4_office_17.npy
Area_4_office_18.npy
Area_4_office_19.npy
Area_4_office_1.npy
Area_4_office_20.npy
Area_4_office_21.npy
Area_4_office_22.npy
Area_4_office_2.npy
Area_4_office_3.npy
Area_4_office_4.npy
Area_4_office_5.npy
Area_4_office_6.npy
Area_4_office_7.npy
Area_4_office_8.npy
Area_4_office_9.npy
Area_4_storage_1.npy
Area_4_storage_2.npy
Area_4_storage_3.npy
Area_4_storage_4.npy
Area_4_WC_1.npy
Area_4_WC_2.npy
Area_4_WC_3.npy
Area_4_WC_4.npy
Area_5_conferenceRoom_1.npy
Area_5_conferenceRoom_2.npy
Area_5_conferenceRoom_3.npy
Area_5_hallway_10.npy
Area_5_hallway_11.npy
Area_5_hallway_12.npy
Area_5_hallway_13.npy
Area_5_hallway_14.npy
Area_5_hallway_15.npy
Area_5_hallway_1.npy
Area_5_hallway_2.npy
Area_5_hallway_3.npy
Area_5_hallway_4.npy
Area_5_hallway_5.npy
Area_5_hallway_6.npy
Area_5_hallway_7.npy
Area_5_hallway_8.npy
Area_5_hallway_9.npy
Area_5_lobby_1.npy
Area_5_office_10.npy
Area_5_office_11.npy
Area_5_office_12.npy
Area_5_office_13.npy
Area_5_office_14.npy
Area_5_office_15.npy
Area_5_office_16.npy
Area_5_office_17.npy
Area_5_office_18.npy
Area_5_office_19.npy
Area_5_office_1.npy
Area_5_office_20.npy
Area_5_office_21.npy
Area_5_office_22.npy
Area_5_office_23.npy
Area_5_office_24.npy
Area_5_office_25.npy
Area_5_office_26.npy
Area_5_office_27.npy
Area_5_office_28.npy
Area_5_office_29.npy
Area_5_office_2.npy
Area_5_office_30.npy
Area_5_office_31.npy
Area_5_office_32.npy
Area_5_office_33.npy
Area_5_office_34.npy
Area_5_office_35.npy
Area_5_office_36.npy
Area_5_office_37.npy
Area_5_office_38.npy
Area_5_office_39.npy
Area_5_office_3.npy
Area_5_office_40.npy
Area_5_office_41.npy
Area_5_office_42.npy
Area_5_office_4.npy
Area_5_office_5.npy
Area_5_office_6.npy
Area_5_office_7.npy
Area_5_office_8.npy
Area_5_office_9.npy
Area_5_pantry_1.npy
Area_5_storage_1.npy
Area_5_storage_2.npy
Area_5_storage_3.npy
Area_5_storage_4.npy
Area_5_WC_1.npy
Area_5_WC_2.npy
Area_6_conferenceRoom_1.npy
Area_6_copyRoom_1.npy
Area_6_hallway_1.npy
Area_6_hallway_2.npy
Area_6_hallway_3.npy
Area_6_hallway_4.npy
Area_6_hallway_5.npy
Area_6_hallway_6.npy
Area_6_lounge_1.npy
Area_6_office_10.npy
Area_6_office_11.npy
Area_6_office_12.npy
Area_6_office_13.npy
Area_6_office_14.npy
Area_6_office_15.npy
Area_6_office_16.npy
Area_6_office_17.npy
Area_6_office_18.npy
Area_6_office_19.npy
Area_6_office_1.npy
Area_6_office_20.npy
Area_6_office_21.npy
Area_6_office_22.npy
Area_6_office_23.npy
Area_6_office_24.npy
Area_6_office_25.npy
Area_6_office_26.npy
Area_6_office_27.npy
Area_6_office_28.npy
Area_6_office_29.npy
Area_6_office_2.npy
Area_6_office_30.npy
Area_6_office_31.npy
Area_6_office_32.npy
Area_6_office_33.npy
Area_6_office_34.npy
Area_6_office_35.npy
Area_6_office_36.npy
Area_6_office_37.npy
Area_6_office_3.npy
Area_6_office_4.npy
Area_6_office_5.npy
Area_6_office_6.npy
Area_6_office_7.npy
Area_6_office_8.npy
Area_6_office_9.npy
Area_6_openspace_1.npy
Area_6_pantry_1.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/anno_paths.txt
================================================
Area_1/conferenceRoom_1/Annotations
Area_1/conferenceRoom_2/Annotations
Area_1/copyRoom_1/Annotations
Area_1/hallway_1/Annotations
Area_1/hallway_2/Annotations
Area_1/hallway_3/Annotations
Area_1/hallway_4/Annotations
Area_1/hallway_5/Annotations
Area_1/hallway_6/Annotations
Area_1/hallway_7/Annotations
Area_1/hallway_8/Annotations
Area_1/office_10/Annotations
Area_1/office_11/Annotations
Area_1/office_12/Annotations
Area_1/office_13/Annotations
Area_1/office_14/Annotations
Area_1/office_15/Annotations
Area_1/office_16/Annotations
Area_1/office_17/Annotations
Area_1/office_18/Annotations
Area_1/office_19/Annotations
Area_1/office_1/Annotations
Area_1/office_20/Annotations
Area_1/office_21/Annotations
Area_1/office_22/Annotations
Area_1/office_23/Annotations
Area_1/office_24/Annotations
Area_1/office_25/Annotations
Area_1/office_26/Annotations
Area_1/office_27/Annotations
Area_1/office_28/Annotations
Area_1/office_29/Annotations
Area_1/office_2/Annotations
Area_1/office_30/Annotations
Area_1/office_31/Annotations
Area_1/office_3/Annotations
Area_1/office_4/Annotations
Area_1/office_5/Annotations
Area_1/office_6/Annotations
Area_1/office_7/Annotations
Area_1/office_8/Annotations
Area_1/office_9/Annotations
Area_1/pantry_1/Annotations
Area_1/WC_1/Annotations
Area_2/auditorium_1/Annotations
Area_2/auditorium_2/Annotations
Area_2/conferenceRoom_1/Annotations
Area_2/hallway_10/Annotations
Area_2/hallway_11/Annotations
Area_2/hallway_12/Annotations
Area_2/hallway_1/Annotations
Area_2/hallway_2/Annotations
Area_2/hallway_3/Annotations
Area_2/hallway_4/Annotations
Area_2/hallway_5/Annotations
Area_2/hallway_6/Annotations
Area_2/hallway_7/Annotations
Area_2/hallway_8/Annotations
Area_2/hallway_9/Annotations
Area_2/office_10/Annotations
Area_2/office_11/Annotations
Area_2/office_12/Annotations
Area_2/office_13/Annotations
Area_2/office_14/Annotations
Area_2/office_1/Annotations
Area_2/office_2/Annotations
Area_2/office_3/Annotations
Area_2/office_4/Annotations
Area_2/office_5/Annotations
Area_2/office_6/Annotations
Area_2/office_7/Annotations
Area_2/office_8/Annotations
Area_2/office_9/Annotations
Area_2/storage_1/Annotations
Area_2/storage_2/Annotations
Area_2/storage_3/Annotations
Area_2/storage_4/Annotations
Area_2/storage_5/Annotations
Area_2/storage_6/Annotations
Area_2/storage_7/Annotations
Area_2/storage_8/Annotations
Area_2/storage_9/Annotations
Area_2/WC_1/Annotations
Area_2/WC_2/Annotations
Area_3/conferenceRoom_1/Annotations
Area_3/hallway_1/Annotations
Area_3/hallway_2/Annotations
Area_3/hallway_3/Annotations
Area_3/hallway_4/Annotations
Area_3/hallway_5/Annotations
Area_3/hallway_6/Annotations
Area_3/lounge_1/Annotations
Area_3/lounge_2/Annotations
Area_3/office_10/Annotations
Area_3/office_1/Annotations
Area_3/office_2/Annotations
Area_3/office_3/Annotations
Area_3/office_4/Annotations
Area_3/office_5/Annotations
Area_3/office_6/Annotations
Area_3/office_7/Annotations
Area_3/office_8/Annotations
Area_3/office_9/Annotations
Area_3/storage_1/Annotations
Area_3/storage_2/Annotations
Area_3/WC_1/Annotations
Area_3/WC_2/Annotations
Area_4/conferenceRoom_1/Annotations
Area_4/conferenceRoom_2/Annotations
Area_4/conferenceRoom_3/Annotations
Area_4/hallway_10/Annotations
Area_4/hallway_11/Annotations
Area_4/hallway_12/Annotations
Area_4/hallway_13/Annotations
Area_4/hallway_14/Annotations
Area_4/hallway_1/Annotations
Area_4/hallway_2/Annotations
Area_4/hallway_3/Annotations
Area_4/hallway_4/Annotations
Area_4/hallway_5/Annotations
Area_4/hallway_6/Annotations
Area_4/hallway_7/Annotations
Area_4/hallway_8/Annotations
Area_4/hallway_9/Annotations
Area_4/lobby_1/Annotations
Area_4/lobby_2/Annotations
Area_4/office_10/Annotations
Area_4/office_11/Annotations
Area_4/office_12/Annotations
Area_4/office_13/Annotations
Area_4/office_14/Annotations
Area_4/office_15/Annotations
Area_4/office_16/Annotations
Area_4/office_17/Annotations
Area_4/office_18/Annotations
Area_4/office_19/Annotations
Area_4/office_1/Annotations
Area_4/office_20/Annotations
Area_4/office_21/Annotations
Area_4/office_22/Annotations
Area_4/office_2/Annotations
Area_4/office_3/Annotations
Area_4/office_4/Annotations
Area_4/office_5/Annotations
Area_4/office_6/Annotations
Area_4/office_7/Annotations
Area_4/office_8/Annotations
Area_4/office_9/Annotations
Area_4/storage_1/Annotations
Area_4/storage_2/Annotations
Area_4/storage_3/Annotations
Area_4/storage_4/Annotations
Area_4/WC_1/Annotations
Area_4/WC_2/Annotations
Area_4/WC_3/Annotations
Area_4/WC_4/Annotations
Area_5/conferenceRoom_1/Annotations
Area_5/conferenceRoom_2/Annotations
Area_5/conferenceRoom_3/Annotations
Area_5/hallway_10/Annotations
Area_5/hallway_11/Annotations
Area_5/hallway_12/Annotations
Area_5/hallway_13/Annotations
Area_5/hallway_14/Annotations
Area_5/hallway_15/Annotations
Area_5/hallway_1/Annotations
Area_5/hallway_2/Annotations
Area_5/hallway_3/Annotations
Area_5/hallway_4/Annotations
Area_5/hallway_5/Annotations
Area_5/hallway_6/Annotations
Area_5/hallway_7/Annotations
Area_5/hallway_8/Annotations
Area_5/hallway_9/Annotations
Area_5/lobby_1/Annotations
Area_5/office_10/Annotations
Area_5/office_11/Annotations
Area_5/office_12/Annotations
Area_5/office_13/Annotations
Area_5/office_14/Annotations
Area_5/office_15/Annotations
Area_5/office_16/Annotations
Area_5/office_17/Annotations
Area_5/office_18/Annotations
Area_5/office_19/Annotations
Area_5/office_1/Annotations
Area_5/office_20/Annotations
Area_5/office_21/Annotations
Area_5/office_22/Annotations
Area_5/office_23/Annotations
Area_5/office_24/Annotations
Area_5/office_25/Annotations
Area_5/office_26/Annotations
Area_5/office_27/Annotations
Area_5/office_28/Annotations
Area_5/office_29/Annotations
Area_5/office_2/Annotations
Area_5/office_30/Annotations
Area_5/office_31/Annotations
Area_5/office_32/Annotations
Area_5/office_33/Annotations
Area_5/office_34/Annotations
Area_5/office_35/Annotations
Area_5/office_36/Annotations
Area_5/office_37/Annotations
Area_5/office_38/Annotations
Area_5/office_39/Annotations
Area_5/office_3/Annotations
Area_5/office_40/Annotations
Area_5/office_41/Annotations
Area_5/office_42/Annotations
Area_5/office_4/Annotations
Area_5/office_5/Annotations
Area_5/office_6/Annotations
Area_5/office_7/Annotations
Area_5/office_8/Annotations
Area_5/office_9/Annotations
Area_5/pantry_1/Annotations
Area_5/storage_1/Annotations
Area_5/storage_2/Annotations
Area_5/storage_3/Annotations
Area_5/storage_4/Annotations
Area_5/WC_1/Annotations
Area_5/WC_2/Annotations
Area_6/conferenceRoom_1/Annotations
Area_6/copyRoom_1/Annotations
Area_6/hallway_1/Annotations
Area_6/hallway_2/Annotations
Area_6/hallway_3/Annotations
Area_6/hallway_4/Annotations
Area_6/hallway_5/Annotations
Area_6/hallway_6/Annotations
Area_6/lounge_1/Annotations
Area_6/office_10/Annotations
Area_6/office_11/Annotations
Area_6/office_12/Annotations
Area_6/office_13/Annotations
Area_6/office_14/Annotations
Area_6/office_15/Annotations
Area_6/office_16/Annotations
Area_6/office_17/Annotations
Area_6/office_18/Annotations
Area_6/office_19/Annotations
Area_6/office_1/Annotations
Area_6/office_20/Annotations
Area_6/office_21/Annotations
Area_6/office_22/Annotations
Area_6/office_23/Annotations
Area_6/office_24/Annotations
Area_6/office_25/Annotations
Area_6/office_26/Annotations
Area_6/office_27/Annotations
Area_6/office_28/Annotations
Area_6/office_29/Annotations
Area_6/office_2/Annotations
Area_6/office_30/Annotations
Area_6/office_31/Annotations
Area_6/office_32/Annotations
Area_6/office_33/Annotations
Area_6/office_34/Annotations
Area_6/office_35/Annotations
Area_6/office_36/Annotations
Area_6/office_37/Annotations
Area_6/office_3/Annotations
Area_6/office_4/Annotations
Area_6/office_5/Annotations
Area_6/office_6/Annotations
Area_6/office_7/Annotations
Area_6/office_8/Annotations
Area_6/office_9/Annotations
Area_6/openspace_1/Annotations
Area_6/pantry_1/Annotations
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area1_data_label.txt
================================================
data/stanford_indoor3d/Area_1_conferenceRoom_1.npy
data/stanford_indoor3d/Area_1_conferenceRoom_2.npy
data/stanford_indoor3d/Area_1_copyRoom_1.npy
data/stanford_indoor3d/Area_1_hallway_1.npy
data/stanford_indoor3d/Area_1_hallway_2.npy
data/stanford_indoor3d/Area_1_hallway_3.npy
data/stanford_indoor3d/Area_1_hallway_4.npy
data/stanford_indoor3d/Area_1_hallway_5.npy
data/stanford_indoor3d/Area_1_hallway_6.npy
data/stanford_indoor3d/Area_1_hallway_7.npy
data/stanford_indoor3d/Area_1_hallway_8.npy
data/stanford_indoor3d/Area_1_office_10.npy
data/stanford_indoor3d/Area_1_office_11.npy
data/stanford_indoor3d/Area_1_office_12.npy
data/stanford_indoor3d/Area_1_office_13.npy
data/stanford_indoor3d/Area_1_office_14.npy
data/stanford_indoor3d/Area_1_office_15.npy
data/stanford_indoor3d/Area_1_office_16.npy
data/stanford_indoor3d/Area_1_office_17.npy
data/stanford_indoor3d/Area_1_office_18.npy
data/stanford_indoor3d/Area_1_office_19.npy
data/stanford_indoor3d/Area_1_office_1.npy
data/stanford_indoor3d/Area_1_office_20.npy
data/stanford_indoor3d/Area_1_office_21.npy
data/stanford_indoor3d/Area_1_office_22.npy
data/stanford_indoor3d/Area_1_office_23.npy
data/stanford_indoor3d/Area_1_office_24.npy
data/stanford_indoor3d/Area_1_office_25.npy
data/stanford_indoor3d/Area_1_office_26.npy
data/stanford_indoor3d/Area_1_office_27.npy
data/stanford_indoor3d/Area_1_office_28.npy
data/stanford_indoor3d/Area_1_office_29.npy
data/stanford_indoor3d/Area_1_office_2.npy
data/stanford_indoor3d/Area_1_office_30.npy
data/stanford_indoor3d/Area_1_office_31.npy
data/stanford_indoor3d/Area_1_office_3.npy
data/stanford_indoor3d/Area_1_office_4.npy
data/stanford_indoor3d/Area_1_office_5.npy
data/stanford_indoor3d/Area_1_office_6.npy
data/stanford_indoor3d/Area_1_office_7.npy
data/stanford_indoor3d/Area_1_office_8.npy
data/stanford_indoor3d/Area_1_office_9.npy
data/stanford_indoor3d/Area_1_pantry_1.npy
data/stanford_indoor3d/Area_1_WC_1.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area2_data_label.txt
================================================
data/stanford_indoor3d/Area_2_auditorium_1.npy
data/stanford_indoor3d/Area_2_auditorium_2.npy
data/stanford_indoor3d/Area_2_conferenceRoom_1.npy
data/stanford_indoor3d/Area_2_hallway_10.npy
data/stanford_indoor3d/Area_2_hallway_11.npy
data/stanford_indoor3d/Area_2_hallway_12.npy
data/stanford_indoor3d/Area_2_hallway_1.npy
data/stanford_indoor3d/Area_2_hallway_2.npy
data/stanford_indoor3d/Area_2_hallway_3.npy
data/stanford_indoor3d/Area_2_hallway_4.npy
data/stanford_indoor3d/Area_2_hallway_5.npy
data/stanford_indoor3d/Area_2_hallway_6.npy
data/stanford_indoor3d/Area_2_hallway_7.npy
data/stanford_indoor3d/Area_2_hallway_8.npy
data/stanford_indoor3d/Area_2_hallway_9.npy
data/stanford_indoor3d/Area_2_office_10.npy
data/stanford_indoor3d/Area_2_office_11.npy
data/stanford_indoor3d/Area_2_office_12.npy
data/stanford_indoor3d/Area_2_office_13.npy
data/stanford_indoor3d/Area_2_office_14.npy
data/stanford_indoor3d/Area_2_office_1.npy
data/stanford_indoor3d/Area_2_office_2.npy
data/stanford_indoor3d/Area_2_office_3.npy
data/stanford_indoor3d/Area_2_office_4.npy
data/stanford_indoor3d/Area_2_office_5.npy
data/stanford_indoor3d/Area_2_office_6.npy
data/stanford_indoor3d/Area_2_office_7.npy
data/stanford_indoor3d/Area_2_office_8.npy
data/stanford_indoor3d/Area_2_office_9.npy
data/stanford_indoor3d/Area_2_storage_1.npy
data/stanford_indoor3d/Area_2_storage_2.npy
data/stanford_indoor3d/Area_2_storage_3.npy
data/stanford_indoor3d/Area_2_storage_4.npy
data/stanford_indoor3d/Area_2_storage_5.npy
data/stanford_indoor3d/Area_2_storage_6.npy
data/stanford_indoor3d/Area_2_storage_7.npy
data/stanford_indoor3d/Area_2_storage_8.npy
data/stanford_indoor3d/Area_2_storage_9.npy
data/stanford_indoor3d/Area_2_WC_1.npy
data/stanford_indoor3d/Area_2_WC_2.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area3_data_label.txt
================================================
data/stanford_indoor3d/Area_3_conferenceRoom_1.npy
data/stanford_indoor3d/Area_3_hallway_1.npy
data/stanford_indoor3d/Area_3_hallway_2.npy
data/stanford_indoor3d/Area_3_hallway_3.npy
data/stanford_indoor3d/Area_3_hallway_4.npy
data/stanford_indoor3d/Area_3_hallway_5.npy
data/stanford_indoor3d/Area_3_hallway_6.npy
data/stanford_indoor3d/Area_3_lounge_1.npy
data/stanford_indoor3d/Area_3_lounge_2.npy
data/stanford_indoor3d/Area_3_office_10.npy
data/stanford_indoor3d/Area_3_office_1.npy
data/stanford_indoor3d/Area_3_office_2.npy
data/stanford_indoor3d/Area_3_office_3.npy
data/stanford_indoor3d/Area_3_office_4.npy
data/stanford_indoor3d/Area_3_office_5.npy
data/stanford_indoor3d/Area_3_office_6.npy
data/stanford_indoor3d/Area_3_office_7.npy
data/stanford_indoor3d/Area_3_office_8.npy
data/stanford_indoor3d/Area_3_office_9.npy
data/stanford_indoor3d/Area_3_storage_1.npy
data/stanford_indoor3d/Area_3_storage_2.npy
data/stanford_indoor3d/Area_3_WC_1.npy
data/stanford_indoor3d/Area_3_WC_2.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area4_data_label.txt
================================================
data/stanford_indoor3d/Area_4_conferenceRoom_1.npy
data/stanford_indoor3d/Area_4_conferenceRoom_2.npy
data/stanford_indoor3d/Area_4_conferenceRoom_3.npy
data/stanford_indoor3d/Area_4_hallway_10.npy
data/stanford_indoor3d/Area_4_hallway_11.npy
data/stanford_indoor3d/Area_4_hallway_12.npy
data/stanford_indoor3d/Area_4_hallway_13.npy
data/stanford_indoor3d/Area_4_hallway_14.npy
data/stanford_indoor3d/Area_4_hallway_1.npy
data/stanford_indoor3d/Area_4_hallway_2.npy
data/stanford_indoor3d/Area_4_hallway_3.npy
data/stanford_indoor3d/Area_4_hallway_4.npy
data/stanford_indoor3d/Area_4_hallway_5.npy
data/stanford_indoor3d/Area_4_hallway_6.npy
data/stanford_indoor3d/Area_4_hallway_7.npy
data/stanford_indoor3d/Area_4_hallway_8.npy
data/stanford_indoor3d/Area_4_hallway_9.npy
data/stanford_indoor3d/Area_4_lobby_1.npy
data/stanford_indoor3d/Area_4_lobby_2.npy
data/stanford_indoor3d/Area_4_office_10.npy
data/stanford_indoor3d/Area_4_office_11.npy
data/stanford_indoor3d/Area_4_office_12.npy
data/stanford_indoor3d/Area_4_office_13.npy
data/stanford_indoor3d/Area_4_office_14.npy
data/stanford_indoor3d/Area_4_office_15.npy
data/stanford_indoor3d/Area_4_office_16.npy
data/stanford_indoor3d/Area_4_office_17.npy
data/stanford_indoor3d/Area_4_office_18.npy
data/stanford_indoor3d/Area_4_office_19.npy
data/stanford_indoor3d/Area_4_office_1.npy
data/stanford_indoor3d/Area_4_office_20.npy
data/stanford_indoor3d/Area_4_office_21.npy
data/stanford_indoor3d/Area_4_office_22.npy
data/stanford_indoor3d/Area_4_office_2.npy
data/stanford_indoor3d/Area_4_office_3.npy
data/stanford_indoor3d/Area_4_office_4.npy
data/stanford_indoor3d/Area_4_office_5.npy
data/stanford_indoor3d/Area_4_office_6.npy
data/stanford_indoor3d/Area_4_office_7.npy
data/stanford_indoor3d/Area_4_office_8.npy
data/stanford_indoor3d/Area_4_office_9.npy
data/stanford_indoor3d/Area_4_storage_1.npy
data/stanford_indoor3d/Area_4_storage_2.npy
data/stanford_indoor3d/Area_4_storage_3.npy
data/stanford_indoor3d/Area_4_storage_4.npy
data/stanford_indoor3d/Area_4_WC_1.npy
data/stanford_indoor3d/Area_4_WC_2.npy
data/stanford_indoor3d/Area_4_WC_3.npy
data/stanford_indoor3d/Area_4_WC_4.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area5_data_label.txt
================================================
data/stanford_indoor3d/Area_5_conferenceRoom_1.npy
data/stanford_indoor3d/Area_5_conferenceRoom_2.npy
data/stanford_indoor3d/Area_5_conferenceRoom_3.npy
data/stanford_indoor3d/Area_5_hallway_10.npy
data/stanford_indoor3d/Area_5_hallway_11.npy
data/stanford_indoor3d/Area_5_hallway_12.npy
data/stanford_indoor3d/Area_5_hallway_13.npy
data/stanford_indoor3d/Area_5_hallway_14.npy
data/stanford_indoor3d/Area_5_hallway_15.npy
data/stanford_indoor3d/Area_5_hallway_1.npy
data/stanford_indoor3d/Area_5_hallway_2.npy
data/stanford_indoor3d/Area_5_hallway_3.npy
data/stanford_indoor3d/Area_5_hallway_4.npy
data/stanford_indoor3d/Area_5_hallway_5.npy
data/stanford_indoor3d/Area_5_hallway_6.npy
data/stanford_indoor3d/Area_5_hallway_7.npy
data/stanford_indoor3d/Area_5_hallway_8.npy
data/stanford_indoor3d/Area_5_hallway_9.npy
data/stanford_indoor3d/Area_5_lobby_1.npy
data/stanford_indoor3d/Area_5_office_10.npy
data/stanford_indoor3d/Area_5_office_11.npy
data/stanford_indoor3d/Area_5_office_12.npy
data/stanford_indoor3d/Area_5_office_13.npy
data/stanford_indoor3d/Area_5_office_14.npy
data/stanford_indoor3d/Area_5_office_15.npy
data/stanford_indoor3d/Area_5_office_16.npy
data/stanford_indoor3d/Area_5_office_17.npy
data/stanford_indoor3d/Area_5_office_18.npy
data/stanford_indoor3d/Area_5_office_19.npy
data/stanford_indoor3d/Area_5_office_1.npy
data/stanford_indoor3d/Area_5_office_20.npy
data/stanford_indoor3d/Area_5_office_21.npy
data/stanford_indoor3d/Area_5_office_22.npy
data/stanford_indoor3d/Area_5_office_23.npy
data/stanford_indoor3d/Area_5_office_24.npy
data/stanford_indoor3d/Area_5_office_25.npy
data/stanford_indoor3d/Area_5_office_26.npy
data/stanford_indoor3d/Area_5_office_27.npy
data/stanford_indoor3d/Area_5_office_28.npy
data/stanford_indoor3d/Area_5_office_29.npy
data/stanford_indoor3d/Area_5_office_2.npy
data/stanford_indoor3d/Area_5_office_30.npy
data/stanford_indoor3d/Area_5_office_31.npy
data/stanford_indoor3d/Area_5_office_32.npy
data/stanford_indoor3d/Area_5_office_33.npy
data/stanford_indoor3d/Area_5_office_34.npy
data/stanford_indoor3d/Area_5_office_35.npy
data/stanford_indoor3d/Area_5_office_36.npy
data/stanford_indoor3d/Area_5_office_37.npy
data/stanford_indoor3d/Area_5_office_38.npy
data/stanford_indoor3d/Area_5_office_39.npy
data/stanford_indoor3d/Area_5_office_3.npy
data/stanford_indoor3d/Area_5_office_40.npy
data/stanford_indoor3d/Area_5_office_41.npy
data/stanford_indoor3d/Area_5_office_42.npy
data/stanford_indoor3d/Area_5_office_4.npy
data/stanford_indoor3d/Area_5_office_5.npy
data/stanford_indoor3d/Area_5_office_6.npy
data/stanford_indoor3d/Area_5_office_7.npy
data/stanford_indoor3d/Area_5_office_8.npy
data/stanford_indoor3d/Area_5_office_9.npy
data/stanford_indoor3d/Area_5_pantry_1.npy
data/stanford_indoor3d/Area_5_storage_1.npy
data/stanford_indoor3d/Area_5_storage_2.npy
data/stanford_indoor3d/Area_5_storage_3.npy
data/stanford_indoor3d/Area_5_storage_4.npy
data/stanford_indoor3d/Area_5_WC_1.npy
data/stanford_indoor3d/Area_5_WC_2.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/area6_data_label.txt
================================================
data/stanford_indoor3d/Area_6_conferenceRoom_1.npy
data/stanford_indoor3d/Area_6_copyRoom_1.npy
data/stanford_indoor3d/Area_6_hallway_1.npy
data/stanford_indoor3d/Area_6_hallway_2.npy
data/stanford_indoor3d/Area_6_hallway_3.npy
data/stanford_indoor3d/Area_6_hallway_4.npy
data/stanford_indoor3d/Area_6_hallway_5.npy
data/stanford_indoor3d/Area_6_hallway_6.npy
data/stanford_indoor3d/Area_6_lounge_1.npy
data/stanford_indoor3d/Area_6_office_10.npy
data/stanford_indoor3d/Area_6_office_11.npy
data/stanford_indoor3d/Area_6_office_12.npy
data/stanford_indoor3d/Area_6_office_13.npy
data/stanford_indoor3d/Area_6_office_14.npy
data/stanford_indoor3d/Area_6_office_15.npy
data/stanford_indoor3d/Area_6_office_16.npy
data/stanford_indoor3d/Area_6_office_17.npy
data/stanford_indoor3d/Area_6_office_18.npy
data/stanford_indoor3d/Area_6_office_19.npy
data/stanford_indoor3d/Area_6_office_1.npy
data/stanford_indoor3d/Area_6_office_20.npy
data/stanford_indoor3d/Area_6_office_21.npy
data/stanford_indoor3d/Area_6_office_22.npy
data/stanford_indoor3d/Area_6_office_23.npy
data/stanford_indoor3d/Area_6_office_24.npy
data/stanford_indoor3d/Area_6_office_25.npy
data/stanford_indoor3d/Area_6_office_26.npy
data/stanford_indoor3d/Area_6_office_27.npy
data/stanford_indoor3d/Area_6_office_28.npy
data/stanford_indoor3d/Area_6_office_29.npy
data/stanford_indoor3d/Area_6_office_2.npy
data/stanford_indoor3d/Area_6_office_30.npy
data/stanford_indoor3d/Area_6_office_31.npy
data/stanford_indoor3d/Area_6_office_32.npy
data/stanford_indoor3d/Area_6_office_33.npy
data/stanford_indoor3d/Area_6_office_34.npy
data/stanford_indoor3d/Area_6_office_35.npy
data/stanford_indoor3d/Area_6_office_36.npy
data/stanford_indoor3d/Area_6_office_37.npy
data/stanford_indoor3d/Area_6_office_3.npy
data/stanford_indoor3d/Area_6_office_4.npy
data/stanford_indoor3d/Area_6_office_5.npy
data/stanford_indoor3d/Area_6_office_6.npy
data/stanford_indoor3d/Area_6_office_7.npy
data/stanford_indoor3d/Area_6_office_8.npy
data/stanford_indoor3d/Area_6_office_9.npy
data/stanford_indoor3d/Area_6_openspace_1.npy
data/stanford_indoor3d/Area_6_pantry_1.npy
================================================
FILE: dgcnn/tensorflow/sem_seg/meta/class_names.txt
================================================
ceiling
floor
wall
beam
column
window
door
table
chair
sofa
bookcase
board
clutter
================================================
FILE: dgcnn/tensorflow/sem_seg/model.py
================================================
import tensorflow as tf
import math
import time
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(BASE_DIR, '../models'))
import tf_util
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 9))
labels_pl = tf.placeholder(tf.int32,
shape=(batch_size, num_point))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None, weight_decay=0.0):
""" DGCNN semantic segmentation baseline; input is BxNx9 per-point features (XYZ, RGB, normalized XYZ) """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
input_image = tf.expand_dims(point_cloud, -1)
k = 20
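# Note: the kNN graph is built on the last three input channels
# (point_cloud[:, :, 6:], i.e. the normalized room coordinates in the
# standard S3DIS 9-dim layout), while the edge features below are
# computed over all 9 input channels.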
adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])
nn_idx = tf_util.knn(adj, k=k) # (batch, num_points, k)
edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)
out1 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
out2 = tf_util.conv2d(out1, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv2', bn_decay=bn_decay, is_dist=True)
net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
adj = tf_util.pairwise_distance(net_1)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)
out3 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv3', bn_decay=bn_decay, is_dist=True)
out4 = tf_util.conv2d(out3, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
adj = tf_util.pairwise_distance(net_2)
nn_idx = tf_util.knn(adj, k=k)
edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)
out5 = tf_util.conv2d(edge_feature, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training, weight_decay=weight_decay,
scope='adj_conv5', bn_decay=bn_decay, is_dist=True)
# out6 = tf_util.conv2d(out5, 64, [1,1],
# padding='VALID', stride=[1,1],
# bn=True, is_training=is_training, weight_decay=weight_decay,
# scope='adj_conv6', bn_decay=bn_decay, is_dist=True)
net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)
out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='adj_conv7', bn_decay=bn_decay, is_dist=True)
out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')
expand = tf.tile(out_max, [1, num_point, 1, 1])
concat = tf.concat(axis=3, values=[expand,
net_1,
net_2,
net_3])
# CONV
net = tf_util.conv2d(concat, 512, [1,1], padding='VALID', stride=[1,1],
bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
activation_fn=None, scope='seg/conv3', is_dist=True)
net = tf.squeeze(net, [2])
return net
def get_loss(pred, label):
""" pred: B,N,13; label: B,N """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
return tf.reduce_mean(loss)
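# Minimal smoke test (a sketch, not part of the original training flow):
# builds the graph once and checks output shapes. Assumes a TF 1.x
# environment with the repo's tf_util importable.
if __name__ == '__main__':
    with tf.Graph().as_default():
        pts_pl, lbl_pl = placeholder_inputs(4, 1024)
        is_training_pl = tf.placeholder(tf.bool, shape=())
        logits = get_model(pts_pl, is_training_pl)
        loss = get_loss(logits, lbl_pl)
        print(logits.get_shape())  # expect (4, 1024, 13)
        print(loss.get_shape())    # expect scalar ()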
================================================
FILE: dgcnn/tensorflow/sem_seg/test_job.sh
================================================
python batch_inference.py --model_path log1/epoch_60.ckpt --dump_dir log1/dump --output_filelist log1/output_filelist.txt --room_data_filelist meta/area1_data_label.txt
python batch_inference.py --model_path log2/epoch_60.ckpt --dump_dir log2/dump --output_filelist log2/output_filelist.txt --room_data_filelist meta/area2_data_label.txt
python batch_inference.py --model_path log3/epoch_60.ckpt --dump_dir log3/dump --output_filelist log3/output_filelist.txt --room_data_filelist meta/area3_data_label.txt
python batch_inference.py --model_path log4/epoch_60.ckpt --dump_dir log4/dump --output_filelist log4/output_filelist.txt --room_data_filelist meta/area4_data_label.txt
python batch_inference.py --model_path log5/epoch_60.ckpt --dump_dir log5/dump --output_filelist log5/output_filelist.txt --room_data_filelist meta/area5_data_label.txt
python batch_inference.py --model_path log6/epoch_60.ckpt --dump_dir log6/dump --output_filelist log6/output_filelist.txt --room_data_filelist meta/area6_data_label.txt
================================================
FILE: dgcnn/tensorflow/sem_seg/train.py
================================================
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
from model import *
parser = argparse.ArgumentParser()
parser.add_argument('--num_gpu', type=int, default=2, help='the number of GPUs to use [default: 2]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=101, help='Epoch to run [default: 101]')
parser.add_argument('--batch_size', type=int, default=12, help='Batch Size during training for each GPU [default: 12]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')
FLAGS = parser.parse_args()
TOWER_NAME = 'tower'
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp model.py %s' % (LOG_DIR))
os.system('cp train.py %s' % (LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
NUM_CLASSES = 13
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data/all_files.txt')
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')]
print(len(room_filelist))
# Load ALL data
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
data_batch, label_batch = provider.loadDataFile(h5_filename)
data_batch_list.append(data_batch)
label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
print(data_batches.shape)
print(label_batches.shape)
test_area = 'Area_'+str(FLAGS.test_area)
train_idxs = []
test_idxs = []
for i,room_name in enumerate(room_filelist):
if test_area in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
train_data = data_batches[train_idxs,...]
train_label = label_batches[train_idxs]
test_data = data_batches[test_idxs,...]
test_label = label_batches[test_idxs]
print(train_data.shape, train_label.shape)
print(test_data.shape, test_label.shape)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
return learning_rate
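# With staircase=True this is equivalent to
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** floor(batch * BATCH_SIZE / DECAY_STEP), 1e-5)
# i.e. the rate is halved (DECAY_RATE = 0.5) every DECAY_STEP samples and floored at 1e-5.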
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
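# bn_momentum decays 0.5 -> 0.25 -> 0.125 -> ... every BN_DECAY_DECAY_STEP
# samples, so bn_decay = 1 - bn_momentum ramps 0.5 -> 0.75 -> 0.875 -> ...,
# capped at BN_DECAY_CLIP = 0.99 (the usual PointNet-style BN schedule).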
def average_gradients(tower_grads):
"""Calculate average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
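# Illustration (hypothetical tensors): with two towers sharing one variable v,
#   tower_grads = [[(g_gpu0, v)], [(g_gpu1, v)]]
# returns [((g_gpu0 + g_gpu1) / 2, v)]: the element-wise mean gradient paired
# with the first tower's handle to the shared variable.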
def train():
with tf.Graph().as_default(), tf.device('/cpu:0'):
batch = tf.Variable(0, trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
trainer = tf.train.AdamOptimizer(learning_rate)
tower_grads = []
pointclouds_phs = []
labels_phs = []
is_training_phs =[]
with tf.variable_scope(tf.get_variable_scope()):
for i in range(FLAGS.num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
pointclouds_phs.append(pointclouds_pl)
labels_phs.append(labels_pl)
is_training_phs.append(is_training_pl)
pred = get_model(pointclouds_phs[-1], is_training_phs[-1], bn_decay=bn_decay)
loss = get_loss(pred, labels_phs[-1])
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_phs[-1]))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
tf.get_variable_scope().reuse_variables()
grads = trainer.compute_gradients(loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
train_op = trainer.apply_gradients(grads, global_step=batch)
saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=10)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables for two GPUs
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init)
ops = {'pointclouds_phs': pointclouds_phs,
'labels_phs': labels_phs,
'is_training_phs': is_training_phs,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR,'epoch_' + str(epoch)+'.ckpt'))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string('----')
current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label)
file_size = current_data.shape[0]
num_batches = file_size // (FLAGS.num_gpu * BATCH_SIZE)
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
if batch_idx % 100 == 0:
print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))
# Two non-overlapping slices per step, one per tower (the feed below
# hardcodes num_gpu == 2).
start_idx_0 = (2 * batch_idx) * BATCH_SIZE
end_idx_0 = (2 * batch_idx + 1) * BATCH_SIZE
start_idx_1 = end_idx_0
end_idx_1 = (2 * batch_idx + 2) * BATCH_SIZE
feed_dict = {ops['pointclouds_phs'][0]: current_data[start_idx_0:end_idx_0, :, :],
ops['pointclouds_phs'][1]: current_data[start_idx_1:end_idx_1, :, :],
ops['labels_phs'][0]: current_label[start_idx_0:end_idx_0],
ops['labels_phs'][1]: current_label[start_idx_1:end_idx_1],
ops['is_training_phs'][0]: is_training,
ops['is_training_phs'][1]: is_training}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == current_label[start_idx_1:end_idx_1])
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
if __name__ == "__main__":
train()
LOG_FOUT.close()
================================================
FILE: dgcnn/tensorflow/sem_seg/train_job.sh
================================================
python train.py --log_dir log1 --test_area 1
python train.py --log_dir log2 --test_area 2
python train.py --log_dir log3 --test_area 3
python train.py --log_dir log4 --test_area 4
python train.py --log_dir log5 --test_area 5
python train.py --log_dir log6 --test_area 6
================================================
FILE: dgcnn/tensorflow/train.py
================================================
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='dgcnn', help='Model name: dgcnn')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
NUM_CLASSES = 40
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
# To fix the bug introduced in TF 0.12.1 as in
# http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
#sess.run(init)
sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
for fn in range(len(TRAIN_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
current_data = current_data[:,0:NUM_POINT,:]
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# Augment batched point clouds: rotate, jitter, random-scale, apply small rotational perturbations, and shift
rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
jittered_data = provider.jitter_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(jittered_data)
jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
jittered_data = provider.shift_point_cloud(jittered_data)
feed_dict = {ops['pointclouds_pl']: jittered_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
for fn in range(len(TEST_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
if __name__ == "__main__":
train()
LOG_FOUT.close()
================================================
FILE: dgcnn/tensorflow/utils/data_prep_util.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
import numpy as np
import h5py
SAMPLING_BIN = os.path.join(BASE_DIR, 'third_party/mesh_sampling/build/pcsample')
SAMPLING_POINT_NUM = 2048
SAMPLING_LEAF_SIZE = 0.005
MODELNET40_PATH = '../datasets/modelnet40'
def export_ply(pc, filename):
vertex = np.zeros(pc.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
for i in range(pc.shape[0]):
vertex[i] = (pc[i][0], pc[i][1], pc[i][2])
ply_out = PlyData([PlyElement.describe(vertex, 'vertex', comments=['vertices'])])
ply_out.write(filename)
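# Usage sketch: export_ply(np.random.rand(1024, 3), 'cloud.ply') writes a
# 1024-vertex PLY file via plyfile ('cloud.ply' is a hypothetical path).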
# Sample points on the obj shape
def get_sampling_command(obj_filename, ply_filename):
cmd = SAMPLING_BIN + ' ' + obj_filename
cmd += ' ' + ply_filename
cmd += ' -n_samples %d ' % SAMPLING_POINT_NUM
cmd += ' -leaf_size %f ' % SAMPLING_LEAF_SIZE
return cmd
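# Example (hypothetical filenames): get_sampling_command('chair.obj', 'chair.ply')
# yields a command string like
#   <BASE_DIR>/third_party/mesh_sampling/build/pcsample chair.obj chair.ply -n_samples 2048  -leaf_size 0.005000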
# --------------------------------------------------------------
# Following are the helper functions to load MODELNET40 shapes
# --------------------------------------------------------------
# Read in the list of categories in MODELNET40
def get_category_names():
shape_names_file = os.path.join(MODELNET40_PATH, 'shape_names.txt')
shape_names = [line.rstrip() for line in open(shape_names_file)]
return shape_names
# Return all the filepaths for the shapes in MODELNET40
def get_obj_filenames():
obj_filelist_file = os.path.join(MODELNET40_PATH, 'filelist.txt')
obj_filenames = [os.path.join(MODELNET40_PATH, line.rstrip()) for line in open(obj_filelist_file)]
print('Got %d obj files in modelnet40.' % len(obj_filenames))
return obj_filenames
# Helper function to create the parent folder and all subfolders if they do not exist
def batch_mkdir(output_folder, subdir_list):
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for subdir in subdir_list:
if not os.path.exists(os.path.join(output_folder, subdir)):
os.mkdir(os.path.join(output_folder, subdir))
# ----------------------------------------------------------------
# Following are the helper functions to save/load HDF5 files
# ----------------------------------------------------------------
# Write numpy array data and label to h5_filename
def save_h5_data_label_normal(h5_filename, data, label, normal,
data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
h5_fout = h5py.File(h5_filename, 'w')
h5_fout.create_dataset(
'data', data=data,
compression='gzip', compression_opts=4,
dtype=data_dtype)
h5_fout.create_dataset(
'normal', data=normal,
compression='gzip', compression_opts=4,
dtype=normal_dtype)
h5_fout.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype=label_dtype)
h5_fout.close()
# Write numpy array data and label to h5_filename
def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='uint8'):
h5_fout = h5py.File(h5_filename, 'w')
h5_fout.create_dataset(
'data', data=data,
compression='gzip', compression_opts=4,
dtype=data_dtype)
h5_fout.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype=label_dtype)
h5_fout.close()
# Read numpy array data and label from h5_filename
def load_h5_data_label_normal(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
normal = f['normal'][:]
return (data, label, normal)
# Read numpy array data and label from h5_filename
def load_h5_data_label_seg(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
seg = f['pid'][:]
return (data, label, seg)
# Read numpy array data and label from h5_filename
def load_h5(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
return (data, label)
# ----------------------------------------------------------------
# Following are the helper functions to save/load PLY files
# ----------------------------------------------------------------
# Load PLY file
def load_ply_data(filename, point_num):
plydata = PlyData.read(filename)
pc = plydata['vertex'].data[:point_num]
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
# Load PLY file
def load_ply_normal(filename, point_num):
plydata = PlyData.read(filename)
pc = plydata['normal'].data[:point_num]
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
# Pad an Nxk array with extra rows up to `row` rows
# `pad` is 'edge' (repeat the last row) or 'constant' (append zero rows)
def pad_arr_rows(arr, row, pad='edge'):
assert(len(arr.shape) == 2)
assert(arr.shape[0] <= row)
assert(pad == 'edge' or pad == 'constant')
if arr.shape[0] == row:
return arr
if pad == 'edge':
return np.lib.pad(arr, ((0, row-arr.shape[0]), (0, 0)), 'edge')
if pad == 'constant':
return np.lib.pad(arr, ((0, row-arr.shape[0]), (0, 0)), 'constant', constant_values=(0, 0))
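# Example (sketch): pad_arr_rows(np.array([[1, 2, 3], [4, 5, 6]]), 4) returns a
# 4x3 array whose last two rows repeat [4, 5, 6] (the default 'edge' mode);
# pad='constant' would append zero rows instead.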
================================================
FILE: dgcnn/tensorflow/utils/eulerangles.py
================================================
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
##############################################################################
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
##############################################################################
''' Module implementing Euler angle rotations and their conversions
See:
* http://en.wikipedia.org/wiki/Rotation_matrix
* http://en.wikipedia.org/wiki/Euler_angles
* http://mathworld.wolfram.com/EulerAngles.html
See also: *Representing Attitude with Euler Angles and Quaternions: A
Reference* (2006) by James Diebel. A cached PDF link last found here:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134
Euler's rotation theorem tells us that any rotation in 3D can be
described by 3 angles. Let's call the 3 angles the *Euler angle vector*
and call the angles in the vector :math:`alpha`, :math:`beta` and
:math:`gamma`. The vector is [ :math:`alpha`,
:math:`beta`, :math:`gamma` ] and, in this description, the order of the
parameters specifies the order in which the rotations occur (so the
rotation corresponding to :math:`alpha` is applied first).
In order to specify the meaning of an *Euler angle vector* we need to
specify the axes around which each of the rotations corresponding to
:math:`alpha`, :math:`beta` and :math:`gamma` will occur.
There are therefore three axes for the rotations :math:`alpha`,
:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,
:math:`k`.
Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3
rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3
matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the
whole rotation expressed by the Euler angle vector [ :math:`alpha`,
:math:`beta`, :math:`gamma` ], `R` is given by::
R = np.dot(G, np.dot(B, A))
See http://mathworld.wolfram.com/EulerAngles.html
The order :math:`G B A` expresses the fact that the rotations are
performed in the order of the vector (:math:`alpha` around axis `i` =
`A` first).
To convert a given Euler angle vector to a meaningful rotation, and a
rotation matrix, we need to define:
* the axes `i`, `j`, `k`
* whether a rotation matrix should be applied on the left of a vector to
be transformed (vectors are column vectors) or on the right (vectors
are row vectors).
* whether the rotations move the axes as they are applied (intrinsic
rotations) - compared to the situation where the axes stay fixed and the
vectors move within the axis frame (extrinsic)
* the handedness of the coordinate system
See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities
We are using the following conventions:
* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus
an Euler angle vector [ :math:`alpha`, :math:`beta`, :math:`gamma` ]
in our convention implies a :math:`alpha` radian rotation around the
`z` axis, followed by a :math:`beta` rotation around the `y` axis,
followed by a :math:`gamma` rotation around the `x` axis.
* the rotation matrix applies on the left, to column vectors on the
right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix
with N column vectors, the transformed vector set `vdash` is given by
``vdash = np.dot(R, v)``.
* extrinsic rotations - the axes are fixed, and do not move with the
rotations.
* a right-handed coordinate system
The convention of rotation around ``z``, followed by rotation around
``y``, followed by rotation around ``x``, is known (confusingly) as
"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
'''
import math
import sys
if sys.version_info >= (3,0):
from functools import reduce
import numpy as np
_FLOAT_EPS_4 = np.finfo(float).eps * 4.0
def euler2mat(z=0, y=0, x=0):
''' Return matrix for rotations around z, y and x axes
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
M : array shape (3,3)
Rotation matrix giving same rotation as for given angles
Examples
--------
>>> zrot = 1.3 # radians
>>> yrot = -0.1
>>> xrot = 0.2
>>> M = euler2mat(zrot, yrot, xrot)
>>> M.shape == (3, 3)
True
The output rotation matrix is equal to the composition of the
individual rotations
>>> M1 = euler2mat(zrot)
>>> M2 = euler2mat(0, yrot)
>>> M3 = euler2mat(0, 0, xrot)
>>> composed_M = np.dot(M3, np.dot(M2, M1))
>>> np.allclose(M, composed_M)
True
You can specify rotations by named arguments
>>> np.all(M3 == euler2mat(x=xrot))
True
When applying M to a vector, the vector should be a column vector to the
right of M. If the right hand side is a 2D array rather than a
vector, then each column of the 2D array represents a vector.
>>> vec = np.array([1, 0, 0]).reshape((3,1))
>>> v2 = np.dot(M, vec)
>>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
>>> vecs2 = np.dot(M, vecs)
Rotations are counter-clockwise.
>>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))
>>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
True
>>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))
>>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
True
>>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))
>>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
True
Notes
-----
The direction of rotation is given by the right-hand rule (orient
the thumb of the right hand along the axis around which the rotation
occurs, with the end of the thumb at the positive end of the axis;
curl your fingers; the direction your fingers curl is the direction
of rotation). Therefore, the rotations are counterclockwise if
looking along the axis of rotation from positive to negative.
'''
Ms = []
if z:
cosz = math.cos(z)
sinz = math.sin(z)
Ms.append(np.array(
[[cosz, -sinz, 0],
[sinz, cosz, 0],
[0, 0, 1]]))
if y:
cosy = math.cos(y)
siny = math.sin(y)
Ms.append(np.array(
[[cosy, 0, siny],
[0, 1, 0],
[-siny, 0, cosy]]))
if x:
cosx = math.cos(x)
sinx = math.sin(x)
Ms.append(np.array(
[[1, 0, 0],
[0, cosx, -sinx],
[0, sinx, cosx]]))
if Ms:
return reduce(np.dot, Ms[::-1])
return np.eye(3)
def mat2euler(M, cy_thresh=None):
''' Discover Euler angle vector from 3x3 matrix
Uses the conventions above.
Parameters
----------
M : array-like, shape (3,3)
cy_thresh : None or scalar, optional
threshold below which to give up on straightforward arctan for
estimating x rotation. If None (default), estimate from
precision of input.
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Notes
-----
If there was no numerical error, the routine could be derived using
Sympy expression for z then y then x rotation matrix, which is::
[ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
[cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
[sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]
with the obvious derivations for z, y, and x
z = atan2(-r12, r11)
y = asin(r13)
x = atan2(-r23, r33)
Problems arise when cos(y) is close to zero, because both of::
z = atan2(cos(y)*sin(z), cos(y)*cos(z))
x = atan2(cos(y)*sin(x), cos(x)*cos(y))
will be close to atan2(0, 0), and highly unstable.
The ``cy`` fix for numerical instability below is from: *Graphics
Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:
0123361559. Specifically it comes from EulerAngles.c by Ken
Shoemake, and deals with the case where cos(y) is close to zero:
See: http://www.graphicsgems.org/
The code appears to be licensed (from the website) as "can be used
without restrictions".
'''
M = np.asarray(M)
if cy_thresh is None:
try:
cy_thresh = np.finfo(M.dtype).eps * 4
except ValueError:
cy_thresh = _FLOAT_EPS_4
r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
# cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)
cy = math.sqrt(r33*r33 + r23*r23)
if cy > cy_thresh: # cos(y) not close to zero, standard form
z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
else: # cos(y) (close to) zero, so x -> 0.0 (see above)
# so r21 -> sin(z), r22 -> cos(z) and
z = math.atan2(r21, r22)
y = math.atan2(r13, cy) # atan2(sin(y), cy)
x = 0.0
return z, y, x
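# Round-trip check (editor's sketch): for angles in range, mat2euler inverts
# euler2mat up to floating-point precision.
# >>> z, y, x = mat2euler(euler2mat(0.1, 0.2, 0.3))
# >>> np.allclose((z, y, x), (0.1, 0.2, 0.3))
# True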
def euler2quat(z=0, y=0, x=0):
''' Return quaternion corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
quat : array shape (4,)
Quaternion in w, x, y, z (real, then vector) format
Notes
-----
We can derive this formula in Sympy using:
1. Formula giving quaternion corresponding to rotation of theta radians
about arbitrary axis:
http://mathworld.wolfram.com/EulerParameters.html
2. Generated formulae from 1.) for quaternions corresponding to
theta radians rotations about ``x, y, z`` axes
3. Apply quaternion multiplication formula -
http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to
formulae from 2.) to give formula for combined rotations.
'''
z = z/2.0
y = y/2.0
x = x/2.0
cz = math.cos(z)
sz = math.sin(z)
cy = math.cos(y)
sy = math.sin(y)
cx = math.cos(x)
sx = math.sin(x)
return np.array([
cx*cy*cz - sx*sy*sz,
cx*sy*sz + cy*cz*sx,
cx*cz*sy - sx*cy*sz,
cx*cy*sz + sx*cz*sy])
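# Sanity check (editor's sketch): the zero rotation maps to the identity
# quaternion, since cos(0) = 1 and sin(0) = 0 in every term above.
# >>> euler2quat(0, 0, 0)
# array([1., 0., 0., 0.])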
def quat2euler(q):
''' Return Euler angles corresponding to quaternion `q`
Parameters
----------
q : 4 element sequence
w, x, y, z of quaternion
Returns
-------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
the reduction in computation is small, and the code repetition is
large.
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
return mat2euler(nq.quat2mat(q))
def euler2angle_axis(z=0, y=0, x=0):
''' Return angle, axis corresponding to these Euler angles
Uses the z, then y, then x convention above
Parameters
----------
z : scalar
Rotation angle in radians around z-axis (performed first)
y : scalar
Rotation angle in radians around y-axis
x : scalar
Rotation angle in radians around x-axis (performed last)
Returns
-------
theta : scalar
angle of rotation
vector : array shape (3,)
axis around which rotation occurs
Examples
--------
>>> theta, vec = euler2angle_axis(0, 1.5, 0)
>>> print(theta)
1.5
>>> np.allclose(vec, [0, 1, 0])
True
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
return nq.quat2angle_axis(euler2quat(z, y, x))
def angle_axis2euler(theta, vector, is_normalized=False):
''' Convert angle, axis pair to Euler angles
Parameters
----------
theta : scalar
angle of rotation
vector : 3 element sequence
vector specifying axis for rotation.
is_normalized : bool, optional
True if vector is already normalized (has norm of 1). Default
False
Returns
-------
z : scalar
y : scalar
x : scalar
Rotations in radians around z, y, x axes, respectively
Examples
--------
>>> z, y, x = angle_axis2euler(0, [1, 0, 0])
>>> np.allclose((z, y, x), 0)
True
Notes
-----
It's possible to reduce the amount of calculation a little, by
combining parts of the ``angle_axis2mat`` and ``mat2euler``
functions, but the reduction in computation is small, and the code
repetition is large.
'''
# delayed import to avoid cyclic dependencies
import nibabel.quaternions as nq
M = nq.angle_axis2mat(theta, vector, is_normalized)
return mat2euler(M)
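# Round-trip example (editor's sketch; requires nibabel for the quaternion
# helpers used by euler2angle_axis and angle_axis2euler):
# >>> theta, vec = euler2angle_axis(0.3, 0.2, 0.1)
# >>> z, y, x = angle_axis2euler(theta, vec)
# >>> np.allclose((z, y, x), (0.3, 0.2, 0.1))
# True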
================================================
FILE: dgcnn/tensorflow/utils/pc_util.py
================================================
""" Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
# Quick shape check:
# a = np.zeros((16, 1024, 3))
# print(point_cloud_to_volume_batch(a, 12, 1.0, False).shape)  # (16, 12, 12, 12, 1)
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
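# Round-trip example (editor's sketch): voxelize a random cloud, then recover
# the occupied cells. Note the result is integer voxel indices, not the
# original coordinates.
# pts = np.random.uniform(-1, 1, (128, 3))
# vol = point_cloud_to_volume(pts, 12, radius=1.0)
# grid_pts = volume_to_point_cloud(vol)  # Nx3 array of occupied voxel indices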
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
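# I/O round trip (editor's sketch; the filename is illustrative):
# pts = np.random.rand(100, 3).astype(np.float32)
# write_ply(pts, '/tmp/cloud.ply', text=True)
# pts2 = read_ply('/tmp/cloud.ply')  # Nx3 array matching pts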
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud
# We normalize scale to fit points in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
return an numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
from PIL import Image
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
import matplotlib.pyplot as plt
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
================================================
FILE: dgcnn/tensorflow/utils/plyfile.py
================================================
# Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation
_data_type_relation = [
('int8', 'i1'),
('char', 'i1'),
('uint8', 'u1'),
('uchar', 'b1'),
('uchar', 'u1'),
('int16', 'i2'),
('short', 'i2'),
('uint16', 'u2'),
('ushort', 'u2'),
('int32', 'i4'),
('int', 'i4'),
('uint32', 'u4'),
('uint', 'u4'),
('float32', 'f4'),
('float', 'f4'),
('float64', 'f8'),
('double', 'f8')
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
if _a not in _types_set:
_types_list.append(_a)
_types_set.add(_a)
if _b not in _types_set:
_types_list.append(_b)
_types_set.add(_b)
_byte_order_map = {
'ascii': '=',
'binary_little_endian': '<',
'binary_big_endian': '>'
}
_byte_order_reverse = {
'<': 'binary_little_endian',
'>': 'binary_big_endian'
}
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
def _lookup_type(type_str):
if type_str not in _data_type_reverse:
try:
type_str = _data_types[type_str]
except KeyError:
raise ValueError("field type %r not in %r" %
(type_str, _types_list))
return _data_type_reverse[type_str]
def _split_line(line, n):
fields = line.split(None, n)
if len(fields) == n:
fields.append('')
assert len(fields) == n + 1
return fields
def make2d(array, cols=None, dtype=None):
'''
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
'''
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty "
"array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))],
count=len(array))['_']
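# Example (editor's sketch): make2d turns an object array of equal-length 1-D
# arrays (e.g. a list property read from a PLY element) into a regular 2D
# array. The element/property names below are hypothetical.
# faces = plydata['face'].data['vertex_indices']  # object array of 1-D arrays
# tri = make2d(faces)  # shape (len(faces), 3) for a triangle mesh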
class PlyParseError(Exception):
'''
Raised when a PLY file cannot be parsed.
The attributes `element', `row', `property', and `message' give
additional information.
'''
def __init__(self, message, element=None, row=None, prop=None):
self.message = message
self.element = element
self.row = row
self.prop = prop
s = ''
if self.element:
s += 'element %r: ' % self.element.name
if self.row is not None:
s += 'row %d: ' % self.row
if self.prop:
s += 'property %r: ' % self.prop.name
s += self.message
Exception.__init__(self, s)
def __repr__(self):
return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
(self.message, self.element, self.row, self.prop))
class PlyData(object):
'''
PLY file header and data.
A PlyData instance is created in one of two ways: by the static
method PlyData.read (to read a PLY file), or directly from __init__
given a sequence of elements (which can then be written to a PLY
file).
'''
def __init__(self, elements=[], text=False, byte_order='=',
comments=[], obj_info=[]):
'''
elements: sequence of PlyElement instances.
text: whether the resulting PLY file will be text (True) or
binary (False).
byte_order: '<' for little-endian, '>' for big-endian, or '='
for native. This is only relevant if `text' is False.
comments: sequence of strings that will be placed in the header
between the 'ply' and 'format ...' lines.
obj_info: like comments, but will be placed in the header with
"obj_info ..." instead of "comment ...".
'''
if byte_order == '=' and not text:
byte_order = _native_byte_order
self.byte_order = byte_order
self.text = text
self.comments = list(comments)
self.obj_info = list(obj_info)
self.elements = elements
def _get_elements(self):
return self._elements
def _set_elements(self, elements):
self._elements = tuple(elements)
self._index()
elements = property(_get_elements, _set_elements)
def _get_byte_order(self):
return self._byte_order
def _set_byte_order(self, byte_order):
if byte_order not in ['<', '>', '=']:
raise ValueError("byte order must be '<', '>', or '='")
self._byte_order = byte_order
byte_order = property(_get_byte_order, _set_byte_order)
def _index(self):
self._element_lookup = dict((elt.name, elt) for elt in
self._elements)
if len(self._element_lookup) != len(self._elements):
raise ValueError("two elements with same name")
@staticmethod
def _parse_header(stream):
'''
Parse a PLY header from a readable file-like stream.
'''
lines = []
comments = {'comment': [], 'obj_info': []}
while True:
line = stream.readline().decode('ascii').strip()
fields = _split_line(line, 1)
if fields[0] == 'end_header':
break
elif fields[0] in comments.keys():
lines.append(fields)
else:
lines.append(line.split())
a = 0
if lines[a] != ['ply']:
raise PlyParseError("expected 'ply'")
a += 1
while lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
if lines[a][0] != 'format':
raise PlyParseError("expected 'format'")
if lines[a][2] != '1.0':
raise PlyParseError("expected version '1.0'")
if len(lines[a]) != 3:
raise PlyParseError("too many fields after 'format'")
fmt = lines[a][1]
if fmt not in _byte_order_map:
raise PlyParseError("don't understand format %r" % fmt)
byte_order = _byte_order_map[fmt]
text = fmt == 'ascii'
a += 1
while a < len(lines) and lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
return PlyData(PlyElement._parse_multi(lines[a:]),
text, byte_order,
comments['comment'], comments['obj_info'])
@staticmethod
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\r\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
@property
def header(self):
'''
Provide PLY-formatted metadata for the instance.
'''
lines = ['ply']
if self.text:
lines.append('format ascii 1.0')
else:
lines.append('format ' +
_byte_order_reverse[self.byte_order] +
' 1.0')
# Some information is lost here, since all comments are placed
# between the 'format' line and the first element.
for c in self.comments:
lines.append('comment ' + c)
for c in self.obj_info:
lines.append('obj_info ' + c)
lines.extend(elt.header for elt in self.elements)
lines.append('end_header')
return '\r\n'.join(lines)
def __iter__(self):
return iter(self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, name):
return name in self._element_lookup
def __getitem__(self, name):
return self._element_lookup[name]
def __str__(self):
return self.header
def __repr__(self):
return ('PlyData(%r, text=%r, byte_order=%r, '
'comments=%r, obj_info=%r)' %
(self.elements, self.text, self.byte_order,
self.comments, self.obj_info))
def _open_stream(stream, read_or_write):
if hasattr(stream, read_or_write):
return (False, stream)
try:
return (True, open(stream, read_or_write[0] + 'b'))
except TypeError:
raise RuntimeError("expected open file or filename")
class PlyElement(object):
'''
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
'''
def __init__(self, name, properties, count, comments=[]):
'''
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
'''
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty)
for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop)
for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return [(prop.name, prop.dtype(byte_order))
for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
'''
Parse a list of PLY element definitions.
'''
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
'''
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
'''
a = 0
line = lines[a]
if line[0] != 'element':
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == 'comment':
comments.append(lines[a][1])
elif lines[a][0] == 'property':
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments),
lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={},
comments=[]):
'''
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
'''
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are "
"supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == 'O':
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == 'O':
if len(t) != 2:
raise ValueError("non-scalar object fields not "
"supported")
len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
if t[1][1] == 'O':
val_type = val_types.get(t[0], 'i4')
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
def _read(self, stream, text, byte_order):
'''
Read the actual data from a PLY file.
'''
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream,
self.dtype(byte_order),
self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
'''
Write the data to a PLY file.
'''
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order),
copy=False).tofile(stream)
def _read_txt(self, stream):
'''
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b''), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line",
self, k, prop)
except ValueError:
raise PlyParseError("malformed input",
self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
'''
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
'''
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def _read_bin(self, stream, byte_order):
'''
Load a PLY element from a binary PLY file. The element may
contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
for k in _range(self.count):
for prop in self.properties:
try:
self._data[prop.name][k] = \
prop._read_bin(stream, byte_order)
except StopIteration:
raise PlyParseError("early end-of-file",
self, k, prop)
def _write_bin(self, stream, byte_order):
'''
Save a PLY element to a binary PLY file. The element may
contain list properties.
'''
for rec in self.data:
for prop in self.properties:
prop._write_bin(rec[prop.name], stream, byte_order)
@property
def header(self):
'''
Format this element's metadata as it would appear in a PLY
header.
'''
lines = ['element %s %d' % (self.name, self.count)]
# Some information is lost here, since all comments are placed
# between the 'element' line and the first property definition.
for c in self.comments:
lines.append('comment ' + c)
lines.extend(list(map(str, self.properties)))
return '\r\n'.join(lines)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __str__(self):
return self.header
def __repr__(self):
return ('PlyElement(%r, %r, count=%d, comments=%r)' %
(self.name, self.properties, self.count,
self.comments))
class PlyProperty(object):
'''
PLY property description. This class is pure metadata; the data
itself is contained in PlyElement instances.
'''
def __init__(self, name, val_dtype):
self._name = str(name)
self._check_name()
self.val_dtype = val_dtype
def _get_val_dtype(self):
return self._val_dtype
def _set_val_dtype(self, val_dtype):
self._val_dtype = _data_types[_lookup_type(val_dtype)]
val_dtype = property(_get_val_dtype, _set_val_dtype)
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "Error: property name %r contains spaces" % self._name
raise RuntimeError(msg)
@staticmethod
def _parse_one(line):
assert line[0] == 'property'
if line[1] == 'list':
if len(line) > 5:
raise PlyParseError("too many fields after "
"'property list'")
if len(line) < 5:
raise PlyParseError("too few fields after "
"'property list'")
return PlyListProperty(line[4], line[2], line[3])
else:
if len(line) > 3:
raise PlyParseError("too many fields after "
"'property'")
if len(line) < 3:
raise PlyParseError("too few fields after "
"'property'")
return PlyProperty(line[2], line[1])
def dtype(self, byte_order='='):
'''
Return the numpy dtype description for this property (as a tuple
of strings).
'''
return byte_order + self.val_dtype
def _from_fields(self, fields):
'''
Parse from generator. Raise StopIteration if the property could
not be read.
'''
return _np.dtype(self.dtype()).type(next(fields))
def _to_fields(self, data):
'''
Return generator over one item.
'''
yield _np.dtype(self.dtype()).type(data)
def _read_bin(self, stream, byte_order):
'''
Read data from a binary stream. Raise StopIteration if the
property could not be read.
'''
try:
return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
except IndexError:
raise StopIteration
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
_np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
def __str__(self):
val_str = _data_type_reverse[self.val_dtype]
return 'property %s %s' % (val_str, self.name)
def __repr__(self):
return 'PlyProperty(%r, %r)' % (self.name,
_lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
'''
PLY list property description.
'''
def __init__(self, name, len_dtype, val_dtype):
PlyProperty.__init__(self, name, val_dtype)
self.len_dtype = len_dtype
def _get_len_dtype(self):
return self._len_dtype
def _set_len_dtype(self, len_dtype):
self._len_dtype = _data_types[_lookup_type(len_dtype)]
len_dtype = property(_get_len_dtype, _set_len_dtype)
def dtype(self, byte_order='='):
'''
List properties always have a numpy dtype of "object".
'''
return '|O'
def list_dtype(self, byte_order='='):
'''
Return the pair (len_dtype, val_dtype) (both numpy-friendly
strings).
'''
return (byte_order + self.len_dtype,
byte_order + self.val_dtype)
def _from_fields(self, fields):
(len_t, val_t) = self.list_dtype()
n = int(_np.dtype(len_t).type(next(fields)))
data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
if len(data) < n:
raise StopIteration
return data
def _to_fields(self, data):
'''
Return generator over the (numerical) PLY representation of the
list data (length followed by actual data).
'''
(len_t, val_t) = self.list_dtype()
data = _np.asarray(data, dtype=val_t).ravel()
yield _np.dtype(len_t).type(data.size)
for x in data:
yield x
def _read_bin(self, stream, byte_order):
(len_t, val_t) = self.list_dtype(byte_order)
try:
n = _np.fromfile(stream, len_t, 1)[0]
except IndexError:
raise StopIteration
data = _np.fromfile(stream, val_t, n)
if len(data) < n:
raise StopIteration
return data
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_np.array(data.size, dtype=len_t).tofile(stream)
data.tofile(stream)
def __str__(self):
len_str = _data_type_reverse[self.len_dtype]
val_str = _data_type_reverse[self.val_dtype]
return 'property list %s %s %s' % (len_str, val_str, self.name)
def __repr__(self):
return ('PlyListProperty(%r, %r, %r)' %
(self.name,
_lookup_type(self.len_dtype),
_lookup_type(self.val_dtype)))
================================================
FILE: dgcnn/tensorflow/utils/tf_util.py
================================================
""" Wrapper functions for TensorFlow layers.
Author: Charles R. Qi
Date: November 2016
Updated by Yue Wang and Yongbin Sun
"""
import numpy as np
import tensorflow as tf
def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable=True):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
return var
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
use_xavier: bool, whether to use xavier initializer
Returns:
Variable Tensor
"""
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu(name, shape, initializer)
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def conv1d(inputs,
num_output_channels,
kernel_size,
scope,
stride=1,
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None,
is_dist=False):
""" 1D convolution with non-linear operation.
Args:
inputs: 3-D tensor variable BxLxC
num_output_channels: int
kernel_size: int
scope: string
stride: int
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_size,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.nn.conv1d(inputs, kernel,
stride=stride,
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv1d(outputs, is_training,
bn_decay=bn_decay, scope='bn', is_dist=is_dist)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv2d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None,
is_dist=False):
""" 2D convolution with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
outputs = tf.nn.conv2d(inputs, kernel,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv2d(outputs, is_training,
bn_decay=bn_decay, scope='bn', is_dist=is_dist)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
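# Usage sketch (editor's note; the tensor and placeholder names are
# illustrative): a 1x1 convolution over per-point features with batch norm,
# the pattern used throughout the DGCNN graphs.
# net = conv2d(edge_feature, 64, [1, 1], scope='dgcnn1', padding='VALID',
#              stride=[1, 1], bn=True, is_training=is_training_pl,
#              bn_decay=bn_decay)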
def conv2d_transpose(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None,
is_dist=False):
""" 2D convolution transpose with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_output_channels, num_in_channels] # reversed relative to conv2d
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
# from slim.convolution2d_transpose
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
# calculate output shape
batch_size = inputs.get_shape()[0].value
height = inputs.get_shape()[1].value
width = inputs.get_shape()[2].value
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = [batch_size, out_height, out_width, num_output_channels]
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv2d(outputs, is_training,
bn_decay=bn_decay, scope='bn', is_dist=is_dist)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv3d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None,
is_dist=False):
""" 3D convolution with non-linear operation.
Args:
inputs: 5-D tensor variable BxDxHxWxC
num_output_channels: int
kernel_size: a list of 3 ints
scope: string
stride: a list of 3 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_d, kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_d, stride_h, stride_w = stride
outputs = tf.nn.conv3d(inputs, kernel,
[1, stride_d, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv3d(outputs, is_training,
bn_decay=bn_decay, scope='bn', is_dist=is_dist)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def fully_connected(inputs,
num_outputs,
scope,
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None,
is_dist=False):
""" Fully connected layer with non-linear operation.
Args:
inputs: 2-D tensor BxN
num_outputs: int
Returns:
Variable tensor of size B x num_outputs.
"""
with tf.variable_scope(scope) as sc:
num_input_units = inputs.get_shape()[-1].value
weights = _variable_with_weight_decay('weights',
shape=[num_input_units, num_outputs],
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.matmul(inputs, weights)
biases = _variable_on_cpu('biases', [num_outputs],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn', is_dist=is_dist)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def max_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D max pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D avg pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D max pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.max_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D avg pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
is_training: boolean tf.Variable, true indicates training phase
scope: string, variable scope
moments_dims: a list of ints, indicating dimensions for moments calculation
bn_decay: float or float tensor variable, controlling moving average weight
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
# Operator that maintains moving averages of variables.
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
# Update moving average and return current batch's avg and var.
def mean_var_with_update():
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
def batch_norm_dist_template(inputs, is_training, scope, moments_dims, bn_decay):
""" The batch normalization for distributed training.
Args:
inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
is_training: boolean tf.Variable, true indicates training phase
scope: string, variable scope
moments_dims: a list of ints, indicating dimensions for moments calculation
bn_decay: float or float tensor variable, controlling moving average weight
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = _variable_on_cpu('beta', [num_channels], initializer=tf.zeros_initializer())
gamma = _variable_on_cpu('gamma', [num_channels], initializer=tf.ones_initializer())
pop_mean = _variable_on_cpu('pop_mean', [num_channels], initializer=tf.zeros_initializer(), trainable=False)
pop_var = _variable_on_cpu('pop_var', [num_channels], initializer=tf.ones_initializer(), trainable=False)
def train_bn_op():
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
with tf.control_dependencies([train_mean, train_var]):
return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, 1e-3)
def test_bn_op():
return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, 1e-3)
normed = tf.cond(is_training,
train_bn_op,
test_bn_op)
return normed
def batch_norm_for_fc(inputs, is_training, bn_decay, scope, is_dist=False):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
is_dist: true indicates distributed training scheme
Return:
normed: batch-normalized maps
"""
if is_dist:
return batch_norm_dist_template(inputs, is_training, scope, [0,], bn_decay)
else:
return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, is_dist=False):
""" Batch normalization on 1D convolutional maps.
Args:
inputs: Tensor, 3D BLC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
is_dist: true indicates distributed training scheme
Return:
normed: batch-normalized maps
"""
if is_dist:
return batch_norm_dist_template(inputs, is_training, scope, [0,1], bn_decay)
else:
return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, is_dist=False):
""" Batch normalization on 2D convolutional maps.
Args:
inputs: Tensor, 4D BHWC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
is_dist: true indicates distributed training scheme
Return:
normed: batch-normalized maps
"""
if is_dist:
return batch_norm_dist_template(inputs, is_training, scope, [0,1,2], bn_decay)
else:
return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=False):
""" Batch normalization on 3D convolutional maps.
Args:
inputs: Tensor, 5D BDHWC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
is_dist: true indicates distributed training scheme
Return:
normed: batch-normalized maps
"""
if is_dist:
return batch_norm_dist_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
else:
return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
def dropout(inputs,
is_training,
scope,
keep_prob=0.5,
noise_shape=None):
""" Dropout layer.
Args:
inputs: tensor
is_training: boolean tf.Variable
scope: string
keep_prob: float in [0,1]
noise_shape: list of ints
Returns:
tensor variable
"""
with tf.variable_scope(scope) as sc:
outputs = tf.cond(is_training,
lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
lambda: inputs)
return outputs
def pairwise_distance(point_cloud):
"""Compute pairwise distance of a point cloud.
Args:
point_cloud: tensor (batch_size, num_points, num_dims)
Returns:
pairwise distance: (batch_size, num_points, num_points)
"""
og_batch_size = point_cloud.get_shape().as_list()[0]
point_cloud = tf.squeeze(point_cloud)
if og_batch_size == 1:
point_cloud = tf.expand_dims(point_cloud, 0)
point_cloud_transpose = tf.transpose(point_cloud, perm=[0, 2, 1])
point_cloud_inner = tf.matmul(point_cloud, point_cloud_transpose)
point_cloud_inner = -2*point_cloud_inner
point_cloud_square = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)
point_cloud_square_tranpose = tf.transpose(point_cloud_square, perm=[0, 2, 1])
return point_cloud_square + point_cloud_inner + point_cloud_square_tranpose
def knn(adj_matrix, k=20):
"""Get KNN based on the pairwise distance.
Args:
adj_matrix: pairwise distance tensor (batch_size, num_points, num_points)
k: int
Returns:
nearest neighbors: (batch_size, num_points, k)
"""
neg_adj = -adj_matrix
_, nn_idx = tf.nn.top_k(neg_adj, k=k)
return nn_idx
def get_edge_feature(point_cloud, nn_idx, k=20):
"""Construct edge feature for each point
Args:
point_cloud: (batch_size, num_points, 1, num_dims)
nn_idx: (batch_size, num_points, k)
k: int
Returns:
edge features: (batch_size, num_points, k, num_dims)
"""
og_batch_size = point_cloud.get_shape().as_list()[0]
point_cloud = tf.squeeze(point_cloud)
if og_batch_size == 1:
point_cloud = tf.expand_dims(point_cloud, 0)
point_cloud_central = point_cloud
point_cloud_shape = point_cloud.get_shape()
batch_size = point_cloud_shape[0].value
num_points = point_cloud_shape[1].value
num_dims = point_cloud_shape[2].value
idx_ = tf.range(batch_size) * num_points
idx_ = tf.reshape(idx_, [batch_size, 1, 1])
point_cloud_flat = tf.reshape(point_cloud, [-1, num_dims])
point_cloud_neighbors = tf.gather(point_cloud_flat, nn_idx+idx_)
point_cloud_central = tf.expand_dims(point_cloud_central, axis=-2)
point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])
edge_feature = tf.concat([point_cloud_central, point_cloud_neighbors-point_cloud_central], axis=-1)
return edge_feature
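# Putting the pieces together (editor's sketch, TF1 graph mode; shapes noted
# for a batch of 32 clouds of 1024 points):
# pc = tf.placeholder(tf.float32, shape=(32, 1024, 3))
# adj = pairwise_distance(pc)                      # (32, 1024, 1024)
# nn_idx = knn(adj, k=20)                          # (32, 1024, 20)
# edge_feat = get_edge_feature(pc, nn_idx, k=20)   # (32, 1024, 20, 6)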
================================================
FILE: download.sh
================================================
###
# @Description:
# @Author: Jiachen Sun
# @Date: 2022-02-16 22:23:16
# @LastEditors: Jiachen Sun
# @LastEditTime: 2022-02-17 13:09:42
###
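# Usage: bash download.sh {pretrained|runs|cor_exp|modelnet40_c|modelnet40|mesh}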
wgetgdrive(){
# $1 = file ID
# $2 = file name
URL="https://docs.google.com/uc?export=download&id=$1"
wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate $URL -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=$1" -O $2 && rm -rf /tmp/cookies.txt
}
mkdir -p tmp
key="$1"
case $key in
pretrained)
wgetgdrive 1qSkMYYK1qkT4wMMeAXerSI2Q7AxWujsS tmp/pretrained.zip
unzip -o tmp/pretrained.zip
;;
runs)
mkdir -p runs
cd runs
python ../gdrivedl.py https://drive.google.com/drive/folders/1UT-OfAsQ1OGSa6HSLZcK6YyJeIkaJUfF?usp=sharing
cd ..
;;
cor_exp)
mkdir -p cor_exp
cd cor_exp
python ../gdrivedl.py https://drive.google.com/drive/folders/1iYcJwFCFm9JWSiL1puIVfjpEgNF2dSoy?usp=sharing
cd ..
;;
modelnet40_c)
mkdir -p data/modelnet40_c
cd data/modelnet40_c
python ../../gdrivedl.py https://drive.google.com/drive/folders/10YeQRh92r_WdL-Dnog2zQfFr03UW4qXX?usp=sharing
cd ../..
;;
modelnet40)
wget --no-check-certificate https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip
unzip modelnet40_ply_hdf5_2048.zip
mv modelnet40_ply_hdf5_2048 data
rm -r modelnet40_ply_hdf5_2048.zip
wgetgdrive 1jXe7UR6He-pV3B7vIxMAjEt63Vhy1bV8 tmp/modelnet40_ply_hdf5_2048_valid_small.zip
unzip -o tmp/modelnet40_ply_hdf5_2048_valid_small.zip
mv modelnet40_ply_hdf5_2048_valid_small/* data/modelnet40_ply_hdf5_2048/
rm -r modelnet40_ply_hdf5_2048_valid_small
wget http://modelnet.cs.princeton.edu/ModelNet40.zip
unzip ModelNet40.zip
mv ModelNet40 data
rm -r ModelNet40.zip
rm -rf modelnet40_ply_hdf5_2048
;;
mesh)
wget --no-check-certificate http://modelnet.cs.princeton.edu/ModelNet40.zip
unzip ModelNet40.zip
mv ModelNet40 data
rm -r ModelNet40.zip
;;
*)
echo "unknow argument $1" # unknown argument
;;
esac
rm -r tmp
================================================
FILE: emd/README.md
================================================
## Earth Mover's Distance of point clouds

Compared to the Chamfer Distance (CD), the Earth Mover's Distance (EMD) is more reliable for distinguishing the visual quality of point clouds. See our [paper](http://cseweb.ucsd.edu/~mil070/projects/AAAI2020/paper.pdf) for more details.
We provide an EMD implementation for point cloud comparison that needs only $O(n)$ memory, enabling dense point clouds (10,000 points or more) and large batch sizes. It is based on an approximate algorithm (the auction algorithm) and therefore yields a near-bijective, but not guaranteed bijective, assignment. A parameter $\epsilon$ balances the error rate against the speed of convergence: smaller $\epsilon$ gives more accurate results but takes longer to converge. The time complexity is $O(n^2 k)$, where $k$ is the number of iterations. We set $\epsilon = 0.005$, $k = 50$ during training and $\epsilon = 0.002$, $k = 10000$ during testing.
### Compile
Run `python3 setup.py install` to compile.
### Example
See `emd_module.py/test_emd()` for examples.
### Input
- **xyz1, xyz2**: float tensors with shape `[#batch, #points, 3]`. `xyz1` is the predicted point cloud and `xyz2` is the ground-truth point cloud. The two point clouds must have the same size and be normalized to [0, 1]. The number of points must be a multiple of 1024, and the batch size no greater than 512. Since gradients are only computed for `xyz1`, do not swap `xyz1` and `xyz2`.
- **eps**: a float tensor, the parameter that balances the error rate against the speed of convergence.
- **iters**: an int tensor, the number of iterations.
### Output
- **dist**: a float tensor with shape `[#batch, #points]`. `sqrt(dist)` gives the L2 distances between matched pairs of points.
- **assignment**: an int tensor with shape `[#batch, #points]`, the index of the matched point in the ground-truth point cloud.
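### Usage
A minimal sketch for orientation; it assumes the compiled extension is wrapped by an `emdModule` class in `emd_module.py` (see `test_emd()` there for the authoritative example) and uses the training-time $\epsilon$ and iteration count quoted above. Treat the names as indicative rather than definitive.
```python
import torch
from emd_module import emdModule  # assumed wrapper around the compiled CUDA op

emd = emdModule()
# Both clouds normalized to [0, 1]; the point count must be a multiple of 1024
# and the batch size no greater than 512 (see the input notes above).
xyz1 = torch.rand(8, 1024, 3).cuda()  # prediction (gradients flow through xyz1)
xyz2 = torch.rand(8, 1024, 3).cuda()  # ground truth
dist, assignment = emd(xyz1, xyz2, 0.005, 50)  # eps = 0.005, iters = 50
loss = torch.sqrt(dist).mean()  # sqrt(dist) gives per-point L2 distances
```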
================================================
FILE: emd/emd.cpp
================================================
// EMD approximation module (based on auction algorithm)
// author: Minghua Liu
#include <torch/extension.h>  // provides at::Tensor and the pybind11 binding macros
#include <vector>
int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price,
at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments,
at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters);
int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx);
int emd_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price,
at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments,
at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) {
return emd_cuda_forward(xyz1, xyz2, dist, assignment, price, assignment_inv, bid, bid_increments, max_increments, unass_idx, unass_cnt, unass_cnt_sum, cnt_tmp, max_idx, eps, iters);
}
int emd_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx) {
return emd_cuda_backward(xyz1, xyz2, gradxyz, graddist, idx);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &emd_forward, "emd forward (CUDA)");
m.def("backward", &emd_backward, "emd backward (CUDA)");
}
================================================
FILE: emd/emd_cuda.cu
================================================
// EMD approximation module (based on auction algorithm)
// author: Minghua Liu
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__global__ void clear(int b, int * cnt_tmp, int * unass_cnt) {
for (int i = threadIdx.x; i < b; i += blockDim.x) {
cnt_tmp[i] = 0;
unass_cnt[i] = 0;
}
}
__global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) {
// count the number of unassigned points in each batch
const int BLOCK_SIZE = 1024;
__shared__ int scan_array[BLOCK_SIZE];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0;
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
if (threadIdx.x == BLOCK_SIZE - 1) {
atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]);
}
__syncthreads();
}
}
__global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) {
// compute the cumulative sum over unass_cnt
const int BLOCK_SIZE = 512; // batch_size <= 512
__shared__ int scan_array[BLOCK_SIZE];
scan_array[threadIdx.x] = unass_cnt[threadIdx.x];
__syncthreads();
int stride = 1;
while(stride <= BLOCK_SIZE / 2) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if(index < BLOCK_SIZE)
scan_array[index] += scan_array[index - stride];
stride = stride * 2;
__syncthreads();
}
__syncthreads();
stride = BLOCK_SIZE / 4;
while(stride > 0) {
int index = (threadIdx.x + 1) * stride * 2 - 1;
if((index + stride) < BLOCK_SIZE)
scan_array[index + stride] += scan_array[index];
stride = stride / 2;
__syncthreads();
}
__syncthreads();
//printf("%d\n", unass_cnt_sum[b - 1]);
unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x];
}
__global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) {
// list all the unassigned points
for (int i = blockIdx.x; i < b; i += gridDim.x) {
if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) {
int idx = atomicAdd(&cnt_tmp[i], 1);
unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x;
}
}
}
__global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price,
int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) {
const int batch = 2048, block_size = 1024, block_cnt = n / 1024;
__shared__ float xyz2_buf[batch * 3];
__shared__ float price_buf[batch];
__shared__ float best_buf[block_size];
__shared__ float better_buf[block_size];
__shared__ int best_i_buf[block_size];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int _unass_cnt = unass_cnt[i];
if (_unass_cnt == 0)
continue;
int _unass_cnt_sum = unass_cnt_sum[i];
int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt;
int thread_per_unass = block_size / unass_per_block;
int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0);
float x1, y1, z1, best = -1e9, better = -1e9;
int best_i = -1, _unass_id = -1, thread_in_unass;
if (threadIdx.x < thread_per_unass * unass_this_block) {
_unass_id = unass_per_block * blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt;
_unass_id = unass_idx[_unass_id];
thread_in_unass = threadIdx.x % thread_per_unass;
x1 = xyz1[(i * n + _unass_id) * 3 + 0];
y1 = xyz1[(i * n + _unass_id) * 3 + 1];
z1 = xyz1[(i * n + _unass_id) * 3 + 2];
}
for (int k2 = 0; k2 < n; k2 += batch) {
int end_k = min(n, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) {
xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j];
}
for (int j = threadIdx.x; j < end_k; j += blockDim.x) {
price_buf[j] = price[i * n + k2 + j];
}
__syncthreads();
if (_unass_id != -1) {
int delta = (end_k + thread_per_unass - 1) / thread_per_unass;
int l = thread_in_unass * delta;
int r = min((thread_in_unass + 1) * delta, end_k);
for (int k = l; k < r; k++)
//if (!last || assignment_inv[i * n + k + k2] == -1)
{
float x2 = xyz2_buf[k * 3 + 0] - x1;
float y2 = xyz2_buf[k * 3 + 1] - y1;
float z2 = xyz2_buf[k * 3 + 2] - z1;
// the coordinates of points should be normalized to [0, 1]
float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k];
if (d > best) {
better = best;
best = d;
best_i = k + k2;
}
else if (d > better) {
better = d;
}
}
}
__syncthreads();
}
best_buf[threadIdx.x] = best;
better_buf[threadIdx.x] = better;
best_i_buf[threadIdx.x] = best_i;
__syncthreads();
if (_unass_id != -1 && thread_in_unass == 0) {
for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) {
if (best_buf[j] > best) {
better = max(best, better_buf[j]);
best = best_buf[j];
best_i = best_i_buf[j];
}
else better = max(better, best_buf[j]);
}
bid[i * n + _unass_id] = best_i;
bid_increments[i * n + _unass_id] = best - better + eps;
atomicMax(&max_increments[i * n + best_i], best - better + eps);
}
}
}
__global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
float bid_inc = bid_increments[i * n + j];
float max_inc = max_increments[i * n + bid_id];
if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6)
{
max_idx[i * n + bid_id] = j;
}
}
}
}
__global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
if (assignment[i * n + j] == -1) {
int bid_id = bid[i * n + j];
if (last || max_idx[i * n + bid_id] == j)
{
float bid_inc = bid_increments[i * n + j];
int ass_inv = assignment_inv[i * n + bid_id];
if (!last && ass_inv != -1) {
assignment[i * n + ass_inv] = -1;
}
assignment_inv[i * n + bid_id] = j;
assignment[i * n + j] = bid_id;
price[i * n + bid_id] += bid_inc;
max_increments[i * n + bid_id] = -1e9;
}
}
}
}
__global__ void CalcDist(int b, int n, float * xyz1, float * xyz2, float * dist, int * assignment) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
int j = threadIdx.x + blockIdx.y * blockDim.x;
int k = assignment[i * n + j];
float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0];
float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1];
float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2];
dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * deltaz;
}
}
int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price,
at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments,
at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) {
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
if (n != m) {
printf("Input Error! The two point clouds should have the same size.\n");
return -1;
}
if (batch_size > 512) {
printf("Input Error! The batch size should be less than 512.\n");
return -1;
}
if (n % 1024 != 0) {
printf("Input Error! The size of the point clouds should be a multiple of 1024.\n");
return -1;
}
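// The kernels below are launched with 1024-thread blocks on a grid of
// dim3(batch_size, n / 1024, 1), hence the requirement that n be a multiple
// of 1024; the single-block scan in calc_unass_cnt_sum caps batch_size at 512.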
//cudaEvent_t start,stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start);
//int iters = 50;
for (int i = 0; i < iters; i++) {
clear<<<1, batch_size>>>(batch_size, cnt_tmp.data<int>(), unass_cnt.data<int>());
calc_unass_cnt<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_cnt.data<int>());
calc_unass_cnt_sum<<<1, batch_size>>>(batch_size, unass_cnt.data<int>(), unass_cnt_sum.data<int>());
calc_unass_idx<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_idx.data<int>(), unass_cnt.data<int>(),
unass_cnt_sum.data<int>(), cnt_tmp.data<int>());
Bid<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), eps, assignment.data<int>(), assignment_inv.data<int>(),
price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(),
unass_cnt.data<int>(), unass_cnt_sum.data<int>(), unass_idx.data<int>());
GetMax<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>());
Assign<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(),
bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>(), i == iters - 1);
}
CalcDist<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), dist.data<float>(), assignment.data<int>());
//cudaEventRecord(stop);
//cudaEventSynchronize(stop);
//float elapsedTime;
//cudaEventElapsedTime(&elapsedTime,start,stop);
//printf("%lf\n", elapsedTime);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Output: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b, int n, const float * xyz1, const float * xyz2, const float * grad_dist, const int * idx, float * grad_xyz){
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz1[(i * n + j) * 3 + 0];
float y1 = xyz1[(i * n + j) * 3 + 1];
float z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = idx[i * n + j];
float x2 = xyz2[(i * n + j2) * 3 + 0];
float y2 = xyz2[(i * n + j2) * 3 + 1];
float z2 = xyz2[(i * n + j2) * 3 + 2];
float g = grad_dist[i * n + j] * 2;
atomicAdd(&(grad_xyz[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz[(i * n + j) * 3 + 2]), g * (z1 - z2));
}
}
}
int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1);
const auto m = xyz2.size(1);
NmDistanceGradKernel<<<dim3(batch_size, n / 256, 1), 256>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), graddist.data<float>(), idx.data<int>(), gradxyz.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
================================================
FILE: emd/emd_module.py
================================================
# EMD approximation module (based on auction algorithm)
# memory complexity: O(n)
# time complexity: O(n^2 * iter)
# author: Minghua Liu
# Input:
# xyz1, xyz2: [#batch, #points, 3]
# where xyz1 is the predicted point cloud and xyz2 is the ground truth point cloud
# the two point clouds should have the same size and be normalized to [0, 1]
# #points should be a multiple of 1024
# #batch should be no greater than 512
# eps is a parameter which balances the error rate and the speed of convergence
# iters is the number of iterations
# we only calculate gradient for xyz1
# Output:
# dist: [#batch, #points], sqrt(dist) -> L2 distance
# assignment: [#batch, #points], index of the matched point in the ground truth point cloud
# the result is an approximation and the assignment is not guaranteed to be a bijection
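# Minimal usage sketch (hypothetical tensor names; assumes the `emd` CUDA
# extension has been built, e.g. via setup.py in this directory):
#   module = emdModule()
#   dist, assignment = module(pred_pc, gt_pc, 0.005, 50)  # pred_pc, gt_pc: [B, N, 3]
#   loss = torch.sqrt(dist).mean()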
import time
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
import emd
class emdFunction(Function):
@staticmethod
def forward(ctx, xyz1, xyz2, eps, iters):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
assert(n == m)
assert(xyz1.size()[0] == xyz2.size()[0])
assert(n % 1024 == 0)
assert(batchsize <= 512)
xyz1 = xyz1.contiguous().float().cuda()
xyz2 = xyz2.contiguous().float().cuda()
dist = torch.zeros(batchsize, n, device='cuda').contiguous()
assignment = torch.zeros(batchsize, n, device='cuda', dtype=torch.int32).contiguous() - 1
assignment_inv = torch.zeros(batchsize, m, device='cuda', dtype=torch.int32).contiguous() - 1
price = torch.zeros(batchsize, m, device='cuda').contiguous()
bid = torch.zeros(batchsize, n, device='cuda', dtype=torch.int32).contiguous()
bid_increments = torch.zeros(batchsize, n, device='cuda').contiguous()
max_increments = torch.zeros(batchsize, m, device='cuda').contiguous()
unass_idx = torch.zeros(batchsize * n, device='cuda', dtype=torch.int32).contiguous()
max_idx = torch.zeros(batchsize * m, device='cuda', dtype=torch.int32).contiguous()
unass_cnt = torch.zeros(512, dtype=torch.int32, device='cuda').contiguous()
unass_cnt_sum = torch.zeros(512, dtype=torch.int32, device='cuda').contiguous()
cnt_tmp = torch.zeros(512, dtype=torch.int32, device='cuda').contiguous()
emd.forward(xyz1, xyz2, dist, assignment, price, assignment_inv, bid, bid_increments, max_increments, unass_idx, unass_cnt, unass_cnt_sum, cnt_tmp, max_idx, eps, iters)
ctx.save_for_backward(xyz1, xyz2, assignment)
return dist, assignment
@staticmethod
def backward(ctx, graddist, gradidx):
xyz1, xyz2, assignment = ctx.saved_tensors
graddist = graddist.contiguous()
gradxyz1 = torch.zeros(xyz1.size(), device='cuda').contiguous()
gradxyz2 = torch.zeros(xyz2.size(), device='cuda').contiguous()
emd.backward(xyz1, xyz2, gradxyz1, graddist, assignment)
return gradxyz1, gradxyz2, None, None
class emdModule(nn.Module):
def __init__(self):
super(emdModule, self).__init__()
def forward(self, input1, input2, eps, iters):
return emdFunction.apply(input1, input2, eps, iters)
def test_emd():
x1 = torch.rand(20, 8192, 3).cuda()
x2 = torch.rand(20, 8192, 3).cuda()
emd = emdModule()
start_time = time.perf_counter()
dist, assignment = emd(x1, x2, 0.05, 3000)
print("Input_size: ", x1.shape)
print("Runtime: %lfs" % (time.perf_counter() - start_time))
print("EMD: %lf" % np.sqrt(dist.cpu()).mean())
print("|set(assignment)|: %d" % assignment.unique().numel())
assignment = assignment.cpu().numpy()
assignment = np.expand_dims(assignment, -1)
# move everything to numpy before gathering; np.take_along_axis cannot consume CUDA tensors
x1 = x1.cpu().numpy()
x2 = x2.cpu().numpy()
x2 = np.take_along_axis(x2, assignment, axis=1)
d = (x1 - x2) * (x1 - x2)
print("Verified EMD: %lf" % np.sqrt(d.sum(-1)).mean())
#test_emd()
================================================
FILE: emd/setup.py
================================================
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='emd',
ext_modules=[
CUDAExtension('emd', [
'emd.cpp',
'emd_cuda.cu',
]),
],
cmdclass={
'build_ext': BuildExtension
})
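# A typical build/install of the extension (assumes a CUDA-enabled PyTorch):
#   python setup.py install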
================================================
FILE: eval_cor.sh
================================================
###
# @Description:
# @Author: Jiachen Sun
# @Date: 2022-02-16 22:23:16
# @LastEditors: Jiachen Sun
# @LastEditTime: 2022-02-23 17:20:27
###
if [ ! -d "output" ]; then
mkdir "output"
fi
for model in 'gdanet'; do #'pointnet' 'pct' 'rscnn' 'pointnet2' 'simpleview' 'dgcnn' 'pointMLP' 'curvenet'; do
for cor in 'uniform' 'gaussian' 'background' 'impulse' 'upsampling' 'distortion_rbf' 'distortion_rbf_inv' 'density' 'density_inc' 'shear' 'rotation' 'cutout' 'distortion' 'occlusion' 'lidar'; do
for sev in 1 2 3 4 5; do
# for aug in 'rsmix' 'cutmix_r' 'cutmix_k' 'mixup' 'pgd'; do
# CUDA_VISIBLE_DEVICES=0 python main.py --entry test --model-path runs/${aug}_${model}_run_1/model_best_test.pth --exp-config configs/corruption/${model}.yaml --severity ${sev} --corruption ${cor} --output ./output/${model}_${aug}_${cor}_${sev}.txt
# done
# for adapt in 'tent' 'bn'; do
# CUDA_VISIBLE_DEVICES=0 python main.py --entry test --model-path cor_exp/dgcnn_${model}_run_1/model_best_test.pth --exp-config configs/${adapt}/${model}.yaml --severity ${sev} --corruption ${cor} --output ./output/${model}_${adapt}_${cor}_${sev}.txt
# done
CUDA_VISIBLE_DEVICES=0 python main.py --entry test --model-path runs/dgcnn_${model}_run_1/model_best_test.pth --exp-config configs/corruption/${model}.yaml --severity ${sev} --corruption ${cor} --output ./output/${model}_none_${cor}_${sev}.txt
done
done
done
================================================
FILE: eval_og.sh
================================================
if [ ! -d "output" ]; then
mkdir "output"
fi
for model in 'gdanet'; do #'dgcnn' 'rscnn' 'pct' 'pointnet' 'pointnet2' 'simpleview' 'curvenet' 'pointMLP'; do
# for aug in 'pgd'; do
CUDA_VISIBLE_DEVICES=1 python main.py --entry test --model-path runs/dgcnn_${model}_run_1/model_best_test.pth --exp-config configs/dgcnn_${model}_run_1.yaml --output ./output/${model}_clean.txt
done
================================================
FILE: eval_tent_cutmix.sh
================================================
if [ ! -d "output" ]; then
mkdir "output"
fi
for model in 'rscnn' 'pct' 'pointnet' 'pointnet2' 'simpleview' 'dgcnn'; do
for cor in 'uniform' 'gaussian' 'background' 'impulse' 'upsampling' 'distortion_rbf' 'distortion_rbf_inv' 'density' 'density_inc' 'shear' 'rotation' 'cutout' 'distortion' 'occlusion' 'lidar'; do
for sev in 1 2 3 4 5; do
CUDA_VISIBLE_DEVICES=0 python main.py --entry test --model-path runs/cutmix_r_${model}_run_1/model_best_test.pth --exp-config configs/tent_cutmix/${model}.yaml --severity ${sev} --corruption ${cor} --output ./output/${model}_megamerger_${cor}_${sev}.txt
done
done
done
================================================
FILE: gdrivedl.py
================================================
#!/usr/bin/env python
from __future__ import unicode_literals
import json
import os
import re
import sys
import unicodedata
import argparse
import logging
try:
#Python3
from urllib.request import Request, urlopen, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
except ImportError:
#Python2
from urllib2 import Request, urlopen, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
ITEM_URL = 'https://drive.google.com/open?id={id}'
FILE_URL = 'https://docs.google.com/uc?export=download&id={id}&confirm={confirm}'
FOLDER_URL = 'https://drive.google.com/embeddedfolderview?id={id}#list'
CHUNKSIZE = 4096
USER_AGENT = 'Mozilla/5.0'
ID_PATTERNS = [
re.compile('/file/d/([0-9A-Za-z_-]{10,})(?:/|$)', re.IGNORECASE),
re.compile('/folders/([0-9A-Za-z_-]{10,})(?:/|$)', re.IGNORECASE),
re.compile('id=([0-9A-Za-z_-]{10,})(?:&|$)', re.IGNORECASE),
re.compile('([0-9A-Za-z_-]{10,})', re.IGNORECASE)
]
FOLDER_PATTERN = re.compile('<a href="(https://drive.google.com/.*?)".*?<div class="flip-entry-title">(.*?)</div>',
re.DOTALL | re.IGNORECASE)
CONFIRM_PATTERN = re.compile("download_warning[0-9A-Za-z_-]+=([0-9A-Za-z_-]+);",
re.IGNORECASE)
FILENAME_PATTERN = re.compile('attachment;filename="(.*?)"',
re.IGNORECASE)
def output(text):
try:
sys.stdout.write(text)
except UnicodeEncodeError:
sys.stdout.write(text.encode('utf8'))
# Big thanks to leo_wallentin for below sanitize function (modified slightly for this script)
# https://gitlab.com/jplusplus/sanitize-filename/-/blob/master/sanitize_filename/sanitize_filename.py
def sanitize(filename):
blacklist = ["\\", "/", ":", "*", "?", "\"", "<", ">", "|", "\0"]
reserved = [
"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5",
"COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5",
"LPT6", "LPT7", "LPT8", "LPT9",
]
filename = "".join(c for c in filename if c not in blacklist)
filename = "".join(c for c in filename if 31 < ord(c))
filename = unicodedata.normalize("NFKD", filename)
filename = filename.rstrip(". ")
filename = filename.strip()
if all([x == "." for x in filename]):
filename = "_" + filename
if filename in reserved:
filename = "_" + filename
if len(filename) == 0:
filename = "_"
if len(filename) > 255:
parts = re.split(r"/|\\", filename)[-1].split(".")
if len(parts) > 1:
ext = "." + parts.pop()
filename = filename[:-len(ext)]
else:
ext = ""
if filename == "":
filename = "_"
if len(ext) > 254:
ext = ext[254:]
maxl = 255 - len(ext)
filename = filename[:maxl]
filename = filename + ext
filename = filename.rstrip(". ")
if len(filename) == 0:
filename = "_"
return filename
def url_to_id(url):
for pattern in ID_PATTERNS:
match = pattern.search(url)
if match:
return match.group(1)
logging.error('Unable to get ID from {}'.format(url))
sys.exit(1)
class GDriveDL(object):
def __init__(self, quiet=False, overwrite=False):
self._quiet = quiet
self._overwrite = overwrite
self._create_empty_dirs = True
self._opener = build_opener(HTTPCookieProcessor(CookieJar()))
def _request(self, url):
logging.debug('Requesting: {}'.format(url))
req = Request(url, headers={'User-Agent': USER_AGENT})
return self._opener.open(req)
def process_url(self, url, directory, filename=None):
id = url_to_id(url)
if '://' not in url:
url = ITEM_URL.format(id=id)
resp = self._request(url)
url = resp.geturl()
if '/file/' in url.lower():
self.process_file(id, directory, filename=filename)
elif '/folders/' in url.lower():
if filename:
logging.warning("Ignoring --output-document option for folder download")
self.process_folder(id, directory)
else:
logging.error('That id {} returned an unknown url {}'.format(id, url))
sys.exit(1)
def process_folder(self, id, directory):
url = FOLDER_URL.format(id=id)
resp = self._request(url)
html = resp.read().decode('utf-8')
matches = re.findall(FOLDER_PATTERN, html)
if not matches and 'ServiceLogin' in html:
logging.error('Folder: {} does not have link sharing enabled'.format(id))
sys.exit(1)
for match in matches:
url, item_name = match
id = url_to_id(url)
if '/file/' in url.lower():
self.process_file(id, directory, filename=sanitize(item_name))
elif '/folders/' in url.lower():
self.process_folder(id, os.path.join(directory, sanitize(item_name)))
if self._create_empty_dirs and not os.path.exists(directory):
os.makedirs(directory)
logging.info('Directory: {directory} [Created]'.format(directory=directory))
def process_file(self, id, directory, filename=None, confirm=''):
file_path = None
if filename:
file_path = filename if os.path.isabs(filename) else os.path.join(directory, filename)
if not self._overwrite and os.path.exists(file_path):
logging.info('{file_path} [Exists]'.format(file_path=file_path))
return
url = FILE_URL.format(id=id, confirm=confirm)
resp = self._request(url)
if 'ServiceLogin' in resp.url:
logging.error('File: {} does not have link sharing enabled'.format(id))
sys.exit(1)
cookies = resp.headers.get('Set-Cookie') or ''
if not confirm and 'download_warning' in cookies:
confirm = CONFIRM_PATTERN.search(cookies)
return self.process_file(id, directory, filename=filename, confirm=confirm.group(1))
if not file_path:
filename = FILENAME_PATTERN.search(resp.headers.get('content-disposition')).group(1)
file_path = os.path.join(directory, sanitize(filename))
if not self._overwrite and os.path.exists(file_path):
logging.info('{file_path} [Exists]'.format(file_path=file_path))
return
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
logging.info('Directory: {directory} [Created]'.format(directory=directory))
try:
with open(file_path, 'wb') as f:
dl = 0
last_out = 0
while True:
chunk = resp.read(CHUNKSIZE)
if not chunk:
break
if b'Too many users have viewed or downloaded this file recently' in chunk:
logging.error('Quota exceeded for this file')
sys.exit(1)
dl += len(chunk)
f.write(chunk)
if not self._quiet and (not last_out or dl-last_out > 1048576):
output("\r{} {:.2f}MB".format(
file_path,
dl / 1024 / 1024,
))
last_out = dl
sys.stdout.flush()
except:
if os.path.exists(file_path):
os.remove(file_path)
raise
if not self._quiet:
output('\n')
def main(args=None):
parser = argparse.ArgumentParser(description='Download Google Drive files & folders')
parser.add_argument("url", help="Shared Google Drive URL")
parser.add_argument("-P", "--directory-prefix", default='.', help="Output directory (default is current directory)")
parser.add_argument("-O", "--output-document", help="Output filename. Defaults to the GDrive filename. Not valid when downloading folders")
parser.add_argument("-q", "--quiet", help="Disable console output", default=False, action="store_true")
args = parser.parse_args(args)
if args.quiet:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARN)
else:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
url = args.url
id = ''
for pattern in ID_PATTERNS:
match = pattern.search(url)
if match:
id = match.group(1)
break
if not id:
logging.error('Unable to get ID from {}'.format(url))
sys.exit(1)
gdrive = GDriveDL(quiet=args.quiet, overwrite=args.output_document is not None)
gdrive.process_url(url, directory=args.directory_prefix, filename=args.output_document)
if __name__ == "__main__":
main()
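# Example invocation (hypothetical share link):
#   python gdrivedl.py "https://drive.google.com/file/d/FILE_ID/view" -P ./data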
================================================
FILE: main.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import random
from dataloader import create_dataloader
from time import time
from datetime import datetime
from progressbar import ProgressBar
import models
from collections import defaultdict
import os
import numpy as np
import argparse
from all_utils import (
TensorboardManager, PerfTrackTrain,
PerfTrackVal, TrackTrain, smooth_loss, DATASET_NUM_CLASS,
rscnn_voting_evaluate_cls, pn2_vote_evaluate_cls)
from configs import get_cfg_defaults
import pprint
from pointnet_pyt.pointnet.model import feature_transform_regularizer
import sys
import aug_utils
from third_party import bn_helper, tent_helper
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if DEVICE.type == 'cpu':
print('WARNING: Using CPU')
def adapt_bn(data,model,cfg):
model = bn_helper.configure_model(model,eps=1e-5, momentum=0.1,reset_stats=False,no_stats=False)
for _ in range(cfg.ITER):
model(**data)
print("Adaptation Done ...")
model.eval()
return model
def adapt_tent(data,model,cfg):
model = tent_helper.configure_model(model,eps=1e-5, momentum=0.1)
parameter,_ = tent_helper.collect_params(model)
optimizer_tent = torch.optim.SGD(parameter, lr=0.001,momentum=0.9)
for _ in range(cfg.ITER):
# index = np.random.choice(args.number,args.batch_size,replace=False)
tent_helper.forward_and_adapt(data,model,optimizer_tent)
print("Adaptation Done ...")
model.eval()
return model
def check_inp_fmt(task, data_batch, dataset_name):
if task in ['cls', 'cls_trans']:
# assert set(data_batch.keys()) == {'pc', 'label'}
# print(data_batch['pc'],data_batch['label'])
pc, label = data_batch['pc'], data_batch['label']
# special case made for modelnet40_dgcnn to match the
# original implementation
# dgcnn loads torch.DoubleTensor for the test dataset
if dataset_name == 'modelnet40_dgcnn':
assert isinstance(pc, torch.FloatTensor) or isinstance(
pc, torch.DoubleTensor)
else:
assert isinstance(pc, torch.FloatTensor)
assert isinstance(label, torch.LongTensor)
assert len(pc.shape) == 3
assert len(label.shape) == 1
b1, _, y = pc.shape[0], pc.shape[1], pc.shape[2]
b2 = label.shape[0]
assert b1 == b2
assert y == 3
assert label.max().item() < DATASET_NUM_CLASS[dataset_name]
assert label.min().item() >= 0
else:
# `assert NotImplemented` is always truthy, so unsupported tasks used to pass silently
raise NotImplementedError(task)
def check_out_fmt(task, out, dataset_name):
if task == 'cls':
assert set(out.keys()) == {'logit'}
logit = out['logit']
assert isinstance(logit, torch.FloatTensor if DEVICE.type == 'cpu' else torch.cuda.FloatTensor)
assert len(logit.shape) == 2
assert DATASET_NUM_CLASS[dataset_name] == logit.shape[1]
elif task == 'cls_trans':
assert set(out.keys()) == {'logit', 'trans_feat'}
logit = out['logit']
trans_feat = out['trans_feat']
assert isinstance(logit, torch.FloatTensor if DEVICE.type == 'cpu' else torch.cuda.FloatTensor)
assert isinstance(trans_feat, torch.FloatTensor if DEVICE.type == 'cpu' else torch.cuda.FloatTensor)
assert len(logit.shape) == 2
assert len(trans_feat.shape) == 3
assert trans_feat.shape[0] == logit.shape[0]
# 64 coming from pointnet implementation
assert (trans_feat.shape[1] == trans_feat.shape[2]) and (trans_feat.shape[1] == 64)
assert DATASET_NUM_CLASS[dataset_name] == logit.shape[1]
else:
raise NotImplementedError(task)
def get_inp(task, model, data_batch, batch_proc, dataset_name):
check_inp_fmt(task, data_batch, dataset_name)
if not batch_proc is None:
data_batch = batch_proc(data_batch, DEVICE)
check_inp_fmt(task, data_batch, dataset_name)
if isinstance(model, nn.DataParallel):
model_type = type(model.module)
else:
model_type = type(model)
if task in ['cls', 'cls_trans']:
pc = data_batch['pc']
inp = {'pc': pc}
else:
assert False
return inp
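# For mixup/CutMix-style batches, data_batch carries a second label and a mixing
# weight lam, and get_loss below interpolates the two label terms:
#   loss = (1 - lam) * L(logit, label) + lam * L(logit, label_2)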
def get_loss(task, loss_name, data_batch, out, dataset_name):
"""
Returns the tensor loss function
:param task:
:param loss_name:
:param data_batch: batched data; note not applied data_batch
:param out: output from the model
:param dataset_name:
:return: tensor
"""
check_out_fmt(task, out, dataset_name)
if task == 'cls':
label = data_batch['label'].to(out['logit'].device)
if loss_name == 'cross_entropy':
if 'label_2' in data_batch.keys():
label_2 = data_batch['label_2'].to(out['logit'].device)
if isinstance(data_batch['lam'],torch.Tensor):
loss = 0
for i in range(data_batch['pc'].shape[0]):
loss_tmp = smooth_loss(out['logit'][i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - data_batch['lam'][i]) + smooth_loss(out['logit'][i].unsqueeze(0), label_2[i].unsqueeze(0).long()) * data_batch['lam'][i]
loss += loss_tmp
loss = loss / data_batch['pc'].shape[0]
else:
loss = smooth_loss(out['logit'], label) * (1 - data_batch['lam']) + smooth_loss(out['logit'], label_2) * data_batch['lam']
else:
loss = F.cross_entropy(out['logit'], label)
# source: https://github.com/WangYueFt/dgcnn/blob/master/pytorch/util.py
elif loss_name == 'smooth':
if 'label_2' in data_batch.keys():
label_2 = data_batch['label_2'].to(out['logit'].device)
if isinstance(data_batch['lam'],torch.Tensor):
loss = 0
for i in range(data_batch['pc'].shape[0]):
loss_tmp = smooth_loss(out['logit'][i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - data_batch['lam'][i]) + smooth_loss(out['logit'][i].unsqueeze(0), label_2[i].unsqueeze(0).long()) * data_batch['lam'][i]
loss += loss_tmp
loss = loss / data_batch['pc'].shape[0]
else:
loss = smooth_loss(out['logit'], label) * (1 - data_batch['lam']) + smooth_loss(out['logit'], label_2) * data_batch['lam']
else:
loss = smooth_loss(out['logit'], label)
else:
assert False
elif task == 'cls_trans':
label = data_batch['label'].to(out['logit'].device)
trans_feat = out['trans_feat']
logit = out['logit']
if loss_name == 'cross_entropy':
if 'label_2' in data_batch.keys():
label_2 = data_batch['label_2'].to(out['logit'].device)
if isinstance(data_batch['lam'],torch.Tensor):
loss = 0
for i in range(data_batch['pc'].shape[0]):
loss_tmp = smooth_loss(out['logit'][i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - data_batch['lam'][i]) + smooth_loss(out['logit'][i].unsqueeze(0), label_2[i].unsqueeze(0).long()) * data_batch['lam'][i]
loss += loss_tmp
loss = loss / data_batch['pc'].shape[0]
else:
loss = smooth_loss(out['logit'], label) * (1 - data_batch['lam']) + smooth_loss(out['logit'], label_2) * data_batch['lam']
else:
loss = F.cross_entropy(out['logit'], label)
loss += feature_transform_regularizer(trans_feat) * 0.001
elif loss_name == 'smooth':
if 'label_2' in data_batch.keys():
label_2 = data_batch['label_2'].to(out['logit'].device)
if isinstance(data_batch['lam'],torch.Tensor):
loss = 0
for i in range(data_batch['pc'].shape[0]):
loss_tmp = smooth_loss(out['logit'][i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - data_batch['lam'][i]) + smooth_loss(out['logit'][i].unsqueeze(0), label_2[i].unsqueeze(0).long()) * data_batch['lam'][i]
loss += loss_tmp
loss = loss / data_batch['pc'].shape[0]
else:
loss = smooth_loss(out['logit'], label) * (1 - data_batch['lam']) + smooth_loss(out['logit'], label_2) * data_batch['lam']
else:
loss = smooth_loss(out['logit'], label)
loss += feature_transform_regularizer(trans_feat) * 0.001
else:
assert False
else:
assert False
return loss
def validate(task, loader, model, dataset_name, adapt = None, confusion = False):
model.eval()
def get_extra_param():
return None
perf = PerfTrackVal(task, extra_param=get_extra_param())
time_dl = 0
time_gi = 0
time_model = 0
time_upd = 0
with torch.no_grad():
bar = ProgressBar(max_value=len(loader))
time5 = time()
if confusion:
pred = []
ground = []
for i, data_batch in enumerate(loader):
time1 = time()
inp = get_inp(task, model, data_batch, loader.dataset.batch_proc, dataset_name)
time2 = time()
if adapt.METHOD == 'bn':
model = adapt_bn(inp,model,adapt)
elif adapt.METHOD == 'tent':
model = adapt_tent(inp,model,adapt)
out = model(**inp)
if confusion:
pred.append(out['logit'].squeeze().cpu())
ground.append(data_batch['label'].squeeze().cpu())
time3 = time()
perf.update(data_batch=data_batch, out=out)
time4 = time()
time_dl += (time1 - time5)
time_gi += (time2 - time1)
time_model += (time3 - time2)
time_upd += (time4 - time3)
time5 = time()
bar.update(i)
print(f"Time DL: {time_dl}, Time Get Inp: {time_gi}, Time Model: {time_model}, Time Update: {time_upd}")
if not confusion:
return perf.agg()
else:
pred = np.argmax(torch.cat(pred).numpy(), axis=1)
# print(pred)
ground = torch.cat(ground).numpy()
# print(ground)
return perf.agg(), pred, ground
def train(task, loader, model, optimizer, loss_name, dataset_name, cfg):
model.train()
def get_extra_param():
return None
perf = PerfTrackTrain(task, extra_param=get_extra_param())
time_forward = 0
time_backward = 0
time_data_loading = 0
time3 = time()
for i, data_batch in enumerate(loader):
time1 = time()
if cfg.AUG.NAME == 'cutmix_r':
data_batch = aug_utils.cutmix_r(data_batch,cfg)
elif cfg.AUG.NAME == 'cutmix_k':
data_batch = aug_utils.cutmix_k(data_batch,cfg)
elif cfg.AUG.NAME == 'mixup':
data_batch = aug_utils.mixup(data_batch,cfg)
elif cfg.AUG.NAME == 'rsmix':
data_batch = aug_utils.rsmix(data_batch,cfg)
elif cfg.AUG.NAME == 'pgd':
data_batch = aug_utils.pgd(data_batch,model, task, loss_name, dataset_name)
model.train()
# print(data_batch)
inp = get_inp(task, model, data_batch, loader.dataset.batch_proc, dataset_name)
out = model(**inp)
loss = get_loss(task, loss_name, data_batch, out, dataset_name)
perf.update_all(data_batch=data_batch, out=out, loss=loss)
time2 = time()
if loss.ne(loss).any():
print("WARNING: avoiding step as nan in the loss")
else:
optimizer.zero_grad()
loss.backward()
bad_grad = False
for x in model.parameters():
if x.grad is not None:
if x.grad.ne(x.grad).any():
print("WARNING: nan in a gradient")
bad_grad = True
break
if ((x.grad == float('inf')) | (x.grad == float('-inf'))).any():
print("WARNING: inf in a gradient")
bad_grad = True
break
if bad_grad:
print("WARNING: avoiding step as bad gradient")
else:
optimizer.step()
time_data_loading += (time1 - time3)
time_forward += (time2 - time1)
time3 = time()
time_backward += (time3 - time2)
if i % 50 == 0:
print(
f"[{i}/{len(loader)}] avg_loss: {perf.agg_loss()}, FW time = {round(time_forward, 2)}, "
f"BW time = {round(time_backward, 2)}, DL time = {round(time_data_loading, 2)}")
return perf.agg(), perf.agg_loss()
def save_checkpoint(id, epoch, model, optimizer, lr_sched, bnm_sched, test_perf, cfg):
model.cpu()
path = f"./runs/{cfg.EXP.EXP_ID}/model_{id}.pth"
torch.save({
'cfg': vars(cfg),
'epoch': epoch,
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
'lr_sched_state': lr_sched.state_dict(),
'bnm_sched_state': bnm_sched.state_dict() if bnm_sched is not None else None,
'test_perf': test_perf,
}, path)
print('Checkpoint saved to %s' % path)
model.to(DEVICE)
def load_best_checkpoint(model, cfg):
path = f"./runs/{cfg.EXP.EXP_ID}/model_best.pth"
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model_state'])
print('Checkpoint loaded from %s' % path)
def load_model_opt_sched(model, optimizer, lr_sched, bnm_sched, model_path):
print(f'Recovering model and checkpoint from {model_path}')
checkpoint = torch.load(model_path)
try:
model.load_state_dict(checkpoint['model_state'])
except:
if isinstance(model, nn.DataParallel):
model.module.load_state_dict(checkpoint['model_state'])
else:
model = nn.DataParallel(model)
model.load_state_dict(checkpoint['model_state'])
model = model.module
optimizer.load_state_dict(checkpoint['optimizer_state'])
# for backward compatibility with saved models
if 'lr_sched_state' in checkpoint:
lr_sched.load_state_dict(checkpoint['lr_sched_state'])
if checkpoint['bnm_sched_state'] is not None:
bnm_sched.load_state_dict(checkpoint['bnm_sched_state'])
else:
print("WARNING: lr scheduler and bnm scheduler states are not loaded.")
return model
def get_model(cfg):
if cfg.EXP.MODEL_NAME == 'simpleview':
model = models.MVModel(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET,
**cfg.MODEL.MV)
elif cfg.EXP.MODEL_NAME == 'rscnn':
model = models.RSCNN(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET,
**cfg.MODEL.RSCNN)
elif cfg.EXP.MODEL_NAME == 'pointnet2':
model = models.PointNet2(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET,
**cfg.MODEL.PN2)
elif cfg.EXP.MODEL_NAME == 'dgcnn':
model = models.DGCNN(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'pointnet':
model = models.PointNet(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'pct':
model = models.Pct(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'pointMLP':
model = models.pointMLP(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'pointMLP2':
model = models.pointMLP2(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'curvenet':
model = models.CurveNet(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
elif cfg.EXP.MODEL_NAME == 'gdanet':
model = models.GDANET(
task=cfg.EXP.TASK,
dataset=cfg.EXP.DATASET)
else:
assert False
return model
def get_metric_from_perf(task, perf, metric_name):
if task in ['cls', 'cls_trans']:
assert metric_name in ['acc']
metric = perf[metric_name]
else:
assert False
return metric
def get_optimizer(optim_name, tr_arg, model):
if optim_name == 'vanilla':
optimizer = torch.optim.Adam(
model.parameters(),
lr=tr_arg.learning_rate,
weight_decay=tr_arg.l2)
lr_sched = lr_scheduler.ReduceLROnPlateau(
optimizer,
mode='min',
factor=tr_arg.lr_decay_factor,
patience=tr_arg.lr_reduce_patience,
verbose=True,
min_lr=tr_arg.lr_clip)
bnm_sched = None
elif optim_name == 'pct':
optimizer = torch.optim.Adam(
model.parameters(),
lr=tr_arg.learning_rate,
weight_decay=tr_arg.l2)
lr_sched = lr_scheduler.CosineAnnealingLR(
optimizer,
tr_arg.num_epochs,
eta_min=tr_arg.learning_rate)
bnm_sched = None
else:
assert False
return optimizer, lr_sched, bnm_sched
def entry_train(cfg, resume=False, model_path=""):
loader_train = create_dataloader(split='train', cfg=cfg)
loader_valid = create_dataloader(split='valid', cfg=cfg)
loader_test = create_dataloader(split='test', cfg=cfg)
model = get_model(cfg)
model.to(DEVICE)
print(model)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
optimizer, lr_sched, bnm_sched = get_optimizer(cfg.EXP.OPTIMIZER, cfg.TRAIN, model)
if resume:
model = load_model_opt_sched(model, optimizer, lr_sched, bnm_sched, model_path)
else:
assert model_path == ""
log_dir = f"./runs/{cfg.EXP.EXP_ID}"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
tb = TensorboardManager(log_dir)
track_train = TrackTrain(early_stop_patience=cfg.TRAIN.early_stop)
for epoch in range(cfg.TRAIN.num_epochs):
print(f'Epoch {epoch}')
print('Training..')
train_perf, train_loss = train(cfg.EXP.TASK, loader_train, model, optimizer, cfg.EXP.LOSS_NAME, cfg.EXP.DATASET, cfg)
pprint.pprint(train_perf, width=80)
tb.update('train', epoch, train_perf)
if (not cfg.EXP_EXTRA.no_val) and epoch % cfg.EXP_EXTRA.val_eval_freq == 0:
print('\nValidating..')
val_perf = validate(cfg.EXP.TASK, loader_valid, model, cfg.EXP.DATASET, cfg.ADAPT)
pprint.pprint(val_perf, width=80)
tb.update('val', epoch, val_perf)
else:
val_perf = defaultdict(float)
if (not cfg.EXP_EXTRA.no_test) and (epoch % cfg.EXP_EXTRA.test_eval_freq == 0):
print('\nTesting..')
test_perf = validate(cfg.EXP.TASK, loader_test, model, cfg.EXP.DATASET, cfg.ADAPT)
pprint.pprint(test_perf, width=80)
tb.update('test', epoch, test_perf)
else:
test_perf = defaultdict(float)
track_train.record_epoch(
epoch_id=epoch,
train_metric=get_metric_from_perf(cfg.EXP.TASK, train_perf, cfg.EXP.METRIC),
val_metric=get_metric_from_perf(cfg.EXP.TASK, val_perf, cfg.EXP.METRIC),
test_metric=get_metric_from_perf(cfg.EXP.TASK, test_perf, cfg.EXP.METRIC))
if (not cfg.EXP_EXTRA.no_val) and track_train.save_model(epoch_id=epoch, split='val'):
print('Saving best model on the validation set')
save_checkpoint('best_val', epoch, model, optimizer, lr_sched, bnm_sched, test_perf, cfg)
if (not cfg.EXP_EXTRA.no_test) and track_train.save_model(epoch_id=epoch, split='test'):
print('Saving best model on the test set')
save_checkpoint('best_test', epoch, model, optimizer, lr_sched, bnm_sched, test_perf, cfg)
if (not cfg.EXP_EXTRA.no_val) and track_train.early_stop(epoch_id=epoch):
print(f"Early stopping at {epoch} as val acc did not improve for {cfg.TRAIN.early_stop} epochs.")
break
if (not (cfg.EXP_EXTRA.save_ckp == 0)) and (epoch % cfg.EXP_EXTRA.save_ckp == 0):
save_checkpoint(f'{epoch}', epoch, model, optimizer, lr_sched, bnm_sched, test_perf, cfg)
if cfg.EXP.OPTIMIZER == 'vanilla':
assert bnm_sched is None
lr_sched.step(train_loss)
else:
lr_sched.step()
print('Saving the final model')
save_checkpoint('final', epoch, model, optimizer, lr_sched, bnm_sched, test_perf, cfg)
print('\nTesting on the final model..')
last_test_perf = validate(cfg.EXP.TASK, loader_test, model, cfg.EXP.DATASET, cfg.ADAPT)
pprint.pprint(last_test_perf, width=80)
tb.close()
def entry_test(cfg, test_or_valid, model_path="", confusion = False):
split = "test" if test_or_valid else "valid"
loader_test = create_dataloader(split=split, cfg=cfg)
model = get_model(cfg)
model.to(DEVICE)
print(model)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
optimizer, lr_sched, bnm_sched = get_optimizer(cfg.EXP.OPTIMIZER, cfg.TRAIN, model)
model = load_model_opt_sched(model, optimizer, lr_sched, bnm_sched, model_path)
model.eval()
if confusion:
test_perf, pred, ground = validate(cfg.EXP.TASK, loader_test, model, cfg.EXP.DATASET, cfg.ADAPT, confusion)
print(pred.shape, ground.shape)
#### some hardcoding #######
np.save('./output/' + cfg.EXP.MODEL_NAME + '_' + cfg.DATALOADER.MODELNET40_C.corruption + '_' + str(cfg.DATALOADER.MODELNET40_C.severity) + '_pred.npy',pred )
np.save('./output/' + cfg.EXP.MODEL_NAME + '_' + cfg.DATALOADER.MODELNET40_C.corruption + '_' + str(cfg.DATALOADER.MODELNET40_C.severity) + '_ground.npy',ground)
#### #### #### #### #### ####
else:
test_perf = validate(cfg.EXP.TASK, loader_test, model, cfg.EXP.DATASET, cfg.ADAPT, confusion)
print("Model: {} Corruption: {} Severity: {} Acc: {} Class Acc: {}".format(cfg.EXP.MODEL_NAME, cfg.DATALOADER.MODELNET40_C.corruption, cfg.DATALOADER.MODELNET40_C.severity,test_perf['acc'],test_perf['class_acc']),file=file_object,flush=True)
pprint.pprint(test_perf, width=80)
return test_perf
def rscnn_vote_evaluation(cfg, model_path, log_file):
model = get_model(cfg)
checkpoint = torch.load(model_path)
try:
model.load_state_dict(checkpoint['model_state'])
except:
print("WARNING: using dataparallel to load data")
model = nn.DataParallel(model)
model.load_state_dict(checkpoint['model_state'])
print(f"Checkpoint loaded from {model_path}")
model.to(DEVICE)
model.eval()
assert cfg.EXP.DATASET in ["modelnet40_rscnn"]
loader_test = create_dataloader(split='test', cfg=cfg)
rscnn_voting_evaluate_cls(
loader=loader_test,
model=model,
data_batch_to_points_target=lambda x: (x['pc'], x['label']),
points_to_inp=lambda x: {'pc': x},
out_to_prob=lambda x: F.softmax(x['logit'], dim=1),
log_file=log_file
)
def pn2_vote_evaluation(cfg, model_path, log_file):
assert cfg.EXP.DATASET in ["modelnet40_pn2"]
loader_test = create_dataloader(split='test', cfg=cfg)
model = get_model(cfg)
checkpoint = torch.load(model_path)
try:
model.load_state_dict(checkpoint['model_state'])
except:
print("WARNING: using dataparallel to load data")
model = nn.DataParallel(model)
model.load_state_dict(checkpoint['model_state'])
print(f"Checkpoint loaded from {model_path}")
model.to(DEVICE)
model.eval()
pn2_vote_evaluate_cls(loader_test, model, log_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.set_defaults(entry=lambda cmd_args: parser.print_help())
parser.add_argument('--entry', type=str, default="train")
parser.add_argument('--exp-config', type=str, default="")
parser.add_argument('--model-path', type=str, default="")
parser.add_argument('--resume', action="store_true", default=False)
# parser.add_argument('--gpu',type=str,default='0',
# help="Which gpu to use")
parser.add_argument('--corruption',type=str,default='uniform',
help="Which corruption to use")
parser.add_argument('--output',type=str,default='./test.txt',
help="path to output file")
parser.add_argument('--severity',type=int,default=1,
help="Which severity to use")
parser.add_argument('--confusion', action="store_true", default=False,
help="whether to output confusion matrix data")
cmd_args = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = cmd_args.gpu
if cmd_args.entry == "train":
assert not cmd_args.exp_config == ""
if not cmd_args.resume:
assert cmd_args.model_path == ""
cfg = get_cfg_defaults()
cfg.merge_from_file(cmd_args.exp_config)
if cfg.EXP.EXP_ID == "":
cfg.EXP.EXP_ID = str(datetime.now())[:-7].replace(' ', '-')
cfg.freeze()
print(cfg)
random.seed(cfg.EXP.SEED)
np.random.seed(cfg.EXP.SEED)
torch.manual_seed(cfg.EXP.SEED)
entry_train(cfg, cmd_args.resume, cmd_args.model_path)
elif cmd_args.entry in ["test", "valid"]:
file_object = open(cmd_args.output, 'a')
assert not cmd_args.exp_config == ""
assert not cmd_args.model_path == ""
cfg = get_cfg_defaults()
cfg.merge_from_file(cmd_args.exp_config)
if cfg.EXP.DATASET == "modelnet40_c":
cfg.DATALOADER.MODELNET40_C.corruption = cmd_args.corruption
cfg.DATALOADER.MODELNET40_C.severity = cmd_args.severity
cfg.freeze()
print(cfg)
random.seed(cfg.EXP.SEED)
np.random.seed(cfg.EXP.SEED)
torch.manual_seed(cfg.EXP.SEED)
test_or_valid = cmd_args.entry == "test"
entry_test(cfg, test_or_valid, cmd_args.model_path,cmd_args.confusion)
elif cmd_args.entry in ["rscnn_vote", "pn2_vote"]:
assert not cmd_args.exp_config == ""
assert not cmd_args.model_path == ""
log_file = f"vote_log/{cmd_args.model_path.replace('/', '_')}_{cmd_args.entry.replace('/', '_')}.log"
cfg = get_cfg_defaults()
cfg.merge_from_file(cmd_args.exp_config)
cfg.freeze()
print(cfg)
seed = cfg.EXP.SEED
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
if cmd_args.entry == "rscnn_vote":
rscnn_vote_evaluation(cfg, cmd_args.model_path, log_file)
elif cmd_args.entry == "pn2_vote":
pn2_vote_evaluation(cfg, cmd_args.model_path, log_file)
else:
assert False
================================================
FILE: models/__init__.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-17 13:35:52
LastEditors: Jiachen Sun
LastEditTime: 2022-02-22 23:36:25
'''
from .mv import MVModel
from .rscnn import RSCNN
from .pointnet2 import PointNet2
from .dgcnn import DGCNN
from .pointnet import PointNet
from .pct import Pct
from .pointmlp import pointMLP
from .pointmlp2 import pointMLP2
from .curvenet import CurveNet
from .gdanet import GDANET
================================================
FILE: models/curvenet.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-17 20:37:07
LastEditors: Jiachen Sun
LastEditTime: 2022-02-17 20:42:20
'''
import torch.nn as nn
import torch.nn.functional as F
from CurveNet.core.models.curvenet_cls import CurveNet as CurveNet_og
from all_utils import DATASET_NUM_CLASS
class CurveNet(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
self.model = CurveNet_og(num_classes=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/dgcnn.py
================================================
import torch.nn as nn
import torch.nn.functional as F
from dgcnn.pytorch.model import DGCNN as DGCNN_original
from all_utils import DATASET_NUM_CLASS
class DGCNN(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
# default arguments
class Args:
def __init__(self):
self.k = 20
self.emb_dims = 1024
self.dropout = 0.5
self.leaky_relu = 1
args = Args()
self.model = DGCNN_original(args, output_channels=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/gdanet.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-22 23:22:17
LastEditors: Jiachen Sun
LastEditTime: 2022-02-23 00:16:25
'''
import torch
import torch.nn as nn
from GDANet.model.GDANet_cls import GDANET as GDANET_og
from all_utils import DATASET_NUM_CLASS
class GDANET(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
num_class = DATASET_NUM_CLASS[dataset]
if task == 'cls':
self.model = GDANET_og(number_class=num_class)
else:
assert False
def forward(self, pc, normal=None, cls=None):
# batch_size = pc.shape[0]
pc=pc.permute(0,2,1).contiguous()
pc = pc.to(next(self.parameters()).device)
if self.task == 'cls':
assert cls is None
assert normal is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/model_utils.py
================================================
import torch.nn as nn
# from syncbn_pyt.modules.nn import BatchNorm2d as BatchNorm2dSync
class Squeeze(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inp):
return inp.squeeze()
class BatchNormPoint(nn.Module):
def __init__(self, feat_size, sync_bn=False):
super().__init__()
self.feat_size = feat_size
self.sync_bn=sync_bn
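# NOTE: BatchNorm2dSync comes from the syncbn import commented out above, so
# the sync_bn=True path will raise NameError unless that dependency is restored.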
if self.sync_bn:
self.bn = BatchNorm2dSync(feat_size)
else:
self.bn = nn.BatchNorm1d(feat_size)
def forward(self, x):
assert len(x.shape) == 3
s1, s2, s3 = x.shape[0], x.shape[1], x.shape[2]
assert s3 == self.feat_size
if self.sync_bn:
# 4d input for BatchNorm2dSync
x = x.view(s1 * s2, self.feat_size, 1, 1)
x = self.bn(x)
else:
x = x.view(s1 * s2, self.feat_size)
x = self.bn(x)
return x.view(s1, s2, s3)
================================================
FILE: models/mv.py
================================================
import torch
import torch.nn as nn
from all_utils import DATASET_NUM_CLASS
from models.model_utils import Squeeze, BatchNormPoint
from models.mv_utils import PCViews
class MVModel(nn.Module):
def __init__(self, task, dataset, backbone,
feat_size):
super().__init__()
assert task == 'cls'
self.task = task
self.num_class = DATASET_NUM_CLASS[dataset]
self.dropout_p = 0.5
self.feat_size = feat_size
pc_views = PCViews()
self.num_views = pc_views.num_views
self._get_img = pc_views.get_img
img_layers, in_features = self.get_img_layers(
backbone, feat_size=feat_size)
self.img_model = nn.Sequential(*img_layers)
self.final_fc = MVFC(
num_views=self.num_views,
in_features=in_features,
out_features=self.num_class,
dropout_p=self.dropout_p)
def forward(self, pc):
"""
:param pc:
:return:
"""
pc = pc.cuda()
img = self.get_img(pc)
feat = self.img_model(img)
logit = self.final_fc(feat)
out = {'logit': logit}
return out
def get_img(self, pc):
img = self._get_img(pc)
img = torch.tensor(img).float()
img = img.to(next(self.parameters()).device)
assert len(img.shape) == 3
img = img.unsqueeze(3)
# [num_pc * num_views, 1, RESOLUTION, RESOLUTION]
img = img.permute(0, 3, 1, 2)
return img
@staticmethod
def get_img_layers(backbone, feat_size):
"""
Return layers for the image model
"""
from models.resnet import _resnet, BasicBlock
assert backbone == 'resnet18'
layers = [2, 2, 2, 2]
block = BasicBlock
backbone_mod = _resnet(
arch=None,
block=block,
layers=layers,
pretrained=False,
progress=False,
feature_size=feat_size,
zero_init_residual=True)
all_layers = [x for x in backbone_mod.children()]
in_features = all_layers[-1].in_features
# all layers except the final fc layer and the initial conv layers
# WARNING: this is checked only for resnet models
main_layers = all_layers[4:-1]
img_layers = [
nn.Conv2d(1, feat_size, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False),
nn.BatchNorm2d(feat_size, eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True),
nn.ReLU(inplace=True),
*main_layers,
Squeeze()
]
return img_layers, in_features
class MVFC(nn.Module):
"""
Final FC layers for the MV model
"""
def __init__(self, num_views, in_features, out_features, dropout_p):
super().__init__()
self.num_views = num_views
self.in_features = in_features
self.model = nn.Sequential(
BatchNormPoint(in_features),
# dropout before concatenation so that each view drops features independently
nn.Dropout(dropout_p),
nn.Flatten(),
nn.Linear(in_features=in_features * self.num_views,
out_features=in_features),
nn.BatchNorm1d(in_features),
nn.ReLU(),
nn.Dropout(dropout_p),
nn.Linear(in_features=in_features, out_features=out_features,
bias=True))
def forward(self, feat):
feat = feat.view((-1, self.num_views, self.in_features))
out = self.model(feat)
return out
================================================
FILE: models/mv_utils.py
================================================
import numpy as np
import torch
RESOLUTION = 128
TRANS = -1.4
def euler2mat(angle):
"""Convert euler angles to rotation matrix.
:param angle: [3] or [b, 3]
:return
rotmat: [3] or [b, 3, 3]
source
https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
"""
if len(angle.size()) == 1:
x, y, z = angle[0], angle[1], angle[2]
_dim = 0
_view = [3, 3]
elif len(angle.size()) == 2:
b, _ = angle.size()
x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]
_dim = 1
_view = [b, 3, 3]
else:
assert False
cosz = torch.cos(z)
sinz = torch.sin(z)
# zero = torch.zeros([b], requires_grad=False, device=angle.device)[0]
# one = torch.ones([b], requires_grad=False, device=angle.device)[0]
zero = z.detach()*0
one = zero.detach()+1
zmat = torch.stack([cosz, -sinz, zero,
sinz, cosz, zero,
zero, zero, one], dim=_dim).reshape(_view)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zero, siny,
zero, one, zero,
-siny, zero, cosy], dim=_dim).reshape(_view)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([one, zero, zero,
zero, cosx, -sinx,
zero, sinx, cosx], dim=_dim).reshape(_view)
rot_mat = xmat @ ymat @ zmat
# print(rot_mat)
return rot_mat
def distribute(depth, _x, _y, size_x, size_y, image_height, image_width):
"""
Distributes the depth associated with each point to the discrete coordinates (image_height, image_width) in a region
of size (size_x, size_y).
:param depth:
:param _x:
:param _y:
:param size_x:
:param size_y:
:param image_height:
:param image_width:
:return:
"""
assert size_x % 2 == 0 or size_x == 1
assert size_y % 2 == 0 or size_y == 1
batch, _ = depth.size()
epsilon = torch.tensor([1e-12], requires_grad=False, device=depth.device)
_i = torch.linspace(-size_x / 2, (size_x / 2) - 1, size_x, requires_grad=False, device=depth.device)
_j = torch.linspace(-size_y / 2, (size_y / 2) - 1, size_y, requires_grad=False, device=depth.device)
extended_x = _x.unsqueeze(2).repeat([1, 1, size_x]) + _i # [batch, num_points, size_x]
extended_y = _y.unsqueeze(2).repeat([1, 1, size_y]) + _j # [batch, num_points, size_y]
extended_x = extended_x.unsqueeze(3).repeat([1, 1, 1, size_y]) # [batch, num_points, size_x, size_y]
extended_y = extended_y.unsqueeze(2).repeat([1, 1, size_x, 1]) # [batch, num_points, size_x, size_y]
extended_x.ceil_()
extended_y.ceil_()
value = depth.unsqueeze(2).unsqueeze(3).repeat([1, 1, size_x, size_y]) # [batch, num_points, size_x, size_y]
# all points that will be finally used
masked_points = ((extended_x >= 0)
* (extended_x <= image_height - 1)
* (extended_y >= 0)
* (extended_y <= image_width - 1)
* (value >= 0))
true_extended_x = extended_x
true_extended_y = extended_y
# to prevent error
extended_x = (extended_x % image_height)
extended_y = (extended_y % image_width)
# [batch, num_points, size_x, size_y]
distance = torch.abs((extended_x - _x.unsqueeze(2).unsqueeze(3))
* (extended_y - _y.unsqueeze(2).unsqueeze(3)))
weight = (masked_points.float()
* (1 / (value + epsilon))) # [batch, num_points, size_x, size_y]
weighted_value = value * weight
weight = weight.view([batch, -1])
weighted_value = weighted_value.view([batch, -1])
coordinates = (extended_x.view([batch, -1]) * image_width) + extended_y.view(
[batch, -1])
coord_max = image_height * image_width
true_coordinates = (true_extended_x.view([batch, -1]) * image_width) + true_extended_y.view(
[batch, -1])
true_coordinates[~masked_points.view([batch, -1])] = coord_max
weight_scattered = torch.zeros(
[batch, image_width * image_height],
device=depth.device).scatter_add(1, coordinates.long(), weight)
masked_zero_weight_scattered = (weight_scattered == 0.0)
weight_scattered += masked_zero_weight_scattered.float()
weighed_value_scattered = torch.zeros(
[batch, image_width * image_height],
device=depth.device).scatter_add(1, coordinates.long(), weighted_value)
return weighed_value_scattered, weight_scattered
def points2depth(points, image_height, image_width, size_x=4, size_y=4):
"""
:param points: [B, num_points, 3]
:param image_width:
:param image_height:
:param size_x:
:param size_y:
:return:
depth_recovered: [B, image_width, image_height]
"""
epsilon = torch.tensor([1e-12], requires_grad=False, device=points.device)
# epsilon not needed, kept here to ensure exact replication of old version
coord_x = (points[:, :, 0] / (points[:, :, 2] + epsilon)) * (image_width / image_height) # [batch, num_points]
coord_y = (points[:, :, 1] / (points[:, :, 2] + epsilon)) # [batch, num_points]
batch, total_points, _ = points.size()
depth = points[:, :, 2] # [batch, num_points]
# pdb.set_trace()
_x = ((coord_x + 1) * image_height) / 2
_y = ((coord_y + 1) * image_width) / 2
weighed_value_scattered, weight_scattered = distribute(
depth=depth,
_x=_x,
_y=_y,
size_x=size_x,
size_y=size_y,
image_height=image_height,
image_width=image_width)
depth_recovered = (weighed_value_scattered / weight_scattered).view([
batch, image_height, image_width
])
return depth_recovered
# source: https://discuss.pytorch.org/t/batched-index-select/9115/6
def batched_index_select(inp, dim, index):
"""
input: B x * x ... x *
dim: 0 < scalar
index: B x M
"""
views = [inp.shape[0]] + \
[1 if i != dim else -1 for i in range(1, len(inp.shape))]
expanse = list(inp.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(inp, dim, index)
def point_fea_img_fea(point_fea, point_coo, h, w):
"""
each point_coo is of the form (x*w + h). points not in the canvas are removed
:param point_fea: [batch_size, num_points, feat_size]
:param point_coo: [batch_size, num_points]
:return:
"""
assert len(point_fea.shape) == 3
assert len(point_coo.shape) == 2
assert point_fea.shape[0:2] == point_coo.shape
coo_max = ((h - 1) * w) + (w - 1)
mask_point_coo = (point_coo >= 0) * (point_coo <= coo_max)
point_coo *= mask_point_coo.float()
point_fea *= mask_point_coo.float().unsqueeze(-1)
bs, _, fs = point_fea.shape
point_coo = point_coo.unsqueeze(2).repeat([1, 1, fs])
img_fea = torch.zeros([bs, h * w, fs], device=point_fea.device).scatter_add(1, point_coo.long(), point_fea)
return img_fea
def distribute_img_fea_points(img_fea, point_coord):
"""
:param img_fea: [B, C, H, W]
:param point_coord: [B, num_points], each coordinate is a scalar value given by (x * W) + y
:return
point_fea: [B, num_points, C], for points with coordinates outside the image, we return 0
"""
B, C, H, W = list(img_fea.size())
img_fea = img_fea.permute(0, 2, 3, 1).view([B, H*W, C])
coord_max = ((H - 1) * W) + (W - 1)
mask_point_coord = (point_coord >= 0) * (point_coord <= coord_max)
mask_point_coord = mask_point_coord.float()
point_coord = mask_point_coord * point_coord
point_fea = batched_index_select(
inp=img_fea,
dim=1,
index=point_coord.long())
point_fea = mask_point_coord.unsqueeze(-1) * point_fea
return point_fea
class PCViews:
"""For creating images from PC based on the view information. Faster as the
repeated operations are done only once whie initialization.
"""
def __init__(self):
_views = np.asarray([
[[0 * np.pi / 2, 0, np.pi / 2], [0, 0, TRANS]],
[[1 * np.pi / 2, 0, np.pi / 2], [0, 0, TRANS]],
[[2 * np.pi / 2, 0, np.pi / 2], [0, 0, TRANS]],
[[3 * np.pi / 2, 0, np.pi / 2], [0, 0, TRANS]],
[[0, -np.pi / 2, np.pi / 2], [0, 0, TRANS]],
[[0, np.pi / 2, np.pi / 2], [0, 0, TRANS]]])
self.num_views = 6
angle = torch.tensor(_views[:, 0, :]).float().cuda()
self.rot_mat = euler2mat(angle).transpose(1, 2)
self.translation = torch.tensor(_views[:, 1, :]).float().cuda()
self.translation = self.translation.unsqueeze(1)
def get_img(self, points):
"""Get image based on the prespecified specifications.
Args:
points (torch.tensor): of size [B, _, 3]
Returns:
img (torch.tensor): of size [B * self.num_views, RESOLUTION,
RESOLUTION]
"""
b, _, _ = points.shape
v = self.translation.shape[0]
_points = self.point_transform(
points=torch.repeat_interleave(points, v, dim=0),
rot_mat=self.rot_mat.repeat(b, 1, 1),
translation=self.translation.repeat(b, 1, 1))
img = points2depth(
points=_points,
image_height=RESOLUTION,
image_width=RESOLUTION,
size_x=1,
size_y=1,
)
return img
@staticmethod
def point_transform(points, rot_mat, translation):
"""
:param points: [batch, num_points, 3]
:param rot_mat: [batch, 3, 3]
:param translation: [batch, 1, 3]
:return:
"""
points = torch.matmul(points.to('cuda:0'), rot_mat.to('cuda:0'))
points = points - translation
return points
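# Illustrative usage (added for clarity; requires a CUDA device, since the
# view buffers are created on 'cuda'):
#   pc_views = PCViews()
#   imgs = pc_views.get_img(torch.rand(4, 1024, 3))
#   # imgs: [4 * 6, RESOLUTION, RESOLUTION]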
================================================
FILE: models/pct.py
================================================
import torch.nn as nn
from PCT_Pytorch.model import Pct as Pct_original
from all_utils import DATASET_NUM_CLASS
class Pct(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
# default arguments
class Args:
def __init__(self):
self.dropout = 0.5
args = Args()
self.model = Pct_original(args, output_channels=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
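# Illustrative usage sketch (assumes 'modelnet40' is a key of
# DATASET_NUM_CLASS mapping to 40 classes; the wrapped Pct expects CUDA):
#   model = Pct(task='cls', dataset='modelnet40').cuda()
#   out = model(torch.rand(8, 1024, 3))   # input is moved to the model device
#   # out['logit']: [8, 40]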
================================================
FILE: models/pointmlp.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-17 20:50:58
LastEditors: Jiachen Sun
LastEditTime: 2022-02-21 21:18:02
'''
import torch.nn as nn
from pointMLP.classification_ModelNet40.models.pointmlp import pointMLP as pointMLP_original
from all_utils import DATASET_NUM_CLASS
class pointMLP(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
self.model = pointMLP_original(num_classes=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/pointmlp2.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-21 21:16:25
LastEditors: Jiachen Sun
LastEditTime: 2022-02-21 21:17:57
'''
import torch.nn as nn
from pointMLP.classification_ModelNet40.models.pointmlp import pointMLPElite as pointMLP_original
from all_utils import DATASET_NUM_CLASS
class pointMLP2(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
self.model = pointMLP_original(num_classes=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/pointnet.py
================================================
# based on: https://github.com/fxia22/pointnet.pytorch/blob/master/utils/train_classification.py
import torch.nn as nn
from pointnet_pyt.pointnet.model import PointNetCls
from all_utils import DATASET_NUM_CLASS
class PointNet(nn.Module):
def __init__(self, dataset, task):
super().__init__()
self.task = task
num_class = DATASET_NUM_CLASS[dataset]
if self.task == 'cls_trans':
self.model = PointNetCls(k=num_class, feature_transform=True)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.transpose(2, 1).float()
if self.task == 'cls_trans':
logit, _, trans_feat = self.model(pc)
else:
assert False
out = {'logit': logit, 'trans_feat': trans_feat}
return out
================================================
FILE: models/pointnet2.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-16 22:23:16
LastEditors: Jiachen Sun
LastEditTime: 2022-02-24 22:36:59
'''
import torch
import torch.nn as nn
from pointnet2_pyt.pointnet2.models.pointnet2_msg_cls import Pointnet2MSG
from all_utils import DATASET_NUM_CLASS
class PointNet2(nn.Module):
def __init__(self, task, dataset, version_cls):
super().__init__()
self.task = task
num_class = DATASET_NUM_CLASS[dataset]
if task == 'cls':
self.model = Pointnet2MSG(num_classes=num_class, input_channels=0, use_xyz=True, version=version_cls)
else:
assert False
def forward(self, pc, normal=None, cls=None):
pc = pc.to(next(self.parameters()).device)
if self.task == 'cls':
assert cls is None
assert normal is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
================================================
FILE: models/resnet.py
================================================
import torch
import torch.nn as nn
try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:  # helper moved in newer torchvision releases
    from torch.hub import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at the 3x3
# convolution (self.conv2), while the original implementation places the
# stride at the first 1x1 convolution (self.conv1), according to
# "Deep Residual Learning for Image Recognition": https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, feature_size=64):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = feature_size
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, feature_size, layers[0])
self.layer2 = self._make_layer(block, feature_size * 2, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, feature_size * 4, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, feature_size * 8, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(feature_size * 8 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" `_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" `_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" `_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
================================================
FILE: models/rscnn.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from rs_cnn.models import RSCNN_MSN_Seg, RSCNN_SSN_Cls
from all_utils import DATASET_NUM_CLASS
# distilled from:
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/models/rscnn_ssn_cls.py
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/models/rscnn_msn_seg.py
class RSCNN(nn.Module):
def __init__(self, task, dataset, ssn_or_msn):
"""
Returns a model
:param task: 'cls' for classification (the only task supported here)
:param ssn_or_msn: (bool) if True use the SSN variant else MSN
"""
super().__init__()
self.task = task
self.dataset = dataset
num_classes = DATASET_NUM_CLASS[self.dataset]
if self.task == 'cls':
assert ssn_or_msn
# source: https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/cfgs/config_ssn_cls.yaml
# source: https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/train_cls.py#L73
rscnn_params = {
'num_classes':num_classes,
'input_channels': 0,
'relation_prior': 1,
'use_xyz': True
}
self.model = RSCNN_SSN_Cls(**rscnn_params)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
if self.task == 'cls':
assert cls is None
out = {'logit': self.model(pc)}
else:
assert False
return out
================================================
FILE: pc_utils.py
================================================
import numpy as np
import torch
# source: https://github.com/charlesq34/pointnet2/blob/74bb67f3702e8aec55a7b8765dd728b18456030c/utils/provider.py#L187-L198
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
B, N, C = batch_data.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)
jittered_data += batch_data
return jittered_data
# source: https://github.com/charlesq34/pointnet2/blob/74bb67f3702e8aec55a7b8765dd728b18456030c/utils/provider.py#L32-L50
def rotate_point_cloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
# source: https://github.com/WangYueFt/dgcnn/blob/master/pytorch/data.py
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
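# Illustrative usage (added for clarity): the NumPy augmentations above
# compose on a batch of clouds.
#   batch = np.random.randn(8, 1024, 3).astype('float32')
#   batch = rotate_point_cloud(batch)      # per-shape rotation about the up axis
#   batch = jitter_point_cloud(batch)      # per-point Gaussian jitter
#   batch = translate_pointcloud(batch)    # anisotropic scale + shift (broadcasts over the batch)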
# based on https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/data/data_utils.py#L81
class PointcloudScaleAndTranslate(object):
def __init__(self, scale_low=2. / 3., scale_high=3. / 2., translate_range=0.2, no_z_aug=False):
"""
:param scale_low:
:param scale_high:
:param translate_range:
:param no_z_aug: no translation and scaling along the z axis
"""
self.scale_low = scale_low
self.scale_high = scale_high
self.translate_range = translate_range
self.no_z_aug = no_z_aug
def __call__(self, pc):
bsize = pc.size()[0]
for i in range(bsize):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
xyz2 = np.random.uniform(low=-self.translate_range, high=self.translate_range, size=[3])
if self.no_z_aug:
xyz1[2] = 1.0
xyz2[2] = 0.0
pc[i, :, 0:3] = torch.mul(pc[i, :, 0:3], torch.from_numpy(xyz1).float().cuda()) + torch.from_numpy(xyz2).float().cuda()
return pc
================================================
FILE: pointMLP/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
.idea
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
.DS_Store
================================================
FILE: pointMLP/LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: pointMLP/README.md
================================================
# Rethinking Network Design and Local Geometry in Point Cloud: A Simple Residual MLP Framework (ICLR 2022)
[PWC: 3D Point Cloud Classification on ModelNet40](https://paperswithcode.com/sota/3d-point-cloud-classification-on-modelnet40?p=rethinking-network-design-and-local-geometry-1)
[PWC: 3D Point Cloud Classification on ScanObjectNN](https://paperswithcode.com/sota/3d-point-cloud-classification-on-scanobjectnn?p=rethinking-network-design-and-local-geometry-1)
[Project Sites]() | [arXiv](https://arxiv.org/abs/2202.07123) | Primary contact: [Xu Ma](mailto:ma.xu1@northeastern.edu)
Overview of one stage in PointMLP. Given an input point cloud, PointMLP progressively extracts local features using residual point MLP blocks. In each stage, we first transform the local points using a geometric affine module, then local features are extracted before and after aggregation, respectively. By repeating multiple stages, PointMLP progressively enlarges the receptive field and models the complete geometric information of the point cloud.
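The geometric affine module mentioned above is the learnable per-neighborhood normalization implemented by `LocalGrouper` in `classification_ModelNet40/models/pointmlp.py`; the following is a minimal sketch of that operation (function name chosen for illustration):

```python
# Minimal sketch of the geometric affine module (illustrative; mirrors
# LocalGrouper in classification_ModelNet40/models/pointmlp.py).
import torch

def geometric_affine(grouped, alpha, beta, eps=1e-5):
    """grouped: [B, npoint, k, d]; alpha, beta: learnable tensors of shape [1, 1, 1, d]."""
    mean = grouped.mean(dim=2, keepdim=True)                  # center of each neighborhood
    std = (grouped - mean).reshape(grouped.shape[0], -1).std(dim=-1, keepdim=True)
    std = std.unsqueeze(-1).unsqueeze(-1)                     # one scalar std per batch element
    return alpha * (grouped - mean) / (std + eps) + beta      # learnable rescale and shift
```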
## BibTeX
@inproceedings{
ma2022rethinking,
title={Rethinking Network Design and Local Geometry in Point Cloud: A Simple Residual {MLP} Framework},
author={Xu Ma and Can Qin and Haoxuan You and Haoxi Ran and Yun Fu},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=3Pbra-_u76D}
}
## Model Zoo
- The code/models/logs for the submission version (without the bug fix) can be found at [commit:d2b8dbaa](http://github.com/13952522076/pointMLP-pytorch/tree/d2b8dbaa06eb6176b222dcf2ad248f8438582026).
- On ModelNet40, fixed pointMLP achieves a result of **91.5% mAcc** and **94.1% OA** without voting, logs and pretrained models can be found [[here]](https://web.northeastern.edu/smilelab/xuma/pointMLP/checkpoints/fixstd/modelnet40/pointMLP-20220209053148-404/).
- On ScanObjectNN, fixed pointMLP achieves a result of **84.4% mAcc** and **86.1% OA** without voting, logs and pretrained models can be found [[here]](https://web.northeastern.edu/smilelab/xuma/pointMLP/checkpoints/fixstd/scanobjectnn/pointMLP-20220204021453/).
- Stay tuned. More elite versions and voting results will be uploaded.
## News & Updates:
- [ ] updated more pretrained models
- [ ] double check the part seg utils
- [ ] project page
- [x] fixed the std bug (unstable testing in previous versions)
- [x] paper/codes release
:point_right::point_right::point_right:**NOTE:** The code/models/logs for the submission version (without the bug fix) can be found at [commit:d2b8dbaa](http://github.com/13952522076/pointMLP-pytorch/tree/d2b8dbaa06eb6176b222dcf2ad248f8438582026).
## Install
```bash
# 1. clone this repo
git clone https://github.com/ma-xu/pointMLP-pytorch.git
cd pointMLP-pytorch
# 2. create a conda virtual environment and activate it
conda create -n pointmlp python=3.7 -y
conda activate pointmlp
# 3. install required libs, pytorch 1.8.1, torchvision 0.9.1, etc.
pip install -r requirements.txt
# 4. install CUDA kernels
pip install pointnet2_ops_lib/.
```
## Usage
### Classification ModelNet40
**Train**: The dataset will be automatically downloaded; run the following command to train.
By default, this creates a folder named "checkpoints/{modelName}-{msg}-{randomseed}", which includes args.txt, best_checkpoint.pth, last_checkpoint.pth, log.txt, and out.txt.
```bash
cd pointMLP-pytorch/classification_ModelNet40
# train pointMLP
python main.py --model pointMLP
# train pointMLP-elite
python main.py --model pointMLPElite
# please add other parameters as you wish.
```
To conduct voting testing, run
```bash
# please modify the msg accordingly
python voting.py --model pointMLP --msg demo
```
### Classification ScanObjectNN
The dataset will be automatically downloaded.
- Train pointMLP/pointMLPElite
```bash
# train pointMLP
python main.py --model pointMLP
# train pointMLP-elite
python main.py --model pointMLPElite
# please add other parameters as you wish.
```
By default, this creates a folder named "checkpoints/{modelName}-{msg}-{randomseed}", which includes args.txt, best_checkpoint.pth, last_checkpoint.pth, log.txt, and out.txt.
### Part segmentation
- Make a data folder and download the dataset
```bash
cd pointMLP-pytorch/part_segmentation
mkdir data
cd data
wget https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip --no-check-certificate
unzip shapenetcore_partanno_segmentation_benchmark_v0_normal.zip
```
- Train pointMLP
```bash
# train pointMLP
python main.py --model pointMLP
# please add other parameters as you wish.
```
## Acknowledgment
Our implementation is mainly based on the following codebases; we gratefully thank the authors for their wonderful work.
[CurveNet](https://github.com/tiangexiang/CurveNet),
[PAConv](https://github.com/CVMI-Lab/PAConv),
[GDANet](https://github.com/mutianxu/GDANet),
[Pointnet2_PyTorch](https://github.com/erikwijmans/Pointnet2_PyTorch)
## LICENSE
PointMLP is under the Apache-2.0 license.
Please contact the authors for commercial use.
================================================
FILE: pointMLP/classification_ModelNet40/data.py
================================================
import os
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def download():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(partition):
download()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5'%partition)):
# print(f"h5_name: {h5_name}")
f = h5py.File(h5_name,'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def random_point_dropout(pc, max_dropout_ratio=0.875):
''' pc: NxC; randomly drop points and set them to the first point '''
# for b in range(batch_pc.shape[0]):
dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875
drop_idx = np.where(np.random.random((pc.shape[0]))<=dropout_ratio)[0]
# print ('use random drop', len(drop_idx))
if len(drop_idx)>0:
pc[drop_idx,:] = pc[0,:] # set to the first point
return pc
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
# pointcloud = random_point_dropout(pointcloud) # open for dgcnn not for our idea for all
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
# for data, label in train:
# print(data.shape)
# print(label.shape)
from torch.utils.data import DataLoader
train_loader = DataLoader(ModelNet40(partition='train', num_points=1024), num_workers=4,
batch_size=32, shuffle=True, drop_last=True)
for batch_idx, (data, label) in enumerate(train_loader):
print(f"batch_idx: {batch_idx} | data shape: {data.shape} | ;lable shape: {label.shape}")
train_set = ModelNet40(partition='train', num_points=1024)
test_set = ModelNet40(partition='test', num_points=1024)
print(f"train_set size {train_set.__len__()}")
print(f"test_set size {test_set.__len__()}")
================================================
FILE: pointMLP/classification_ModelNet40/helper.py
================================================
import torch
import torch.nn.functional as F
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
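# Editorial note (not in the original file): with smoothing, the target
# distribution places 1 - eps on the gold class and eps / (n_class - 1) on
# every other class; the loss is cross entropy against that softened target.
# Illustrative call:
#   pred = torch.randn(4, 40); gold = torch.randint(0, 40, (4,))
#   loss = cal_loss(pred, gold)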
================================================
FILE: pointMLP/classification_ModelNet40/main.py
================================================
"""
Usage:
python main.py --model PointMLP --msg demo
"""
import argparse
import os
import logging
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import Logger, mkdir_p, progress_bar, save_model, save_args, cal_loss
from data import ModelNet40
from torch.optim.lr_scheduler import CosineAnnealingLR
import sklearn.metrics as metrics
import numpy as np
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser('training')
parser.add_argument('-c', '--checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--msg', type=str, help='message after checkpoint')
parser.add_argument('--batch_size', type=int, default=32, help='batch size in training')
parser.add_argument('--model', default='PointNet', help='model name [default: pointnet_cls]')
parser.add_argument('--epoch', default=300, type=int, help='number of epoch in training')
parser.add_argument('--num_points', type=int, default=1024, help='Point Number')
parser.add_argument('--learning_rate', default=0.1, type=float, help='learning rate in training')
parser.add_argument('--min_lr', default=0.005, type=float, help='min lr')
parser.add_argument('--weight_decay', type=float, default=2e-4, help='decay rate')
parser.add_argument('--seed', type=int, help='random seed')
parser.add_argument('--workers', default=8, type=int, help='workers')
return parser.parse_args()
def main():
args = parse_args()
if args.seed is None:
args.seed = np.random.randint(1, 10000)
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
assert torch.cuda.is_available(), "Please ensure codes are executed in cuda."
device = 'cuda'
if args.seed is not None:
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.cuda.manual_seed(args.seed)
torch.set_printoptions(10)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(args.seed)
time_str = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
if args.msg is None:
message = time_str
else:
message = "-" + args.msg
args.checkpoint = 'checkpoints/' + args.model + message + '-' + str(args.seed)
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
screen_logger = logging.getLogger("Model")
screen_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
file_handler = logging.FileHandler(os.path.join(args.checkpoint, "out.txt"))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
screen_logger.addHandler(file_handler)
def printf(msg):
    screen_logger.info(msg)
    print(msg)
# Model
printf(f"args: {args}")
printf('==> Building model..')
net = models.__dict__[args.model]()
criterion = cal_loss
net = net.to(device)
# criterion = criterion.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
best_test_acc = 0. # best test accuracy
best_train_acc = 0.
best_test_acc_avg = 0.
best_train_acc_avg = 0.
best_test_loss = float("inf")
best_train_loss = float("inf")
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
optimizer_dict = None
if not os.path.isfile(os.path.join(args.checkpoint, "last_checkpoint.pth")):
save_args(args)
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title="ModelNet" + args.model)
logger.set_names(["Epoch-Num", 'Learning-Rate',
'Train-Loss', 'Train-acc-B', 'Train-acc',
'Valid-Loss', 'Valid-acc-B', 'Valid-acc'])
else:
printf(f"Resuming last checkpoint from {args.checkpoint}")
checkpoint_path = os.path.join(args.checkpoint, "last_checkpoint.pth")
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
best_test_acc = checkpoint['best_test_acc']
best_train_acc = checkpoint['best_train_acc']
best_test_acc_avg = checkpoint['best_test_acc_avg']
best_train_acc_avg = checkpoint['best_train_acc_avg']
best_test_loss = checkpoint['best_test_loss']
best_train_loss = checkpoint['best_train_loss']
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title="ModelNet" + args.model, resume=True)
optimizer_dict = checkpoint['optimizer']
printf('==> Preparing data..')
train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=args.workers,
batch_size=args.batch_size, shuffle=True, drop_last=True)
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=args.workers,
batch_size=args.batch_size // 2, shuffle=False, drop_last=False)
optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
if optimizer_dict is not None:
optimizer.load_state_dict(optimizer_dict)
scheduler = CosineAnnealingLR(optimizer, args.epoch, eta_min=args.min_lr, last_epoch=start_epoch - 1)
for epoch in range(start_epoch, args.epoch):
printf('Epoch(%d/%s) Learning Rate %s:' % (epoch + 1, args.epoch, optimizer.param_groups[0]['lr']))
train_out = train(net, train_loader, optimizer, criterion, device) # {"loss", "acc", "acc_avg", "time"}
test_out = validate(net, test_loader, criterion, device)
scheduler.step()
if test_out["acc"] > best_test_acc:
best_test_acc = test_out["acc"]
is_best = True
else:
is_best = False
best_test_acc = test_out["acc"] if (test_out["acc"] > best_test_acc) else best_test_acc
best_train_acc = train_out["acc"] if (train_out["acc"] > best_train_acc) else best_train_acc
best_test_acc_avg = test_out["acc_avg"] if (test_out["acc_avg"] > best_test_acc_avg) else best_test_acc_avg
best_train_acc_avg = train_out["acc_avg"] if (train_out["acc_avg"] > best_train_acc_avg) else best_train_acc_avg
best_test_loss = test_out["loss"] if (test_out["loss"] < best_test_loss) else best_test_loss
best_train_loss = train_out["loss"] if (train_out["loss"] < best_train_loss) else best_train_loss
save_model(
net, epoch, path=args.checkpoint, acc=test_out["acc"], is_best=is_best,
best_test_acc=best_test_acc, # best test accuracy
best_train_acc=best_train_acc,
best_test_acc_avg=best_test_acc_avg,
best_train_acc_avg=best_train_acc_avg,
best_test_loss=best_test_loss,
best_train_loss=best_train_loss,
optimizer=optimizer.state_dict()
)
logger.append([epoch, optimizer.param_groups[0]['lr'],
train_out["loss"], train_out["acc_avg"], train_out["acc"],
test_out["loss"], test_out["acc_avg"], test_out["acc"]])
printf(
f"Training loss:{train_out['loss']} acc_avg:{train_out['acc_avg']}% acc:{train_out['acc']}% time:{train_out['time']}s")
printf(
f"Testing loss:{test_out['loss']} acc_avg:{test_out['acc_avg']}% "
f"acc:{test_out['acc']}% time:{test_out['time']}s [best test acc: {best_test_acc}%] \n\n")
logger.close()
printf(f"++++++++" * 2 + "Final results" + "++++++++" * 2)
printf(f"++ Last Train time: {train_out['time']} | Last Test time: {test_out['time']} ++")
printf(f"++ Best Train loss: {best_train_loss} | Best Test loss: {best_test_loss} ++")
printf(f"++ Best Train acc_B: {best_train_acc_avg} | Best Test acc_B: {best_test_acc_avg} ++")
printf(f"++ Best Train acc: {best_train_acc} | Best Test acc: {best_test_acc} ++")
printf(f"++++++++" * 5)
def train(net, trainloader, optimizer, criterion, device):
net.train()
train_loss = 0
correct = 0
total = 0
train_pred = []
train_true = []
time_cost = datetime.datetime.now()
for batch_idx, (data, label) in enumerate(trainloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1) # so, the input data shape is [batch, 3, 1024]
optimizer.zero_grad()
logits = net(data)
loss = criterion(logits, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
train_loss += loss.item()
preds = logits.max(dim=1)[1]
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
return {
"loss": float("%.3f" % (train_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100. * metrics.accuracy_score(train_true, train_pred))),
"acc_avg": float("%.3f" % (100. * metrics.balanced_accuracy_score(train_true, train_pred))),
"time": time_cost
}
def validate(net, testloader, criterion, device):
net.eval()
test_loss = 0
correct = 0
total = 0
test_true = []
test_pred = []
time_cost = datetime.datetime.now()
with torch.no_grad():
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = net(data)
loss = criterion(logits, label)
test_loss += loss.item()
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
return {
"loss": float("%.3f" % (test_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100. * metrics.accuracy_score(test_true, test_pred))),
"acc_avg": float("%.3f" % (100. * metrics.balanced_accuracy_score(test_true, test_pred))),
"time": time_cost
}
if __name__ == '__main__':
main()
================================================
FILE: pointMLP/classification_ModelNet40/models/__init__.py
================================================
from __future__ import absolute_import
from .pointmlp import pointMLP, pointMLPElite
================================================
FILE: pointMLP/classification_ModelNet40/models/pointmlp.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch import einsum
# from einops import rearrange, repeat
from PCT_Pytorch.pointnet2_ops_lib.pointnet2_ops import pointnet2_utils
# from pointnet2_ops import pointnet2_utils
def get_activation(activation):
if activation.lower() == 'gelu':
return nn.GELU()
elif activation.lower() == 'rrelu':
return nn.RReLU(inplace=True)
elif activation.lower() == 'selu':
return nn.SELU(inplace=True)
elif activation.lower() == 'silu':
return nn.SiLU(inplace=True)
elif activation.lower() == 'hardswish':
return nn.Hardswish(inplace=True)
elif activation.lower() == 'leakyrelu':
return nn.LeakyReLU(inplace=True)
else:
return nn.ReLU(inplace=True)
def square_distance(src, dst):
"""
Calculate the squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
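# Editorial sanity check (illustrative): for src == dst the diagonal of the
# result is ~0 up to floating-point error.
#   p = torch.rand(1, 8, 3)
#   d = square_distance(p, p)                 # [1, 8, 8]
#   # torch.diagonal(d, dim1=1, dim2=2) is approximately zero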
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
distance = torch.min(distance, dist)
farthest = torch.max(distance, -1)[1]
return centroids
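# Illustrative usage (added for clarity): downsample a cloud to 512 centroids
# by FPS, then gather their coordinates with index_points.
#   xyz = torch.rand(2, 1024, 3)
#   idx = farthest_point_sample(xyz, 512)     # [2, 512] point indices
#   centers = index_points(xyz, idx)          # [2, 512, 3]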
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def knn_point(nsample, xyz, new_xyz):
"""
Input:
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
return group_idx
class LocalGrouper(nn.Module):
def __init__(self, channel, groups, kneighbors, use_xyz=True, normalize="center", **kwargs):
"""
Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,d]
:param groups: groups number
        :param kneighbors: number of k neighbors
:param kwargs: others
"""
super(LocalGrouper, self).__init__()
self.groups = groups
self.kneighbors = kneighbors
self.use_xyz = use_xyz
if normalize is not None:
self.normalize = normalize.lower()
else:
self.normalize = None
if self.normalize not in ["center", "anchor"]:
print(f"Unrecognized normalize parameter (self.normalize), set to None. Should be one of [center, anchor].")
self.normalize = None
if self.normalize is not None:
add_channel=3 if self.use_xyz else 0
self.affine_alpha = nn.Parameter(torch.ones([1,1,1,channel + add_channel]))
self.affine_beta = nn.Parameter(torch.zeros([1, 1, 1, channel + add_channel]))
def forward(self, xyz, points):
B, N, C = xyz.shape
S = self.groups
        xyz = xyz.contiguous()  # xyz [batch, points, xyz]
# fps_idx = torch.multinomial(torch.linspace(0, N - 1, steps=N).repeat(B, 1).to(xyz.device), num_samples=self.groups, replacement=False).long()
# fps_idx = farthest_point_sample(xyz, self.groups).long()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]
new_xyz = index_points(xyz, fps_idx) # [B, npoint, 3]
new_points = index_points(points, fps_idx) # [B, npoint, d]
idx = knn_point(self.kneighbors, xyz, new_xyz)
# idx = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = index_points(xyz, idx) # [B, npoint, k, 3]
grouped_points = index_points(points, idx) # [B, npoint, k, d]
if self.use_xyz:
grouped_points = torch.cat([grouped_points, grouped_xyz],dim=-1) # [B, npoint, k, d+3]
if self.normalize is not None:
if self.normalize =="center":
mean = torch.mean(grouped_points, dim=2, keepdim=True)
if self.normalize =="anchor":
mean = torch.cat([new_points, new_xyz],dim=-1) if self.use_xyz else new_points
mean = mean.unsqueeze(dim=-2) # [B, npoint, 1, d+3]
std = torch.std((grouped_points-mean).reshape(B,-1),dim=-1,keepdim=True).unsqueeze(dim=-1).unsqueeze(dim=-1)
grouped_points = (grouped_points-mean)/(std + 1e-5)
grouped_points = self.affine_alpha*grouped_points + self.affine_beta
new_points = torch.cat([grouped_points, new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)], dim=-1)
return new_xyz, new_points
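    # Illustrative shape trace (added for clarity, not part of the original file), assuming
    # use_xyz=False and normalize="anchor" as in the pointMLP() configuration below.
    # With xyz [B, N, 3] and features [B, N, d]:
    #   fps_idx        -> [B, groups]         (furthest point sampling)
    #   new_xyz        -> [B, groups, 3]
    #   idx (kNN)      -> [B, groups, k]
    #   grouped_points -> [B, groups, k, d]   (normalized against the anchor feature,
    #                                          then rescaled by affine_alpha/affine_beta)
    #   new_points     -> [B, groups, k, 2d]  (grouped features concatenated with the
    #                                          repeated anchor feature)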
class ConvBNReLU1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, bias=True, activation='relu'):
super(ConvBNReLU1D, self).__init__()
self.act = get_activation(activation)
self.net = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(out_channels),
self.act
)
def forward(self, x):
return self.net(x)
class ConvBNReLURes1D(nn.Module):
def __init__(self, channel, kernel_size=1, groups=1, res_expansion=1.0, bias=True, activation='relu'):
super(ConvBNReLURes1D, self).__init__()
self.act = get_activation(activation)
self.net1 = nn.Sequential(
nn.Conv1d(in_channels=channel, out_channels=int(channel * res_expansion),
kernel_size=kernel_size, groups=groups, bias=bias),
nn.BatchNorm1d(int(channel * res_expansion)),
self.act
)
if groups > 1:
self.net2 = nn.Sequential(
nn.Conv1d(in_channels=int(channel * res_expansion), out_channels=channel,
kernel_size=kernel_size, groups=groups, bias=bias),
nn.BatchNorm1d(channel),
self.act,
nn.Conv1d(in_channels=channel, out_channels=channel,
kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel),
)
else:
self.net2 = nn.Sequential(
nn.Conv1d(in_channels=int(channel * res_expansion), out_channels=channel,
kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel)
)
def forward(self, x):
return self.act(self.net2(self.net1(x)) + x)
class PreExtraction(nn.Module):
def __init__(self, channels, out_channels, blocks=1, groups=1, res_expansion=1, bias=True,
activation='relu', use_xyz=True):
"""
input: [b,g,k,d]: output:[b,d,g]
:param channels:
:param blocks:
"""
super(PreExtraction, self).__init__()
in_channels = 3+2*channels if use_xyz else 2*channels
self.transfer = ConvBNReLU1D(in_channels, out_channels, bias=bias, activation=activation)
operation = []
for _ in range(blocks):
operation.append(
ConvBNReLURes1D(out_channels, groups=groups, res_expansion=res_expansion,
bias=bias, activation=activation)
)
self.operation = nn.Sequential(*operation)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
x = self.transfer(x)
batch_size, _, _ = x.size()
x = self.operation(x) # [b, d, k]
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class PosExtraction(nn.Module):
def __init__(self, channels, blocks=1, groups=1, res_expansion=1, bias=True, activation='relu'):
"""
input[b,d,g]; output[b,d,g]
:param channels:
:param blocks:
"""
super(PosExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
ConvBNReLURes1D(channels, groups=groups, res_expansion=res_expansion, bias=bias, activation=activation)
)
self.operation = nn.Sequential(*operation)
def forward(self, x): # [b, d, g]
return self.operation(x)
class Model(nn.Module):
def __init__(self, points=1024, class_num=40, embed_dim=64, groups=1, res_expansion=1.0,
activation="relu", bias=True, use_xyz=True, normalize="center",
dim_expansion=[2, 2, 2, 2], pre_blocks=[2, 2, 2, 2], pos_blocks=[2, 2, 2, 2],
k_neighbors=[32, 32, 32, 32], reducers=[2, 2, 2, 2], **kwargs):
super(Model, self).__init__()
self.stages = len(pre_blocks)
self.class_num = class_num
self.points = points
self.embedding = ConvBNReLU1D(3, embed_dim, bias=bias, activation=activation)
assert len(pre_blocks) == len(k_neighbors) == len(reducers) == len(pos_blocks) == len(dim_expansion), \
"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers."
self.local_grouper_list = nn.ModuleList()
self.pre_blocks_list = nn.ModuleList()
self.pos_blocks_list = nn.ModuleList()
last_channel = embed_dim
anchor_points = self.points
for i in range(len(pre_blocks)):
out_channel = last_channel * dim_expansion[i]
pre_block_num = pre_blocks[i]
pos_block_num = pos_blocks[i]
kneighbor = k_neighbors[i]
reduce = reducers[i]
anchor_points = anchor_points // reduce
# append local_grouper_list
local_grouper = LocalGrouper(last_channel, anchor_points, kneighbor, use_xyz, normalize) # [b,g,k,d]
self.local_grouper_list.append(local_grouper)
# append pre_block_list
pre_block_module = PreExtraction(last_channel, out_channel, pre_block_num, groups=groups,
res_expansion=res_expansion,
bias=bias, activation=activation, use_xyz=use_xyz)
self.pre_blocks_list.append(pre_block_module)
# append pos_block_list
pos_block_module = PosExtraction(out_channel, pos_block_num, groups=groups,
res_expansion=res_expansion, bias=bias, activation=activation)
self.pos_blocks_list.append(pos_block_module)
last_channel = out_channel
self.act = get_activation(activation)
self.classifier = nn.Sequential(
nn.Linear(last_channel, 512),
nn.BatchNorm1d(512),
self.act,
nn.Dropout(0.5),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
self.act,
nn.Dropout(0.5),
nn.Linear(256, self.class_num)
)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
x = self.embedding(x) # B,D,N
for i in range(self.stages):
# Give xyz[b, p, 3] and fea[b, p, d], return new_xyz[b, g, 3] and new_fea[b, g, k, d]
xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]
x = self.pre_blocks_list[i](x) # [b,d,g]
x = self.pos_blocks_list[i](x) # [b,d,g]
x = F.adaptive_max_pool1d(x, 1).squeeze(dim=-1)
x = self.classifier(x)
return x
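# Illustrative shape trace for the default Model configuration (added for clarity, not
# part of the original file): with points=1024, embed_dim=64 and four stages of
# dim_expansion=2 / reducers=2, the per-stage features evolve as
#   [B, 64, 1024] -> [B, 128, 512] -> [B, 256, 256] -> [B, 512, 128] -> [B, 1024, 64],
# and the final adaptive max-pool reduces them to [B, 1024] for the classifier.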
def pointMLP(num_classes=40, **kwargs) -> Model:
return Model(points=1024, class_num=num_classes, embed_dim=64, groups=1, res_expansion=1.0,
activation="relu", bias=False, use_xyz=False, normalize="anchor",
dim_expansion=[2, 2, 2, 2], pre_blocks=[2, 2, 2, 2], pos_blocks=[2, 2, 2, 2],
k_neighbors=[24, 24, 24, 24], reducers=[2, 2, 2, 2], **kwargs)
def pointMLPElite(num_classes=40, **kwargs) -> Model:
return Model(points=1024, class_num=num_classes, embed_dim=32, groups=1, res_expansion=0.25,
activation="relu", bias=False, use_xyz=False, normalize="anchor",
dim_expansion=[2, 2, 2, 1], pre_blocks=[1, 1, 2, 1], pos_blocks=[1, 1, 2, 1],
k_neighbors=[24,24,24,24], reducers=[2, 2, 2, 2], **kwargs)
if __name__ == '__main__':
data = torch.rand(2, 3, 1024)
print("===> testing pointMLP ...")
model = pointMLP()
out = model(data)
print(out.shape)
================================================
FILE: pointMLP/classification_ModelNet40/utils/__init__.py
================================================
"""Useful utils
"""
from .misc import *
from .logger import *
from .progress.progress.bar import Bar as Bar
================================================
FILE: pointMLP/classification_ModelNet40/utils/logger.py
================================================
# A simple torch style logger
# (C) Wei YANG 2017
from __future__ import absolute_import
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
__all__ = ['Logger', 'LoggerMonitor', 'savefig']
def savefig(fname, dpi=None):
    dpi = 150 if dpi is None else dpi
plt.savefig(fname, dpi=dpi)
def plot_overlap(logger, names=None):
    names = logger.names if names is None else names
numbers = logger.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
plt.plot(x, np.asarray(numbers[name]))
return [logger.title + '(' + name + ')' for name in names]
class Logger(object):
'''Save training process to log file with simple plot function.'''
def __init__(self, fpath, title=None, resume=False):
self.file = None
self.resume = resume
        self.title = '' if title is None else title
if fpath is not None:
if resume:
self.file = open(fpath, 'r')
name = self.file.readline()
self.names = name.rstrip().split('\t')
self.numbers = {}
for _, name in enumerate(self.names):
self.numbers[name] = []
for numbers in self.file:
numbers = numbers.rstrip().split('\t')
for i in range(0, len(numbers)):
self.numbers[self.names[i]].append(numbers[i])
self.file.close()
self.file = open(fpath, 'a')
else:
self.file = open(fpath, 'w')
def set_names(self, names):
if self.resume:
pass
# initialize numbers as empty list
self.numbers = {}
self.names = names
for _, name in enumerate(self.names):
self.file.write(name)
self.file.write('\t')
self.numbers[name] = []
self.file.write('\n')
self.file.flush()
def append(self, numbers):
assert len(self.names) == len(numbers), 'Numbers do not match names'
for index, num in enumerate(numbers):
self.file.write("{0:.6f}".format(num))
self.file.write('\t')
self.numbers[self.names[index]].append(num)
self.file.write('\n')
self.file.flush()
def plot(self, names=None):
        names = self.names if names is None else names
numbers = self.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
plt.plot(x, np.asarray(numbers[name]))
plt.legend([self.title + '(' + name + ')' for name in names])
plt.grid(True)
def close(self):
if self.file is not None:
self.file.close()
class LoggerMonitor(object):
'''Load and visualize multiple logs.'''
def __init__ (self, paths):
        '''paths is a dictionary of {name: filepath} pairs'''
self.loggers = []
for title, path in paths.items():
logger = Logger(path, title=title, resume=True)
self.loggers.append(logger)
def plot(self, names=None):
plt.figure()
plt.subplot(121)
legend_text = []
for logger in self.loggers:
legend_text += plot_overlap(logger, names)
plt.legend(legend_text, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.grid(True)
if __name__ == '__main__':
# # Example
# logger = Logger('test.txt')
# logger.set_names(['Train loss', 'Valid loss','Test loss'])
# length = 100
# t = np.arange(length)
# train_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
# valid_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
# test_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
# for i in range(0, length):
# logger.append([train_loss[i], valid_loss[i], test_loss[i]])
# logger.plot()
# Example: logger monitor
paths = {
'resadvnet20':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet20/log.txt',
'resadvnet32':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet32/log.txt',
'resadvnet44':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet44/log.txt',
}
field = ['Valid Acc.']
monitor = LoggerMonitor(paths)
monitor.plot(names=field)
savefig('test.eps')
================================================
FILE: pointMLP/classification_ModelNet40/utils/misc.py
================================================
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch
import shutil
import numpy as np
import random
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter',
'progress_bar','save_model',"save_args","set_seed", "IOStream", "cal_loss"]
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
# for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
# sys.stdout.write(' ')
# Go back to the center of the bar.
# for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
# sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
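# Illustrative worked examples (added for clarity, not part of the original file): the
# formatter keeps at most the two most significant non-zero units, e.g.
#   format_time(3661.5) == '1h1m'    (seconds/millis are dropped once two units print)
#   format_time(0.5)    == '500ms'
#   format_time(0.0)    == '0ms'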
def save_model(net, epoch, path, acc, is_best, **kwargs):
state = {
'net': net.state_dict(),
'epoch': epoch,
'acc': acc
}
for key, value in kwargs.items():
state[key] = value
filepath = os.path.join(path, "last_checkpoint.pth")
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(path, 'best_checkpoint.pth'))
def save_args(args):
    with open(os.path.join(args.checkpoint, 'args.txt'), "w") as file:
        for k, v in vars(args).items():
            file.write(f"{k}:\t {v}\n")
def set_seed(seed=None):
if seed is None:
return
random.seed(seed)
os.environ['PYTHONHASHSEED'] = ("%s" % seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# append text to a log file while echoing it to stdout
class IOStream():
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text+'\n')
self.f.flush()
def close(self):
self.f.close()
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
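# A small worked example (hypothetical `_cal_loss_demo` helper, not part of the original
# file): with eps = 0.2 and n_class = 4, the true class gets 1 - eps = 0.8 and each other
# class gets eps / (n_class - 1) = 0.2 / 3 ~= 0.0667, so every target row still sums to 1.
# Note this differs slightly from F.cross_entropy(..., label_smoothing=0.2), which spreads
# eps over all n_class classes including the true one.
def _cal_loss_demo():
    pred = torch.randn(5, 4)               # logits for 5 samples, 4 classes
    gold = torch.tensor([0, 1, 2, 3, 0])
    smoothed = cal_loss(pred, gold, smoothing=True)
    plain = cal_loss(pred, gold, smoothing=False)
    assert smoothed.shape == () and plain.shape == ()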
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/.gitignore
================================================
*.pyc
*.egg-info
build/
dist/
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/LICENSE
================================================
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/MANIFEST.in
================================================
include README.rst LICENSE
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/README.rst
================================================
Easy progress reporting for Python
==================================
|pypi|
|demo|
.. |pypi| image:: https://img.shields.io/pypi/v/progress.svg
.. |demo| image:: https://raw.github.com/verigak/progress/master/demo.gif
:alt: Demo
Bars
----
There are 7 progress bars to choose from:
- ``Bar``
- ``ChargingBar``
- ``FillingSquaresBar``
- ``FillingCirclesBar``
- ``IncrementalBar``
- ``PixelBar``
- ``ShadyBar``
To use them, just call ``next`` to advance and ``finish`` to finish:
.. code-block:: python
from progress.bar import Bar
bar = Bar('Processing', max=20)
for i in range(20):
# Do some work
bar.next()
bar.finish()
The result will be a bar like the following: ::
Processing |############# | 42/100
To simplify the common case where the work is done in an iterator, you can
use the ``iter`` method:
.. code-block:: python
for i in Bar('Processing').iter(it):
# Do some work
Progress bars are very customizable; you can change their width, their fill
character, their suffix, and more:
.. code-block:: python
bar = Bar('Loading', fill='@', suffix='%(percent)d%%')
This will produce a bar like the following: ::
Loading |@@@@@@@@@@@@@ | 42%
You can use a number of template arguments in ``message`` and ``suffix``:
==========  ========================================================
Name        Value
==========  ========================================================
index       current value
max         maximum value
remaining   max - index
progress    index / max
percent     progress * 100
avg         simple moving average time per item (in seconds)
elapsed     elapsed time in seconds
elapsed_td  elapsed as a timedelta (useful for printing as a string)
eta         avg * remaining
eta_td      eta as a timedelta (useful for printing as a string)
==========  ========================================================
Instead of passing all configuration options on instantiation, you can create
your custom subclass:
.. code-block:: python
class FancyBar(Bar):
message = 'Loading'
fill = '*'
suffix = '%(percent).1f%% - %(eta)ds'
You can also override any of the arguments or create your own:
.. code-block:: python
class SlowBar(Bar):
suffix = '%(remaining_hours)d hours remaining'
@property
def remaining_hours(self):
return self.eta // 3600
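As a concrete illustration (not from the original README), the template arguments in the
table above can be combined freely in ``suffix``; for example, a bar that shows both the
percentage and the ETA as a timedelta:
.. code-block:: python
    bar = Bar('Processing', max=20, suffix='%(percent)d%% - eta %(eta_td)s')
    for i in range(20):
        # Do some work
        bar.next()
    bar.finish()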
Spinners
========
For actions with an unknown number of steps you can use a spinner:
.. code-block:: python
from progress.spinner import Spinner
spinner = Spinner('Loading ')
while state != 'FINISHED':
# Do some work
spinner.next()
There are 5 predefined spinners:
- ``Spinner``
- ``PieSpinner``
- ``MoonSpinner``
- ``LineSpinner``
- ``PixelSpinner``
Other
=====
There are a number of other classes available too, please check the source or
subclass one of them to create your own.
License
=======
progress is licensed under ISC
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/__init__.py
================================================
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.3'
class Infinite(object):
file = stderr
sma_window = 10 # Simple Moving Average window
def __init__(self, *args, **kwargs):
self.index = 0
self.start_ts = time()
self.avg = 0
self._ts = self.start_ts
self._xput = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def elapsed(self):
return int(time() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update_avg(self, n, dt):
if n > 0:
self._xput.append(dt / n)
self.avg = sum(self._xput) / len(self._xput)
def update(self):
pass
def start(self):
pass
def finish(self):
pass
def next(self, n=1):
now = time()
dt = now - self._ts
self.update_avg(n, dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
try:
for x in it:
yield x
self.next()
finally:
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
try:
for x in it:
yield x
self.next()
finally:
self.finish()
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/bar.py
================================================
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
width = 32
message = ''
suffix = '%(index)d/%(max)d'
bar_prefix = ' |'
bar_suffix = '| '
empty_fill = ' '
fill = '#'
hide_cursor = True
def update(self):
filled_length = int(self.width * self.progress)
empty_length = self.width - filled_length
message = self.message % self
bar = self.fill * filled_length
empty = self.empty_fill * empty_length
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
suffix])
self.writeln(line)
class ChargingBar(Bar):
suffix = '%(percent)d%%'
bar_prefix = ' '
bar_suffix = ' '
empty_fill = '∙'
fill = '█'
class FillingSquaresBar(ChargingBar):
empty_fill = '▢'
fill = '▣'
class FillingCirclesBar(ChargingBar):
empty_fill = '◯'
fill = '◉'
class IncrementalBar(Bar):
phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
def update(self):
nphases = len(self.phases)
filled_len = self.width * self.progress
nfull = int(filled_len) # Number of full chars
phase = int((filled_len - nfull) * nphases) # Phase of last char
nempty = self.width - nfull # Number of empty chars
message = self.message % self
bar = self.phases[-1] * nfull
current = self.phases[phase] if phase > 0 else ''
empty = self.empty_fill * max(0, nempty - len(current))
suffix = self.suffix % self
line = ''.join([message, self.bar_prefix, bar, current, empty,
self.bar_suffix, suffix])
self.writeln(line)
class PixelBar(IncrementalBar):
phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿')
class ShadyBar(IncrementalBar):
phases = (' ', '░', '▒', '▓', '█')
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/counter.py
================================================
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
class Pie(Stack):
phases = ('○', '◔', '◑', '◕', '●')
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/helpers.py
================================================
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/progress/spinner.py
================================================
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Infinite
from .helpers import WriteMixin
class Spinner(WriteMixin, Infinite):
message = ''
phases = ('-', '\\', '|', '/')
hide_cursor = True
def update(self):
i = self.index % len(self.phases)
self.write(self.phases[i])
class PieSpinner(Spinner):
phases = ['◷', '◶', '◵', '◴']
class MoonSpinner(Spinner):
phases = ['◑', '◒', '◐', '◓']
class LineSpinner(Spinner):
phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
class PixelSpinner(Spinner):
phases = ['⣾','⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽']
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/setup.py
================================================
#!/usr/bin/env python
from setuptools import setup
import progress
setup(
name='progress',
version=progress.__version__,
description='Easy to use progress bars',
long_description=open('README.rst').read(),
author='Giorgos Verigakis',
author_email='verigak@gmail.com',
url='http://github.com/verigak/progress/',
license='ISC',
packages=['progress'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
================================================
FILE: pointMLP/classification_ModelNet40/utils/progress/test_progress.py
================================================
#!/usr/bin/env python
from __future__ import print_function
import random
import time
from progress.bar import (Bar, ChargingBar, FillingSquaresBar,
FillingCirclesBar, IncrementalBar, PixelBar,
ShadyBar)
from progress.spinner import (Spinner, PieSpinner, MoonSpinner, LineSpinner,
PixelSpinner)
from progress.counter import Counter, Countdown, Stack, Pie
def sleep():
t = 0.01
t += t * random.uniform(-0.1, 0.1) # Add some variance
time.sleep(t)
for bar_cls in (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar):
suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
bar = bar_cls(bar_cls.__name__, suffix=suffix)
for i in bar.iter(range(200)):
sleep()
for bar_cls in (IncrementalBar, PixelBar, ShadyBar):
suffix = '%(percent)d%% [%(elapsed_td)s / %(eta)d / %(eta_td)s]'
bar = bar_cls(bar_cls.__name__, suffix=suffix)
for i in bar.iter(range(200)):
sleep()
for spin in (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner):
for i in spin(spin.__name__ + ' ').iter(range(100)):
sleep()
print()
for singleton in (Counter, Countdown, Stack, Pie):
for i in singleton(singleton.__name__ + ' ').iter(range(100)):
sleep()
print()
bar = IncrementalBar('Random', suffix='%(index)d')
for i in range(100):
bar.goto(random.randint(0, 100))
sleep()
bar.finish()
================================================
FILE: pointMLP/classification_ModelNet40/voting.py
================================================
import argparse
import os
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import progress_bar, IOStream
from data import ModelNet40
import sklearn.metrics as metrics
from helper import cal_loss
import numpy as np
import torch.nn.functional as F
model_names = sorted(name for name in models.__dict__
if callable(models.__dict__[name]))
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser('training')
parser.add_argument('-c', '--checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--msg', type=str, help='message after checkpoint')
parser.add_argument('--batch_size', type=int, default=32, help='batch size in training')
    parser.add_argument('--model', default='model31A', help='model name [default: model31A]')
parser.add_argument('--num_classes', default=40, type=int, choices=[10, 40], help='training on ModelNet10/40')
parser.add_argument('--num_points', type=int, default=1024, help='Point Number')
parser.add_argument('--seed', type=int, help='random seed (default: 1)')
# Voting evaluation, referring: https://github.com/CVMI-Lab/PAConv/blob/main/obj_cls/eval_voting.py
    parser.add_argument('--NUM_REPEAT', type=int, default=300)
parser.add_argument('--NUM_VOTE', type=int, default=10)
parser.add_argument('--validate', action='store_true', help='Validate the original testing result.')
return parser.parse_args()
class PointcloudScale(object): # input random scaling
def __init__(self, scale_low=2. / 3., scale_high=3. / 2.):
self.scale_low = scale_low
self.scale_high = scale_high
def __call__(self, pc):
bsize = pc.size()[0]
for i in range(bsize):
xyz1 = np.random.uniform(low=self.scale_low, high=self.scale_high, size=[3])
pc[i, :, 0:3] = torch.mul(pc[i, :, 0:3], torch.from_numpy(xyz1).float().cuda())
return pc
def main():
args = parse_args()
print(f"args: {args}")
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
if args.seed is None:
args.seed = np.random.randint(1, 10000)
print(f"random seed is set to {args.seed}, the speed will slow down.")
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.cuda.manual_seed(args.seed)
torch.set_printoptions(10)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(args.seed)
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
print(f"==> Using device: {device}")
if args.msg is None:
message = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
else:
message = "-" + args.msg
args.checkpoint = 'checkpoints/' + args.model + message
print('==> Preparing data..')
test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=4,
batch_size=args.batch_size // 2, shuffle=False, drop_last=False)
# Model
print('==> Building model..')
net = models.__dict__[args.model]()
criterion = cal_loss
net = net.to(device)
checkpoint_path = os.path.join(args.checkpoint, 'best_checkpoint.pth')
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
# criterion = criterion.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(checkpoint['net'])
if args.validate:
test_out = validate(net, test_loader, criterion, device)
print(f"Vanilla out: {test_out}")
print(f"Note 1: Please also load the random seed parameter (if forgot, see out.txt).\n"
f"Note 2: This result may vary little on different GPUs (and number of GPUs), we tested 2080Ti, P100, and V100.\n"
f"[note : Original result is achieved with V100 GPUs.]\n\n\n")
# Interestingly, we get original best_test_acc on 4 V100 gpus, but this model is trained on one V100 gpu.
# On different GPUs, and different number of GPUs, both OA and mean_acc vary a little.
# Also, the batch size also affect the testing results, could not understand.
print(f"===> start voting evaluation...")
voting(net, test_loader, device, args)
def validate(net, testloader, criterion, device):
net.eval()
test_loss = 0
correct = 0
total = 0
test_true = []
test_pred = []
time_cost = datetime.datetime.now()
with torch.no_grad():
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = net(data)
loss = criterion(logits, label)
test_loss += loss.item()
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
return {
"loss": float("%.3f" % (test_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100. * metrics.accuracy_score(test_true, test_pred))),
"acc_avg": float("%.3f" % (100. * metrics.balanced_accuracy_score(test_true, test_pred))),
"time": time_cost
}
def voting(net, testloader, device, args):
name = '/evaluate_voting' + str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S')) + 'seed_' + str(
args.seed) + '.log'
io = IOStream(args.checkpoint + name)
io.cprint(str(args))
net.eval()
best_acc = 0
best_mean_acc = 0
# pointscale = PointcloudScale(scale_low=0.8, scale_high=1.18) # set the range of scaling
# pointscale = PointcloudScale()
pointscale = PointcloudScale(scale_low=0.85, scale_high=1.15)
    for i in range(args.NUM_REPEAT):
test_true = []
test_pred = []
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
pred = 0
for v in range(args.NUM_VOTE):
new_data = data
# batch_size = data.size()[0]
if v > 0:
new_data.data = pointscale(new_data.data)
with torch.no_grad():
pred += F.softmax(net(new_data.permute(0, 2, 1)), dim=1) # sum 10 preds
pred /= args.NUM_VOTE # avg the preds!
label = label.view(-1)
pred_choice = pred.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(pred_choice.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = 100. * metrics.accuracy_score(test_true, test_pred)
test_mean_acc = 100. * metrics.balanced_accuracy_score(test_true, test_pred)
if test_acc > best_acc:
best_acc = test_acc
if test_mean_acc > best_mean_acc:
best_mean_acc = test_mean_acc
outstr = 'Voting %d, test acc: %.3f, test mean acc: %.3f, [current best(all_acc: %.3f mean_acc: %.3f)]' % \
(i, test_acc, test_mean_acc, best_acc, best_mean_acc)
io.cprint(outstr)
    final_outstr = 'Final voting test acc: %.6f' % best_acc  # best_acc is already a percentage
io.cprint(final_outstr)
if __name__ == '__main__':
main()
================================================
FILE: pointnet2_pyt/.gitignore
================================================
__pycache__
*.pth*
.autoenv*
runs
build
checkpoints
*.prof
.lvimrc
.vimtags
.ccls
.ccls-cache/
dist/
pointnet2.egg-info/
*.zip
*.so
.tox/
.mypy_cache
**/*.pyc
================================================
FILE: pointnet2_pyt/.pre-commit-config.yaml
================================================
exclude: 'build|egg-info|dist'
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v1.2.3
hooks:
- id: trailing-whitespace
- id: check-added-large-files
- id: end-of-file-fixer
- repo: https://github.com/ambv/black
rev: stable
hooks:
- id: black
language_version: python3.6
- repo: local
hooks:
- id: clang-format
name: Run clang-format
entry: clang-format --style google -i
types: [text]
files: '.*\.cpp$|.*\.h$|.*\.cu$|.*\.hpp$'
language: system
================================================
FILE: pointnet2_pyt/.travis.yml
================================================
dist: trusty
language: python
python:
- "3.6"
install:
- pip install black
script:
- black --check .
- find . -not -path '*/\.*' | grep -E ".*\.cpp$|.*\.h$|.*\.cu$|.*\.hpp$" | xargs -I {} bash -c "diff -u <(cat {}) <(clang-format --style google {})"
================================================
FILE: pointnet2_pyt/MANIFEST.in
================================================
graft pointnet2/_ext-src/include/
================================================
FILE: pointnet2_pyt/README.rst
================================================
Pointnet2/Pointnet++ PyTorch
============================
* Implementation of Pointnet2/Pointnet++ written in `PyTorch <http://pytorch.org>`_.
* Supports Multi-GPU via `nn.DataParallel <https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel>`_.
* Supports PyTorch version >= 1.0.0. Use `v1.0 <https://github.com/erikwijmans/Pointnet2_PyTorch/tree/v1.0>`_
  for support of older versions of PyTorch.
See the official code release for the paper (in tensorflow), `charlesq34/pointnet2 <https://github.com/charlesq34/pointnet2>`_,
for official model definitions and hyper-parameters.
The custom ops used by Pointnet++ are currently **ONLY** supported on the GPU using CUDA.
Setup
-----
* Install ``python`` -- This repo is tested with ``2.7``, ``3.5``, and ``3.6``
* Install dependencies
::
pip install -r requirements.txt
* Building `_ext` module
::
python setup.py build_ext --inplace
* Optionally, you can also install this repo as a package
::
pip install -e .
Example training
------------------
Two training examples are provided by ``pointnet2/train/train_sem_seg.py`` and ``pointnet2/train/train_cls.py``.
The datasets for both will be downloaded automatically by default.
They can be run via
::
python -m pointnet2.train.train_cls
python -m pointnet2.train.train_sem_seg
Both scripts will print training progress after every epoch to the command line. Use the ``--visdom`` flag to
enable logging to visdom and more detailed logging of training progress.
Contributing
------------
This repository uses `black <https://github.com/ambv/black>`_ for linting and style enforcement on python code.
For c++/cuda code,
`clang-format <https://clang.llvm.org/docs/ClangFormat.html>`_ is used for style. The simplest way to
comply with style is via `pre-commit <https://pre-commit.com/>`_
::
pip install pre-commit
pre-commit install
Citation
--------
::
@article{pytorchpointnet++,
Author = {Erik Wijmans},
Title = {Pointnet++ Pytorch},
Journal = {https://github.com/erikwijmans/Pointnet2_PyTorch},
Year = {2018}
}
@inproceedings{qi2017pointnet++,
title={Pointnet++: Deep hierarchical feature learning on point sets in a metric space},
author={Qi, Charles Ruizhongtai and Yi, Li and Su, Hao and Guibas, Leonidas J},
booktitle={Advances in Neural Information Processing Systems},
pages={5099--5108},
year={2017}
}
================================================
FILE: pointnet2_pyt/UNLICENSE
================================================
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
================================================
FILE: pointnet2_pyt/__init__.py
================================================
================================================
FILE: pointnet2_pyt/pointnet2/__init__.py
================================================
'''
Description:
Autor: Jiachen Sun
Date: 2022-02-16 22:23:16
LastEditors: Jiachen Sun
LastEditTime: 2022-02-24 23:12:32
'''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
__version__ = "2.1.1"
try:
__POINTNET2_SETUP__
except NameError:
__POINTNET2_SETUP__ = False
if not __POINTNET2_SETUP__:
from pointnet2_pyt.pointnet2 import utils
from pointnet2_pyt.pointnet2 import data
from pointnet2_pyt.pointnet2 import models
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/ball_query.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample);
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/cuda_utils.h
================================================
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cmath>
#include <vector>
#define TOTAL_THREADS 512
inline int opt_n_threads(int work_size) {
  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
inline dim3 opt_block_config(int x, int y) {
const int x_threads = opt_n_threads(x);
const int y_threads =
max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
dim3 block_config(x_threads, y_threads, 1);
return block_config;
}
#define CUDA_CHECK_ERRORS() \
do { \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
__FILE__); \
exit(-1); \
} \
} while (0)
#endif
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/group_points.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor group_points(at::Tensor points, at::Tensor idx);
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/interpolate.h
================================================
#pragma once
#include <torch/extension.h>
#include <vector>
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight);
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m);
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/sampling.h
================================================
#pragma once
#include <torch/extension.h>
at::Tensor gather_points(at::Tensor points, at::Tensor idx);
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/include/utils.h
================================================
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#define CHECK_CUDA(x) \
do { \
AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
AT_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \
} while (0)
#define CHECK_IS_INT(x) \
do { \
AT_CHECK(x.scalar_type() == at::ScalarType::Int, \
#x " must be an int tensor"); \
} while (0)
#define CHECK_IS_FLOAT(x) \
do { \
AT_CHECK(x.scalar_type() == at::ScalarType::Float, \
#x " must be a float tensor"); \
} while (0)
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/ball_query.cpp
================================================
#include "ball_query.h"
#include "utils.h"
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx);
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample) {
CHECK_CONTIGUOUS(new_xyz);
CHECK_CONTIGUOUS(xyz);
CHECK_IS_FLOAT(new_xyz);
CHECK_IS_FLOAT(xyz);
if (new_xyz.type().is_cuda()) {
CHECK_CUDA(xyz);
}
at::Tensor idx =
torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
at::device(new_xyz.device()).dtype(at::ScalarType::Int));
if (new_xyz.type().is_cuda()) {
query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
                                    radius, nsample, new_xyz.data<float>(),
                                    xyz.data<float>(), idx.data<int>());
} else {
AT_CHECK(false, "CPU not supported");
}
return idx;
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/ball_query_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/bindings.cpp
================================================
#include "ball_query.h"
#include "group_points.h"
#include "interpolate.h"
#include "sampling.h"
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("gather_points", &gather_points);
m.def("gather_points_grad", &gather_points_grad);
m.def("furthest_point_sampling", &furthest_point_sampling);
m.def("three_nn", &three_nn);
m.def("three_interpolate", &three_interpolate);
m.def("three_interpolate_grad", &three_interpolate_grad);
m.def("ball_query", &ball_query);
m.def("group_points", &group_points);
m.def("group_points_grad", &group_points_grad);
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/group_points.cpp
================================================
#include "group_points.h"
#include "utils.h"
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out);
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points);
at::Tensor group_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
                                idx.size(1), idx.size(2), points.data<float>(),
                                idx.data<int>(), output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
group_points_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
        grad_out.data<float>(), idx.data<int>(), output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/group_points_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * npoints * nsample * c;
idx += batch_index * npoints * nsample;
grad_points += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
atomicAdd(grad_points + l * n + ii,
grad_out[(l * npoints + j) * nsample + k]);
}
}
}
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
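// The forward kernel is a pure gather, out[b, c, j, k] =
// points[b, c, idx[b, j, k]]; the backward kernel is its transpose, a
// scatter-add into grad_points. atomicAdd is needed because the same source
// point can be indexed by many (j, k) groups, so several threads may
// accumulate into one grad_points slot concurrently.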
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/interpolate.cpp
================================================
#include "interpolate.h"
#include "utils.h"
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx);
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out);
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points);
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
CHECK_CONTIGUOUS(unknowns);
CHECK_CONTIGUOUS(knows);
CHECK_IS_FLOAT(unknowns);
CHECK_IS_FLOAT(knows);
if (unknowns.type().is_cuda()) {
CHECK_CUDA(knows);
}
at::Tensor idx =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Int));
at::Tensor dist2 =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Float));
if (unknowns.type().is_cuda()) {
three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
                            unknowns.data<float>(), knows.data<float>(),
                            dist2.data<float>(), idx.data<int>());
} else {
AT_CHECK(false, "CPU not supported");
}
return {dist2, idx};
}
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
three_interpolate_kernel_wrapper(
points.size(0), points.size(1), points.size(2), idx.size(1),
        points.data<float>(), idx.data<int>(), weight.data<float>(),
        output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), m},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
three_interpolate_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
        grad_out.data<float>(), idx.data<int>(), weight.data<float>(),
        output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/interpolate_gpu.cu
================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: unknown(b, n, 3) known(b, m, 3)
// output: dist2(b, n, 3), idx(b, n, 3)
__global__ void three_nn_kernel(int b, int n, int m,
const float *__restrict__ unknown,
const float *__restrict__ known,
float *__restrict__ dist2,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
unknown += batch_index * n * 3;
known += batch_index * m * 3;
dist2 += batch_index * n * 3;
idx += batch_index * n * 3;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < n; j += stride) {
float ux = unknown[j * 3 + 0];
float uy = unknown[j * 3 + 1];
float uz = unknown[j * 3 + 2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2;
besti3 = besti2;
best2 = best1;
besti2 = besti1;
best1 = d;
besti1 = k;
} else if (d < best2) {
best3 = best2;
besti3 = besti2;
best2 = d;
besti2 = k;
} else if (d < best3) {
best3 = d;
besti3 = k;
}
}
dist2[j * 3 + 0] = best1;
dist2[j * 3 + 1] = best2;
dist2[j * 3 + 2] = best3;
idx[j * 3 + 0] = besti1;
idx[j * 3 + 1] = besti2;
idx[j * 3 + 2] = besti3;
}
}
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
                                                      dist2, idx);
CUDA_CHECK_ERRORS();
}
// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
// output: out(b, c, n)
__global__ void three_interpolate_kernel(int b, int c, int m, int n,
const float *__restrict__ points,
const int *__restrict__ idx,
const float *__restrict__ weight,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * m * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
out += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
points[l * m + i3] * w3;
}
}
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, m, n, points, idx, weight, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
// output: grad_points(b, c, m)
__global__ void three_interpolate_grad_kernel(
int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * n * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
grad_points += batch_index * m * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
}
}
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, n, m, grad_out, idx, weight, grad_points);
CUDA_CHECK_ERRORS();
}
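// Together these kernels implement the feature-propagation primitive:
// three_nn finds, per "unknown" point, its three nearest "known" points with
// a running top-3 scan, and three_interpolate blends their features using
// caller-supplied weights. The Python side (PointnetFPModule) derives those
// weights by inverse-distance normalization, roughly
//
//   w_i = (1 / d_i) / sum_j (1 / d_j),  i, j in {1, 2, 3},
//
// so nearer neighbors dominate and the three weights sum to one per point.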
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/sampling.cpp
================================================
#include "sampling.h"
#include "utils.h"
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out);
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points);
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs);
at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
                                 idx.size(1), points.data<float>(),
                                 idx.data<int>(), output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
                                      idx.size(1), grad_out.data<float>(),
                                      idx.data<int>(), output.data<float>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
CHECK_CONTIGUOUS(points);
CHECK_IS_FLOAT(points);
at::Tensor output =
torch::zeros({points.size(0), nsamples},
at::device(points.device()).dtype(at::ScalarType::Int));
at::Tensor tmp =
torch::full({points.size(0), points.size(1)}, 1e10,
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
furthest_point_sampling_kernel_wrapper(
        points.size(0), points.size(1), nsamples, points.data<float>(),
        tmp.data<float>(), output.data<int>());
} else {
AT_CHECK(false, "CPU not supported");
}
return output;
}
================================================
FILE: pointnet2_pyt/pointnet2/_ext-src/src/sampling_gpu.cu
================================================
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
__global__ void gather_points_kernel(int b, int c, int n, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
}
}
}
}
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out) {
  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
                                                             points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
atomicAdd(grad_points + (i * c + l) * n + a,
grad_out[(i * c + l) * m + j]);
}
}
}
}
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points) {
  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
                              at::cuda::getCurrentCUDAStream()>>>(
b, c, n, npoints, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
// Pairwise reduction step: keep the larger of the two tracked distances (and
// its point index) in slot idx1.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Output: idxs(b, m)
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
if (mag <= 1e-3) continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs) {
unsigned int n_threads = opt_n_threads(n);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  switch (n_threads) {
    case 512:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_kernel<256>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_kernel<128>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_kernel<64>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_kernel<32>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_kernel<16>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_kernel<8>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_kernel<4>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_kernel<2>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_kernel<1>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      furthest_point_sampling_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }
CUDA_CHECK_ERRORS();
}
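// Sketch of the sampling loop above: each block owns one cloud, and temp[k]
// tracks the squared distance from point k to the set selected so far (the
// caller initializes it to 1e10). Each iteration, every thread scans a
// strided slice of the n points, shrinking temp[k] against the last-selected
// point while keeping its local farthest candidate; the unrolled
// shared-memory reduction ladder of __update calls then selects the global
// argmax, which becomes the next sample.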
================================================
FILE: pointnet2_pyt/pointnet2/data/.gitignore
================================================
indoor3d_sem_seg_hdf5_data
modelnet40_ply_hdf5_2048
================================================
FILE: pointnet2_pyt/pointnet2/data/Indoor3DSemSegLoader.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.utils.data as data
import numpy as np
import os
import h5py
import subprocess
import shlex
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_data_files(list_filename):
with open(list_filename) as f:
return [line.rstrip() for line in f]
def _load_data_file(name):
    # Open read-only and close the handle once the arrays are materialized.
    with h5py.File(name, "r") as f:
        data = f["data"][:]
        label = f["label"][:]
    return data, label
class Indoor3DSemSeg(data.Dataset):
def __init__(self, num_points, train=True, download=True, data_precent=1.0):
super().__init__()
self.data_precent = data_precent
self.folder = "indoor3d_sem_seg_hdf5_data"
self.data_dir = os.path.join(BASE_DIR, self.folder)
self.url = (
"https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip"
)
if download and not os.path.exists(self.data_dir):
zipfile = os.path.join(BASE_DIR, os.path.basename(self.url))
subprocess.check_call(
shlex.split("curl {} -o {}".format(self.url, zipfile))
)
subprocess.check_call(
shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR))
)
subprocess.check_call(shlex.split("rm {}".format(zipfile)))
self.train, self.num_points = train, num_points
all_files = _get_data_files(os.path.join(self.data_dir, "all_files.txt"))
room_filelist = _get_data_files(
os.path.join(self.data_dir, "room_filelist.txt")
)
data_batchlist, label_batchlist = [], []
for f in all_files:
data, label = _load_data_file(os.path.join(BASE_DIR, f))
data_batchlist.append(data)
label_batchlist.append(label)
data_batches = np.concatenate(data_batchlist, 0)
labels_batches = np.concatenate(label_batchlist, 0)
test_area = "Area_5"
train_idxs, test_idxs = [], []
for i, room_name in enumerate(room_filelist):
if test_area in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
if self.train:
self.points = data_batches[train_idxs, ...]
self.labels = labels_batches[train_idxs, ...]
else:
self.points = data_batches[test_idxs, ...]
self.labels = labels_batches[test_idxs, ...]
def __getitem__(self, idx):
pt_idxs = np.arange(0, self.num_points)
np.random.shuffle(pt_idxs)
current_points = torch.from_numpy(self.points[idx, pt_idxs].copy()).type(
torch.FloatTensor
)
current_labels = torch.from_numpy(self.labels[idx, pt_idxs].copy()).type(
torch.LongTensor
)
return current_points, current_labels
def __len__(self):
return int(self.points.shape[0] * self.data_precent)
def set_num_points(self, pts):
self.num_points = pts
def randomize(self):
pass
if __name__ == "__main__":
    dset = Indoor3DSemSeg(16, train=True)
print(dset[0])
print(len(dset))
dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)
for i, data in enumerate(dloader, 0):
inputs, labels = data
if i == len(dloader) - 1:
print(inputs.size())
================================================
FILE: pointnet2_pyt/pointnet2/data/ModelNet40Loader.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.utils.data as data
import numpy as np
import os
import h5py
import subprocess
import shlex
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_data_files(list_filename):
with open(list_filename) as f:
return [line.rstrip()[5:] for line in f]
def _load_data_file(name):
    # Open read-only and close the handle once the arrays are materialized.
    with h5py.File(name, "r") as f:
        data = f["data"][:]
        label = f["label"][:]
    return data, label
class ModelNet40Cls(data.Dataset):
def __init__(self, num_points, transforms=None, train=True, download=True):
super().__init__()
self.transforms = transforms
self.folder = "modelnet40_ply_hdf5_2048"
self.data_dir = os.path.join(BASE_DIR, self.folder)
self.url = "https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip"
if download and not os.path.exists(self.data_dir):
zipfile = os.path.join(BASE_DIR, os.path.basename(self.url))
subprocess.check_call(
shlex.split("curl {} -o {}".format(self.url, zipfile))
)
subprocess.check_call(
shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR))
)
subprocess.check_call(shlex.split("rm {}".format(zipfile)))
self.train = train
if self.train:
self.files = _get_data_files(os.path.join(self.data_dir, "train_files.txt"))
else:
self.files = _get_data_files(os.path.join(self.data_dir, "test_files.txt"))
point_list, label_list = [], []
for f in self.files:
points, labels = _load_data_file(os.path.join(BASE_DIR, f))
point_list.append(points)
label_list.append(labels)
self.points = np.concatenate(point_list, 0)
self.labels = np.concatenate(label_list, 0)
self.set_num_points(num_points)
def __getitem__(self, idx):
pt_idxs = np.arange(0, self.num_points)
np.random.shuffle(pt_idxs)
current_points = self.points[idx, pt_idxs].copy()
label = torch.from_numpy(self.labels[idx]).type(torch.LongTensor)
if self.transforms is not None:
current_points = self.transforms(current_points)
return current_points, label
def __len__(self):
return self.points.shape[0]
def set_num_points(self, pts):
self.num_points = min(self.points.shape[1], pts)
def randomize(self):
pass
if __name__ == "__main__":
from torchvision import transforms
import data_utils as d_utils
transforms = transforms.Compose(
[
d_utils.PointcloudToTensor(),
d_utils.PointcloudRotate(axis=np.array([1, 0, 0])),
d_utils.PointcloudScale(),
d_utils.PointcloudTranslate(),
d_utils.PointcloudJitter(),
]
)
dset = ModelNet40Cls(16, train=True, transforms=transforms)
print(dset[0][0])
print(dset[0][1])
print(len(dset))
dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)
================================================
FILE: pointnet2_pyt/pointnet2/data/__init__.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from .ModelNet40Loader import ModelNet40Cls
from .Indoor3DSemSegLoader import Indoor3DSemSeg
================================================
FILE: pointnet2_pyt/pointnet2/data/data_utils.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import numpy as np
def angle_axis(angle, axis):
    # type: (float, np.ndarray) -> torch.Tensor
    r"""Returns a 3x3 rotation matrix that performs a rotation around axis by angle
Parameters
----------
angle : float
Angle to rotate by
axis: np.ndarray
Axis to rotate about
Returns
-------
torch.Tensor
3x3 rotation matrix
"""
u = axis / np.linalg.norm(axis)
cosval, sinval = np.cos(angle), np.sin(angle)
# yapf: disable
cross_prod_mat = np.array([[0.0, -u[2], u[1]],
[u[2], 0.0, -u[0]],
[-u[1], u[0], 0.0]])
R = torch.from_numpy(
cosval * np.eye(3)
+ sinval * cross_prod_mat
+ (1.0 - cosval) * np.outer(u, u)
)
# yapf: enable
return R.float()
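# angle_axis above is Rodrigues' rotation formula,
#   R = cos(t) * I + sin(t) * [u]_x + (1 - cos(t)) * u u^T,
# where [u]_x is the skew-symmetric cross-product matrix of the unit axis u.
# Quick illustrative check: rotating x by 90 degrees about z yields y,
#   R = angle_axis(np.pi / 2, np.array([0.0, 0.0, 1.0]))
#   R @ torch.tensor([1.0, 0.0, 0.0])  # ~ [0, 1, 0]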
class PointcloudScale(object):
def __init__(self, lo=0.8, hi=1.25):
self.lo, self.hi = lo, hi
def __call__(self, points):
scaler = np.random.uniform(self.lo, self.hi)
points[:, 0:3] *= scaler
return points
class PointcloudRotate(object):
def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
self.axis = axis
def __call__(self, points):
rotation_angle = np.random.uniform() * 2 * np.pi
rotation_matrix = angle_axis(rotation_angle, self.axis)
normals = points.size(1) > 3
if not normals:
return torch.matmul(points, rotation_matrix.t())
else:
pc_xyz = points[:, 0:3]
pc_normals = points[:, 3:]
points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
return points
class PointcloudRotatePerturbation(object):
def __init__(self, angle_sigma=0.06, angle_clip=0.18):
self.angle_sigma, self.angle_clip = angle_sigma, angle_clip
def _get_angles(self):
angles = np.clip(
self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip
)
return angles
def __call__(self, points):
angles = self._get_angles()
Rx = angle_axis(angles[0], np.array([1.0, 0.0, 0.0]))
Ry = angle_axis(angles[1], np.array([0.0, 1.0, 0.0]))
Rz = angle_axis(angles[2], np.array([0.0, 0.0, 1.0]))
rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx)
normals = points.size(1) > 3
if not normals:
return torch.matmul(points, rotation_matrix.t())
else:
pc_xyz = points[:, 0:3]
pc_normals = points[:, 3:]
points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
return points
class PointcloudJitter(object):
def __init__(self, std=0.01, clip=0.05):
self.std, self.clip = std, clip
def __call__(self, points):
jittered_data = (
points.new(points.size(0), 3)
.normal_(mean=0.0, std=self.std)
.clamp_(-self.clip, self.clip)
)
points[:, 0:3] += jittered_data
return points
class PointcloudTranslate(object):
def __init__(self, translate_range=0.1):
self.translate_range = translate_range
def __call__(self, points):
translation = np.random.uniform(-self.translate_range, self.translate_range)
points[:, 0:3] += translation
return points
class PointcloudToTensor(object):
def __call__(self, points):
return torch.from_numpy(points).float()
class PointcloudRandomInputDropout(object):
def __init__(self, max_dropout_ratio=0.875):
assert max_dropout_ratio >= 0 and max_dropout_ratio < 1
self.max_dropout_ratio = max_dropout_ratio
def __call__(self, points):
pc = points.numpy()
dropout_ratio = np.random.random() * self.max_dropout_ratio # 0~0.875
drop_idx = np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0]
if len(drop_idx) > 0:
pc[drop_idx] = pc[0] # set to the first point
return torch.from_numpy(pc).float()
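# Minimal usage sketch (illustrative): the transforms above compose like
# torchvision transforms and expect one (N, 3[+C]) cloud at a time, starting
# from a numpy array.
#
#   from torchvision import transforms
#   xform = transforms.Compose([
#       PointcloudToTensor(),
#       PointcloudRotate(axis=np.array([0.0, 1.0, 0.0])),
#       PointcloudJitter(std=0.01, clip=0.05),
#   ])
#   pts = xform(np.random.randn(1024, 3).astype(np.float32))  # FloatTensor (1024, 3)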
================================================
FILE: pointnet2_pyt/pointnet2/models/__init__.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from .pointnet2_msg_sem import Pointnet2MSG as Pointnet2SemMSG
from .pointnet2_ssg_sem import Pointnet2SSG as Pointnet2SemSSG
from .pointnet2_msg_cls import Pointnet2MSG as Pointnet2ClsMSG
from .pointnet2_ssg_cls import Pointnet2SSG as Pointnet2ClsSSG
================================================
FILE: pointnet2_pyt/pointnet2/models/pointnet2_msg_cls.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import etw_pytorch_utils as pt_utils
from collections import namedtuple
from pointnet2.utils.pointnet2_modules import PointnetSAModuleMSG, PointnetSAModule
def model_fn_decorator(criterion):
ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"])
def model_fn(model, data, epoch=0, eval=False):
with torch.set_grad_enabled(not eval):
inputs, labels = data
inputs = inputs.to("cuda", non_blocking=True)
labels = labels.to("cuda", non_blocking=True)
preds = model(inputs)
labels = labels.view(-1)
loss = criterion(preds, labels)
_, classes = torch.max(preds, -1)
acc = (classes == labels).float().sum() / labels.numel()
return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()})
return model_fn
class Pointnet2MSG(nn.Module):
r"""
PointNet2 with multi-scale grouping
Classification network
Parameters
----------
num_classes: int
            Number of semantic classes to predict over -- size of softmax classifier
        input_channels: int = 3
            Number of input channels in the feature descriptor for each point. If the point cloud
            is Nx9, this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are
            feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=3, use_xyz=True, version=1.0):
super(Pointnet2MSG, self).__init__()
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=512,
radii=[0.1, 0.2, 0.4],
nsamples=[16, 32, 128],
mlps=[
[input_channels, 32, 32, 64],
[input_channels, 64, 64, 128],
[input_channels, 64, 96, 128],
],
use_xyz=use_xyz,
)
)
input_channels = 64 + 128 + 128
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=128,
radii=[0.2, 0.4, 0.8],
nsamples=[32, 64, 128],
mlps=[
[input_channels, 64, 64, 128],
[input_channels, 128, 128, 256],
[input_channels, 128, 128, 256],
],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(mlp=[128 + 256 + 256, 256, 512, 1024], use_xyz=use_xyz)
)
if version == 1.0:
self.FC_layer = (
pt_utils.Seq(1024)
.fc(512, bn=True)
# potentially different for original one
# https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_cls_msg.py#L34
.dropout(0.5)
.fc(256, bn=True)
# potentially different for original one
# https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_cls_msg.py#L34
.dropout(0.5)
.fc(num_classes, activation=None)
)
elif version == 2.0:
self.FC_layer = (
pt_utils.Seq(1024)
.fc(512, bn=True)
# potentially different for original one
# https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_cls_msg.py#L34
.dropout(0.6)
.fc(256, bn=True)
# potentially different for original one
# https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_cls_msg.py#L34
.dropout(0.6)
.fc(num_classes, activation=None)
)
else:
assert False
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def forward(self, pointcloud):
# type: (Pointnet2MSG, torch.cuda.FloatTensor) -> pt_utils.Seq
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
for module in self.SA_modules:
xyz, features = module(xyz, features)
return self.FC_layer(features.squeeze(-1))
# arguments found out based on https://github.com/charlesq34/pointnet2/commit/74c52aa30458d1695e093a179cd335b7885b3244
# commit
class Pointnet2MSG5K(nn.Module):
r"""
PointNet2 with multi-scale grouping
Classification network
Parameters
----------
num_classes: int
            Number of semantic classes to predict over -- size of softmax classifier
        input_channels: int = 3
            Number of input channels in the feature descriptor for each point. If the point cloud
            is Nx9, this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are
            feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=3, use_xyz=True):
super(Pointnet2MSG5K, self).__init__()
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=512,
radii=[0.1, 0.2, 0.4],
nsamples=[32,64,128],
mlps=[
[input_channels, 32, 32, 64],
[input_channels, 64, 64, 128],
[input_channels, 64, 96, 128],
],
use_xyz=use_xyz,
)
)
input_channels = 64 + 128 + 128
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=128,
radii=[0.2, 0.4, 0.8],
nsamples=[64,64,128],
mlps=[
[input_channels, 64, 64, 128],
[input_channels, 128, 128, 256],
[input_channels, 128, 128, 256],
],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(mlp=[128 + 256 + 256, 256, 512, 1024], use_xyz=use_xyz)
)
self.FC_layer = (
pt_utils.Seq(1024)
.fc(512, bn=True)
.dropout(0.5)
.fc(256, bn=True)
.dropout(0.5)
.fc(num_classes, activation=None)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def forward(self, pointcloud):
# type: (Pointnet2MSG, torch.cuda.FloatTensor) -> pt_utils.Seq
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
for module in self.SA_modules:
xyz, features = module(xyz, features)
return self.FC_layer(features.squeeze(-1))
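# Minimal usage sketch (illustrative; the grouping/sampling ops require CUDA).
# With xyz-only clouds, input_channels=0 as in train_cls.py:
#
#   model = Pointnet2MSG(num_classes=40, input_channels=0, use_xyz=True).cuda()
#   pc = torch.rand(8, 1024, 3).cuda()  # (B, N, 3)
#   logits = model(pc)                  # (8, 40)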
================================================
FILE: pointnet2_pyt/pointnet2/models/pointnet2_msg_sem.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import etw_pytorch_utils as pt_utils
from collections import namedtuple
from pointnet2.utils.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
def model_fn_decorator(criterion):
ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"])
def model_fn(model, data, epoch=0, eval=False):
with torch.set_grad_enabled(not eval):
inputs, labels = data
inputs = inputs.to("cuda", non_blocking=True)
labels = labels.to("cuda", non_blocking=True)
preds = model(inputs)
loss = criterion(preds.view(labels.numel(), -1), labels.view(-1))
_, classes = torch.max(preds, -1)
acc = (classes == labels).float().sum() / labels.numel()
return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()})
return model_fn
class Pointnet2MSG(nn.Module):
r"""
PointNet2 with multi-scale grouping
        Semantic segmentation network that uses feature propagation layers
Parameters
----------
num_classes: int
            Number of semantic classes to predict over -- size of the softmax classifier that is run for each point
        input_channels: int = 6
            Number of input channels in the feature descriptor for each point. If the point cloud
            is Nx9, this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are
            feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=6, use_xyz=True):
super(Pointnet2MSG, self).__init__()
self.SA_modules = nn.ModuleList()
c_in = input_channels
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=1024,
radii=[0.05, 0.1],
nsamples=[16, 32],
mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]],
use_xyz=use_xyz,
)
)
c_out_0 = 32 + 64
c_in = c_out_0
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=256,
radii=[0.1, 0.2],
nsamples=[16, 32],
mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
use_xyz=use_xyz,
)
)
c_out_1 = 128 + 128
c_in = c_out_1
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=64,
radii=[0.2, 0.4],
nsamples=[16, 32],
mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
use_xyz=use_xyz,
)
)
c_out_2 = 256 + 256
c_in = c_out_2
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=16,
radii=[0.4, 0.8],
nsamples=[16, 32],
mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
use_xyz=use_xyz,
)
)
c_out_3 = 512 + 512
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[256 + input_channels, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
self.FP_modules.append(PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512]))
self.FC_layer = (
pt_utils.Seq(128)
.conv1d(128, bn=True)
.dropout()
.conv1d(num_classes, activation=None)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def forward(self, pointcloud):
# type: (Pointnet2MSG, torch.cuda.FloatTensor) -> pt_utils.Seq
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.FC_layer(l_features[0]).transpose(1, 2).contiguous()
if __name__ == "__main__":
from torch.autograd import Variable
import numpy as np
import torch.optim as optim
B = 2
N = 32
inputs = torch.randn(B, N, 6).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda()
model = Pointnet2MSG(3, input_channels=3)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-2)
print("Testing with xyz")
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
for _ in range(5):
optimizer.zero_grad()
_, loss, _ = model_fn(model, (inputs, labels))
loss.backward()
        print(loss.item())
optimizer.step()
# with use_xyz=False
inputs = torch.randn(B, N, 6).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda()
model = Pointnet2MSG(3, input_channels=3, use_xyz=False)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-2)
print("Testing without xyz")
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
for _ in range(5):
optimizer.zero_grad()
_, loss, _ = model_fn(model, (inputs, labels))
loss.backward()
        print(loss.item())
optimizer.step()
================================================
FILE: pointnet2_pyt/pointnet2/models/pointnet2_ssg_cls.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import etw_pytorch_utils as pt_utils
from collections import namedtuple
from pointnet2.utils.pointnet2_modules import PointnetSAModule
def model_fn_decorator(criterion):
ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"])
def model_fn(model, data, epoch=0, eval=False):
with torch.set_grad_enabled(not eval):
inputs, labels = data
inputs = inputs.to("cuda", non_blocking=True)
labels = labels.to("cuda", non_blocking=True)
preds = model(inputs)
labels = labels.view(-1)
loss = criterion(preds, labels)
_, classes = torch.max(preds, -1)
acc = (classes == labels).float().sum() / labels.numel()
return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()})
return model_fn
class Pointnet2SSG(nn.Module):
r"""
PointNet2 with single-scale grouping
Classification network
Parameters
----------
num_classes: int
            Number of semantic classes to predict over -- size of softmax classifier
        input_channels: int = 3
            Number of input channels in the feature descriptor for each point. If the point cloud
            is Nx9, this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are
            feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=3, use_xyz=True):
super(Pointnet2SSG, self).__init__()
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=512,
radius=0.2,
nsample=64,
mlp=[input_channels, 64, 64, 128],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=128,
radius=0.4,
nsample=64,
mlp=[128, 128, 128, 256],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz)
)
self.FC_layer = (
pt_utils.Seq(1024)
.fc(512, bn=True)
.dropout(0.5)
.fc(256, bn=True)
.dropout(0.5)
.fc(num_classes, activation=None)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def forward(self, pointcloud):
# type: (Pointnet2SSG, torch.cuda.FloatTensor) -> pt_utils.Seq
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
for module in self.SA_modules:
xyz, features = module(xyz, features)
return self.FC_layer(features.squeeze(-1))
================================================
FILE: pointnet2_pyt/pointnet2/models/pointnet2_ssg_sem.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import etw_pytorch_utils as pt_utils
from collections import namedtuple
from pointnet2.utils.pointnet2_modules import PointnetSAModule, PointnetFPModule
def model_fn_decorator(criterion):
ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"])
def model_fn(model, data, epoch=0, eval=False):
with torch.set_grad_enabled(not eval):
inputs, labels = data
inputs = inputs.to("cuda", non_blocking=True)
labels = labels.to("cuda", non_blocking=True)
preds = model(inputs)
loss = criterion(preds.view(labels.numel(), -1), labels.view(-1))
_, classes = torch.max(preds, -1)
acc = (classes == labels).float().sum() / labels.numel()
return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()})
return model_fn
class Pointnet2SSG(nn.Module):
r"""
PointNet2 with single-scale grouping
        Semantic segmentation network that uses feature propagation layers
Parameters
----------
num_classes: int
            Number of semantic classes to predict over -- size of the softmax classifier that is run for each point
        input_channels: int = 6
            Number of input channels in the feature descriptor for each point. If the point cloud
            is Nx9, this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are
            feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=3, use_xyz=True):
super(Pointnet2SSG, self).__init__()
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=1024,
radius=0.1,
nsample=32,
mlp=[input_channels, 32, 32, 64],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=256,
radius=0.2,
nsample=32,
mlp=[64, 64, 64, 128],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=64,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=use_xyz,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=16,
radius=0.8,
nsample=32,
mlp=[256, 256, 256, 512],
use_xyz=use_xyz,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(
PointnetFPModule(mlp=[128 + input_channels, 128, 128, 128])
)
self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
self.FC_layer = (
pt_utils.Seq(128)
.conv1d(128, bn=True)
.dropout()
.conv1d(num_classes, activation=None)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def forward(self, pointcloud):
# type: (Pointnet2SSG, torch.cuda.FloatTensor) -> pt_utils.Seq
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
                Point cloud to run predictions on.
                Each point in the point cloud MUST
                be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.FC_layer(l_features[0]).transpose(1, 2).contiguous()
================================================
FILE: pointnet2_pyt/pointnet2/train/__init__.py
================================================
================================================
FILE: pointnet2_pyt/pointnet2/train/train_cls.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
import etw_pytorch_utils as pt_utils
import pprint
import os.path as osp
import os
import argparse
from pointnet2.models import Pointnet2ClsMSG as Pointnet
from pointnet2.models.pointnet2_msg_cls import model_fn_decorator
from pointnet2.data import ModelNet40Cls
import pointnet2.data.data_utils as d_utils
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(
description="Arguments for cls training",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-batch_size", type=int, default=16, help="Batch size")
parser.add_argument(
"-num_points", type=int, default=4096, help="Number of points to train with"
)
parser.add_argument(
"-weight_decay", type=float, default=1e-5, help="L2 regularization coeff"
)
parser.add_argument("-lr", type=float, default=1e-2, help="Initial learning rate")
parser.add_argument(
"-lr_decay", type=float, default=0.7, help="Learning rate decay gamma"
)
parser.add_argument(
"-decay_step", type=float, default=2e5, help="Learning rate decay step"
)
parser.add_argument(
"-bn_momentum", type=float, default=0.5, help="Initial batch norm momentum"
)
parser.add_argument(
"-bnm_decay", type=float, default=0.5, help="Batch norm momentum decay gamma"
)
parser.add_argument(
"-checkpoint", type=str, default=None, help="Checkpoint to start from"
)
parser.add_argument(
"-epochs", type=int, default=200, help="Number of epochs to train for"
)
parser.add_argument(
"-run_name",
type=str,
default="cls_run_1",
help="Name for run in tensorboard_logger",
)
parser.add_argument("--visdom-port", type=int, default=8097)
parser.add_argument("--visdom", action="store_true")
return parser.parse_args()
lr_clip = 1e-5
bnm_clip = 1e-2
if __name__ == "__main__":
args = parse_args()
transforms = transforms.Compose(
[
d_utils.PointcloudToTensor(),
d_utils.PointcloudScale(),
d_utils.PointcloudRotate(),
d_utils.PointcloudRotatePerturbation(),
d_utils.PointcloudTranslate(),
d_utils.PointcloudJitter(),
d_utils.PointcloudRandomInputDropout(),
]
)
test_set = ModelNet40Cls(args.num_points, transforms=transforms, train=False)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=2,
pin_memory=True,
)
train_set = ModelNet40Cls(args.num_points, transforms=transforms)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=2,
pin_memory=True,
)
model = Pointnet(input_channels=0, num_classes=40, use_xyz=True)
model.cuda()
optimizer = optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
lr_lbmd = lambda it: max(
args.lr_decay ** (int(it * args.batch_size / args.decay_step)),
lr_clip / args.lr,
)
bn_lbmd = lambda it: max(
args.bn_momentum
* args.bnm_decay ** (int(it * args.batch_size / args.decay_step)),
bnm_clip,
)
# default value
it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler`
best_loss = 1e10
start_epoch = 1
# load status from checkpoint
if args.checkpoint is not None:
checkpoint_status = pt_utils.load_checkpoint(
model, optimizer, filename=args.checkpoint.split(".")[0]
)
if checkpoint_status is not None:
it, start_epoch, best_loss = checkpoint_status
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it)
bnm_scheduler = pt_utils.BNMomentumScheduler(
model, bn_lambda=bn_lbmd, last_epoch=it
)
it = max(it, 0) # for the initialize value of `trainer.train`
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
if args.visdom:
viz = pt_utils.VisdomViz(port=args.visdom_port)
else:
viz = pt_utils.CmdLineViz()
viz.text(pprint.pformat(vars(args)))
if not osp.isdir("checkpoints"):
os.makedirs("checkpoints")
trainer = pt_utils.Trainer(
model,
model_fn,
optimizer,
checkpoint_name="checkpoints/pointnet2_cls",
best_name="checkpoints/pointnet2_cls_best",
lr_scheduler=lr_scheduler,
bnm_scheduler=bnm_scheduler,
viz=viz,
)
trainer.train(
it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss
)
if start_epoch == args.epochs:
_ = trainer.eval_epoch(test_loader)
================================================
FILE: pointnet2_pyt/pointnet2/train/train_sem_seg.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
import etw_pytorch_utils as pt_utils
import pprint
import os.path as osp
import os
import argparse
from pointnet2.models import Pointnet2SemMSG as Pointnet
from pointnet2.models.pointnet2_msg_sem import model_fn_decorator
from pointnet2.data import Indoor3DSemSeg
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument(
"-batch_size", type=int, default=32, help="Batch size [default: 32]"
)
parser.add_argument(
"-num_points",
type=int,
default=4096,
help="Number of points to train with [default: 4096]",
)
parser.add_argument(
"-weight_decay",
type=float,
default=0,
help="L2 regularization coeff [default: 0.0]",
)
parser.add_argument(
"-lr", type=float, default=1e-2, help="Initial learning rate [default: 1e-2]"
)
parser.add_argument(
"-lr_decay",
type=float,
default=0.5,
help="Learning rate decay gamma [default: 0.5]",
)
parser.add_argument(
"-decay_step",
type=float,
default=2e5,
help="Learning rate decay step [default: 20]",
)
parser.add_argument(
"-bn_momentum",
type=float,
default=0.9,
help="Initial batch norm momentum [default: 0.9]",
)
parser.add_argument(
"-bn_decay",
type=float,
default=0.5,
help="Batch norm momentum decay gamma [default: 0.5]",
)
parser.add_argument(
"-checkpoint", type=str, default=None, help="Checkpoint to start from"
)
parser.add_argument(
"-epochs", type=int, default=200, help="Number of epochs to train for"
)
parser.add_argument(
"-run_name",
type=str,
default="sem_seg_run_1",
help="Name for run in tensorboard_logger",
)
parser.add_argument("--visdom-port", type=int, default=8097)
parser.add_argument("--visdom", action="store_true")
lr_clip = 1e-5
bnm_clip = 1e-2
if __name__ == "__main__":
args = parser.parse_args()
test_set = Indoor3DSemSeg(args.num_points, train=False)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=True,
pin_memory=True,
num_workers=2,
)
train_set = Indoor3DSemSeg(args.num_points)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
pin_memory=True,
num_workers=2,
shuffle=True,
)
model = Pointnet(num_classes=13, input_channels=6, use_xyz=True)
model.cuda()
optimizer = optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
lr_lbmd = lambda it: max(
args.lr_decay ** (int(it * args.batch_size / args.decay_step)),
lr_clip / args.lr,
)
bnm_lmbd = lambda it: max(
args.bn_momentum
* args.bn_decay ** (int(it * args.batch_size / args.decay_step)),
bnm_clip,
)
# default value
it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler`
best_loss = 1e10
start_epoch = 1
# load status from checkpoint
if args.checkpoint is not None:
checkpoint_status = pt_utils.load_checkpoint(
model, optimizer, filename=args.checkpoint.split(".")[0]
)
if checkpoint_status is not None:
it, start_epoch, best_loss = checkpoint_status
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it)
bnm_scheduler = pt_utils.BNMomentumScheduler(
model, bn_lambda=bnm_lmbd, last_epoch=it
)
it = max(it, 0) # for the initialize value of `trainer.train`
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
if args.visdom:
viz = pt_utils.VisdomViz(port=args.visdom_port)
else:
viz = pt_utils.CmdLineViz()
viz.text(pprint.pformat(vars(args)))
if not osp.isdir("checkpoints"):
os.makedirs("checkpoints")
trainer = pt_utils.Trainer(
model,
model_fn,
optimizer,
checkpoint_name="checkpoints/pointnet2_semseg",
best_name="checkpoints/pointnet2_semseg_best",
lr_scheduler=lr_scheduler,
bnm_scheduler=bnm_scheduler,
viz=viz,
)
trainer.train(
it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss
)
if start_epoch == args.epochs:
_ = trainer.eval_epoch(test_loader)
================================================
FILE: pointnet2_pyt/pointnet2/utils/.gitignore
================================================
build
_ext
================================================
FILE: pointnet2_pyt/pointnet2/utils/__init__.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-16 22:23:16
LastEditors: Jiachen Sun
LastEditTime: 2022-02-23 16:50:08
'''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from PCT_Pytorch.pointnet2_ops_lib.pointnet2_ops import pointnet2_utils
from PCT_Pytorch.pointnet2_ops_lib.pointnet2_ops import pointnet2_modules
================================================
FILE: pointnet2_pyt/pointnet2/utils/linalg_utils.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from enum import Enum
import numpy as np
PDist2Order = Enum("PDist2Order", "d_first d_second")
def pdist2(X, Z=None, order=PDist2Order.d_second):
# type: (torch.Tensor, torch.Tensor, PDist2Order) -> torch.Tensor
r""" Calculates the pairwise distance between X and Z
    D[b, i, j] = squared l2 distance between X[b, i] and Z[b, j]
Parameters
---------
X : torch.Tensor
X is a (B, N, d) tensor. There are B batches, and N vectors of dimension d
Z: torch.Tensor
Z is a (B, M, d) tensor. If Z is None, then Z = X
Returns
-------
torch.Tensor
Distance matrix is size (B, N, M)
"""
if order == PDist2Order.d_second:
if X.dim() == 2:
X = X.unsqueeze(0)
if Z is None:
Z = X
G = np.matmul(X, Z.transpose(-2, -1))
S = (X * X).sum(-1, keepdim=True)
R = S.transpose(-2, -1)
else:
if Z.dim() == 2:
Z = Z.unsqueeze(0)
G = np.matmul(X, Z.transpose(-2, -1))
S = (X * X).sum(-1, keepdim=True)
R = (Z * Z).sum(-1, keepdim=True).transpose(-2, -1)
else:
if X.dim() == 2:
X = X.unsqueeze(0)
if Z is None:
Z = X
G = np.matmul(X.transpose(-2, -1), Z)
R = (X * X).sum(-2, keepdim=True)
S = R.transpose(-2, -1)
else:
if Z.dim() == 2:
Z = Z.unsqueeze(0)
G = np.matmul(X.transpose(-2, -1), Z)
S = (X * X).sum(-2, keepdim=True).transpose(-2, -1)
R = (Z * Z).sum(-2, keepdim=True)
return torch.abs(R + S - 2 * G).squeeze(0)
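# pdist2 uses the expansion ||x - z||^2 = ||x||^2 + ||z||^2 - 2 <x, z>: G holds
# the inner products, S and R the squared norms broadcast along each axis, and
# torch.abs guards against tiny negative values from floating-point
# cancellation. Note that pdist2_slow below computes unsquared distances via
# torch.dist, so the two agree only up to that square.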
def pdist2_slow(X, Z=None):
if Z is None:
Z = X
D = torch.zeros(X.size(0), X.size(2), Z.size(2))
for b in range(D.size(0)):
for i in range(D.size(1)):
for j in range(D.size(2)):
D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j])
return D
if __name__ == "__main__":
X = torch.randn(2, 3, 5)
Z = torch.randn(2, 3, 3)
print(pdist2(X, order=PDist2Order.d_first))
print(pdist2_slow(X))
print(torch.dist(pdist2(X, order=PDist2Order.d_first), pdist2_slow(X)))
================================================
FILE: pointnet2_pyt/pointnet2/utils/pointnet2_modules.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
import etw_pytorch_utils as pt_utils
from pointnet2.utils import pointnet2_utils
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz, features=None):
# type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
"""
new_features_list = []
B = xyz.shape[0]
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = (
pointnet2_utils.gather_operation(
xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
)
.transpose(1, 2)
.contiguous()
if self.npoint is not None
else torch.zeros((B, 1, 3)).to(xyz.device)
)
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
# type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
super(PointnetSAModuleMSG, self).__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None
else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
):
# type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
super(PointnetSAModule, self).__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz,
)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, mlp, bn=True):
# type: (PointnetFPModule, List[int], bool) -> None
super(PointnetFPModule, self).__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
# type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*(known_feats.size()[0:2] + [unknown.size(1)])
)
if unknow_feats is not None:
new_features = torch.cat(
[interpolated_feats, unknow_feats], dim=1
) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
    xyz_feats = Variable(torch.randn(2, 6, 9).cuda(), requires_grad=True)  # (B, C, N) feature layout
    test_module = PointnetSAModuleMSG(
        npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[6, 3], [6, 6]]  # +3 for xyz is added internally
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
# test_module = PointnetFPModule(mlp=[6, 6])
# test_module.cuda()
# from torch.autograd import gradcheck
# inputs = (xyz, xyz, None, xyz_feats)
# test = gradcheck(test_module, inputs, eps=1e-6, atol=1e-4)
# print(test)
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(torch.cuda.FloatTensor(*new_features.size()).fill_(1))
print(new_features)
print(xyz.grad)
================================================
FILE: pointnet2_pyt/pointnet2/utils/pointnet2_utils.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import etw_pytorch_utils as pt_utils
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
return _ext.furthest_point_sampling(xyz, npoint)
@staticmethod
    def backward(ctx, grad_out=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
            (B, npoint) tensor of the indices of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the unknown features
        known : torch.Tensor
            (B, m, 3) tensor of the known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
return _ext.ball_query(new_xyz, xyz, radius, nsample)
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
            assert (
                self.use_xyz
            ), "Cannot have features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
================================================
FILE: pointnet2_pyt/setup.py
================================================
'''
Description:
Author: Jiachen Sun
Date: 2022-02-16 22:23:16
LastEditors: Jiachen Sun
LastEditTime: 2022-02-24 23:16:38
'''
from __future__ import division, absolute_import, with_statement, print_function
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
try:
import builtins
except ImportError:
import __builtin__ as builtins
builtins.__POINTNET2_SETUP__ = True
import pointnet2
# _ext_src_root = "pointnet2/_ext-src"
# _ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
# "{}/src/*.cu".format(_ext_src_root)
# )
# _ext_headers = glob.glob("{}/include/*".format(_ext_src_root))
requirements = ["etw_pytorch_utils==1.1.1", "h5py", "enum34", "future"]
setup(
name="pointnet2",
version=pointnet2.__version__,
author="Erik Wijmans",
packages=find_packages(),
install_requires=requirements,
# ext_modules=[
# CUDAExtension(
# name="pointnet2._ext",
# sources=_ext_sources,
# extra_compile_args={
# "cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
# "nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
# },
# )
# ],
# cmdclass={"build_ext": BuildExtension},
)
================================================
FILE: pointnet2_pyt/tests/conftest.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import pytest
import torch
import numpy as np
pytest_plugins = ["helpers_namespace"]
def _test_loop(model, model_fn, inputs, labels):
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
prev_loss = 1e10
for _ in range(5):
optimizer.zero_grad()
_, loss, _ = model_fn(model, (inputs, labels))
loss.backward()
optimizer.step()
assert loss.item() < prev_loss + 1.0, "Loss spiked upwards"
prev_loss = loss.item()
@pytest.helpers.register
def cls_test_xyz(model, model_fn):
B, N = 4, 2048
inputs = torch.randn(B, N, 6).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B)).cuda()
model.cuda()
_test_loop(model, model_fn, inputs, labels)
@pytest.helpers.register
def cls_test_no_xyz(model, model_fn):
B, N = 4, 2048
inputs = torch.randn(B, N, 3).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B)).cuda()
model.cuda()
_test_loop(model, model_fn, inputs, labels)
@pytest.helpers.register
def semseg_test_xyz(model, model_fn):
B, N = 4, 2048
inputs = torch.randn(B, N, 6).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda()
model.cuda()
_test_loop(model, model_fn, inputs, labels)
@pytest.helpers.register
def semseg_test_no_xyz(model, model_fn):
B, N = 4, 2048
inputs = torch.randn(B, N, 3).cuda()
labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda()
model.cuda()
_test_loop(model, model_fn, inputs, labels)
================================================
FILE: pointnet2_pyt/tests/test_cls_msg.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from pointnet2.models.pointnet2_msg_cls import model_fn_decorator, Pointnet2MSG
import torch.nn as nn
import pytest
def test_xyz():
model = Pointnet2MSG(3, input_channels=3)
pytest.helpers.cls_test_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
def test_no_xyz():
model = Pointnet2MSG(3, input_channels=0)
pytest.helpers.cls_test_no_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
================================================
FILE: pointnet2_pyt/tests/test_cls_ssg.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from pointnet2.models.pointnet2_ssg_cls import model_fn_decorator, Pointnet2SSG
import torch.nn as nn
import pytest
def test_xyz():
model = Pointnet2SSG(3, input_channels=3)
pytest.helpers.cls_test_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
def test_no_xyz():
model = Pointnet2SSG(3, input_channels=0)
pytest.helpers.cls_test_no_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
================================================
FILE: pointnet2_pyt/tests/test_semseg_msg.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from pointnet2.models.pointnet2_msg_sem import model_fn_decorator, Pointnet2MSG
import torch.nn as nn
import pytest
def test_xyz():
model = Pointnet2MSG(3, input_channels=3)
pytest.helpers.semseg_test_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
def test_no_xyz():
model = Pointnet2MSG(3, input_channels=0)
pytest.helpers.semseg_test_no_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
================================================
FILE: pointnet2_pyt/tests/test_semseg_ssg.py
================================================
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
from pointnet2.models.pointnet2_ssg_sem import model_fn_decorator, Pointnet2SSG
import torch.nn as nn
import pytest
def test_xyz():
model = Pointnet2SSG(3, input_channels=3)
pytest.helpers.semseg_test_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
def test_no_xyz():
model = Pointnet2SSG(3, input_channels=0)
pytest.helpers.semseg_test_no_xyz(model, model_fn_decorator(nn.CrossEntropyLoss()))
================================================
FILE: pointnet2_pyt/tox.ini
================================================
[tox]
envlist =
py27
py35
py36
[testenv]
# install pytest in the virtualenv where commands will be executed
deps =
numpy
torch>=1.0
git+git://github.com/erikwijmans/etw_pytorch_utils.git@v1.1.1#egg=etw_pytorch_utils
pytest
pytest-helpers-namespace
commands =
pytest
================================================
FILE: pointnet2_tf/LICENSE
================================================
PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space.
Copyright (c) 2017, Geometric Computation Group of Stanford University
The MIT License (MIT)
Copyright (c) 2017 Charles R. Qi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: pointnet2_tf/README.md
================================================
### PointNet++: *Deep Hierarchical Feature Learning on Point Sets in a Metric Space*
Created by Charles R. Qi, Li (Eric) Yi, Hao Su, Leonidas J. Guibas from Stanford University.

### Citation
If you find our work useful in your research, please consider citing:

    @article{qi2017pointnetplusplus,
      title={PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space},
      author={Qi, Charles R and Yi, Li and Su, Hao and Guibas, Leonidas J},
      journal={arXiv preprint arXiv:1706.02413},
      year={2017}
    }
### Introduction
This work is based on our NIPS'17 paper. You can find the arXiv version of the paper here or check the project webpage for a quick overview. PointNet++ is a follow-up project that builds on and extends PointNet; it is version 2.0 of the PointNet architecture.
PointNet (the v1 model) either transforms features of *individual points* independently or processes global features of the *entire point set*. However, in many cases there are well-defined distance metrics, such as Euclidean distance for 3D point clouds collected by 3D sensors, or geodesic distance for manifolds like isometric shape surfaces. In PointNet++ we want to respect the *spatial localities* of those point sets. PointNet++ learns hierarchical features with increasing scales of context, just as convolutional neural networks do. We also observe a challenge that is not present in convnets on images: non-uniform densities in natural point clouds. To deal with these non-uniform densities, we further propose special layers that are able to intelligently aggregate information from different scales.
In this repository we release code and data for our PointNet++ classification and segmentation networks, as well as utility scripts for training, testing, data processing, and visualization.
### Installation
Install TensorFlow. The code is tested under the TF1.2 GPU version and Python 2.7 (version 3 should also work) on Ubuntu 14.04. The code also depends on a few Python libraries for data processing and visualization, such as `cv2` and `h5py`. It's highly recommended that you have access to GPUs.
#### Compile Customized TF Operators
The TF operators are included under `tf_ops`; you need to compile them first (check `tf_xxx_compile.sh` under each ops subfolder). Update the `nvcc` and `python` paths if necessary. The code is tested under TF1.2.0. If you are using an earlier version, you may need to remove the `-D_GLIBCXX_USE_CXX11_ABI=0` flag from the g++ command in order to compile correctly.
To compile the operators in TF version >=1.4, you need to modify the compile scripts slightly.
First, find the TensorFlow include and library paths:

    TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
    TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
Then, add flags of `-I$TF_INC/external/nsync/public -L$TF_LIB -ltensorflow_framework` to the `g++` commands.
### Usage
#### Shape Classification
To train a PointNet++ model to classify ModelNet40 shapes (using point clouds with XYZ coordinates):

    python train.py
To see all optional arguments for training:

    python train.py -h
If you have multiple GPUs on your machine, you can also run multi-GPU training (our implementation is similar to the TensorFlow CIFAR-10 tutorial):

    CUDA_VISIBLE_DEVICES=0,1 python train_multi_gpu.py --num_gpus 2
After training, to evaluate the classification accuracies (with optional multi-angle voting):

    python evaluate.py --num_votes 12
Side note: for the XYZ+normal experiment reported in our paper, (1) 5000 points are used, (2) a further random data dropout augmentation is used during training (see the commented line after `augment_batch_data` in `train.py`), and (3) the model architecture is updated so that `nsample=128` in the first two set abstraction levels, which suits the larger point density of 5000-point samplings.
To use normal features for classification: You can get our sampled point clouds of ModelNet40 (XYZ and normal from mesh, 10k points per shape) here (1.6GB). Move the uncompressed data folder to `data/modelnet40_normal_resampled`
#### Object Part Segmentation
To train a model to segment object parts for ShapeNet models:

    cd part_seg
    python train.py
Preprocessed ShapeNetPart dataset (XYZ, normal and part labels) can be found here (674MB). Move the uncompressed data folder to `data/shapenetcore_partanno_segmentation_benchmark_v0_normal`
#### Semantic Scene Parsing
See `scannet/README` and `scannet/train.py` for details.
#### Visualization Tools
We have provided a handy point cloud visualization tool under `utils`. Run `sh compile_render_balls_so.sh` to compile it, and then you can try the demo with `python show3d_balls.py`. The original code is from here.
#### Prepare Your Own Data
You can refer to here for how to prepare your own HDF5 files for either classification or segmentation. Or you can refer to `modelnet_dataset.py` for how to read raw data files and prepare mini-batches from them. A more advanced way is to use TensorFlow's dataset APIs, for which you can find more documentation here.
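As a minimal sketch (assuming the HDF5 files have been downloaded to `data/modelnet40_ply_hdf5_2048` as described above), mini-batches can be read with `ModelNetH5Dataset` like so:

    # Minimal sketch: iterate ModelNet40 mini-batches from the HDF5 files.
    from modelnet_h5_dataset import ModelNetH5Dataset

    dataset = ModelNetH5Dataset('data/modelnet40_ply_hdf5_2048/train_files.txt',
                                batch_size=32, npoints=1024, shuffle=True)
    while dataset.has_next_batch():
        # augment=True applies random rotation, scaling, shifting and jitter
        points, labels = dataset.next_batch(augment=True)
        # points: (bsize, npoints, 3), labels: (bsize,)
    dataset.reset()  # rewind before the next epoch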
### License
Our code is released under MIT License (see LICENSE file for details).
### Updates
* 02/23/2018: Added support for multi-gpu training for the classification task.
* 02/23/2018: Adopted a new way for data loading. No longer require manual data downloading to train a classification network.
* 02/06/2018: Added sample training code for ScanNet semantic segmentation.
### Related Projects
* PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation by Qi et al. (CVPR 2017 Oral Presentation). Code and data are released on GitHub.
* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. (CVPR 2018). A novel framework for 3D object detection with RGB-D data. Based on 2D boxes from a 2D object detector on RGB images, we extrude the depth maps in the 2D boxes to point clouds in 3D space and then perform instance segmentation and 3D bounding box estimation using PointNet/PointNet++. The proposed method achieved first place on the KITTI 3D object detection benchmark across all categories (last checked on 11/30/2017). Code and data release TBD.
================================================
FILE: pointnet2_tf/data/README.md
================================================
#### Point Cloud Data
You can get our sampled point clouds of ModelNet40 (XYZ and normal from mesh, 10k points per shape) at this link. The ShapeNetPart dataset (XYZ, normal and part labels) can be found here.
Uncompress the downloaded data in this directory.
================================================
FILE: pointnet2_tf/evaluate.py
================================================
'''
Evaluate classification performance with optional voting.
Uses the H5 dataset by default. If using normals, switches to the normal-resampled dataset.
'''
import tensorflow as tf
import numpy as np
import argparse
import socket
import importlib
import time
import os
import scipy.misc
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import modelnet_dataset
import modelnet_h5_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name. [default: pointnet2_cls_ssg]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')
parser.add_argument('--normal', action='store_true', help='Whether to use normal information')
parser.add_argument('--num_votes', type=int, default=1, help='Aggregate classification scores from multiple rotations [default: 1]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
MODEL = importlib.import_module(FLAGS.model) # import network module
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
NUM_CLASSES = 40
SHAPE_NAMES = [line.rstrip() for line in \
open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
HOSTNAME = socket.gethostname()
# Shapenet official train/test split
if FLAGS.normal:
assert(NUM_POINT<=10000)
DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
else:
assert(NUM_POINT<=2048)
TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True)
TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate(num_votes):
is_training = False
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# simple model
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
MODEL.get_loss(pred, labels_pl, end_points)
losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': total_loss}
eval_one_epoch(sess, ops, num_votes)
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
is_training = False
# Make sure batch data is of same size
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
shape_ious = []
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
while TEST_DATASET.has_next_batch():
batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
bsize = batch_data.shape[0]
print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, entries [bsize:] still hold data from the previous batch
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
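        # Voting: accumulate logits over num_votes rotated copies of the batch
        # and take the argmax of the summed scores.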
for vote_idx in range(num_votes):
# Shuffle point order to achieve different farthest samplings
shuffled_indices = np.arange(NUM_POINT)
np.random.shuffle(shuffled_indices)
if FLAGS.normal:
rotated_data = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :],
vote_idx/float(num_votes) * np.pi * 2)
else:
rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
vote_idx/float(num_votes) * np.pi * 2)
feed_dict = {ops['pointclouds_pl']: rotated_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
batch_pred_sum += pred_val
pred_val = np.argmax(batch_pred_sum, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val
batch_idx += 1
for i in range(bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64)
    log_string('eval avg class acc: %f' % (np.mean(class_accuracies)))
for i, name in enumerate(SHAPE_NAMES):
log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
if __name__=='__main__':
with tf.Graph().as_default():
evaluate(num_votes=FLAGS.num_votes)
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/modelnet_dataset.py
================================================
'''
ModelNet dataset. Supports ModelNet40, ModelNet10, and XYZ + normal channels. Up to 10000 points.
'''
import os
import os.path
import json
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
def pc_normalize(pc):
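    # Center the cloud at its centroid and scale it to fit inside the unit sphere.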
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
class ModelNetDataset():
def __init__(self, root, batch_size = 32, npoints = 1024, split='train', normalize=True, normal_channel=False, modelnet10=False, cache_size=15000, shuffle=None):
self.root = root
self.batch_size = batch_size
self.npoints = npoints
self.normalize = normalize
if modelnet10:
self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
else:
self.catfile = os.path.join(self.root, 'shape_names.txt')
self.cat = [line.rstrip() for line in open(self.catfile)]
self.classes = dict(zip(self.cat, range(len(self.cat))))
self.normal_channel = normal_channel
shape_ids = {}
if modelnet10:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))]
shape_ids['test']= [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))]
else:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))]
shape_ids['test']= [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))]
assert(split=='train' or split=='test')
shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
# list of (shape_name, shape_txt_file_path) tuple
self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i])+'.txt') for i in range(len(shape_ids[split]))]
self.cache_size = cache_size # how many data points to cache in memory
self.cache = {} # from index to (point_set, cls) tuple
if shuffle is None:
if split == 'train': self.shuffle = True
else: self.shuffle = False
else:
self.shuffle = shuffle
self.reset()
def _augment_batch_data(self, batch_data):
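        # Random rotation (plus small perturbations), then scale/shift/jitter on
        # the XYZ channels only, and finally a shuffle of the point order.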
if self.normal_channel:
rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
else:
rotated_data = provider.rotate_point_cloud(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
jittered_data = provider.shift_point_cloud(jittered_data)
jittered_data = provider.jitter_point_cloud(jittered_data)
rotated_data[:,:,0:3] = jittered_data
return provider.shuffle_points(rotated_data)
def _get_item(self, index):
if index in self.cache:
point_set, cls = self.cache[index]
else:
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
cls = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1],delimiter=',').astype(np.float32)
# Take the first npoints
point_set = point_set[0:self.npoints,:]
if self.normalize:
point_set[:,0:3] = pc_normalize(point_set[:,0:3])
if not self.normal_channel:
point_set = point_set[:,0:3]
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, cls)
return point_set, cls
def __getitem__(self, index):
return self._get_item(index)
def __len__(self):
return len(self.datapath)
def num_channel(self):
if self.normal_channel:
return 6
else:
return 3
def reset(self):
self.idxs = np.arange(0, len(self.datapath))
if self.shuffle:
np.random.shuffle(self.idxs)
self.num_batches = (len(self.datapath)+self.batch_size-1) // self.batch_size
self.batch_idx = 0
def has_next_batch(self):
return self.batch_idx < self.num_batches
def next_batch(self, augment=False):
''' returned dimension may be smaller than self.batch_size '''
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx+1) * self.batch_size, len(self.datapath))
bsize = end_idx - start_idx
batch_data = np.zeros((bsize, self.npoints, self.num_channel()))
batch_label = np.zeros((bsize), dtype=np.int32)
for i in range(bsize):
ps,cls = self._get_item(self.idxs[i+start_idx])
batch_data[i] = ps
batch_label[i] = cls
self.batch_idx += 1
if augment: batch_data = self._augment_batch_data(batch_data)
return batch_data, batch_label
if __name__ == '__main__':
d = ModelNetDataset(root = '../data/modelnet40_normal_resampled', split='test')
print(d.shuffle)
print(len(d))
import time
tic = time.time()
for i in range(10):
ps, cls = d[i]
print(time.time() - tic)
print(ps.shape, type(ps), cls)
print(d.has_next_batch())
ps_batch, cls_batch = d.next_batch(True)
print(ps_batch.shape)
print(cls_batch.shape)
================================================
FILE: pointnet2_tf/modelnet_h5_dataset.py
================================================
'''
ModelNet dataset. Supports ModelNet40 with XYZ channels. Up to 2048 points.
Faster IO than ModelNetDataset in the first epoch.
'''
import os
import sys
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
# updated datapath
DATA_DIR = 'data'
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
return (data, label)
def loadDataFile(filename):
return load_h5(filename)
class ModelNetH5Dataset(object):
def __init__(self, list_filename, batch_size = 32, npoints = 1024, shuffle=True):
self.list_filename = list_filename
self.batch_size = batch_size
self.npoints = npoints
self.shuffle = shuffle
self.h5_files = getDataFiles(self.list_filename)
self.reset()
def reset(self):
''' reset order of h5 files '''
self.file_idxs = np.arange(0, len(self.h5_files))
if self.shuffle: np.random.shuffle(self.file_idxs)
self.current_data = None
self.current_label = None
self.current_file_idx = 0
self.batch_idx = 0
def _augment_batch_data(self, batch_data):
rotated_data = provider.rotate_point_cloud(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
jittered_data = provider.shift_point_cloud(jittered_data)
jittered_data = provider.jitter_point_cloud(jittered_data)
rotated_data[:,:,0:3] = jittered_data
return provider.shuffle_points(rotated_data)
def _get_data_filename(self):
return self.h5_files[self.file_idxs[self.current_file_idx]]
def _load_data_file(self, filename):
self.current_data,self.current_label = load_h5(filename)
self.current_label = np.squeeze(self.current_label)
self.batch_idx = 0
if self.shuffle:
self.current_data, self.current_label, _ = shuffle_data(self.current_data,self.current_label)
def _has_next_batch_in_file(self):
return self.batch_idx*self.batch_size < self.current_data.shape[0]
def num_channel(self):
return 3
def has_next_batch(self):
# TODO: add backend thread to load data
if (self.current_data is None) or (not self._has_next_batch_in_file()):
if self.current_file_idx >= len(self.h5_files):
return False
self._load_data_file(self._get_data_filename())
self.batch_idx = 0
self.current_file_idx += 1
return self._has_next_batch_in_file()
def next_batch(self, augment=False):
''' returned dimension may be smaller than self.batch_size '''
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx+1) * self.batch_size, self.current_data.shape[0])
bsize = end_idx - start_idx
batch_label = np.zeros((bsize), dtype=np.int32)
data_batch = self.current_data[start_idx:end_idx, 0:self.npoints, :].copy()
label_batch = self.current_label[start_idx:end_idx].copy()
self.batch_idx += 1
if augment: data_batch = self._augment_batch_data(data_batch)
return data_batch, label_batch
if __name__=='__main__':
d = ModelNetH5Dataset('data/modelnet40_ply_hdf5_2048/train_files.txt')
print(d.shuffle)
print(d.has_next_batch())
ps_batch, cls_batch = d.next_batch(True)
print(ps_batch.shape)
print(cls_batch.shape)
================================================
FILE: pointnet2_tf/models/pointnet2_cls_msg.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud
l0_points = None
# Set abstraction layers
l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Fully connected layers
net = tf.reshape(l3_points, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
net, _ = get_model(inputs, tf.constant(True))
print(net)
================================================
FILE: pointnet2_tf/models/pointnet2_cls_ssg.py
================================================
"""
PointNet++ Model for point clouds classification
"""
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud
l0_points = None
end_points['l0_xyz'] = l0_xyz
# Set abstraction layers
# Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
# So we only use NCHW for layer 1 until this issue can be resolved.
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Fully connected layers
net = tf.reshape(l3_points, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
output, _ = get_model(inputs, tf.constant(True))
print(output)
================================================
FILE: pointnet2_tf/models/pointnet2_part_seg.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_fp_module
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 6))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
def get_loss(pred, label):
""" pred: BxNxC,
label: BxN, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,2048,6))
net, _ = get_model(inputs, tf.constant(True))
print(net)
================================================
FILE: pointnet2_tf/models/pointnet2_part_seg_msg_one_hot.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg, pointnet_fp_module
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 6))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
cls_labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl, cls_labels_pl
NUM_CATEGORIES = 16
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
# Set abstraction layers
l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Feature propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
def get_loss(pred, label):
""" pred: BxNxC,
label: BxN, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,2048,6))
cls_labels = tf.zeros((32),dtype=tf.int32)
output, ep = get_model(inputs, cls_labels, tf.constant(True))
print(output)
================================================
FILE: pointnet2_tf/models/pointnet2_sem_seg.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_fp_module
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
smpws_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))
return pointclouds_pl, labels_pl, smpws_pl
def get_model(point_cloud, is_training, num_class, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud
l0_points = None
end_points['l0_xyz'] = l0_xyz
# Layer 1
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
def get_loss(pred, label, smpw):
""" pred: BxNxC,
label: BxN,
smpw: BxN """
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,2048,3))
net, _ = get_model(inputs, tf.constant(True), 10)
print(net)
================================================
FILE: pointnet2_tf/models/pointnet_cls_basic.py
================================================
'''
PointNet version 1 Model
Reference: https://github.com/charlesq34/pointnet
'''
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(point_cloud, -1)
# Point functions (MLP implemented as conv2d)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
# Symmetric function: max pooling
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
# MLP on global point cloud vector
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
================================================
FILE: pointnet2_tf/part_seg/command.sh
================================================
python train.py --model pointnet2_part_seg --log_dir log --gpu 1 --max_epoch 201 > log.txt 2>&1 &
================================================
FILE: pointnet2_tf/part_seg/command_one_hot.sh
================================================
python train_one_hot.py --batch_size 8 --model pointnet2_part_seg_msg_one_hot --log_dir log_msg_one_hot --gpu 0 --max_epoch 201 > log_msg_one_hot.txt 2>&1 &
================================================
FILE: pointnet2_tf/part_seg/evaluate.py
================================================
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
import part_dataset_all_normal
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_part_seg', help='Model name [default: pointnet2_part_seg]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--log_dir', default='log_eval', help='Log dir [default: log_eval]')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
FLAGS = parser.parse_args()
VOTE_NUM = 12
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp evaluate.py %s' % (LOG_DIR)) # bkp of eval procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
NUM_CLASSES = 50
# Shapenet official train/test split
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='test')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
print("--- Get model and loss")
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
loss = MODEL.get_loss(pred, labels_pl)
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss}
eval_one_epoch(sess, ops)
def get_batch(dataset, idxs, start_idx, end_idx):
bsize = end_idx-start_idx
batch_data = np.zeros((bsize, NUM_POINT, 6))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
for i in range(bsize):
ps,normal,seg = dataset[idxs[i+start_idx]]
batch_data[i,:,0:3] = ps
batch_data[i,:,3:6] = normal
batch_label[i,:] = seg
return batch_data, batch_label
def eval_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET))
# Test on all data: last batch might be smaller than BATCH_SIZE
num_batches = (len(TEST_DATASET)+BATCH_SIZE-1)/BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
seg_classes = TEST_DATASET.seg_classes
shape_ious = {cat:[] for cat in seg_classes.keys()}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
batch_data = np.zeros((BATCH_SIZE, NUM_POINT, 6))
batch_label = np.zeros((BATCH_SIZE, NUM_POINT)).astype(np.int32)
for batch_idx in range(num_batches):
if batch_idx %20==0:
log_string('%03d/%03d'%(batch_idx, num_batches))
start_idx = batch_idx * BATCH_SIZE
end_idx = min(len(TEST_DATASET), (batch_idx+1) * BATCH_SIZE)
cur_batch_size = end_idx-start_idx
cur_batch_data, cur_batch_label = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx)
if cur_batch_size == BATCH_SIZE:
batch_data = cur_batch_data
batch_label = cur_batch_label
else:
batch_data[0:cur_batch_size] = cur_batch_data
batch_label[0:cur_batch_size] = cur_batch_label
# ---------------------------------------------------------------------
loss_val = 0
pred_val = np.zeros((BATCH_SIZE, NUM_POINT, NUM_CLASSES))
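# Test-time voting: accumulate the loss and the raw logits over VOTE_NUM repeated forward passes, then average.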
for _ in range(VOTE_NUM):
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['is_training_pl']: is_training}
temp_loss_val, temp_pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
loss_val += temp_loss_val
pred_val += temp_pred_val
loss_val /= float(VOTE_NUM)
# ---------------------------------------------------------------------
# Select valid data
cur_pred_val = pred_val[0:cur_batch_size]
# Constrain pred to the groundtruth classes (selected by seg_classes[cat])
cur_pred_val_logits = cur_pred_val
cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
for i in range(cur_batch_size):
cat = seg_label_to_cat[cur_batch_label[i,0]]
logits = cur_pred_val_logits[i,:,:]
cur_pred_val[i,:] = np.argmax(logits[:,seg_classes[cat]], 1) + seg_classes[cat][0]
correct = np.sum(cur_pred_val == cur_batch_label)
total_correct += correct
total_seen += (cur_batch_size*NUM_POINT)
if cur_batch_size==BATCH_SIZE:
loss_sum += loss_val
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum(cur_batch_label==l)
total_correct_class[l] += (np.sum((cur_pred_val==l) & (cur_batch_label==l)))
for i in range(cur_batch_size):
segp = cur_pred_val[i,:]
segl = cur_batch_label[i,:]
cat = seg_label_to_cat[segl[0]]
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
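# Per-part IoU: intersection over union of the predicted and ground-truth masks for each part label of this category.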
for l in seg_classes[cat]:
if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): # part is not present, no prediction as well
part_ious[l-seg_classes[cat][0]] = 1.0
else:
part_ious[l-seg_classes[cat][0]] = np.sum((segl==l) & (segp==l)) / float(np.sum((segl==l) | (segp==l)))
shape_ious[cat].append(np.mean(part_ious))
all_shape_ious = []
for cat in shape_ious.keys():
for iou in shape_ious[cat]:
all_shape_ious.append(iou)
shape_ious[cat] = np.mean(shape_ious[cat])
print(len(all_shape_ious))
mean_shape_ious = np.mean(shape_ious.values())
log_string('eval mean loss: %f' % (loss_sum / float(len(TEST_DATASET)/BATCH_SIZE)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
for cat in sorted(shape_ious.keys()):
log_string('eval mIoU of %s:\t %f'%(cat, shape_ious[cat]))
log_string('eval mean mIoU: %f' % (mean_shape_ious))
log_string('eval mean mIoU (all shapes): %f' % (np.mean(all_shape_ious)))
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
evaluate()
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/part_seg/part_dataset.py
================================================
'''
Dataset for ShapeNet part segmentation.
'''
import os
import os.path
import json
import numpy as np
import sys
def pc_normalize(pc):
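# Center the cloud at its centroid, then scale so the farthest point lies on the unit sphere.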
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
class PartDataset():
def __init__(self, root, npoints = 2500, classification = False, class_choice = None, split='train', normalize=True):
self.npoints = npoints
self.root = root
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.classification = classification
self.normalize = normalize
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
#print(self.cat)
if class_choice is not None:
self.cat = {k:v for k,v in self.cat.items() if k in class_choice}
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
#print('category', item)
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item], 'points')
dir_seg = os.path.join(self.root, self.cat[item], 'points_label')
#print(dir_point, dir_seg)
fns = sorted(os.listdir(dir_point))
#print(fns[0][0:-4])
if split=='trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif split=='train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif split=='val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif split=='test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..'%(split))
exit(-1)
#print(os.path.basename(fns))
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg')))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn[0], fn[1]))
self.classes = dict(zip(self.cat, range(len(self.cat))))
self.num_seg_classes = 0
if not self.classification:
for i in range(len(self.datapath)/50):
l = len(np.unique(np.loadtxt(self.datapath[i][-1]).astype(np.uint8)))
if l > self.num_seg_classes:
self.num_seg_classes = l
#print(self.num_seg_classes)
self.cache = {} # from index to (point_set, cls, seg) tuple
self.cache_size = 10000
def __getitem__(self, index):
if index in self.cache:
point_set, seg, cls = self.cache[index]
else:
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
cls = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1]).astype(np.float32)
if self.normalize:
point_set = pc_normalize(point_set)
seg = np.loadtxt(fn[2]).astype(np.int64) - 1
#print(point_set.shape, seg.shape)
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, seg, cls)
choice = np.random.choice(len(seg), self.npoints, replace=True)
#resample
point_set = point_set[choice, :]
seg = seg[choice]
if self.classification:
return point_set, cls
else:
return point_set, seg
def __len__(self):
return len(self.datapath)
if __name__ == '__main__':
d = PartDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Airplane'], split='test')
print(len(d))
import time
tic = time.time()
for i in range(100):
ps, seg = d[i]
print(np.max(seg), np.min(seg))
print(time.time() - tic)
print(ps.shape, type(ps), seg.shape,type(seg))
d = PartDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0', classification = True)
print(len(d))
ps, cls = d[0]
print(ps.shape, type(ps), cls.shape,type(cls))
================================================
FILE: pointnet2_tf/part_seg/part_dataset_all_normal.py
================================================
'''
Dataset for ShapeNetPart segmentation
'''
import os
import os.path
import json
import numpy as np
import sys
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
class PartNormalDataset():
def __init__(self, root, npoints = 2500, classification = False, split='train', normalize=True, return_cls_label = False):
self.npoints = npoints
self.root = root
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.classification = classification
self.normalize = normalize
self.return_cls_label = return_cls_label
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
self.cat = {k:v for k,v in self.cat.items()}
#print(self.cat)
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
#print('category', item)
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item])
fns = sorted(os.listdir(dir_point))
#print(fns[0][0:-4])
if split=='trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif split=='train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif split=='val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif split=='test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..'%(split))
exit(-1)
#print(os.path.basename(fns))
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append(os.path.join(dir_point, token + '.txt'))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn))
self.classes = dict(zip(self.cat, range(len(self.cat))))
# Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43], 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
for cat in sorted(self.seg_classes.keys()):
print(cat, self.seg_classes[cat])
self.cache = {} # from index to (point_set, cls, seg) tuple
self.cache_size = 20000
def __getitem__(self, index):
if index in self.cache:
point_set, normal, seg, cls = self.cache[index]
else:
fn = self.datapath[index]
cat = self.datapath[index][0]
cls = self.classes[cat]
cls = np.array([cls]).astype(np.int32)
data = np.loadtxt(fn[1]).astype(np.float32)
point_set = data[:,0:3]
if self.normalize:
point_set = pc_normalize(point_set)
normal = data[:,3:6]
seg = data[:,-1].astype(np.int32)
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, normal, seg, cls)
choice = np.random.choice(len(seg), self.npoints, replace=True)
#resample
point_set = point_set[choice, :]
seg = seg[choice]
normal = normal[choice,:]
if self.classification:
return point_set, normal, cls
else:
if self.return_cls_label:
return point_set, normal, seg, cls
else:
return point_set, normal, seg
def __len__(self):
return len(self.datapath)
if __name__ == '__main__':
d = PartNormalDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal', split='trainval', npoints=3000)
print(len(d))
i = 500
ps, normal, seg = d[i]
print(d.datapath[i])
print(np.max(seg), np.min(seg))
print(ps.shape, seg.shape, normal.shape)
print(ps)
print(normal)
sys.path.append('../utils')
import show3d_balls
show3d_balls.showpoints(ps, normal+1, ballradius=8)
d = PartNormalDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal', classification = True)
print(len(d))
ps, normal, cls = d[0]
print(ps.shape, type(ps), cls.shape,type(cls))
================================================
FILE: pointnet2_tf/part_seg/test.py
================================================
import tensorflow as tf
import numpy as np
import argparse
import socket
import importlib
import time
import os
import scipy.misc
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import show3d_balls
sys.path.append(os.path.join(ROOT_DIR, 'data_prep'))
import part_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--category', default='Airplane', help='Which single class to train on [default: Airplane]')
parser.add_argument('--model', default='pointnet2_part_seg', help='Model name [default: pointnet2_part_seg]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
FLAGS = parser.parse_args()
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
NUM_POINT = FLAGS.num_point
MODEL = importlib.import_module(FLAGS.model) # import network module
NUM_CLASSES = 4
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TEST_DATASET = part_dataset.PartDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, class_choice=FLAGS.category, split='test')
def get_model(batch_size, num_point):
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(batch_size, num_point)
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
loss = MODEL.get_loss(pred, labels_pl, end_points)
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss}
return sess, ops
def inference(sess, ops, pc, batch_size):
''' pc: BxNx3 array, return BxN pred '''
assert pc.shape[0]%batch_size == 0
num_batches = pc.shape[0]/batch_size
logits = np.zeros((pc.shape[0], pc.shape[1], NUM_CLASSES))
for i in range(num_batches):
feed_dict = {ops['pointclouds_pl']: pc[i*batch_size:(i+1)*batch_size,...],
ops['is_training_pl']: False}
batch_logits = sess.run(ops['pred'], feed_dict=feed_dict)
logits[i*batch_size:(i+1)*batch_size,...] = batch_logits
return np.argmax(logits, 2)
if __name__=='__main__':
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap("hsv", 4)
cmap = np.array([cmap(i) for i in range(10)])[:,:3]
for i in range(len(TEST_DATASET)):
ps, seg = TEST_DATASET[i]
sess, ops = get_model(batch_size=1, num_point=ps.shape[0])
segp = inference(sess, ops, np.expand_dims(ps,0), batch_size=1)
segp = segp.squeeze()
gt = cmap[seg, :]
pred = cmap[segp, :]
show3d_balls.showpoints(ps, gt, pred, ballradius=8)
================================================
FILE: pointnet2_tf/part_seg/train.py
================================================
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import part_dataset_all_normal
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model', help='Model name [default: model]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--max_epoch', type=int, default=201, help='Epoch to run [default: 201]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
FLAGS = parser.parse_args()
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
NUM_CLASSES = 50
# Shapenet official train/test split
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TRAIN_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='trainval')
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='test')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
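# Batch-norm decay ramps from 1-BN_INIT_DECAY toward BN_DECAY_CLIP as training progresses
# (staircase schedule driven by the number of samples seen).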
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print "--- Get model and loss"
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
print "--- Get training operator"
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch,
'end_points': end_points}
best_acc = -1
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def get_batch(dataset, idxs, start_idx, end_idx):
bsize = end_idx-start_idx
batch_data = np.zeros((bsize, NUM_POINT, 6))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
for i in range(bsize):
ps,normal,seg = dataset[idxs[i+start_idx]]
batch_data[i,:,0:3] = ps
batch_data[i,:,3:6] = normal
batch_label[i,:] = seg
return batch_data, batch_label
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train samples
train_idxs = np.arange(0, len(TRAIN_DATASET))
np.random.shuffle(train_idxs)
num_batches = len(TRAIN_DATASET)/BATCH_SIZE
log_string(str(datetime.now()))
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label = get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx)
# Augment batched point clouds by rotation and jittering
#aug_data = batch_data
#aug_data = provider.random_scale_point_cloud(batch_data)
batch_data[:,:,0:3] = provider.jitter_point_cloud(batch_data[:,:,0:3])
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
if (batch_idx+1)%10 == 0:
log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
log_string('mean loss: %f' % (loss_sum / 10))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
total_correct = 0
total_seen = 0
loss_sum = 0
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET))
# Test on all data: last batch might be smaller than BATCH_SIZE
num_batches = (len(TEST_DATASET)+BATCH_SIZE-1)/BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
seg_classes = TEST_DATASET.seg_classes
shape_ious = {cat:[] for cat in seg_classes.keys()}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
batch_data = np.zeros((BATCH_SIZE, NUM_POINT, 6))
batch_label = np.zeros((BATCH_SIZE, NUM_POINT)).astype(np.int32)
for batch_idx in range(num_batches):
if batch_idx %20==0:
log_string('%03d/%03d'%(batch_idx, num_batches))
start_idx = batch_idx * BATCH_SIZE
end_idx = min(len(TEST_DATASET), (batch_idx+1) * BATCH_SIZE)
cur_batch_size = end_idx-start_idx
cur_batch_data, cur_batch_label = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx)
if cur_batch_size == BATCH_SIZE:
batch_data = cur_batch_data
batch_label = cur_batch_label
else:
batch_data[0:cur_batch_size] = cur_batch_data
batch_label[0:cur_batch_size] = cur_batch_label
# ---------------------------------------------------------------------
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
# ---------------------------------------------------------------------
# Select valid data
cur_pred_val = pred_val[0:cur_batch_size]
# Constrain pred to the groundtruth classes (selected by seg_classes[cat])
cur_pred_val_logits = cur_pred_val
cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
for i in range(cur_batch_size):
cat = seg_label_to_cat[cur_batch_label[i,0]]
logits = cur_pred_val_logits[i,:,:]
cur_pred_val[i,:] = np.argmax(logits[:,seg_classes[cat]], 1) + seg_classes[cat][0]
correct = np.sum(cur_pred_val == cur_batch_label)
total_correct += correct
total_seen += (cur_batch_size*NUM_POINT)
if cur_batch_size==BATCH_SIZE:
loss_sum += loss_val
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum(cur_batch_label==l)
total_correct_class[l] += (np.sum((cur_pred_val==l) & (cur_batch_label==l)))
for i in range(cur_batch_size):
segp = cur_pred_val[i,:]
segl = cur_batch_label[i,:]
cat = seg_label_to_cat[segl[0]]
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): # part is not present, no prediction as well
part_ious[l-seg_classes[cat][0]] = 1.0
else:
part_ious[l-seg_classes[cat][0]] = np.sum((segl==l) & (segp==l)) / float(np.sum((segl==l) | (segp==l)))
shape_ious[cat].append(np.mean(part_ious))
all_shape_ious = []
for cat in shape_ious.keys():
for iou in shape_ious[cat]:
all_shape_ious.append(iou)
shape_ious[cat] = np.mean(shape_ious[cat])
mean_shape_ious = np.mean(shape_ious.values())
log_string('eval mean loss: %f' % (loss_sum / float(len(TEST_DATASET)/BATCH_SIZE)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
for cat in sorted(shape_ious.keys()):
log_string('eval mIoU of %s:\t %f'%(cat, shape_ious[cat]))
log_string('eval mean mIoU: %f' % (mean_shape_ious))
log_string('eval mean mIoU (all shapes): %f' % (np.mean(all_shape_ious)))
EPOCH_CNT += 1
return total_correct/float(total_seen)
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/part_seg/train_one_hot.py
================================================
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import part_dataset_all_normal
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model', help='Model name [default: model]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--max_epoch', type=int, default=201, help='Epoch to run [default: 201]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=16881*20, help='Decay step for lr decay [default: 16881*20]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
FLAGS = parser.parse_args()
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_one_hot.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
NUM_CLASSES = 50
# Shapenet official train/test split
DATA_PATH = os.path.join(ROOT_DIR, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TRAIN_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='trainval', return_cls_label=True)
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='test', return_cls_label=True)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl, cls_labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print "--- Get model and loss"
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, cls_labels_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
print "--- Get training operator"
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
#sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'cls_labels_pl': cls_labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch,
'end_points': end_points}
best_acc = -1
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def get_batch(dataset, idxs, start_idx, end_idx):
bsize = end_idx-start_idx
batch_data = np.zeros((bsize, NUM_POINT, 6))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
batch_cls_label = np.zeros((bsize,), dtype=np.int32)
for i in range(bsize):
ps,normal,seg,cls = dataset[idxs[i+start_idx]]
batch_data[i,:,0:3] = ps
batch_data[i,:,3:6] = normal
batch_label[i,:] = seg
batch_cls_label[i] = cls
return batch_data, batch_label, batch_cls_label
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train samples
train_idxs = np.arange(0, len(TRAIN_DATASET))
np.random.shuffle(train_idxs)
num_batches = len(TRAIN_DATASET)/BATCH_SIZE
log_string(str(datetime.now()))
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label, batch_cls_label = get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx)
# Augment batched point clouds by rotation and jittering
#aug_data = batch_data
#aug_data = provider.random_scale_point_cloud(batch_data)
batch_data[:,:,0:3] = provider.jitter_point_cloud(batch_data[:,:,0:3])
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['cls_labels_pl']: batch_cls_label,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
if (batch_idx+1)%10 == 0:
log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
log_string('mean loss: %f' % (loss_sum / 10))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
total_correct = 0
total_seen = 0
loss_sum = 0
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET))
# Test on all data: last batch might be smaller than BATCH_SIZE
num_batches = (len(TEST_DATASET)+BATCH_SIZE-1)/BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
seg_classes = TEST_DATASET.seg_classes
shape_ious = {cat:[] for cat in seg_classes.keys()}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
batch_data = np.zeros((BATCH_SIZE, NUM_POINT, 6))
batch_label = np.zeros((BATCH_SIZE, NUM_POINT)).astype(np.int32)
batch_cls_label = np.zeros((BATCH_SIZE,)).astype(np.int32)
for batch_idx in range(num_batches):
if batch_idx %20==0:
log_string('%03d/%03d'%(batch_idx, num_batches))
start_idx = batch_idx * BATCH_SIZE
end_idx = min(len(TEST_DATASET), (batch_idx+1) * BATCH_SIZE)
cur_batch_size = end_idx-start_idx
cur_batch_data, cur_batch_label, cur_batch_cls_label = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx)
if cur_batch_size == BATCH_SIZE:
batch_data = cur_batch_data
batch_label = cur_batch_label
batch_cls_label = cur_batch_cls_label
else:
batch_data[0:cur_batch_size] = cur_batch_data
batch_label[0:cur_batch_size] = cur_batch_label
batch_cls_label[0:cur_batch_size] = cur_batch_cls_label
# ---------------------------------------------------------------------
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['cls_labels_pl']: batch_cls_label,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
# ---------------------------------------------------------------------
# Select valid data
cur_pred_val = pred_val[0:cur_batch_size]
# Constrain pred to the groundtruth classes (selected by seg_classes[cat])
cur_pred_val_logits = cur_pred_val
cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
for i in range(cur_batch_size):
cat = seg_label_to_cat[cur_batch_label[i,0]]
logits = cur_pred_val_logits[i,:,:]
cur_pred_val[i,:] = np.argmax(logits[:,seg_classes[cat]], 1) + seg_classes[cat][0]
correct = np.sum(cur_pred_val == cur_batch_label)
total_correct += correct
total_seen += (cur_batch_size*NUM_POINT)
if cur_batch_size==BATCH_SIZE:
loss_sum += loss_val
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum(cur_batch_label==l)
total_correct_class[l] += (np.sum((cur_pred_val==l) & (cur_batch_label==l)))
for i in range(cur_batch_size):
segp = cur_pred_val[i,:]
segl = cur_batch_label[i,:]
cat = seg_label_to_cat[segl[0]]
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): # part is not present, no prediction as well
part_ious[l-seg_classes[cat][0]] = 1.0
else:
part_ious[l-seg_classes[cat][0]] = np.sum((segl==l) & (segp==l)) / float(np.sum((segl==l) | (segp==l)))
shape_ious[cat].append(np.mean(part_ious))
all_shape_ious = []
for cat in shape_ious.keys():
for iou in shape_ious[cat]:
all_shape_ious.append(iou)
shape_ious[cat] = np.mean(shape_ious[cat])
mean_shape_ious = np.mean(shape_ious.values())
log_string('eval mean loss: %f' % (loss_sum / float(len(TEST_DATASET)/BATCH_SIZE)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
for cat in sorted(shape_ious.keys()):
log_string('eval mIoU of %s:\t %f'%(cat, shape_ious[cat]))
log_string('eval mean mIoU: %f' % (mean_shape_ious))
log_string('eval mean mIoU (all shapes): %f' % (np.mean(all_shape_ious)))
EPOCH_CNT += 1
return total_correct/float(total_seen)
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/scannet/README.md
================================================
### ScanNet Data
Original dataset website: http://www.scan-net.org/
You can download our preprocessed data here (1.72 GB); see `scannet_util.py` for the data-loading code. Note that the virtual scan data is generated on the fly from the preprocessed data.
The code we used for ScanNet preprocessing is included in the `preprocessing` folder. To run it, download the original ScanNet data and adjust the hard-coded paths accordingly.
Note: to use ScanNet v2 data, switch the tsv file to `scannetv2-labels.combined.tsv` and update `scannet_util.py` to read the raw-class and NYU40 names from the correct columns (shifted by one relative to the v1 tsv).
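For reference, here is a minimal sketch of this kind of header-driven column lookup. The function name is illustrative (it is not part of `scannet_util.py`), and the v2 column name `raw_category` is an assumption to verify against your tsv header; the v1 names `category` and `nyu40class` match the header of `scannet-labels.combined.tsv` in this folder.

```python
import csv

def load_label_map(tsv_path, raw_col='category', nyu40_col='nyu40class'):
    """Map raw category names to NYU40 class names from a combined-label tsv."""
    # Looking columns up by header name avoids the hard-coded offsets that
    # break when switching between the v1 and v2 tsv files.
    mapping = {}
    with open(tsv_path) as f:
        for row in csv.DictReader(f, delimiter='\t'):
            mapping[row[raw_col]] = row[nyu40_col]
    return mapping

# e.g. for v2 (assumed column name):
# raw2nyu40 = load_label_map('scannetv2-labels.combined.tsv', raw_col='raw_category')
```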
================================================
FILE: pointnet2_tf/scannet/pc_util.py
================================================
""" Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_label_to_surface_voxel_label(point_cloud, label, res=0.0484):
coordmax = np.max(point_cloud,axis=0)
coordmin = np.min(point_cloud,axis=0)
nvox = np.ceil((coordmax-coordmin)/res)
vidx = np.ceil((point_cloud-coordmin)/res)
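# Flatten each 3D voxel coordinate into a single linear index: x + y*nx + z*nx*ny.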
vidx = vidx[:,0]+vidx[:,1]*nvox[0]+vidx[:,2]*nvox[0]*nvox[1]
uvidx = np.unique(vidx)
if label.ndim==1:
uvlabel = [np.argmax(np.bincount(label[vidx==uv].astype(np.uint32))) for uv in uvidx]
else:
assert(label.ndim==2)
uvlabel = np.zeros((len(uvidx),label.shape[1]))
for i in range(label.shape[1]):
uvlabel[:,i] = np.array([np.argmax(np.bincount(label[vidx==uv,i].astype(np.uint32))) for uv in uvidx])
return uvidx, uvlabel, nvox
def point_cloud_label_to_surface_voxel_label_fast(point_cloud, label, res=0.0484):
coordmax = np.max(point_cloud,axis=0)
coordmin = np.min(point_cloud,axis=0)
nvox = np.ceil((coordmax-coordmin)/res)
vidx = np.ceil((point_cloud-coordmin)/res)
vidx = vidx[:,0]+vidx[:,1]*nvox[0]+vidx[:,2]*nvox[0]*nvox[1]
uvidx, vpidx = np.unique(vidx,return_index=True)
if label.ndim==1:
uvlabel = label[vpidx]
else:
assert(label.ndim==2)
uvlabel = label[vpidx,:]
return uvidx, uvlabel, nvox
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
#a = np.zeros((16,1024,3))
#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
#print loc2pc
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0, num_sample-pc.shape[0]), (0,0)), 'edge')
vol[i,j,k,:,:] = pc
return vol
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25, xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud to fit in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i-radius)*(i-radius) + (j-radius)*(j-radius) <= radius*radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
return an numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
from PIL import Image
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
import matplotlib.pyplot as pyplot
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
else:
assert(num_classes>np.max(labels))
fout = open(out_filename, 'w')
colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
for i in range(N):
c = colors[labels[i]]
c = [int(x*255) for x in c]
fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
def write_ply_rgb(points, colors, out_filename, num_classes=None):
""" Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file """
colors = colors.astype(int)
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[i,:]
fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
================================================
FILE: pointnet2_tf/scannet/preprocessing/collect_scannet_scenes.py
================================================
import scannet_util
CLASS_NAMES = scannet_util.g_label_names
RAW2SCANNET = scannet_util.g_raw2scannet
import os
import json
import sys
import numpy as np
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append('../')
import pc_util
SCANNET_DIR = 'scannet_clean_2'
SCENE_NAMES = [line.rstrip() for line in open('scannet_all.txt')]
def collect_one_scene_data_label(scene_name, out_filename):
# Over-segmented segments: maps from segment to vertex/point IDs
data_folder = os.path.join(SCANNET_DIR, scene_name)
mesh_seg_filename = os.path.join(data_folder, '%s_vh_clean_2.0.010000.segs.json'%(scene_name))
#print mesh_seg_filename
with open(mesh_seg_filename) as jsondata:
d = json.load(jsondata)
seg = d['segIndices']
#print len(seg)
segid_to_pointid = {}
for i in range(len(seg)):
if seg[i] not in segid_to_pointid:
segid_to_pointid[seg[i]] = []
segid_to_pointid[seg[i]].append(i)
# Raw points in XYZRGBA
ply_filename = os.path.join(data_folder, '%s_vh_clean_2.ply' % (scene_name))
points = pc_util.read_ply_xyzrgb(ply_filename)
log_string(str(points.shape))
# Instances over-segmented segment IDs: annotation on segments
instance_segids = []
labels = []
annotation_filename = os.path.join(data_folder, '%s.aggregation.json'%(scene_name))
#print annotation_filename
with open(annotation_filename) as jsondata:
d = json.load(jsondata)
for x in d['segGroups']:
instance_segids.append(x['segments'])
labels.append(x['label'])
#print len(instance_segids)
#print labels
# Each instance's points
instance_points_list = []
instance_labels_list = []
semantic_labels_list = []
for i in range(len(instance_segids)):
segids = instance_segids[i]
pointids = []
for segid in segids:
pointids += segid_to_pointid[segid]
instance_points = points[np.array(pointids),:]
instance_points_list.append(instance_points)
instance_labels_list.append(np.ones((instance_points.shape[0], 1))*i)
if labels[i] not in RAW2SCANNET:
label = 'unannotated'
else:
label = RAW2SCANNET[labels[i]]
label = CLASS_NAMES.index(label)
semantic_labels_list.append(np.ones((instance_points.shape[0], 1))*label)
# Refactor data format
scene_points = np.concatenate(instance_points_list, 0)
scene_points = scene_points[:,0:6] # XYZRGB, disregarding the A
instance_labels = np.concatenate(instance_labels_list, 0)
semantic_labels = np.concatenate(semantic_labels_list, 0)
data = np.concatenate((scene_points, instance_labels, semantic_labels), 1)
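# Resulting array is Nx8 per scene: XYZRGB, instance label, semantic label (demo.py reads these columns).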
np.save(out_filename, data)
LOG_FOUT = open('log.txt','w')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
if __name__=='__main__':
output_folder = 'scannet_scenes'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for scene_name in SCENE_NAMES:
log_string(scene_name)
try:
out_filename = scene_name+'.npy' # scene0000_00.npy
collect_one_scene_data_label(scene_name, os.path.join(output_folder, out_filename))
        except Exception as e:
            log_string(scene_name + ' ERROR!!')
            log_string(str(e))
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/scannet/preprocessing/demo.py
================================================
import sys
import os
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../'))
import numpy as np
import pc_util
data = np.load('scannet_scenes/scene0001_01.npy')
scene_points = data[:,0:3]
colors = data[:,3:6]
instance_labels = data[:,6]
semantic_labels = data[:,7]
output_folder = 'demo_output'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Write scene as OBJ file for visualization
pc_util.write_ply_rgb(scene_points, colors, os.path.join(output_folder, 'scene.obj'))
pc_util.write_ply_color(scene_points, instance_labels, os.path.join(output_folder, 'scene_instance.obj'))
pc_util.write_ply_color(scene_points, semantic_labels, os.path.join(output_folder, 'scene_semantic.obj'))
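# A small follow-up sketch (not in the original demo): per-class point counts
# with numpy, assuming the 8-column layout saved by collect_scannet_scenes.py:
#
#   classes, counts = np.unique(semantic_labels, return_counts=True)
#   for c, n in zip(classes, counts):
#       print('class %d: %d points' % (int(c), n))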
================================================
FILE: pointnet2_tf/scannet/preprocessing/fetch_label_names.py
================================================
''' Scan through the annotation files of all scenes to collect a complete list of categories. '''
import os
import json
scannet_dir = './scannet/'
scene_names = [line.rstrip() for line in open('scannet_all.txt')]
labels = set()
for scene_name in scene_names:
path = os.path.join(scannet_dir, scene_name)
agg_filename = os.path.join(path, scene_name+'.aggregation.json')
with open(agg_filename) as jsondata:
d = json.load(jsondata)
for x in d['segGroups']:
labels.add(x['label'])
fout = open('class_names.txt', 'w')
for label in list(labels):
    print(label)
    try:
        fout.write(label + '\n')
    except UnicodeEncodeError:
        # Skip labels that cannot be encoded (several raw annotations are non-ASCII).
        pass
fout.close()
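# Hypothetical variant (a sketch, not this script's behavior): the TSV below
# also records how often each category occurs, which could be gathered here
# with a Counter instead of a set:
#
#   from collections import Counter
#   label_counts = Counter()
#   for scene_name in scene_names:
#       agg = os.path.join(scannet_dir, scene_name, scene_name + '.aggregation.json')
#       with open(agg) as jsondata:
#           label_counts.update(x['label'] for x in json.load(jsondata)['segGroups'])
#   # label_counts.most_common() then mirrors the (category, count) columns.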
================================================
FILE: pointnet2_tf/scannet/preprocessing/scannet-labels.combined.tsv
================================================
category count nyuId nyu40id eigen13id nyuClass nyu40class eigen13class ModelNet40 ModelNet10 ShapeNetCore55 synsetoffset wnsynsetid wnsynsetkey
wall 7274 21 1 12 wall wall Wall n04546855 wall.n.01
chair 5419 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
floor 3910 11 2 5 floor floor Floor n03365592 floor.n.01
table 2664 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
door 1400 28 8 12 door door Wall door n03221720 door.n.01
couch 1222 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
cabinet 1106 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
shelf 889 42 15 6 shelves shelves Furniture bookshelf bookshelf 02871439 n02871439 bookshelf.n.01
desk 862 36 14 10 desk desk Table desk desk table 04379243 n03179701 desk.n.01
office chair 837 5 5 4 chair chair Chair chair chair chair 03001627 n04373704 swivel_chair.n.01
bed 814 157 4 1 bed bed Bed bed bed bed 02818832 n02818832 bed.n.01
trashcan 688 12 39 6 garbage bin otherfurniture Furniture trash_bin 02747177 n02747177 ashcan.n.01
pillow 608 119 18 7 pillow pillow Objects pillow 03938244 n03938244 pillow.n.01
sink 504 24 34 7 sink sink Objects sink n04223580 sink.n.01
picture 467 64 11 8 picture picture Picture n03931044 picture.n.01
window 432 59 9 13 window window Window n04587648 window.n.01
toilet 402 124 33 7 toilet toilet Objects toilet toilet n04446276 toilet.n.01
bookshelf 400 88 10 6 bookshelf bookshelf Furniture bookshelf bookshelf 02871439 n02871439 bookshelf.n.01
monitor 395 49 40 7 monitor otherprop Objects monitor monitor tv or monitor 03211117 n03782190 monitor.n.04
computer 369 46 40 7 computer otherprop Objects n03082979 computer.n.01
curtain 356 89 16 13 curtain curtain Window curtain n03151077 curtain.n.01
book 335 1 23 2 book books Books n02870526 book.n.11
armchair 318 5 5 4 chair chair Chair chair chair chair 03001627 n02738535 armchair.n.01
coffee table 303 356 39 6 coffee table otherfurniture Furniture table table table 04379243 n03063968 coffee_table.n.01
drawer 290 174 39 6 drawer otherfurniture Furniture n03233905 drawer.n.01
box 283 26 29 7 box box Objects n02883344 box.n.01
refrigerator 269 17 24 6 refridgerator refridgerator Furniture n04070727 refrigerator.n.01
lamp 255 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
kitchen cabinet 252 3 3 6 cabinet cabinet Furniture n02933112 cabinet.n.01
dining chair 242 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
towel 222 135 27 7 towel towel Objects n04459362 towel.n.01
clothes 214 141 21 7 clothes clothes Objects n02728440 apparel.n.01
tv 210 172 25 11 television television TV tv or monitor 03211117 n03211117 display.n.06
nightstand 206 158 32 6 night stand night stand Furniture night_stand night_stand n03015254 chest_of_drawers.n.01
counter 196 7 12 6 counter counter Furniture table table table 04379243 n03116530 counter.n.01
dresser 180 169 17 6 dresser dresser Furniture dresser dresser n03015254 chest_of_drawers.n.01
countertop 176 7 12 6 counter counter Furniture n03118245 countertop.n.01
stool 165 150 40 7 stool otherprop Objects stool n04326896 stool.n.01
cushion 141 119 18 7 pillow pillow Objects n03151500 cushion.n.03
plant 139 82 40 7 plant otherprop Objects plant n00017222 plant.n.02
ceiling 134 4 22 3 ceiling ceiling Ceiling n02990373 ceiling.n.01
bathtub 134 136 36 7 bathtub bathtub Objects bathtub bathtub tub 02808440 n02808440 bathtub.n.01
bedframe 132 157 4 1 bed bed Bed n02822579 bedstead.n.01
end table 125 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
dining table 123 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
keyboard 118 47 40 7 keyboard otherprop Objects keyboard computer keyboard 03085013 n03085013 computer_keyboard.n.01
bag 116 55 37 7 bag bag Objects suitcase 02773838 n02773838 bag.n.06
backpack 114 206 40 7 backpack otherprop Objects n02769748 backpack.n.01
toilet paper 113 139 40 7 toilet paper otherprop Objects n15075141 toilet_tissue.n.01
printer 111 66 40 7 printer otherprop Objects printer 04004475 n04004475 printer.n.03
tv stand 103 291 39 6 tv stand otherfurniture Furniture tv_stand n03290653 entertainment_center.n.01
whiteboard 102 45 30 7 whiteboard whiteboard Objects n03211616 display_panel.n.01
carpet 99 130 40 7 rug otherprop Objects n04118021 rug.n.01
blanket 99 312 40 7 blanket otherprop Objects n02849154 blanket.n.01
shower curtain 99 123 28 7 shower curtain shower curtain Objects curtain n04209239 shower_curtain.n.01
trash can 94 12 39 6 garbage bin otherfurniture Furniture trash_bin 02747177 n02747177 ashcan.n.01
closet 94 772 39 6 wardrobe otherfurniture Furniture wardrobe
stair 89 215 38 7 stairs otherstructure Objects stairs n04314914 step.n.04
microwave 88 13 40 7 microwave otherprop Objects microwave 03761084 n03761084 microwave.n.02
washbasin 86 24 34 7 sink sink Objects sink n04553920 washbasin.n.01
rug 85 130 40 7 rug otherprop Objects n04118021 rug.n.01
stove 78 242 38 7 stove otherstructure Objects stove 04330267 n04330267 stove.n.02
shoe 68 149 40 7 shoe otherprop Objects n04199027 shoe.n.01
computer tower 68 46 40 7 computer otherprop Objects n03082979 computer.n.01
bottle 66 2 40 7 bottle otherprop Objects bottle bottle 02876657 n02876657 bottle.n.01
bin 64 307 40 7 bin otherprop Objects n02839910 bin.n.01
ottoman 63 359 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01
bench 63 204 39 6 bench otherfurniture Furniture bench bench 02828884 n02828884 bench.n.01
board 63 408 38 7 board otherstructure Objects
washing machine 62 278 39 6 washing machine otherfurniture Furniture washing_machine 04554684 n04554684 washer.n.03
mirror 62 122 19 7 mirror mirror Objects n03773035 mirror.n.01
copier 61 40 7 otherprop Objects n03257586 duplicator.n.01
basket 60 39 40 7 basket otherprop Objects basket 02801938 n02801938 basket.n.01
sofa chair 59 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
file cabinet 54 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
fan 52 74 40 7 fan otherprop Objects n03320046 fan.n.01
laptop 52 37 40 7 laptop otherprop Objects laptop laptop 03642806 n03642806 laptop.n.01
shower 49 38 7 otherstructure Objects n04208936 shower.n.01
paper 48 15 26 7 paper paper Objects n14974264 paper.n.01
person 48 331 31 7 person person Objects person n05217688 person.n.02
headboard 47 161 39 6 headboard otherfurniture Furniture n03502200 headboard.n.01
paper towel dispenser 47 14 40 7 paper towel dispenser otherprop Objects
faucet 45 9 40 7 faucet otherprop Objects faucet 03325088 n03325088 faucet.n.01
oven 43 238 38 7 oven otherstructure Objects n03862676 oven.n.01
footstool 42 359 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01
blinds 42 80 13 13 blinds blinds Window n02851099 blind.n.03
rack 41 50 39 6 stand otherfurniture Furniture n04038440 rack.n.05
plate 39 233 40 7 plate otherprop Objects n03959485 plate.n.04
blackboard 38 225 38 7 blackboard otherstructure Objects n02846511 blackboard.n.01
piano 38 298 39 6 piano otherfurniture Furniture piano piano 03928116 n03928116 piano.n.01
heater 38 111 39 6 heater otherfurniture Furniture n03508101 heater.n.01
soap 37 133 40 7 soap otherprop Objects
luggage 36 783 40 7 luggage otherprop Objects n02774630 baggage.n.01
computer desk 36 36 14 10 desk desk Table desk desk table 04379243 n03179701 desk.n.01
rail 36 497 38 7 railing otherstructure Objects
radiator 36 236 39 6 radiator otherfurniture Furniture n04041069 radiator.n.02
recycle bin 35 307 40 7 bin otherprop Objects
container 34 140 40 7 container otherprop Objects n03094503 container.n.01
wardrobe 34 772 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01
soap dispenser 32 40 7 otherprop Objects n04254120 soap_dispenser.n.01
telephone 32 32 40 7 telephone otherprop Objects telephone 04401088 n04401088 telephone.n.01
bucket 32 427 40 7 bucket otherprop Objects n02909870 bucket.n.01
clock 31 56 40 7 clock otherprop Objects clock 03046257 n03046257 clock.n.01
stand 29 50 39 6 stand otherfurniture Furniture table table table 04379243 n04301000 stand.n.04
light 27 62 38 7 light otherstructure Objects n03665366 light.n.02
laundry basket 27 164 40 7 laundry basket otherprop Objects basket 02801938 n03050864 clothes_hamper.n.01
pipe 27 41 40 7 pipe otherprop Objects
round table 26 19 7 10 table table Table table table table 04379243 n04114554 round_table.n.02
roof 25 4 22 3 ceiling ceiling Ceiling n04105068 roof.n.01
clothes dryer 25 39 6 otherfurniture Furniture n03251766 dryer.n.01
coat 23 324 40 7 jacket otherprop Objects n03057021 coat.n.01
guitar 23 300 40 7 guitar otherprop Objects guitar guitar 03467517 n03467517 guitar.n.01
desk chair 23 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
sheet 22 559 40 7 sheet otherprop Objects
toilet paper holder 22 647 40 7 toilet paper holder otherprop Objects
seat 22 524 39 6 furniture otherfurniture Furniture n04161981 seat.n.03
step 21 38 7 otherstructure Objects n04314914 step.n.04
speaker 20 54 40 7 speaker otherprop Objects speaker 03691459 n03691459 loudspeaker.n.01
vending machine 19 220 40 7 machine otherprop Objects n04525305 vending_machine.n.01
column 19 94 38 7 column otherstructure Objects n03074380 column.n.06
bicycle 18 189 40 7 bicycle otherprop Objects bicycle 02834778 n02834778 bicycle.n.01
ladder 18 48 39 6 ladder otherfurniture Furniture stairs n03632277 ladder.n.01
cover 18 312 40 7 blanket otherprop Objects
handle 18 758 40 7 handle otherprop Objects n03485997 handle.n.01
bathroom stall 18 38 7 otherstructure Objects n02873839 booth.n.02
foosball table 17 510 39 6 foosball table otherfurniture Furniture table table table 04379243 n04379243 table.n.02
table lamp 17 144 35 7 lamp lamp Objects lamp lamp 03636649 n04380533 table_lamp.n.01
shower wall 17 21 1 12 wall wall Wall
chest 17 344 39 6 chest otherfurniture Furniture dresser dresser
cup 17 35 40 7 cup otherprop Objects cup cup or mug 03797390 n03797390 mug.n.04
jacket 16 324 40 7 jacket otherprop Objects n03589791 jacket.n.01
storage bin 16 812 40 7 storage bin otherprop Objects
screen 16 40 7 otherprop Objects
coffee maker 16 40 7 otherprop Objects n03063338 coffee_maker.n.01
hamper 15 39 40 7 basket otherprop Objects basket 02801938 n03482405 hamper.n.02
dishwasher 15 8 38 7 dishwasher otherstructure Objects dishwasher 03207941 n03207941 dishwasher.n.01
window frame 15 477 38 7 window frame otherstructure Objects n04589593 window_frame.n.01
paper towel 15 113 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01
machine 15 220 40 7 machine otherprop Objects n03699975 machine.n.01
mat 15 143 20 5 floor mat floor mat Floor n03727837 mat.n.01
windowsill 14 38 7 otherstructure Objects n04590263 windowsill.n.01
tap 14 9 40 7 faucet otherprop Objects faucet 03325088 n04559451 water_faucet.n.01
pool table 14 515 39 6 pool table otherfurniture Furniture table table table 04379243 n03982430 pool_table.n.01
hand dryer 14 40 7 otherprop Objects
bar 14 51 38 7 bar otherstructure Objects n02788689 bar.n.03
frame 14 38 7 otherstructure Objects
rolling chair 14 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
toaster 14 251 40 7 toaster otherprop Objects n04442312 toaster.n.02
wall frame 14 38 7 otherstructure Objects
hanger 13 211 40 7 hanger otherprop Objects n03490884 hanger.n.02
conference table 13 19 7 10 table table Table table table table 04379243 n03090000 conference_table.n.01
handrail 13 453 38 7 banister otherstructure Objects n02788148 bannister.n.02
treadmill 13 458 39 6 treadmill otherfurniture Furniture n04477387 treadmill.n.01
bulletin board 13 408 38 7 board otherstructure Objects
ironing board 13 313 39 6 ironing board otherfurniture Furniture n03586090 ironing_board.n.01
fireplace 12 372 38 7 fireplace otherstructure Objects n03346455 fireplace.n.01
soap dish 12 638 40 7 soap dish otherprop Objects n04254009 soap_dish.n.01
fabric 12 40 7 otherprop Objects n03309808 fabric.n.01
kitchen counter 12 7 12 6 counter counter Furniture table table table 04379243 n03116530 counter.n.01
glass 12 612 38 7 glass otherstructure Objects n03438257 glass.n.02
doorframe 11 615 38 7 door frame otherstructure Objects n03222722 doorframe.n.01
table cushion 11 40 7 otherprop Objects
toilet paper dispenser 11 40 7 otherprop Objects
slab 11 38 7 otherstructure Objects n04233405 slab.n.01
mini fridge 11 17 24 6 refridgerator refridgerator Furniture n03273913 electric_refrigerator.n.01
fire extinguisher 11 10 40 7 fire extinguisher otherprop Objects n03345837 fire_extinguisher.n.01
shampoo 11 40 7 otherprop Objects
ball 11 60 40 7 ball otherprop Objects
hat 11 193 40 7 hat otherprop Objects n03497657 hat.n.01
shower curtain rod 11 40 7 otherprop Objects
junk 11 40 7 otherprop Objects n14857897 debris.n.01
soap holder 10 506 40 7 soap holder otherprop Objects
staircase 10 215 38 7 stairs otherstructure Objects n04298308 stairway.n.01
toiletry 10 40 7 otherprop Objects n04447443 toiletry.n.01
stall door 10 28 8 12 door door Wall door
framed picture 10 64 11 8 picture picture Picture
water cooler 10 509 39 6 water cooler otherfurniture Furniture n04559166 water_cooler.n.01
bags 10 40 7 otherprop Objects suitcase 02773838 n02773838 bag.n.06
desk lamp 10 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
paper cutter 10 108 40 7 paper cutter otherprop Objects n03886940 paper_cutter.n.01
led tv 9 40 7 otherprop Objects
switch 9 40 7 otherprop Objects n04372370 switch.n.01
bed sheet 9 559 40 7 sheet otherprop Objects n04188179 sheet.n.03
roof frame 9 38 7 otherstructure Objects
tray 9 179 40 7 tray otherprop Objects n04476259 tray.n.01
comforter 9 484 40 7 comforter otherprop Objects n04033995 quilt.n.01
air conditioner 9 79 38 7 air conditioner otherstructure Objects n02686379 air_conditioner.n.01
shower door 9 28 8 12 door door Wall door
shirt 9 40 7 otherprop Objects n04197391 shirt.n.01
swivel chair 9 5 5 4 chair chair Chair chair chair chair 03001627 n04373704 swivel_chair.n.01
pillar 9 94 38 7 column otherstructure Objects n03073977 column.n.07
detergent 9 40 7 otherprop Objects
ledge 9 38 7 otherstructure Objects n09337253 ledge.n.01
vase 8 78 40 7 vase otherprop Objects vase jar 03593526 n04522168 vase.n.01
toaster oven 8 275 40 7 toaster oven otherprop Objects n04442441 toaster_oven.n.01
bedpost 8 40 7 otherprop Objects n02821415 bedpost.n.01
food 8 40 7 otherprop Objects n00021265 food.n.01
picture frame 8 40 7 otherprop Objects n03931765 picture_frame.n.01
poltrone 8 40 7 otherprop Objects
study table 8 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
office table 8 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
mouse 8 103 40 7 mouse otherprop Objects n03793489 mouse.n.04
storage 8 n03744276 memory.n.04
nerf gun 8 40 7 otherprop Objects
table chair 8 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
night table 8 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
computer chair 8 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
toilet seat liner dispenser 8 40 7 otherprop Objects
backrest 7 5 5 4 chair chair Chair n02767433 back.n.08
chair seat 7 40 7 otherprop Objects
sink cabinet 7 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
can 7 329 40 7 can otherprop Objects can 02946921 n02946921 can.n.01
furniture 7 524 39 6 furniture otherfurniture Furniture n03405725 furniture.n.01
cart 7 305 40 7 cart otherprop Objects n03484083 handcart.n.01
stool chair 7 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
step stool 7 276 40 7 step stool otherprop Objects stool n04315713 step_stool.n.01
robe 7 40 7 otherprop Objects n04097866 robe.n.01
table stand 7 50 39 6 stand otherfurniture Furniture
stall 7 38 7 otherstructure Objects n02873839 booth.n.02
dispenser 7 40 7 otherprop Objects n03210683 dispenser.n.01
storage container 7 140 40 7 container otherprop Objects
side table 7 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
partition 7 21 1 12 wall wall Wall n03894379 partition.n.01
appliance 7 40 7 otherprop Objects
lotion 7 40 7 otherprop Objects n03690938 lotion.n.01
pot 7 16 40 7 pot otherprop Objects
photo 7 508 40 7 photo otherprop Objects n03925226 photograph.n.01
toilet brush 7 630 40 7 toilet brush otherprop Objects
scale 7 639 40 7 scale otherprop Objects n04141975 scale.n.07
tissue box 7 138 40 7 tissue box otherprop Objects
remote 7 40 7 otherprop Objects remote_control 04074963 n04074963 remote_control.n.01
light switch 6 301 38 7 light switch otherstructure Objects
crate 6 183 39 6 crate otherfurniture Furniture n03127925 crate.n.01
ping pong table 6 625 39 6 ping pong table otherfurniture Furniture table table table 04379243 n04379243 table.n.02
platform 6 38 7 otherstructure Objects
pantry 6 38 7 otherstructure Objects n03885535 pantry.n.01
bath cabinet 6 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
slipper 6 40 7 otherprop Objects n04241394 slipper.n.01
sideboard 6 7 12 6 counter counter Furniture
holder 6 40 7 otherprop Objects n03525454 holder.n.01
worktop 6 40 7 otherprop Objects
outlet 6 40 7 otherprop Objects n04548771 wall_socket.n.01
gas cooker 6 242 38 7 stove otherstructure Objects n03425595 gas_range.n.01
doorhandle 6 652 40 7 knob otherprop Objects n03222959 doorknob.n.01
cutting board 6 247 40 7 cutting board otherprop Objects n03025513 chopping_board.n.01
bathroom sink 6 24 34 7 sink sink Objects sink n04223580 sink.n.01
controller 6 40 7 otherprop Objects n03096960 control.n.09
bedding set 6 40 7 otherprop Objects
mount 6 40 7 otherprop Objects
decoration 6 40 7 otherprop Objects n03169390 decoration.n.01
tablet 6 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
reading table 6 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
floor covered 6 40 7 otherprop Objects
cooker 6 267 40 7 utensil otherprop Objects n03101156 cooker.n.01
file 6 75 40 7 file otherprop Objects filing_cabinet 03337140 n03337140 file.n.03
archive 6 40 7 otherprop Objects n02735086 archive.n.01
trolley 5 504 40 7 trolley otherprop Objects n04335435 streetcar.n.01
wainscoting 5 40 7 otherprop Objects
lampshade 5 859 40 7 lamp shade otherprop Objects n03637318 lampshade.n.01
china 5 267 40 7 utensil otherprop Objects n03018209 china.n.02
sign 5 208 40 7 sign otherprop Objects n04217882 signboard.n.01
fax machine 5 68 40 7 fax machine otherprop Objects
mirror frame 5 40 7 otherprop Objects
projector 5 90 40 7 projector otherprop Objects n04009552 projector.n.02
sweater 5 40 7 otherprop Objects n04370048 sweater.n.01
paint can 5 329 40 7 can otherprop Objects can 02946921 n02946921 can.n.01
heat register 5 40 7 otherprop Objects
kitchen table 5 19 7 10 table table Table table table table 04379243 n03620967 kitchen_table.n.01
globe 5 347 40 7 globe otherprop Objects
toy 5 389 40 7 toy otherprop Objects n03964744 plaything.n.01
kitchen worktop 5 40 7 otherprop Objects
paper roll 5 40 7 otherprop Objects
meeting table 5 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
vaze 5 40 7 otherprop Objects
wall clock 5 56 40 7 clock otherprop Objects clock 03046257 n04548280 wall_clock.n.01
closet door 5 28 8 12 door door Wall door
pack 5 40 7 otherprop Objects
doormat 5 143 20 5 floor mat floor mat Floor n03223299 doormat.n.02
tissue 5 648 40 7 tissue otherprop Objects
plastic container 5 140 40 7 container otherprop Objects
statue 5 294 40 7 sculpture otherprop Objects n04306847 statue.n.01
dollhouse 5 486 39 6 doll house otherfurniture Furniture n03219483 dollhouse.n.01
vacuum 5 40 7 otherprop Objects n04517823 vacuum.n.04
wet floor sign 5 208 40 7 sign otherprop Objects
vanity 5 169 17 6 dresser dresser Furniture dresser dresser table 04379243 n03238586 dressing_table.n.01
candle 5 137 40 7 candle otherprop Objects lamp n02948072 candle.n.01
library desk 5 36 14 10 desk desk Table desk desk
carton box 5 26 29 7 box box Objects
easel 5 50 39 6 stand otherfurniture Furniture n03262809 easel.n.01
wall lamp 5 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
wall hanging 4 40 7 otherprop Objects n03491178 hanging.n.01
face wash 4 40 7 otherprop Objects
corner 4 40 7 otherprop Objects
lounge chair 4 5 5 4 chair chair Chair chair chair chair 03001627 n03262932 easy_chair.n.01
beanbag 4 797 39 6 bean bag otherfurniture Furniture n02816656 beanbag.n.01
marker holder 4 40 7 otherprop Objects
dumbell 4 40 7 otherprop Objects
ping pong paddle 4 40 7 otherprop Objects
locker 4 3 3 6 cabinet cabinet Furniture
plunger 4 40 7 otherprop Objects n03970156 plunger.n.03
soap bar 4 51 38 7 bar otherstructure Objects
student chair 4 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
office object 4 40 7 otherprop Objects
stuffed animal 4 177 40 7 stuffed animal otherprop Objects
water fountain 4 339 38 7 water fountain otherstructure Objects n03241335 drinking_fountain.n.01
doorknob 4 27 40 7 door knob otherprop Objects n03222959 doorknob.n.01
footrest 4 163 39 6 foot rest otherfurniture Furniture stool n03380724 footstool.n.01
ac unit 4 40 7 otherprop Objects
safe 4 26 29 7 box box Objects n04125021 safe.n.01
tile 4 40 7 otherprop Objects n04435180 tile.n.01
easle 4 40 7 otherprop Objects
headphone 4 160 40 7 headphones otherprop Objects headphone 03261776 n03261776 earphone.n.01
dress 4 40 7 otherprop Objects n03236735 dress.n.01
rolling cart 4 305 40 7 cart otherprop Objects
chest of drawers 4 524 39 6 furniture otherfurniture Furniture dresser dresser n03015254 chest_of_drawers.n.01
plastic bin 4 307 40 7 bin otherprop Objects
pail 4 427 40 7 bucket otherprop Objects n02909870 bucket.n.01
dry erase board 4 408 38 7 board otherstructure Objects
coatrack 4 40 7 otherprop Objects n03059103 coatrack.n.01
recliner 4 5 5 4 chair chair Chair chair chair chair 03001627 n04062428 recliner.n.01
roomba 4 40 7 otherprop Objects
highchair 4 5 5 4 chair chair Chair chair chair chair 03001627 n03518445 highchair.n.01
dish rack 4 581 40 7 dish rack otherprop Objects n03207630 dish_rack.n.01
dartboard 4 408 38 7 board otherstructure Objects n03162940 dartboard.n.01
broom 4 328 40 7 broom otherprop Objects n02906734 broom.n.01
book rack 4 224 39 6 bookrack otherfurniture Furniture
eraser 4 100 40 7 eraser otherprop Objects n03294833 eraser.n.01
bath mat 4 40 7 otherprop Objects n02807401 bath_mat.n.01
textile 4 40 7 otherprop Objects n03309808 fabric.n.01
paper box 4 26 29 7 box box Objects
guitar case 4 771 40 7 guitar case otherprop Objects
mop 4 40 7 otherprop Objects n04367480 swab.n.02
lavatory 4 40 7 otherprop Objects toilet toilet
server 4 360 40 7 server otherprop Objects
paper towel holder 4 281 40 7 paper towel holder otherprop Objects
office supply 4 40 7 otherprop Objects
panel 4 408 38 7 board otherstructure Objects
toilet brush holder 4 40 7 otherprop Objects
magazine 4 71 40 7 magazine otherprop Objects n06595351 magazine.n.01
kitchen rack 4 50 39 6 stand otherfurniture Furniture n04038440 rack.n.05
table object 4 40 7 otherprop Objects
range hood 4 380 38 7 range hood otherstructure Objects range_hood n04053677 range_hood.n.01
bath 4 136 36 7 bathtub bathtub Objects bathtub bathtub tub 02808440 n02808440 bathtub.n.01
trim 4 40 7 otherprop Objects n04484160 trimming.n.02
scanner 4 40 7 otherprop Objects
bathrobe 4 40 7 otherprop Objects n02807616 bathrobe.n.01
door and post 4 40 7 otherprop Objects
pouff 4 40 7 otherprop Objects
spring curtain 3 89 16 13 curtain curtain Window curtain
recycle 3 40 7 otherprop Objects
fax 3 40 7 otherprop Objects n03316105 facsimile.n.02
rolling shelf 3 40 7 otherprop Objects
flat-screen television 3 172 25 11 television television TV
futon 3 576 39 6 mattress otherfurniture Furniture n03408444 futon.n.01
stack of chairs 3 40 7 otherprop Objects
dustpan 3 40 7 otherprop Objects n03259009 dustpan.n.02
hand towel 3 135 27 7 towel towel Objects n03490006 hand_towel.n.01
floor lamp 3 144 35 7 lamp lamp Objects lamp lamp 03636649 n03367059 floor_lamp.n.01
mainboard 3 40 7 otherprop Objects
kitchen shelf 3 42 15 6 shelves shelves Furniture bookshelf bookshelf 02871439 n02871439 bookshelf.n.01
organizer 3 40 7 otherprop Objects n03918737 personal_digital_assistant.n.01
freezer 3 17 24 6 refridgerator refridgerator Furniture n03170635 deep-freeze.n.01
furnace 3 551 39 6 furnace otherfurniture Furniture n03404449 furnace.n.01
stock 3 40 7 otherprop Objects
map 3 107 40 7 map otherprop Objects n03720163 map.n.01
helmet 3 40 7 otherprop Objects helmet 03513137 n03513137 helmet.n.02
wallpaper 3 38 7 otherstructure Objects
wall cabinet 3 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
office equipment 3 40 7 otherprop Objects
hair dryer 3 577 40 7 hair dryer otherprop Objects n03483316 hand_blower.n.01
backsplash 3 40 7 otherprop Objects
exercise ball 3 60 40 7 ball otherprop Objects
jeremiah 3 40 7 otherprop Objects n11082842 jeremiah.n.01
flush 3 40 7 otherprop Objects
fridge just 3 40 7 otherprop Objects
folded clothes 3 141 21 7 clothes clothes Objects
window counter 3 7 12 6 counter counter Furniture
iron 3 40 7 otherprop Objects n03584829 iron.n.04
studio light 3 62 38 7 light otherstructure Objects
sconce 3 62 38 7 light otherstructure Objects n04148703 sconce.n.03
sofa set 3 40 7 otherprop Objects
baseboard 3 40 7 otherprop Objects n02800354 baseboard.n.01
sink counter 3 7 12 6 counter counter Furniture
kitchen slab 3 38 7 otherstructure Objects n04233405 slab.n.01
cabinet door 3 28 8 12 door door Wall door
exercise machine 3 220 40 7 machine otherprop Objects
wood 3 40 7 otherprop Objects
teatowels 3 40 7 otherprop Objects
workbench 3 204 39 6 bench otherfurniture Furniture bench table 04379243 n04600486 workbench.n.01
backwall 3 40 7 otherprop Objects
cubby 3 40 7 otherprop Objects n03144365 cubby.n.01
water bottle 3 2 40 7 bottle otherprop Objects bottle bottle 02876657 n04557648 water_bottle.n.01
kitchen sink 3 24 34 7 sink sink Objects sink n03620889 kitchen_sink.n.01
sink area 3 40 7 otherprop Objects
handicap bar 3 51 38 7 bar otherstructure Objects
painter 3 594 40 7 cat otherprop Objects n02125311 cougar.n.01
tank 3 40 7 otherprop Objects
washstand 3 524 39 6 furniture otherfurniture Furniture n04555400 washstand.n.01
purse 3 181 40 7 purse otherprop Objects n02774152 bag.n.04
surface 3 40 7 otherprop Objects n02688443 airfoil.n.01
towel rack 3 40 7 otherprop Objects n04459773 towel_rack.n.01
decor 3 40 7 otherprop Objects n03579355 interior_decoration.n.01
handwash 3 40 7 otherprop Objects
bar stool 3 150 40 7 stool otherprop Objects stool
pan 3 589 40 7 pan otherprop Objects n03880531 pan.n.01
air propeller 3 40 7 otherprop Objects
paneling 3 21 1 12 wall wall Wall n03882611 paneling.n.01
vent 3 40 7 otherprop Objects
kitchen junk 3 40 7 otherprop Objects n14857897 debris.n.01
piano bench 3 460 39 6 piano bench otherfurniture Furniture bench bench 02828884 n02828884 bench.n.01
bunk bed 3 804 39 6 bunk bed otherfurniture Furniture bed bed bed 02818832 n02920259 bunk_bed.n.01
bed lamp 3 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
shower mat 3 40 7 otherprop Objects
bedding 3 40 7 otherprop Objects n02820210 bedclothes.n.01
shoe rack 3 614 40 7 shoe rack otherprop Objects
notice board 3 408 38 7 board otherstructure Objects n02916538 bulletin_board.n.02
shower floor 3 11 2 5 floor floor Floor
brush 3 40 7 otherprop Objects n02908217 brush.n.02
table football 3 166 40 7 football otherprop Objects
padded bench 3 204 39 6 bench otherfurniture Furniture bench bench 02828884 n02828884 bench.n.01
bathroom carpet 3 130 40 7 rug otherprop Objects n04118021 rug.n.01
showerhead 3 650 40 7 shower head otherprop Objects n04209383 showerhead.n.01
loft 3 40 7 otherprop Objects
chair w/table 3 40 7 otherprop Objects
bedhead 3 40 7 otherprop Objects
roll 3 40 7 otherprop Objects n04101375 roll.n.04
comidin 3 40 7 otherprop Objects
cardboard box 3 26 29 7 box box Objects
cushion stool 2 150 40 7 stool otherprop Objects stool
bed cabinet 2 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
pile of clothes 2 141 21 7 clothes clothes Objects
case 2 851 40 7 case otherprop Objects
slep 2 40 7 otherprop Objects
swiffer 2 40 7 otherprop Objects
stapler 2 67 40 7 stapler otherprop Objects n04303497 stapler.n.01
cable 2 450 40 7 cables otherprop Objects
work desk 2 36 14 10 desk desk Table desk desk
floor carpet 2 40 7 otherprop Objects
bedside 2 40 7 otherprop Objects n08649711 bedside.n.01
trash bag 2 55 37 7 bag bag Objects
heating devce 2 40 7 otherprop Objects
sofa table 2 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
ventilator 2 40 7 otherprop Objects n04526964 ventilator.n.01
cat 2 594 40 7 cat otherprop Objects
kitchen utensil 2 267 40 7 utensil otherprop Objects n03621049 kitchen_utensil.n.01
counter top for sink 2 24 34 7 sink sink Objects sink
bathroom frame 2 38 7 otherstructure Objects
banister 2 453 38 7 banister otherstructure Objects n02788148 bannister.n.02
tuvalet kağıdı 2 40 7 otherprop Objects
trunk 2 40 7 otherprop Objects
blank screen 2 40 7 otherprop Objects
tire 2 40 7 otherprop Objects n04440749 tire.n.01
screen curtain 2 89 16 13 curtain curtain Window curtain
cooking range 2 40 7 otherprop Objects
dressware 2 40 7 otherprop Objects
blow up matress 2 40 7 otherprop Objects
shred bin 2 307 40 7 bin otherprop Objects
air matress 2 40 7 otherprop Objects
folder 2 69 40 7 folder otherprop Objects n03376279 folder.n.02
room heater 2 111 39 6 heater otherfurniture Furniture
car 2 530 40 7 car otherprop Objects car car 02958343 n02958343 car.n.01
massage armchair 2 40 7 otherprop Objects
wardrobe door 2 28 8 12 door door Wall door
coffee supply 2 40 7 otherprop Objects
tissue holder 2 40 7 otherprop Objects
tab 2 40 7 otherprop Objects
knickknack 2 40 7 otherprop Objects
indoor plant 2 82 40 7 plant otherprop Objects plant
notebook 2 210 40 7 notebook otherprop Objects n03832673 notebook.n.02
water dispenser 2 507 40 7 water dispenser otherprop Objects n03210683 dispenser.n.01
cleaning supply 2 40 7 otherprop Objects
library table 2 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
bin cover 2 40 7 otherprop Objects
teakettle 2 243 40 7 tea kettle otherprop Objects n04397768 teakettle.n.01
reservoir 2 263 40 7 vessel otherprop Objects n04078574 reservoir.n.03
arc sofa 2 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
bedside lamp 2 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
picture window 2 59 9 13 window window Window n03932080 picture_window.n.01
medicine cabinet 2 3 3 6 cabinet cabinet Furniture cabinet 02933112 n03742115 medicine_chest.n.01
cosmetic bag 2 55 37 7 bag bag Objects
coffee 2 40 7 otherprop Objects
brush holder 2 40 7 otherprop Objects
duvet 2 40 7 otherprop Objects n03266749 eiderdown.n.01
flower stand 2 50 39 6 stand otherfurniture Furniture
bedcover 2 40 7 otherprop Objects n02822220 bedspread.n.01
side 2 40 7 otherprop Objects n09437454 slope.n.01
weight 2 40 7 otherprop Objects n04571292 weight.n.02
pitcher 2 273 40 7 pitcher otherprop Objects n03950228 pitcher.n.02
schoolbag 2 55 37 7 bag bag Objects n04146343 schoolbag.n.01
wall picture 2 64 11 8 picture picture Picture
metal handrail 2 40 7 otherprop Objects
icebox 2 17 24 6 refridgerator refridgerator Furniture n04070727 refrigerator.n.01
exercise equipment 2 457 39 6 excercise equipment otherfurniture Furniture
loft bed 2 157 4 1 bed bed Bed bed bed bed 02818832 n02818832 bed.n.01
tennis table 2 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
bookbag 2 40 7 otherprop Objects
scarf 2 240 40 7 scarf otherprop Objects n04143897 scarf.n.01
cabinet drawer 2 174 39 6 drawer otherfurniture Furniture
double door 2 28 8 12 door door Wall door n03226880 double_door.n.01
books in a book shelf 2 40 7 otherprop Objects
entertainment stand 2 50 39 6 stand otherfurniture Furniture
potty 2 40 7 otherprop Objects n03004275 chamberpot.n.01
john 2 40 7 otherprop Objects toilet toilet n04446276 toilet.n.01
desk accessory 2 40 7 otherprop Objects
chair support 2 40 7 otherprop Objects
shopping bag 2 55 37 7 bag bag Objects n04204081 shopping_bag.n.01
tree 2 82 40 7 plant otherprop Objects plant n13104059 tree.n.01
dustpan broom 2 328 40 7 broom otherprop Objects
stall wall 2 21 1 12 wall wall Wall
tường 2 40 7 otherprop Objects
bedside drawer 2 174 39 6 drawer otherfurniture Furniture
chaise 2 5 5 4 chair chair Chair chair chair chair 03001627 n03002711 chaise_longue.n.01
curtain rod 2 582 38 7 curtain rod otherstructure Objects
boardgame 2 40 7 otherprop Objects
cccvurtains 2 40 7 otherprop Objects
device 2 40 7 otherprop Objects n03183080 device.n.01
armrest 2 40 7 otherprop Objects n02741475 armrest.n.01
alarm 2 525 40 7 alarm otherprop Objects clock 03046257 n02694662 alarm_clock.n.01
towel rail 2 51 38 7 bar otherstructure Objects n04459909 towel_rail.n.01
sliding wood doors 2 40 7 otherprop Objects
backbag 2 40 7 otherprop Objects
bath curtain 2 89 16 13 curtain curtain Window curtain
washcloth 2 40 7 otherprop Objects n04554523 washcloth.n.01
bean bag chair 2 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
toolbox 2 344 39 6 chest otherfurniture Furniture n04452615 toolbox.n.01
rack shelf 2 40 7 otherprop Objects
kleenex box 2 26 29 7 box box Objects
air freshner 2 40 7 otherprop Objects
sewing machine 2 890 40 7 sewing machine otherprop Objects n04179913 sewing_machine.n.01
washing and drying machine 2 220 40 7 machine otherprop Objects
hairbrush 2 120 40 7 hair brush otherprop Objects n03475581 hairbrush.n.01
lap desk 2 36 14 10 desk desk Table desk desk
hutch 2 40 7 otherprop Objects
stack of flat items/possibly cardboard 2 40 7 otherprop Objects
under-bed drawer 2 174 39 6 drawer otherfurniture Furniture
bleach 2 40 7 otherprop Objects
comidine 2 40 7 otherprop Objects
bathmat 2 40 7 otherprop Objects
cinder block 2 40 7 otherprop Objects n03031957 cinder_block.n.01
mailbox 2 26 29 7 box box Objects mailbox 03710193 n03710193 mailbox.n.01
writing board 2 408 38 7 board otherstructure Objects n04608127 writing_board.n.01
flooring 2 11 2 5 floor floor Floor n03365592 floor.n.01
wall of safety boxes 2 40 7 otherprop Objects
vessel 2 263 40 7 vessel otherprop Objects watercraft 04530566 n04530566 vessel.n.02
cube sofa 2 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
toothpaste 2 128 40 7 toothpaste otherprop Objects
liquid dispencer 2 40 7 otherprop Objects
folding chair 2 5 5 4 chair chair Chair chair chair chair 03001627 n03376595 folding_chair.n.01
squash 2 717 40 7 squash otherprop Objects plant n12158798 squash.n.01
grille 2 700 38 7 grill otherstructure Objects
centerpiece 2 878 40 7 centerpiece otherprop Objects n02994419 centerpiece.n.02
wall folder 2 69 40 7 folder otherprop Objects
towel hanger 2 211 40 7 hanger otherprop Objects n03490884 hanger.n.02
toilet pot 2 16 40 7 pot otherprop Objects
aid 2 40 7 otherprop Objects
rope 2 560 40 7 rope otherprop Objects n04108268 rope.n.01
envelop rack 2 40 7 otherprop Objects
tissue roll 2 764 40 7 tissue roll otherprop Objects
rostrum 2 40 7 otherprop Objects n03159640 dais.n.01
owen 2 40 7 otherprop Objects
electric panel 2 40 7 otherprop Objects
bowl 2 22 40 7 bowl otherprop Objects bowl bowl 02880940 n02880940 bowl.n.03
boiler 2 40 7 otherprop Objects
tile wall 2 21 1 12 wall wall Wall
reflection 2 64 11 8 picture picture Picture n04068976 reflection.n.05
crib 2 485 39 6 crib otherfurniture Furniture
shelves of stuff 2 40 7 otherprop Objects
kitchen gadget 2 40 7 otherprop Objects n02729965 appliance.n.01
sliding door 2 28 8 12 door door Wall door n04239074 sliding_door.n.01
paper bag 2 55 37 7 bag bag Objects n04122825 sack.n.01
water heater 2 588 40 7 water heater otherprop Objects n04560113 water_heater.n.01
alarm clock 2 156 40 7 alarm clock otherprop Objects clock 03046257 n02694662 alarm_clock.n.01
chair rail 2 40 7 otherprop Objects
corkboard 2 34 40 7 cork board otherprop Objects n14823376 corkboard.n.01
sàn nhà 2 40 7 otherprop Objects
conformer 2 40 7 otherprop Objects
easy chair 2 5 5 4 chair chair Chair chair chair chair 03001627 n03262932 easy_chair.n.01
sehpa 2 40 7 otherprop Objects
library 2 40 7 otherprop Objects
bench seat 2 40 7 otherprop Objects
music stand 2 820 39 6 music stand otherfurniture Furniture n03801760 music_stand.n.01
office 2 40 7 otherprop Objects n03841666 office.n.01
clutter 2 40 7 otherprop Objects
flush for toilet 2 124 33 7 toilet toilet Objects toilet toilet
kleenex 2 40 7 otherprop Objects
box/storage 2 40 7 otherprop Objects
post 2 40 7 otherprop Objects n03988170 post.n.04
plug 2 40 7 otherprop Objects
socket 2 40 7 otherprop Objects n04255163 socket.n.02
island 2 456 38 7 kitchen island otherstructure Objects
instrument case 2 851 40 7 case otherprop Objects
paper tray 2 538 40 7 paper tray otherprop Objects
toilet paper package 2 40 7 otherprop Objects
antibacterial soap dispenser 2 40 7 otherprop Objects
cubicle 2 40 7 otherprop Objects
vaccuum 2 40 7 otherprop Objects
photography light 2 62 38 7 light otherstructure Objects
trump wall 2 21 1 12 wall wall Wall
shredder 2 40 7 otherprop Objects n04210120 shredder.n.01
square table 2 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
table support 2 40 7 otherprop Objects
sink plumbing 2 40 7 otherprop Objects
basin 2 40 7 otherprop Objects
pinball machine 2 220 40 7 machine otherprop Objects n03941417 pinball_machine.n.01
meeting 2 40 7 otherprop Objects n08542634 confluence.n.01
mobile 2 40 7 otherprop Objects
hatrack 2 40 7 otherprop Objects n03059103 coatrack.n.01
johnny 2 331 31 7 person person Objects person n10628368 rebel.n.01
projector screen 2 53 38 7 projector screen otherstructure Objects
valance 2 40 7 otherprop Objects n03111296 cornice.n.01
eliptical 2 40 7 otherprop Objects
boot 2 40 7 otherprop Objects
window ledge 2 40 7 otherprop Objects
clothes hamper 2 39 40 7 basket otherprop Objects basket 02801938 n03050864 clothes_hamper.n.01
wall side 2 40 7 otherprop Objects
stairstepper 2 40 7 otherprop Objects
bottle recycling bin 2 307 40 7 bin otherprop Objects
blend 2 40 7 otherprop Objects
tissue dispenser 1 40 7 otherprop Objects
entertainment center 1 524 39 6 furniture otherfurniture Furniture n03290653 entertainment_center.n.01
kettle 1 16 40 7 pot otherprop Objects n03612814 kettle.n.01
laundyr supply 1 40 7 otherprop Objects
chain 1 40 7 otherprop Objects
gown 1 40 7 otherprop Objects
w.c. 1 40 7 otherprop Objects toilet toilet n04558478 water_closet.n.01
shelf unit 1 40 7 otherprop Objects
walss 1 40 7 otherprop Objects
mirror reflection 1 40 7 otherprop Objects
bed curtain 1 89 16 13 curtain curtain Window curtain
papertray 1 538 40 7 paper tray otherprop Objects
tissue paper 1 15 26 7 paper paper Objects
milk 1 40 7 otherprop Objects
leg rest sofa 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
double decker 1 40 7 otherprop Objects
dekw 1 40 7 otherprop Objects
sink counter top 1 40 7 otherprop Objects
breadbox 1 40 7 otherprop Objects n02893692 bread-bin.n.01
floor mat 1 143 20 5 floor mat floor mat Floor n03727837 mat.n.01
electrical machine 1 220 40 7 machine otherprop Objects
student seat 1 40 7 otherprop Objects
privacy partition 1 40 7 otherprop Objects
chairlegs 1 40 7 otherprop Objects
mail tray 1 618 40 7 mail tray otherprop Objects
over the door storage 1 40 7 otherprop Objects
door hinge 1 40 7 otherprop Objects
cahi 1 40 7 otherprop Objects
excercise cycle 1 40 7 otherprop Objects
handsoap 1 40 7 otherprop Objects
glass door 1 28 8 12 door door Wall door
dustbin box 1 26 29 7 box box Objects
toy ship 1 40 7 otherprop Objects
storage rack 1 448 39 6 storage rack otherfurniture Furniture
wall outside room 1 40 7 otherprop Objects
ask tray 1 179 40 7 tray otherprop Objects
punching bag 1 55 37 7 bag bag Objects n04023962 punching_bag.n.02
storage drawer 1 174 39 6 drawer otherfurniture Furniture
cat litter box 1 26 29 7 box box Objects
shower rod 1 40 7 otherprop Objects
office desk 1 36 14 10 desk desk Table desk desk
water filter 1 731 40 7 water filter otherprop Objects n04559620 water_filter.n.01
nicknack 1 40 7 otherprop Objects n02897692 bric-a-brac.n.01
tin of drink 1 40 7 otherprop Objects
work 1 40 7 otherprop Objects
lustre 1 40 7 otherprop Objects
paper shredder 1 40 7 otherprop Objects
awllll 1 40 7 otherprop Objects
booth 1 19 7 10 table table Table table table table 04379243 n02874214 booth.n.01
folded blanket 1 312 40 7 blanket otherprop Objects
bed skirt 1 40 7 otherprop Objects
computer speaker 1 54 40 7 speaker otherprop Objects speaker 03691459 n03691459 loudspeaker.n.01
desktop computer 1 46 40 7 computer otherprop Objects n03180011 desktop_computer.n.01
hadnwash 1 40 7 otherprop Objects
covered box 1 26 29 7 box box Objects
lampbase 1 40 7 otherprop Objects
net 1 40 7 otherprop Objects
wall pane 1 40 7 otherprop Objects
dressing gown 1 40 7 otherprop Objects n03237992 dressing_gown.n.01
footstowindow 2ol 1 40 7 otherprop Objects
display/signs 1 40 7 otherprop Objects
arifact 1 40 7 otherprop Objects
leg 1 40 7 otherprop Objects
ceiling fan 1 74 40 7 fan otherprop Objects
circular sofa 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
food/drink 1 40 7 otherprop Objects
chair cushion 1 40 7 otherprop Objects
water 1 40 7 otherprop Objects n04562658 water_system.n.02
sinl 1 40 7 otherprop Objects
sing 1 40 7 otherprop Objects
cardboard 1 40 7 otherprop Objects
plastic sliding drawers 1 40 7 otherprop Objects
celltech 1 40 7 otherprop Objects
sink tap 1 40 7 otherprop Objects
binder 1 399 40 7 binder otherprop Objects
toilet plumbing 1 40 7 otherprop Objects
marble 1 60 40 7 ball otherprop Objects n03721047 marble.n.02
table speaker 1 54 40 7 speaker otherprop Objects speaker 03691459 n03691459 loudspeaker.n.01
talbetop 1 40 7 otherprop Objects
gym cycle 1 40 7 otherprop Objects
table piece 1 40 7 otherprop Objects
counter panel 1 40 7 otherprop Objects
plug/outlet 1 40 7 otherprop Objects
till 1 26 29 7 box box Objects n02976939 cashbox.n.01
sink unit 1 40 7 otherprop Objects
urinary 1 40 7 otherprop Objects
ball chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
of shelf 1 40 7 otherprop Objects
shower rack 1 614 40 7 shoe rack otherprop Objects
full bed 1 157 4 1 bed bed Bed bed bed bed 02818832 n02818832 bed.n.01
sink cupboard 1 40 7 otherprop Objects
house shoe 1 149 40 7 shoe otherprop Objects
cha 1 40 7 otherprop Objects
beachball 1 60 40 7 ball otherprop Objects n02814224 beach_ball.n.01
game table 1 429 40 7 game table otherprop Objects table table table 04379243 n04379243 table.n.02
tiny table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
feminine hygiene waste basket 1 39 40 7 basket otherprop Objects basket 02801938 n02801938 basket.n.01
dicplay case 1 851 40 7 case otherprop Objects
end table chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
cardbox 1 40 7 otherprop Objects
envelope 1 476 40 7 envelope otherprop Objects n03291819 envelope.n.01
resevoir 1 40 7 otherprop Objects
rabbit 1 40 7 otherprop Objects n02324045 rabbit.n.01
board meeting table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
hold 1 758 40 7 handle otherprop Objects
laser printer 1 66 40 7 printer otherprop Objects printer 04004475 n03643737 laser_printer.n.01
glasses case 1 851 40 7 case otherprop Objects n03438863 glasses_case.n.01
thermos 1 693 40 7 flask otherprop Objects bottle bottle 02876657 n04422727 thermos.n.01
shelving unit 1 40 7 otherprop Objects
bath towel 1 135 27 7 towel towel Objects n02808304 bath_towel.n.01
monitor stand 1 50 39 6 stand otherfurniture Furniture
breakfast bar 1 51 38 7 bar otherstructure Objects
flat screen television 1 172 25 11 television television TV
dressin table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
dress rack 1 50 39 6 stand otherfurniture Furniture n03238762 dress_rack.n.01
plates of food 1 40 7 otherprop Objects
figure 1 40 7 otherprop Objects
credenza 1 7 12 6 counter counter Furniture n03129753 credenza.n.01
oejcts 1 40 7 otherprop Objects
sinktop 1 40 7 otherprop Objects
wall.table 1 40 7 otherprop Objects
penholder 1 464 40 7 pen holder otherprop Objects
wall panel 1 21 1 12 wall wall Wall n04548503 wall_panel.n.01
stair landing 1 40 7 otherprop Objects
wallpapere 1 40 7 otherprop Objects
water dispencer 1 507 40 7 water dispenser otherprop Objects
therostat 1 110 40 7 thermostat otherprop Objects
frying pan 1 318 40 7 frying pan otherprop Objects n03400231 frying_pan.n.01
separator 1 40 7 otherprop Objects n02995998 centrifuge.n.01
divider 1 40 7 otherprop Objects
ceiling lamp 1 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
rod 1 40 7 otherprop Objects pistol 03948459 n03427202 gat.n.01
;amps 1 40 7 otherprop Objects
vaccum 1 40 7 otherprop Objects
chair b' 1 40 7 otherprop Objects
paint bucket 1 427 40 7 bucket otherprop Objects
lighting 1 40 7 otherprop Objects n03667235 lighting.n.02
tablecloth 1 292 40 7 tablecloth otherprop Objects n04380143 tablecloth.n.01
plank 1 408 38 7 board otherstructure Objects n15101854 board.n.02
sink pipe 1 41 40 7 pipe otherprop Objects
concrete slab 1 40 7 otherprop Objects
mini couch 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
post it 1 40 7 otherprop Objects
stair down 1 40 7 otherprop Objects
spout 1 41 40 7 pipe otherprop Objects n04287153 spout.n.01
cutlery 1 40 7 otherprop Objects n03154073 cutter.n.06
magazine rack 1 50 39 6 stand otherfurniture Furniture n03704549 magazine_rack.n.01
mini printer 1 66 40 7 printer otherprop Objects printer 04004475 n04004475 printer.n.03
tray box 1 26 29 7 box box Objects
level 2 stair case 1 851 40 7 case otherprop Objects
industrial machine 1 220 40 7 machine otherprop Objects
giường 1 40 7 otherprop Objects
induction cooktop 1 40 7 otherprop Objects
artwork 1 40 7 otherprop Objects
pillowcase 1 851 40 7 case otherprop Objects n02975412 case.n.19
window wall 1 21 1 12 wall wall Wall
minibar 1 7 12 6 counter counter Furniture n03769610 minibar.n.01
laundry detergent 1 40 7 otherprop Objects
serving plate 1 233 40 7 plate otherprop Objects
dirty basket 1 39 40 7 basket otherprop Objects basket 02801938 n02801938 basket.n.01
reading lamp 1 144 35 7 lamp lamp Objects lamp lamp 03636649 n04057981 reading_lamp.n.01
open cabinet 1 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
hand truck 1 305 40 7 cart otherprop Objects n03490119 hand_truck.n.01
pad 1 40 7 otherprop Objects n03195485 diggings.n.02
laptop table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
ventilator window 1 59 9 13 window window Window
esll 1 40 7 otherprop Objects
sofa bed 1 157 4 1 bed bed Bed bed bed bed 02818832 n02818832 bed.n.01
safa 1 40 7 otherprop Objects
heating system 1 40 7 otherprop Objects n03509025 heating_system.n.01
floor]floo 1 40 7 otherprop Objects
facial scrub 1 40 7 otherprop Objects
vent hood 1 40 7 otherprop Objects
office cupboard 1 40 7 otherprop Objects
stand fan 1 74 40 7 fan otherprop Objects
storage shelf 1 40 7 otherprop Objects
hanging clothes 1 141 21 7 clothes clothes Objects
fuse box 1 26 29 7 box box Objects
pizza 1 40 7 otherprop Objects
personal effect 1 40 7 otherprop Objects
drawer organizer 1 40 7 otherprop Objects
main board 1 408 38 7 board otherstructure Objects
loofa 1 40 7 otherprop Objects
shower surround 1 40 7 otherprop Objects
bycicle 1 40 7 otherprop Objects
all-in-one computer 1 46 40 7 computer otherprop Objects
box of tissue 1 648 40 7 tissue otherprop Objects
doorlock 1 646 40 7 door lock otherprop Objects n03223162 doorlock.n.01
base unit 1 40 7 otherprop Objects
tennis ball 1 60 40 7 ball otherprop Objects n04409515 tennis_ball.n.01
snack machine 1 220 40 7 machine otherprop Objects
laptop bag 1 55 37 7 bag bag Objects
hallway wall 1 21 1 12 wall wall Wall
msic 1 40 7 otherprop Objects
file organizer 1 40 7 otherprop Objects
fire hose 1 40 7 otherprop Objects n03346004 fire_hose.n.01
media center 1 40 7 otherprop Objects
umbrella 1 203 40 7 umbrella otherprop Objects n04507155 umbrella.n.01
barrier 1 40 7 otherprop Objects n02796623 barrier.n.01
dirt 1 40 7 otherprop Objects
subwoofer 1 54 40 7 speaker otherprop Objects speaker 03691459 n04349401 subwoofer.n.01
table tennis 1 40 7 otherprop Objects
printer/scanner 1 40 7 otherprop Objects
drying rack 1 262 39 6 drying rack otherfurniture Furniture
wppd [ame;owood paneling 1 40 7 otherprop Objects
toilet robe 1 40 7 otherprop Objects
printer stand 1 50 39 6 stand otherfurniture Furniture
shower screen 1 40 7 otherprop Objects
water bed 1 157 4 1 bed bed Bed bed bed bed 02818832 n04557522 water_bed.n.01
display sign 1 208 40 7 sign otherprop Objects
diaper bin 1 307 40 7 bin otherprop Objects
router 1 303 40 7 router otherprop Objects
ashtray 1 377 40 7 ashtray otherprop Objects n02747802 ashtray.n.01
footrest/table 1 40 7 otherprop Objects
cleaning brush 1 40 7 otherprop Objects
bathroom desk 1 36 14 10 desk desk Table desk desk table 04379243 n03179701 desk.n.01
toilet commode 1 40 7 otherprop Objects
staicase handrail 1 40 7 otherprop Objects
clothes shelf 1 40 7 otherprop Objects
dink 1 40 7 otherprop Objects
toilet seat protectors 1 40 7 otherprop Objects
stuffware 1 40 7 otherprop Objects
low table for storage 1 40 7 otherprop Objects
covered stuff 1 40 7 otherprop Objects
hood 1 40 7 otherprop Objects
floor reflection 1 40 7 otherprop Objects
can opener 1 279 40 7 can opener otherprop Objects n02951585 can_opener.n.01
top 1 40 7 otherprop Objects
wheely bin 1 307 40 7 bin otherprop Objects
book bag 1 55 37 7 bag bag Objects n02870676 book_bag.n.01
body wash 1 40 7 otherprop Objects
study chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
stepladder 1 48 39 6 ladder otherfurniture Furniture stairs n04315599 step_ladder.n.01
paper holder 1 470 40 7 paper holder otherprop Objects
white 1 40 7 otherprop Objects
yoga ball 1 60 40 7 ball otherprop Objects
consol 1 40 7 otherprop Objects
cloths container 1 140 40 7 container otherprop Objects
shorts 1 192 40 7 shorts otherprop Objects n04204755 short_circuit.n.01
emergency exit windows 1 40 7 otherprop Objects
gym 1 40 7 otherprop Objects n03472112 gymnasium.n.02
box of tissues 1 40 7 otherprop Objects
trolly 1 221 39 6 trolly otherfurniture Furniture
water purifyer 1 93 40 7 water purifier otherprop Objects
clutter' 1 40 7 otherprop Objects
wallend 1 40 7 otherprop Objects
tissue `paper 1 40 7 otherprop Objects
mop and bucket 1 427 40 7 bucket otherprop Objects
grocery 1 40 7 otherprop Objects n03461385 grocery_store.n.01
worktable 1 19 7 10 table table Table table table table 04379243 n04603729 worktable.n.01
air outlet 1 40 7 otherprop Objects
fold 1 97 40 7 pen otherprop Objects n03376159 fold.n.06
toilet commoed 1 40 7 otherprop Objects
handfold 1 40 7 otherprop Objects
business chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
llong table chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
books in a shelf 1 40 7 otherprop Objects
chaurs 1 40 7 otherprop Objects
tennis table stand 1 50 39 6 stand otherfurniture Furniture
kitchen water purifier 1 93 40 7 water purifier otherprop Objects
lid 1 533 40 7 lid otherprop Objects
electric hob 1 40 7 otherprop Objects
baggage 1 783 40 7 luggage otherprop Objects
cpnter 1 40 7 otherprop Objects
stairs (exit) 1 40 7 otherprop Objects
equipment 1 40 7 otherprop Objects n03294048 equipment.n.01
rocking chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n04099969 rocking_chair.n.01
bunkbed 1 804 39 6 bunk bed otherfurniture Furniture
divan 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n03214966 divan.n.01
bottle of hand sanitizer 1 40 7 otherprop Objects
sales stall 1 40 7 otherprop Objects
dehumidifer 1 40 7 otherprop Objects
wall cover 1 40 7 otherprop Objects
plumbing 1 40 7 otherprop Objects n03969041 plumbing.n.01
elliptical machine 1 220 40 7 machine otherprop Objects
playball 1 40 7 otherprop Objects
safety bar 1 51 38 7 bar otherstructure Objects
stereo 1 84 40 7 stereo otherprop Objects n04315948 stereo.n.01
havlu 1 40 7 otherprop Objects
handbag 1 40 7 otherprop Objects n02774152 bag.n.04
furnance 1 551 39 6 furnace otherfurniture Furniture
shleves 1 40 7 otherprop Objects
coat hanger 1 400 40 7 coat hanger otherprop Objects n03057920 coat_hanger.n.01
erar wall 1 21 1 12 wall wall Wall
bed bench 1 204 39 6 bench otherfurniture Furniture bench bench 02828884 n02828884 bench.n.01
tissu 1 648 40 7 tissue otherprop Objects
plastic tub 1 232 40 7 plastic tub otherprop Objects bathtub bathtub tub 02808440 n02808440 bathtub.n.01
potholder 1 40 7 otherprop Objects n03992115 potholder.n.01
coffee mug 1 263 40 7 vessel otherprop Objects cup or mug 03797390 n03063599 coffee_mug.n.01
tennis rcket bag 1 55 37 7 bag bag Objects
stand lamp 1 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
bed/sofa 1 40 7 otherprop Objects
laundry supply 1 40 7 otherprop Objects
document shredder 1 40 7 otherprop Objects
gas-range stove 1 242 38 7 stove otherstructure Objects stove 04330267 n04330267 stove.n.02
table soccer 1 40 7 otherprop Objects
cookingrange 1 40 7 otherprop Objects
bookbags 1 40 7 otherprop Objects
downstairs 1 40 7 otherprop Objects
coffeepot 1 893 40 7 coffee pot otherprop Objects n03063689 coffeepot.n.01
jar 1 70 40 7 jar otherprop Objects jar 03593526 n03593526 jar.n.01
rear wall 1 21 1 12 wall wall Wall
part of chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
tape 1 109 40 7 tape otherprop Objects
drawer table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
woodenrack 1 40 7 otherprop Objects
toilet floor 1 11 2 5 floor floor Floor
cushioned seating 1 40 7 otherprop Objects
bulls eye 1 40 7 otherprop Objects
soft mat 1 40 7 otherprop Objects
snack box 1 26 29 7 box box Objects
desk organizer 1 40 7 otherprop Objects
footboard 1 559 40 7 sheet otherprop Objects n03379461 footboard.n.02
wall hook 1 40 7 otherprop Objects
chopping board 1 408 38 7 board otherstructure Objects n03025513 chopping_board.n.01
round picture 1 64 11 8 picture picture Picture
chimney 1 702 38 7 chimney otherstructure Objects n03017428 chimney.n.01
studio screen 1 40 7 otherprop Objects
personal belonging 1 40 7 otherprop Objects
roll of paper 1 15 26 7 paper paper Objects
gaming wheel 1 40 7 otherprop Objects
landlord 1 331 31 7 person person Objects person n10245236 landlord.n.01
ebd 1 40 7 otherprop Objects
heater radiator 1 236 39 6 radiator otherfurniture Furniture
cabinet above 1 40 7 otherprop Objects
weighted plate 1 233 40 7 plate otherprop Objects
travelling bag 1 55 37 7 bag bag Objects suitcase 02773838 n02773838 bag.n.06
desk material 1 40 7 otherprop Objects
door wall 1 21 1 12 wall wall Wall
traffic cone 1 6 40 7 cone otherprop Objects cone
computer mouse 1 103 40 7 mouse otherprop Objects n03793489 mouse.n.04
coathanger 1 400 40 7 coat hanger otherprop Objects
bureau 1 524 39 6 furniture otherfurniture Furniture dresser dresser n03015254 chest_of_drawers.n.01
tyre 1 40 7 otherprop Objects n04440749 tire.n.01
armchairchair 1 40 7 otherprop Objects
oven range 1 40 7 otherprop Objects
pants 1 40 7 otherprop Objects n04489008 trouser.n.01
chiropractic chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
keg 1 343 39 6 barrel otherfurniture Furniture n03610418 keg.n.02
spray 1 40 7 otherprop Objects n02754103 atomizer.n.01
paper trimmer 1 40 7 otherprop Objects
standing whiteboard 1 45 30 7 whiteboard whiteboard Objects
desk drawer 1 475 39 6 desk drawer otherfurniture Furniture
window/windowed door 1 28 8 12 door door Wall door
soapbox 1 671 40 7 soap box otherprop Objects
pillow sofa 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
centre table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
doorway 1 609 38 7 door way otherstructure Objects door n03224032 doorway.n.01
wall and whiteboard 1 45 30 7 whiteboard whiteboard Objects
laptop computer 1 46 40 7 computer otherprop Objects laptop laptop 03642806 n03642806 laptop.n.01
scanner/copier 1 40 7 otherprop Objects
suitcase w/clothes 1 40 7 otherprop Objects
power pusher 1 40 7 otherprop Objects
shower faucet handle 1 758 40 7 handle otherprop Objects
walk 1 40 7 otherprop Objects n04544979 walk.n.05
matte 1 40 7 otherprop Objects
atm machine 1 220 40 7 machine otherprop Objects
garage door 1 850 38 7 garage door otherstructure Objects door
wals 1 40 7 otherprop Objects
cabinet aisle 1 40 7 otherprop Objects
table light 1 62 38 7 light otherstructure Objects
guillotine paper trimmer 1 40 7 otherprop Objects
round 2\ 1 40 7 otherprop Objects
teddy 1 40 7 otherprop Objects n03013580 chemise.n.01
white board/divider 1 40 7 otherprop Objects
white wall 1 21 1 12 wall wall Wall
mark 1 40 7 otherprop Objects n04681387 crisscross.n.01
partition wall 1 21 1 12 wall wall Wall
shag rug 1 130 40 7 rug otherprop Objects n04183217 shag_rug.n.01
upstair way 1 40 7 otherprop Objects
music stand' 1 820 39 6 music stand otherfurniture Furniture
recamier 1 40 7 otherprop Objects
venthole 1 40 7 otherprop Objects n04526241 vent.n.01
dining seat 1 40 7 otherprop Objects
toilet cover 1 40 7 otherprop Objects
personal item 1 40 7 otherprop Objects
tallboy 1 524 39 6 furniture otherfurniture Furniture dresser dresser n03518305 highboy.n.01
drawers unit 1 40 7 otherprop Objects
teapot 1 678 40 7 tea pot otherprop Objects n04398044 teapot.n.01
cook cabinet 1 3 3 6 cabinet cabinet Furniture cabinet 02933112 n02933112 cabinet.n.01
wok pan 1 589 40 7 pan otherprop Objects
tv tray 1 179 40 7 tray otherprop Objects
round chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
sawhorse 1 40 7 otherprop Objects n04140631 sawhorse.n.01
kitchen range 1 242 38 7 stove otherstructure Objects n04330340 stove.n.01
busdrver 1 40 7 otherprop Objects
barricade 1 40 7 otherprop Objects
wall ornament 1 40 7 otherprop Objects
color printer 1 66 40 7 printer otherprop Objects printer 04004475 n04004475 printer.n.03
sticker 1 725 40 7 sticker otherprop Objects n07272545 gummed_label.n.01
exit sign 1 86 40 7 exit sign otherprop Objects
gas stove 1 242 38 7 stove otherstructure Objects stove 04330267 n04330267 stove.n.02
venta hood 1 40 7 otherprop Objects
copier/printer 1 40 7 otherprop Objects
wall-mounted lamp 1 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
item box 1 26 29 7 box box Objects
water puifyer 1 40 7 otherprop Objects
wall papper 1 40 7 otherprop Objects
salt and peper 1 737 40 7 salt and pepper otherprop Objects
printer four 1 40 7 otherprop Objects
towel fastener 1 40 7 otherprop Objects
basth 1 40 7 otherprop Objects
flipflops 1 40 7 otherprop Objects
bonus 1 40 7 otherprop Objects
kitchen box 1 26 29 7 box box Objects n02883344 box.n.01
central heating unit 1 40 7 otherprop Objects
hanging tubelight 1 40 7 otherprop Objects
soccer ball 1 837 40 7 soccer ball otherprop Objects n04254680 soccer_ball.n.01
almarah 1 40 7 otherprop Objects
canopy 1 40 7 otherprop Objects
med box 1 26 29 7 box box Objects
drain 1 567 38 7 drain otherstructure Objects
panelling 1 21 1 12 wall wall Wall n03882611 paneling.n.01
bed stand 1 50 39 6 stand otherfurniture Furniture
deal 1 408 38 7 board otherstructure Objects n15102622 deal.n.04
massage 1 40 7 otherprop Objects
safety rail 1 497 38 7 railing otherstructure Objects n04127395 safety_rail.n.01
vacuumer 1 40 7 otherprop Objects
binfl 1 40 7 otherprop Objects
lightbulb 1 566 40 7 light bulb otherprop Objects lamp n03665924 light_bulb.n.01
door hydraulic 1 40 7 otherprop Objects
induction cook top 1 40 7 otherprop Objects
bedstand 1 40 7 otherprop Objects
calander 1 40 7 otherprop Objects
set of seats 1 40 7 otherprop Objects
chocolate bar dispenser 1 40 7 otherprop Objects
wall unit tv 1 40 7 otherprop Objects
broomstick 1 328 40 7 broom otherprop Objects n02907082 broomstick.n.01
bath faucet 1 9 40 7 faucet otherprop Objects faucet 03325088 n03325088 faucet.n.01
folded cloth 1 40 7 otherprop Objects
supply 1 40 7 otherprop Objects
under oven drawer 1 174 39 6 drawer otherfurniture Furniture
kinect 1 823 40 7 kinect otherprop Objects
cash 1 40 7 otherprop Objects n10886222 cash.n.03
dining side wall 1 21 1 12 wall wall Wall
log 1 40 7 otherprop Objects n03686658 log.n.05
garden gnome 1 40 7 otherprop Objects
coucnb 1 40 7 otherprop Objects
dart 1 40 7 otherprop Objects n03162818 dart.n.01
dust pan and brush 1 40 7 otherprop Objects
smoke alarm 1 525 40 7 alarm otherprop Objects n03343737 fire_alarm.n.02
kitchen top 1 40 7 otherprop Objects
toilet flush 1 40 7 otherprop Objects
cooler 1 17 24 6 refridgerator refridgerator Furniture n03102654 cooler.n.01
kitchen island 1 456 38 7 kitchen island otherstructure Objects n03620600 kitchen_island.n.01
balcony 1 40 7 otherprop Objects
escape door 1 28 8 12 door door Wall door
hammer 1 883 40 7 hammer otherprop Objects n03481172 hammer.n.02
wall and paiting 1 40 7 otherprop Objects
kitch shelf 1 40 7 otherprop Objects
handwasher 1 40 7 otherprop Objects
vanity top 1 40 7 otherprop Objects
bodyboard 1 40 7 otherprop Objects
messenger bag 1 55 37 7 bag bag Objects
stationary bike 1 40 7 otherprop Objects
cabinet countertop 1 40 7 otherprop Objects
ping pong padle 1 40 7 otherprop Objects
teapoy 1 40 7 otherprop Objects
clothes basket 1 39 40 7 basket otherprop Objects basket 02801938 n03050864 clothes_hamper.n.01
xbox 1 628 40 7 xbox otherprop Objects xbox
both 1 40 7 otherprop Objects
foosball 1 40 7 otherprop Objects
soad stand 1 50 39 6 stand otherfurniture Furniture
prop 1 40 7 otherprop Objects n02692086 airplane_propeller.n.01
buddha 1 40 7 otherprop Objects
reflection in a mirror 1 122 19 7 mirror mirror Objects
bar stol 1 40 7 otherprop Objects
oven/stove 1 40 7 otherprop Objects
patterned rug 1 130 40 7 rug otherprop Objects
window panel 1 40 7 otherprop Objects
vault 1 40 7 otherprop Objects
dust bin cover 1 40 7 otherprop Objects
throw 1 872 40 7 throw otherprop Objects n04429971 throw.n.04
painting and frame 1 40 7 otherprop Objects
covered piano 1 298 39 6 piano otherfurniture Furniture piano piano 03928116 n03928116 piano.n.01
drawer unit 1 40 7 otherprop Objects
aircon 1 40 7 otherprop Objects
package 1 40 7 otherprop Objects n03871083 package.n.02
gas vent 1 40 7 otherprop Objects
block 1 40 7 otherprop Objects
cloth container 1 140 40 7 container otherprop Objects
additional printer 1 66 40 7 printer otherprop Objects printer 04004475 n04004475 printer.n.03
danger sign 1 208 40 7 sign otherprop Objects
game machine 1 220 40 7 machine otherprop Objects
light fixture 1 40 7 otherprop Objects
utility 1 40 7 otherprop Objects n04516874 utility.n.06
base rack 1 40 7 otherprop Objects
staircase landing 1 40 7 otherprop Objects
szll 1 40 7 otherprop Objects
piano note 1 40 7 otherprop Objects
bboks 1 40 7 otherprop Objects
cabord 1 40 7 otherprop Objects
central table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
splash 1 40 7 otherprop Objects n04682319 splash.n.04
suit 1 40 7 otherprop Objects n04350905 suit.n.01
cook top 1 40 7 otherprop Objects
jug 1 687 40 7 jug otherprop Objects bottle bottle 02876657 n03603722 jug.n.01
stepstool 1 276 40 7 step stool otherprop Objects
tripod 1 50 39 6 stand otherfurniture Furniture n04485082 tripod.n.01
cover box 1 26 29 7 box box Objects
baby crib 1 485 39 6 crib otherfurniture Furniture
air condisnor 1 40 7 otherprop Objects
water softner 1 40 7 otherprop Objects
chandelier 1 342 38 7 chandelier otherstructure Objects n03005285 chandelier.n.01
floor patterning 1 40 7 otherprop Objects
tablet top 1 40 7 otherprop Objects
smoke detector 1 40 7 otherprop Objects
baseball cap 1 40 7 otherprop Objects cap 02954340 n02799323 baseball_cap.n.01
tissue roll holder 1 40 7 otherprop Objects
case of water 1 40 7 otherprop Objects
wall-organizer 1 40 7 otherprop Objects
piece 1 40 7 otherprop Objects n03343853 firearm.n.01
wheelbarrel 1 40 7 otherprop Objects
desktop item 1 40 7 otherprop Objects
tv showcase 1 40 7 otherprop Objects
chelves 1 40 7 otherprop Objects
toothbrush 1 127 40 7 toothbrush otherprop Objects n04453156 toothbrush.n.01
chiffonière 1 40 7 otherprop Objects
leg towel 1 135 27 7 towel towel Objects
flowers/decorations 1 40 7 otherprop Objects
snake toy 1 389 40 7 toy otherprop Objects
cabinet's side 1 40 7 otherprop Objects
bedroom chair 1 5 5 4 chair chair Chair chair chair chair 03001627 n03001627 chair.n.01
drum 1 145 40 7 drum otherprop Objects n03249569 drum.n.01
liquid soap 1 133 40 7 soap otherprop Objects
set of bedding 1 40 7 otherprop Objects
night lamp 1 144 35 7 lamp lamp Objects lamp lamp 03636649 n03636649 lamp.n.02
post board 1 408 38 7 board otherstructure Objects
measuring cup 1 730 40 7 measuring cup otherprop Objects cup n03733805 measuring_cup.n.01
baseboard heater 1 111 39 6 heater otherfurniture Furniture
paper shelf 1 40 7 otherprop Objects
alert sheet 1 559 40 7 sheet otherprop Objects
duster 1 115 40 7 duster otherprop Objects n03258330 dustcloth.n.01
snooker table 1 19 7 10 table table Table table table table 04379243 n03982430 pool_table.n.01
leg rest 1 40 7 otherprop Objects
wall storage 1 40 7 otherprop Objects
office board 1 408 38 7 board otherstructure Objects
bathroom counter 1 7 12 6 counter counter Furniture table table table 04379243 n03116530 counter.n.01
table sofa 1 83 6 9 sofa sofa Sofa sofa sofa sofa 04256520 n04256520 sofa.n.01
glass-topped table 1 19 7 10 table table Table table table table 04379243 n04379243 table.n.02
racket bat 1 40 7 otherprop Objects
fridge handle 1 758 40 7 handle otherprop Objects
stove top 1 40 7 otherprop Objects
monitor from pc 1 40 7 otherprop Objects
stick 1 529 40 7 stick otherprop Objects
================================================
FILE: pointnet2_tf/scannet/preprocessing/scannet_util.py
================================================
g_label_names = ['unannotated', 'wall', 'floor', 'chair', 'table', 'desk', 'bed', 'bookshelf', 'sofa', 'sink', 'bathtub', 'toilet', 'curtain', 'counter', 'door', 'window', 'shower curtain', 'refridgerator', 'picture', 'cabinet', 'otherfurniture']
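# Map a raw ScanNet annotation string (column 0 of the TSV) to one of the 21
# names above via its NYU40 class name (column 6); anything outside that set
# collapses to 'unannotated'.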
def get_raw2scannet_label_map():
    label_classes_set = set(g_label_names)
    with open('scannet-labels.combined.tsv') as f:
        lines = [line.rstrip() for line in f]
    raw2scannet = {}
    for line in lines[1:]:  # skip the header row
        elements = line.split('\t')
        raw_name = elements[0]
        nyu40_name = elements[6]
        if nyu40_name not in label_classes_set:
            raw2scannet[raw_name] = 'unannotated'
        else:
            raw2scannet[raw_name] = nyu40_name
    return raw2scannet
g_raw2scannet = get_raw2scannet_label_map()
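# Example: a raw label whose NYU40 class is 'otherprop' (e.g. 'coffee mug')
# maps to 'unannotated', since 'otherprop' is not in g_label_names.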
================================================
FILE: pointnet2_tf/scannet/scannet_dataset.py
================================================
import pickle
import os
import sys
import numpy as np
import pc_util
import scene_util
class ScannetDataset():
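    # Each split pickle stores two aligned lists: per-scene Nx3 point arrays and
    # per-point semantic labels in [0, 20], where 0 means unannotated.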
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
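        # Training uses inverse-log-frequency class weights, 1/log(1.2 + p_c),
        # which upweights rare classes (bounded by the 1.2 offset); test-time
        # weights are uniform.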
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
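        # Randomly crop a 1.5m x 1.5m full-height column around a seed point,
        # retrying up to 10 times until >=70% of the cropped points are annotated
        # and the crop occupies >=2% of a 31x31x62 voxel grid.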
point_set = self.scene_points_list[index]
semantic_seg = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set,axis=0)
coordmin = np.min(point_set,axis=0)
smpmin = np.maximum(coordmax-[1.5,1.5,3.0], coordmin)
smpmin[2] = coordmin[2]
smpsz = np.minimum(coordmax-smpmin,[1.5,1.5,3.0])
smpsz[2] = coordmax[2]-coordmin[2]
isvalid = False
for i in range(10):
curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
curmin = curcenter-[0.75,0.75,1.5]
curmax = curcenter+[0.75,0.75,1.5]
curmin[2] = coordmin[2]
curmax[2] = coordmax[2]
curchoice = np.sum((point_set>=(curmin-0.2))*(point_set<=(curmax+0.2)),axis=1)==3
cur_point_set = point_set[curchoice,:]
cur_semantic_seg = semantic_seg[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set>=(curmin-0.01))*(cur_point_set<=(curmax+0.01)),axis=1)==3
vidx = np.ceil((cur_point_set[mask,:]-curmin)/(curmax-curmin)*[31.0,31.0,62.0])
vidx = np.unique(vidx[:,0]*31.0*62.0+vidx[:,1]*62.0+vidx[:,2])
isvalid = np.sum(cur_semantic_seg>0)/len(cur_semantic_seg)>=0.7 and len(vidx)/31.0/31.0/62.0>=0.02
if isvalid:
break
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
point_set = cur_point_set[choice,:]
semantic_seg = cur_semantic_seg[choice]
mask = mask[choice]
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask
return point_set, semantic_seg, sample_weight
def __len__(self):
return len(self.scene_points_list)
class ScannetDatasetWholeScene():
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
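        # Tile the whole scene into a grid of 1.5m x 1.5m full-height columns and
        # sample self.npoints points from each non-empty tile, stacking the tiles
        # along a batch dimension.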
point_set_ini = self.scene_points_list[index]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini,axis=0)
coordmin = np.min(point_set_ini,axis=0)
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)
point_sets = list()
semantic_segs = list()
sample_weights = list()
isvalid = False
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*1.5,j*1.5,0]
curmax = coordmin+[(i+1)*1.5,(j+1)*1.5,coordmax[2]-coordmin[2]]
curchoice = np.sum((point_set_ini>=(curmin-0.2))*(point_set_ini<=(curmax+0.2)),axis=1)==3
cur_point_set = point_set_ini[curchoice,:]
cur_semantic_seg = semantic_seg_ini[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set>=(curmin-0.001))*(cur_point_set<=(curmax+0.001)),axis=1)==3
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
point_set = cur_point_set[choice,:] # Nx3
semantic_seg = cur_semantic_seg[choice] # N
mask = mask[choice]
if sum(mask)/float(len(mask))<0.01:
continue
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask # N
point_sets.append(np.expand_dims(point_set,0)) # 1xNx3
semantic_segs.append(np.expand_dims(semantic_seg,0)) # 1xN
sample_weights.append(np.expand_dims(sample_weight,0)) # 1xN
point_sets = np.concatenate(tuple(point_sets),axis=0)
semantic_segs = np.concatenate(tuple(semantic_segs),axis=0)
sample_weights = np.concatenate(tuple(sample_weights),axis=0)
return point_sets, semantic_segs, sample_weights
def __len__(self):
return len(self.scene_points_list)
class ScannetDatasetVirtualScan():
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
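        # Render up to 8 virtual scans of the scene from evenly spaced headings
        # (pi/4 apart) and sample self.npoints visible points from each; scans
        # hitting fewer than 300 points are skipped.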
point_set_ini = self.scene_points_list[index]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
sample_weight_ini = self.labelweights[semantic_seg_ini]
point_sets = list()
semantic_segs = list()
sample_weights = list()
        for i in range(8):
smpidx = scene_util.virtual_scan(point_set_ini,mode=i)
if len(smpidx)<300:
continue
point_set = point_set_ini[smpidx,:]
semantic_seg = semantic_seg_ini[smpidx]
sample_weight = sample_weight_ini[smpidx]
choice = np.random.choice(len(semantic_seg), self.npoints, replace=True)
point_set = point_set[choice,:] # Nx3
semantic_seg = semantic_seg[choice] # N
sample_weight = sample_weight[choice] # N
point_sets.append(np.expand_dims(point_set,0)) # 1xNx3
semantic_segs.append(np.expand_dims(semantic_seg,0)) # 1xN
sample_weights.append(np.expand_dims(sample_weight,0)) # 1xN
point_sets = np.concatenate(tuple(point_sets),axis=0)
semantic_segs = np.concatenate(tuple(semantic_segs),axis=0)
sample_weights = np.concatenate(tuple(sample_weights),axis=0)
return point_sets, semantic_segs, sample_weights
def __len__(self):
return len(self.scene_points_list)
if __name__=='__main__':
d = ScannetDatasetWholeScene(root = './data', split='test', npoints=8192)
labelweights_vox = np.zeros(21)
    for ii in range(len(d)):
        print(ii)
        ps,seg,smpw = d[ii]
        for b in range(ps.shape[0]):
            _, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(ps[b,smpw[b,:]>0,:], seg[b,smpw[b,:]>0], res=0.02)
            tmp,_ = np.histogram(uvlabel,range(22))
            labelweights_vox += tmp
    print(labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32)))
exit()
================================================
FILE: pointnet2_tf/scannet/scene_util.py
================================================
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
from sklearn.neighbors import NearestNeighbors
from numpy import linalg as la
import scipy.io as sio
def cart2sph(xyz):
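    # Cartesian (x,y,z) -> spherical (azimuth, elevation, range), each row of
    # the N x 3 input mapped independently.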
xy = xyz[:,0]**2+xyz[:,1]**2
aer = np.zeros(xyz.shape)
aer[:,2] = np.sqrt(xy+xyz[:,2]**2)
aer[:,1] = np.arctan2(xyz[:,2],np.sqrt(xy))
aer[:,0] = np.arctan2(xyz[:,1],xyz[:,0])
return aer
# generate virtual scan of a scene by subsampling the point cloud
def virtual_scan(xyz, mode=-1):
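    # Place a virtual camera at ~1.5m height looking toward the scene, cast a
    # 200x150 grid of rays, match scene points to rays by nearest neighbor in
    # (azimuth, elevation), and keep only the closest hit along each ray.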
camloc = np.mean(xyz,axis=0)
camloc[2] = 1.5 # human height
if mode==-1:
view_dr = np.array([2*np.pi*np.random.random(), np.pi/10*(np.random.random()-0.75)])
camloc[:2] -= (0.8+0.7*np.random.random())*np.array([np.cos(view_dr[0]),np.sin(view_dr[0])])
else:
view_dr = np.array([np.pi/4*mode, 0])
camloc[:2] -= np.array([np.cos(view_dr[0]),np.sin(view_dr[0])])
ct_ray_dr = np.array([np.cos(view_dr[1])*np.cos(view_dr[0]), np.cos(view_dr[1])*np.sin(view_dr[0]), np.sin(view_dr[1])])
hr_dr = np.cross(ct_ray_dr, np.array([0,0,1]))
hr_dr /= la.norm(hr_dr)
vt_dr = np.cross(hr_dr, ct_ray_dr)
vt_dr /= la.norm(vt_dr)
xx = np.linspace(-0.6,0.6,200) #200
yy = np.linspace(-0.45,0.45,150) #150
xx, yy = np.meshgrid(xx,yy)
xx = xx.reshape(-1,1)
yy = yy.reshape(-1,1)
rays = xx*hr_dr.reshape(1,-1)+yy*vt_dr.reshape(1,-1)+ct_ray_dr.reshape(1,-1)
rays_aer = cart2sph(rays)
local_xyz = xyz-camloc.reshape(1,-1)
local_aer = cart2sph(local_xyz)
nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(rays_aer[:,:2])
mindd, minidx = nbrs.kneighbors(local_aer[:,:2])
mindd = mindd.reshape(-1)
minidx = minidx.reshape(-1)
sub_idx = mindd<0.01
if sum(sub_idx)<100:
return np.ones(0)
sub_r = local_aer[sub_idx,2]
sub_minidx = minidx[sub_idx]
min_r = float('inf')*np.ones(np.max(sub_minidx)+1)
    for i in range(len(sub_r)):
        if sub_r[i] < min_r[sub_minidx[i]]:
            min_r[sub_minidx[i]] = sub_r[i]
    sub_smpidx = np.ones(len(sub_r))
    for i in range(len(sub_r)):
        if sub_r[i] > min_r[sub_minidx[i]]:
            sub_smpidx[i] = 0
smpidx = np.where(sub_idx)[0]
smpidx = smpidx[sub_smpidx==1]
return smpidx
if __name__=='__main__':
pc = np.load('scannet_dataset/scannet_scenes/scene0015_00.npy')
    print(pc.shape)
xyz = pc[:,:3]
seg = pc[:,7]
smpidx = virtual_scan(xyz,mode=2)
xyz = xyz[smpidx,:]
seg = seg[smpidx]
sio.savemat('tmp.mat',{'pc':xyz,'seg':seg})
================================================
FILE: pointnet2_tf/scannet/train.py
================================================
import argparse
import math
from datetime import datetime
#import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR) # model
sys.path.append(ROOT_DIR) # provider
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import pc_util
sys.path.append(os.path.join(ROOT_DIR, 'data_prep'))
import scannet_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model', help='Model name [default: model]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=8192, help='Point Number [default: 8192]')
parser.add_argument('--max_epoch', type=int, default=201, help='Epoch to run [default: 201]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
FLAGS = parser.parse_args()
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
NUM_CLASSES = 21
# ScanNet official train/test split
DATA_PATH = os.path.join(ROOT_DIR,'data','scannet_data_pointnet2')
TRAIN_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train')
TEST_DATASET = scannet_dataset.ScannetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test')
TEST_DATASET_WHOLE_SCENE = scannet_dataset.ScannetDatasetWholeScene(root=DATA_PATH, npoints=NUM_POINT, split='test')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
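    # BN momentum decays 0.5 -> ~0 in staircase steps, so the effective
    # bn_decay (1 - momentum) ramps from 0.5 toward the 0.99 clip during training.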
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl, smpws_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
            print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print "--- Get model and loss"
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, NUM_CLASSES, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, smpws_pl)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
print "--- Get training operator"
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
#sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'smpws_pl': smpws_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch,
'end_points': end_points}
best_acc = -1
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
if epoch%5==0:
acc = eval_one_epoch(sess, ops, test_writer)
acc = eval_whole_scene_one_epoch(sess, ops, test_writer)
if acc > best_acc:
best_acc = acc
save_path = saver.save(sess, os.path.join(LOG_DIR, "best_model_epoch_%03d.ckpt"%(epoch)))
log_string("Model saved in file: %s" % save_path)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def get_batch_wdp(dataset, idxs, start_idx, end_idx):
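    # "With dropout": per cloud, drop a random fraction (up to 87.5%) of points
    # by snapping them to the first point and zeroing their sample weight, so
    # the network sees variable effective point densities during training.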
bsize = end_idx-start_idx
batch_data = np.zeros((bsize, NUM_POINT, 3))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
batch_smpw = np.zeros((bsize, NUM_POINT), dtype=np.float32)
for i in range(bsize):
ps,seg,smpw = dataset[idxs[i+start_idx]]
batch_data[i,...] = ps
batch_label[i,:] = seg
batch_smpw[i,:] = smpw
dropout_ratio = np.random.random()*0.875 # 0-0.875
drop_idx = np.where(np.random.random((ps.shape[0]))<=dropout_ratio)[0]
batch_data[i,drop_idx,:] = batch_data[i,0,:]
batch_label[i,drop_idx] = batch_label[i,0]
batch_smpw[i,drop_idx] *= 0
return batch_data, batch_label, batch_smpw
def get_batch(dataset, idxs, start_idx, end_idx):
bsize = end_idx-start_idx
batch_data = np.zeros((bsize, NUM_POINT, 3))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
batch_smpw = np.zeros((bsize, NUM_POINT), dtype=np.float32)
for i in range(bsize):
ps,seg,smpw = dataset[idxs[i+start_idx]]
batch_data[i,...] = ps
batch_label[i,:] = seg
batch_smpw[i,:] = smpw
return batch_data, batch_label, batch_smpw
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train samples
train_idxs = np.arange(0, len(TRAIN_DATASET))
np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET)//BATCH_SIZE
log_string(str(datetime.now()))
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label, batch_smpw = get_batch_wdp(TRAIN_DATASET, train_idxs, start_idx, end_idx)
# Augment batched point clouds by rotation
aug_data = provider.rotate_point_cloud_z(batch_data)
feed_dict = {ops['pointclouds_pl']: aug_data,
ops['labels_pl']: batch_label,
ops['smpws_pl']:batch_smpw,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == batch_label)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
if (batch_idx+1)%10 == 0:
log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
log_string('mean loss: %f' % (loss_sum / 10))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
total_correct = 0
total_seen = 0
loss_sum = 0
# evaluate on randomly chopped scenes
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET)//BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
total_correct_vox = 0
total_seen_vox = 0
total_seen_class_vox = [0 for _ in range(NUM_CLASSES)]
total_correct_class_vox = [0 for _ in range(NUM_CLASSES)]
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
labelweights = np.zeros(21)
labelweights_vox = np.zeros(21)
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label, batch_smpw = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx)
aug_data = provider.rotate_point_cloud_z(batch_data)
feed_dict = {ops['pointclouds_pl']: aug_data,
ops['labels_pl']: batch_label,
ops['smpws_pl']: batch_smpw,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2) # BxN
correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0)) # evaluate only on 20 categories but not unknown
total_correct += correct
total_seen += np.sum((batch_label>0) & (batch_smpw>0))
loss_sum += loss_val
tmp,_ = np.histogram(batch_label,range(22))
labelweights += tmp
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0))
total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0))
        for b in range(batch_label.shape[0]):
_, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(aug_data[b,batch_smpw[b,:]>0,:], np.concatenate((np.expand_dims(batch_label[b,batch_smpw[b,:]>0],1),np.expand_dims(pred_val[b,batch_smpw[b,:]>0],1)),axis=1), res=0.02)
total_correct_vox += np.sum((uvlabel[:,0]==uvlabel[:,1])&(uvlabel[:,0]>0))
total_seen_vox += np.sum(uvlabel[:,0]>0)
tmp,_ = np.histogram(uvlabel[:,0],range(22))
labelweights_vox += tmp
for l in range(NUM_CLASSES):
total_seen_class_vox[l] += np.sum(uvlabel[:,0]==l)
total_correct_class_vox[l] += np.sum((uvlabel[:,0]==l) & (uvlabel[:,1]==l))
log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
log_string('eval point accuracy vox: %f'% (total_correct_vox / float(total_seen_vox)))
log_string('eval point avg class acc vox: %f' % (np.mean(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6))))
log_string('eval point accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval point avg class acc: %f' % (np.mean(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=np.float)+1e-6))))
labelweights_vox = labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32))
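    # Fixed per-class weights (presumably the test-set label frequencies) used
    # to report a frequency-calibrated average accuracy.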
caliweights = np.array([0.388,0.357,0.038,0.033,0.017,0.02,0.016,0.025,0.002,0.002,0.002,0.007,0.006,0.022,0.004,0.0004,0.003,0.002,0.024,0.029])
log_string('eval point calibrated average acc: %f' % (np.average(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=np.float)+1e-6),weights=caliweights)))
per_class_str = 'vox based --------'
for l in range(1,NUM_CLASSES):
        per_class_str += 'class %d weight: %f, acc: %f; ' % (l,labelweights_vox[l-1],total_correct_class_vox[l]/float(total_seen_class_vox[l]))
log_string(per_class_str)
EPOCH_CNT += 1
return total_correct/float(total_seen)
# evaluate on whole scenes to generate numbers provided in the paper
def eval_whole_scene_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET_WHOLE_SCENE))
num_batches = len(TEST_DATASET_WHOLE_SCENE)
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
total_correct_vox = 0
total_seen_vox = 0
total_seen_class_vox = [0 for _ in range(NUM_CLASSES)]
total_correct_class_vox = [0 for _ in range(NUM_CLASSES)]
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----'%(EPOCH_CNT))
labelweights = np.zeros(21)
labelweights_vox = np.zeros(21)
is_continue_batch = False
extra_batch_data = np.zeros((0,NUM_POINT,3))
extra_batch_label = np.zeros((0,NUM_POINT))
extra_batch_smpw = np.zeros((0,NUM_POINT))
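    # Sub-volumes from consecutive scenes are pooled into fixed-size batches;
    # any overflow beyond BATCH_SIZE is carried to the next iteration in the
    # extra_batch_* buffers.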
for batch_idx in range(num_batches):
if not is_continue_batch:
batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx]
batch_data = np.concatenate((batch_data,extra_batch_data),axis=0)
batch_label = np.concatenate((batch_label,extra_batch_label),axis=0)
batch_smpw = np.concatenate((batch_smpw,extra_batch_smpw),axis=0)
else:
batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx]
batch_data = np.concatenate((batch_data,batch_data_tmp),axis=0)
batch_label = np.concatenate((batch_label,batch_label_tmp),axis=0)
batch_smpw = np.concatenate((batch_smpw,batch_smpw_tmp),axis=0)
        if batch_data.shape[0] < BATCH_SIZE:
            is_continue_batch = True
            continue
        elif batch_data.shape[0] == BATCH_SIZE:
            is_continue_batch = False
            extra_batch_data = np.zeros((0,NUM_POINT,3))
            extra_batch_label = np.zeros((0,NUM_POINT))
            extra_batch_smpw = np.zeros((0,NUM_POINT))
        else:
            is_continue_batch = False
            extra_batch_data = batch_data[BATCH_SIZE:,:,:]
            extra_batch_label = batch_label[BATCH_SIZE:,:]
            extra_batch_smpw = batch_smpw[BATCH_SIZE:,:]
            batch_data = batch_data[:BATCH_SIZE,:,:]
            batch_label = batch_label[:BATCH_SIZE,:]
            batch_smpw = batch_smpw[:BATCH_SIZE,:]
        aug_data = provider.rotate_point_cloud_z(batch_data)
        feed_dict = {ops['pointclouds_pl']: aug_data,
                     ops['labels_pl']: batch_label,
                     ops['smpws_pl']: batch_smpw,
                     ops['is_training_pl']: is_training}
        summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
            ops['loss'], ops['pred']], feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2) # BxN
        correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0)) # evaluate only on 20 categories but not unknown
total_correct += correct
total_seen += np.sum((batch_label>0) & (batch_smpw>0))
loss_sum += loss_val
tmp,_ = np.histogram(batch_label,range(22))
labelweights += tmp
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0))
total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0))
        for b in range(batch_label.shape[0]):
_, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(aug_data[b,batch_smpw[b,:]>0,:], np.concatenate((np.expand_dims(batch_label[b,batch_smpw[b,:]>0],1),np.expand_dims(pred_val[b,batch_smpw[b,:]>0],1)),axis=1), res=0.02)
total_correct_vox += np.sum((uvlabel[:,0]==uvlabel[:,1])&(uvlabel[:,0]>0))
total_seen_vox += np.sum(uvlabel[:,0]>0)
tmp,_ = np.histogram(uvlabel[:,0],range(22))
labelweights_vox += tmp
for l in range(NUM_CLASSES):
total_seen_class_vox[l] += np.sum(uvlabel[:,0]==l)
total_correct_class_vox[l] += np.sum((uvlabel[:,0]==l) & (uvlabel[:,1]==l))
log_string('eval whole scene mean loss: %f' % (loss_sum / float(num_batches)))
log_string('eval whole scene point accuracy vox: %f'% (total_correct_vox / float(total_seen_vox)))
log_string('eval whole scene point avg class acc vox: %f' % (np.mean(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6))))
log_string('eval whole scene point accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval whole scene point avg class acc: %f' % (np.mean(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=np.float)+1e-6))))
labelweights = labelweights[1:].astype(np.float32)/np.sum(labelweights[1:].astype(np.float32))
labelweights_vox = labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32))
caliweights = np.array([0.388,0.357,0.038,0.033,0.017,0.02,0.016,0.025,0.002,0.002,0.002,0.007,0.006,0.022,0.004,0.0004,0.003,0.002,0.024,0.029])
caliacc = np.average(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6),weights=caliweights)
log_string('eval whole scene point calibrated average acc vox: %f' % caliacc)
per_class_str = 'vox based --------'
for l in range(1,NUM_CLASSES):
per_class_str += 'class %d weight: %f, acc: %f; ' % (l,labelweights_vox[l-1],total_correct_class_vox[l]/float(total_seen_class_vox[l]))
log_string(per_class_str)
EPOCH_CNT += 1
return caliacc
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
================================================
FILE: pointnet2_tf/tf_ops/3d_interpolation/interpolate.cpp
================================================
#include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <vector>
#include <iostream>
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// Find three nearest neighbors with square distance
// input: xyz1 (b,n,3), xyz2(b,m,3)
// output: dist (b,n,3), idx (b,n,3)
void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) {
    for (int i=0;i<b;++i) {
        for (int j=0;j<n;++j) {
            float x1=xyz1[j*3+0];
            float y1=xyz1[j*3+1];
            float z1=xyz1[j*3+2];
            // track the three smallest squared distances (and their indices)
            double best1=1e40, best2=1e40, best3=1e40;
            int besti1=0, besti2=0, besti3=0;
            for (int k=0;k<m;++k) {
                float x2=xyz2[k*3+0];
                float y2=xyz2[k*3+1];
                float z2=xyz2[k*3+2];
                double d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
                if (d<best1) {
                    best3=best2; besti3=besti2;
                    best2=best1; besti2=besti1;
                    best1=d; besti1=k;
                } else if (d<best2) {
                    best3=best2; besti3=besti2;
                    best2=d; besti2=k;
                } else if (d<best3) {
                    best3=d; besti3=k;
                }
            }
            dist[j*3]=best1; idx[j*3]=besti1;
            dist[j*3+1]=best2; idx[j*3+1]=besti2;
            dist[j*3+2]=best3; idx[j*3+2]=besti3;
        }
        xyz1+=n*3; xyz2+=m*3;
        dist+=n*3; idx+=n*3;
    }
}
#include
#include