Copy disabled (too large)
Download .txt
Showing preview only (12,396K chars total). Download the full file to get everything.
Repository: unsky/FPN
Branch: master
Commit: a096edb4495c
Files: 704
Total size: 11.7 MB
Directory structure:
gitextract_hh7mi8hx/
├── README.md
├── caffe-fpn/
│ ├── .Doxyfile
│ ├── .travis.yml
│ ├── CMakeLists.txt
│ ├── CONTRIBUTING.md
│ ├── CONTRIBUTORS.md
│ ├── INSTALL.md
│ ├── LICENSE
│ ├── Makefile
│ ├── Makefile.config
│ ├── Makefile.config.example
│ ├── README.md
│ ├── caffe.cloc
│ ├── cmake/
│ │ ├── ConfigGen.cmake
│ │ ├── Cuda.cmake
│ │ ├── Dependencies.cmake
│ │ ├── External/
│ │ │ ├── gflags.cmake
│ │ │ └── glog.cmake
│ │ ├── Misc.cmake
│ │ ├── Modules/
│ │ │ ├── FindAtlas.cmake
│ │ │ ├── FindGFlags.cmake
│ │ │ ├── FindGlog.cmake
│ │ │ ├── FindLAPACK.cmake
│ │ │ ├── FindLMDB.cmake
│ │ │ ├── FindLevelDB.cmake
│ │ │ ├── FindMKL.cmake
│ │ │ ├── FindMatlabMex.cmake
│ │ │ ├── FindNumPy.cmake
│ │ │ ├── FindOpenBLAS.cmake
│ │ │ ├── FindSnappy.cmake
│ │ │ └── FindvecLib.cmake
│ │ ├── ProtoBuf.cmake
│ │ ├── Summary.cmake
│ │ ├── Targets.cmake
│ │ ├── Templates/
│ │ │ ├── CaffeConfig.cmake.in
│ │ │ ├── CaffeConfigVersion.cmake.in
│ │ │ └── caffe_config.h.in
│ │ ├── Utils.cmake
│ │ └── lint.cmake
│ ├── data/
│ │ ├── cifar10/
│ │ │ └── get_cifar10.sh
│ │ └── ilsvrc12/
│ │ └── get_ilsvrc_aux.sh
│ ├── docs/
│ │ ├── CMakeLists.txt
│ │ ├── CNAME
│ │ ├── README.md
│ │ ├── _config.yml
│ │ ├── _layouts/
│ │ │ └── default.html
│ │ ├── development.md
│ │ ├── index.md
│ │ ├── install_apt.md
│ │ ├── install_osx.md
│ │ ├── install_yum.md
│ │ ├── installation.md
│ │ ├── model_zoo.md
│ │ ├── multigpu.md
│ │ ├── performance_hardware.md
│ │ ├── stylesheets/
│ │ │ ├── pygment_trac.css
│ │ │ ├── reset.css
│ │ │ └── styles.css
│ │ └── tutorial/
│ │ ├── convolution.md
│ │ ├── data.md
│ │ ├── fig/
│ │ │ └── .gitignore
│ │ ├── forward_backward.md
│ │ ├── index.md
│ │ ├── interfaces.md
│ │ ├── layers.md
│ │ ├── loss.md
│ │ ├── net_layer_blob.md
│ │ └── solver.md
│ ├── examples/
│ │ ├── 00-classification.ipynb
│ │ ├── 01-learning-lenet.ipynb
│ │ ├── 02-brewing-logreg.ipynb
│ │ ├── 03-fine-tuning.ipynb
│ │ ├── CMakeLists.txt
│ │ ├── cifar10/
│ │ │ ├── cifar10_full.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt
│ │ │ ├── cifar10_full_solver.prototxt
│ │ │ ├── cifar10_full_solver_lr1.prototxt
│ │ │ ├── cifar10_full_solver_lr2.prototxt
│ │ │ ├── cifar10_full_train_test.prototxt
│ │ │ ├── cifar10_quick.prototxt
│ │ │ ├── cifar10_quick_solver.prototxt
│ │ │ ├── cifar10_quick_solver_lr1.prototxt
│ │ │ ├── cifar10_quick_train_test.prototxt
│ │ │ ├── convert_cifar_data.cpp
│ │ │ ├── create_cifar10.sh
│ │ │ ├── readme.md
│ │ │ ├── train_full.sh
│ │ │ ├── train_full_sigmoid.sh
│ │ │ ├── train_full_sigmoid_bn.sh
│ │ │ └── train_quick.sh
│ │ ├── cpp_classification/
│ │ │ ├── classification.cpp
│ │ │ └── readme.md
│ │ ├── detection.ipynb
│ │ ├── feature_extraction/
│ │ │ ├── imagenet_val.prototxt
│ │ │ └── readme.md
│ │ ├── finetune_flickr_style/
│ │ │ ├── assemble_data.py
│ │ │ ├── readme.md
│ │ │ └── style_names.txt
│ │ ├── finetune_pascal_detection/
│ │ │ ├── pascal_finetune_solver.prototxt
│ │ │ └── pascal_finetune_trainval_test.prototxt
│ │ ├── hdf5_classification/
│ │ │ ├── nonlinear_auto_test.prototxt
│ │ │ ├── nonlinear_auto_train.prototxt
│ │ │ ├── nonlinear_solver.prototxt
│ │ │ ├── nonlinear_train_val.prototxt
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── imagenet/
│ │ │ ├── create_imagenet.sh
│ │ │ ├── make_imagenet_mean.sh
│ │ │ ├── readme.md
│ │ │ ├── resume_training.sh
│ │ │ └── train_caffenet.sh
│ │ ├── mnist/
│ │ │ ├── convert_mnist_data.cpp
│ │ │ ├── create_mnist.sh
│ │ │ ├── lenet.prototxt
│ │ │ ├── lenet_adadelta_solver.prototxt
│ │ │ ├── lenet_auto_solver.prototxt
│ │ │ ├── lenet_consolidated_solver.prototxt
│ │ │ ├── lenet_multistep_solver.prototxt
│ │ │ ├── lenet_solver.prototxt
│ │ │ ├── lenet_solver_adam.prototxt
│ │ │ ├── lenet_solver_rmsprop.prototxt
│ │ │ ├── lenet_train_test.prototxt
│ │ │ ├── mnist_autoencoder.prototxt
│ │ │ ├── mnist_autoencoder_solver.prototxt
│ │ │ ├── mnist_autoencoder_solver_adadelta.prototxt
│ │ │ ├── mnist_autoencoder_solver_adagrad.prototxt
│ │ │ ├── mnist_autoencoder_solver_nesterov.prototxt
│ │ │ ├── mnist_train_lmdb/
│ │ │ │ ├── data.mdb
│ │ │ │ └── lock.mdb
│ │ │ ├── readme.md
│ │ │ ├── train_lenet.sh
│ │ │ ├── train_lenet_adam.sh
│ │ │ ├── train_lenet_consolidated.sh
│ │ │ ├── train_lenet_docker.sh
│ │ │ ├── train_lenet_rmsprop.sh
│ │ │ ├── train_mnist_autoencoder.sh
│ │ │ ├── train_mnist_autoencoder_adadelta.sh
│ │ │ ├── train_mnist_autoencoder_adagrad.sh
│ │ │ └── train_mnist_autoencoder_nesterov.sh
│ │ ├── net_surgery/
│ │ │ ├── bvlc_caffenet_full_conv.prototxt
│ │ │ └── conv.prototxt
│ │ ├── net_surgery.ipynb
│ │ ├── pycaffe/
│ │ │ ├── caffenet.py
│ │ │ ├── layers/
│ │ │ │ └── pyloss.py
│ │ │ └── linreg.prototxt
│ │ ├── siamese/
│ │ │ ├── convert_mnist_siamese_data.cpp
│ │ │ ├── create_mnist_siamese.sh
│ │ │ ├── mnist_siamese.ipynb
│ │ │ ├── mnist_siamese.prototxt
│ │ │ ├── mnist_siamese_solver.prototxt
│ │ │ ├── mnist_siamese_train_test.prototxt
│ │ │ ├── readme.md
│ │ │ └── train_mnist_siamese.sh
│ │ └── web_demo/
│ │ ├── app.py
│ │ ├── exifutil.py
│ │ ├── readme.md
│ │ ├── requirements.txt
│ │ └── templates/
│ │ └── index.html
│ ├── include/
│ │ └── caffe/
│ │ ├── blob.hpp
│ │ ├── caffe.hpp
│ │ ├── common.hpp
│ │ ├── data_reader.hpp
│ │ ├── data_transformer.hpp
│ │ ├── fast_rcnn_layers.hpp
│ │ ├── filler.hpp
│ │ ├── internal_thread.hpp
│ │ ├── layer.hpp
│ │ ├── layer_factory.hpp
│ │ ├── layers/
│ │ │ ├── .conadd_layer.hpp.swo
│ │ │ ├── .conadd_layer.hpp.swp
│ │ │ ├── absval_layer.hpp
│ │ │ ├── accuracy_layer.hpp
│ │ │ ├── argmax_layer.hpp
│ │ │ ├── base_conv_layer.hpp
│ │ │ ├── base_data_layer.hpp
│ │ │ ├── batch_norm_layer.hpp
│ │ │ ├── batch_reindex_layer.hpp
│ │ │ ├── bias_layer.hpp
│ │ │ ├── bnll_layer.hpp
│ │ │ ├── conadd_layer.hpp
│ │ │ ├── concat_layer.hpp
│ │ │ ├── contrastive_loss_layer.hpp
│ │ │ ├── conv_layer.hpp
│ │ │ ├── crop_layer.hpp
│ │ │ ├── cudnn_conv_layer.hpp
│ │ │ ├── cudnn_lcn_layer.hpp
│ │ │ ├── cudnn_lrn_layer.hpp
│ │ │ ├── cudnn_pooling_layer.hpp
│ │ │ ├── cudnn_relu_layer.hpp
│ │ │ ├── cudnn_sigmoid_layer.hpp
│ │ │ ├── cudnn_softmax_layer.hpp
│ │ │ ├── cudnn_tanh_layer.hpp
│ │ │ ├── data_layer.hpp
│ │ │ ├── deconv_layer.hpp
│ │ │ ├── deformable_conv_layer.hpp
│ │ │ ├── dropout_layer.hpp
│ │ │ ├── dummy_data_layer.hpp
│ │ │ ├── eltwise_layer.hpp
│ │ │ ├── elu_layer.hpp
│ │ │ ├── embed_layer.hpp
│ │ │ ├── euclidean_loss_layer.hpp
│ │ │ ├── exp_layer.hpp
│ │ │ ├── filter_layer.hpp
│ │ │ ├── flatten_layer.hpp
│ │ │ ├── hdf5_data_layer.hpp
│ │ │ ├── hdf5_output_layer.hpp
│ │ │ ├── hinge_loss_layer.hpp
│ │ │ ├── im2col_layer.hpp
│ │ │ ├── image_data_layer.hpp
│ │ │ ├── infogain_loss_layer.hpp
│ │ │ ├── inner_product_layer.hpp
│ │ │ ├── log_layer.hpp
│ │ │ ├── loss_layer.hpp
│ │ │ ├── lrn_layer.hpp
│ │ │ ├── memory_data_layer.hpp
│ │ │ ├── multinomial_logistic_loss_layer.hpp
│ │ │ ├── mvn_layer.hpp
│ │ │ ├── neuron_layer.hpp
│ │ │ ├── pooling_layer.hpp
│ │ │ ├── power_layer.hpp
│ │ │ ├── prelu_layer.hpp
│ │ │ ├── python_layer.hpp
│ │ │ ├── reduction_layer.hpp
│ │ │ ├── relu_layer.hpp
│ │ │ ├── reshape_layer.hpp
│ │ │ ├── scale_layer.hpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.hpp
│ │ │ ├── sigmoid_layer.hpp
│ │ │ ├── silence_layer.hpp
│ │ │ ├── slice_layer.hpp
│ │ │ ├── softmax_layer.hpp
│ │ │ ├── softmax_loss_layer.hpp
│ │ │ ├── split_layer.hpp
│ │ │ ├── spp_layer.hpp
│ │ │ ├── tanh_layer.hpp
│ │ │ ├── threshold_layer.hpp
│ │ │ ├── tile_layer.hpp
│ │ │ └── window_data_layer.hpp
│ │ ├── net.hpp
│ │ ├── parallel.hpp
│ │ ├── sgd_solvers.hpp
│ │ ├── solver.hpp
│ │ ├── solver_factory.hpp
│ │ ├── syncedmem.hpp
│ │ ├── test/
│ │ │ ├── test_caffe_main.hpp
│ │ │ └── test_gradient_check_util.hpp
│ │ └── util/
│ │ ├── benchmark.hpp
│ │ ├── blocking_queue.hpp
│ │ ├── cudnn.hpp
│ │ ├── db.hpp
│ │ ├── db_leveldb.hpp
│ │ ├── db_lmdb.hpp
│ │ ├── deformable_im2col.hpp
│ │ ├── device_alternate.hpp
│ │ ├── format.hpp
│ │ ├── gpu_util.cuh
│ │ ├── hdf5.hpp
│ │ ├── im2col.hpp
│ │ ├── insert_splits.hpp
│ │ ├── io.hpp
│ │ ├── math_functions.hpp
│ │ ├── mkl_alternate.hpp
│ │ ├── rng.hpp
│ │ ├── signal_handler.h
│ │ └── upgrade_proto.hpp
│ ├── matlab/
│ │ ├── +caffe/
│ │ │ ├── +test/
│ │ │ │ ├── test_io.m
│ │ │ │ ├── test_net.m
│ │ │ │ └── test_solver.m
│ │ │ ├── Blob.m
│ │ │ ├── Layer.m
│ │ │ ├── Net.m
│ │ │ ├── Solver.m
│ │ │ ├── get_net.m
│ │ │ ├── get_solver.m
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.mat
│ │ │ ├── io.m
│ │ │ ├── private/
│ │ │ │ ├── CHECK.m
│ │ │ │ ├── CHECK_FILE_EXIST.m
│ │ │ │ ├── caffe_.cpp
│ │ │ │ └── is_valid_handle.m
│ │ │ ├── reset_all.m
│ │ │ ├── run_tests.m
│ │ │ ├── set_device.m
│ │ │ ├── set_mode_cpu.m
│ │ │ ├── set_mode_gpu.m
│ │ │ └── version.m
│ │ ├── CMakeLists.txt
│ │ ├── demo/
│ │ │ └── classification_demo.m
│ │ └── hdf5creation/
│ │ ├── .gitignore
│ │ ├── demo.m
│ │ └── store2hdf5.m
│ ├── models/
│ │ ├── bvlc_alexnet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_googlenet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── quick_solver.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_reference_caffenet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_reference_rcnn_ilsvrc13/
│ │ │ ├── deploy.prototxt
│ │ │ └── readme.md
│ │ └── finetune_flickr_style/
│ │ ├── deploy.prototxt
│ │ ├── readme.md
│ │ ├── solver.prototxt
│ │ └── train_val.prototxt
│ ├── python/
│ │ ├── CMakeLists.txt
│ │ ├── caffe/
│ │ │ ├── __init__.py
│ │ │ ├── _caffe.cpp
│ │ │ ├── classifier.py
│ │ │ ├── detector.py
│ │ │ ├── draw.py
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.npy
│ │ │ ├── io.py
│ │ │ ├── net_spec.py
│ │ │ ├── proto/
│ │ │ │ ├── __init__.py
│ │ │ │ └── caffe_pb2.py
│ │ │ ├── pycaffe.py
│ │ │ └── test/
│ │ │ ├── test_io.py
│ │ │ ├── test_layer_type_list.py
│ │ │ ├── test_net.py
│ │ │ ├── test_net_spec.py
│ │ │ ├── test_python_layer.py
│ │ │ ├── test_python_layer_with_param_str.py
│ │ │ └── test_solver.py
│ │ ├── classify.py
│ │ ├── detect.py
│ │ ├── draw_net.py
│ │ └── requirements.txt
│ ├── scripts/
│ │ ├── build_docs.sh
│ │ ├── copy_notebook.py
│ │ ├── cpp_lint.py
│ │ ├── deploy_docs.sh
│ │ ├── download_model_binary.py
│ │ ├── download_model_from_gist.sh
│ │ ├── gather_examples.sh
│ │ ├── travis/
│ │ │ ├── travis_build_and_test.sh
│ │ │ ├── travis_install.sh
│ │ │ └── travis_setup_makefile_config.sh
│ │ └── upload_model_to_gist.sh
│ ├── src/
│ │ ├── caffe/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── blob.cpp
│ │ │ ├── common.cpp
│ │ │ ├── data_reader.cpp
│ │ │ ├── data_transformer.cpp
│ │ │ ├── internal_thread.cpp
│ │ │ ├── layer.cpp
│ │ │ ├── layer_factory.cpp
│ │ │ ├── layers/
│ │ │ │ ├── .conadd_layer.cpp.swo
│ │ │ │ ├── .conadd_layer.cpp.swp
│ │ │ │ ├── .concat_layer.cpp.swp
│ │ │ │ ├── absval_layer.cpp
│ │ │ │ ├── absval_layer.cu
│ │ │ │ ├── accuracy_layer.cpp
│ │ │ │ ├── argmax_layer.cpp
│ │ │ │ ├── base_conv_layer.cpp
│ │ │ │ ├── base_data_layer.cpp
│ │ │ │ ├── base_data_layer.cu
│ │ │ │ ├── batch_norm_layer.cpp
│ │ │ │ ├── batch_norm_layer.cu
│ │ │ │ ├── batch_reindex_layer.cpp
│ │ │ │ ├── batch_reindex_layer.cu
│ │ │ │ ├── bias_layer.cpp
│ │ │ │ ├── bias_layer.cu
│ │ │ │ ├── bnll_layer.cpp
│ │ │ │ ├── bnll_layer.cu
│ │ │ │ ├── conadd_layer.cpp
│ │ │ │ ├── conadd_layer.cu
│ │ │ │ ├── concat_layer.cpp
│ │ │ │ ├── concat_layer.cu
│ │ │ │ ├── contrastive_loss_layer.cpp
│ │ │ │ ├── contrastive_loss_layer.cu
│ │ │ │ ├── conv_layer.cpp
│ │ │ │ ├── conv_layer.cu
│ │ │ │ ├── crop_layer.cpp
│ │ │ │ ├── crop_layer.cu
│ │ │ │ ├── cudnn_conv_layer.cpp
│ │ │ │ ├── cudnn_conv_layer.cu
│ │ │ │ ├── cudnn_lcn_layer.cpp
│ │ │ │ ├── cudnn_lcn_layer.cu
│ │ │ │ ├── cudnn_lrn_layer.cpp
│ │ │ │ ├── cudnn_lrn_layer.cu
│ │ │ │ ├── cudnn_pooling_layer.cpp
│ │ │ │ ├── cudnn_pooling_layer.cu
│ │ │ │ ├── cudnn_relu_layer.cpp
│ │ │ │ ├── cudnn_relu_layer.cu
│ │ │ │ ├── cudnn_sigmoid_layer.cpp
│ │ │ │ ├── cudnn_sigmoid_layer.cu
│ │ │ │ ├── cudnn_softmax_layer.cpp
│ │ │ │ ├── cudnn_softmax_layer.cu
│ │ │ │ ├── cudnn_tanh_layer.cpp
│ │ │ │ ├── cudnn_tanh_layer.cu
│ │ │ │ ├── data_layer.cpp
│ │ │ │ ├── deconv_layer.cpp
│ │ │ │ ├── deconv_layer.cu
│ │ │ │ ├── deformable_conv_layer.cpp
│ │ │ │ ├── deformable_conv_layer.cu
│ │ │ │ ├── dropout_layer.cpp
│ │ │ │ ├── dropout_layer.cu
│ │ │ │ ├── dummy_data_layer.cpp
│ │ │ │ ├── eltwise_layer.cpp
│ │ │ │ ├── eltwise_layer.cu
│ │ │ │ ├── elu_layer.cpp
│ │ │ │ ├── elu_layer.cu
│ │ │ │ ├── embed_layer.cpp
│ │ │ │ ├── embed_layer.cu
│ │ │ │ ├── euclidean_loss_layer.cpp
│ │ │ │ ├── euclidean_loss_layer.cu
│ │ │ │ ├── exp_layer.cpp
│ │ │ │ ├── exp_layer.cu
│ │ │ │ ├── filter_layer.cpp
│ │ │ │ ├── filter_layer.cu
│ │ │ │ ├── flatten_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cu
│ │ │ │ ├── hdf5_output_layer.cpp
│ │ │ │ ├── hdf5_output_layer.cu
│ │ │ │ ├── hinge_loss_layer.cpp
│ │ │ │ ├── im2col_layer.cpp
│ │ │ │ ├── im2col_layer.cu
│ │ │ │ ├── image_data_layer.cpp
│ │ │ │ ├── infogain_loss_layer.cpp
│ │ │ │ ├── inner_product_layer.cpp
│ │ │ │ ├── inner_product_layer.cu
│ │ │ │ ├── log_layer.cpp
│ │ │ │ ├── log_layer.cu
│ │ │ │ ├── loss_layer.cpp
│ │ │ │ ├── lrn_layer.cpp
│ │ │ │ ├── lrn_layer.cu
│ │ │ │ ├── memory_data_layer.cpp
│ │ │ │ ├── multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── mvn_layer.cpp
│ │ │ │ ├── mvn_layer.cu
│ │ │ │ ├── neuron_layer.cpp
│ │ │ │ ├── pooling_layer.cpp
│ │ │ │ ├── pooling_layer.cu
│ │ │ │ ├── power_layer.cpp
│ │ │ │ ├── power_layer.cu
│ │ │ │ ├── prelu_layer.cpp
│ │ │ │ ├── prelu_layer.cu
│ │ │ │ ├── reduction_layer.cpp
│ │ │ │ ├── reduction_layer.cu
│ │ │ │ ├── relu_layer.cpp
│ │ │ │ ├── relu_layer.cu
│ │ │ │ ├── reshape_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cu
│ │ │ │ ├── scale_layer.cpp
│ │ │ │ ├── scale_layer.cu
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu
│ │ │ │ ├── sigmoid_layer.cpp
│ │ │ │ ├── sigmoid_layer.cu
│ │ │ │ ├── silence_layer.cpp
│ │ │ │ ├── silence_layer.cu
│ │ │ │ ├── slice_layer.cpp
│ │ │ │ ├── slice_layer.cu
│ │ │ │ ├── smooth_L1_loss_layer.cpp
│ │ │ │ ├── smooth_L1_loss_layer.cu
│ │ │ │ ├── softmax_layer.cpp
│ │ │ │ ├── softmax_layer.cu
│ │ │ │ ├── softmax_loss_layer.cpp
│ │ │ │ ├── softmax_loss_layer.cu
│ │ │ │ ├── split_layer.cpp
│ │ │ │ ├── split_layer.cu
│ │ │ │ ├── spp_layer.cpp
│ │ │ │ ├── tanh_layer.cpp
│ │ │ │ ├── tanh_layer.cu
│ │ │ │ ├── threshold_layer.cpp
│ │ │ │ ├── threshold_layer.cu
│ │ │ │ ├── tile_layer.cpp
│ │ │ │ ├── tile_layer.cu
│ │ │ │ └── window_data_layer.cpp
│ │ │ ├── net.cpp
│ │ │ ├── parallel.cpp
│ │ │ ├── proto/
│ │ │ │ ├── .caffe.proto.swp
│ │ │ │ └── caffe.proto
│ │ │ ├── solver.cpp
│ │ │ ├── solvers/
│ │ │ │ ├── adadelta_solver.cpp
│ │ │ │ ├── adadelta_solver.cu
│ │ │ │ ├── adagrad_solver.cpp
│ │ │ │ ├── adagrad_solver.cu
│ │ │ │ ├── adam_solver.cpp
│ │ │ │ ├── adam_solver.cu
│ │ │ │ ├── nesterov_solver.cpp
│ │ │ │ ├── nesterov_solver.cu
│ │ │ │ ├── rmsprop_solver.cpp
│ │ │ │ ├── rmsprop_solver.cu
│ │ │ │ ├── sgd_solver.cpp
│ │ │ │ └── sgd_solver.cu
│ │ │ ├── syncedmem.cpp
│ │ │ ├── test/
│ │ │ │ ├── CMakeLists.txt
│ │ │ │ ├── test_accuracy_layer.cpp
│ │ │ │ ├── test_argmax_layer.cpp
│ │ │ │ ├── test_batch_norm_layer.cpp
│ │ │ │ ├── test_batch_reindex_layer.cpp
│ │ │ │ ├── test_benchmark.cpp
│ │ │ │ ├── test_bias_layer.cpp
│ │ │ │ ├── test_blob.cpp
│ │ │ │ ├── test_caffe_main.cpp
│ │ │ │ ├── test_common.cpp
│ │ │ │ ├── test_concat_layer.cpp
│ │ │ │ ├── test_contrastive_loss_layer.cpp
│ │ │ │ ├── test_convolution_layer.cpp
│ │ │ │ ├── test_data/
│ │ │ │ │ ├── generate_sample_data.py
│ │ │ │ │ ├── sample_data.h5
│ │ │ │ │ ├── sample_data_2_gzip.h5
│ │ │ │ │ ├── sample_data_list.txt
│ │ │ │ │ ├── solver_data.h5
│ │ │ │ │ └── solver_data_list.txt
│ │ │ │ ├── test_data_layer.cpp
│ │ │ │ ├── test_data_transformer.cpp
│ │ │ │ ├── test_db.cpp
│ │ │ │ ├── test_deconvolution_layer.cpp
│ │ │ │ ├── test_dummy_data_layer.cpp
│ │ │ │ ├── test_eltwise_layer.cpp
│ │ │ │ ├── test_embed_layer.cpp
│ │ │ │ ├── test_euclidean_loss_layer.cpp
│ │ │ │ ├── test_filler.cpp
│ │ │ │ ├── test_filter_layer.cpp
│ │ │ │ ├── test_flatten_layer.cpp
│ │ │ │ ├── test_gradient_based_solver.cpp
│ │ │ │ ├── test_hdf5_output_layer.cpp
│ │ │ │ ├── test_hdf5data_layer.cpp
│ │ │ │ ├── test_hinge_loss_layer.cpp
│ │ │ │ ├── test_im2col_kernel.cu
│ │ │ │ ├── test_im2col_layer.cpp
│ │ │ │ ├── test_image_data_layer.cpp
│ │ │ │ ├── test_infogain_loss_layer.cpp
│ │ │ │ ├── test_inner_product_layer.cpp
│ │ │ │ ├── test_internal_thread.cpp
│ │ │ │ ├── test_io.cpp
│ │ │ │ ├── test_layer_factory.cpp
│ │ │ │ ├── test_lrn_layer.cpp
│ │ │ │ ├── test_math_functions.cpp
│ │ │ │ ├── test_maxpool_dropout_layers.cpp
│ │ │ │ ├── test_memory_data_layer.cpp
│ │ │ │ ├── test_multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── test_mvn_layer.cpp
│ │ │ │ ├── test_net.cpp
│ │ │ │ ├── test_neuron_layer.cpp
│ │ │ │ ├── test_platform.cpp
│ │ │ │ ├── test_pooling_layer.cpp
│ │ │ │ ├── test_power_layer.cpp
│ │ │ │ ├── test_protobuf.cpp
│ │ │ │ ├── test_random_number_generator.cpp
│ │ │ │ ├── test_reduction_layer.cpp
│ │ │ │ ├── test_reshape_layer.cpp
│ │ │ │ ├── test_roi_pooling_layer.cpp
│ │ │ │ ├── test_scale_layer.cpp
│ │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── test_slice_layer.cpp
│ │ │ │ ├── test_smooth_L1_loss_layer.cpp
│ │ │ │ ├── test_softmax_layer.cpp
│ │ │ │ ├── test_softmax_with_loss_layer.cpp
│ │ │ │ ├── test_solver.cpp
│ │ │ │ ├── test_solver_factory.cpp
│ │ │ │ ├── test_split_layer.cpp
│ │ │ │ ├── test_spp_layer.cpp
│ │ │ │ ├── test_stochastic_pooling.cpp
│ │ │ │ ├── test_syncedmem.cpp
│ │ │ │ ├── test_tanh_layer.cpp
│ │ │ │ ├── test_threshold_layer.cpp
│ │ │ │ ├── test_tile_layer.cpp
│ │ │ │ ├── test_upgrade_proto.cpp
│ │ │ │ └── test_util_blas.cpp
│ │ │ └── util/
│ │ │ ├── benchmark.cpp
│ │ │ ├── blocking_queue.cpp
│ │ │ ├── cudnn.cpp
│ │ │ ├── db.cpp
│ │ │ ├── db_leveldb.cpp
│ │ │ ├── db_lmdb.cpp
│ │ │ ├── deformable_im2col.cu
│ │ │ ├── hdf5.cpp
│ │ │ ├── im2col.cpp
│ │ │ ├── im2col.cu
│ │ │ ├── insert_splits.cpp
│ │ │ ├── io.cpp
│ │ │ ├── math_functions.cpp
│ │ │ ├── math_functions.cu
│ │ │ ├── signal_handler.cpp
│ │ │ └── upgrade_proto.cpp
│ │ └── gtest/
│ │ ├── CMakeLists.txt
│ │ ├── gtest-all.cpp
│ │ ├── gtest.h
│ │ └── gtest_main.cc
│ └── tools/
│ ├── CMakeLists.txt
│ ├── caffe.cpp
│ ├── compute_image_mean.cpp
│ ├── convert_imageset.cpp
│ ├── device_query.cpp
│ ├── extra/
│ │ ├── extract_seconds.py
│ │ ├── launch_resize_and_crop_images.sh
│ │ ├── parse_log.py
│ │ ├── parse_log.sh
│ │ ├── plot_log.gnuplot.example
│ │ ├── plot_training_log.py
│ │ ├── resize_and_crop_images.py
│ │ ├── summarize.py
│ │ ├── train.log
│ │ └── train.log.train
│ ├── extract_features.cpp
│ ├── finetune_net.cpp
│ ├── net_speed_benchmark.cpp
│ ├── test_net.cpp
│ ├── train_net.cpp
│ ├── upgrade_net_proto_binary.cpp
│ ├── upgrade_net_proto_text.cpp
│ └── upgrade_solver_proto_text.cpp
├── data/
│ ├── .gitignore
│ ├── README.md
│ ├── pylintrc
│ ├── scripts/
│ │ ├── fetch_faster_rcnn_models.sh
│ │ ├── fetch_imagenet_models.sh
│ │ └── fetch_selective_search_data.sh
│ └── wget-log
├── experiments/
│ ├── README.md
│ ├── cfgs/
│ │ └── FP_Net_end2end.yml
│ └── scripts/
│ └── FP_Net_end2end.sh
├── lib/
│ ├── Makefile
│ ├── datasets/
│ │ ├── VOCdevkit-matlab-wrapper/
│ │ │ ├── get_voc_opts.m
│ │ │ ├── voc_eval.m
│ │ │ └── xVOCap.m
│ │ ├── __init__.py
│ │ ├── coco.py
│ │ ├── ds_utils.py
│ │ ├── factory.py
│ │ ├── imdb.py
│ │ ├── pascal_voc.py
│ │ ├── tools/
│ │ │ └── mcg_munge.py
│ │ └── voc_eval.py
│ ├── fast_rcnn/
│ │ ├── FP_Net_end2end.sh
│ │ ├── __init__.py
│ │ ├── bbox_transform.py
│ │ ├── config.py
│ │ ├── nms_wrapper.py
│ │ ├── test.py
│ │ └── train.py
│ ├── nms/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── cpu_nms.pyx
│ │ ├── gpu_nms.hpp
│ │ ├── gpu_nms.pyx
│ │ ├── nms_kernel.cu
│ │ └── py_cpu_nms.py
│ ├── pycocotools/
│ │ ├── UPSTREAM_REV
│ │ ├── __init__.py
│ │ ├── _mask.c
│ │ ├── _mask.pyx
│ │ ├── coco.py
│ │ ├── cocoeval.py
│ │ ├── license.txt
│ │ ├── mask.py
│ │ ├── maskApi.c
│ │ └── maskApi.h
│ ├── roi_data_layer/
│ │ ├── __init__.py
│ │ ├── layer.py
│ │ ├── minibatch.py
│ │ └── roidb.py
│ ├── rpn/
│ │ ├── __init__.py
│ │ ├── anchor_target_layer.py
│ │ ├── as_rois.py
│ │ ├── as_rois_mrcnn.py
│ │ ├── generate.py
│ │ ├── generate_anchors.py
│ │ ├── proposal_layer.py
│ │ ├── proposal_target_layer.py
│ │ └── proposal_target_layer_mrcnn.py
│ ├── setup.py
│ ├── transform/
│ │ ├── __init__.py
│ │ └── torch_image_transform_layer.py
│ └── utils/
│ ├── .gitignore
│ ├── __init__.py
│ ├── bbox.pyx
│ ├── blob.py
│ └── timer.py
├── models/
│ ├── README.md
│ └── pascal_voc/
│ └── FPN/
│ └── FP_Net_end2end/
│ ├── solver.prototxt
│ ├── solver_mergercnn.prototxt
│ ├── test.prototxt
│ ├── test_mergercnn.prototxt
│ ├── train.prototxt
│ └── train_mergercnn.prototxt
├── output/
│ └── output.md
├── test.sh
├── test_mergercnn.sh
└── tools/
├── README.md
├── _init_paths.py
├── compress_net.py
├── demo.py
├── eval_recall.py
├── reval.py
├── rpn_generate.py
├── test_net.py
├── train_faster_rcnn_alt_opt.py
├── train_net.py
└── train_svms.py
================================================
FILE CONTENTS
================================================
================================================
FILE: README.md
================================================
Feature Pyramid Network on caffe
This is an unofficial Caffe implementation of the Feature Pyramid Network, described in "Feature Pyramid Networks for Object Detection" https://arxiv.org/abs/1612.03144
# results
`The FPN(resnet50)-end2end result is implemented without OHEM, trained on pascal voc 2007 + 2012, and tested on voc 2007`
merged rcnn
|mAP@0.5|aeroplane|bicycle|bird|boat|bottle|bus|car|cat|chair|cow|
|:--:|:-------:| -----:| --:| --:|-----:|--:|--:|--:|----:|--:|
|0.788|0.8079| 0.8036| 0.8010| 0.7293|0.6743|0.8680|0.8766|0.8967|0.6122|0.8646|
|diningtable|dog |horse|motorbike|person |pottedplant|sheep|sofa|train|tv|
|----------:|:--:|:---:| -------:| -----:| -------:|----:|---:|----:|--:|
|0.7330|0.8855|0.8760| 0.8063| 0.7999| 0.5138|0.7905|0.7755|0.8637|0.7736|
shared rcnn
|mAP@0.5|aeroplane|bicycle|bird|boat|bottle|bus|car|cat|chair|cow|
|:--:|:-------:| -----:| --:| --:|-----:|--:|--:|--:|----:|--:|
|0.7833|0.8585| 0.8001| 0.7970| 0.7174|0.6522|0.8668|0.8768|0.8929|0.5842|0.8658|
|diningtable|dog |horse|motorbike|person |pottedplant|sheep|sofa|train|tv|
|----------:|:--:|:---:| -------:| -----:| -------:|----:|---:|----:|--:|
|0.7022|0.8891|0.8680| 0.7991| 0.7944| 0.5065|0.7896|0.7707|0.8697|0.7653|
# framework
merged rcnn framework
Network overview: [link](http://ethereon.github.io/netscope/#/gist/c5334efdd667ce41d540e3697de2936c)

shared rcnn
Network overview: [link](http://ethereon.github.io/netscope/#/gist/63c0281751afd1b2d50f4c2764b31a4e)

`the red and yellow are shared params`
# about the anchor size setting
In the paper the anchor setting is `Ratios: [0.5,1,2],scales :[8,]`
With this setting and P2~P6, all anchor sizes are `[32,64,128,512,1024]`, but this setting is suited to the COCO dataset, which contains many small targets.
The targets in the voc dataset, however, mostly fall in the range `[128,256,512]`.
So we design the anchor setting `Ratios: [0.5,1,2], scales: [8,16]`; this is very important for the voc dataset.
# usage
download the voc07 and voc12 datasets and `ResNet50.caffemodel`, then rename the model to `ResNet50.v2.caffemodel`
```bash
cp ResNet50.v2.caffemodel data/pretrained_model/
```
- OneDrive download: [link](https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777)
`In my experiments, the code requires ~10G of GPU memory for training and ~6G for testing.
You can choose a suitable image size, minibatch size and rcnn batch size for your GPUs.`
### compile caffe & lib
```bash
cd caffe-fpn
mkdir build
cd build
cmake ..
make -j16 all
cd lib
make
```
### train & test
shared rcnn
```bash
./experiments/scripts/FP_Net_end2end.sh 1 FPN pascal_voc
./test.sh 1 FPN pascal_voc
```
merged rcnn
```bash
./experiments/scripts/FP_Net_end2end_merge_rcnn.sh 0 FPN pascal_voc
./test_mergercnn.sh 0 FPN pascal_voc
```
0 and 1 are GPU ids.
### TODO List
- [x] all tests passed
- [x] evaluate object detection performance on voc
- [x] evaluate merged rcnn version performance on voc
### feature pyramid networks for object detection
Lin, T. Y., Dollár, P., Girshick, R., He, K., Hariharan, B., & Belongie, S. (2016). Feature pyramid networks for object detection. arXiv preprint arXiv:1612.03144.
================================================
FILE: caffe-fpn/.Doxyfile
================================================
# Doxyfile 1.8.8
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Caffe"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
# the documentation. The maximum height of the logo should not exceed 55 pixels
# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
# to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY = ./doxygen/
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise causes
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
# new page for each member. If set to NO, the documentation of a member will be
# part of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
# Fortran. In the latter case the parser tries to guess whether the code is fixed
# or free formatted code, this is the default for Fortran type files), VHDL. For
# instance to make doxygen treat .inc files as Fortran files (default is PHP),
# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibility issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word
# or globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = NO
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. When set to YES local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespaces
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO these classes will be included in the various overviews. This option has
# no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# (class|struct|union) declarations. If set to NO these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
# todo list. This list is created by putting \todo commands in the
# documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
# test list. This list is created by putting \test commands in the
# documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES the list
# will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = NO
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO doxygen will only warn about wrong or incomplete parameter
# documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = ./include/caffe \
./src/caffe
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE = ./src/caffe/test/ \
./include/caffe/test/
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER ) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML you need a header file that includes any scripts and style sheets
# that doxygen needs, which is dependent on the configuration options used (e.g.
# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra stylesheet files is of importance (e.g. the last
# stylesheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the stylesheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries 1 will produce a full collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: http://developer.apple.com/tools/xcode/), introduced with
# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler ( hhc.exe). If non-empty
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated (
# YES) or that it should be included in the master .chm file ( NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated (
# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using prerendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer ( doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer ( doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
# to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = YES
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when enabling USE_PDFLATEX this option is only used for generating
# bitmaps for formulas in the HTML output, but not in the Makefile that is
# written to the output directory.
# The default file is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. To get the times font for
# instance you can specify
# EXTRA_PACKAGES=times
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES = amsmath \
amsfonts \
xr
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
# for the replacement values of the other commands the user is referred to
# HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
# the PDF file directly from the LaTeX files. Set this option to YES to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's config
# file, i.e. a series of assignments. You only have to provide replacements,
# missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
# Definitions (see http://autogen.sf.net) file that captures the structure of
# the code including all documentation. Note that this feature is still
# experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
# in the source code. If set to NO only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES the includes files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
# class index. If set to NO only the inherited external classes will be listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
# the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
# The default file (with absolute path) is: /usr/bin/perl.
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see:
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES, the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
# graphical hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot.
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif and svg.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
# This tag requires that the tag HAVE_DOT is set to YES.
PLANTUML_JAR_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
================================================
FILE: caffe-fpn/.travis.yml
================================================
# Use a build matrix to do two builds in parallel:
# one using CMake, and one using make.
env:
matrix:
- WITH_CUDA=false WITH_CMAKE=false WITH_IO=true
- WITH_CUDA=false WITH_CMAKE=true WITH_IO=true PYTHON_VERSION=3
- WITH_CUDA=true WITH_CMAKE=false WITH_IO=true
- WITH_CUDA=true WITH_CMAKE=true WITH_IO=true
- WITH_CUDA=false WITH_CMAKE=false WITH_IO=false
- WITH_CUDA=false WITH_CMAKE=true WITH_IO=false PYTHON_VERSION=3
language: cpp
# Cache Ubuntu apt packages.
cache:
apt: true
directories:
- /home/travis/miniconda
- /home/travis/miniconda2
- /home/travis/miniconda3
compiler: gcc
before_install:
- export NUM_THREADS=4
- export SCRIPTS=./scripts/travis
- export CONDA_DIR="/home/travis/miniconda$PYTHON_VERSION"
install:
- sudo -E $SCRIPTS/travis_install.sh
before_script:
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64:$CONDA_DIR/lib
- export PATH=$CONDA_DIR/bin:$PATH
- if ! $WITH_CMAKE; then $SCRIPTS/travis_setup_makefile_config.sh; fi
script: $SCRIPTS/travis_build_and_test.sh
notifications:
# Emails are sent to the committer's git-configured email address by default,
# but only if they have access to the repository. To enable Travis on your
# public fork of Caffe, just go to travis-ci.org and flip the switch on for
# your Caffe fork. To configure your git email address, use:
# git config --global user.email me@example.com
email:
on_success: always
on_failure: always
# IRC notifications disabled by default.
# Uncomment next 5 lines to send notifications to chat.freenode.net#caffe
# irc:
# channels:
# - "chat.freenode.net#caffe"
# template:
# - "%{repository}/%{branch} (%{commit} - %{author}): %{message}"
================================================
FILE: caffe-fpn/CMakeLists.txt
================================================
# Top-level build script for the Caffe fork bundled with FPN.
# Ordering matters throughout: policies must precede project(), options must
# be declared before Dependencies.cmake reads them, and config generation
# must follow dependency detection.
cmake_minimum_required(VERSION 2.8.7)
# CMP0046 NEW: error (instead of silently ignoring) when add_dependencies()
# names a target that does not exist.
if(POLICY CMP0046)
cmake_policy(SET CMP0046 NEW)
endif()
# CMP0054 NEW: only dereference unquoted if() arguments as variables, so
# quoted strings compare literally.
if(POLICY CMP0054)
cmake_policy(SET CMP0054 NEW)
endif()
# ---[ Caffe project
project(Caffe C CXX)
# ---[ Caffe version
set(CAFFE_TARGET_VERSION "1.0.0-rc3")
set(CAFFE_TARGET_SOVERSION "1.0.0-rc3")
add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION})
# ---[ Using cmake scripts and modules
# Project-local Find*.cmake modules live under cmake/Modules.
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
include(ExternalProject)
include(cmake/Utils.cmake)
include(cmake/Targets.cmake)
include(cmake/Misc.cmake)
include(cmake/Summary.cmake)
include(cmake/ConfigGen.cmake)
# ---[ Options
# caffe_option() (defined in Utils.cmake) supports conditional defaults via
# the trailing "IF <condition>" clause; these must be set before
# Dependencies.cmake is included, which consumes them.
caffe_option(CPU_ONLY  "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
caffe_option(BUILD_python "Build Python wrapper" ON)
set(python_version "2" CACHE STRING "Specify which Python version to use")
caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE)
caffe_option(BUILD_docs   "Build documentation" ON IF UNIX OR APPLE)
caffe_option(BUILD_python_layer "Build the Caffe Python layer" ON)
caffe_option(USE_OPENCV "Build with OpenCV support" ON)
caffe_option(USE_LEVELDB "Build with levelDB" ON)
caffe_option(USE_LMDB "Build with lmdb" ON)
caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
# ---[ Dependencies
include(cmake/Dependencies.cmake)
# ---[ Flags
if(UNIX OR APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
endif()
# NOTE(review): USE_libstdcpp is not declared as an option above — presumably
# it is expected to be passed on the command line (-DUSE_libstdcpp=ON); verify.
if(USE_libstdcpp)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)")
endif()
add_definitions(-DGTEST_USE_OWN_TR1_TUPLE)
# ---[ Warnings
caffe_warnings_disable(CMAKE_CXX_FLAGS -Wno-sign-compare -Wno-uninitialized)
# ---[ Config generation
configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_config.h")
# ---[ Includes
set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include)
include_directories(${Caffe_INCLUDE_DIR} ${PROJECT_BINARY_DIR})
include_directories(BEFORE src) # This is needed for gtest.
# ---[ Subdirectories
add_subdirectory(src/gtest)
add_subdirectory(src/caffe)
add_subdirectory(tools)
add_subdirectory(examples)
add_subdirectory(python)
add_subdirectory(matlab)
add_subdirectory(docs)
# ---[ Linter target
add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake)
# ---[ pytest target
# Runs the pycaffe unit tests; depends on the pycaffe target so the Python
# extension is built first.
if(BUILD_python)
add_custom_target(pytest COMMAND python${python_version} -m unittest discover -s caffe/test WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/python )
add_dependencies(pytest pycaffe)
endif()
# ---[ Configuration summary
caffe_print_configuration_summary()
# ---[ Export configs generation
caffe_generate_export_configs()
================================================
FILE: caffe-fpn/CONTRIBUTING.md
================================================
# Contributing
## Issues
Specific Caffe design and development issues, bugs, and feature requests are maintained by GitHub Issues.
_Please do not post usage, installation, or modeling questions, or other requests for help to Issues._
Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead. This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
When reporting a bug, it's most helpful to provide the following information, where applicable:
* What steps reproduce the bug?
* Can you reproduce the bug using the latest [master](https://github.com/BVLC/caffe/tree/master), compiled with the `DEBUG` make option?
* What hardware and operating system/distribution are you running?
* If the bug is a crash, provide the backtrace (usually printed by Caffe; always obtainable with `gdb`).
Try to give your issue a title that is succinct and specific. The devs will rename issues as needed to keep track of them.
## Pull Requests
Caffe welcomes all contributions.
See the [contributing guide](http://caffe.berkeleyvision.org/development.html) for details.
Briefly: read commit by commit, a PR should tell a clean, compelling story of _one_ improvement to Caffe. In particular:
* A PR should do one clear thing that obviously improves Caffe, and nothing more. Making many smaller PRs is better than making one large PR; review effort is superlinear in the amount of code involved.
* Similarly, each commit should be a small, atomic change representing one step in development. PRs should be made of many commits where appropriate.
* Please do rewrite PR history to be clean rather than chronological. Within-PR bugfixes, style cleanups, reversions, etc. should be squashed and should not appear in merged PR history.
* Anything nonobvious from the code should be explained in comments, commit messages, or the PR description, as appropriate.
================================================
FILE: caffe-fpn/CONTRIBUTORS.md
================================================
# Contributors
Caffe is developed by a core set of BVLC members and the open-source community.
We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)!
**For the detailed history of contributions** of a given file, try
git blame file
to see line-by-line credits and
git log --follow file
to see the change log even across renames and rewrites.
Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details.
**Copyright** is held by the original contributor according to the versioning history; see LICENSE.
================================================
FILE: caffe-fpn/INSTALL.md
================================================
# Installation
See http://caffe.berkeleyvision.org/installation.html for the latest
installation instructions.
Check the users group in case you need help:
https://groups.google.com/forum/#!forum/caffe-users
================================================
FILE: caffe-fpn/LICENSE
================================================
--------------------------START OF THIRD PARTY NOTICE--------------------------
Microsoft licenses this Third Party IP to you under the licensing
terms for the Microsoft product. Microsoft reserves all other rights
not expressly granted under this agreement, whether by implication,
estoppel or otherwise.
Caffe
Copyrights can be found here: https://github.com/BVLC/caffe/blob/master/LICENSE
Provided for Informational Purposes Only
BSD License
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
*AS IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------END OF THIRD PARTY NOTICE---------------------------
Fast R-CNN
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: caffe-fpn/Makefile
================================================
PROJECT := caffe
CONFIG_FILE := Makefile.config
# Explicitly check for the config file, otherwise make -k will proceed anyway.
ifeq ($(wildcard $(CONFIG_FILE)),)
$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.)
endif
include $(CONFIG_FILE)
# Remember the user-facing build dir name; it becomes a symlink to the
# active (release or debug) build tree (see the .linked rule below).
BUILD_DIR_LINK := $(BUILD_DIR)
ifeq ($(RELEASE_BUILD_DIR),)
RELEASE_BUILD_DIR := .$(BUILD_DIR)_release
endif
ifeq ($(DEBUG_BUILD_DIR),)
DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug
endif
DEBUG ?= 0
# Pick the active tree; OTHER_BUILD_DIR is kept so 'clean' can remove the
# inactive configuration as well.
ifeq ($(DEBUG), 1)
BUILD_DIR := $(DEBUG_BUILD_DIR)
OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)
else
BUILD_DIR := $(RELEASE_BUILD_DIR)
OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)
endif
# All of the directories containing code.
SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \
\( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)
# The target shared library name
LIBRARY_NAME := $(PROJECT)
LIB_BUILD_DIR := $(BUILD_DIR)/lib
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a
DYNAMIC_VERSION_MAJOR := 1
DYNAMIC_VERSION_MINOR := 0
DYNAMIC_VERSION_REVISION := 0-rc3
DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
#DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR)
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
DYNAMIC_NAME := $(LIB_BUILD_DIR)/$(DYNAMIC_VERSIONED_NAME_SHORT)
COMMON_FLAGS += -DCAFFE_VERSION=$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
##############################
# Get all source files
##############################
# CXX_SRCS are the source files excluding the test ones.
CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")
# CU_SRCS are the cuda source files
CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")
# TEST_SRCS are the test source files
TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp
TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")
TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))
TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")
GTEST_SRC := src/gtest/gtest-all.cpp
# TOOL_SRCS are the source files for the tool binaries
TOOL_SRCS := $(shell find tools -name "*.cpp")
# EXAMPLE_SRCS are the source files for the example binaries
EXAMPLE_SRCS := $(shell find examples -name "*.cpp")
# BUILD_INCLUDE_DIR contains any generated header files we want to include.
BUILD_INCLUDE_DIR := $(BUILD_DIR)/src
# PROTO_SRCS are the protocol buffer definitions
PROTO_SRC_DIR := src/$(PROJECT)/proto
PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)
# PROTO_BUILD_DIR will contain the .cc and obj files generated from
# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files
PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)
PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto
# NONGEN_CXX_SRCS includes all source/header files except those generated
# automatically (e.g., by proto).
NONGEN_CXX_SRCS := $(shell find \
src/$(PROJECT) \
include/$(PROJECT) \
python/$(PROJECT) \
matlab/+$(PROJECT)/private \
examples \
tools \
-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")
# cpplint produces one report per source file under LINT_OUTPUT_DIR.
LINT_SCRIPT := scripts/cpp_lint.py
LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint
LINT_EXT := lint.txt
LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))
EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)
NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)
# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)
PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp
PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so
PY$(PROJECT)_HXX := include/$(PROJECT)/layers/python_layer.hpp
# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT)
MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp
ifneq ($(MATLAB_DIR),)
MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)
endif
MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT)
##############################
# Derive generated files
##############################
# The generated files for protocol buffers
PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \
$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \
$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})
PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto
PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py
PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \
$(PY_PROTO_BUILD_DIR)/$(notdir $(file)))
# The objects corresponding to the source files
# These objects will be linked into the final shared library, so we
# exclude the tool, example, and test objects.
CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})
CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})
PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}
OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)
# tool, example, and test objects
TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})
TOOL_BUILD_DIR := $(BUILD_DIR)/tools
TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test
TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test
TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})
TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})
TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)
GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})
EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})
# Output files for automatic dependency generation
DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \
${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}
# tool, example, and test bins
TOOL_BINS := ${TOOL_OBJS:.o=.bin}
EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}
# symlinks to tool bins without the ".bin" extension
TOOL_BIN_LINKS := ${TOOL_BINS:.bin=}
# Put the test binaries in build/test for convenience.
TEST_BIN_DIR := $(BUILD_DIR)/test
TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
$(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))
TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
$(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))
TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)
# TEST_ALL_BIN is the test binary that links caffe dynamically.
TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin
##############################
# Derive compiler warning dump locations
##############################
# Each object file gets a side-car <obj>.o.warnings.txt capturing compiler
# stderr; 'make warn' aggregates them (see the warn target below).
WARNS_EXT := warnings.txt
CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})
CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})
TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})
EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})
ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)
ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)
ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)
EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)
NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)
##############################
# Derive include and lib directories
##############################
CUDA_INCLUDE_DIR := $(CUDA_DIR)/include
CUDA_LIB_DIR :=
# add <cuda>/lib64 only if it exists
ifneq ("$(wildcard $(CUDA_DIR)/lib64)","")
CUDA_LIB_DIR += $(CUDA_DIR)/lib64
endif
CUDA_LIB_DIR += $(CUDA_DIR)/lib
INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include
ifneq ($(CPU_ONLY), 1)
INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)
LIBRARY_DIRS += $(CUDA_LIB_DIR)
LIBRARIES := cudart cublas curand
endif
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5
# handle IO dependencies
USE_LEVELDB ?= 1
USE_LMDB ?= 1
USE_OPENCV ?= 1
ifeq ($(USE_LEVELDB), 1)
LIBRARIES += leveldb snappy
endif
ifeq ($(USE_LMDB), 1)
LIBRARIES += lmdb
endif
ifeq ($(USE_OPENCV), 1)
LIBRARIES += opencv_core opencv_highgui opencv_imgproc
ifeq ($(OPENCV_VERSION), 3)
LIBRARIES += opencv_imgcodecs
endif
endif
# Default Python link libraries; may be overridden in Makefile.config.
PYTHON_LIBRARIES ?= boost_python python2.7
WARNINGS := -Wall -Wno-sign-compare
##############################
# Set build directories
##############################
DISTRIBUTE_DIR ?= distribute
DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib
DIST_ALIASES := dist
ifneq ($(strip $(DISTRIBUTE_DIR)),distribute)
DIST_ALIASES += distribute
endif
ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \
$(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \
$(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \
$(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))
##############################
# Set directory for Doxygen-generated documentation
##############################
DOXYGEN_CONFIG_FILE ?= ./.Doxyfile
# should be the same as OUTPUT_DIRECTORY in the .Doxyfile
DOXYGEN_OUTPUT_DIR ?= ./doxygen
DOXYGEN_COMMAND ?= doxygen
# All the files that might have Doxygen documentation.
DOXYGEN_SOURCES := $(shell find \
src/$(PROJECT) \
include/$(PROJECT) \
python/ \
matlab/ \
examples \
tools \
-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \
-name "*.py" -or -name "*.m")
DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)
##############################
# Configure build
##############################
# Determine platform
UNAME := $(shell uname -s)
ifeq ($(UNAME), Linux)
LINUX := 1
else ifeq ($(UNAME), Darwin)
OSX := 1
endif
# Linux
ifeq ($(LINUX), 1)
CXX ?= /usr/bin/g++
GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)
# older versions of gcc are too dumb to build boost with -Wuninitalized
ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1)
WARNINGS += -Wno-uninitialized
endif
# boost::thread is reasonably called boost_thread (compare OS X)
# We will also explicitly add stdc++ to the link target.
LIBRARIES += boost_thread stdc++
VERSIONFLAGS += -Wl,-soname,$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../lib
endif
# OS X:
# clang++ instead of g++
# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0
ifeq ($(OSX), 1)
CXX := /usr/bin/clang++
ifneq ($(CPU_ONLY), 1)
CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d')
ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1)
CXXFLAGS += -stdlib=libstdc++
LINKFLAGS += -stdlib=libstdc++
endif
# clang throws this warning for cuda headers
WARNINGS += -Wno-unneeded-internal-declaration
endif
# gtest needs to use its own tuple to not conflict with clang
COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1
# boost::thread is called boost_thread-mt to mark multithreading on OS X
LIBRARIES += boost_thread-mt
# we need to explicitly ask for the rpath to be obeyed
DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so
ORIGIN := @loader_path
VERSIONFLAGS += -Wl,-install_name,$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../../build/lib
else
# On Linux $ORIGIN is expanded by the dynamic linker at run time;
# escaped here so make and the shell pass it through literally.
ORIGIN := \$$ORIGIN
endif
# Custom compiler
ifdef CUSTOM_CXX
CXX := $(CUSTOM_CXX)
endif
# Static linking
ifneq (,$(findstring clang++,$(CXX)))
STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)
else ifneq (,$(findstring g++,$(CXX)))
STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive
else
# The following line must not be indented with a tab, since we are not inside a target
$(error Cannot static link with the $(CXX) compiler)
endif
# Debugging
ifeq ($(DEBUG), 1)
COMMON_FLAGS += -DDEBUG -g -O0
NVCCFLAGS += -G
else
COMMON_FLAGS += -DNDEBUG -O2
endif
# cuDNN acceleration configuration.
ifeq ($(USE_CUDNN), 1)
LIBRARIES += cudnn
COMMON_FLAGS += -DUSE_CUDNN
endif
# configure IO libraries
ifeq ($(USE_OPENCV), 1)
COMMON_FLAGS += -DUSE_OPENCV
endif
ifeq ($(USE_LEVELDB), 1)
COMMON_FLAGS += -DUSE_LEVELDB
endif
ifeq ($(USE_LMDB), 1)
COMMON_FLAGS += -DUSE_LMDB
ifeq ($(ALLOW_LMDB_NOLOCK), 1)
COMMON_FLAGS += -DALLOW_LMDB_NOLOCK
endif
endif
# CPU-only configuration
ifeq ($(CPU_ONLY), 1)
OBJS := $(PROTO_OBJS) $(CXX_OBJS)
TEST_OBJS := $(TEST_CXX_OBJS)
TEST_BINS := $(TEST_CXX_BINS)
ALL_WARNS := $(ALL_CXX_WARNS)
TEST_FILTER := --gtest_filter="-*GPU*"
COMMON_FLAGS += -DCPU_ONLY
endif
# Python layer support
ifeq ($(WITH_PYTHON_LAYER), 1)
COMMON_FLAGS += -DWITH_PYTHON_LAYER
LIBRARIES += $(PYTHON_LIBRARIES)
endif
# BLAS configuration (default = ATLAS)
BLAS ?= atlas
ifeq ($(BLAS), mkl)
# MKL
LIBRARIES += mkl_rt
COMMON_FLAGS += -DUSE_MKL
MKL_DIR ?= /opt/intel/mkl
BLAS_INCLUDE ?= $(MKL_DIR)/include
BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64
else ifeq ($(BLAS), open)
# OpenBLAS
LIBRARIES += openblas
else
# ATLAS
ifeq ($(LINUX), 1)
ifeq ($(BLAS), atlas)
# Linux simply has cblas and atlas
LIBRARIES += cblas atlas
endif
else ifeq ($(OSX), 1)
# OS X packages atlas as the vecLib framework
LIBRARIES += cblas
# 10.10 has accelerate while 10.9 has veclib
XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep 'version' | sed 's/[^0-9]*\([0-9]\).*/\1/')
XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1)
ifeq ($(XCODE_CLT_GEQ_6), 1)
BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
LDFLAGS += -framework Accelerate
else
BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/
LDFLAGS += -framework vecLib
endif
endif
endif
INCLUDE_DIRS += $(BLAS_INCLUDE)
LIBRARY_DIRS += $(BLAS_LIB)
LIBRARY_DIRS += $(LIB_BUILD_DIR)
# Automatic dependency generation (nvcc is handled separately)
CXXFLAGS += -MMD -MP
# Complete build flags.
COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
# mex may invoke an older gcc that is too liberal with -Wuninitalized
MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized
LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
USE_PKG_CONFIG ?= 0
ifeq ($(USE_PKG_CONFIG), 1)
PKG_CONFIG := $(shell pkg-config opencv --libs)
else
PKG_CONFIG :=
endif
LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \
$(foreach library,$(LIBRARIES),-l$(library))
PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))
# 'superclean' target recursively* deletes all files ending with an extension
# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older
# versions of Caffe that do not place all generated files in a location known
# to the 'clean' target.
#
# 'supercleanlist' will list the files to be deleted by make superclean.
#
# * Recursive with the exception that symbolic links are never followed, per the
# default behavior of 'find'.
SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo
# Set the sub-targets of the 'everything' target.
EVERYTHING_TARGETS := all py$(PROJECT) test warn lint
# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.
ifneq ($(MATLAB_DIR),)
EVERYTHING_TARGETS += mat$(PROJECT)
endif
##############################
# Define build targets
##############################
.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \
py mat py$(PROJECT) mat$(PROJECT) proto runtest \
superclean supercleanlist supercleanfiles warn everything
# 'all' builds the library plus the tool and example binaries.
all: lib tools examples
lib: $(STATIC_NAME) $(DYNAMIC_NAME)
everything: $(EVERYTHING_TARGETS)
# Count lines of code with cloc, using the project language definitions.
linecount:
cloc --read-lang-def=$(PROJECT).cloc \
src/$(PROJECT) include/$(PROJECT) tools examples \
python matlab
lint: $(EMPTY_LINT_REPORT)
lintclean:
@ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)
docs: $(DOXYGEN_OUTPUT_DIR)
@ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen
$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)
$(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)
# Aggregate the per-file lint reports; fail the build if any is non-empty.
$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)
@ cat $(LINT_OUTPUTS) > $@
@ if [ -s "$@" ]; then \
cat $@; \
mv $@ $(NONEMPTY_LINT_REPORT); \
echo "Found one or more lint errors."; \
exit 1; \
fi; \
$(RM) $(NONEMPTY_LINT_REPORT); \
echo "No lint errors!";
# Run cpplint per source file, stripping its "all clear" chatter so a clean
# file yields an empty report.
$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)
@ mkdir -p $(dir $@)
@ python $(LINT_SCRIPT) $< 2>&1 \
| grep -v "^Done processing " \
| grep -v "^Total errors found: 0" \
> $@ \
|| true
test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)
tools: $(TOOL_BINS) $(TOOL_BIN_LINKS)
examples: $(EXAMPLE_BINS)
py$(PROJECT): py
py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)
# Link the pycaffe extension module (_caffe.so) against the shared libcaffe.
# Fix: the original recipe passed "-o $@" twice on the same command line;
# the duplicate is dropped (the compiler only honors the last -o anyway).
$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@ $<
$(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \
$(LINKFLAGS) -l$(LIBRARY_NAME) $(PYTHON_LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../../build/lib
mat$(PROJECT): mat
mat: $(MAT$(PROJECT)_SO)
# Build the matcaffe MEX binary; requires MATLAB_DIR in Makefile.config.
$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
@ if [ -z "$(MATLAB_DIR)" ]; then \
echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \
"to build mat$(PROJECT)."; \
exit 1; \
fi
@ echo MEX $<
$(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \
CXX="$(CXX)" \
CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \
CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@
@ if [ -f "$(PROJECT)_.d" ]; then \
mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \
fi
# NOTE(review): the bare caffe-tool invocation below runs the tool with no
# arguments before the unit tests — presumably a link/smoke check; confirm
# it exits 0 in this fork, otherwise 'make runtest' aborts here.
runtest: $(TEST_ALL_BIN)
$(TOOL_BUILD_DIR)/caffe
$(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)
pytest: py
cd python; python -m unittest discover -s caffe/test
mattest: mat
cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()'
warn: $(EMPTY_WARN_REPORT)
# Aggregate the per-object warning dumps; fail if any warnings were emitted.
$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)
@ cat $(ALL_WARNS) > $@
@ if [ -s "$@" ]; then \
cat $@; \
mv $@ $(NONEMPTY_WARN_REPORT); \
echo "Compiler produced one or more warnings."; \
exit 1; \
fi; \
$(RM) $(NONEMPTY_WARN_REPORT); \
echo "No compiler warnings!";
# Warning dumps are produced as a side effect of compiling the object file.
$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o
$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked
# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link
# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it
# exists and $(DEBUG) is toggled later.
$(BUILD_DIR)/.linked:
@ mkdir -p $(BUILD_DIR)
@ $(RM) $(OTHER_BUILD_DIR)/.linked
@ $(RM) -r $(BUILD_DIR_LINK)
@ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)
@ touch $@
$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)
@ mkdir -p $@
# Link the versioned shared library and refresh the unversioned symlink.
$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@ echo LD -o $@
$(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS)
@ cd $(BUILD_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)
$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@ echo AR -o $@
$(Q)ar rcs $@ $(OBJS)
# Compile C++ sources, teeing compiler stderr into the per-object warning dump.
$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)
@ echo CXX $<
$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \
| $(PROTO_BUILD_DIR)
@ echo CXX $<
$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
# Compile CUDA sources: first emit the .d dependency file, then the object.
$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)
@ echo NVCC $<
$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \
-odir $(@D)
$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
| $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo CXX/LD -o $@ $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib
$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \
$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo LD $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib
$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \
$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo LD $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib
# Target for extension-less symlinks to tool binaries with extension '*.bin'.
$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)
@ $(RM) $@
@ ln -s $(notdir $<) $@
$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@
$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../lib
$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@
$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../../lib
proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)
$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \
$(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)
@ echo PROTOC $<
$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<
$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \
$(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)
@ echo PROTOC \(python\) $<
$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<
$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
touch $(PY_PROTO_INIT)
clean:
@- $(RM) -rf $(ALL_BUILD_DIRS)
@- $(RM) -rf $(OTHER_BUILD_DIR)
@- $(RM) -rf $(BUILD_DIR_LINK)
@- $(RM) -rf $(DISTRIBUTE_DIR)
@- $(RM) $(PY$(PROJECT)_SO)
@- $(RM) $(MAT$(PROJECT)_SO)
# Collect every file matching SUPERCLEAN_EXTS (excluding ./data) into
# SUPERCLEAN_FILES for the superclean/supercleanlist targets.
supercleanfiles:
$(eval SUPERCLEAN_FILES := $(strip \
$(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \
-not -path './data/*'))))
supercleanlist: supercleanfiles
@ \
if [ -z "$(SUPERCLEAN_FILES)" ]; then \
echo "No generated files found."; \
else \
echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
fi
superclean: clean supercleanfiles
@ \
if [ -z "$(SUPERCLEAN_FILES)" ]; then \
echo "No generated files found."; \
else \
echo "Deleting the following generated files:"; \
echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
$(RM) $(SUPERCLEAN_FILES); \
fi
$(DIST_ALIASES): $(DISTRIBUTE_DIR)
$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)
# add proto
cp -r src/caffe/proto $(DISTRIBUTE_DIR)/
# add include
cp -r include $(DISTRIBUTE_DIR)/
mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto
cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto
# add tool and example binaries
cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin
cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin
# add libraries
cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib
install -m 644 $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib
cd $(DISTRIBUTE_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)
# add python - it's not the standard way, indeed...
cp -r python $(DISTRIBUTE_DIR)/python
# Pull in auto-generated .d dependency files; '-' suppresses errors when
# they do not exist yet (first build).
-include $(DEPS)
================================================
FILE: caffe-fpn/Makefile.config
================================================
## Refer to http://caffe.berkeleyvision.org/installation.html
# Contributions simplifying and improving our build system are welcome!
# cuDNN acceleration switch (uncomment to build with cuDNN).
USE_CUDNN := 1
# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1
# uncomment to disable IO dependencies and corresponding data layers
# USE_OPENCV := 0
# USE_LEVELDB := 0
# USE_LMDB := 0
# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
# possibility of simultaneous read and write
# ALLOW_LMDB_NOLOCK := 1
# Uncomment if you're using OpenCV 3
OPENCV_VERSION := 3
# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++
# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr
# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
-gencode arch=compute_30,code=sm_30 \
-gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_50,code=sm_50 \
-gencode arch=compute_50,code=compute_50
# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBlas
BLAS := atlas
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
# BLAS_INCLUDE := /path/to/your/blas
# BLAS_LIB := /path/to/your/blas
# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib
# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app
# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
# (Both lines below are commented out explicitly; the original relied on a
# trailing backslash to continue the comment onto the second line.)
# PYTHON_INCLUDE := /usr/include/python2.7
#   /usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
ANACONDA_HOME := /usr/local/anaconda2
# No trailing backslash after the last entry: the original one continued the
# assignment onto the following comment line, which is fragile.
PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
$(ANACONDA_HOME)/include/python2.7 \
$(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include
# Uncomment to use Python 3 (default is Python 2)
# PYTHON_LIBRARIES := boost_python3 python3.5m
# PYTHON_INCLUDE := /usr/include/python3.5m \
# /usr/lib/python3.5/dist-packages/numpy/core/include
# We need to be able to find libpythonX.X.so or .dylib.
# PYTHON_LIB := /usr/lib
PYTHON_LIB := $(ANACONDA_HOME)/lib
# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib
# Uncomment to support layers written in Python (will link against Python libs)
WITH_PYTHON_LAYER := 1
# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib
# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1
# N.B. both build and distribute dirs are cleared on `make clean`
BUILD_DIR := build
DISTRIBUTE_DIR := distribute
# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171
# DEBUG := 1
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0
# enable pretty build (comment to see full commands)
Q ?= @
================================================
FILE: caffe-fpn/Makefile.config.example
================================================
## Refer to http://caffe.berkeleyvision.org/installation.html
# Contributions simplifying and improving our build system are welcome!
# cuDNN acceleration switch (uncomment to build with cuDNN).
# USE_CUDNN := 1
# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1
# uncomment to disable IO dependencies and corresponding data layers
# USE_OPENCV := 0
# USE_LEVELDB := 0
# USE_LMDB := 0
# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
# possibility of simultaneous read and write
# ALLOW_LMDB_NOLOCK := 1
# Uncomment if you're using OpenCV 3
# OPENCV_VERSION := 3
# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++
# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr
# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
-gencode arch=compute_30,code=sm_30 \
-gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_50,code=sm_50 \
-gencode arch=compute_50,code=compute_50
# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBlas
BLAS := atlas
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
# BLAS_INCLUDE := /path/to/your/blas
# BLAS_LIB := /path/to/your/blas
# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib
# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app
# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
PYTHON_INCLUDE := /usr/include/python2.7 \
/usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
# ANACONDA_HOME := $(HOME)/anaconda
# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
# $(ANACONDA_HOME)/include/python2.7 \
# $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \
# Uncomment to use Python 3 (default is Python 2)
# PYTHON_LIBRARIES := boost_python3 python3.5m
# PYTHON_INCLUDE := /usr/include/python3.5m \
# /usr/lib/python3.5/dist-packages/numpy/core/include
# We need to be able to find libpythonX.X.so or .dylib.
PYTHON_LIB := /usr/lib
# PYTHON_LIB := $(ANACONDA_HOME)/lib
# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib
# Uncomment to support layers written in Python (will link against Python libs)
# WITH_PYTHON_LAYER := 1
# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib
# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1
BUILD_DIR := build
DISTRIBUTE_DIR := distribute
# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171
# DEBUG := 1
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0
# enable pretty build (comment to see full commands)
Q ?= @
================================================
FILE: caffe-fpn/README.md
================================================
Deformable convolution network on Caffe.
================================================
FILE: caffe-fpn/caffe.cloc
================================================
Bourne Shell
filter remove_matches ^\s*#
filter remove_inline #.*$
extension sh
script_exe sh
C
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension c
extension ec
extension pgc
C++
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension C
extension cc
extension cpp
extension cxx
extension pcc
C/C++ Header
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension H
extension h
extension hh
extension hpp
CUDA
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension cu
Python
filter remove_matches ^\s*#
filter docstring_to_C
filter call_regexp_common C
filter remove_inline #.*$
extension py
make
filter remove_matches ^\s*#
filter remove_inline #.*$
extension Gnumakefile
extension Makefile
extension am
extension gnumakefile
extension makefile
filename Gnumakefile
filename Makefile
filename gnumakefile
filename makefile
script_exe make
================================================
FILE: caffe-fpn/cmake/ConfigGen.cmake
================================================
################################################################################################
# Helper function to fetch caffe includes which will be passed to dependent projects
# Usage:
# caffe_get_current_includes(<includes_list_variable>)
# Fetches the current directory's include directories (as absolute paths),
# strips the build-dir include added for caffe_config.h and any python/numpy
# includes, de-duplicates, and returns the list in <includes_variable>.
# Intended for export to dependent projects via CaffeConfig.cmake.
function(caffe_get_current_includes includes_variable)
  get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES)
  caffe_convert_absolute_paths(current_includes)

  # remove at most one ${PROJECT_BINARY_DIR} include added for caffe_config.h
  list(FIND current_includes ${PROJECT_BINARY_DIR} __index)
  # BUGFIX: list(FIND) returns -1 when the item is absent, and
  # list(REMOVE_AT) fails with a hard error on index -1 — guard it.
  if(NOT __index EQUAL -1)
    list(REMOVE_AT current_includes ${__index})
  endif()

  # removing numpy includes (since not required for client libs)
  set(__toremove "")
  foreach(__i ${current_includes})
    if(${__i} MATCHES "python")
      list(APPEND __toremove ${__i})
    endif()
  endforeach()
  if(__toremove)
    list(REMOVE_ITEM current_includes ${__toremove})
  endif()

  caffe_list_unique(current_includes)
  set(${includes_variable} ${current_includes} PARENT_SCOPE)
endfunction()
################################################################################################
# Helper function to get all list items that begin with given prefix
# Usage:
# caffe_get_items_with_prefix(<prefix> <list_variable> <output_variable>)
# Returns (in <output_variable>) every entry of the list named by
# <list_variable> whose value starts with the string <prefix>.
function(caffe_get_items_with_prefix prefix list_variable output_variable)
  set(__matched "")
  foreach(__item ${${list_variable}})
    # Anchored match: the item must begin with the prefix.
    if(__item MATCHES "^${prefix}.*")
      list(APPEND __matched ${__item})
    endif()
  endforeach()
  set(${output_variable} ${__matched} PARENT_SCOPE)
endfunction()
################################################################################################
# Function for generation Caffe build- and install- tree export config files
# Usage:
# caffe_generate_export_configs()
# Generates and installs the CaffeConfig.cmake / CaffeTargets.cmake export
# files for both the build tree and the install tree so that dependent
# projects can use find_package(Caffe).
function(caffe_generate_export_configs)
  set(install_cmake_suffix "share/Caffe")

  # ---[ Configure build-tree CaffeConfig.cmake file ]---
  caffe_get_current_includes(Caffe_INCLUDE_DIRS)

  # Collect the compile definitions dependent projects must replicate.
  set(Caffe_DEFINITIONS "")
  if(NOT HAVE_CUDA)
    set(HAVE_CUDA FALSE)
    list(APPEND Caffe_DEFINITIONS -DCPU_ONLY)
  endif()

  if(USE_OPENCV)
    list(APPEND Caffe_DEFINITIONS -DUSE_OPENCV)
  endif()

  if(USE_LMDB)
    list(APPEND Caffe_DEFINITIONS -DUSE_LMDB)
    if(ALLOW_LMDB_NOLOCK)
      list(APPEND Caffe_DEFINITIONS -DALLOW_LMDB_NOLOCK)
    endif()
  endif()

  if(USE_LEVELDB)
    list(APPEND Caffe_DEFINITIONS -DUSE_LEVELDB)
  endif()

  if(NOT HAVE_CUDNN)
    set(HAVE_CUDNN FALSE)
  else()
    # BUGFIX: this used to append to the bare variable `DEFINITIONS`,
    # so -DUSE_CUDNN was silently dropped from the exported config.
    list(APPEND Caffe_DEFINITIONS -DUSE_CUDNN)
  endif()

  if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl")
    list(APPEND Caffe_DEFINITIONS -DUSE_MKL)
  endif()

  configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/CaffeConfig.cmake" @ONLY)

  # Add targets to the build-tree export set
  export(TARGETS caffe proto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake")
  export(PACKAGE Caffe)

  # ---[ Configure install-tree CaffeConfig.cmake file ]---
  # remove source and build dir includes
  caffe_get_items_with_prefix(${PROJECT_SOURCE_DIR} Caffe_INCLUDE_DIRS __insource)
  caffe_get_items_with_prefix(${PROJECT_BINARY_DIR} Caffe_INCLUDE_DIRS __inbinary)
  list(REMOVE_ITEM Caffe_INCLUDE_DIRS ${__insource} ${__inbinary})

  # add `install` include folder (resolved relative to the installed
  # CaffeConfig.cmake location at find_package time)
  set(lines
     "get_filename_component(__caffe_include \"\${Caffe_CMAKE_DIR}/../../include\" ABSOLUTE)\n"
     "list(APPEND Caffe_INCLUDE_DIRS \${__caffe_include})\n"
     "unset(__caffe_include)\n")
  string(REPLACE ";" "" Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND ${lines})

  configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" @ONLY)

  # Install the CaffeConfig.cmake and export set to use with install-tree
  install(FILES "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" DESTINATION ${install_cmake_suffix})
  install(EXPORT CaffeTargets DESTINATION ${install_cmake_suffix})

  # ---[ Configure and install version file ]---
  # TODO: Lines below are commented because Caffe does't declare its version in headers.
  # When the declarations are added, modify `caffe_extract_caffe_version()` macro and uncomment

  # configure_file(cmake/Templates/CaffeConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" @ONLY)
  # install(FILES "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" DESTINATION ${install_cmake_suffix})
endfunction()
================================================
FILE: caffe-fpn/cmake/Cuda.cmake
================================================
# Skip all CUDA configuration when the user requested a CPU-only build.
if(CPU_ONLY)
return()
endif()
# Known NVIDIA GPU architectures Caffe can be compiled for.
# This list will be used for CUDA_ARCH_NAME = All option
# ("21(20)" means: build sm_21 binaries from compute_20 PTX).
set(Caffe_known_gpu_archs "20 21(20) 30 35 50")
################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
# caffe_detect_installed_gpus(out_variable)
# Detects the compute capabilities of the CUDA devices visible on the build
# machine by compiling and running a tiny probe program with nvcc.
# The space-separated result (e.g. "3.5 5.0") is cached in
# CUDA_gpu_detect_output; on any failure, falls back to Caffe_known_gpu_archs.
function(caffe_detect_installed_gpus out_variable)
if(NOT CUDA_gpu_detect_output)
set(__cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
# Probe source: prints "major.minor " for every device; exits non-zero
# when the runtime reports an error or no devices.
file(WRITE ${__cufile} ""
"#include <cstdio>\n"
"int main()\n"
"{\n"
" int count = 0;\n"
" if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
" if (count == 0) return -1;\n"
" for (int device = 0; device < count; ++device)\n"
" {\n"
" cudaDeviceProp prop;\n"
" if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
" std::printf(\"%d.%d \", prop.major, prop.minor);\n"
" }\n"
" return 0;\n"
"}\n")
# "nvcc --run" compiles and immediately executes the probe.
execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${__cufile}"
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(__nvcc_res EQUAL 0)
# sm_21 has no matching compute_21: pair it with compute_20 PTX instead.
string(REPLACE "2.1" "2.1(2.0)" __nvcc_out "${__nvcc_out}")
set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architetures from caffe_detect_gpus tool" FORCE)
endif()
endif()
if(NOT CUDA_gpu_detect_output)
message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
set(${out_variable} ${Caffe_known_gpu_archs} PARENT_SCOPE)
else()
set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
endif()
endfunction()
################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
# caffe_select_nvcc_arch_flags(out_variable)
# Builds the list of nvcc -gencode flags for the architecture selection in
# CUDA_ARCH_NAME (Fermi/Kepler/Maxwell/All/Auto/Manual).  Returns the flags in
# <out_variable> and a human-readable summary in <out_variable>_readable.
function(caffe_select_nvcc_arch_flags out_variable)
# List of arch names
set(__archs_names "Fermi" "Kepler" "Maxwell" "All" "Manual")
set(__archs_name_default "All")
# "Auto" (run-a-probe detection) only makes sense when not cross-compiling.
if(NOT CMAKE_CROSSCOMPILING)
list(APPEND __archs_names "Auto")
set(__archs_name_default "Auto")
endif()
# set CUDA_ARCH_NAME strings (so it will be seen as a drop-down box in CMake-GUI)
set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.")
set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${__archs_names} )
mark_as_advanced(CUDA_ARCH_NAME)
# verify CUDA_ARCH_NAME value
if(NOT ";${__archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
string(REPLACE ";" ", " __archs_names "${__archs_names}")
message(FATAL_ERROR "Only ${__archs_names} architeture names are supported.")
endif()
# "Manual" exposes CUDA_ARCH_BIN / CUDA_ARCH_PTX cache entries for the user;
# any other choice removes them so stale values cannot leak in.
if(${CUDA_ARCH_NAME} STREQUAL "Manual")
set(CUDA_ARCH_BIN ${Caffe_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
else()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)
endif()
# Map the selected name to a space-separated list of BIN architectures.
if(${CUDA_ARCH_NAME} STREQUAL "Fermi")
set(__cuda_arch_bin "20 21(20)")
elseif(${CUDA_ARCH_NAME} STREQUAL "Kepler")
set(__cuda_arch_bin "30 35")
elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
set(__cuda_arch_bin "50")
elseif(${CUDA_ARCH_NAME} STREQUAL "All")
set(__cuda_arch_bin ${Caffe_known_gpu_archs})
elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
caffe_detect_installed_gpus(__cuda_arch_bin)
else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
set(__cuda_arch_bin ${CUDA_ARCH_BIN})
endif()
# remove dots and convert to lists ("3.5" -> "35"; "21(20)" kept intact
# by the [0-9()]+ match so the BIN(PTX) pairing survives)
string(REGEX REPLACE "\\." "" __cuda_arch_bin "${__cuda_arch_bin}")
string(REGEX REPLACE "\\." "" __cuda_arch_ptx "${CUDA_ARCH_PTX}")
string(REGEX MATCHALL "[0-9()]+" __cuda_arch_bin "${__cuda_arch_bin}")
string(REGEX MATCHALL "[0-9]+" __cuda_arch_ptx "${__cuda_arch_ptx}")
caffe_list_unique(__cuda_arch_bin __cuda_arch_ptx)
set(__nvcc_flags "")
set(__nvcc_archs_readable "")
# Tell NVCC to add binaries for the specified GPUs
foreach(__arch ${__cuda_arch_bin})
if(__arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
# User explicitly specified PTX for the concrete BIN
list(APPEND __nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
list(APPEND __nvcc_archs_readable sm_${CMAKE_MATCH_1})
else()
# User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=sm_${__arch})
list(APPEND __nvcc_archs_readable sm_${__arch})
endif()
endforeach()
# Tell NVCC to add PTX intermediate code for the specified architectures
foreach(__arch ${__cuda_arch_ptx})
list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=compute_${__arch})
list(APPEND __nvcc_archs_readable compute_${__arch})
endforeach()
string(REPLACE ";" " " __nvcc_archs_readable "${__nvcc_archs_readable}")
set(${out_variable} ${__nvcc_flags} PARENT_SCOPE)
set(${out_variable}_readable ${__nvcc_archs_readable} PARENT_SCOPE)
endfunction()
################################################################################################
# Short command for cuda compilation
# Usage:
# caffe_cuda_compile(<objlist_variable> <cuda_files>)
# Compiles the given CUDA sources (ARGN) via cuda_compile() and returns the
# object files in <objlist_variable>.  Implemented as a macro so the temporary
# flag changes below take effect in the caller's scope during cuda_compile().
macro(caffe_cuda_compile objlist_variable)
# Back up the host C++ flag sets, then strip /EHa before nvcc sees them.
foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
set(${var}_backup_in_cuda_compile_ "${${var}}")
# we remove /EHa as it generates warnings under windows
string(REPLACE "/EHa" "" ${var} "${${var}}")
endforeach()
# Position-independent code is needed to link these objects into libcaffe.so.
if(UNIX OR APPLE)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
endif()
if(APPLE)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -Wno-unused-function)
endif()
cuda_compile(cuda_objcs ${ARGN})
# Restore the original flag sets and drop the backups.
foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
set(${var} "${${var}_backup_in_cuda_compile_}")
unset(${var}_backup_in_cuda_compile_)
endforeach()
set(${objlist_variable} ${cuda_objcs})
endmacro()
################################################################################################
# Short command for cuDNN detection. Believe it soon will be a part of CUDA toolkit distribution.
# That's why not FindcuDNN.cmake file, but just the macro
# Usage:
# detect_cuDNN()
# Locates the cuDNN header and shared library (searching CUDNN_ROOT, the
# CUDNN_ROOT environment variable, and the CUDA toolkit paths), parses the
# version out of cudnn.h, and sets HAVE_CUDNN / CUDNN_FOUND / CUDNN_VERSION
# in the caller's scope.  Fails hard on cuDNN older than v3.
function(detect_cuDNN)
set(CUDNN_ROOT "" CACHE PATH "CUDNN root folder")
find_path(CUDNN_INCLUDE cudnn.h
PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDA_TOOLKIT_INCLUDE}
DOC "Path to cuDNN include directory." )
# Also look next to libcudart, since cuDNN is often dropped into the toolkit.
get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
find_library(CUDNN_LIBRARY NAMES libcudnn.so # libcudnn_static.a
PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist}
DOC "Path to cuDNN library.")
if(CUDNN_INCLUDE AND CUDNN_LIBRARY)
set(HAVE_CUDNN TRUE PARENT_SCOPE)
set(CUDNN_FOUND TRUE PARENT_SCOPE)
file(READ ${CUDNN_INCLUDE}/cudnn.h CUDNN_VERSION_FILE_CONTENTS)
# cuDNN v3 and beyond
# (extract "#define CUDNN_MAJOR/MINOR/PATCHLEVEL <n>" with a MATCH to
# isolate the define, then a REPLACE to keep only the captured number)
string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)"
CUDNN_VERSION_MAJOR "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1"
CUDNN_VERSION_MAJOR "${CUDNN_VERSION_MAJOR}")
string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)"
CUDNN_VERSION_MINOR "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1"
CUDNN_VERSION_MINOR "${CUDNN_VERSION_MINOR}")
string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)"
CUDNN_VERSION_PATCH "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1"
CUDNN_VERSION_PATCH "${CUDNN_VERSION_PATCH}")
# Pre-v3 headers have no CUDNN_MAJOR define, so no version can be parsed.
if(NOT CUDNN_VERSION_MAJOR)
set(CUDNN_VERSION "???")
else()
set(CUDNN_VERSION "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}")
endif()
message(STATUS "Found cuDNN: ver. ${CUDNN_VERSION} found (include: ${CUDNN_INCLUDE}, library: ${CUDNN_LIBRARY})")
string(COMPARE LESS "${CUDNN_VERSION_MAJOR}" 3 cuDNNVersionIncompatible)
if(cuDNNVersionIncompatible)
message(FATAL_ERROR "cuDNN version >3 is required.")
endif()
set(CUDNN_VERSION "${CUDNN_VERSION}" PARENT_SCOPE)
mark_as_advanced(CUDNN_INCLUDE CUDNN_LIBRARY CUDNN_ROOT)
endif()
endfunction()
################################################################################################
###  Non macro section
################################################################################################
find_package(CUDA 5.5 QUIET)
find_cuda_helper_libs(curand)  # cmake 2.8.7 compatibility which doesn't search for curand
if(NOT CUDA_FOUND)
return()
endif()
set(HAVE_CUDA TRUE)
message(STATUS "CUDA detected: " ${CUDA_VERSION})
include_directories(SYSTEM ${CUDA_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${CUDA_CUDART_LIBRARY}
${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES})
# cudnn detection
if(USE_CUDNN)
detect_cuDNN()
if(HAVE_CUDNN)
add_definitions(-DUSE_CUDNN)
include_directories(SYSTEM ${CUDNN_INCLUDE})
list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY})
endif()
endif()
# setting nvcc arch flags
caffe_select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
# Boost 1.55 workaround, see https://svn.boost.org/trac/boost/ticket/9392 or
# https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
if(Boost_VERSION EQUAL 105500)
message(STATUS "Cuda + Boost 1.55: Applying noinline work around")
# avoid warning for CMake >= 2.8.12
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ")
endif()
# disable some nvcc diagnostics that appear in boost, glog, gflags, opencv, etc.
foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration set_but_not_used)
list(APPEND CUDA_NVCC_FLAGS -Xcudafe --diag_suppress=${diag})
endforeach()
# setting default testing device (-1 means "let the test pick")
if(NOT CUDA_TEST_DEVICE)
set(CUDA_TEST_DEVICE -1)
endif()
mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
# Handle clang/libc++ issue
if(APPLE)
caffe_detect_darwin_version(OSX_VERSION)
# OSX 10.9 and higher uses clang/libc++ by default which is incompatible with old CUDA toolkits
if(OSX_VERSION VERSION_GREATER 10.8)
# enabled by default if and only if CUDA version is less than 7.0
caffe_option(USE_libstdcpp "Use libstdc++ instead of libc++" (CUDA_VERSION VERSION_LESS 7.0))
endif()
endif()
================================================
FILE: caffe-fpn/cmake/Dependencies.cmake
================================================
# ---[ Mandatory dependencies: Boost, threads, glog, gflags, protobuf, HDF5.
# This list is required for static linking and exported to CaffeConfig.cmake
set(Caffe_LINKER_LIBS "")

# ---[ Boost
find_package(Boost 1.46 REQUIRED COMPONENTS system thread filesystem)
include_directories(SYSTEM ${Boost_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES})

# ---[ Threads
find_package(Threads REQUIRED)
list(APPEND Caffe_LINKER_LIBS ${CMAKE_THREAD_LIBS_INIT})

# ---[ Google-glog
# (External/glog.cmake falls back to an ExternalProject build when no
# system-wide glog is found.)
include("cmake/External/glog.cmake")
include_directories(SYSTEM ${GLOG_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${GLOG_LIBRARIES})

# ---[ Google-gflags
# (same fallback mechanism as glog above)
include("cmake/External/gflags.cmake")
include_directories(SYSTEM ${GFLAGS_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${GFLAGS_LIBRARIES})

# ---[ Google-protobuf
include(cmake/ProtoBuf.cmake)

# ---[ HDF5
find_package(HDF5 COMPONENTS HL REQUIRED)
include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES})
# ---[ Optional I/O backends (LMDB / LevelDB / Snappy) and CUDA.
# ---[ LMDB
if(USE_LMDB)
find_package(LMDB REQUIRED)
include_directories(SYSTEM ${LMDB_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES})
add_definitions(-DUSE_LMDB)
if(ALLOW_LMDB_NOLOCK)
add_definitions(-DALLOW_LMDB_NOLOCK)
endif()
endif()

# ---[ LevelDB
if(USE_LEVELDB)
find_package(LevelDB REQUIRED)
include_directories(SYSTEM ${LevelDB_INCLUDE})
list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES})
add_definitions(-DUSE_LEVELDB)
endif()

# ---[ Snappy
# Snappy is only needed as a LevelDB compression dependency.
if(USE_LEVELDB)
find_package(Snappy REQUIRED)
include_directories(SYSTEM ${Snappy_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES})
endif()

# ---[ CUDA
include(cmake/Cuda.cmake)
if(NOT HAVE_CUDA)
if(CPU_ONLY)
message(STATUS "-- CUDA is disabled. Building without it...")
else()
message(WARNING "-- CUDA is not detected by cmake. Building without it...")
endif()

# TODO: remove this not cross platform define in future. Use caffe_config.h instead.
add_definitions(-DCPU_ONLY)
endif()
# ---[ OpenCV
if(USE_OPENCV)
# Try the OpenCV 3.x component set first; imgcodecs only exists in 3.x.
find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs)
if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)
endif()
include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS})
message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})")
add_definitions(-DUSE_OPENCV)
endif()

# ---[ BLAS
# On non-Apple platforms the BLAS cache variable selects Atlas/OpenBLAS/MKL;
# on macOS the Accelerate/vecLib framework is always used.
if(NOT APPLE)
set(BLAS "Atlas" CACHE STRING "Selected BLAS library")
set_property(CACHE BLAS PROPERTY STRINGS "Atlas;Open;MKL")

if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas")
find_package(Atlas REQUIRED)
include_directories(SYSTEM ${Atlas_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Atlas_LIBRARIES})
elseif(BLAS STREQUAL "Open" OR BLAS STREQUAL "open")
find_package(OpenBLAS REQUIRED)
include_directories(SYSTEM ${OpenBLAS_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${OpenBLAS_LIB})
elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl")
find_package(MKL REQUIRED)
include_directories(SYSTEM ${MKL_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${MKL_LIBRARIES})
add_definitions(-DUSE_MKL)
endif()
elseif(APPLE)
find_package(vecLib REQUIRED)
include_directories(SYSTEM ${vecLib_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${vecLib_LINKER_LIBS})
endif()
# ---[ Python
if(BUILD_python)
if(NOT "${python_version}" VERSION_LESS "3.0.0")
# use python3
find_package(PythonInterp 3.0)
find_package(PythonLibs 3.0)
find_package(NumPy 1.7.1)
# Find the matching boost python implementation
set(version ${PYTHONLIBS_VERSION_STRING})

# Distros name the component differently (e.g. python-py35); probe with
# the full version, then progressively strip trailing version components
# (e.g. "3.5.2" -> "3.5" -> "3") until a matching Boost component is found
# or no version digits remain.
STRING( REGEX REPLACE "[^0-9]" "" boost_py_version ${version} )
find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}")
set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND})

while(NOT "${version}" STREQUAL "" AND NOT Boost_PYTHON_FOUND)
STRING( REGEX REPLACE "([0-9.]+).[0-9]+" "\\1" version ${version} )
STRING( REGEX REPLACE "[^0-9]" "" boost_py_version ${version} )
find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}")
set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND})

STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} )
if("${has_more_version}" STREQUAL "")
break()
endif()
endwhile()
# Last resort: the unversioned "python" component.
if(NOT Boost_PYTHON_FOUND)
find_package(Boost 1.46 COMPONENTS python)
endif()
else()
# disable Python 3 search
find_package(PythonInterp 2.7)
find_package(PythonLibs 2.7)
find_package(NumPy 1.7.1)
find_package(Boost 1.46 COMPONENTS python)
endif()
# The python layer is only enabled when interpreter, numpy and
# Boost.Python were all found.
if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND)
set(HAVE_PYTHON TRUE)
if(BUILD_python_layer)
add_definitions(-DWITH_PYTHON_LAYER)
include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES})
endif()
endif()
endif()
# ---[ Matlab
if(BUILD_matlab)
find_package(MatlabMex)
if(MATLABMEX_FOUND)
set(HAVE_MATLAB TRUE)
endif()

# sudo apt-get install liboctave-dev
find_program(Octave_compiler NAMES mkoctfile DOC "Octave C++ compiler")

# When both Matlab and Octave are available, let the user choose which
# one builds the mex interface.
if(HAVE_MATLAB AND Octave_compiler)
set(Matlab_build_mex_using "Matlab" CACHE STRING "Select Matlab or Octave if both detected")
set_property(CACHE Matlab_build_mex_using PROPERTY STRINGS "Matlab;Octave")
endif()
endif()

# ---[ Doxygen
if(BUILD_docs)
find_package(Doxygen)
endif()
FILE: caffe-fpn/cmake/External/gflags.cmake
================================================
# Provides gflags: prefers a system-wide installation, otherwise builds a
# static, -fPIC gflags v2.1.2 from GitHub via ExternalProject and points
# GFLAGS_INCLUDE_DIRS / GFLAGS_LIBRARIES at the private install tree.
if (NOT __GFLAGS_INCLUDED) # guard against multiple includes
  set(__GFLAGS_INCLUDED TRUE)

  # use the system-wide gflags if present
  find_package(GFlags)
  if (GFLAGS_FOUND)
    set(GFLAGS_EXTERNAL FALSE)
  else()
    # gflags will use pthreads if it's available in the system, so we must link with it
    find_package(Threads)

    # build directory
    set(gflags_PREFIX ${CMAKE_BINARY_DIR}/external/gflags-prefix)
    # install directory
    set(gflags_INSTALL ${CMAKE_BINARY_DIR}/external/gflags-install)

    # we build gflags statically, but want to link it into the caffe shared library
    # this requires position-independent code
    if (UNIX)
        set(GFLAGS_EXTRA_COMPILER_FLAGS "-fPIC")
    endif()

    set(GFLAGS_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})
    set(GFLAGS_C_FLAGS ${CMAKE_C_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})

    ExternalProject_Add(gflags
      PREFIX ${gflags_PREFIX}
      GIT_REPOSITORY "https://github.com/gflags/gflags.git"
      GIT_TAG "v2.1.2"
      UPDATE_COMMAND ""
      INSTALL_DIR ${gflags_INSTALL}
      CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
                 -DCMAKE_INSTALL_PREFIX=${gflags_INSTALL}
                 -DBUILD_SHARED_LIBS=OFF
                 -DBUILD_STATIC_LIBS=ON
                 -DBUILD_PACKAGING=OFF
                 -DBUILD_TESTING=OFF
                 -DBUILD_NC_TESTS=OFF
                 # BUGFIX: was "-BUILD_CONFIG_TESTS=OFF" (missing "D"), which
                 # passed a malformed argument instead of defining the option.
                 -DBUILD_CONFIG_TESTS=OFF
                 -DINSTALL_HEADERS=ON
                 -DCMAKE_C_FLAGS=${GFLAGS_C_FLAGS}
                 -DCMAKE_CXX_FLAGS=${GFLAGS_CXX_FLAGS}
      LOG_DOWNLOAD 1
      LOG_INSTALL 1
      )

    set(GFLAGS_FOUND TRUE)
    set(GFLAGS_INCLUDE_DIRS ${gflags_INSTALL}/include)
    set(GFLAGS_LIBRARIES ${gflags_INSTALL}/lib/libgflags.a ${CMAKE_THREAD_LIBS_INIT})
    set(GFLAGS_LIBRARY_DIRS ${gflags_INSTALL}/lib)
    set(GFLAGS_EXTERNAL TRUE)

    list(APPEND external_project_dependencies gflags)
  endif()
endif()
================================================
FILE: caffe-fpn/cmake/External/glog.cmake
================================================
# Provides glog: prefers a system-wide installation, otherwise builds a
# static, -fPIC glog v0.3.4 from GitHub via ExternalProject (autotools),
# linked against whichever gflags External/gflags.cmake provided.
# glog depends on gflags
include("cmake/External/gflags.cmake")

if (NOT __GLOG_INCLUDED)
  set(__GLOG_INCLUDED TRUE)

  # try the system-wide glog first
  find_package(Glog)
  if (GLOG_FOUND)
    set(GLOG_EXTERNAL FALSE)
  else()
    # fetch and build glog from github

    # build directory
    set(glog_PREFIX ${CMAKE_BINARY_DIR}/external/glog-prefix)
    # install directory
    set(glog_INSTALL ${CMAKE_BINARY_DIR}/external/glog-install)

    # we build glog statically, but want to link it into the caffe shared library
    # this requires position-independent code
    if (UNIX)
        set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC")
    endif()

    set(GLOG_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})
    set(GLOG_C_FLAGS ${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})

    # depend on gflags if we're also building it
    if (GFLAGS_EXTERNAL)
      set(GLOG_DEPENDS gflags)
    endif()

    ExternalProject_Add(glog
      DEPENDS ${GLOG_DEPENDS}
      PREFIX ${glog_PREFIX}
      GIT_REPOSITORY "https://github.com/google/glog"
      GIT_TAG "v0.3.4"
      UPDATE_COMMAND ""
      # BUGFIX: INSTALL_DIR previously pointed at ${gflags_INSTALL}; the real
      # install prefix is the one given to configure below, so keep the
      # INSTALL_DIR property consistent with it.
      INSTALL_DIR ${glog_INSTALL}
      CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/..
      LOG_DOWNLOAD 1
      LOG_CONFIGURE 1
      LOG_INSTALL 1
      )

    set(GLOG_FOUND TRUE)
    set(GLOG_INCLUDE_DIRS ${glog_INSTALL}/include)
    set(GLOG_LIBRARIES ${GFLAGS_LIBRARIES} ${glog_INSTALL}/lib/libglog.a)
    set(GLOG_LIBRARY_DIRS ${glog_INSTALL}/lib)
    set(GLOG_EXTERNAL TRUE)

    list(APPEND external_project_dependencies glog)
  endif()
endif()
================================================
FILE: caffe-fpn/cmake/Misc.cmake
================================================
# ---[ Configuration types
# Restrict multi-config generators to Debug/Release only.
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE)
mark_as_advanced(CMAKE_CONFIGURATION_TYPES)

if(DEFINED CMAKE_BUILD_TYPE)
  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES})
endif()

# --[ Default to a Release build when the user gave no build type.
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
  set(CMAKE_BUILD_TYPE Release)
endif()

# Convenience flag: are we compiling with clang?
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  set(CMAKE_COMPILER_IS_CLANGXX TRUE)
endif()

# ---[ Solution folders
# Group targets into IDE folders for Visual Studio / Xcode generators.
caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) )

if(USE_PROJECT_FOLDERS)
  set_property(GLOBAL PROPERTY USE_FOLDERS ON)
  set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets")
endif()

# ---[ Install options
# Redirect the default system-wide prefix into the build tree.
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE)
endif()

# ---[ RPATH settings
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for shared library rpath")
set(CMAKE_MACOSX_RPATH TRUE)

# Only embed <prefix>/lib into the rpath when it is not already an
# implicit system linker directory.
list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __install_lib_index)
if(${__install_lib_index} STREQUAL -1)
  set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib)
endif()

# ---[ Funny target
# Out-of-source convenience: symlink <caffe_root>/build to the binary dir.
if(UNIX OR APPLE)
  add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build"
                                     COMMENT "Adding symlink: <caffe_root>/build -> ${PROJECT_BINARY_DIR}" )
endif()

# ---[ Set debug postfix
# Debug artifacts get a "-d" suffix so both flavors can coexist.
set(Caffe_DEBUG_POSTFIX "-d")

set(Caffe_POSTFIX "")
if(CMAKE_BUILD_TYPE MATCHES "Debug")
  set(Caffe_POSTFIX ${Caffe_DEBUG_POSTFIX})
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindAtlas.cmake
================================================
# Find the Atlas (and Lapack) libraries
#
# The following variables are optionally searched for defaults
#  Atlas_ROOT_DIR: Base directory where all Atlas components are found
#
# The following are set after configuration is done:
#  Atlas_FOUND
#  Atlas_INCLUDE_DIRS
#  Atlas_LIBRARIES
#  Atlas_LIBRARY_DIRS
set(Atlas_INCLUDE_SEARCH_PATHS
  /usr/include/atlas
  /usr/include/atlas-base
  $ENV{Atlas_ROOT_DIR}
  $ENV{Atlas_ROOT_DIR}/include
)
set(Atlas_LIB_SEARCH_PATHS
  /usr/lib/atlas
  /usr/lib/atlas-base
  $ENV{Atlas_ROOT_DIR}
  $ENV{Atlas_ROOT_DIR}/lib
)
find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
# Prefer the threaded (pt*/_r) variants when present.
find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
set(LOOKED_FOR
  Atlas_CBLAS_INCLUDE_DIR
  Atlas_CLAPACK_INCLUDE_DIR
  Atlas_CBLAS_LIBRARY
  Atlas_BLAS_LIBRARY
  Atlas_LAPACK_LIBRARY
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR})
if(ATLAS_FOUND)
  # BUGFIX: the header advertises Atlas_INCLUDE_DIRS, but only the singular
  # Atlas_INCLUDE_DIR was set; define both so existing callers keep working.
  set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR})
  set(Atlas_INCLUDE_DIRS ${Atlas_INCLUDE_DIR})
  set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY})
  mark_as_advanced(${LOOKED_FOR})
  message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})")
endif(ATLAS_FOUND)
================================================
FILE: caffe-fpn/cmake/Modules/FindGFlags.cmake
================================================
# - Locate the gflags command-line flags library.
#
# Optional input:
#   GFLAGS_ROOT_DIR: base directory searched for gflags components
#
# Defined after a successful search:
#   GFLAGS_FOUND
#   GFLAGS_INCLUDE_DIRS
#   GFLAGS_LIBRARIES
include(FindPackageHandleStandardArgs)
set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags")
# Header check: on Windows the public headers sit under src/windows.
if(NOT WIN32)
  find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
    PATHS ${GFLAGS_ROOT_DIR})
else()
  find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
    PATHS ${GFLAGS_ROOT_DIR}/src/windows)
endif()
# Library check: MSVC builds ship separate Release/Debug binaries, combined
# here with the optimized/debug keywords so either configuration links.
if(NOT MSVC)
  find_library(GFLAGS_LIBRARY gflags)
else()
  find_library(GFLAGS_LIBRARY_RELEASE
    NAMES libgflags
    PATHS ${GFLAGS_ROOT_DIR}
    PATH_SUFFIXES Release)
  find_library(GFLAGS_LIBRARY_DEBUG
    NAMES libgflags-debug
    PATHS ${GFLAGS_ROOT_DIR}
    PATH_SUFFIXES Debug)
  set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
endif()
find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)
if(GFLAGS_FOUND)
  set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
  set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
  message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})")
  mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE
                   GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR)
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindGlog.cmake
================================================
# - Locate the Google glog logging library.
#
# Optional input:
#   GLOG_ROOT_DIR: base directory searched for glog components
#
# Defined after a successful search:
#   GLOG_FOUND
#   GLOG_INCLUDE_DIRS
#   GLOG_LIBRARIES
include(FindPackageHandleStandardArgs)
set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog")
# Header check: Windows source trees keep the headers under src/windows.
if(NOT WIN32)
  find_path(GLOG_INCLUDE_DIR glog/logging.h
    PATHS ${GLOG_ROOT_DIR})
else()
  find_path(GLOG_INCLUDE_DIR glog/logging.h
    PATHS ${GLOG_ROOT_DIR}/src/windows)
endif()
# Library check: MSVC builds produce libglog_static in per-config folders;
# elsewhere a plain "glog" is searched under lib/lib64.
if(NOT MSVC)
  find_library(GLOG_LIBRARY glog
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES lib lib64)
else()
  find_library(GLOG_LIBRARY_RELEASE libglog_static
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES Release)
  find_library(GLOG_LIBRARY_DEBUG libglog_static
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES Debug)
  set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG})
endif()
find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)
if(GLOG_FOUND)
  set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR})
  set(GLOG_LIBRARIES ${GLOG_LIBRARY})
  message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})")
  mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG
                   GLOG_LIBRARY GLOG_INCLUDE_DIR)
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindLAPACK.cmake
================================================
# - Find LAPACK library
# This module finds an installed fortran library that implements the LAPACK
# linear-algebra interface (see http://www.netlib.org/lapack/).
#
# The approach follows that taken for the autoconf macro file, acx_lapack.m4
# (distributed at http://ac-archive.sourceforge.net/ac-archive/acx_lapack.html).
#
# This module sets the following variables:
#  LAPACK_FOUND - set to true if a library implementing the LAPACK interface is found
#  LAPACK_LIBRARIES - list of libraries (using full path name) for LAPACK
# Note: I do not think it is a good idea to mixup different BLAS/LAPACK versions
# Hence, this script wants to find a Lapack library matching your Blas library
# Do nothing if LAPACK was found before
IF(NOT LAPACK_FOUND)
SET(LAPACK_LIBRARIES)
SET(LAPACK_INFO)
# LAPACK detection is driven by the BLAS that was found; propagate the
# QUIET/REQUIRED disposition of the LAPACK request to the BLAS search.
IF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
FIND_PACKAGE(BLAS)
ELSE(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
FIND_PACKAGE(BLAS REQUIRED)
ENDIF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
# Old search lapack script
include(CheckFortranFunctionExists)
macro(Check_Lapack_Libraries LIBRARIES _prefix _name _flags _list _blas)
# This macro checks for the existence of the combination of fortran libraries
# given by _list.  If the combination is found, this macro checks (using the
# Check_Fortran_Function_Exists macro) whether can link against that library
# combination using the name of a routine given by _name using the linker
# flags given by _flags.  If the combination of libraries is found and passes
# the link test, LIBRARIES is set to the list of complete library paths that
# have been found.  Otherwise, LIBRARIES is set to FALSE.
# N.B. _prefix is the prefix applied to the names of all cached variables that
# are generated internally and marked advanced by this macro.
set(_libraries_work TRUE)
set(${LIBRARIES})
set(_combined_name)
foreach(_library ${_list})
set(_combined_name ${_combined_name}_${_library})
# Stop probing further libraries as soon as one lookup has failed:
# _libraries_work holds the last find_library result (NOTFOUND == false).
if(_libraries_work)
if (WIN32)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library} PATHS ENV LIB PATHS ENV PATH)
else (WIN32)
if(APPLE)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library}
PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
ENV DYLD_LIBRARY_PATH)
else(APPLE)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library}
PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
ENV LD_LIBRARY_PATH)
endif(APPLE)
endif(WIN32)
mark_as_advanced(${_prefix}_${_library}_LIBRARY)
set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
set(_libraries_work ${${_prefix}_${_library}_LIBRARY})
endif(_libraries_work)
endforeach(_library ${_list})
if(_libraries_work)
# Test this combination of libraries.
set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}} ${_blas})
# Without a working Fortran compiler, fall back to checking the
# conventional underscore-suffixed C symbol name for the routine.
if (CMAKE_Fortran_COMPILER_WORKS)
check_fortran_function_exists(${_name} ${_prefix}${_combined_name}_WORKS)
else (CMAKE_Fortran_COMPILER_WORKS)
check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS)
endif (CMAKE_Fortran_COMPILER_WORKS)
set(CMAKE_REQUIRED_LIBRARIES)
mark_as_advanced(${_prefix}${_combined_name}_WORKS)
set(_libraries_work ${${_prefix}${_combined_name}_WORKS})
endif(_libraries_work)
if(NOT _libraries_work)
set(${LIBRARIES} FALSE)
endif(NOT _libraries_work)
endmacro(Check_Lapack_Libraries)
# Each branch below handles one BLAS flavour (reported via BLAS_INFO) and is
# skipped once LAPACK_INFO has been set by an earlier branch.
if(BLAS_FOUND)
# Intel MKL
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "mkl"))
IF(MKL_LAPACK_LIBRARIES)
SET(LAPACK_LIBRARIES ${MKL_LAPACK_LIBRARIES} ${MKL_LIBRARIES})
ELSE(MKL_LAPACK_LIBRARIES)
SET(LAPACK_LIBRARIES ${MKL_LIBRARIES})
ENDIF(MKL_LAPACK_LIBRARIES)
SET(LAPACK_INCLUDE_DIR ${MKL_INCLUDE_DIR})
SET(LAPACK_INFO "mkl")
ENDIF()
# OpenBlas
# cheev_ (complex Hermitian eigensolver) is used as a sentinel symbol to
# detect whether the BLAS library was built with LAPACK support baked in.
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "open"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" OPEN_LAPACK_WORKS)
if(OPEN_LAPACK_WORKS)
SET(LAPACK_INFO "open")
else()
message(STATUS "It seems OpenBlas has not been compiled with Lapack support")
endif()
endif()
# GotoBlas
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "goto"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" GOTO_LAPACK_WORKS)
if(GOTO_LAPACK_WORKS)
SET(LAPACK_INFO "goto")
else()
message(STATUS "It seems GotoBlas has not been compiled with Lapack support")
endif()
endif()
# ACML
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "acml"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" ACML_LAPACK_WORKS)
if(ACML_LAPACK_WORKS)
SET(LAPACK_INFO "acml")
else()
message(STATUS "Strangely, this ACML library does not support Lapack?!")
endif()
endif()
# Accelerate
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "accelerate"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" ACCELERATE_LAPACK_WORKS)
if(ACCELERATE_LAPACK_WORKS)
SET(LAPACK_INFO "accelerate")
else()
message(STATUS "Strangely, this Accelerate library does not support Lapack?!")
endif()
endif()
# vecLib
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "veclib"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" VECLIB_LAPACK_WORKS)
if(VECLIB_LAPACK_WORKS)
SET(LAPACK_INFO "veclib")
else()
message(STATUS "Strangely, this vecLib library does not support Lapack?!")
endif()
endif()
# Generic LAPACK library?
# Last resort: look for a standalone "lapack" library that links against the
# already-found BLAS, using the macro defined above.
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "generic"))
check_lapack_libraries(
LAPACK_LIBRARIES
LAPACK
cheev
""
"lapack"
"${BLAS_LIBRARIES}"
)
if(LAPACK_LIBRARIES)
SET(LAPACK_INFO "generic")
endif(LAPACK_LIBRARIES)
endif()
else(BLAS_FOUND)
message(STATUS "LAPACK requires BLAS")
endif(BLAS_FOUND)
# A non-empty LAPACK_INFO is the single source of truth for success.
if(LAPACK_INFO)
set(LAPACK_FOUND TRUE)
else(LAPACK_INFO)
set(LAPACK_FOUND FALSE)
endif(LAPACK_INFO)
IF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED)
message(FATAL_ERROR "Cannot find a library with LAPACK API. Please specify library location.")
ENDIF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED)
IF(NOT LAPACK_FIND_QUIETLY)
IF(LAPACK_FOUND)
MESSAGE(STATUS "Found a library with LAPACK API. (${LAPACK_INFO})")
ELSE(LAPACK_FOUND)
MESSAGE(STATUS "Cannot find a library with LAPACK API. Not using LAPACK.")
ENDIF(LAPACK_FOUND)
ENDIF(NOT LAPACK_FIND_QUIETLY)
# Do nothing if LAPACK was found before
ENDIF(NOT LAPACK_FOUND)
================================================
FILE: caffe-fpn/cmake/Modules/FindLMDB.cmake
================================================
# Try to find the LMDB libraries and headers
#  LMDB_FOUND - system has LMDB lib
#  LMDB_INCLUDE_DIR - the LMDB include directory
#  LMDB_LIBRARIES - Libraries needed to use LMDB
# FindCWD based on FindGMP by:
# Copyright (c) 2006, Laurent Montel, <montel@kde.org>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# Adapted from FindCWD by:
# Copyright 2013 Conrad Steenberg <conrad.steenberg@gmail.com>
# Aug 31, 2013
# The LMDB_DIR environment variable provides an optional install prefix hint.
find_path(LMDB_INCLUDE_DIR NAMES  lmdb.h PATHS "$ENV{LMDB_DIR}/include")
find_library(LMDB_LIBRARIES NAMES lmdb   PATHS "$ENV{LMDB_DIR}/lib" )
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES)
if(LMDB_FOUND)
  message(STATUS "Found lmdb    (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})")
  mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES)
  # caffe_parse_header is a project helper (cmake/Utils.cmake) that extracts
  # the MDB_VERSION_* defines from lmdb.h into CMake variables.
  caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h
                     LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH)
  set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}")
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindLevelDB.cmake
================================================
# - Find LevelDB
#
#  LevelDB_INCLUDES  - List of LevelDB includes
#  LevelDB_LIBRARIES - List of libraries when using LevelDB.
#  LevelDB_FOUND     - True if LevelDB found.
# Look for the header file.
find_path(LevelDB_INCLUDE NAMES leveldb/db.h
                          PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include
                          DOC "Path in which the file leveldb/db.h is located." )
# Look for the library.
find_library(LevelDB_LIBRARY NAMES leveldb
                             PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib
                             DOC "Path to leveldb library." )
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY)
# NOTE: find_package_handle_standard_args sets the upper-cased LEVELDB_FOUND,
# which is what is tested here; the mixed-case result vars are set below.
if(LEVELDB_FOUND)
  message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})")
  set(LevelDB_INCLUDES ${LevelDB_INCLUDE})
  set(LevelDB_LIBRARIES ${LevelDB_LIBRARY})
  mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY)

  # Extract the library version by scanning db.h for the
  # "static const int kMajorVersion/kMinorVersion = N;" declarations.
  if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h")
    file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines
           REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;")

    foreach(__line ${__version_lines})
      if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);")
        set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1})
      elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);")
        set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1})
      endif()
    endforeach()

    if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR)
      set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}")
    endif()

    # caffe_clear_vars is a project helper (cmake/Utils.cmake) that unsets
    # the listed temporary variables.
    caffe_clear_vars(__line __version_lines)
  endif()
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindMKL.cmake
================================================
# Find the MKL libraries
#
# Options:
#
#   MKL_USE_SINGLE_DYNAMIC_LIBRARY  : use single dynamic library interface
#   MKL_USE_STATIC_LIBS             : use static libraries
#   MKL_MULTI_THREADED              : use multi-threading
#
# This module defines the following variables:
#
#   MKL_FOUND            : True mkl is found
#   MKL_INCLUDE_DIR      : include directory
#   MKL_LIBRARIES        : the libraries to link against.
# ---[ Options
caffe_option(MKL_USE_SINGLE_DYNAMIC_LIBRARY "Use single dynamic library interface" ON)
caffe_option(MKL_USE_STATIC_LIBS "Use static libraries" OFF IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
caffe_option(MKL_MULTI_THREADED "Use multi-threading" ON IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
# ---[ Root folders
set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs")
find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKL_ROOT} ${INTEL_ROOT}/mkl
                                 DOC "Folder contains MKL")
# ---[ Find include dir
find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT} PATH_SUFFIXES include)
set(__looked_for MKL_INCLUDE_DIR)
# ---[ Find libraries
# Library directory layout differs between 32- and 64-bit installs.
if(CMAKE_SIZEOF_VOID_P EQUAL 4)
  set(__path_suffixes lib lib/ia32)
else()
  set(__path_suffixes lib lib/intel64)
endif()
# Build the list of mkl_* component names to search for, based on the options.
set(__mkl_libs "")
if(MKL_USE_SINGLE_DYNAMIC_LIBRARY)
  list(APPEND __mkl_libs rt)
else()
  if(CMAKE_SIZEOF_VOID_P EQUAL 4)
    if(WIN32)
      list(APPEND __mkl_libs intel_c)
    else()
      list(APPEND __mkl_libs intel gf)
    endif()
  else()
    list(APPEND __mkl_libs intel_lp64 gf_lp64)
  endif()

  if(MKL_MULTI_THREADED)
    list(APPEND __mkl_libs intel_thread)
  else()
     list(APPEND __mkl_libs sequential)
  endif()

  list(APPEND __mkl_libs core cdft_core)
endif()

foreach (__lib ${__mkl_libs})
  set(__mkl_lib "mkl_${__lib}")
  string(TOUPPER ${__mkl_lib} __mkl_lib_upper)

  if(MKL_USE_STATIC_LIBS)
    set(__mkl_lib "lib${__mkl_lib}.a")
  endif()

  find_library(${__mkl_lib_upper}_LIBRARY
        NAMES ${__mkl_lib}
        PATHS ${MKL_ROOT} "${MKL_INCLUDE_DIR}/.."
        PATH_SUFFIXES ${__path_suffixes}
        DOC "The path to Intel(R) MKL ${__mkl_lib} library")
  mark_as_advanced(${__mkl_lib_upper}_LIBRARY)

  list(APPEND __looked_for ${__mkl_lib_upper}_LIBRARY)
  list(APPEND MKL_LIBRARIES ${${__mkl_lib_upper}_LIBRARY})
endforeach()

# The component-library interface also needs the Intel OpenMP runtime (iomp5).
if(NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
  if (MKL_USE_STATIC_LIBS)
    set(__iomp5_libs iomp5 libiomp5mt.lib)
  else()
    set(__iomp5_libs iomp5 libiomp5md.lib)
  endif()

  if(WIN32)
    find_path(INTEL_INCLUDE_DIR omp.h PATHS ${INTEL_ROOT} PATH_SUFFIXES include)
    list(APPEND __looked_for INTEL_INCLUDE_DIR)
  endif()

  # BUGFIX: DOC string previously read "Path to Path to OpenMP runtime library".
  find_library(MKL_RTL_LIBRARY ${__iomp5_libs}
     PATHS ${INTEL_RTL_ROOT} ${INTEL_ROOT}/compiler ${MKL_ROOT}/.. ${MKL_ROOT}/../compiler
     PATH_SUFFIXES ${__path_suffixes}
     DOC "Path to OpenMP runtime library")

  list(APPEND __looked_for MKL_RTL_LIBRARY)
  list(APPEND MKL_LIBRARIES ${MKL_RTL_LIBRARY})
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MKL DEFAULT_MSG ${__looked_for})

if(MKL_FOUND)
  message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}")
endif()

caffe_clear_vars(__looked_for __mkl_libs __path_suffixes __lib_suffix __iomp5_libs)
================================================
FILE: caffe-fpn/cmake/Modules/FindMatlabMex.cmake
================================================
# This module looks for MatlabMex compiler
# Defines variables:
#   Matlab_DIR - Matlab root dir
#   Matlab_mex - path to mex compiler
#   Matlab_mexext - path to mexext
# On Windows, read MATLABROOT from the registry for known MATLAB versions
# (newest first); the first hit wins.
if(MSVC)
  foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7")
    get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE)
    if(__matlab_root)
      break()
    endif()
  endforeach()
endif()
# On macOS, probe the standard /Applications install locations.
if(APPLE)
  foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" "R2010a")
    if(EXISTS /Applications/MATLAB_${__ver}.app)
      set(__matlab_root /Applications/MATLAB_${__ver}.app)
      break()
    endif()
  endforeach()
endif()
# On UNIX, resolve the `matlab` binary on PATH via readlink and walk two
# directory levels up (<root>/bin/matlab -> <root>).
if(UNIX)
  execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE
                  OUTPUT_VARIABLE __out RESULT_VARIABLE __res)
  if(__res MATCHES 0) # Suppress `readlink` warning if `which` returned nothing
    execute_process(COMMAND which matlab COMMAND xargs readlink
                    COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n
                    OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE)
  endif()
endif()

# Validate the discovered root and locate the mex/mexext tools inside it only
# (NO_DEFAULT_PATH keeps stray tools on PATH from being picked up).
find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root}
          DOC "Matlab directory" NO_DEFAULT_PATH)

find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext)

if(MATLABMEX_FOUND)
  mark_as_advanced(Matlab_mex Matlab_mexext)
endif()
================================================
FILE: caffe-fpn/cmake/Modules/FindNumPy.cmake
================================================
# - Find the NumPy libraries
# This module finds if NumPy is installed, and sets the following variables
# indicating where it is.
#
# TODO: Update to provide the libraries and paths for linking npymath lib.
#
# NUMPY_FOUND - was NumPy found
# NUMPY_VERSION - the version of NumPy found as a string
# NUMPY_VERSION_MAJOR - the major version number of NumPy
# NUMPY_VERSION_MINOR - the minor version number of NumPy
# NUMPY_VERSION_PATCH - the patch version number of NumPy
# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601
# NUMPY_INCLUDE_DIR - path to the NumPy include files
unset(NUMPY_VERSION)
unset(NUMPY_INCLUDE_DIR)
if(PYTHONINTERP_FOUND)
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import numpy as n; print(n.__version__); print(n.get_include());"
RESULT_VARIABLE __result
OUTPUT_VARIABLE __output
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(__result MATCHES 0)
string(REGEX REPLACE ";" "\\\\;" __values ${__output})
string(REGEX REPLACE "\r?\n" ";" __values ${__values})
list(GET __values 0 NUMPY_VERSION)
list(GET __values 1 NUMPY_INCLUDE_DIR)
string(REGEX MATCH "^([0-9])+\\.([0-9])+\\.([0-9])+" __ver_check "${NUMPY_VERSION}")
if(NOT "${__ver_check}" STREQUAL "")
set(NUMPY_VERSION_MAJOR ${CMAKE_MATCH_1})
set(NUMPY_VERSION_MINOR ${CMAKE_MATCH_2})
set(NUMPY_VERSION_PATCH ${CMAKE_MATCH_3})
math(EXPR NUMPY_VERSION_DECIMAL
"(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")
string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIR ${NUMPY_INCLUDE_DIR})
else()
unset(NUMPY_VERSION)
unset(NUMPY_INCLUDE_DIR)
message(STATUS "Requested NumPy version and include path, but got instead:\n${__output}\n")
endif()
endif()
else()
message(STATUS "To find NumPy Python interpretator is required to be found.")
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NumPy REQUIRED_VARS NUMPY_INCLUDE_DIR NUMPY_VERSION
gitextract_hh7mi8hx/
├── README.md
├── caffe-fpn/
│ ├── .Doxyfile
│ ├── .travis.yml
│ ├── CMakeLists.txt
│ ├── CONTRIBUTING.md
│ ├── CONTRIBUTORS.md
│ ├── INSTALL.md
│ ├── LICENSE
│ ├── Makefile
│ ├── Makefile.config
│ ├── Makefile.config.example
│ ├── README.md
│ ├── caffe.cloc
│ ├── cmake/
│ │ ├── ConfigGen.cmake
│ │ ├── Cuda.cmake
│ │ ├── Dependencies.cmake
│ │ ├── External/
│ │ │ ├── gflags.cmake
│ │ │ └── glog.cmake
│ │ ├── Misc.cmake
│ │ ├── Modules/
│ │ │ ├── FindAtlas.cmake
│ │ │ ├── FindGFlags.cmake
│ │ │ ├── FindGlog.cmake
│ │ │ ├── FindLAPACK.cmake
│ │ │ ├── FindLMDB.cmake
│ │ │ ├── FindLevelDB.cmake
│ │ │ ├── FindMKL.cmake
│ │ │ ├── FindMatlabMex.cmake
│ │ │ ├── FindNumPy.cmake
│ │ │ ├── FindOpenBLAS.cmake
│ │ │ ├── FindSnappy.cmake
│ │ │ └── FindvecLib.cmake
│ │ ├── ProtoBuf.cmake
│ │ ├── Summary.cmake
│ │ ├── Targets.cmake
│ │ ├── Templates/
│ │ │ ├── CaffeConfig.cmake.in
│ │ │ ├── CaffeConfigVersion.cmake.in
│ │ │ └── caffe_config.h.in
│ │ ├── Utils.cmake
│ │ └── lint.cmake
│ ├── data/
│ │ ├── cifar10/
│ │ │ └── get_cifar10.sh
│ │ └── ilsvrc12/
│ │ └── get_ilsvrc_aux.sh
│ ├── docs/
│ │ ├── CMakeLists.txt
│ │ ├── CNAME
│ │ ├── README.md
│ │ ├── _config.yml
│ │ ├── _layouts/
│ │ │ └── default.html
│ │ ├── development.md
│ │ ├── index.md
│ │ ├── install_apt.md
│ │ ├── install_osx.md
│ │ ├── install_yum.md
│ │ ├── installation.md
│ │ ├── model_zoo.md
│ │ ├── multigpu.md
│ │ ├── performance_hardware.md
│ │ ├── stylesheets/
│ │ │ ├── pygment_trac.css
│ │ │ ├── reset.css
│ │ │ └── styles.css
│ │ └── tutorial/
│ │ ├── convolution.md
│ │ ├── data.md
│ │ ├── fig/
│ │ │ └── .gitignore
│ │ ├── forward_backward.md
│ │ ├── index.md
│ │ ├── interfaces.md
│ │ ├── layers.md
│ │ ├── loss.md
│ │ ├── net_layer_blob.md
│ │ └── solver.md
│ ├── examples/
│ │ ├── 00-classification.ipynb
│ │ ├── 01-learning-lenet.ipynb
│ │ ├── 02-brewing-logreg.ipynb
│ │ ├── 03-fine-tuning.ipynb
│ │ ├── CMakeLists.txt
│ │ ├── cifar10/
│ │ │ ├── cifar10_full.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt
│ │ │ ├── cifar10_full_solver.prototxt
│ │ │ ├── cifar10_full_solver_lr1.prototxt
│ │ │ ├── cifar10_full_solver_lr2.prototxt
│ │ │ ├── cifar10_full_train_test.prototxt
│ │ │ ├── cifar10_quick.prototxt
│ │ │ ├── cifar10_quick_solver.prototxt
│ │ │ ├── cifar10_quick_solver_lr1.prototxt
│ │ │ ├── cifar10_quick_train_test.prototxt
│ │ │ ├── convert_cifar_data.cpp
│ │ │ ├── create_cifar10.sh
│ │ │ ├── readme.md
│ │ │ ├── train_full.sh
│ │ │ ├── train_full_sigmoid.sh
│ │ │ ├── train_full_sigmoid_bn.sh
│ │ │ └── train_quick.sh
│ │ ├── cpp_classification/
│ │ │ ├── classification.cpp
│ │ │ └── readme.md
│ │ ├── detection.ipynb
│ │ ├── feature_extraction/
│ │ │ ├── imagenet_val.prototxt
│ │ │ └── readme.md
│ │ ├── finetune_flickr_style/
│ │ │ ├── assemble_data.py
│ │ │ ├── readme.md
│ │ │ └── style_names.txt
│ │ ├── finetune_pascal_detection/
│ │ │ ├── pascal_finetune_solver.prototxt
│ │ │ └── pascal_finetune_trainval_test.prototxt
│ │ ├── hdf5_classification/
│ │ │ ├── nonlinear_auto_test.prototxt
│ │ │ ├── nonlinear_auto_train.prototxt
│ │ │ ├── nonlinear_solver.prototxt
│ │ │ ├── nonlinear_train_val.prototxt
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── imagenet/
│ │ │ ├── create_imagenet.sh
│ │ │ ├── make_imagenet_mean.sh
│ │ │ ├── readme.md
│ │ │ ├── resume_training.sh
│ │ │ └── train_caffenet.sh
│ │ ├── mnist/
│ │ │ ├── convert_mnist_data.cpp
│ │ │ ├── create_mnist.sh
│ │ │ ├── lenet.prototxt
│ │ │ ├── lenet_adadelta_solver.prototxt
│ │ │ ├── lenet_auto_solver.prototxt
│ │ │ ├── lenet_consolidated_solver.prototxt
│ │ │ ├── lenet_multistep_solver.prototxt
│ │ │ ├── lenet_solver.prototxt
│ │ │ ├── lenet_solver_adam.prototxt
│ │ │ ├── lenet_solver_rmsprop.prototxt
│ │ │ ├── lenet_train_test.prototxt
│ │ │ ├── mnist_autoencoder.prototxt
│ │ │ ├── mnist_autoencoder_solver.prototxt
│ │ │ ├── mnist_autoencoder_solver_adadelta.prototxt
│ │ │ ├── mnist_autoencoder_solver_adagrad.prototxt
│ │ │ ├── mnist_autoencoder_solver_nesterov.prototxt
│ │ │ ├── mnist_train_lmdb/
│ │ │ │ ├── data.mdb
│ │ │ │ └── lock.mdb
│ │ │ ├── readme.md
│ │ │ ├── train_lenet.sh
│ │ │ ├── train_lenet_adam.sh
│ │ │ ├── train_lenet_consolidated.sh
│ │ │ ├── train_lenet_docker.sh
│ │ │ ├── train_lenet_rmsprop.sh
│ │ │ ├── train_mnist_autoencoder.sh
│ │ │ ├── train_mnist_autoencoder_adadelta.sh
│ │ │ ├── train_mnist_autoencoder_adagrad.sh
│ │ │ └── train_mnist_autoencoder_nesterov.sh
│ │ ├── net_surgery/
│ │ │ ├── bvlc_caffenet_full_conv.prototxt
│ │ │ └── conv.prototxt
│ │ ├── net_surgery.ipynb
│ │ ├── pycaffe/
│ │ │ ├── caffenet.py
│ │ │ ├── layers/
│ │ │ │ └── pyloss.py
│ │ │ └── linreg.prototxt
│ │ ├── siamese/
│ │ │ ├── convert_mnist_siamese_data.cpp
│ │ │ ├── create_mnist_siamese.sh
│ │ │ ├── mnist_siamese.ipynb
│ │ │ ├── mnist_siamese.prototxt
│ │ │ ├── mnist_siamese_solver.prototxt
│ │ │ ├── mnist_siamese_train_test.prototxt
│ │ │ ├── readme.md
│ │ │ └── train_mnist_siamese.sh
│ │ └── web_demo/
│ │ ├── app.py
│ │ ├── exifutil.py
│ │ ├── readme.md
│ │ ├── requirements.txt
│ │ └── templates/
│ │ └── index.html
│ ├── include/
│ │ └── caffe/
│ │ ├── blob.hpp
│ │ ├── caffe.hpp
│ │ ├── common.hpp
│ │ ├── data_reader.hpp
│ │ ├── data_transformer.hpp
│ │ ├── fast_rcnn_layers.hpp
│ │ ├── filler.hpp
│ │ ├── internal_thread.hpp
│ │ ├── layer.hpp
│ │ ├── layer_factory.hpp
│ │ ├── layers/
│ │ │ ├── .conadd_layer.hpp.swo
│ │ │ ├── .conadd_layer.hpp.swp
│ │ │ ├── absval_layer.hpp
│ │ │ ├── accuracy_layer.hpp
│ │ │ ├── argmax_layer.hpp
│ │ │ ├── base_conv_layer.hpp
│ │ │ ├── base_data_layer.hpp
│ │ │ ├── batch_norm_layer.hpp
│ │ │ ├── batch_reindex_layer.hpp
│ │ │ ├── bias_layer.hpp
│ │ │ ├── bnll_layer.hpp
│ │ │ ├── conadd_layer.hpp
│ │ │ ├── concat_layer.hpp
│ │ │ ├── contrastive_loss_layer.hpp
│ │ │ ├── conv_layer.hpp
│ │ │ ├── crop_layer.hpp
│ │ │ ├── cudnn_conv_layer.hpp
│ │ │ ├── cudnn_lcn_layer.hpp
│ │ │ ├── cudnn_lrn_layer.hpp
│ │ │ ├── cudnn_pooling_layer.hpp
│ │ │ ├── cudnn_relu_layer.hpp
│ │ │ ├── cudnn_sigmoid_layer.hpp
│ │ │ ├── cudnn_softmax_layer.hpp
│ │ │ ├── cudnn_tanh_layer.hpp
│ │ │ ├── data_layer.hpp
│ │ │ ├── deconv_layer.hpp
│ │ │ ├── deformable_conv_layer.hpp
│ │ │ ├── dropout_layer.hpp
│ │ │ ├── dummy_data_layer.hpp
│ │ │ ├── eltwise_layer.hpp
│ │ │ ├── elu_layer.hpp
│ │ │ ├── embed_layer.hpp
│ │ │ ├── euclidean_loss_layer.hpp
│ │ │ ├── exp_layer.hpp
│ │ │ ├── filter_layer.hpp
│ │ │ ├── flatten_layer.hpp
│ │ │ ├── hdf5_data_layer.hpp
│ │ │ ├── hdf5_output_layer.hpp
│ │ │ ├── hinge_loss_layer.hpp
│ │ │ ├── im2col_layer.hpp
│ │ │ ├── image_data_layer.hpp
│ │ │ ├── infogain_loss_layer.hpp
│ │ │ ├── inner_product_layer.hpp
│ │ │ ├── log_layer.hpp
│ │ │ ├── loss_layer.hpp
│ │ │ ├── lrn_layer.hpp
│ │ │ ├── memory_data_layer.hpp
│ │ │ ├── multinomial_logistic_loss_layer.hpp
│ │ │ ├── mvn_layer.hpp
│ │ │ ├── neuron_layer.hpp
│ │ │ ├── pooling_layer.hpp
│ │ │ ├── power_layer.hpp
│ │ │ ├── prelu_layer.hpp
│ │ │ ├── python_layer.hpp
│ │ │ ├── reduction_layer.hpp
│ │ │ ├── relu_layer.hpp
│ │ │ ├── reshape_layer.hpp
│ │ │ ├── scale_layer.hpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.hpp
│ │ │ ├── sigmoid_layer.hpp
│ │ │ ├── silence_layer.hpp
│ │ │ ├── slice_layer.hpp
│ │ │ ├── softmax_layer.hpp
│ │ │ ├── softmax_loss_layer.hpp
│ │ │ ├── split_layer.hpp
│ │ │ ├── spp_layer.hpp
│ │ │ ├── tanh_layer.hpp
│ │ │ ├── threshold_layer.hpp
│ │ │ ├── tile_layer.hpp
│ │ │ └── window_data_layer.hpp
│ │ ├── net.hpp
│ │ ├── parallel.hpp
│ │ ├── sgd_solvers.hpp
│ │ ├── solver.hpp
│ │ ├── solver_factory.hpp
│ │ ├── syncedmem.hpp
│ │ ├── test/
│ │ │ ├── test_caffe_main.hpp
│ │ │ └── test_gradient_check_util.hpp
│ │ └── util/
│ │ ├── benchmark.hpp
│ │ ├── blocking_queue.hpp
│ │ ├── cudnn.hpp
│ │ ├── db.hpp
│ │ ├── db_leveldb.hpp
│ │ ├── db_lmdb.hpp
│ │ ├── deformable_im2col.hpp
│ │ ├── device_alternate.hpp
│ │ ├── format.hpp
│ │ ├── gpu_util.cuh
│ │ ├── hdf5.hpp
│ │ ├── im2col.hpp
│ │ ├── insert_splits.hpp
│ │ ├── io.hpp
│ │ ├── math_functions.hpp
│ │ ├── mkl_alternate.hpp
│ │ ├── rng.hpp
│ │ ├── signal_handler.h
│ │ └── upgrade_proto.hpp
│ ├── matlab/
│ │ ├── +caffe/
│ │ │ ├── +test/
│ │ │ │ ├── test_io.m
│ │ │ │ ├── test_net.m
│ │ │ │ └── test_solver.m
│ │ │ ├── Blob.m
│ │ │ ├── Layer.m
│ │ │ ├── Net.m
│ │ │ ├── Solver.m
│ │ │ ├── get_net.m
│ │ │ ├── get_solver.m
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.mat
│ │ │ ├── io.m
│ │ │ ├── private/
│ │ │ │ ├── CHECK.m
│ │ │ │ ├── CHECK_FILE_EXIST.m
│ │ │ │ ├── caffe_.cpp
│ │ │ │ └── is_valid_handle.m
│ │ │ ├── reset_all.m
│ │ │ ├── run_tests.m
│ │ │ ├── set_device.m
│ │ │ ├── set_mode_cpu.m
│ │ │ ├── set_mode_gpu.m
│ │ │ └── version.m
│ │ ├── CMakeLists.txt
│ │ ├── demo/
│ │ │ └── classification_demo.m
│ │ └── hdf5creation/
│ │ ├── .gitignore
│ │ ├── demo.m
│ │ └── store2hdf5.m
│ ├── models/
│ │ ├── bvlc_alexnet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_googlenet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── quick_solver.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_reference_caffenet/
│ │ │ ├── deploy.prototxt
│ │ │ ├── readme.md
│ │ │ ├── solver.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── bvlc_reference_rcnn_ilsvrc13/
│ │ │ ├── deploy.prototxt
│ │ │ └── readme.md
│ │ └── finetune_flickr_style/
│ │ ├── deploy.prototxt
│ │ ├── readme.md
│ │ ├── solver.prototxt
│ │ └── train_val.prototxt
│ ├── python/
│ │ ├── CMakeLists.txt
│ │ ├── caffe/
│ │ │ ├── __init__.py
│ │ │ ├── _caffe.cpp
│ │ │ ├── classifier.py
│ │ │ ├── detector.py
│ │ │ ├── draw.py
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.npy
│ │ │ ├── io.py
│ │ │ ├── net_spec.py
│ │ │ ├── proto/
│ │ │ │ ├── __init__.py
│ │ │ │ └── caffe_pb2.py
│ │ │ ├── pycaffe.py
│ │ │ └── test/
│ │ │ ├── test_io.py
│ │ │ ├── test_layer_type_list.py
│ │ │ ├── test_net.py
│ │ │ ├── test_net_spec.py
│ │ │ ├── test_python_layer.py
│ │ │ ├── test_python_layer_with_param_str.py
│ │ │ └── test_solver.py
│ │ ├── classify.py
│ │ ├── detect.py
│ │ ├── draw_net.py
│ │ └── requirements.txt
│ ├── scripts/
│ │ ├── build_docs.sh
│ │ ├── copy_notebook.py
│ │ ├── cpp_lint.py
│ │ ├── deploy_docs.sh
│ │ ├── download_model_binary.py
│ │ ├── download_model_from_gist.sh
│ │ ├── gather_examples.sh
│ │ ├── travis/
│ │ │ ├── travis_build_and_test.sh
│ │ │ ├── travis_install.sh
│ │ │ └── travis_setup_makefile_config.sh
│ │ └── upload_model_to_gist.sh
│ ├── src/
│ │ ├── caffe/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── blob.cpp
│ │ │ ├── common.cpp
│ │ │ ├── data_reader.cpp
│ │ │ ├── data_transformer.cpp
│ │ │ ├── internal_thread.cpp
│ │ │ ├── layer.cpp
│ │ │ ├── layer_factory.cpp
│ │ │ ├── layers/
│ │ │ │ ├── .conadd_layer.cpp.swo
│ │ │ │ ├── .conadd_layer.cpp.swp
│ │ │ │ ├── .concat_layer.cpp.swp
│ │ │ │ ├── absval_layer.cpp
│ │ │ │ ├── absval_layer.cu
│ │ │ │ ├── accuracy_layer.cpp
│ │ │ │ ├── argmax_layer.cpp
│ │ │ │ ├── base_conv_layer.cpp
│ │ │ │ ├── base_data_layer.cpp
│ │ │ │ ├── base_data_layer.cu
│ │ │ │ ├── batch_norm_layer.cpp
│ │ │ │ ├── batch_norm_layer.cu
│ │ │ │ ├── batch_reindex_layer.cpp
│ │ │ │ ├── batch_reindex_layer.cu
│ │ │ │ ├── bias_layer.cpp
│ │ │ │ ├── bias_layer.cu
│ │ │ │ ├── bnll_layer.cpp
│ │ │ │ ├── bnll_layer.cu
│ │ │ │ ├── conadd_layer.cpp
│ │ │ │ ├── conadd_layer.cu
│ │ │ │ ├── concat_layer.cpp
│ │ │ │ ├── concat_layer.cu
│ │ │ │ ├── contrastive_loss_layer.cpp
│ │ │ │ ├── contrastive_loss_layer.cu
│ │ │ │ ├── conv_layer.cpp
│ │ │ │ ├── conv_layer.cu
│ │ │ │ ├── crop_layer.cpp
│ │ │ │ ├── crop_layer.cu
│ │ │ │ ├── cudnn_conv_layer.cpp
│ │ │ │ ├── cudnn_conv_layer.cu
│ │ │ │ ├── cudnn_lcn_layer.cpp
│ │ │ │ ├── cudnn_lcn_layer.cu
│ │ │ │ ├── cudnn_lrn_layer.cpp
│ │ │ │ ├── cudnn_lrn_layer.cu
│ │ │ │ ├── cudnn_pooling_layer.cpp
│ │ │ │ ├── cudnn_pooling_layer.cu
│ │ │ │ ├── cudnn_relu_layer.cpp
│ │ │ │ ├── cudnn_relu_layer.cu
│ │ │ │ ├── cudnn_sigmoid_layer.cpp
│ │ │ │ ├── cudnn_sigmoid_layer.cu
│ │ │ │ ├── cudnn_softmax_layer.cpp
│ │ │ │ ├── cudnn_softmax_layer.cu
│ │ │ │ ├── cudnn_tanh_layer.cpp
│ │ │ │ ├── cudnn_tanh_layer.cu
│ │ │ │ ├── data_layer.cpp
│ │ │ │ ├── deconv_layer.cpp
│ │ │ │ ├── deconv_layer.cu
│ │ │ │ ├── deformable_conv_layer.cpp
│ │ │ │ ├── deformable_conv_layer.cu
│ │ │ │ ├── dropout_layer.cpp
│ │ │ │ ├── dropout_layer.cu
│ │ │ │ ├── dummy_data_layer.cpp
│ │ │ │ ├── eltwise_layer.cpp
│ │ │ │ ├── eltwise_layer.cu
│ │ │ │ ├── elu_layer.cpp
│ │ │ │ ├── elu_layer.cu
│ │ │ │ ├── embed_layer.cpp
│ │ │ │ ├── embed_layer.cu
│ │ │ │ ├── euclidean_loss_layer.cpp
│ │ │ │ ├── euclidean_loss_layer.cu
│ │ │ │ ├── exp_layer.cpp
│ │ │ │ ├── exp_layer.cu
│ │ │ │ ├── filter_layer.cpp
│ │ │ │ ├── filter_layer.cu
│ │ │ │ ├── flatten_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cu
│ │ │ │ ├── hdf5_output_layer.cpp
│ │ │ │ ├── hdf5_output_layer.cu
│ │ │ │ ├── hinge_loss_layer.cpp
│ │ │ │ ├── im2col_layer.cpp
│ │ │ │ ├── im2col_layer.cu
│ │ │ │ ├── image_data_layer.cpp
│ │ │ │ ├── infogain_loss_layer.cpp
│ │ │ │ ├── inner_product_layer.cpp
│ │ │ │ ├── inner_product_layer.cu
│ │ │ │ ├── log_layer.cpp
│ │ │ │ ├── log_layer.cu
│ │ │ │ ├── loss_layer.cpp
│ │ │ │ ├── lrn_layer.cpp
│ │ │ │ ├── lrn_layer.cu
│ │ │ │ ├── memory_data_layer.cpp
│ │ │ │ ├── multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── mvn_layer.cpp
│ │ │ │ ├── mvn_layer.cu
│ │ │ │ ├── neuron_layer.cpp
│ │ │ │ ├── pooling_layer.cpp
│ │ │ │ ├── pooling_layer.cu
│ │ │ │ ├── power_layer.cpp
│ │ │ │ ├── power_layer.cu
│ │ │ │ ├── prelu_layer.cpp
│ │ │ │ ├── prelu_layer.cu
│ │ │ │ ├── reduction_layer.cpp
│ │ │ │ ├── reduction_layer.cu
│ │ │ │ ├── relu_layer.cpp
│ │ │ │ ├── relu_layer.cu
│ │ │ │ ├── reshape_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cu
│ │ │ │ ├── scale_layer.cpp
│ │ │ │ ├── scale_layer.cu
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu
│ │ │ │ ├── sigmoid_layer.cpp
│ │ │ │ ├── sigmoid_layer.cu
│ │ │ │ ├── silence_layer.cpp
│ │ │ │ ├── silence_layer.cu
│ │ │ │ ├── slice_layer.cpp
│ │ │ │ ├── slice_layer.cu
│ │ │ │ ├── smooth_L1_loss_layer.cpp
│ │ │ │ ├── smooth_L1_loss_layer.cu
│ │ │ │ ├── softmax_layer.cpp
│ │ │ │ ├── softmax_layer.cu
│ │ │ │ ├── softmax_loss_layer.cpp
│ │ │ │ ├── softmax_loss_layer.cu
│ │ │ │ ├── split_layer.cpp
│ │ │ │ ├── split_layer.cu
│ │ │ │ ├── spp_layer.cpp
│ │ │ │ ├── tanh_layer.cpp
│ │ │ │ ├── tanh_layer.cu
│ │ │ │ ├── threshold_layer.cpp
│ │ │ │ ├── threshold_layer.cu
│ │ │ │ ├── tile_layer.cpp
│ │ │ │ ├── tile_layer.cu
│ │ │ │ └── window_data_layer.cpp
│ │ │ ├── net.cpp
│ │ │ ├── parallel.cpp
│ │ │ ├── proto/
│ │ │ │ ├── .caffe.proto.swp
│ │ │ │ └── caffe.proto
│ │ │ ├── solver.cpp
│ │ │ ├── solvers/
│ │ │ │ ├── adadelta_solver.cpp
│ │ │ │ ├── adadelta_solver.cu
│ │ │ │ ├── adagrad_solver.cpp
│ │ │ │ ├── adagrad_solver.cu
│ │ │ │ ├── adam_solver.cpp
│ │ │ │ ├── adam_solver.cu
│ │ │ │ ├── nesterov_solver.cpp
│ │ │ │ ├── nesterov_solver.cu
│ │ │ │ ├── rmsprop_solver.cpp
│ │ │ │ ├── rmsprop_solver.cu
│ │ │ │ ├── sgd_solver.cpp
│ │ │ │ └── sgd_solver.cu
│ │ │ ├── syncedmem.cpp
│ │ │ ├── test/
│ │ │ │ ├── CMakeLists.txt
│ │ │ │ ├── test_accuracy_layer.cpp
│ │ │ │ ├── test_argmax_layer.cpp
│ │ │ │ ├── test_batch_norm_layer.cpp
│ │ │ │ ├── test_batch_reindex_layer.cpp
│ │ │ │ ├── test_benchmark.cpp
│ │ │ │ ├── test_bias_layer.cpp
│ │ │ │ ├── test_blob.cpp
│ │ │ │ ├── test_caffe_main.cpp
│ │ │ │ ├── test_common.cpp
│ │ │ │ ├── test_concat_layer.cpp
│ │ │ │ ├── test_contrastive_loss_layer.cpp
│ │ │ │ ├── test_convolution_layer.cpp
│ │ │ │ ├── test_data/
│ │ │ │ │ ├── generate_sample_data.py
│ │ │ │ │ ├── sample_data.h5
│ │ │ │ │ ├── sample_data_2_gzip.h5
│ │ │ │ │ ├── sample_data_list.txt
│ │ │ │ │ ├── solver_data.h5
│ │ │ │ │ └── solver_data_list.txt
│ │ │ │ ├── test_data_layer.cpp
│ │ │ │ ├── test_data_transformer.cpp
│ │ │ │ ├── test_db.cpp
│ │ │ │ ├── test_deconvolution_layer.cpp
│ │ │ │ ├── test_dummy_data_layer.cpp
│ │ │ │ ├── test_eltwise_layer.cpp
│ │ │ │ ├── test_embed_layer.cpp
│ │ │ │ ├── test_euclidean_loss_layer.cpp
│ │ │ │ ├── test_filler.cpp
│ │ │ │ ├── test_filter_layer.cpp
│ │ │ │ ├── test_flatten_layer.cpp
│ │ │ │ ├── test_gradient_based_solver.cpp
│ │ │ │ ├── test_hdf5_output_layer.cpp
│ │ │ │ ├── test_hdf5data_layer.cpp
│ │ │ │ ├── test_hinge_loss_layer.cpp
│ │ │ │ ├── test_im2col_kernel.cu
│ │ │ │ ├── test_im2col_layer.cpp
│ │ │ │ ├── test_image_data_layer.cpp
│ │ │ │ ├── test_infogain_loss_layer.cpp
│ │ │ │ ├── test_inner_product_layer.cpp
│ │ │ │ ├── test_internal_thread.cpp
│ │ │ │ ├── test_io.cpp
│ │ │ │ ├── test_layer_factory.cpp
│ │ │ │ ├── test_lrn_layer.cpp
│ │ │ │ ├── test_math_functions.cpp
│ │ │ │ ├── test_maxpool_dropout_layers.cpp
│ │ │ │ ├── test_memory_data_layer.cpp
│ │ │ │ ├── test_multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── test_mvn_layer.cpp
│ │ │ │ ├── test_net.cpp
│ │ │ │ ├── test_neuron_layer.cpp
│ │ │ │ ├── test_platform.cpp
│ │ │ │ ├── test_pooling_layer.cpp
│ │ │ │ ├── test_power_layer.cpp
│ │ │ │ ├── test_protobuf.cpp
│ │ │ │ ├── test_random_number_generator.cpp
│ │ │ │ ├── test_reduction_layer.cpp
│ │ │ │ ├── test_reshape_layer.cpp
│ │ │ │ ├── test_roi_pooling_layer.cpp
│ │ │ │ ├── test_scale_layer.cpp
│ │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── test_slice_layer.cpp
│ │ │ │ ├── test_smooth_L1_loss_layer.cpp
│ │ │ │ ├── test_softmax_layer.cpp
│ │ │ │ ├── test_softmax_with_loss_layer.cpp
│ │ │ │ ├── test_solver.cpp
│ │ │ │ ├── test_solver_factory.cpp
│ │ │ │ ├── test_split_layer.cpp
│ │ │ │ ├── test_spp_layer.cpp
│ │ │ │ ├── test_stochastic_pooling.cpp
│ │ │ │ ├── test_syncedmem.cpp
│ │ │ │ ├── test_tanh_layer.cpp
│ │ │ │ ├── test_threshold_layer.cpp
│ │ │ │ ├── test_tile_layer.cpp
│ │ │ │ ├── test_upgrade_proto.cpp
│ │ │ │ └── test_util_blas.cpp
│ │ │ └── util/
│ │ │ ├── benchmark.cpp
│ │ │ ├── blocking_queue.cpp
│ │ │ ├── cudnn.cpp
│ │ │ ├── db.cpp
│ │ │ ├── db_leveldb.cpp
│ │ │ ├── db_lmdb.cpp
│ │ │ ├── deformable_im2col.cu
│ │ │ ├── hdf5.cpp
│ │ │ ├── im2col.cpp
│ │ │ ├── im2col.cu
│ │ │ ├── insert_splits.cpp
│ │ │ ├── io.cpp
│ │ │ ├── math_functions.cpp
│ │ │ ├── math_functions.cu
│ │ │ ├── signal_handler.cpp
│ │ │ └── upgrade_proto.cpp
│ │ └── gtest/
│ │ ├── CMakeLists.txt
│ │ ├── gtest-all.cpp
│ │ ├── gtest.h
│ │ └── gtest_main.cc
│ └── tools/
│ ├── CMakeLists.txt
│ ├── caffe.cpp
│ ├── compute_image_mean.cpp
│ ├── convert_imageset.cpp
│ ├── device_query.cpp
│ ├── extra/
│ │ ├── extract_seconds.py
│ │ ├── launch_resize_and_crop_images.sh
│ │ ├── parse_log.py
│ │ ├── parse_log.sh
│ │ ├── plot_log.gnuplot.example
│ │ ├── plot_training_log.py
│ │ ├── resize_and_crop_images.py
│ │ ├── summarize.py
│ │ ├── train.log
│ │ └── train.log.train
│ ├── extract_features.cpp
│ ├── finetune_net.cpp
│ ├── net_speed_benchmark.cpp
│ ├── test_net.cpp
│ ├── train_net.cpp
│ ├── upgrade_net_proto_binary.cpp
│ ├── upgrade_net_proto_text.cpp
│ └── upgrade_solver_proto_text.cpp
├── data/
│ ├── .gitignore
│ ├── README.md
│ ├── pylintrc
│ ├── scripts/
│ │ ├── fetch_faster_rcnn_models.sh
│ │ ├── fetch_imagenet_models.sh
│ │ └── fetch_selective_search_data.sh
│ └── wget-log
├── experiments/
│ ├── README.md
│ ├── cfgs/
│ │ └── FP_Net_end2end.yml
│ └── scripts/
│ └── FP_Net_end2end.sh
├── lib/
│ ├── Makefile
│ ├── datasets/
│ │ ├── VOCdevkit-matlab-wrapper/
│ │ │ ├── get_voc_opts.m
│ │ │ ├── voc_eval.m
│ │ │ └── xVOCap.m
│ │ ├── __init__.py
│ │ ├── coco.py
│ │ ├── ds_utils.py
│ │ ├── factory.py
│ │ ├── imdb.py
│ │ ├── pascal_voc.py
│ │ ├── tools/
│ │ │ └── mcg_munge.py
│ │ └── voc_eval.py
│ ├── fast_rcnn/
│ │ ├── FP_Net_end2end.sh
│ │ ├── __init__.py
│ │ ├── bbox_transform.py
│ │ ├── config.py
│ │ ├── nms_wrapper.py
│ │ ├── test.py
│ │ └── train.py
│ ├── nms/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── cpu_nms.pyx
│ │ ├── gpu_nms.hpp
│ │ ├── gpu_nms.pyx
│ │ ├── nms_kernel.cu
│ │ └── py_cpu_nms.py
│ ├── pycocotools/
│ │ ├── UPSTREAM_REV
│ │ ├── __init__.py
│ │ ├── _mask.c
│ │ ├── _mask.pyx
│ │ ├── coco.py
│ │ ├── cocoeval.py
│ │ ├── license.txt
│ │ ├── mask.py
│ │ ├── maskApi.c
│ │ └── maskApi.h
│ ├── roi_data_layer/
│ │ ├── __init__.py
│ │ ├── layer.py
│ │ ├── minibatch.py
│ │ └── roidb.py
│ ├── rpn/
│ │ ├── __init__.py
│ │ ├── anchor_target_layer.py
│ │ ├── as_rois.py
│ │ ├── as_rois_mrcnn.py
│ │ ├── generate.py
│ │ ├── generate_anchors.py
│ │ ├── proposal_layer.py
│ │ ├── proposal_target_layer.py
│ │ └── proposal_target_layer_mrcnn.py
│ ├── setup.py
│ ├── transform/
│ │ ├── __init__.py
│ │ └── torch_image_transform_layer.py
│ └── utils/
│ ├── .gitignore
│ ├── __init__.py
│ ├── bbox.pyx
│ ├── blob.py
│ └── timer.py
├── models/
│ ├── README.md
│ └── pascal_voc/
│ └── FPN/
│ └── FP_Net_end2end/
│ ├── solver.prototxt
│ ├── solver_mergercnn.prototxt
│ ├── test.prototxt
│ ├── test_mergercnn.prototxt
│ ├── train.prototxt
│ └── train_mergercnn.prototxt
├── output/
│ └── output.md
├── test.sh
├── test_mergercnn.sh
└── tools/
├── README.md
├── _init_paths.py
├── compress_net.py
├── demo.py
├── eval_recall.py
├── reval.py
├── rpn_generate.py
├── test_net.py
├── train_faster_rcnn_alt_opt.py
├── train_net.py
└── train_svms.py
Showing preview only (789K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (3402 symbols across 361 files)
FILE: caffe-fpn/examples/cifar10/convert_cifar_data.cpp
function read_image (line 31) | void read_image(std::ifstream* file, int* label, char* buffer) {
function convert_dataset (line 39) | void convert_dataset(const string& input_folder, const string& output_fo...
function main (line 93) | int main(int argc, char** argv) {
FILE: caffe-fpn/examples/cpp_classification/classification.cpp
class Classifier (line 21) | class Classifier {
function PairCompare (line 86) | static bool PairCompare(const std::pair<float, int>& lhs,
function Argmax (line 92) | static std::vector<int> Argmax(const std::vector<float>& v, int N) {
function main (line 229) | int main(int argc, char** argv) {
function main (line 262) | int main(int argc, char** argv) {
FILE: caffe-fpn/examples/finetune_flickr_style/assemble_data.py
function download_image (line 23) | def download_image(args_tuple):
FILE: caffe-fpn/examples/mnist/convert_mnist_data.cpp
function swap_endian (line 38) | uint32_t swap_endian(uint32_t val) {
function convert_dataset (line 43) | void convert_dataset(const char* image_filename, const char* label_filen...
function main (line 113) | int main(int argc, char** argv) {
function main (line 143) | int main(int argc, char** argv) {
FILE: caffe-fpn/examples/pycaffe/caffenet.py
function conv_relu (line 7) | def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
function fc_relu (line 12) | def fc_relu(bottom, nout):
function max_pool (line 16) | def max_pool(bottom, ks, stride=1):
function caffenet (line 19) | def caffenet(lmdb, batch_size=256, include_acc=False):
function make_net (line 47) | def make_net():
FILE: caffe-fpn/examples/pycaffe/layers/pyloss.py
class EuclideanLossLayer (line 5) | class EuclideanLossLayer(caffe.Layer):
method setup (line 11) | def setup(self, bottom, top):
method reshape (line 16) | def reshape(self, bottom, top):
method forward (line 25) | def forward(self, bottom, top):
method backward (line 29) | def backward(self, top, propagate_down, bottom):
FILE: caffe-fpn/examples/siamese/convert_mnist_siamese_data.cpp
function swap_endian (line 22) | uint32_t swap_endian(uint32_t val) {
function read_image (line 27) | void read_image(std::ifstream* image_file, std::ifstream* label_file,
function convert_dataset (line 36) | void convert_dataset(const char* image_filename, const char* label_filen...
function main (line 109) | int main(int argc, char** argv) {
function main (line 126) | int main(int argc, char** argv) {
FILE: caffe-fpn/examples/web_demo/app.py
function index (line 29) | def index():
function classify_url (line 34) | def classify_url():
function classify_upload (line 57) | def classify_upload():
function embed_image_html (line 82) | def embed_image_html(image):
function allowed_file (line 92) | def allowed_file(filename):
class ImagenetClassifier (line 99) | class ImagenetClassifier(object):
method __init__ (line 119) | def __init__(self, model_def_file, pretrained_model_file, mean_file,
method classify_image (line 148) | def classify_image(self, image):
function start_tornado (line 184) | def start_tornado(app, port=5000):
function start_from_terminal (line 192) | def start_from_terminal(app):
FILE: caffe-fpn/examples/web_demo/exifutil.py
function open_oriented_im (line 19) | def open_oriented_im(im_path):
function apply_orientation (line 35) | def apply_orientation(im, orientation):
FILE: caffe-fpn/include/caffe/blob.hpp
type caffe (line 14) | namespace caffe {
class Blob (line 24) | class Blob {
method Blob (line 26) | Blob()
method string (line 54) | inline string shape_string() const {
method shape (line 71) | inline int shape(int index) const {
method num_axes (line 74) | inline int num_axes() const { return shape_.size(); }
method count (line 75) | inline int count() const { return count_; }
method count (line 85) | inline int count(int start_axis, int end_axis) const {
method count (line 103) | inline int count(int start_axis) const {
method CanonicalAxisIndex (line 118) | inline int CanonicalAxisIndex(int axis_index) const {
method num (line 132) | inline int num() const { return LegacyShape(0); }
method channels (line 134) | inline int channels() const { return LegacyShape(1); }
method height (line 136) | inline int height() const { return LegacyShape(2); }
method width (line 138) | inline int width() const { return LegacyShape(3); }
method LegacyShape (line 139) | inline int LegacyShape(int index) const {
method offset (line 153) | inline int offset(const int n, const int c = 0, const int h = 0,
method offset (line 166) | inline int offset(const vector<int>& indices) const {
method Dtype (line 191) | inline Dtype data_at(const int n, const int c, const int h,
method Dtype (line 196) | inline Dtype diff_at(const int n, const int c, const int h,
method Dtype (line 201) | inline Dtype data_at(const vector<int>& index) const {
method Dtype (line 205) | inline Dtype diff_at(const vector<int>& index) const {
FILE: caffe-fpn/include/caffe/common.hpp
type cv (line 73) | namespace cv { class Mat; }
class Mat (line 73) | class Mat
type caffe (line 75) | namespace caffe {
class Caffe (line 102) | class Caffe {
type Brew (line 111) | enum Brew { CPU, GPU }
class RNG (line 115) | class RNG {
class Generator (line 123) | class Generator
method RNG (line 128) | inline static RNG& rng_stream() {
class Generator (line 123) | class Generator
method cublasHandle_t (line 135) | inline static cublasHandle_t cublas_handle() { return Get().cublas_h...
method curandGenerator_t (line 136) | inline static curandGenerator_t curand_generator() {
method Brew (line 142) | inline static Brew mode() { return Get().mode_; }
method set_mode (line 148) | inline static void set_mode(Brew mode) { Get().mode_ = mode; }
method solver_count (line 157) | inline static int solver_count() { return Get().solver_count_; }
method set_solver_count (line 158) | inline static void set_solver_count(int val) { Get().solver_count_ =...
method root_solver (line 159) | inline static bool root_solver() { return Get().root_solver_; }
method set_root_solver (line 160) | inline static void set_root_solver(bool val) { Get().root_solver_ = ...
FILE: caffe-fpn/include/caffe/data_reader.hpp
type caffe (line 13) | namespace caffe {
class DataReader (line 23) | class DataReader {
class QueuePair (line 37) | class QueuePair {
class Body (line 49) | class Body : public InternalThread {
method string (line 68) | static inline string source_key(const LayerParameter& param) {
FILE: caffe-fpn/include/caffe/data_transformer.hpp
type caffe (line 10) | namespace caffe {
class DataTransformer (line 17) | class DataTransformer {
FILE: caffe-fpn/include/caffe/fast_rcnn_layers.hpp
type caffe (line 20) | namespace caffe {
class ROIPoolingLayer (line 25) | class ROIPoolingLayer : public Layer<Dtype> {
method ROIPoolingLayer (line 27) | explicit ROIPoolingLayer(const LayerParameter& param)
method MinBottomBlobs (line 36) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 37) | virtual inline int MaxBottomBlobs() const { return 2; }
method MinTopBlobs (line 38) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 39) | virtual inline int MaxTopBlobs() const { return 1; }
class SmoothL1LossLayer (line 61) | class SmoothL1LossLayer : public LossLayer<Dtype> {
method SmoothL1LossLayer (line 63) | explicit SmoothL1LossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 72) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 73) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 74) | virtual inline int MaxBottomBlobs() const { return 4; }
method AllowForceBackward (line 80) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe-fpn/include/caffe/filler.hpp
type caffe (line 15) | namespace caffe {
class Filler (line 19) | class Filler {
method Filler (line 21) | explicit Filler(const FillerParameter& param) : filler_param_(param) {}
class ConstantFiller (line 31) | class ConstantFiller : public Filler<Dtype> {
method ConstantFiller (line 33) | explicit ConstantFiller(const FillerParameter& param)
method Fill (line 35) | virtual void Fill(Blob<Dtype>* blob) {
class UniformFiller (line 50) | class UniformFiller : public Filler<Dtype> {
method UniformFiller (line 52) | explicit UniformFiller(const FillerParameter& param)
method Fill (line 54) | virtual void Fill(Blob<Dtype>* blob) {
class GaussianFiller (line 65) | class GaussianFiller : public Filler<Dtype> {
method GaussianFiller (line 67) | explicit GaussianFiller(const FillerParameter& param)
method Fill (line 69) | virtual void Fill(Blob<Dtype>* blob) {
class PositiveUnitballFiller (line 101) | class PositiveUnitballFiller : public Filler<Dtype> {
method PositiveUnitballFiller (line 103) | explicit PositiveUnitballFiller(const FillerParameter& param)
method Fill (line 105) | virtual void Fill(Blob<Dtype>* blob) {
class XavierFiller (line 144) | class XavierFiller : public Filler<Dtype> {
method XavierFiller (line 146) | explicit XavierFiller(const FillerParameter& param)
method Fill (line 148) | virtual void Fill(Blob<Dtype>* blob) {
class MSRAFiller (line 186) | class MSRAFiller : public Filler<Dtype> {
method MSRAFiller (line 188) | explicit MSRAFiller(const FillerParameter& param)
method Fill (line 190) | virtual void Fill(Blob<Dtype>* blob) {
class BilinearFiller (line 244) | class BilinearFiller : public Filler<Dtype> {
method BilinearFiller (line 246) | explicit BilinearFiller(const FillerParameter& param)
method Fill (line 248) | virtual void Fill(Blob<Dtype>* blob) {
FILE: caffe-fpn/include/caffe/internal_thread.hpp
type boost (line 10) | namespace boost { class thread; }
class thread (line 10) | class thread
type caffe (line 12) | namespace caffe {
class InternalThread (line 19) | class InternalThread {
method InternalThread (line 21) | InternalThread() : thread_() {}
method InternalThreadEntry (line 39) | virtual void InternalThreadEntry() {}
FILE: caffe-fpn/include/caffe/layer.hpp
type boost (line 20) | namespace boost { class mutex; }
class mutex (line 20) | class mutex
type caffe (line 22) | namespace caffe {
class Layer (line 35) | class Layer {
method Layer (line 42) | explicit Layer(const LayerParameter& param)
method SetUp (line 69) | void SetUp(const vector<Blob<Dtype>*>& bottom,
method LayerSetUp (line 97) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method ShareInParallel (line 106) | virtual inline bool ShareInParallel() const { return false; }
method IsShared (line 112) | inline bool IsShared() const { return is_shared_; }
method SetShared (line 118) | inline void SetShared(bool is_shared) {
method LayerParameter (line 194) | const LayerParameter& layer_param() const { return layer_param_; }
method Dtype (line 204) | inline Dtype loss(const int top_index) const {
method set_loss (line 211) | inline void set_loss(const int top_index, const Dtype value) {
method ExactNumBottomBlobs (line 230) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 238) | virtual inline int MinBottomBlobs() const { return -1; }
method MaxBottomBlobs (line 246) | virtual inline int MaxBottomBlobs() const { return -1; }
method ExactNumTopBlobs (line 254) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 262) | virtual inline int MinTopBlobs() const { return -1; }
method MaxTopBlobs (line 270) | virtual inline int MaxTopBlobs() const { return -1; }
method EqualNumBottomTopBlobs (line 278) | virtual inline bool EqualNumBottomTopBlobs() const { return false; }
method AutoTopBlobs (line 288) | virtual inline bool AutoTopBlobs() const { return false; }
method AllowForceBackward (line 298) | virtual inline bool AllowForceBackward(const int bottom_index) const {
method param_propagate_down (line 309) | inline bool param_propagate_down(const int param_id) {
method set_param_propagate_down (line 317) | inline void set_param_propagate_down(const int param_id, const bool ...
method Phase (line 324) | inline Phase phase() { return phase_; }
method Forward_gpu (line 347) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 365) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
method CheckBlobCounts (line 377) | virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
method SetLossWeights (line 420) | inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
function Dtype (line 457) | inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
FILE: caffe-fpn/include/caffe/layer_factory.hpp
type caffe (line 50) | namespace caffe {
class Layer (line 53) | class Layer
class LayerRegistry (line 56) | class LayerRegistry {
method CreatorRegistry (line 61) | static CreatorRegistry& Registry() {
method AddCreator (line 67) | static void AddCreator(const string& type, Creator creator) {
method CreateLayer (line 75) | static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& p...
method LayerTypeList (line 86) | static vector<string> LayerTypeList() {
method LayerRegistry (line 99) | LayerRegistry() {}
method string (line 101) | static string LayerTypeListString() {
class LayerRegisterer (line 117) | class LayerRegisterer {
method LayerRegisterer (line 119) | LayerRegisterer(const string& type,
FILE: caffe-fpn/include/caffe/layers/absval_layer.hpp
type caffe (line 12) | namespace caffe {
class AbsValLayer (line 25) | class AbsValLayer : public NeuronLayer<Dtype> {
method AbsValLayer (line 27) | explicit AbsValLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 33) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/accuracy_layer.hpp
type caffe (line 12) | namespace caffe {
class AccuracyLayer (line 19) | class AccuracyLayer : public Layer<Dtype> {
method AccuracyLayer (line 29) | explicit AccuracyLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 37) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method MinTopBlobs (line 41) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlos (line 42) | virtual inline int MaxTopBlos() const { return 2; }
method Backward_cpu (line 74) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/argmax_layer.hpp
type caffe (line 10) | namespace caffe {
class ArgMaxLayer (line 24) | class ArgMaxLayer : public Layer<Dtype> {
method ArgMaxLayer (line 38) | explicit ArgMaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 46) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 47) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Backward_cpu (line 65) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/base_conv_layer.hpp
type caffe (line 11) | namespace caffe {
class BaseConvolutionLayer (line 18) | class BaseConvolutionLayer : public Layer<Dtype> {
method BaseConvolutionLayer (line 20) | explicit BaseConvolutionLayer(const LayerParameter& param)
method MinBottomBlobs (line 27) | virtual inline int MinBottomBlobs() const { return 1; }
method MinTopBlobs (line 28) | virtual inline int MinTopBlobs() const { return 1; }
method EqualNumBottomTopBlobs (line 29) | virtual inline bool EqualNumBottomTopBlobs() const { return true; }
method input_shape (line 56) | inline int input_shape(int i) {
method conv_im2col_cpu (line 98) | inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_cpu (line 112) | inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
method conv_im2col_gpu (line 127) | inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_gpu (line 142) | inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
FILE: caffe-fpn/include/caffe/layers/base_data_layer.hpp
type caffe (line 13) | namespace caffe {
class BaseDataLayer (line 21) | class BaseDataLayer : public Layer<Dtype> {
method ShareInParallel (line 30) | virtual inline bool ShareInParallel() const { return true; }
method DataLayerSetUp (line 31) | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 34) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 37) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 39) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class Batch (line 49) | class Batch {
class BasePrefetchingDataLayer (line 55) | class BasePrefetchingDataLayer :
FILE: caffe-fpn/include/caffe/layers/batch_norm_layer.hpp
type caffe (line 10) | namespace caffe {
class BatchNormLayer (line 43) | class BatchNormLayer : public Layer<Dtype> {
method BatchNormLayer (line 45) | explicit BatchNormLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 53) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 54) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/batch_reindex_layer.hpp
type caffe (line 11) | namespace caffe {
class BatchReindexLayer (line 21) | class BatchReindexLayer : public Layer<Dtype> {
method BatchReindexLayer (line 23) | explicit BatchReindexLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
type pair_sort_first (line 71) | struct pair_sort_first {
FILE: caffe-fpn/include/caffe/layers/bias_layer.hpp
type caffe (line 10) | namespace caffe {
class BiasLayer (line 22) | class BiasLayer : public Layer<Dtype> {
method BiasLayer (line 24) | explicit BiasLayer(const LayerParameter& param)
method MinBottomBlobs (line 32) | virtual inline int MinBottomBlobs() const { return 1; }
method MaxBottomBlobs (line 33) | virtual inline int MaxBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/bnll_layer.hpp
type caffe (line 12) | namespace caffe {
class BNLLLayer (line 32) | class BNLLLayer : public NeuronLayer<Dtype> {
method BNLLLayer (line 34) | explicit BNLLLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/conadd_layer.hpp
type caffe (line 9) | namespace caffe {
class ConaddLayer (line 16) | class ConaddLayer : public Layer<Dtype> {
method ConaddLayer (line 18) | explicit ConaddLayer(const LayerParameter& param)
method MinBottomBlobs (line 26) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/concat_layer.hpp
type caffe (line 10) | namespace caffe {
class ConcatLayer (line 17) | class ConcatLayer : public Layer<Dtype> {
method ConcatLayer (line 19) | explicit ConcatLayer(const LayerParameter& param)
method MinBottomBlobs (line 27) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 28) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/contrastive_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class ContrastiveLossLayer (line 39) | class ContrastiveLossLayer : public LossLayer<Dtype> {
method ContrastiveLossLayer (line 41) | explicit ContrastiveLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 46) | virtual inline int ExactNumBottomBlobs() const { return 3; }
method AllowForceBackward (line 52) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe-fpn/include/caffe/layers/conv_layer.hpp
type caffe (line 12) | namespace caffe {
class ConvolutionLayer (line 31) | class ConvolutionLayer : public BaseConvolutionLayer<Dtype> {
method ConvolutionLayer (line 64) | explicit ConvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 81) | virtual inline bool reverse_dimensions() { return false; }
FILE: caffe-fpn/include/caffe/layers/crop_layer.hpp
type caffe (line 11) | namespace caffe {
class CropLayer (line 21) | class CropLayer : public Layer<Dtype> {
method CropLayer (line 23) | explicit CropLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 31) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 32) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/cudnn_conv_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNConvolutionLayer (line 30) | class CuDNNConvolutionLayer : public ConvolutionLayer<Dtype> {
method CuDNNConvolutionLayer (line 32) | explicit CuDNNConvolutionLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_lcn_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNLCNLayer (line 17) | class CuDNNLCNLayer : public LRNLayer<Dtype> {
method CuDNNLCNLayer (line 19) | explicit CuDNNLCNLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_lrn_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNLRNLayer (line 16) | class CuDNNLRNLayer : public LRNLayer<Dtype> {
method CuDNNLRNLayer (line 18) | explicit CuDNNLRNLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_pooling_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNPoolingLayer (line 20) | class CuDNNPoolingLayer : public PoolingLayer<Dtype> {
method CuDNNPoolingLayer (line 22) | explicit CuDNNPoolingLayer(const LayerParameter& param)
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return -1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/cudnn_relu_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNReLULayer (line 20) | class CuDNNReLULayer : public ReLULayer<Dtype> {
method CuDNNReLULayer (line 22) | explicit CuDNNReLULayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_sigmoid_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNSigmoidLayer (line 20) | class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
method CuDNNSigmoidLayer (line 22) | explicit CuDNNSigmoidLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_softmax_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNSoftmaxLayer (line 20) | class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> {
method CuDNNSoftmaxLayer (line 22) | explicit CuDNNSoftmaxLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/cudnn_tanh_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNTanHLayer (line 20) | class CuDNNTanHLayer : public TanHLayer<Dtype> {
method CuDNNTanHLayer (line 22) | explicit CuDNNTanHLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/data_layer.hpp
type caffe (line 15) | namespace caffe {
class DataLayer (line 18) | class DataLayer : public BasePrefetchingDataLayer<Dtype> {
method ShareInParallel (line 25) | virtual inline bool ShareInParallel() const { return false; }
method ExactNumBottomBlobs (line 27) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 28) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 29) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe-fpn/include/caffe/layers/deconv_layer.hpp
type caffe (line 12) | namespace caffe {
class DeconvolutionLayer (line 29) | class DeconvolutionLayer : public BaseConvolutionLayer<Dtype> {
method DeconvolutionLayer (line 31) | explicit DeconvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 45) | virtual inline bool reverse_dimensions() { return true; }
FILE: caffe-fpn/include/caffe/layers/deformable_conv_layer.hpp
type caffe (line 12) | namespace caffe {
class DeformableConvolutionLayer (line 31) | class DeformableConvolutionLayer : public BaseConvolutionLayer<Dtype> {
method DeformableConvolutionLayer (line 64) | explicit DeformableConvolutionLayer(const LayerParameter& param)
method EqualNumBottomTopBlobs (line 68) | virtual inline bool EqualNumBottomTopBlobs() const { return false; }
method reverse_dimensions (line 89) | virtual inline bool reverse_dimensions() { return false; }
FILE: caffe-fpn/include/caffe/layers/dropout_layer.hpp
type caffe (line 12) | namespace caffe {
class DropoutLayer (line 26) | class DropoutLayer : public NeuronLayer<Dtype> {
method DropoutLayer (line 34) | explicit DropoutLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/dummy_data_layer.hpp
type caffe (line 11) | namespace caffe {
class DummyDataLayer (line 19) | class DummyDataLayer : public Layer<Dtype> {
method DummyDataLayer (line 21) | explicit DummyDataLayer(const LayerParameter& param)
method ShareInParallel (line 26) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 28) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 33) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 38) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 40) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/eltwise_layer.hpp
type caffe (line 10) | namespace caffe {
class EltwiseLayer (line 19) | class EltwiseLayer : public Layer<Dtype> {
method EltwiseLayer (line 21) | explicit EltwiseLayer(const LayerParameter& param)
method MinBottomBlobs (line 29) | virtual inline int MinBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/elu_layer.hpp
type caffe (line 12) | namespace caffe {
class ELULayer (line 24) | class ELULayer : public NeuronLayer<Dtype> {
method ELULayer (line 32) | explicit ELULayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/embed_layer.hpp
type caffe (line 10) | namespace caffe {
class EmbedLayer (line 20) | class EmbedLayer : public Layer<Dtype> {
method EmbedLayer (line 22) | explicit EmbedLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/euclidean_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class EuclideanLossLayer (line 41) | class EuclideanLossLayer : public LossLayer<Dtype> {
method EuclideanLossLayer (line 43) | explicit EuclideanLossLayer(const LayerParameter& param)
method AllowForceBackward (line 53) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe-fpn/include/caffe/layers/exp_layer.hpp
type caffe (line 12) | namespace caffe {
class ExpLayer (line 20) | class ExpLayer : public NeuronLayer<Dtype> {
method ExpLayer (line 30) | explicit ExpLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/filter_layer.hpp
type caffe (line 10) | namespace caffe {
class FilterLayer (line 19) | class FilterLayer : public Layer<Dtype> {
method FilterLayer (line 21) | explicit FilterLayer(const LayerParameter& param)
method MinBottomBlobs (line 29) | virtual inline int MinBottomBlobs() const { return 2; }
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/flatten_layer.hpp
type caffe (line 10) | namespace caffe {
class FlattenLayer (line 23) | class FlattenLayer : public Layer<Dtype> {
method FlattenLayer (line 25) | explicit FlattenLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 31) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 32) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/hdf5_data_layer.hpp
type caffe (line 15) | namespace caffe {
class HDF5DataLayer (line 23) | class HDF5DataLayer : public Layer<Dtype> {
method HDF5DataLayer (line 25) | explicit HDF5DataLayer(const LayerParameter& param)
method ShareInParallel (line 31) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 33) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 37) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 38) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 45) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 47) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/hdf5_output_layer.hpp
type caffe (line 13) | namespace caffe {
class HDF5OutputLayer (line 24) | class HDF5OutputLayer : public Layer<Dtype> {
method HDF5OutputLayer (line 26) | explicit HDF5OutputLayer(const LayerParameter& param)
method ShareInParallel (line 32) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 34) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 39) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 40) | virtual inline int ExactNumTopBlobs() const { return 0; }
method file_name (line 42) | inline std::string file_name() const { return file_name_; }
FILE: caffe-fpn/include/caffe/layers/hinge_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class HingeLossLayer (line 58) | class HingeLossLayer : public LossLayer<Dtype> {
method HingeLossLayer (line 60) | explicit HingeLossLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/im2col_layer.hpp
type caffe (line 10) | namespace caffe {
class Im2colLayer (line 20) | class Im2colLayer : public Layer<Dtype> {
method Im2colLayer (line 22) | explicit Im2colLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/image_data_layer.hpp
type caffe (line 15) | namespace caffe {
class ImageDataLayer (line 23) | class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> {
method ImageDataLayer (line 25) | explicit ImageDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 33) | virtual inline int ExactNumTopBlobs() const { return 2; }
FILE: caffe-fpn/include/caffe/layers/infogain_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class InfogainLossLayer (line 47) | class InfogainLossLayer : public LossLayer<Dtype> {
method InfogainLossLayer (line 49) | explicit InfogainLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 59) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 60) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 61) | virtual inline int MaxBottomBlobs() const { return 3; }
FILE: caffe-fpn/include/caffe/layers/inner_product_layer.hpp
type caffe (line 10) | namespace caffe {
class InnerProductLayer (line 19) | class InnerProductLayer : public Layer<Dtype> {
method InnerProductLayer (line 21) | explicit InnerProductLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/log_layer.hpp
type caffe (line 12) | namespace caffe {
class LogLayer (line 20) | class LogLayer : public NeuronLayer<Dtype> {
method LogLayer (line 30) | explicit LogLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/loss_layer.hpp
type caffe (line 10) | namespace caffe {
class LossLayer (line 23) | class LossLayer : public Layer<Dtype> {
method LossLayer (line 25) | explicit LossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method AutoTopBlobs (line 40) | virtual inline bool AutoTopBlobs() const { return true; }
method ExactNumTopBlobs (line 41) | virtual inline int ExactNumTopBlobs() const { return 1; }
method AllowForceBackward (line 46) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe-fpn/include/caffe/layers/lrn_layer.hpp
type caffe (line 15) | namespace caffe {
class LRNLayer (line 23) | class LRNLayer : public Layer<Dtype> {
method LRNLayer (line 25) | explicit LRNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 33) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/memory_data_layer.hpp
type caffe (line 12) | namespace caffe {
class MemoryDataLayer (line 20) | class MemoryDataLayer : public BaseDataLayer<Dtype> {
method MemoryDataLayer (line 22) | explicit MemoryDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 28) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 29) | virtual inline int ExactNumTopBlobs() const { return 2; }
method batch_size (line 42) | int batch_size() { return batch_size_; }
method channels (line 43) | int channels() { return channels_; }
method height (line 44) | int height() { return height_; }
method width (line 45) | int width() { return width_; }
FILE: caffe-fpn/include/caffe/layers/multinomial_logistic_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class MultinomialLogisticLossLayer (line 44) | class MultinomialLogisticLossLayer : public LossLayer<Dtype> {
method MultinomialLogisticLossLayer (line 46) | explicit MultinomialLogisticLossLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/mvn_layer.hpp
type caffe (line 10) | namespace caffe {
class MVNLayer (line 18) | class MVNLayer : public Layer<Dtype> {
method MVNLayer (line 20) | explicit MVNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/neuron_layer.hpp
type caffe (line 10) | namespace caffe {
class NeuronLayer (line 19) | class NeuronLayer : public Layer<Dtype> {
method NeuronLayer (line 21) | explicit NeuronLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/pooling_layer.hpp
type caffe (line 10) | namespace caffe {
class PoolingLayer (line 18) | class PoolingLayer : public Layer<Dtype> {
method PoolingLayer (line 20) | explicit PoolingLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 28) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 29) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 32) | virtual inline int MaxTopBlobs() const {
FILE: caffe-fpn/include/caffe/layers/power_layer.hpp
type caffe (line 12) | namespace caffe {
class PowerLayer (line 20) | class PowerLayer : public NeuronLayer<Dtype> {
method PowerLayer (line 29) | explicit PowerLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/prelu_layer.hpp
type caffe (line 12) | namespace caffe {
class PReLULayer (line 23) | class PReLULayer : public NeuronLayer<Dtype> {
method PReLULayer (line 33) | explicit PReLULayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/python_layer.hpp
type caffe (line 11) | namespace caffe {
class PythonLayer (line 14) | class PythonLayer : public Layer<Dtype> {
method PythonLayer (line 16) | PythonLayer(PyObject* self, const LayerParameter& param)
method LayerSetUp (line 19) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 31) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ShareInParallel (line 36) | virtual inline bool ShareInParallel() const {
method Forward_cpu (line 43) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 47) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/reduction_layer.hpp
type caffe (line 10) | namespace caffe {
class ReductionLayer (line 20) | class ReductionLayer : public Layer<Dtype> {
method ReductionLayer (line 22) | explicit ReductionLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/relu_layer.hpp
type caffe (line 12) | namespace caffe {
class ReLULayer (line 19) | class ReLULayer : public NeuronLayer<Dtype> {
method ReLULayer (line 27) | explicit ReLULayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/reshape_layer.hpp
type caffe (line 10) | namespace caffe {
class ReshapeLayer (line 19) | class ReshapeLayer : public Layer<Dtype> {
method ReshapeLayer (line 21) | explicit ReshapeLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Forward_cpu (line 33) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 35) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Forward_gpu (line 37) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 39) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/scale_layer.hpp
type caffe (line 12) | namespace caffe {
class ScaleLayer (line 24) | class ScaleLayer: public Layer<Dtype> {
method ScaleLayer (line 26) | explicit ScaleLayer(const LayerParameter& param)
method MinBottomBlobs (line 35) | virtual inline int MinBottomBlobs() const { return 1; }
method MaxBottomBlobs (line 36) | virtual inline int MaxBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 37) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp
type caffe (line 13) | namespace caffe {
class SigmoidCrossEntropyLossLayer (line 45) | class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
method SigmoidCrossEntropyLossLayer (line 47) | explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/sigmoid_layer.hpp
type caffe (line 12) | namespace caffe {
class SigmoidLayer (line 23) | class SigmoidLayer : public NeuronLayer<Dtype> {
method SigmoidLayer (line 25) | explicit SigmoidLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/silence_layer.hpp
type caffe (line 10) | namespace caffe {
class SilenceLayer (line 17) | class SilenceLayer : public Layer<Dtype> {
method SilenceLayer (line 19) | explicit SilenceLayer(const LayerParameter& param)
method Reshape (line 21) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method MinBottomBlobs (line 25) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 26) | virtual inline int ExactNumTopBlobs() const { return 0; }
method Forward_cpu (line 29) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
FILE: caffe-fpn/include/caffe/layers/slice_layer.hpp
type caffe (line 10) | namespace caffe {
class SliceLayer (line 19) | class SliceLayer : public Layer<Dtype> {
method SliceLayer (line 21) | explicit SliceLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/softmax_layer.hpp
type caffe (line 10) | namespace caffe {
class SoftmaxLayer (line 18) | class SoftmaxLayer : public Layer<Dtype> {
method SoftmaxLayer (line 20) | explicit SoftmaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/softmax_loss_layer.hpp
type caffe (line 13) | namespace caffe {
class SoftmaxWithLossLayer (line 44) | class SoftmaxWithLossLayer : public LossLayer<Dtype> {
method SoftmaxWithLossLayer (line 54) | explicit SoftmaxWithLossLayer(const LayerParameter& param)
method ExactNumTopBlobs (line 62) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 63) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 64) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe-fpn/include/caffe/layers/split_layer.hpp
type caffe (line 10) | namespace caffe {
class SplitLayer (line 19) | class SplitLayer : public Layer<Dtype> {
method SplitLayer (line 21) | explicit SplitLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 27) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 28) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/spp_layer.hpp
type caffe (line 10) | namespace caffe {
class SPPLayer (line 19) | class SPPLayer : public Layer<Dtype> {
method SPPLayer (line 21) | explicit SPPLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/tanh_layer.hpp
type caffe (line 12) | namespace caffe {
class TanHLayer (line 23) | class TanHLayer : public NeuronLayer<Dtype> {
method TanHLayer (line 25) | explicit TanHLayer(const LayerParameter& param)
FILE: caffe-fpn/include/caffe/layers/threshold_layer.hpp
type caffe (line 12) | namespace caffe {
class ThresholdLayer (line 19) | class ThresholdLayer : public NeuronLayer<Dtype> {
method ThresholdLayer (line 27) | explicit ThresholdLayer(const LayerParameter& param)
method Backward_cpu (line 54) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe-fpn/include/caffe/layers/tile_layer.hpp
type caffe (line 10) | namespace caffe {
class TileLayer (line 16) | class TileLayer : public Layer<Dtype> {
method TileLayer (line 18) | explicit TileLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 24) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 25) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe-fpn/include/caffe/layers/window_data_layer.hpp
type caffe (line 15) | namespace caffe {
class WindowDataLayer (line 24) | class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
method WindowDataLayer (line 26) | explicit WindowDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 33) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 2; }
type WindowField (line 42) | enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }
FILE: caffe-fpn/include/caffe/net.hpp
type caffe (line 15) | namespace caffe {
class Net (line 24) | class Net {
method ForwardBackward (line 85) | Dtype ForwardBackward(const vector<Blob<Dtype>* > & bottom) {
method name (line 123) | inline const string& name() const { return name_; }
method Phase (line 137) | inline Phase phase() const { return phase_; }
method num_inputs (line 198) | inline int num_inputs() const { return net_input_blobs_.size(); }
method num_outputs (line 199) | inline int num_outputs() const { return net_output_blobs_.size(); }
method set_debug_info (line 217) | void set_debug_info(const bool value) { debug_info_ = value; }
FILE: caffe-fpn/include/caffe/parallel.hpp
type caffe (line 17) | namespace caffe {
class Params (line 23) | class Params {
method size (line 29) | inline size_t size() const {
method data (line 32) | inline Dtype* data() const {
method diff (line 35) | inline Dtype* diff() const {
class GPUParams (line 49) | class GPUParams : public Params<Dtype> {
class DevicePair (line 62) | class DevicePair {
method DevicePair (line 64) | DevicePair(int parent, int device)
method parent (line 68) | inline int parent() {
method device (line 71) | inline int device() {
class P2PSync (line 85) | class P2PSync : public GPUParams<Dtype>, public Solver<Dtype>::Callback,
FILE: caffe-fpn/include/caffe/sgd_solvers.hpp
type caffe (line 9) | namespace caffe {
class SGDSolver (line 16) | class SGDSolver : public Solver<Dtype> {
method SGDSolver (line 18) | explicit SGDSolver(const SolverParameter& param)
method SGDSolver (line 20) | explicit SGDSolver(const string& param_file)
class NesterovSolver (line 49) | class NesterovSolver : public SGDSolver<Dtype> {
method NesterovSolver (line 51) | explicit NesterovSolver(const SolverParameter& param)
method NesterovSolver (line 53) | explicit NesterovSolver(const string& param_file)
class AdaGradSolver (line 64) | class AdaGradSolver : public SGDSolver<Dtype> {
method AdaGradSolver (line 66) | explicit AdaGradSolver(const SolverParameter& param)
method AdaGradSolver (line 68) | explicit AdaGradSolver(const string& param_file)
method constructor_sanity_check (line 74) | void constructor_sanity_check() {
class RMSPropSolver (line 84) | class RMSPropSolver : public SGDSolver<Dtype> {
method RMSPropSolver (line 86) | explicit RMSPropSolver(const SolverParameter& param)
method RMSPropSolver (line 88) | explicit RMSPropSolver(const string& param_file)
method constructor_sanity_check (line 94) | void constructor_sanity_check() {
class AdaDeltaSolver (line 107) | class AdaDeltaSolver : public SGDSolver<Dtype> {
method AdaDeltaSolver (line 109) | explicit AdaDeltaSolver(const SolverParameter& param)
method AdaDeltaSolver (line 111) | explicit AdaDeltaSolver(const string& param_file)
class AdamSolver (line 131) | class AdamSolver : public SGDSolver<Dtype> {
method AdamSolver (line 133) | explicit AdamSolver(const SolverParameter& param)
method AdamSolver (line 135) | explicit AdamSolver(const string& param_file)
FILE: caffe-fpn/include/caffe/solver.hpp
type caffe (line 10) | namespace caffe {
type SolverAction (line 20) | namespace SolverAction {
type Enum (line 21) | enum Enum {
class Solver (line 41) | class Solver {
method Solve (line 58) | inline void Solve(const string resume_file) { Solve(resume_file.c_st...
method param (line 70) | inline const SolverParameter& param() const { return param_; }
method net (line 71) | inline shared_ptr<Net<Dtype> > net() { return net_; }
method iter (line 75) | int iter() { return iter_; }
class Callback (line 78) | class Callback {
method add_callback (line 87) | void add_callback(Callback* value) {
class WorkerSolver (line 140) | class WorkerSolver : public Solver<Dtype> {
method WorkerSolver (line 142) | explicit WorkerSolver(const SolverParameter& param,
method ApplyUpdate (line 147) | void ApplyUpdate() {}
method SnapshotSolverState (line 148) | void SnapshotSolverState(const string& model_filename) {
method RestoreSolverStateFromBinaryProto (line 151) | void RestoreSolverStateFromBinaryProto(const string& state_file) {
method RestoreSolverStateFromHDF5 (line 154) | void RestoreSolverStateFromHDF5(const string& state_file) {
FILE: caffe-fpn/include/caffe/solver_factory.hpp
type caffe (line 48) | namespace caffe {
class Solver (line 51) | class Solver
class SolverRegistry (line 54) | class SolverRegistry {
method Registry (line 59) | static CreatorRegistry& Registry() {
method AddCreator (line 65) | static void AddCreator(const string& type, Creator creator) {
method SolverTypeList (line 81) | static vector<string> SolverTypeList() {
method SolverRegistry (line 94) | SolverRegistry() {}
method SolverTypeListString (line 96) | static string SolverTypeListString() {
class SolverRegisterer (line 112) | class SolverRegisterer {
method SolverRegisterer (line 114) | SolverRegisterer(const string& type,
FILE: caffe-fpn/include/caffe/syncedmem.hpp
type caffe (line 8) | namespace caffe {
function CaffeMallocHost (line 15) | inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) {
function CaffeFreeHost (line 28) | inline void CaffeFreeHost(void* ptr, bool use_cuda) {
class SyncedMemory (line 45) | class SyncedMemory {
method SyncedMemory (line 47) | SyncedMemory()
method SyncedMemory (line 51) | explicit SyncedMemory(size_t size)
type SyncedHead (line 62) | enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }
method head (line 63) | SyncedHead head() { return head_; }
method size (line 64) | size_t size() { return size_; }
FILE: caffe-fpn/include/caffe/test/test_caffe_main.hpp
type caffe (line 28) | namespace caffe {
class MultiDeviceTest (line 31) | class MultiDeviceTest : public ::testing::Test {
method MultiDeviceTest (line 35) | MultiDeviceTest() {
type CPUDevice (line 44) | struct CPUDevice {
class CPUDeviceTest (line 50) | class CPUDeviceTest : public MultiDeviceTest<CPUDevice<Dtype> > {
type GPUDevice (line 61) | struct GPUDevice {
class GPUDeviceTest (line 67) | class GPUDeviceTest : public MultiDeviceTest<GPUDevice<Dtype> > {
FILE: caffe-fpn/include/caffe/test/test_gradient_check_util.hpp
type caffe (line 14) | namespace caffe {
class GradientChecker (line 19) | class GradientChecker {
method GradientChecker (line 24) | GradientChecker(const Dtype stepsize, const Dtype threshold,
method CheckGradient (line 33) | void CheckGradient(Layer<Dtype>* layer, const vector<Blob<Dtype>*>& ...
function GetObjAndGradient (line 233) | Dtype GradientChecker<Dtype>::GetObjAndGradient(const Layer<Dtype>& la...
FILE: caffe-fpn/include/caffe/util/benchmark.hpp
type caffe (line 8) | namespace caffe {
class Timer (line 10) | class Timer {
method initted (line 20) | inline bool initted() { return initted_; }
method running (line 21) | inline bool running() { return running_; }
method has_run_at_least_once (line 22) | inline bool has_run_at_least_once() { return has_run_at_least_once_; }
class CPUTimer (line 40) | class CPUTimer : public Timer {
FILE: caffe-fpn/include/caffe/util/blocking_queue.hpp
type caffe (line 7) | namespace caffe {
class BlockingQueue (line 10) | class BlockingQueue {
class sync (line 35) | class sync
FILE: caffe-fpn/include/caffe/util/cudnn.hpp
type caffe (line 52) | namespace caffe {
type cudnn (line 54) | namespace cudnn {
class dataType (line 56) | class dataType
class dataType<float> (line 57) | class dataType<float> {
class dataType<double> (line 63) | class dataType<double> {
function createTensor4dDesc (line 71) | inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) {
function setTensor4dDesc (line 76) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function setTensor4dDesc (line 84) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function createFilterDesc (line 95) | inline void createFilterDesc(cudnnFilterDescriptor_t* desc,
function createConvolutionDesc (line 108) | inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) {
function setConvolutionDesc (line 113) | inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv,
function createPoolingDesc (line 127) | inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc,
function createActivationDescriptor (line 151) | inline void createActivationDescriptor(cudnnActivationDescriptor_t* ...
FILE: caffe-fpn/include/caffe/util/db.hpp
type caffe (line 9) | namespace caffe { namespace db {
type db (line 9) | namespace db {
type Mode (line 11) | enum Mode { READ, WRITE, NEW }
class Cursor (line 13) | class Cursor {
method Cursor (line 15) | Cursor() { }
class Transaction (line 26) | class Transaction {
method Transaction (line 28) | Transaction() { }
class DB (line 36) | class DB {
method DB (line 38) | DB() { }
FILE: caffe-fpn/include/caffe/util/db_leveldb.hpp
type caffe (line 12) | namespace caffe { namespace db {
type db (line 12) | namespace db {
class LevelDBCursor (line 14) | class LevelDBCursor : public Cursor {
method LevelDBCursor (line 16) | explicit LevelDBCursor(leveldb::Iterator* iter)
method SeekToFirst (line 19) | virtual void SeekToFirst() { iter_->SeekToFirst(); }
method Next (line 20) | virtual void Next() { iter_->Next(); }
method key (line 21) | virtual string key() { return iter_->key().ToString(); }
method value (line 22) | virtual string value() { return iter_->value().ToString(); }
method valid (line 23) | virtual bool valid() { return iter_->Valid(); }
class LevelDBTransaction (line 29) | class LevelDBTransaction : public Transaction {
method LevelDBTransaction (line 31) | explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOT...
method Put (line 32) | virtual void Put(const string& key, const string& value) {
method Commit (line 35) | virtual void Commit() {
class LevelDB (line 48) | class LevelDB : public DB {
method LevelDB (line 50) | LevelDB() : db_(NULL) { }
method Close (line 53) | virtual void Close() {
method LevelDBCursor (line 59) | virtual LevelDBCursor* NewCursor() {
method LevelDBTransaction (line 62) | virtual LevelDBTransaction* NewTransaction() {
FILE: caffe-fpn/include/caffe/util/db_lmdb.hpp
type caffe (line 11) | namespace caffe { namespace db {
type db (line 11) | namespace db {
function MDB_CHECK (line 13) | inline void MDB_CHECK(int mdb_status) {
class LMDBCursor (line 17) | class LMDBCursor : public Cursor {
method LMDBCursor (line 19) | explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor)
method SeekToFirst (line 27) | virtual void SeekToFirst() { Seek(MDB_FIRST); }
method Next (line 28) | virtual void Next() { Seek(MDB_NEXT); }
method string (line 29) | virtual string key() {
method string (line 32) | virtual string value() {
method valid (line 36) | virtual bool valid() { return valid_; }
method Seek (line 39) | void Seek(MDB_cursor_op op) {
class LMDBTransaction (line 55) | class LMDBTransaction : public Transaction {
method LMDBTransaction (line 57) | explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn)
method Commit (line 60) | virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); }
class LMDB (line 69) | class LMDB : public DB {
method LMDB (line 71) | LMDB() : mdb_env_(NULL) { }
method Close (line 74) | virtual void Close() {
FILE: caffe-fpn/include/caffe/util/deformable_im2col.hpp
type caffe (line 4) | namespace caffe {
FILE: caffe-fpn/include/caffe/util/device_alternate.hpp
type caffe (line 78) | namespace caffe {
function CAFFE_GET_BLOCKS (line 88) | inline int CAFFE_GET_BLOCKS(const int N) {
FILE: caffe-fpn/include/caffe/util/format.hpp
type caffe (line 8) | namespace caffe {
function format_int (line 10) | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) {
FILE: caffe-fpn/include/caffe/util/hdf5.hpp
type caffe (line 11) | namespace caffe {
FILE: caffe-fpn/include/caffe/util/im2col.hpp
type caffe (line 4) | namespace caffe {
FILE: caffe-fpn/include/caffe/util/insert_splits.hpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/include/caffe/util/io.hpp
type caffe (line 19) | namespace caffe {
function MakeTempDir (line 24) | inline void MakeTempDir(string* temp_dirname) {
function MakeTempFilename (line 39) | inline void MakeTempFilename(string* temp_filename) {
function ReadProtoFromTextFile (line 54) | inline bool ReadProtoFromTextFile(const string& filename, Message* pro...
function ReadProtoFromTextFileOrDie (line 58) | inline void ReadProtoFromTextFileOrDie(const char* filename, Message* ...
function ReadProtoFromTextFileOrDie (line 62) | inline void ReadProtoFromTextFileOrDie(const string& filename, Message...
function WriteProtoToTextFile (line 67) | inline void WriteProtoToTextFile(const Message& proto, const string& f...
function ReadProtoFromBinaryFile (line 73) | inline bool ReadProtoFromBinaryFile(const string& filename, Message* p...
function ReadProtoFromBinaryFileOrDie (line 77) | inline void ReadProtoFromBinaryFileOrDie(const char* filename, Message...
function ReadProtoFromBinaryFileOrDie (line 81) | inline void ReadProtoFromBinaryFileOrDie(const string& filename,
function WriteProtoToBinaryFile (line 88) | inline void WriteProtoToBinaryFile(
function ReadFileToDatum (line 95) | inline bool ReadFileToDatum(const string& filename, Datum* datum) {
function ReadImageToDatum (line 103) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 109) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 114) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 119) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 124) | inline bool ReadImageToDatum(const string& filename, const int label,
FILE: caffe-fpn/include/caffe/util/math_functions.hpp
type caffe (line 13) | namespace caffe {
function caffe_memset (line 42) | inline void caffe_memset(const size_t N, const int alpha, void* X) {
function caffe_sign (line 111) | inline int8_t caffe_sign(Dtype val) {
function caffe_gpu_memset (line 174) | inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) {
FILE: caffe-fpn/include/caffe/util/mkl_alternate.hpp
function cblas_saxpby (line 83) | inline void cblas_saxpby(const int N, const float alpha, const float* X,
function cblas_daxpby (line 89) | inline void cblas_daxpby(const int N, const double alpha, const double* X,
FILE: caffe-fpn/include/caffe/util/rng.hpp
type caffe (line 12) | namespace caffe {
function rng_t (line 16) | inline rng_t* caffe_rng() {
function shuffle (line 22) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
function shuffle (line 38) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator e...
FILE: caffe-fpn/include/caffe/util/signal_handler.h
function namespace (line 7) | namespace caffe {
FILE: caffe-fpn/include/caffe/util/upgrade_proto.hpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/matlab/+caffe/private/caffe_.cpp
function mxCHECK (line 25) | inline void mxCHECK(bool expr, const char* msg) {
function mxERROR (line 30) | inline void mxERROR(const char* msg) { mexErrMsgTxt(msg); }
function mxCHECK_FILE_EXIST (line 33) | void mxCHECK_FILE_EXIST(const char* file) {
type WhichMemory (line 54) | enum WhichMemory { DATA, DIFF }
function mx_mat_to_blob (line 57) | static void mx_mat_to_blob(const mxArray* mx_mat, Blob<float>* blob,
function mxArray (line 79) | static mxArray* blob_to_mx_mat(const Blob<float>* blob,
function mxArray (line 110) | static mxArray* int_vec_to_mx_vec(const vector<int>& int_vec) {
function mxArray (line 120) | static mxArray* str_vec_to_mx_strcell(const vector<std::string>& str_vec) {
function T (line 136) | static T* handle_to_ptr(const mxArray* mx_handle) {
function mxArray (line 148) | static mxArray* create_handle_vec(int ptr_num) {
function setup_handle (line 156) | static void setup_handle(const T* ptr, int index, mxArray* mx_handle_vec) {
function mxArray (line 166) | static mxArray* ptr_to_handle(const T* ptr) {
function mxArray (line 174) | static mxArray* ptr_vec_to_handle_vec(const vector<shared_ptr<T> >& ptr_...
function get_solver (line 186) | static void get_solver(MEX_ARGS) {
function solver_get_attr (line 201) | static void solver_get_attr(MEX_ARGS) {
function solver_get_iter (line 217) | static void solver_get_iter(MEX_ARGS) {
function solver_restore (line 225) | static void solver_restore(MEX_ARGS) {
function solver_solve (line 236) | static void solver_solve(MEX_ARGS) {
function solver_step (line 244) | static void solver_step(MEX_ARGS) {
function get_net (line 253) | static void get_net(MEX_ARGS) {
function net_get_attr (line 275) | static void net_get_attr(MEX_ARGS) {
function net_forward (line 300) | static void net_forward(MEX_ARGS) {
function net_backward (line 308) | static void net_backward(MEX_ARGS) {
function net_copy_from (line 316) | static void net_copy_from(MEX_ARGS) {
function net_reshape (line 327) | static void net_reshape(MEX_ARGS) {
function net_save (line 335) | static void net_save(MEX_ARGS) {
function layer_get_attr (line 347) | static void layer_get_attr(MEX_ARGS) {
function layer_get_type (line 361) | static void layer_get_type(MEX_ARGS) {
function blob_get_shape (line 369) | static void blob_get_shape(MEX_ARGS) {
function blob_reshape (line 384) | static void blob_reshape(MEX_ARGS) {
function blob_get_data (line 400) | static void blob_get_data(MEX_ARGS) {
function blob_set_data (line 408) | static void blob_set_data(MEX_ARGS) {
function blob_get_diff (line 416) | static void blob_get_diff(MEX_ARGS) {
function blob_set_diff (line 424) | static void blob_set_diff(MEX_ARGS) {
function set_mode_cpu (line 432) | static void set_mode_cpu(MEX_ARGS) {
function set_mode_gpu (line 438) | static void set_mode_gpu(MEX_ARGS) {
function set_device (line 444) | static void set_device(MEX_ARGS) {
function get_init_key (line 452) | static void get_init_key(MEX_ARGS) {
function reset (line 458) | static void reset(MEX_ARGS) {
function read_mean (line 470) | static void read_mean(MEX_ARGS) {
function write_mean (line 485) | static void write_mean(MEX_ARGS) {
function version (line 508) | static void version(MEX_ARGS) {
type handler_registry (line 517) | struct handler_registry {
function mexFunction (line 561) | void mexFunction(MEX_ARGS) {
FILE: caffe-fpn/python/caffe/_caffe.cpp
type caffe (line 32) | namespace caffe {
function set_mode_cpu (line 39) | void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); }
function set_mode_gpu (line 40) | void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); }
function CheckFile (line 46) | static void CheckFile(const string& filename) {
function CheckContiguousArray (line 55) | void CheckContiguousArray(PyArrayObject* arr, string name,
function Net_Init (line 78) | shared_ptr<Net<Dtype> > Net_Init(
function Net_Init_Load (line 88) | shared_ptr<Net<Dtype> > Net_Init_Load(
function Net_Save (line 99) | void Net_Save(const Net<Dtype>& net, string filename) {
function Net_SetInputArrays (line 105) | void Net_SetInputArrays(Net<Dtype>* net, bp::object data_obj,
type NdarrayConverterGenerator (line 143) | struct NdarrayConverterGenerator {
type apply (line 144) | struct apply
type NdarrayConverterGenerator::apply<Dtype*> (line 148) | struct NdarrayConverterGenerator::apply<Dtype*> {
type type (line 149) | struct type {
method PyObject (line 150) | PyObject* operator() (Dtype* data) const {
method PyTypeObject (line 154) | const PyTypeObject* get_pytype() {
type NdarrayCallPolicies (line 160) | struct NdarrayCallPolicies : public bp::default_call_policies {
method PyObject (line 162) | PyObject* postcall(PyObject* pyargs, PyObject* result) {
function Blob_Reshape (line 182) | bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) {
function BlobVec_add_blob (line 196) | bp::object BlobVec_add_blob(bp::tuple args, bp::dict kwargs) {
function BOOST_PYTHON_MODULE (line 213) | BOOST_PYTHON_MODULE(_caffe) {
FILE: caffe-fpn/python/caffe/classifier.py
class Classifier (line 11) | class Classifier(caffe.Net):
method __init__ (line 23) | def __init__(self, model_file, pretrained_file, image_dims=None,
method predict (line 47) | def predict(self, inputs, oversample=True):
FILE: caffe-fpn/python/caffe/detector.py
class Detector (line 22) | class Detector(caffe.Net):
method __init__ (line 35) | def __init__(self, model_file, pretrained_file, mean=None,
method detect_windows (line 56) | def detect_windows(self, images_windows):
method detect_selective_search (line 101) | def detect_selective_search(self, image_fnames):
method crop (line 125) | def crop(self, im, window):
method configure_crop (line 181) | def configure_crop(self, context_pad):
FILE: caffe-fpn/python/caffe/draw.py
function get_pooling_types_dict (line 36) | def get_pooling_types_dict():
function get_edge_label (line 46) | def get_edge_label(layer):
function get_layer_label (line 62) | def get_layer_label(layer, rankdir):
function choose_color_by_layertype (line 117) | def choose_color_by_layertype(layertype):
function get_pydot_graph (line 130) | def get_pydot_graph(caffe_net, rankdir, label_edges=True):
function draw_net (line 189) | def draw_net(caffe_net, rankdir, ext='png'):
function draw_net_to_file (line 207) | def draw_net_to_file(caffe_net, filename, rankdir='LR'):
FILE: caffe-fpn/python/caffe/io.py
function blobproto_to_array (line 18) | def blobproto_to_array(blob, return_diff=False):
function array_to_blobproto (line 36) | def array_to_blobproto(arr, diff=None):
function arraylist_to_blobprotovecor_str (line 49) | def arraylist_to_blobprotovecor_str(arraylist):
function blobprotovector_str_to_arraylist (line 58) | def blobprotovector_str_to_arraylist(str):
function array_to_datum (line 66) | def array_to_datum(arr, label=0):
function datum_to_array (line 83) | def datum_to_array(datum):
class Transformer (line 97) | class Transformer:
method __init__ (line 108) | def __init__(self, inputs):
method __check_input (line 116) | def __check_input(self, in_):
method preprocess (line 121) | def preprocess(self, in_, data):
method deprocess (line 163) | def deprocess(self, in_, data):
method set_transpose (line 186) | def set_transpose(self, in_, order):
method set_channel_swap (line 202) | def set_channel_swap(self, in_, order):
method set_raw_scale (line 220) | def set_raw_scale(self, in_, scale):
method set_mean (line 235) | def set_mean(self, in_, mean):
method set_input_scale (line 261) | def set_input_scale(self, in_, scale):
function load_image (line 278) | def load_image(filename, color=True):
function resize_image (line 305) | def resize_image(im, new_dims, interp_order=1):
function oversample (line 340) | def oversample(images, crop_dims):
FILE: caffe-fpn/python/caffe/net_spec.py
function param_name_dict (line 28) | def param_name_dict():
function to_proto (line 43) | def to_proto(*tops):
function assign_proto (line 56) | def assign_proto(proto, name, val):
class Top (line 82) | class Top(object):
method __init__ (line 86) | def __init__(self, fn, n):
method to_proto (line 90) | def to_proto(self):
method _to_proto (line 96) | def _to_proto(self, layers, names, autonames):
class Function (line 100) | class Function(object):
method __init__ (line 104) | def __init__(self, type_name, inputs, params):
method _get_name (line 117) | def _get_name(self, names, autonames):
method _get_top_name (line 125) | def _get_top_name(self, top, names, autonames):
method _to_proto (line 131) | def _to_proto(self, layers, names, autonames):
class NetSpec (line 163) | class NetSpec(object):
method __init__ (line 169) | def __init__(self):
method __setattr__ (line 172) | def __setattr__(self, name, value):
method __getattr__ (line 175) | def __getattr__(self, name):
method to_proto (line 178) | def to_proto(self):
class Layers (line 189) | class Layers(object):
method __getattr__ (line 194) | def __getattr__(self, name):
class Parameters (line 206) | class Parameters(object):
method __getattr__ (line 211) | def __getattr__(self, name):
FILE: caffe-fpn/python/caffe/pycaffe.py
function _Net_blobs (line 23) | def _Net_blobs(self):
function _Net_blob_loss_weights (line 32) | def _Net_blob_loss_weights(self):
function _Net_params (line 41) | def _Net_params(self):
function _Net_inputs (line 53) | def _Net_inputs(self):
function _Net_outputs (line 58) | def _Net_outputs(self):
function _Net_forward (line 62) | def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
function _Net_backward (line 111) | def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
function _Net_forward_all (line 159) | def _Net_forward_all(self, blobs=None, **kwargs):
function _Net_forward_backward_all (line 190) | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
function _Net_set_input_arrays (line 235) | def _Net_set_input_arrays(self, data, labels):
function _Net_batch (line 246) | def _Net_batch(self, blobs):
class _Net_IdNameWrapper (line 280) | class _Net_IdNameWrapper:
method __init__ (line 285) | def __init__(self, net, func):
method __getitem__ (line 288) | def __getitem__(self, name):
FILE: caffe-fpn/python/caffe/test/test_io.py
class TestBlobProtoToArray (line 6) | class TestBlobProtoToArray(unittest.TestCase):
method test_old_format (line 8) | def test_old_format(self):
method test_new_format (line 18) | def test_new_format(self):
method test_no_shape (line 27) | def test_no_shape(self):
method test_scalar (line 35) | def test_scalar(self):
FILE: caffe-fpn/python/caffe/test/test_layer_type_list.py
class TestLayerTypeList (line 5) | class TestLayerTypeList(unittest.TestCase):
method test_standard_types (line 7) | def test_standard_types(self):
FILE: caffe-fpn/python/caffe/test/test_net.py
function simple_net_file (line 10) | def simple_net_file(num_output):
class TestNet (line 37) | class TestNet(unittest.TestCase):
method setUp (line 38) | def setUp(self):
method test_memory (line 48) | def test_memory(self):
method test_forward_backward (line 62) | def test_forward_backward(self):
method test_inputs_outputs (line 66) | def test_inputs_outputs(self):
method test_save_and_read (line 70) | def test_save_and_read(self):
FILE: caffe-fpn/python/caffe/test/test_net_spec.py
function lenet (line 7) | def lenet(batch_size):
function anon_lenet (line 26) | def anon_lenet(batch_size):
function silent_net (line 44) | def silent_net():
class TestNetSpec (line 51) | class TestNetSpec(unittest.TestCase):
method load_net (line 52) | def load_net(self, net_proto):
method test_lenet (line 58) | def test_lenet(self):
method test_zero_tops (line 76) | def test_zero_tops(self):
FILE: caffe-fpn/python/caffe/test/test_python_layer.py
class SimpleLayer (line 9) | class SimpleLayer(caffe.Layer):
method setup (line 12) | def setup(self, bottom, top):
method reshape (line 15) | def reshape(self, bottom, top):
method forward (line 18) | def forward(self, bottom, top):
method backward (line 21) | def backward(self, top, propagate_down, bottom):
class ExceptionLayer (line 25) | class ExceptionLayer(caffe.Layer):
method setup (line 28) | def setup(self, bottom, top):
class ParameterLayer (line 31) | class ParameterLayer(caffe.Layer):
method setup (line 34) | def setup(self, bottom, top):
method reshape (line 38) | def reshape(self, bottom, top):
method forward (line 41) | def forward(self, bottom, top):
method backward (line 44) | def backward(self, top, propagate_down, bottom):
function python_net_file (line 47) | def python_net_file():
function exception_net_file (line 60) | def exception_net_file():
function parameter_net_file (line 70) | def parameter_net_file():
class TestPythonLayer (line 82) | class TestPythonLayer(unittest.TestCase):
method setUp (line 83) | def setUp(self):
method test_forward (line 88) | def test_forward(self):
method test_backward (line 95) | def test_backward(self):
method test_reshape (line 102) | def test_reshape(self):
method test_exception (line 110) | def test_exception(self):
method test_parameter (line 115) | def test_parameter(self):
FILE: caffe-fpn/python/caffe/test/test_python_layer_with_param_str.py
class SimpleParamLayer (line 9) | class SimpleParamLayer(caffe.Layer):
method setup (line 12) | def setup(self, bottom, top):
method reshape (line 18) | def reshape(self, bottom, top):
method forward (line 21) | def forward(self, bottom, top):
method backward (line 24) | def backward(self, top, propagate_down, bottom):
function python_param_net_file (line 28) | def python_param_net_file():
class TestLayerWithParam (line 43) | class TestLayerWithParam(unittest.TestCase):
method setUp (line 44) | def setUp(self):
method test_forward (line 49) | def test_forward(self):
method test_backward (line 56) | def test_backward(self):
FILE: caffe-fpn/python/caffe/test/test_solver.py
class TestSolver (line 11) | class TestSolver(unittest.TestCase):
method setUp (line 12) | def setUp(self):
method test_solve (line 36) | def test_solve(self):
method test_net_memory (line 41) | def test_net_memory(self):
method test_snapshot (line 56) | def test_snapshot(self):
FILE: caffe-fpn/python/classify.py
function main (line 17) | def main(argv):
FILE: caffe-fpn/python/detect.py
function main (line 30) | def main(argv):
FILE: caffe-fpn/python/draw_net.py
function parse_args (line 13) | def parse_args():
function main (line 36) | def main():
FILE: caffe-fpn/scripts/cpp_lint.py
function ParseNolintSuppressions (line 464) | def ParseNolintSuppressions(filename, raw_line, linenum, error):
function ResetNolintSuppressions (line 495) | def ResetNolintSuppressions():
function IsErrorSuppressedByNolint (line 500) | def IsErrorSuppressedByNolint(category, linenum):
function Match (line 515) | def Match(pattern, s):
function ReplaceAll (line 525) | def ReplaceAll(pattern, rep, s):
function Search (line 543) | def Search(pattern, s):
class _IncludeState (line 550) | class _IncludeState(dict):
method __init__ (line 584) | def __init__(self):
method ResetSection (line 588) | def ResetSection(self):
method SetLastHeader (line 594) | def SetLastHeader(self, header_path):
method CanonicalizeAlphabeticalOrder (line 597) | def CanonicalizeAlphabeticalOrder(self, header_path):
method IsInAlphabeticalOrder (line 612) | def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
method CheckNextIncludeOrder (line 633) | def CheckNextIncludeOrder(self, header_type):
class _CppLintState (line 687) | class _CppLintState(object):
method __init__ (line 690) | def __init__(self):
method SetOutputFormat (line 703) | def SetOutputFormat(self, output_format):
method SetVerboseLevel (line 707) | def SetVerboseLevel(self, level):
method SetCountingStyle (line 713) | def SetCountingStyle(self, counting_style):
method SetFilters (line 717) | def SetFilters(self, filters):
method ResetErrorCounts (line 742) | def ResetErrorCounts(self):
method IncrementErrorCount (line 747) | def IncrementErrorCount(self, category):
method PrintErrorCounts (line 757) | def PrintErrorCounts(self):
function _OutputFormat (line 767) | def _OutputFormat():
function _SetOutputFormat (line 772) | def _SetOutputFormat(output_format):
function _VerboseLevel (line 777) | def _VerboseLevel():
function _SetVerboseLevel (line 782) | def _SetVerboseLevel(level):
function _SetCountingStyle (line 787) | def _SetCountingStyle(level):
function _Filters (line 792) | def _Filters():
function _SetFilters (line 797) | def _SetFilters(filters):
class _FunctionState (line 810) | class _FunctionState(object):
method __init__ (line 816) | def __init__(self):
method Begin (line 821) | def Begin(self, function_name):
method Count (line 831) | def Count(self):
method Check (line 836) | def Check(self, error, filename, linenum):
method End (line 861) | def End(self):
class _IncludeError (line 866) | class _IncludeError(Exception):
class FileInfo (line 871) | class FileInfo:
method __init__ (line 878) | def __init__(self, filename):
method FullName (line 881) | def FullName(self):
method RepositoryName (line 885) | def RepositoryName(self):
method Split (line 930) | def Split(self):
method BaseName (line 944) | def BaseName(self):
method Extension (line 948) | def Extension(self):
method NoExtension (line 952) | def NoExtension(self):
method IsSource (line 956) | def IsSource(self):
function _ShouldPrintError (line 961) | def _ShouldPrintError(category, confidence, linenum):
function Error (line 988) | def Error(filename, linenum, category, confidence, message):
function IsCppString (line 1045) | def IsCppString(line):
function CleanseRawStrings (line 1062) | def CleanseRawStrings(raw_lines):
function FindNextMultiLineCommentStart (line 1123) | def FindNextMultiLineCommentStart(lines, lineix):
function FindNextMultiLineCommentEnd (line 1134) | def FindNextMultiLineCommentEnd(lines, lineix):
function RemoveMultiLineCommentsFromRange (line 1143) | def RemoveMultiLineCommentsFromRange(lines, begin, end):
function RemoveMultiLineComments (line 1151) | def RemoveMultiLineComments(filename, lines, error):
function CleanseComments (line 1167) | def CleanseComments(line):
class CleansedLines (line 1183) | class CleansedLines(object):
method __init__ (line 1192) | def __init__(self, lines):
method NumLines (line 1204) | def NumLines(self):
method _CollapseStrings (line 1209) | def _CollapseStrings(elided):
function FindEndOfExpressionInLine (line 1230) | def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
function CloseExpression (line 1254) | def CloseExpression(clean_lines, linenum, pos):
function FindStartOfExpressionInLine (line 1300) | def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
function ReverseCloseExpression (line 1327) | def ReverseCloseExpression(clean_lines, linenum, pos):
function CheckForCopyright (line 1372) | def CheckForCopyright(filename, lines, error):
function GetHeaderGuardCPPVariable (line 1384) | def GetHeaderGuardCPPVariable(filename):
function CheckForHeaderGuard (line 1408) | def CheckForHeaderGuard(filename, lines, error):
function CheckForBadCharacters (line 1483) | def CheckForBadCharacters(filename, lines, error):
function CheckForNewlineAtEOF (line 1508) | def CheckForNewlineAtEOF(filename, lines, error):
function CheckForMultilineCommentsAndStrings (line 1526) | def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, ...
function CheckCaffeAlternatives (line 1572) | def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
function CheckCaffeDataLayerSetUp (line 1595) | def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
function CheckCaffeRandom (line 1640) | def CheckCaffeRandom(filename, clean_lines, linenum, error):
function CheckPosixThreading (line 1681) | def CheckPosixThreading(filename, clean_lines, linenum, error):
function CheckVlogArguments (line 1708) | def CheckVlogArguments(filename, clean_lines, linenum, error):
function CheckInvalidIncrement (line 1733) | def CheckInvalidIncrement(filename, clean_lines, linenum, error):
class _BlockInfo (line 1755) | class _BlockInfo(object):
method __init__ (line 1758) | def __init__(self, seen_open_brace):
method CheckBegin (line 1763) | def CheckBegin(self, filename, clean_lines, linenum, error):
method CheckEnd (line 1778) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _ClassInfo (line 1792) | class _ClassInfo(_BlockInfo):
method __init__ (line 1795) | def __init__(self, name, class_or_struct, clean_lines, linenum):
method CheckBegin (line 1829) | def CheckBegin(self, filename, clean_lines, linenum, error):
method CheckEnd (line 1834) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _NamespaceInfo (line 1848) | class _NamespaceInfo(_BlockInfo):
method __init__ (line 1851) | def __init__(self, name, linenum):
method CheckEnd (line 1856) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _PreprocessorInfo (line 1902) | class _PreprocessorInfo(object):
method __init__ (line 1905) | def __init__(self, stack_before_if):
class _NestingState (line 1916) | class _NestingState(object):
method __init__ (line 1919) | def __init__(self):
method SeenOpenBrace (line 1931) | def SeenOpenBrace(self):
method InNamespaceBody (line 1940) | def InNamespaceBody(self):
method UpdatePreprocessor (line 1948) | def UpdatePreprocessor(self, line):
method Update (line 2004) | def Update(self, filename, clean_lines, linenum, error):
method InnermostClass (line 2160) | def InnermostClass(self):
method CheckCompletedBlocks (line 2172) | def CheckCompletedBlocks(self, filename, error):
function CheckForNonStandardConstructs (line 2194) | def CheckForNonStandardConstructs(filename, clean_lines, linenum,
function CheckSpacingForFunctionCall (line 2301) | def CheckSpacingForFunctionCall(filename, line, linenum, error):
function IsBlankLine (line 2369) | def IsBlankLine(line):
function CheckForFunctionLengths (line 2384) | def CheckForFunctionLengths(filename, clean_lines, linenum,
function CheckComment (line 2457) | def CheckComment(comment, filename, linenum, error):
function CheckAccess (line 2486) | def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
function FindNextMatchingAngleBracket (line 2517) | def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
function FindPreviousMatchingAngleBracket (line 2586) | def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
function CheckSpacing (line 2643) | def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
function CheckSectionSpacing (line 2991) | def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
function GetPreviousNonBlankLine (line 3046) | def GetPreviousNonBlankLine(clean_lines, linenum):
function CheckBraces (line 3069) | def CheckBraces(filename, clean_lines, linenum, error):
function CheckEmptyBlockBody (line 3243) | def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
function CheckCheck (line 3278) | def CheckCheck(filename, clean_lines, linenum, error):
function CheckAltTokens (line 3405) | def CheckAltTokens(filename, clean_lines, linenum, error):
function GetLineWidth (line 3437) | def GetLineWidth(line):
function CheckStyle (line 3459) | def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_s...
function _DropCommonSuffixes (line 3576) | def _DropCommonSuffixes(filename):
function _IsTestFilename (line 3603) | def _IsTestFilename(filename):
function _ClassifyInclude (line 3620) | def _ClassifyInclude(fileinfo, include, is_system):
function CheckIncludeLine (line 3680) | def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
function _GetTextInside (line 3752) | def _GetTextInside(text, start_pattern):
function CheckLanguage (line 3834) | def CheckLanguage(filename, clean_lines, linenum, file_extension,
function CheckForNonConstReference (line 4134) | def CheckForNonConstReference(filename, clean_lines, linenum,
function CheckCStyleCast (line 4247) | def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
function FilesBelongToSameModule (line 4399) | def FilesBelongToSameModule(filename_cc, filename_h):
function UpdateIncludeState (line 4454) | def UpdateIncludeState(filename, include_state, io=codecs):
function CheckForIncludeWhatYouUse (line 4483) | def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
function CheckMakePairUsesDeduction (line 4579) | def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
function ProcessLine (line 4600) | def ProcessLine(filename, file_extension, clean_lines, line,
function ProcessFileData (line 4644) | def ProcessFileData(filename, file_extension, lines, error,
function ProcessFile (line 4689) | def ProcessFile(filename, vlevel, extra_check_functions=[]):
function PrintUsage (line 4757) | def PrintUsage(message):
function PrintCategories (line 4770) | def PrintCategories():
function ParseArguments (line 4779) | def ParseArguments(args):
function main (line 4849) | def main():
FILE: caffe-fpn/scripts/download_model_binary.py
function reporthook (line 13) | def reporthook(count, block_size, total_size):
function parse_readme_frontmatter (line 30) | def parse_readme_frontmatter(dirname):
function valid_dirname (line 41) | def valid_dirname(dirname):
function model_checks_out (line 62) | def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
FILE: caffe-fpn/src/caffe/blob.cpp
type caffe (line 11) | namespace caffe {
function Dtype (line 84) | const Dtype* Blob<Dtype>::cpu_data() const {
function Dtype (line 96) | const Dtype* Blob<Dtype>::gpu_data() const {
function Dtype (line 102) | const Dtype* Blob<Dtype>::cpu_diff() const {
function Dtype (line 108) | const Dtype* Blob<Dtype>::gpu_diff() const {
function Dtype (line 114) | Dtype* Blob<Dtype>::mutable_cpu_data() {
function Dtype (line 120) | Dtype* Blob<Dtype>::mutable_gpu_data() {
function Dtype (line 126) | Dtype* Blob<Dtype>::mutable_cpu_diff() {
function Dtype (line 132) | Dtype* Blob<Dtype>::mutable_gpu_diff() {
function Dtype (line 192) | Dtype Blob<Dtype>::asum_data() const {
function Dtype (line 227) | Dtype Blob<Dtype>::asum_diff() const {
function Dtype (line 262) | Dtype Blob<Dtype>::sumsq_data() const {
function Dtype (line 299) | Dtype Blob<Dtype>::sumsq_diff() const {
class Blob<int> (line 539) | class Blob<int>
class Blob<unsigned int> (line 540) | class Blob<unsigned int>
FILE: caffe-fpn/src/caffe/common.cpp
type caffe (line 10) | namespace caffe {
function Caffe (line 15) | Caffe& Caffe::Get() {
function cluster_seedgen (line 23) | int64_t cluster_seedgen(void) {
function GlobalInit (line 43) | void GlobalInit(int* pargc, char*** pargv) {
class Caffe::RNG::Generator (line 74) | class Caffe::RNG::Generator {
method Generator (line 76) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 77) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 198) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 199) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
class Caffe::RNG::Generator (line 196) | class Caffe::RNG::Generator {
method Generator (line 76) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 77) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 198) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 199) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
FILE: caffe-fpn/src/caffe/data_reader.cpp
type caffe (line 11) | namespace caffe {
FILE: caffe-fpn/src/caffe/data_transformer.cpp
type caffe (line 13) | namespace caffe {
FILE: caffe-fpn/src/caffe/internal_thread.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layer.cpp
type caffe (line 4) | namespace caffe {
FILE: caffe-fpn/src/caffe/layer_factory.cpp
type caffe (line 35) | namespace caffe {
function GetConvolutionLayer (line 39) | shared_ptr<Layer<Dtype> > GetConvolutionLayer(
function GetDeformableConvolutionLayer (line 78) | shared_ptr<Layer<Dtype> > GetDeformableConvolutionLayer(
function GetPoolingLayer (line 99) | shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
function GetLRNLayer (line 136) | shared_ptr<Layer<Dtype> > GetLRNLayer(const LayerParameter& param) {
function GetReLULayer (line 173) | shared_ptr<Layer<Dtype> > GetReLULayer(const LayerParameter& param) {
function GetSigmoidLayer (line 196) | shared_ptr<Layer<Dtype> > GetSigmoidLayer(const LayerParameter& param) {
function GetSoftmaxLayer (line 219) | shared_ptr<Layer<Dtype> > GetSoftmaxLayer(const LayerParameter& param) {
function GetTanHLayer (line 242) | shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
function GetPythonLayer (line 265) | shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
FILE: caffe-fpn/src/caffe/layers/absval_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/accuracy_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/argmax_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/base_conv_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/base_data_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/batch_norm_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/batch_reindex_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/bias_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/bnll_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/conadd_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/concat_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/contrastive_loss_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/conv_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/crop_layer.cpp
type caffe (line 13) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_conv_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_lcn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_lrn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_pooling_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_relu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_sigmoid_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_softmax_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/cudnn_tanh_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/data_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/deconv_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/deformable_conv_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/dropout_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/dummy_data_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/eltwise_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/elu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/embed_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/euclidean_loss_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/exp_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/filter_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/flatten_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/hdf5_data_layer.cpp
type caffe (line 20) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/hdf5_output_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/hinge_loss_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/im2col_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/image_data_layer.cpp
type caffe (line 18) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/infogain_loss_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/inner_product_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/log_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/loss_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/lrn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/memory_data_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/multinomial_logistic_loss_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/mvn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/neuron_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/pooling_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/power_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/prelu_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/reduction_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/relu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/reshape_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/roi_pooling_layer.cpp
type caffe (line 17) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/scale_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/sigmoid_layer.cpp
type caffe (line 6) | namespace caffe {
function Dtype (line 9) | inline Dtype sigmoid(Dtype x) {
FILE: caffe-fpn/src/caffe/layers/silence_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/slice_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/smooth_L1_loss_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/softmax_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/softmax_loss_layer.cpp
type caffe (line 8) | namespace caffe {
function Dtype (line 59) | Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
FILE: caffe-fpn/src/caffe/layers/split_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/spp_layer.cpp
type caffe (line 11) | namespace caffe {
function LayerParameter (line 17) | LayerParameter SPPLayer<Dtype>::GetPoolingParam(const int pyramid_level,
FILE: caffe-fpn/src/caffe/layers/tanh_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/threshold_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/tile_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/layers/window_data_layer.cpp
type caffe (line 28) | namespace caffe {
FILE: caffe-fpn/src/caffe/net.cpp
type caffe (line 22) | namespace caffe {
function Dtype (line 565) | Dtype Net<Dtype>::ForwardFromTo(int start, int end) {
function Dtype (line 584) | Dtype Net<Dtype>::ForwardFrom(int start) {
function Dtype (line 589) | Dtype Net<Dtype>::ForwardTo(int end) {
function string (line 614) | string Net<Dtype>::Forward(const string& input_blob_protos, Dtype* los...
FILE: caffe-fpn/src/caffe/parallel.cpp
type caffe (line 15) | namespace caffe {
type Op (line 17) | enum Op {
function apply_buffers (line 26) | static void apply_buffers(const vector<Blob<Dtype>*>& blobs,
function total_size (line 60) | static size_t total_size(const vector<Blob<Dtype>*>& params) {
FILE: caffe-fpn/src/caffe/solver.cpp
type caffe (line 12) | namespace caffe {
function string (line 448) | string Solver<Dtype>::SnapshotFilename(const string extension) {
function string (line 454) | string Solver<Dtype>::SnapshotToBinaryProto() {
function string (line 464) | string Solver<Dtype>::SnapshotToHDF5() {
FILE: caffe-fpn/src/caffe/solvers/adadelta_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/solvers/adagrad_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/solvers/adam_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/solvers/nesterov_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/solvers/rmsprop_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/solvers/sgd_solver.cpp
type caffe (line 9) | namespace caffe {
function Dtype (line 27) | Dtype SGDSolver<Dtype>::GetLearningRate() {
FILE: caffe-fpn/src/caffe/syncedmem.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe-fpn/src/caffe/test/test_accuracy_layer.cpp
type caffe (line 14) | namespace caffe {
class AccuracyLayerTest (line 17) | class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
method AccuracyLayerTest (line 19) | AccuracyLayerTest()
method FillBottoms (line 40) | virtual void FillBottoms() {
function TYPED_TEST (line 74) | TYPED_TEST(AccuracyLayerTest, TestSetup) {
function TYPED_TEST (line 84) | TYPED_TEST(AccuracyLayerTest, TestSetupTopK) {
function TYPED_TEST (line 97) | TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) {
function TYPED_TEST (line 111) | TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
function TYPED_TEST (line 137) | TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) {
function TYPED_TEST (line 180) | TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) {
function TYPED_TEST (line 218) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) {
function TYPED_TEST (line 249) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) {
function TYPED_TEST (line 287) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) {
FILE: caffe-fpn/src/caffe/test/test_argmax_layer.cpp
type caffe (line 13) | namespace caffe {
class ArgMaxLayerTest (line 16) | class ArgMaxLayerTest : public CPUDeviceTest<Dtype> {
method ArgMaxLayerTest (line 18) | ArgMaxLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(ArgMaxLayerTest, TestSetup) {
function TYPED_TEST (line 48) | TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) {
function TYPED_TEST (line 58) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxis) {
function TYPED_TEST (line 70) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxisNegativeIndexing) {
function TYPED_TEST (line 82) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxisMaxVal) {
function TYPED_TEST (line 95) | TYPED_TEST(ArgMaxLayerTest, TestCPU) {
function TYPED_TEST (line 118) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) {
function TYPED_TEST (line 144) | TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) {
function TYPED_TEST (line 174) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) {
function TYPED_TEST (line 206) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxis) {
function TYPED_TEST (line 232) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxisTopK) {
function TYPED_TEST (line 265) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxisMaxValTopK) {
FILE: caffe-fpn/src/caffe/test/test_batch_norm_layer.cpp
type caffe (line 18) | namespace caffe {
class BatchNormLayerTest (line 21) | class BatchNormLayerTest : public MultiDeviceTest<TypeParam> {
method BatchNormLayerTest (line 24) | BatchNormLayerTest()
function TYPED_TEST (line 43) | TYPED_TEST(BatchNormLayerTest, TestForward) {
function TYPED_TEST (line 79) | TYPED_TEST(BatchNormLayerTest, TestForwardInplace) {
function TYPED_TEST (line 123) | TYPED_TEST(BatchNormLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_batch_reindex_layer.cpp
type caffe (line 13) | namespace caffe {
class BatchReindexLayerTest (line 16) | class BatchReindexLayerTest : public MultiDeviceTest<TypeParam> {
method BatchReindexLayerTest (line 20) | BatchReindexLayerTest()
method SetUp (line 25) | virtual void SetUp() {
method TestForward (line 61) | void TestForward() {
function TYPED_TEST (line 105) | TYPED_TEST(BatchReindexLayerTest, TestForward) {
function TYPED_TEST (line 109) | TYPED_TEST(BatchReindexLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_benchmark.cpp
type caffe (line 10) | namespace caffe {
class BenchmarkTest (line 15) | class BenchmarkTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 19) | TYPED_TEST(BenchmarkTest, TestTimerConstructor) {
function TYPED_TEST (line 26) | TYPED_TEST(BenchmarkTest, TestTimerStart) {
function TYPED_TEST (line 43) | TYPED_TEST(BenchmarkTest, TestTimerStop) {
function TYPED_TEST (line 60) | TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) {
function TYPED_TEST (line 75) | TYPED_TEST(BenchmarkTest, TestTimerSeconds) {
FILE: caffe-fpn/src/caffe/test/test_bias_layer.cpp
type caffe (line 14) | namespace caffe {
class BiasLayerTest (line 17) | class BiasLayerTest : public MultiDeviceTest<TypeParam> {
method BiasLayerTest (line 21) | BiasLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(BiasLayerTest, TestForwardEltwise) {
function TYPED_TEST (line 90) | TYPED_TEST(BiasLayerTest, TestForwardEltwiseInPlace) {
function TYPED_TEST (line 110) | TYPED_TEST(BiasLayerTest, TestBackwardEltwiseInPlace) {
function TYPED_TEST (line 156) | TYPED_TEST(BiasLayerTest, TestForwardEltwiseWithParam) {
function TYPED_TEST (line 176) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastBegin) {
function TYPED_TEST (line 199) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 247) | TYPED_TEST(BiasLayerTest, TestBackwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 293) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleWithParam) {
function TYPED_TEST (line 317) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastEnd) {
function TYPED_TEST (line 340) | TYPED_TEST(BiasLayerTest, TestForwardBias) {
function TYPED_TEST (line 357) | TYPED_TEST(BiasLayerTest, TestForwardBiasAxis2) {
function TYPED_TEST (line 375) | TYPED_TEST(BiasLayerTest, TestGradientEltwise) {
function TYPED_TEST (line 386) | TYPED_TEST(BiasLayerTest, TestGradientEltwiseWithParam) {
function TYPED_TEST (line 399) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastBegin) {
function TYPED_TEST (line 410) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastMiddle) {
function TYPED_TEST (line 421) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastMiddleWithParam) {
function TYPED_TEST (line 435) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastEnd) {
function TYPED_TEST (line 446) | TYPED_TEST(BiasLayerTest, TestGradientBias) {
function TYPED_TEST (line 456) | TYPED_TEST(BiasLayerTest, TestGradientBiasAxis2) {
FILE: caffe-fpn/src/caffe/test/test_blob.cpp
type caffe (line 11) | namespace caffe {
class BlobSimpleTest (line 14) | class BlobSimpleTest : public ::testing::Test {
method BlobSimpleTest (line 16) | BlobSimpleTest()
function TYPED_TEST (line 26) | TYPED_TEST(BlobSimpleTest, TestInitialization) {
function TYPED_TEST (line 38) | TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
function TYPED_TEST (line 45) | TYPED_TEST(BlobSimpleTest, TestReshape) {
function TYPED_TEST (line 54) | TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) {
class BlobMathTest (line 108) | class BlobMathTest : public MultiDeviceTest<TypeParam> {
method BlobMathTest (line 111) | BlobMathTest()
function TYPED_TEST (line 122) | TYPED_TEST(BlobMathTest, TestSumOfSquares) {
function TYPED_TEST (line 177) | TYPED_TEST(BlobMathTest, TestAsum) {
function TYPED_TEST (line 231) | TYPED_TEST(BlobMathTest, TestScaleData) {
FILE: caffe-fpn/src/caffe/test/test_caffe_main.cpp
type caffe (line 7) | namespace caffe {
function main (line 17) | int main(int argc, char** argv) {
FILE: caffe-fpn/src/caffe/test/test_common.cpp
type caffe (line 9) | namespace caffe {
class CommonTest (line 11) | class CommonTest : public ::testing::Test {}
function TEST_F (line 15) | TEST_F(CommonTest, TestCublasHandlerGPU) {
function TEST_F (line 23) | TEST_F(CommonTest, TestBrewMode) {
function TEST_F (line 30) | TEST_F(CommonTest, TestRandSeedCPU) {
function TEST_F (line 47) | TEST_F(CommonTest, TestRandSeedGPU) {
FILE: caffe-fpn/src/caffe/test/test_concat_layer.cpp
type caffe (line 13) | namespace caffe {
class ConcatLayerTest (line 16) | class ConcatLayerTest : public MultiDeviceTest<TypeParam> {
method ConcatLayerTest (line 20) | ConcatLayerTest()
method SetUp (line 25) | virtual void SetUp() {
function TYPED_TEST (line 60) | TYPED_TEST(ConcatLayerTest, TestSetupNum) {
function TYPED_TEST (line 73) | TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
function TYPED_TEST (line 85) | TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) {
function TYPED_TEST (line 101) | TYPED_TEST(ConcatLayerTest, TestForwardTrivial) {
function TYPED_TEST (line 114) | TYPED_TEST(ConcatLayerTest, TestForwardNum) {
function TYPED_TEST (line 143) | TYPED_TEST(ConcatLayerTest, TestForwardChannels) {
function TYPED_TEST (line 169) | TYPED_TEST(ConcatLayerTest, TestGradientTrivial) {
function TYPED_TEST (line 179) | TYPED_TEST(ConcatLayerTest, TestGradientNum) {
function TYPED_TEST (line 189) | TYPED_TEST(ConcatLayerTest, TestGradientChannels) {
function TYPED_TEST (line 198) | TYPED_TEST(ConcatLayerTest, TestGradientChannelsBottomOneOnly) {
FILE: caffe-fpn/src/caffe/test/test_contrastive_loss_layer.cpp
type caffe (line 15) | namespace caffe {
class ContrastiveLossLayerTest (line 18) | class ContrastiveLossLayerTest : public MultiDeviceTest<TypeParam> {
method ContrastiveLossLayerTest (line 22) | ContrastiveLossLayerTest()
function TYPED_TEST (line 59) | TYPED_TEST(ContrastiveLossLayerTest, TestForward) {
function TYPED_TEST (line 88) | TYPED_TEST(ContrastiveLossLayerTest, TestGradient) {
function TYPED_TEST (line 101) | TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) {
function TYPED_TEST (line 130) | TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) {
FILE: caffe-fpn/src/caffe/test/test_convolution_layer.cpp
type caffe (line 17) | namespace caffe {
function caffe_conv (line 22) | void caffe_conv(const Blob<Dtype>* in, ConvolutionParameter* conv_param,
class ConvolutionLayerTest (line 151) | class ConvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method ConvolutionLayerTest (line 155) | ConvolutionLayerTest()
method SetUp (line 160) | virtual void SetUp() {
function TYPED_TEST (line 195) | TYPED_TEST(ConvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 231) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
function TYPED_TEST (line 267) | TYPED_TEST(ConvolutionLayerTest, TestDilatedConvolution) {
function TYPED_TEST (line 311) | TYPED_TEST(ConvolutionLayerTest, Test0DConvolution) {
function TYPED_TEST (line 349) | TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) {
function TYPED_TEST (line 396) | TYPED_TEST(ConvolutionLayerTest, TestDilated3DConvolution) {
function TYPED_TEST (line 443) | TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) {
function TYPED_TEST (line 470) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
function TYPED_TEST (line 498) | TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) {
function TYPED_TEST (line 591) | TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
function TYPED_TEST (line 709) | TYPED_TEST(ConvolutionLayerTest, TestGradient) {
function TYPED_TEST (line 727) | TYPED_TEST(ConvolutionLayerTest, TestDilatedGradient) {
function TYPED_TEST (line 751) | TYPED_TEST(ConvolutionLayerTest, TestGradient3D) {
function TYPED_TEST (line 779) | TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) {
function TYPED_TEST (line 797) | TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
class CuDNNConvolutionLayerTest (line 817) | class CuDNNConvolutionLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNConvolutionLayerTest (line 819) | CuDNNConvolutionLayerTest()
method SetUp (line 824) | virtual void SetUp() {
function TYPED_TEST (line 859) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 896) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) {
function TYPED_TEST (line 931) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) {
function TYPED_TEST (line 958) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) {
function TYPED_TEST (line 1051) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) {
function TYPED_TEST (line 1068) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) {
FILE: caffe-fpn/src/caffe/test/test_data_layer.cpp
type caffe (line 18) | namespace caffe {
class DataLayerTest (line 23) | class DataLayerTest : public MultiDeviceTest<TypeParam> {
method DataLayerTest (line 27) | DataLayerTest()
method SetUp (line 32) | virtual void SetUp() {
method Fill (line 43) | void Fill(const bool unique_pixels, DataParameter_DB backend) {
method TestRead (line 70) | void TestRead() {
method TestReshape (line 108) | void TestReshape(DataParameter_DB backend) {
method TestReadCrop (line 173) | void TestReadCrop(Phase phase) {
method TestReadCropTrainSequenceSeeded (line 227) | void TestReadCropTrainSequenceSeeded() {
method TestReadCropTrainSequenceUnseeded (line 282) | void TestReadCropTrainSequenceUnseeded() {
function TYPED_TEST (line 353) | TYPED_TEST(DataLayerTest, TestReadLevelDB) {
function TYPED_TEST (line 359) | TYPED_TEST(DataLayerTest, TestReshapeLevelDB) {
function TYPED_TEST (line 363) | TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) {
function TYPED_TEST (line 371) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) {
function TYPED_TEST (line 379) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) {
function TYPED_TEST (line 385) | TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) {
function TYPED_TEST (line 393) | TYPED_TEST(DataLayerTest, TestReadLMDB) {
function TYPED_TEST (line 399) | TYPED_TEST(DataLayerTest, TestReshapeLMDB) {
function TYPED_TEST (line 403) | TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) {
function TYPED_TEST (line 411) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) {
function TYPED_TEST (line 419) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) {
function TYPED_TEST (line 425) | TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) {
FILE: caffe-fpn/src/caffe/test/test_data_transformer.cpp
type caffe (line 17) | namespace caffe {
function FillDatum (line 19) | void FillDatum(const int label, const int channels, const int height,
class DataTransformTest (line 34) | class DataTransformTest : public ::testing::Test {
method DataTransformTest (line 36) | DataTransformTest()
method NumSequenceMatches (line 40) | int NumSequenceMatches(const TransformationParameter transform_param,
function TYPED_TEST (line 79) | TYPED_TEST(DataTransformTest, TestEmptyTransform) {
function TYPED_TEST (line 102) | TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) {
function TYPED_TEST (line 125) | TYPED_TEST(DataTransformTest, TestCropSize) {
function TYPED_TEST (line 152) | TYPED_TEST(DataTransformTest, TestCropTrain) {
function TYPED_TEST (line 169) | TYPED_TEST(DataTransformTest, TestCropTest) {
function TYPED_TEST (line 186) | TYPED_TEST(DataTransformTest, TestMirrorTrain) {
function TYPED_TEST (line 202) | TYPED_TEST(DataTransformTest, TestMirrorTest) {
function TYPED_TEST (line 218) | TYPED_TEST(DataTransformTest, TestCropMirrorTrain) {
function TYPED_TEST (line 240) | TYPED_TEST(DataTransformTest, TestCropMirrorTest) {
function TYPED_TEST (line 262) | TYPED_TEST(DataTransformTest, TestMeanValue) {
function TYPED_TEST (line 283) | TYPED_TEST(DataTransformTest, TestMeanValues) {
function TYPED_TEST (line 307) | TYPED_TEST(DataTransformTest, TestMeanFile) {
FILE: caffe-fpn/src/caffe/test/test_db.cpp
type caffe (line 14) | namespace caffe {
class DBTest (line 19) | class DBTest : public ::testing::Test {
method DBTest (line 21) | DBTest()
method SetUp (line 25) | virtual void SetUp() {
type TypeLevelDB (line 50) | struct TypeLevelDB {
type TypeLMDB (line 55) | struct TypeLMDB {
function TYPED_TEST (line 65) | TYPED_TEST(DBTest, TestGetDB) {
function TYPED_TEST (line 69) | TYPED_TEST(DBTest, TestNext) {
function TYPED_TEST (line 80) | TYPED_TEST(DBTest, TestSeekToFirst) {
function TYPED_TEST (line 96) | TYPED_TEST(DBTest, TestKeyValue) {
function TYPED_TEST (line 120) | TYPED_TEST(DBTest, TestWrite) {
FILE: caffe-fpn/src/caffe/test/test_deconvolution_layer.cpp
type caffe (line 13) | namespace caffe {
class DeconvolutionLayerTest (line 18) | class DeconvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method DeconvolutionLayerTest (line 22) | DeconvolutionLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 55) | TYPED_TEST(DeconvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 91) | TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) {
function TYPED_TEST (line 139) | TYPED_TEST(DeconvolutionLayerTest, TestGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) {
function TYPED_TEST (line 275) | TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) {
FILE: caffe-fpn/src/caffe/test/test_dummy_data_layer.cpp
type caffe (line 13) | namespace caffe {
class DummyDataLayerTest (line 16) | class DummyDataLayerTest : public CPUDeviceTest<Dtype> {
method DummyDataLayerTest (line 18) | DummyDataLayerTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 46) | TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
function TYPED_TEST (line 75) | TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) {
function TYPED_TEST (line 113) | TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) {
FILE: caffe-fpn/src/caffe/test/test_eltwise_layer.cpp
type caffe (line 14) | namespace caffe {
class EltwiseLayerTest (line 17) | class EltwiseLayerTest : public MultiDeviceTest<TypeParam> {
method EltwiseLayerTest (line 21) | EltwiseLayerTest()
function TYPED_TEST (line 54) | TYPED_TEST(EltwiseLayerTest, TestSetUp) {
function TYPED_TEST (line 68) | TYPED_TEST(EltwiseLayerTest, TestProd) {
function TYPED_TEST (line 87) | TYPED_TEST(EltwiseLayerTest, TestSum) {
function TYPED_TEST (line 106) | TYPED_TEST(EltwiseLayerTest, TestSumCoeff) {
function TYPED_TEST (line 129) | TYPED_TEST(EltwiseLayerTest, TestStableProdGradient) {
function TYPED_TEST (line 141) | TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) {
function TYPED_TEST (line 153) | TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
function TYPED_TEST (line 164) | TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 178) | TYPED_TEST(EltwiseLayerTest, TestMax) {
function TYPED_TEST (line 198) | TYPED_TEST(EltwiseLayerTest, TestMaxGradient) {
FILE: caffe-fpn/src/caffe/test/test_embed_layer.cpp
type caffe (line 13) | namespace caffe {
class EmbedLayerTest (line 16) | class EmbedLayerTest : public MultiDeviceTest<TypeParam> {
method EmbedLayerTest (line 19) | EmbedLayerTest()
function TYPED_TEST (line 38) | TYPED_TEST(EmbedLayerTest, TestSetUp) {
function TYPED_TEST (line 54) | TYPED_TEST(EmbedLayerTest, TestForward) {
function TYPED_TEST (line 93) | TYPED_TEST(EmbedLayerTest, TestForwardWithBias) {
function TYPED_TEST (line 137) | TYPED_TEST(EmbedLayerTest, TestGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(EmbedLayerTest, TestGradientWithBias) {
FILE: caffe-fpn/src/caffe/test/test_euclidean_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class EuclideanLossLayerTest (line 17) | class EuclideanLossLayerTest : public MultiDeviceTest<TypeParam> {
method EuclideanLossLayerTest (line 21) | EuclideanLossLayerTest()
method TestForward (line 40) | void TestForward() {
function TYPED_TEST (line 73) | TYPED_TEST(EuclideanLossLayerTest, TestForward) {
function TYPED_TEST (line 77) | TYPED_TEST(EuclideanLossLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_filler.cpp
type caffe (line 7) | namespace caffe {
class ConstantFillerTest (line 10) | class ConstantFillerTest : public ::testing::Test {
method ConstantFillerTest (line 12) | ConstantFillerTest()
function TYPED_TEST (line 27) | TYPED_TEST(ConstantFillerTest, TestFill) {
class UniformFillerTest (line 38) | class UniformFillerTest : public ::testing::Test {
method UniformFillerTest (line 40) | UniformFillerTest()
function TYPED_TEST (line 56) | TYPED_TEST(UniformFillerTest, TestFill) {
class PositiveUnitballFillerTest (line 67) | class PositiveUnitballFillerTest : public ::testing::Test {
method PositiveUnitballFillerTest (line 69) | PositiveUnitballFillerTest()
function TYPED_TEST (line 83) | TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
class GaussianFillerTest (line 104) | class GaussianFillerTest : public ::testing::Test {
method GaussianFillerTest (line 106) | GaussianFillerTest()
function TYPED_TEST (line 122) | TYPED_TEST(GaussianFillerTest, TestFill) {
class XavierFillerTest (line 144) | class XavierFillerTest : public ::testing::Test {
method XavierFillerTest (line 146) | XavierFillerTest()
method test_params (line 150) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 179) | TYPED_TEST(XavierFillerTest, TestFillFanIn) {
function TYPED_TEST (line 183) | TYPED_TEST(XavierFillerTest, TestFillFanOut) {
function TYPED_TEST (line 187) | TYPED_TEST(XavierFillerTest, TestFillAverage) {
class MSRAFillerTest (line 193) | class MSRAFillerTest : public ::testing::Test {
method MSRAFillerTest (line 195) | MSRAFillerTest()
method test_params (line 199) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 228) | TYPED_TEST(MSRAFillerTest, TestFillFanIn) {
function TYPED_TEST (line 232) | TYPED_TEST(MSRAFillerTest, TestFillFanOut) {
function TYPED_TEST (line 236) | TYPED_TEST(MSRAFillerTest, TestFillAverage) {
FILE: caffe-fpn/src/caffe/test/test_filter_layer.cpp
type caffe (line 13) | namespace caffe {
class FilterLayerTest (line 16) | class FilterLayerTest : public MultiDeviceTest<TypeParam> {
method FilterLayerTest (line 20) | FilterLayerTest()
method SetUp (line 26) | virtual void SetUp() {
function TYPED_TEST (line 67) | TYPED_TEST(FilterLayerTest, TestReshape) {
function TYPED_TEST (line 87) | TYPED_TEST(FilterLayerTest, TestForward) {
function TYPED_TEST (line 115) | TYPED_TEST(FilterLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_flatten_layer.cpp
type caffe (line 13) | namespace caffe {
class FlattenLayerTest (line 16) | class FlattenLayerTest : public MultiDeviceTest<TypeParam> {
method FlattenLayerTest (line 19) | FlattenLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(FlattenLayerTest, TestSetup) {
function TYPED_TEST (line 49) | TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) {
function TYPED_TEST (line 61) | TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) {
function TYPED_TEST (line 73) | TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) {
function TYPED_TEST (line 85) | TYPED_TEST(FlattenLayerTest, TestForward) {
function TYPED_TEST (line 99) | TYPED_TEST(FlattenLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_gradient_based_solver.cpp
type caffe (line 20) | namespace caffe {
class GradientBasedSolverTest (line 23) | class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
method GradientBasedSolverTest (line 27) | GradientBasedSolverTest() :
method InitSolverFromProtoString (line 52) | virtual void InitSolverFromProtoString(const string& proto) {
method string (line 70) | string RunLeastSquaresSolver(const Dtype learning_rate,
method ComputeLeastSquaresUpdate (line 225) | void ComputeLeastSquaresUpdate(const Dtype learning_rate,
method CheckLeastSquaresUpdate (line 351) | void CheckLeastSquaresUpdate(
method CheckAccumulation (line 401) | void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeigh...
method TestLeastSquaresUpdate (line 455) | void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0,
method TestSnapshot (line 492) | void TestSnapshot(const Dtype learning_rate = 1.0,
class SGDSolverTest (line 565) | class SGDSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 569) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 576) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) {
function TYPED_TEST (line 580) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) {
function TYPED_TEST (line 586) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 597) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIt...
function TYPED_TEST (line 608) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) {
function TYPED_TEST (line 619) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) {
function TYPED_TEST (line 630) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 641) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) {
function TYPED_TEST (line 653) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 664) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumSha...
function TYPED_TEST (line 676) | TYPED_TEST(SGDSolverTest, TestSnapshot) {
function TYPED_TEST (line 687) | TYPED_TEST(SGDSolverTest, TestSnapshotShare) {
class AdaGradSolverTest (line 701) | class AdaGradSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 705) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 712) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) {
function TYPED_TEST (line 716) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundre...
function TYPED_TEST (line 722) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 729) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 740) | TYPED_TEST(AdaGradSolverTest,
function TYPED_TEST (line 753) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 764) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 776) | TYPED_TEST(AdaGradSolverTest, TestSnapshot) {
function TYPED_TEST (line 787) | TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) {
class NesterovSolverTest (line 801) | class NesterovSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 805) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 812) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) {
function TYPED_TEST (line 816) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHund...
function TYPED_TEST (line 822) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 829) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 841) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 852) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 863) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 874) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 887) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 898) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 910) | TYPED_TEST(NesterovSolverTest, TestSnapshot) {
function TYPED_TEST (line 921) | TYPED_TEST(NesterovSolverTest, TestSnapshotShare) {
class AdaDeltaSolverTest (line 934) | class AdaDeltaSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 938) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 945) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) {
function TYPED_TEST (line 951) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 959) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfM...
function TYPED_TEST (line 970) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 981) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 992) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 1003) | TYPED_TEST(AdaDeltaSolverTest,
function TYPED_TEST (line 1016) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1027) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1039) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) {
function TYPED_TEST (line 1050) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) {
class AdamSolverTest (line 1063) | class AdamSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1067) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 1079) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) {
function TYPED_TEST (line 1087) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 1095) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 1106) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingSha...
function TYPED_TEST (line 1118) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 1129) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumSh...
function TYPED_TEST (line 1141) | TYPED_TEST(AdamSolverTest, TestSnapshot) {
function TYPED_TEST (line 1152) | TYPED_TEST(AdamSolverTest, TestSnapshotShare) {
class RMSPropSolverTest (line 1165) | class RMSPropSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1169) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 1179) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 1186) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDeca...
function TYPED_TEST (line 1197) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 1208) | TYPED_TEST(RMSPropSolverTest,
function TYPED_TEST (line 1221) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1232) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1244) | TYPED_TEST(RMSPropSolverTest, TestSnapshot) {
function TYPED_TEST (line 1255) | TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) {
FILE: caffe-fpn/src/caffe/test/test_hdf5_output_layer.cpp
type caffe (line 15) | namespace caffe {
class HDF5OutputLayerTest (line 18) | class HDF5OutputLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5OutputLayerTest (line 22) | HDF5OutputLayerTest()
function TYPED_TEST (line 73) | TYPED_TEST(HDF5OutputLayerTest, TestForward) {
FILE: caffe-fpn/src/caffe/test/test_hdf5data_layer.cpp
type caffe (line 15) | namespace caffe {
class HDF5DataLayerTest (line 18) | class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5DataLayerTest (line 22) | HDF5DataLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 55) | TYPED_TEST(HDF5DataLayerTest, TestRead) {
FILE: caffe-fpn/src/caffe/test/test_hinge_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class HingeLossLayerTest (line 17) | class HingeLossLayerTest : public MultiDeviceTest<TypeParam> {
method HingeLossLayerTest (line 21) | HingeLossLayerTest()
function TYPED_TEST (line 53) | TYPED_TEST(HingeLossLayerTest, TestGradientL1) {
function TYPED_TEST (line 62) | TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
FILE: caffe-fpn/src/caffe/test/test_im2col_layer.cpp
type caffe (line 13) | namespace caffe {
class Im2colLayerTest (line 16) | class Im2colLayerTest : public MultiDeviceTest<TypeParam> {
method Im2colLayerTest (line 19) | Im2colLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(Im2colLayerTest, TestSetup) {
function TYPED_TEST (line 61) | TYPED_TEST(Im2colLayerTest, TestForward) {
function TYPED_TEST (line 78) | TYPED_TEST(Im2colLayerTest, TestGradient) {
function TYPED_TEST (line 91) | TYPED_TEST(Im2colLayerTest, TestDilatedGradient) {
function TYPED_TEST (line 111) | TYPED_TEST(Im2colLayerTest, TestGradientForceND) {
function TYPED_TEST (line 125) | TYPED_TEST(Im2colLayerTest, TestDilatedGradientForceND) {
function TYPED_TEST (line 146) | TYPED_TEST(Im2colLayerTest, TestRect) {
function TYPED_TEST (line 164) | TYPED_TEST(Im2colLayerTest, TestRectGradient) {
FILE: caffe-fpn/src/caffe/test/test_image_data_layer.cpp
type caffe (line 17) | namespace caffe {
class ImageDataLayerTest (line 20) | class ImageDataLayerTest : public MultiDeviceTest<TypeParam> {
method ImageDataLayerTest (line 24) | ImageDataLayerTest()
method SetUp (line 28) | virtual void SetUp() {
function TYPED_TEST (line 65) | TYPED_TEST(ImageDataLayerTest, TestRead) {
function TYPED_TEST (line 91) | TYPED_TEST(ImageDataLayerTest, TestResize) {
function TYPED_TEST (line 119) | TYPED_TEST(ImageDataLayerTest, TestReshape) {
function TYPED_TEST (line 146) | TYPED_TEST(ImageDataLayerTest, TestShuffle) {
FILE: caffe-fpn/src/caffe/test/test_infogain_loss_layer.cpp
type caffe (line 13) | namespace caffe {
class InfogainLossLayerTest (line 16) | class InfogainLossLayerTest : public MultiDeviceTest<TypeParam> {
method InfogainLossLayerTest (line 20) | InfogainLossLayerTest()
function TYPED_TEST (line 58) | TYPED_TEST(InfogainLossLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_inner_product_layer.cpp
type caffe (line 13) | namespace caffe {
class InnerProductLayerTest (line 20) | class InnerProductLayerTest : public MultiDeviceTest<TypeParam> {
method InnerProductLayerTest (line 23) | InnerProductLayerTest()
function TYPED_TEST (line 47) | TYPED_TEST(InnerProductLayerTest, TestSetUp) {
function TYPED_TEST (line 63) | TYPED_TEST(InnerProductLayerTest, TestForward) {
function TYPED_TEST (line 94) | TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) {
function TYPED_TEST (line 125) | TYPED_TEST(InnerProductLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_internal_thread.cpp
type caffe (line 9) | namespace caffe {
class InternalThreadTest (line 12) | class InternalThreadTest : public ::testing::Test {}
function TEST_F (line 14) | TEST_F(InternalThreadTest, TestStartAndExit) {
class TestThreadA (line 23) | class TestThreadA : public InternalThread {
method InternalThreadEntry (line 24) | void InternalThreadEntry() {
class TestThreadB (line 29) | class TestThreadB : public InternalThread {
method InternalThreadEntry (line 30) | void InternalThreadEntry() {
function TEST_F (line 35) | TEST_F(InternalThreadTest, TestRandomSeed) {
FILE: caffe-fpn/src/caffe/test/test_io.cpp
type caffe (line 16) | namespace caffe {
class IOTest (line 18) | class IOTest : public ::testing::Test {}
function ReadImageToDatumReference (line 20) | bool ReadImageToDatumReference(const string& filename, const int label,
function TEST_F (line 65) | TEST_F(IOTest, TestReadImageToDatum) {
function TEST_F (line 74) | TEST_F(IOTest, TestReadImageToDatumReference) {
function TEST_F (line 93) | TEST_F(IOTest, TestReadImageToDatumReferenceResized) {
function TEST_F (line 111) | TEST_F(IOTest, TestReadImageToDatumContent) {
function TEST_F (line 132) | TEST_F(IOTest, TestReadImageToDatumContentGray) {
function TEST_F (line 151) | TEST_F(IOTest, TestReadImageToDatumResized) {
function TEST_F (line 161) | TEST_F(IOTest, TestReadImageToDatumResizedSquare) {
function TEST_F (line 170) | TEST_F(IOTest, TestReadImageToDatumGray) {
function TEST_F (line 180) | TEST_F(IOTest, TestReadImageToDatumResizedGray) {
function TEST_F (line 190) | TEST_F(IOTest, TestReadImageToCVMat) {
function TEST_F (line 198) | TEST_F(IOTest, TestReadImageToCVMatResized) {
function TEST_F (line 206) | TEST_F(IOTest, TestReadImageToCVMatResizedSquare) {
function TEST_F (line 214) | TEST_F(IOTest, TestReadImageToCVMatGray) {
function TEST_F (line 223) | TEST_F(IOTest, TestReadImageToCVMatResizedGray) {
function TEST_F (line 232) | TEST_F(IOTest, TestCVMatToDatum) {
function TEST_F (line 242) | TEST_F(IOTest, TestCVMatToDatumContent) {
function TEST_F (line 261) | TEST_F(IOTest, TestCVMatToDatumReference) {
function TEST_F (line 280) | TEST_F(IOTest, TestReadFileToDatum) {
function TEST_F (line 289) | TEST_F(IOTest, TestDecodeDatum) {
function TEST_F (line 309) | TEST_F(IOTest, TestDecodeDatumToCVMat) {
function TEST_F (line 323) | TEST_F(IOTest, TestDecodeDatumToCVMatContent) {
function TEST_F (line 343) | TEST_F(IOTest, TestDecodeDatumNative) {
function TEST_F (line 363) | TEST_F(IOTest, TestDecodeDatumToCVMatNative) {
function TEST_F (line 373) | TEST_F(IOTest, TestDecodeDatumNativeGray) {
function TEST_F (line 393) | TEST_F(IOTest, TestDecodeDatumToCVMatNativeGray) {
function TEST_F (line 403) | TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) {
FILE: caffe-fpn/src/caffe/test/test_layer_factory.cpp
type caffe (line 15) | namespace caffe {
class LayerFactoryTest (line 18) | class LayerFactoryTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 22) | TYPED_TEST(LayerFactoryTest, TestCreateLayer) {
FILE: caffe-fpn/src/caffe/test/test_lrn_layer.cpp
type caffe (line 22) | namespace caffe {
class LRNLayerTest (line 25) | class LRNLayerTest : public MultiDeviceTest<TypeParam> {
method LRNLayerTest (line 29) | LRNLayerTest()
method SetUp (line 33) | virtual void SetUp() {
function TYPED_TEST (line 119) | TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
function TYPED_TEST (line 130) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 145) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) {
function TYPED_TEST (line 161) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) {
function TYPED_TEST (line 182) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) {
function TYPED_TEST (line 204) | TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
function TYPED_TEST (line 218) | TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) {
function TYPED_TEST (line 236) | TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) {
class CuDNNLRNLayerTest (line 255) | class CuDNNLRNLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNLRNLayerTest (line 257) | CuDNNLRNLayerTest()
method SetUp (line 261) | virtual void SetUp() {
function TYPED_TEST (line 347) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsCuDNN) {
function TYPED_TEST (line 362) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsLargeRegionCuDN...
function TYPED_TEST (line 378) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsCuDNN) {
function TYPED_TEST (line 395) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardWithinChannel) {
function TYPED_TEST (line 413) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientWithinChannel) {
function TYPED_TEST (line 430) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsLargeRegionCuD...
FILE: caffe-fpn/src/caffe/test/test_math_functions.cpp
type caffe (line 14) | namespace caffe {
class MathFunctionsTest (line 17) | class MathFunctionsTest : public MultiDeviceTest<TypeParam> {
method MathFunctionsTest (line 21) | MathFunctionsTest()
method SetUp (line 26) | virtual void SetUp() {
class CPUMathFunctionsTest (line 47) | class CPUMathFunctionsTest
function TYPED_TEST (line 53) | TYPED_TEST(CPUMathFunctionsTest, TestNothing) {
function TYPED_TEST (line 58) | TYPED_TEST(CPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 69) | TYPED_TEST(CPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 79) | TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 89) | TYPED_TEST(CPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 99) | TYPED_TEST(CPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 112) | TYPED_TEST(CPUMathFunctionsTest, TestCopy) {
class GPUMathFunctionsTest (line 125) | class GPUMathFunctionsTest : public MathFunctionsTest<GPUDevice<Dtype>...
function TYPED_TEST (line 130) | TYPED_TEST(GPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 142) | TYPED_TEST(GPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 153) | TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 164) | TYPED_TEST(GPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 175) | TYPED_TEST(GPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 188) | TYPED_TEST(GPUMathFunctionsTest, TestCopy) {
FILE: caffe-fpn/src/caffe/test/test_maxpool_dropout_layers.cpp
type caffe (line 14) | namespace caffe {
class MaxPoolingDropoutTest (line 17) | class MaxPoolingDropoutTest : public MultiDeviceTest<TypeParam> {
method MaxPoolingDropoutTest (line 20) | MaxPoolingDropoutTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 43) | TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
function TYPED_TEST (line 60) | TYPED_TEST(MaxPoolingDropoutTest, TestForward) {
function TYPED_TEST (line 89) | TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
FILE: caffe-fpn/src/caffe/test/test_memory_data_layer.cpp
type caffe (line 13) | namespace caffe {
class MemoryDataLayerTest (line 16) | class MemoryDataLayerTest : public MultiDeviceTest<TypeParam> {
method MemoryDataLayerTest (line 20) | MemoryDataLayerTest()
method SetUp (line 25) | virtual void SetUp() {
function TYPED_TEST (line 66) | TYPED_TEST(MemoryDataLayerTest, TestSetup) {
function TYPED_TEST (line 89) | TYPED_TEST(MemoryDataLayerTest, TestForward) {
function TYPED_TEST (line 119) | TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) {
function TYPED_TEST (line 173) | TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) {
function TYPED_TEST (line 219) | TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) {
FILE: caffe-fpn/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
type caffe (line 13) | namespace caffe {
class MultinomialLogisticLossLayerTest (line 16) | class MultinomialLogisticLossLayerTest : public CPUDeviceTest<Dtype> {
method MultinomialLogisticLossLayerTest (line 18) | MultinomialLogisticLossLayerTest()
function TYPED_TEST (line 49) | TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
FILE: caffe-fpn/src/caffe/test/test_mvn_layer.cpp
type caffe (line 13) | namespace caffe {
class MVNLayerTest (line 16) | class MVNLayerTest : public MultiDeviceTest<TypeParam> {
method MVNLayerTest (line 19) | MVNLayerTest()
function TYPED_TEST (line 38) | TYPED_TEST(MVNLayerTest, TestForward) {
function TYPED_TEST (line 72) | TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) {
function TYPED_TEST (line 105) | TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 141) | TYPED_TEST(MVNLayerTest, TestGradient) {
function TYPED_TEST (line 150) | TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) {
function TYPED_TEST (line 161) | TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) {
FILE: caffe-fpn/src/caffe/test/test_net.cpp
type caffe (line 17) | namespace caffe {
class NetTest (line 20) | class NetTest : public MultiDeviceTest<TypeParam> {
method NetTest (line 24) | NetTest() : seed_(1701) {}
method InitNetFromProtoString (line 26) | virtual void InitNetFromProtoString(const string& proto) {
method CopyNetBlobs (line 32) | virtual void CopyNetBlobs(const bool copy_diff,
method CopyNetParams (line 45) | virtual void CopyNetParams(const bool copy_diff,
method InitTinyNet (line 58) | virtual void InitTinyNet(const bool force_backward = false,
method InitTinyNetEuclidean (line 135) | virtual void InitTinyNetEuclidean(const bool force_backward = false) {
method InitTrickyNet (line 195) | virtual void InitTrickyNet(Dtype* loss_weight = NULL) {
method InitUnsharedWeightsNet (line 286) | virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL,
method InitSharedWeightsNet (line 374) | virtual void InitSharedWeightsNet() {
method InitDiffDataUnsharedWeightsNet (line 431) | virtual void InitDiffDataUnsharedWeightsNet() {
method InitDiffDataSharedWeightsNet (line 493) | virtual void InitDiffDataSharedWeightsNet() {
method InitReshapableNet (line 555) | virtual void InitReshapableNet() {
method InitSkipPropNet (line 617) | virtual void InitSkipPropNet(bool test_skip_true) {
function TYPED_TEST (line 722) | TYPED_TEST(NetTest, TestHasBlob) {
function TYPED_TEST (line 731) | TYPED_TEST(NetTest, TestGetBlob) {
function TYPED_TEST (line 740) | TYPED_TEST(NetTest, TestHasLayer) {
function TYPED_TEST (line 748) | TYPED_TEST(NetTest, TestGetLayerByName) {
function TYPED_TEST (line 756) | TYPED_TEST(NetTest, TestBottomNeedBackward) {
function TYPED_TEST (line 769) | TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
function TYPED_TEST (line 783) | TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) {
function TYPED_TEST (line 797) | TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) {
function TYPED_TEST (line 815) | TYPED_TEST(NetTest, TestLossWeight) {
function TYPED_TEST (line 866) | TYPED_TEST(NetTest, TestLossWeightMidNet) {
function TYPED_TEST (line 904) | TYPED_TEST(NetTest, TestComboLossWeight) {
function TYPED_TEST (line 1034) | TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) {
function TYPED_TEST (line 1045) | TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
function TYPED_TEST (line 1054) | TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
function TYPED_TEST (line 1063) | TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
function TYPED_TEST (line 1081) | TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
function TYPED_TEST (line 1101) | TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
function TYPED_TEST (line 1183) | TYPED_TEST(NetTest, TestSharedWeightsResume) {
function TYPED_TEST (line 1229) | TYPED_TEST(NetTest, TestParamPropagateDown) {
function TYPED_TEST (line 1311) | TYPED_TEST(NetTest, TestFromTo) {
class FilterNetTest (line 1345) | class FilterNetTest : public ::testing::Test {
method RunFilterNetTest (line 1347) | void RunFilterNetTest(
function TEST_F (line 1367) | TEST_F(FilterNetTest, TestNoFilter) {
function TEST_F (line 1391) | TEST_F(FilterNetTest, TestFilterLeNetTrainTest) {
function TEST_F (line 1633) | TEST_F(FilterNetTest, TestFilterOutByStage) {
function TEST_F (line 1672) | TEST_F(FilterNetTest, TestFilterOutByStage2) {
function TEST_F (line 1711) | TEST_F(FilterNetTest, TestFilterInByStage) {
function TEST_F (line 1737) | TEST_F(FilterNetTest, TestFilterInByStage2) {
function TEST_F (line 1762) | TEST_F(FilterNetTest, TestFilterOutByMultipleStage) {
function TEST_F (line 1805) | TEST_F(FilterNetTest, TestFilterInByMultipleStage) {
function TEST_F (line 1833) | TEST_F(FilterNetTest, TestFilterInByMultipleStage2) {
function TEST_F (line 1860) | TEST_F(FilterNetTest, TestFilterInByNotStage) {
function TEST_F (line 1887) | TEST_F(FilterNetTest, TestFilterOutByNotStage) {
function TEST_F (line 1923) | TEST_F(FilterNetTest, TestFilterOutByMinLevel) {
function TEST_F (line 1962) | TEST_F(FilterNetTest, TestFilterOutByMaxLevel) {
function TEST_F (line 2001) | TEST_F(FilterNetTest, TestFilterInByMinLevel) {
function TEST_F (line 2026) | TEST_F(FilterNetTest, TestFilterInByMinLevel2) {
function TEST_F (line 2052) | TEST_F(FilterNetTest, TestFilterInByMaxLevel) {
function TEST_F (line 2077) | TEST_F(FilterNetTest, TestFilterInByMaxLevel2) {
function TEST_F (line 2103) | TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) {
function TEST_F (line 2166) | TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) {
function TEST_F (line 2199) | TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) {
function TYPED_TEST (line 2262) | TYPED_TEST(NetTest, TestReshape) {
function TYPED_TEST (line 2334) | TYPED_TEST(NetTest, TestSkipPropagateDown) {
FILE: caffe-fpn/src/caffe/test/test_neuron_layer.cpp
type caffe (line 34) | namespace caffe {
class NeuronLayerTest (line 37) | class NeuronLayerTest : public MultiDeviceTest<TypeParam> {
method NeuronLayerTest (line 41) | NeuronLayerTest()
method TestDropoutForward (line 58) | void TestDropoutForward(const float dropout_ratio) {
method TestExpForward (line 90) | void TestExpForward(const float base, const float scale, const float...
method TestExpGradient (line 112) | void TestExpGradient(const float base, const float scale, const floa...
method TestPReLU (line 122) | void TestPReLU(PReLULayer<Dtype> *layer) {
method LogBottomInit (line 139) | void LogBottomInit() {
method TestLogForward (line 147) | void TestLogForward(const float base, const float scale, const float...
method TestLogGradient (line 171) | void TestLogGradient(const float base, const float scale, const floa...
function TYPED_TEST (line 185) | TYPED_TEST(NeuronLayerTest, TestAbsVal) {
function TYPED_TEST (line 199) | TYPED_TEST(NeuronLayerTest, TestAbsGradient) {
function TYPED_TEST (line 208) | TYPED_TEST(NeuronLayerTest, TestReLU) {
function TYPED_TEST (line 223) | TYPED_TEST(NeuronLayerTest, TestReLUGradient) {
function TYPED_TEST (line 232) | TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) {
function TYPED_TEST (line 252) | TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) {
function TYPED_TEST (line 263) | TYPED_TEST(NeuronLayerTest, TestELU) {
function TYPED_TEST (line 284) | TYPED_TEST(NeuronLayerTest, TestELUasReLU) {
function TYPED_TEST (line 301) | TYPED_TEST(NeuronLayerTest, TestELUGradient) {
function TYPED_TEST (line 310) | TYPED_TEST(NeuronLayerTest, TestELUasReLUGradient) {
function TYPED_TEST (line 321) | TYPED_TEST(NeuronLayerTest, TestSigmoid) {
function TYPED_TEST (line 338) | TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
function TYPED_TEST (line 347) | TYPED_TEST(NeuronLayerTest, TestTanH) {
function TYPED_TEST (line 370) | TYPED_TEST(NeuronLayerTest, TestTanHGradient) {
function TYPED_TEST (line 379) | TYPED_TEST(NeuronLayerTest, TestExpLayer) {
function TYPED_TEST (line 388) | TYPED_TEST(NeuronLayerTest, TestExpGradient) {
function TYPED_TEST (line 397) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2) {
function TYPED_TEST (line 405) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2) {
function TYPED_TEST (line 413) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1) {
function TYPED_TEST (line 421) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1) {
function TYPED_TEST (line 429) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Scale3) {
function TYPED_TEST (line 437) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Scale3) {
function TYPED_TEST (line 445) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1Scale3) {
function TYPED_TEST (line 453) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) {
function TYPED_TEST (line 461) | TYPED_TEST(NeuronLayerTest, TestLogLayer) {
function TYPED_TEST (line 470) | TYPED_TEST(NeuronLayerTest, TestLogGradient) {
function TYPED_TEST (line 479) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) {
function TYPED_TEST (line 487) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) {
function TYPED_TEST (line 495) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) {
function TYPED_TEST (line 503) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) {
function TYPED_TEST (line 511) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) {
function TYPED_TEST (line 519) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) {
function TYPED_TEST (line 527) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) {
function TYPED_TEST (line 535) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) {
function TYPED_TEST (line 543) | TYPED_TEST(NeuronLayerTest, TestDropoutHalf) {
function TYPED_TEST (line 548) | TYPED_TEST(NeuronLayerTest, TestDropoutThreeQuarters) {
function TYPED_TEST (line 553) | TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) {
function TYPED_TEST (line 570) | TYPED_TEST(NeuronLayerTest, TestDropoutGradient) {
function TYPED_TEST (line 580) | TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
function TYPED_TEST (line 590) | TYPED_TEST(NeuronLayerTest, TestBNLL) {
function TYPED_TEST (line 605) | TYPED_TEST(NeuronLayerTest, TestBNLLGradient) {
function TYPED_TEST (line 614) | TYPED_TEST(NeuronLayerTest, TestPReLUParam) {
function TYPED_TEST (line 626) | TYPED_TEST(NeuronLayerTest, TestPReLUForward) {
function TYPED_TEST (line 637) | TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) {
function TYPED_TEST (line 646) | TYPED_TEST(NeuronLayerTest, TestPReLUGradient) {
function TYPED_TEST (line 659) | TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) {
function TYPED_TEST (line 670) | TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) {
function TYPED_TEST (line 713) | TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) {
class CuDNNNeuronLayerTest (line 790) | class CuDNNNeuronLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNNeuronLayerTest (line 792) | CuDNNNeuronLayerTest()
function TYPED_TEST (line 812) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) {
function TYPED_TEST (line 826) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) {
function TYPED_TEST (line 834) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) {
function TYPED_TEST (line 853) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDN...
function TYPED_TEST (line 863) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) {
function TYPED_TEST (line 879) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) {
function TYPED_TEST (line 887) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) {
function TYPED_TEST (line 909) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) {
FILE: caffe-fpn/src/caffe/test/test_platform.cpp
type caffe (line 11) | namespace caffe {
class PlatformTest (line 15) | class PlatformTest : public ::testing::Test {}
function TEST_F (line 17) | TEST_F(PlatformTest, TestInitialization) {
FILE: caffe-fpn/src/caffe/test/test_pooling_layer.cpp
type caffe (line 17) | namespace caffe {
class PoolingLayerTest (line 20) | class PoolingLayerTest : public MultiDeviceTest<TypeParam> {
method PoolingLayerTest (line 24) | PoolingLayerTest()
method SetUp (line 28) | virtual void SetUp() {
method TestForwardSquare (line 49) | void TestForwardSquare() {
method TestForwardRectHigh (line 121) | void TestForwardRectHigh() {
method TestForwardRectWide (line 246) | void TestForwardRectWide() {
function TYPED_TEST (line 376) | TYPED_TEST(PoolingLayerTest, TestSetup) {
function TYPED_TEST (line 390) | TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
function TYPED_TEST (line 406) | TYPED_TEST(PoolingLayerTest, TestSetupGlobalPooling) {
function TYPED_TEST (line 446) | TYPED_TEST(PoolingLayerTest, TestForwardMax) {
function TYPED_TEST (line 452) | TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) {
function TYPED_TEST (line 459) | TYPED_TEST(PoolingLayerTest, TestGradientMax) {
function TYPED_TEST (line 478) | TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) {
function TYPED_TEST (line 523) | TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) {
function TYPED_TEST (line 543) | TYPED_TEST(PoolingLayerTest, TestForwardAve) {
function TYPED_TEST (line 575) | TYPED_TEST(PoolingLayerTest, TestGradientAve) {
function TYPED_TEST (line 593) | TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) {
class CuDNNPoolingLayerTest (line 614) | class CuDNNPoolingLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNPoolingLayerTest (line 616) | CuDNNPoolingLayerTest()
method SetUp (line 620) | virtual void SetUp() {
method TestForwardSquare (line 641) | void TestForwardSquare() {
method TestForwardRectHigh (line 713) | void TestForwardRectHigh() {
method TestForwardRectWide (line 838) | void TestForwardRectWide() {
function TYPED_TEST (line 968) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 981) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) {
function TYPED_TEST (line 1022) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) {
function TYPED_TEST (line 1039) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) {
function TYPED_TEST (line 1058) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) {
function TYPED_TEST (line 1123) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) {
function TYPED_TEST (line 1148) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) {
function TYPED_TEST (line 1165) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) {
FILE: caffe-fpn/src/caffe/test/test_power_layer.cpp
type caffe (line 14) | namespace caffe {
class PowerLayerTest (line 17) | class PowerLayerTest : public MultiDeviceTest<TypeParam> {
method PowerLayerTest (line 21) | PowerLayerTest()
method TestForward (line 34) | void TestForward(Dtype power, Dtype scale, Dtype shift) {
method TestBackward (line 61) | void TestBackward(Dtype power, Dtype scale, Dtype shift) {
function TYPED_TEST (line 90) | TYPED_TEST(PowerLayerTest, TestPower) {
function TYPED_TEST (line 98) | TYPED_TEST(PowerLayerTest, TestPowerGradient) {
function TYPED_TEST (line 106) | TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) {
function TYPED_TEST (line 114) | TYPED_TEST(PowerLayerTest, TestPowerZero) {
function TYPED_TEST (line 122) | TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) {
function TYPED_TEST (line 130) | TYPED_TEST(PowerLayerTest, TestPowerOne) {
function TYPED_TEST (line 138) | TYPED_TEST(PowerLayerTest, TestPowerOneGradient) {
function TYPED_TEST (line 146) | TYPED_TEST(PowerLayerTest, TestPowerTwo) {
function TYPED_TEST (line 154) | TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) {
function TYPED_TEST (line 162) | TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) {
FILE: caffe-fpn/src/caffe/test/test_protobuf.cpp
type caffe (line 12) | namespace caffe {
class ProtoTest (line 14) | class ProtoTest : public ::testing::Test {}
function TEST_F (line 16) | TEST_F(ProtoTest, TestSerialization) {
FILE: caffe-fpn/src/caffe/test/test_random_number_generator.cpp
type caffe (line 11) | namespace caffe {
class RandomNumberGeneratorTest (line 14) | class RandomNumberGeneratorTest : public ::testing::Test {
method RandomNumberGeneratorTest (line 16) | RandomNumberGeneratorTest()
method SetUp (line 25) | virtual void SetUp() {
method Dtype (line 29) | Dtype sample_mean(const Dtype* const seqs, const int sample_size) {
method Dtype (line 37) | Dtype sample_mean(const Dtype* const seqs) {
method Dtype (line 41) | Dtype sample_mean(const int* const seqs, const int sample_size) {
method Dtype (line 49) | Dtype sample_mean(const int* const seqs) {
method Dtype (line 53) | Dtype mean_bound(const Dtype std, const int sample_size) {
method Dtype (line 57) | Dtype mean_bound(const Dtype std) {
method RngGaussianFill (line 61) | void RngGaussianFill(const Dtype mu, const Dtype sigma, void* cpu_da...
method RngGaussianChecks (line 66) | void RngGaussianChecks(const Dtype mu, const Dtype sigma,
method RngUniformFill (line 104) | void RngUniformFill(const Dtype lower, const Dtype upper, void* cpu_...
method RngUniformChecks (line 110) | void RngUniformChecks(const Dtype lower, const Dtype upper,
method RngBernoulliFill (line 157) | void RngBernoulliFill(const Dtype p, void* cpu_data) {
method RngBernoulliChecks (line 162) | void RngBernoulliChecks(const Dtype p, const void* cpu_data) {
method RngGaussianFillGPU (line 173) | void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu...
method RngUniformFillGPU (line 178) | void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* g...
method RngUniformIntFillGPU (line 186) | void RngUniformIntFillGPU(void* gpu_data) {
function TYPED_TEST (line 209) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) {
function TYPED_TEST (line 218) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2) {
function TYPED_TEST (line 227) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform) {
function TYPED_TEST (line 236) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2) {
function TYPED_TEST (line 245) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli) {
function TYPED_TEST (line 253) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli2) {
function TYPED_TEST (line 261) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) {
function TYPED_TEST (line 287) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) {
function TYPED_TEST (line 315) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) {
function TYPED_TEST (line 340) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) {
function TYPED_TEST (line 365) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) {
function TYPED_TEST (line 400) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianGPU) {
function TYPED_TEST (line 410) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2GPU) {
function TYPED_TEST (line 420) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformGPU) {
function TYPED_TEST (line 430) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) {
function TYPED_TEST (line 440) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) {
function TYPED_TEST (line 457) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) {
function TYPED_TEST (line 488) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) {
FILE: caffe-fpn/src/caffe/test/test_reduction_layer.cpp
type caffe (line 13) | namespace caffe {
class ReductionLayerTest (line 16) | class ReductionLayerTest : public MultiDeviceTest<TypeParam> {
method ReductionLayerTest (line 20) | ReductionLayerTest()
method TestForward (line 36) | void TestForward(ReductionParameter_ReductionOp op,
method TestGradient (line 80) | void TestGradient(ReductionParameter_ReductionOp op,
function TYPED_TEST (line 102) | TYPED_TEST(ReductionLayerTest, TestSetUp) {
function TYPED_TEST (line 111) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis1) {
function TYPED_TEST (line 122) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis2) {
function TYPED_TEST (line 134) | TYPED_TEST(ReductionLayerTest, TestSum) {
function TYPED_TEST (line 139) | TYPED_TEST(ReductionLayerTest, TestSumCoeff) {
function TYPED_TEST (line 145) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) {
function TYPED_TEST (line 152) | TYPED_TEST(ReductionLayerTest, TestSumGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 163) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) {
function TYPED_TEST (line 170) | TYPED_TEST(ReductionLayerTest, TestMean) {
function TYPED_TEST (line 176) | TYPED_TEST(ReductionLayerTest, TestMeanCoeff) {
function TYPED_TEST (line 183) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) {
function TYPED_TEST (line 191) | TYPED_TEST(ReductionLayerTest, TestMeanGradient) {
function TYPED_TEST (line 197) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradient) {
function TYPED_TEST (line 204) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) {
function TYPED_TEST (line 212) | TYPED_TEST(ReductionLayerTest, TestAbsSum) {
function TYPED_TEST (line 218) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeff) {
function TYPED_TEST (line 225) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) {
function TYPED_TEST (line 233) | TYPED_TEST(ReductionLayerTest, TestAbsSumGradient) {
function TYPED_TEST (line 239) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffGradient) {
function TYPED_TEST (line 246) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) {
function TYPED_TEST (line 254) | TYPED_TEST(ReductionLayerTest, TestSumOfSquares) {
function TYPED_TEST (line 260) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeff) {
function TYPED_TEST (line 267) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) {
function TYPED_TEST (line 275) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresGradient) {
function TYPED_TEST (line 281) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffGradient) {
function TYPED_TEST (line 288) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) {
FILE: caffe-fpn/src/caffe/test/test_reshape_layer.cpp
type caffe (line 13) | namespace caffe {
class ReshapeLayerTest (line 16) | class ReshapeLayerTest : public MultiDeviceTest<TypeParam> {
method ReshapeLayerTest (line 19) | ReshapeLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(ReshapeLayerTest, TestFlattenOutputSizes) {
function TYPED_TEST (line 57) | TYPED_TEST(ReshapeLayerTest, TestFlattenValues) {
function TYPED_TEST (line 78) | TYPED_TEST(ReshapeLayerTest, TestCopyDimensions) {
function TYPED_TEST (line 97) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecified) {
function TYPED_TEST (line 117) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecifiedWithStartAxis) {
function TYPED_TEST (line 136) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesStart) {
function TYPED_TEST (line 159) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesMiddle) {
function TYPED_TEST (line 182) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesEnd) {
function TYPED_TEST (line 205) | TYPED_TEST(ReshapeLayerTest, TestFlattenMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(ReshapeLayerTest, TestForward) {
function TYPED_TEST (line 239) | TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) {
function TYPED_TEST (line 265) | TYPED_TEST(ReshapeLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_roi_pooling_layer.cpp
type caffe (line 26) | namespace caffe {
class ROIPoolingLayerTest (line 31) | class ROIPoolingLayerTest : public MultiDeviceTest<TypeParam> {
method ROIPoolingLayerTest (line 35) | ROIPoolingLayerTest()
function TYPED_TEST (line 90) | TYPED_TEST(ROIPoolingLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_scale_layer.cpp
type caffe (line 14) | namespace caffe {
class ScaleLayerTest (line 17) | class ScaleLayerTest : public MultiDeviceTest<TypeParam> {
method ScaleLayerTest (line 21) | ScaleLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(ScaleLayerTest, TestForwardEltwise) {
function TYPED_TEST (line 90) | TYPED_TEST(ScaleLayerTest, TestForwardEltwiseInPlace) {
function TYPED_TEST (line 110) | TYPED_TEST(ScaleLayerTest, TestBackwardEltwiseInPlace) {
function TYPED_TEST (line 156) | TYPED_TEST(ScaleLayerTest, TestForwardEltwiseWithParam) {
function TYPED_TEST (line 176) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastBegin) {
function TYPED_TEST (line 199) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 247) | TYPED_TEST(ScaleLayerTest, TestBackwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 293) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParam) {
function TYPED_TEST (line 317) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParamAndBias) {
function TYPED_TEST (line 344) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastEnd) {
function TYPED_TEST (line 367) | TYPED_TEST(ScaleLayerTest, TestForwardScale) {
function TYPED_TEST (line 384) | TYPED_TEST(ScaleLayerTest, TestForwardScaleAxis2) {
function TYPED_TEST (line 402) | TYPED_TEST(ScaleLayerTest, TestGradientEltwise) {
function TYPED_TEST (line 413) | TYPED_TEST(ScaleLayerTest, TestGradientEltwiseWithParam) {
function TYPED_TEST (line 426) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastBegin) {
function TYPED_TEST (line 437) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastMiddle) {
function TYPED_TEST (line 448) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastMiddleWithParam) {
function TYPED_TEST (line 462) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastEnd) {
function TYPED_TEST (line 473) | TYPED_TEST(ScaleLayerTest, TestGradientScale) {
function TYPED_TEST (line 483) | TYPED_TEST(ScaleLayerTest, TestGradientScaleAndBias) {
function TYPED_TEST (line 496) | TYPED_TEST(ScaleLayerTest, TestGradientScaleAxis2) {
FILE: caffe-fpn/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class SigmoidCrossEntropyLossLayerTest (line 17) | class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest<TypePa...
method SigmoidCrossEntropyLossLayerTest (line 21) | SigmoidCrossEntropyLossLayerTest()
method Dtype (line 46) | Dtype SigmoidCrossEntropyLossReference(const int count, const int num,
method TestForward (line 62) | void TestForward() {
function TYPED_TEST (line 103) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLo...
function TYPED_TEST (line 107) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_slice_layer.cpp
type caffe (line 13) | namespace caffe {
class SliceLayerTest (line 16) | class SliceLayerTest : public MultiDeviceTest<TypeParam> {
method SliceLayerTest (line 20) | SliceLayerTest()
method SetUp (line 25) | virtual void SetUp() {
method ReduceBottomBlobSize (line 39) | virtual void ReduceBottomBlobSize() {
function TYPED_TEST (line 61) | TYPED_TEST(SliceLayerTest, TestSetupNum) {
function TYPED_TEST (line 75) | TYPED_TEST(SliceLayerTest, TestSetupChannels) {
function TYPED_TEST (line 90) | TYPED_TEST(SliceLayerTest, TestTrivialSlice) {
function TYPED_TEST (line 105) | TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) {
function TYPED_TEST (line 135) | TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) {
function TYPED_TEST (line 178) | TYPED_TEST(SliceLayerTest, TestGradientTrivial) {
function TYPED_TEST (line 190) | TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) {
function TYPED_TEST (line 202) | TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
FILE: caffe-fpn/src/caffe/test/test_smooth_L1_loss_layer.cpp
type caffe (line 17) | namespace caffe {
class SmoothL1LossLayerTest (line 22) | class SmoothL1LossLayerTest : public MultiDeviceTest<TypeParam> {
method SmoothL1LossLayerTest (line 26) | SmoothL1LossLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(SmoothL1LossLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_softmax_layer.cpp
type caffe (line 18) | namespace caffe {
class SoftmaxLayerTest (line 21) | class SoftmaxLayerTest : public MultiDeviceTest<TypeParam> {
method SoftmaxLayerTest (line 24) | SoftmaxLayerTest()
function TYPED_TEST (line 43) | TYPED_TEST(SoftmaxLayerTest, TestForward) {
function TYPED_TEST (line 77) | TYPED_TEST(SoftmaxLayerTest, TestGradient) {
class CuDNNSoftmaxLayerTest (line 88) | class CuDNNSoftmaxLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNSoftmaxLayerTest (line 90) | CuDNNSoftmaxLayerTest()
function TYPED_TEST (line 109) | TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) {
function TYPED_TEST (line 142) | TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) {
FILE: caffe-fpn/src/caffe/test/test_softmax_with_loss_layer.cpp
type caffe (line 17) | namespace caffe {
class SoftmaxWithLossLayerTest (line 20) | class SoftmaxWithLossLayerTest : public MultiDeviceTest<TypeParam> {
method SoftmaxWithLossLayerTest (line 24) | SoftmaxWithLossLayerTest()
function TYPED_TEST (line 54) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
function TYPED_TEST (line 64) | TYPED_TEST(SoftmaxWithLossLayerTest, TestForwardIgnoreLabel) {
function TYPED_TEST (line 87) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientIgnoreLabel) {
function TYPED_TEST (line 98) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientUnnormalized) {
FILE: caffe-fpn/src/caffe/test/test_solver.cpp
type caffe (line 17) | namespace caffe {
class SolverTest (line 20) | class SolverTest : public MultiDeviceTest<TypeParam> {
method InitSolverFromProtoString (line 24) | virtual void InitSolverFromProtoString(const string& proto) {
function TYPED_TEST (line 46) | TYPED_TEST(SolverTest, TestInitTrainTestNets) {
FILE: caffe-fpn/src/caffe/test/test_solver_factory.cpp
type caffe (line 14) | namespace caffe {
class SolverFactoryTest (line 17) | class SolverFactoryTest : public MultiDeviceTest<TypeParam> {
method SolverParameter (line 19) | SolverParameter simple_solver_param() {
function TYPED_TEST (line 36) | TYPED_TEST(SolverFactoryTest, TestCreateSolver) {
FILE: caffe-fpn/src/caffe/test/test_split_layer.cpp
type caffe (line 17) | namespace caffe {
class SplitLayerTest (line 20) | class SplitLayerTest : public MultiDeviceTest<TypeParam> {
method SplitLayerTest (line 24) | SplitLayerTest()
function TYPED_TEST (line 50) | TYPED_TEST(SplitLayerTest, TestSetup) {
function TYPED_TEST (line 65) | TYPED_TEST(SplitLayerTest, Test) {
function TYPED_TEST (line 78) | TYPED_TEST(SplitLayerTest, TestGradient) {
class SplitLayerInsertionTest (line 88) | class SplitLayerInsertionTest : public ::testing::Test {
method RunInsertionTest (line 90) | void RunInsertionTest(
function TEST_F (line 113) | TEST_F(SplitLayerInsertionTest, TestNoInsertion1) {
function TEST_F (line 137) | TEST_F(SplitLayerInsertionTest, TestNoInsertion2) {
function TEST_F (line 174) | TEST_F(SplitLayerInsertionTest, TestNoInsertionImageNet) {
function TEST_F (line 529) | TEST_F(SplitLayerInsertionTest, TestNoInsertionWithInPlace) {
function TEST_F (line 559) | TEST_F(SplitLayerInsertionTest, TestLossInsertion) {
function TEST_F (line 688) | TEST_F(SplitLayerInsertionTest, TestInsertion) {
function TEST_F (line 783) | TEST_F(SplitLayerInsertionTest, TestInsertionTwoTop) {
function TEST_F (line 889) | TEST_F(SplitLayerInsertionTest, TestInputInsertion) {
function TEST_F (line 950) | TEST_F(SplitLayerInsertionTest, TestWithInPlace) {
FILE: caffe-fpn/src/caffe/test/test_spp_layer.cpp
type caffe (line 18) | namespace caffe {
class SPPLayerTest (line 21) | class SPPLayerTest : public MultiDeviceTest<TypeParam> {
method SPPLayerTest (line 25) | SPPLayerTest()
method SetUp (line 30) | virtual void SetUp() {
function TYPED_TEST (line 58) | TYPED_TEST(SPPLayerTest, TestSetup) {
function TYPED_TEST (line 75) | TYPED_TEST(SPPLayerTest, TestEqualOutputDims) {
function TYPED_TEST (line 92) | TYPED_TEST(SPPLayerTest, TestEqualOutputDims2) {
function TYPED_TEST (line 109) | TYPED_TEST(SPPLayerTest, TestForwardBackward) {
function TYPED_TEST (line 121) | TYPED_TEST(SPPLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_stochastic_pooling.cpp
type caffe (line 16) | namespace caffe {
class StochasticPoolingLayerTest (line 19) | class StochasticPoolingLayerTest : public MultiDeviceTest<TypeParam> {
method StochasticPoolingLayerTest (line 23) | StochasticPoolingLayerTest()
method SetUp (line 26) | virtual void SetUp() {
class CPUStochasticPoolingLayerTest (line 50) | class CPUStochasticPoolingLayerTest
function TYPED_TEST (line 56) | TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) {
class GPUStochasticPoolingLayerTest (line 72) | class GPUStochasticPoolingLayerTest
function TYPED_TEST (line 78) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) {
function TYPED_TEST (line 121) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) {
function TYPED_TEST (line 158) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) {
FILE: caffe-fpn/src/caffe/test/test_syncedmem.cpp
type caffe (line 12) | namespace caffe {
class SyncedMemoryTest (line 14) | class SyncedMemoryTest : public ::testing::Test {}
function TEST_F (line 16) | TEST_F(SyncedMemoryTest, TestInitialization) {
function TEST_F (line 27) | TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) {
function TEST_F (line 37) | TEST_F(SyncedMemoryTest, TestAllocationCPU) {
function TEST_F (line 45) | TEST_F(SyncedMemoryTest, TestAllocationGPU) {
function TEST_F (line 53) | TEST_F(SyncedMemoryTest, TestCPUWrite) {
function TEST_F (line 72) | TEST_F(SyncedMemoryTest, TestGPURead) {
function TEST_F (line 102) | TEST_F(SyncedMemoryTest, TestGPUWrite) {
FILE: caffe-fpn/src/caffe/test/test_tanh_layer.cpp
type caffe (line 14) | namespace caffe {
function tanh_naive (line 16) | double tanh_naive(double x) {
class TanHLayerTest (line 31) | class TanHLayerTest : public MultiDeviceTest<TypeParam> {
method TanHLayerTest (line 35) | TanHLayerTest()
method TestForward (line 45) | void TestForward(Dtype filler_std) {
method TestBackward (line 67) | void TestBackward(Dtype filler_std) {
function TYPED_TEST (line 88) | TYPED_TEST(TanHLayerTest, TestTanH) {
function TYPED_TEST (line 92) | TYPED_TEST(TanHLayerTest, TestTanHOverflow) {
function TYPED_TEST (line 97) | TYPED_TEST(TanHLayerTest, TestTanHGradient) {
FILE: caffe-fpn/src/caffe/test/test_threshold_layer.cpp
type caffe (line 12) | namespace caffe {
class ThresholdLayerTest (line 15) | class ThresholdLayerTest : public MultiDeviceTest<TypeParam> {
method ThresholdLayerTest (line 18) | ThresholdLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(ThresholdLayerTest, TestSetup) {
function TYPED_TEST (line 50) | TYPED_TEST(ThresholdLayerTest, Test) {
function TYPED_TEST (line 72) | TYPED_TEST(ThresholdLayerTest, Test2) {
FILE: caffe-fpn/src/caffe/test/test_tile_layer.cpp
type caffe (line 13) | namespace caffe {
class TileLayerTest (line 16) | class TileLayerTest : public MultiDeviceTest<TypeParam> {
method TileLayerTest (line 20) | TileLayerTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 46) | TYPED_TEST(TileLayerTest, TestTrivialSetup) {
function TYPED_TEST (line 62) | TYPED_TEST(TileLayerTest, TestSetup) {
function TYPED_TEST (line 80) | TYPED_TEST(TileLayerTest, TestForwardNum) {
function TYPED_TEST (line 103) | TYPED_TEST(TileLayerTest, TestForwardChannels) {
function TYPED_TEST (line 124) | TYPED_TEST(TileLayerTest, TestTrivialGradient) {
function TYPED_TEST (line 135) | TYPED_TEST(TileLayerTest, TestGradientNum) {
function TYPED_TEST (line 148) | TYPED_TEST(TileLayerTest, TestGradientChannels) {
FILE: caffe-fpn/src/caffe/test/test_upgrade_proto.cpp
type caffe (line 17) | namespace caffe {
class PaddingLayerUpgradeTest (line 19) | class PaddingLayerUpgradeTest : public ::testing::Test {
method RunPaddingUpgradeTest (line 21) | void RunPaddingUpgradeTest(
function TEST_F (line 44) | TEST_F(PaddingLayerUpgradeTest, TestSimple) {
function TEST_F (line 192) | TEST_F(PaddingLayerUpgradeTest, TestTwoTops) {
function TEST_F (line 387) | TEST_F(PaddingLayerUpgradeTest, TestImageNet) {
class NetUpgradeTest (line 1089) | class NetUpgradeTest : public ::testing::Test {
method RunV0UpgradeTest (line 1091) | void RunV0UpgradeTest(
method RunV1UpgradeTest (line 1108) | void RunV1UpgradeTest(
function TEST_F (line 1126) | TEST_F(NetUpgradeTest, TestSimple) {
function TEST_F (line 1353) | TEST_F(NetUpgradeTest, TestAllParams) {
function TEST_F (line 1853) | TEST_F(NetUpgradeTest, TestImageNet) {
function TEST_F (line 2894) | TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) {
class SolverTypeUpgradeTest (line 2929) | class SolverTypeUpgradeTest : public ::testing::Test {
method RunSolverTypeUpgradeTest (line 2931) | void RunSolverTypeUpgradeTest(
function TEST_F (line 2947) | TEST_F(SolverTypeUpgradeTest, TestSimple) {
FILE: caffe-fpn/src/caffe/test/test_util_blas.cpp
type caffe (line 11) | namespace caffe {
class GemmTest (line 16) | class GemmTest : public ::testing::Test {}
function TYPED_TEST (line 20) | TYPED_TEST(GemmTest, TestGemmCPUGPU) {
function TYPED_TEST (line 91) | TYPED_TEST(GemmTest, TestGemvCPUGPU) {
FILE: caffe-fpn/src/caffe/util/benchmark.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe-fpn/src/caffe/util/blocking_queue.cpp
type caffe (line 9) | namespace caffe {
class BlockingQueue<T>::sync (line 12) | class BlockingQueue<T>::sync {
function T (line 45) | T BlockingQueue<T>::pop(const string& log_on_wait) {
function T (line 73) | T BlockingQueue<T>::peek() {
class BlockingQueue<Batch<float>*> (line 89) | class BlockingQueue<Batch<float>*>
class BlockingQueue<Batch<double>*> (line 90) | class BlockingQueue<Batch<double>*>
class BlockingQueue<Datum*> (line 91) | class BlockingQueue<Datum*>
class BlockingQueue<shared_ptr<DataReader::QueuePair> > (line 92) | class BlockingQueue<shared_ptr<DataReader::QueuePair> >
class BlockingQueue<P2PSync<float>*> (line 93) | class BlockingQueue<P2PSync<float>*>
class BlockingQueue<P2PSync<double>*> (line 94) | class BlockingQueue<P2PSync<double>*>
FILE: caffe-fpn/src/caffe/util/cudnn.cpp
type caffe (line 4) | namespace caffe {
type cudnn (line 5) | namespace cudnn {
FILE: caffe-fpn/src/caffe/util/db.cpp
type caffe (line 7) | namespace caffe { namespace db {
type db (line 7) | namespace db {
function DB (line 9) | DB* GetDB(DataParameter::DB backend) {
function DB (line 25) | DB* GetDB(const string& backend) {
FILE: caffe-fpn/src/caffe/util/db_leveldb.cpp
type caffe (line 6) | namespace caffe { namespace db {
type db (line 6) | namespace db {
FILE: caffe-fpn/src/caffe/util/db_lmdb.cpp
type caffe (line 8) | namespace caffe { namespace db {
type db (line 8) | namespace db {
function LMDBCursor (line 41) | LMDBCursor* LMDB::NewCursor() {
function LMDBTransaction (line 50) | LMDBTransaction* LMDB::NewTransaction() {
FILE: caffe-fpn/src/caffe/util/hdf5.cpp
type caffe (line 6) | namespace caffe {
function hdf5_load_nd_dataset_helper (line 10) | void hdf5_load_nd_dataset_helper(
function string (line 126) | string hdf5_load_string(hid_t loc_id, const string& dataset_name) {
function hdf5_save_string (line 142) | void hdf5_save_string(hid_t loc_id, const string& dataset_name,
function hdf5_load_int (line 150) | int hdf5_load_int(hid_t loc_id, const string& dataset_name) {
function hdf5_save_int (line 158) | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i) {
function hdf5_get_num_links (line 166) | int hdf5_get_num_links(hid_t loc_id) {
function string (line 173) | string hdf5_get_name_by_idx(hid_t loc_id, int idx) {
FILE: caffe-fpn/src/caffe/util/im2col.cpp
type caffe (line 6) | namespace caffe {
function is_a_ge_zero_and_a_lt_b (line 14) | inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
function im2col_cpu (line 19) | void im2col_cpu(const Dtype* data_im, const int channels,
function im2col_nd_core_cpu (line 70) | inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2...
function im2col_nd_cpu (line 141) | void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes,
function col2im_cpu (line 163) | void col2im_cpu(const Dtype* data_col, const int channels,
function col2im_nd_cpu (line 212) | void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes,
FILE: caffe-fpn/src/caffe/util/insert_splits.cpp
type caffe (line 10) | namespace caffe {
function InsertSplits (line 12) | void InsertSplits(const NetParameter& param, NetParameter* param_split) {
function ConfigureSplitLayer (line 109) | void ConfigureSplitLayer(const string& layer_name, const string& blob_...
function string (line 129) | string SplitLayerName(const string& layer_name, const string& blob_name,
function string (line 137) | string SplitBlobName(const string& layer_name, const string& blob_name,
FILE: caffe-fpn/src/caffe/util/io.cpp
type caffe (line 24) | namespace caffe {
function ReadProtoFromTextFile (line 34) | bool ReadProtoFromTextFile(const char* filename, Message* proto) {
function WriteProtoToTextFile (line 44) | void WriteProtoToTextFile(const Message& proto, const char* filename) {
function ReadProtoFromBinaryFile (line 52) | bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
function WriteProtoToBinaryFile (line 67) | void WriteProtoToBinaryFile(const Message& proto, const char* filename) {
function ReadImageToCVMat (line 73) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 91) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 96) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 101) | cv::Mat ReadImageToCVMat(const string& filename) {
function matchExt (line 106) | static bool matchExt(const std::string & fn,
function ReadImageToDatum (line 119) | bool ReadImageToDatum(const string& filename, const int label,
function ReadFileToDatum (line 145) | bool ReadFileToDatum(const string& filename, const int label,
function DecodeDatumToCVMatNative (line 166) | cv::Mat DecodeDatumToCVMatNative(const Datum& datum) {
function DecodeDatumToCVMat (line 177) | cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color) {
function DecodeDatumNative (line 193) | bool DecodeDatumNative(Datum* datum) {
function DecodeDatum (line 202) | bool DecodeDatum(Datum* datum, bool is_color) {
function CVMatToDatum (line 212) | void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) {
FILE: caffe-fpn/src/caffe/util/math_functions.cpp
type caffe (line 10) | namespace caffe {
function caffe_set (line 57) | void caffe_set(const int N, const Dtype alpha, Dtype* Y) {
function caffe_add_scalar (line 72) | void caffe_add_scalar(const int N, const float alpha, float* Y) {
function caffe_add_scalar (line 79) | void caffe_add_scalar(const int N, const double alpha, double* Y) {
function caffe_copy (line 86) | void caffe_copy(const int N, const Dtype* X, Dtype* Y) {
function caffe_rng_rand (line 229) | unsigned int caffe_rng_rand() {
function Dtype (line 234) | Dtype caffe_nextafter(const Dtype b) {
function caffe_rng_uniform (line 246) | void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtyp...
function caffe_rng_gaussian (line 267) | void caffe_rng_gaussian(const int n, const Dtype a,
function caffe_rng_bernoulli (line 289) | void caffe_rng_bernoulli(const int n, const Dtype p, int* r) {
function caffe_rng_bernoulli (line 309) | void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) {
function Dtype (line 341) | Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) {
FILE: caffe-fpn/src/caffe/util/signal_handler.cpp
function handle_signal (line 14) | void handle_signal(int signal) {
function HookupHandler (line 25) | void HookupHandler() {
function UnhookHandler (line 48) | void UnhookHandler() {
function GotSIGINT (line 71) | bool GotSIGINT() {
function GotSIGHUP (line 79) | bool GotSIGHUP() {
type caffe (line 86) | namespace caffe {
function ActionCallback (line 111) | ActionCallback SignalHandler::GetActionFunction() {
FILE: caffe-fpn/src/caffe/util/upgrade_proto.cpp
type caffe (line 13) | namespace caffe {
function NetNeedsUpgrade (line 15) | bool NetNeedsUpgrade(const NetParameter& net_param) {
function UpgradeNetAsNeeded (line 19) | bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
function ReadNetParamsFromTextFileOrDie (line 66) | void ReadNetParamsFromTextFileOrDie(const string& param_file,
function ReadNetParamsFromBinaryFileOrDie (line 73) | void ReadNetParamsFromBinaryFileOrDie(const string& param_file,
function NetNeedsV0ToV1Upgrade (line 80) | bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) {
function NetNeedsV1ToV2Upgrade (line 89) | bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param) {
function UpgradeV0Net (line 93) | bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers,
function UpgradeV0PaddingLayers (line 120) | void UpgradeV0PaddingLayers(const NetParameter& param,
function UpgradeV0LayerParameter (line 179) | bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
function V1LayerParameter_LayerType (line 531) | V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) {
function NetNeedsDataUpgrade (line 586) | bool NetNeedsDataUpgrade(const NetParameter& net_param) {
function UpgradeNetDataTransformation (line 639) | void UpgradeNetDataTransformation(NetParameter* net_param) {
function UpgradeV1Net (line 647) | bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_...
function UpgradeV1LayerParameter (line 667) | bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
function SolverNeedsTypeUpgrade (line 941) | bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param) {
function UpgradeSolverType (line 948) | bool UpgradeSolverType(SolverParameter* solver_param) {
function UpgradeSolverAsNeeded (line 986) | bool UpgradeSolverAsNeeded(const string& param_file, SolverParameter* ...
function ReadSolverParamsFromTextFileOrDie (line 1007) | void ReadSolverParamsFromTextFileOrDie(const string& param_file,
FILE: caffe-fpn/src/gtest/gtest-all.cpp
type testing (line 113) | namespace testing {
function ScopedFakeTestPartResultReporter (line 124) | class GTEST_API_ ScopedFakeTestPartResultReporter
function AssertionResult (line 2510) | AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
type internal (line 6293) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name,
Copy disabled (too large)
Download .json
Condensed preview — 704 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (12,745K chars).
[
{
"path": "README.md",
"chars": 3159,
"preview": "Feature Pyramid Network on caffe\n\nThis is the unoffical version Feature Pyramid Network for Feature Pyramid Networks fo"
},
{
"path": "caffe-fpn/.Doxyfile",
"chars": 101863,
"preview": "# Doxyfile 1.8.8\n\n# This file describes the settings to be used by the documentation system\n# doxygen (www.doxygen.org) "
},
{
"path": "caffe-fpn/.travis.yml",
"chars": 1757,
"preview": "# Use a build matrix to do two builds in parallel:\n# one using CMake, and one using make.\nenv:\n matrix:\n - WITH_CUDA"
},
{
"path": "caffe-fpn/CMakeLists.txt",
"chars": 2915,
"preview": "cmake_minimum_required(VERSION 2.8.7)\nif(POLICY CMP0046)\n cmake_policy(SET CMP0046 NEW)\nendif()\nif(POLICY CMP0054)\n cm"
},
{
"path": "caffe-fpn/CONTRIBUTING.md",
"chars": 1917,
"preview": "# Contributing\n\n## Issues\n\nSpecific Caffe design and development issues, bugs, and feature requests are maintained by Gi"
},
{
"path": "caffe-fpn/CONTRIBUTORS.md",
"chars": 620,
"preview": "# Contributors\n\nCaffe is developed by a core set of BVLC members and the open-source community.\n\nWe thank all of our [co"
},
{
"path": "caffe-fpn/INSTALL.md",
"chars": 210,
"preview": "# Installation\n\nSee http://caffe.berkeleyvision.org/installation.html for the latest\ninstallation instructions.\n\nCheck t"
},
{
"path": "caffe-fpn/LICENSE",
"chars": 2892,
"preview": "--------------------------START OF THIRD PARTY NOTICE--------------------------\n\nMicrosoft licenses this Third Party IP "
},
{
"path": "caffe-fpn/Makefile",
"chars": 23068,
"preview": "PROJECT := caffe\n\nCONFIG_FILE := Makefile.config\n# Explicitly check for the config file, otherwise make -k will proceed "
},
{
"path": "caffe-fpn/Makefile.config",
"chars": 4398,
"preview": "## Refer to http://caffe.berkeleyvision.org/installation.html\n# Contributions simplifying and improving our build system"
},
{
"path": "caffe-fpn/Makefile.config.example",
"chars": 4217,
"preview": "## Refer to http://caffe.berkeleyvision.org/installation.html\n# Contributions simplifying and improving our build system"
},
{
"path": "caffe-fpn/README.md",
"chars": 36,
"preview": "deformable convolution net on caffe\n"
},
{
"path": "caffe-fpn/caffe.cloc",
"chars": 1180,
"preview": "Bourne Shell\n filter remove_matches ^\\s*#\n filter remove_inline #.*$\n extension sh\n script_exe sh\nC\n filt"
},
{
"path": "caffe-fpn/cmake/ConfigGen.cmake",
"chars": 4373,
"preview": "\n################################################################################################\n# Helper function to f"
},
{
"path": "caffe-fpn/cmake/Cuda.cmake",
"chars": 11184,
"preview": "if(CPU_ONLY)\n return()\nendif()\n\n# Known NVIDIA GPU achitectures Caffe can be compiled for.\n# This list will be used for"
},
{
"path": "caffe-fpn/cmake/Dependencies.cmake",
"chars": 5564,
"preview": "# This list is required for static linking and exported to CaffeConfig.cmake\nset(Caffe_LINKER_LIBS \"\")\n\n# ---[ Boost\nfin"
},
{
"path": "caffe-fpn/cmake/External/gflags.cmake",
"chars": 1939,
"preview": "if (NOT __GFLAGS_INCLUDED) # guard against multiple includes\n set(__GFLAGS_INCLUDED TRUE)\n\n # use the system-wide gfla"
},
{
"path": "caffe-fpn/cmake/External/glog.cmake",
"chars": 1719,
"preview": "# glog depends on gflags\ninclude(\"cmake/External/gflags.cmake\")\n\nif (NOT __GLOG_INCLUDED)\n set(__GLOG_INCLUDED TRUE)\n\n "
},
{
"path": "caffe-fpn/cmake/Misc.cmake",
"chars": 1764,
"preview": "# ---[ Configuration types\nset(CMAKE_CONFIGURATION_TYPES \"Debug;Release\" CACHE STRING \"Possible configurations\" FORCE)\nm"
},
{
"path": "caffe-fpn/cmake/Modules/FindAtlas.cmake",
"chars": 1666,
"preview": "# Find the Atlas (and Lapack) libraries\n#\n# The following variables are optionally searched for defaults\n# Atlas_ROOT_D"
},
{
"path": "caffe-fpn/cmake/Modules/FindGFlags.cmake",
"chars": 1545,
"preview": "# - Try to find GFLAGS\n#\n# The following variables are optionally searched for defaults\n# GFLAGS_ROOT_DIR: B"
},
{
"path": "caffe-fpn/cmake/Modules/FindGlog.cmake",
"chars": 1451,
"preview": "# - Try to find Glog\n#\n# The following variables are optionally searched for defaults\n# GLOG_ROOT_DIR: Base "
},
{
"path": "caffe-fpn/cmake/Modules/FindLAPACK.cmake",
"chars": 6723,
"preview": "# - Find LAPACK library\n# This module finds an installed fortran library that implements the LAPACK\n# linear-algebra int"
},
{
"path": "caffe-fpn/cmake/Modules/FindLMDB.cmake",
"chars": 1119,
"preview": "# Try to find the LMBD libraries and headers\n# LMDB_FOUND - system has LMDB lib\n# LMDB_INCLUDE_DIR - the LMDB include "
},
{
"path": "caffe-fpn/cmake/Modules/FindLevelDB.cmake",
"chars": 1728,
"preview": "# - Find LevelDB\n#\n# LevelDB_INCLUDES - List of LevelDB includes\n# LevelDB_LIBRARIES - List of libraries when using L"
},
{
"path": "caffe-fpn/cmake/Modules/FindMKL.cmake",
"chars": 3361,
"preview": "# Find the MKL libraries\r\n#\r\n# Options:\r\n#\r\n# MKL_USE_SINGLE_DYNAMIC_LIBRARY : use single dynamic library interface\r\n"
},
{
"path": "caffe-fpn/cmake/Modules/FindMatlabMex.cmake",
"chars": 1749,
"preview": "# This module looks for MatlabMex compiler\n# Defines variables:\n# Matlab_DIR - Matlab root dir\n# Matlab_mex "
},
{
"path": "caffe-fpn/cmake/Modules/FindNumPy.cmake",
"chars": 2333,
"preview": "# - Find the NumPy libraries\n# This module finds if NumPy is installed, and sets the following variables\n# indicating wh"
},
{
"path": "caffe-fpn/cmake/Modules/FindOpenBLAS.cmake",
"chars": 1593,
"preview": "\n\nSET(Open_BLAS_INCLUDE_SEARCH_PATHS\n /usr/include\n /usr/include/openblas\n /usr/include/openblas-base\n /usr/local/in"
},
{
"path": "caffe-fpn/cmake/Modules/FindSnappy.cmake",
"chars": 1071,
"preview": "# Find the Snappy libraries\n#\n# The following variables are optionally searched for defaults\n# Snappy_ROOT_DIR: Base"
},
{
"path": "caffe-fpn/cmake/Modules/FindvecLib.cmake",
"chars": 1304,
"preview": "# Find the vecLib libraries as part of Accelerate.framework or as standalon framework\n#\n# The following are set after co"
},
{
"path": "caffe-fpn/cmake/ProtoBuf.cmake",
"chars": 3733,
"preview": "# Finds Google Protocol Buffers library and compilers and extends\n# the standard cmake script with version and python ge"
},
{
"path": "caffe-fpn/cmake/Summary.cmake",
"chars": 7597,
"preview": "################################################################################################\n# Caffe status report f"
},
{
"path": "caffe-fpn/cmake/Targets.cmake",
"chars": 7135,
"preview": "################################################################################################\n# Defines global Caffe_"
},
{
"path": "caffe-fpn/cmake/Templates/CaffeConfig.cmake.in",
"chars": 1798,
"preview": "# Config file for the Caffe package.\n#\n# Note:\n# Caffe and this config file depends on opencv,\n# so put `find_packag"
},
{
"path": "caffe-fpn/cmake/Templates/CaffeConfigVersion.cmake.in",
"chars": 377,
"preview": "set(PACKAGE_VERSION \"@Caffe_VERSION@\")\n\n# Check whether the requested PACKAGE_FIND_VERSION is compatible\nif(\"${PACKAGE_V"
},
{
"path": "caffe-fpn/cmake/Templates/caffe_config.h.in",
"chars": 804,
"preview": "/* Sources directory */\n#define SOURCE_FOLDER \"${PROJECT_SOURCE_DIR}\"\n\n/* Binaries directory */\n#define BINARY_FOLDER \"$"
},
{
"path": "caffe-fpn/cmake/Utils.cmake",
"chars": 13393,
"preview": "################################################################################################\n# Command alias for deb"
},
{
"path": "caffe-fpn/cmake/lint.cmake",
"chars": 1505,
"preview": "\nset(CMAKE_SOURCE_DIR ..)\nset(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)\nset(SRC_FILE_EXTENSIONS h hpp hu c c"
},
{
"path": "caffe-fpn/data/cifar10/get_cifar10.sh",
"chars": 504,
"preview": "#!/usr/bin/env sh\n# This scripts downloads the CIFAR10 (binary version) data and unzips it.\n\nDIR=\"$( cd \"$(dirname \"$0\")"
},
{
"path": "caffe-fpn/data/ilsvrc12/get_ilsvrc_aux.sh",
"chars": 585,
"preview": "#!/usr/bin/env sh\n#\n# N.B. This does not download the ilsvrcC12 data set, as it is gargantuan.\n# This script downloads t"
},
{
"path": "caffe-fpn/docs/CMakeLists.txt",
"chars": 4532,
"preview": "# Building docs script\n# Requirements:\n# sudo apt-get install doxygen texlive ruby-dev\n# sudo gem install jekyll exe"
},
{
"path": "caffe-fpn/docs/CNAME",
"chars": 25,
"preview": "caffe.berkeleyvision.org\n"
},
{
"path": "caffe-fpn/docs/README.md",
"chars": 241,
"preview": "# Caffe Documentation\n\nTo generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`.\n\nTo push your changes to "
},
{
"path": "caffe-fpn/docs/_config.yml",
"chars": 131,
"preview": "defaults:\n -\n scope:\n path: \"\" # an empty string here means all files in the project\n values:\n layout: "
},
{
"path": "caffe-fpn/docs/_layouts/default.html",
"chars": 2067,
"preview": "<!doctype html>\n<html>\n <head>\n <!-- MathJax -->\n <script type=\"text/javascript\"\n src=\"http://cdn.mathjax.or"
},
{
"path": "caffe-fpn/docs/development.md",
"chars": 6631,
"preview": "---\ntitle: Developing and Contributing\n---\n# Development and Contributing\n\nCaffe is developed with active participation "
},
{
"path": "caffe-fpn/docs/index.md",
"chars": 6262,
"preview": "---\ntitle: Deep Learning Framework\n---\n\n# Caffe\n\nCaffe is a deep learning framework made with expression, speed, and mod"
},
{
"path": "caffe-fpn/docs/install_apt.md",
"chars": 1793,
"preview": "---\ntitle: Installation: Ubuntu\n---\n\n# Ubuntu Installation\n\n**General dependencies**\n\n sudo apt-get install libprotob"
},
{
"path": "caffe-fpn/docs/install_osx.md",
"chars": 6316,
"preview": "---\ntitle: Installation: OS X\n---\n\n# OS X Installation\n\nWe highly recommend using the [Homebrew](http://brew.sh/) packag"
},
{
"path": "caffe-fpn/docs/install_yum.md",
"chars": 1730,
"preview": "---\ntitle: Installation: RHEL / Fedora / CentOS\n---\n\n# RHEL / Fedora / CentOS Installation\n\n**General dependencies**\n\n "
},
{
"path": "caffe-fpn/docs/installation.md",
"chars": 7818,
"preview": "---\ntitle: Installation\n---\n\n# Installation\n\nPrior to installing, have a glance through this guide and take note of the "
},
{
"path": "caffe-fpn/docs/model_zoo.md",
"chars": 4878,
"preview": "---\ntitle: Model Zoo\n---\n# Caffe Model Zoo\n\nLots of researchers and engineers have made Caffe models for different tasks"
},
{
"path": "caffe-fpn/docs/multigpu.md",
"chars": 2810,
"preview": "---\ntitle: Multi-GPU Usage, Hardware Configuration Assumptions, and Performance\n---\n\n# Multi-GPU Usage\n\nCurrently Multi-"
},
{
"path": "caffe-fpn/docs/performance_hardware.md",
"chars": 2533,
"preview": "---\ntitle: Performance and Hardware Configuration\n---\n\n# Performance and Hardware Configuration\n\nTo measure performance "
},
{
"path": "caffe-fpn/docs/stylesheets/pygment_trac.css",
"chars": 4168,
"preview": ".highlight { background: #ffffff; }\n.highlight .c { color: #999988; font-style: italic } /* Comment */\n.highlight .err "
},
{
"path": "caffe-fpn/docs/stylesheets/reset.css",
"chars": 602,
"preview": "/* MeyerWeb Reset */\n\nhtml, body, div, span, applet, object, iframe,\nh1, h2, h3, h4, h5, h6, p, blockquote, pre,\na, abbr"
},
{
"path": "caffe-fpn/docs/stylesheets/styles.css",
"chars": 4385,
"preview": "@import url(http://fonts.googleapis.com/css?family=PT+Serif|Open+Sans:600,400);\n\nbody {\n padding:10px 50px 0 0;\n font-"
},
{
"path": "caffe-fpn/docs/tutorial/convolution.md",
"chars": 683,
"preview": "---\ntitle: Convolution\n---\n# Caffeinated Convolution\n\nThe Caffe strategy for convolution is to reduce the problem to mat"
},
{
"path": "caffe-fpn/docs/tutorial/data.md",
"chars": 3496,
"preview": "---\ntitle: Data\n---\n# Data: Ins and Outs\n\nData flows through Caffe as [Blobs](net_layer_blob.html#blob-storage-and-commu"
},
{
"path": "caffe-fpn/docs/tutorial/fig/.gitignore",
"chars": 0,
"preview": ""
},
{
"path": "caffe-fpn/docs/tutorial/forward_backward.md",
"chars": 2463,
"preview": "---\ntitle: Forward and Backward for Inference and Learning\n---\n# Forward and Backward\n\nThe forward and backward passes a"
},
{
"path": "caffe-fpn/docs/tutorial/index.md",
"chars": 3219,
"preview": "---\ntitle: Caffe Tutorial\n---\n# Caffe Tutorial\n\nCaffe is a deep learning framework and this tutorial explains its philos"
},
{
"path": "caffe-fpn/docs/tutorial/interfaces.md",
"chars": 14301,
"preview": "---\ntitle: Interfaces\n---\n# Interfaces\n\nCaffe has command line, Python, and MATLAB interfaces for day-to-day usage, inte"
},
{
"path": "caffe-fpn/docs/tutorial/layers.md",
"chars": 19098,
"preview": "---\ntitle: Layer Catalogue\n---\n# Layers\n\nTo create a Caffe model you need to define the model architecture in a protocol"
},
{
"path": "caffe-fpn/docs/tutorial/loss.md",
"chars": 2783,
"preview": "---\ntitle: Loss\n---\n# Loss\n\nIn Caffe, as in most of machine learning, learning is driven by a **loss** function (also kn"
},
{
"path": "caffe-fpn/docs/tutorial/net_layer_blob.md",
"chars": 13260,
"preview": "---\ntitle: Blobs, Layers, and Nets\n---\n# Blobs, Layers, and Nets: anatomy of a Caffe model\n\nDeep networks are compositio"
},
{
"path": "caffe-fpn/docs/tutorial/solver.md",
"chars": 18375,
"preview": "---\ntitle: Solver / Model Optimization\n---\n# Solver\n\nThe solver orchestrates model optimization by coordinating the netw"
},
{
"path": "caffe-fpn/examples/00-classification.ipynb",
"chars": 1120498,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Instant Recognition with Caffe\\n\""
},
{
"path": "caffe-fpn/examples/01-learning-lenet.ipynb",
"chars": 390332,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Python solving with LeNet\\n\",\n "
},
{
"path": "caffe-fpn/examples/02-brewing-logreg.ipynb",
"chars": 477118,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Brewing Logistic Regression then "
},
{
"path": "caffe-fpn/examples/03-fine-tuning.ipynb",
"chars": 60747,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Fine-tuning a Pretrained Network "
},
{
"path": "caffe-fpn/examples/CMakeLists.txt",
"chars": 1063,
"preview": "file(GLOB_RECURSE examples_srcs \"${PROJECT_SOURCE_DIR}/examples/*.cpp\")\n\nforeach(source_file ${examples_srcs})\n # get f"
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full.prototxt",
"chars": 2128,
"preview": "name: \"CIFAR10_full_deploy\"\n# N.B. input image must be in CIFAR-10 format\n# as described at http://www.cs.toronto.edu/~k"
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_sigmoid_solver.prototxt",
"chars": 953,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt",
"chars": 959,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_sigmoid_train_test.prototxt",
"chars": 2879,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt",
"chars": 3192,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_solver.prototxt",
"chars": 944,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_solver_lr1.prototxt",
"chars": 944,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_solver_lr2.prototxt",
"chars": 945,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_full_train_test.prototxt",
"chars": 3122,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_quick.prototxt",
"chars": 1875,
"preview": "name: \"CIFAR10_quick_test\"\ninput: \"data\"\ninput_shape {\n dim: 1\n dim: 3\n dim: 32\n dim: 32\n}\nlayer {\n name: \"conv1\"\n "
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_quick_solver.prototxt",
"chars": 881,
"preview": "# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10\n\n# The train/test net protocol buffer definitio"
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_quick_solver_lr1.prototxt",
"chars": 882,
"preview": "# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10\n\n# The train/test net protocol buffer definitio"
},
{
"path": "caffe-fpn/examples/cifar10/cifar10_quick_train_test.prototxt",
"chars": 3088,
"preview": "name: \"CIFAR10_quick\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe-fpn/examples/cifar10/convert_cifar_data.cpp",
"chars": 3647,
"preview": "//\n// This script converts the CIFAR dataset to the leveldb format used\n// by caffe to perform classification.\n// Usage:"
},
{
"path": "caffe-fpn/examples/cifar10/create_cifar10.sh",
"chars": 460,
"preview": "#!/usr/bin/env sh\n# This script converts the cifar data into leveldb format.\n\nEXAMPLE=examples/cifar10\nDATA=data/cifar10"
},
{
"path": "caffe-fpn/examples/cifar10/readme.md",
"chars": 5243,
"preview": "---\ntitle: CIFAR-10 tutorial\ncategory: example\ndescription: Train and test Caffe on CIFAR-10 data.\ninclude_in_docs: true"
},
{
"path": "caffe-fpn/examples/cifar10/train_full.sh",
"chars": 514,
"preview": "#!/usr/bin/env sh\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_solver.prototxt\n"
},
{
"path": "caffe-fpn/examples/cifar10/train_full_sigmoid.sh",
"chars": 129,
"preview": "#!/usr/bin/env sh\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_sigmoid_solver.p"
},
{
"path": "caffe-fpn/examples/cifar10/train_full_sigmoid_bn.sh",
"chars": 132,
"preview": "#!/usr/bin/env sh\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_sigmoid_solver_b"
},
{
"path": "caffe-fpn/examples/cifar10/train_quick.sh",
"chars": 328,
"preview": "#!/usr/bin/env sh\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_quick_solver.prototxt\n\n"
},
{
"path": "caffe-fpn/examples/cpp_classification/classification.cpp",
"chars": 8670,
"preview": "#include <caffe/caffe.hpp>\n#ifdef USE_OPENCV\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#in"
},
{
"path": "caffe-fpn/examples/cpp_classification/readme.md",
"chars": 2837,
"preview": "---\ntitle: CaffeNet C++ Classification example\ndescription: A simple example performing image classification using the l"
},
{
"path": "caffe-fpn/examples/detection.ipynb",
"chars": 702457,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"[R-CNN](https://github.com/rbgirshi"
},
{
"path": "caffe-fpn/examples/feature_extraction/imagenet_val.prototxt",
"chars": 3279,
"preview": "name: \"CaffeNet\"\nlayer {\n name: \"data\"\n type: \"ImageData\"\n top: \"data\"\n top: \"label\"\n transform_param {\n mirror:"
},
{
"path": "caffe-fpn/examples/feature_extraction/readme.md",
"chars": 3087,
"preview": "---\ntitle: Feature extraction with Caffe C++ code.\ndescription: Extract CaffeNet / AlexNet features using the Caffe util"
},
{
"path": "caffe-fpn/examples/finetune_flickr_style/assemble_data.py",
"chars": 3636,
"preview": "#!/usr/bin/env python\n\"\"\"\nForm a subset of the Flickr Style data, download images to dirname, and write\nCaffe ImagesData"
},
{
"path": "caffe-fpn/examples/finetune_flickr_style/readme.md",
"chars": 10334,
"preview": "---\ntitle: Fine-tuning for style recognition\ndescription: Fine-tune the ImageNet-trained CaffeNet on the \"Flickr Style\" "
},
{
"path": "caffe-fpn/examples/finetune_flickr_style/style_names.txt",
"chars": 173,
"preview": "Detailed\nPastel\nMelancholy\nNoir\nHDR\nVintage\nLong Exposure\nHorror\nSunny\nBright\nHazy\nBokeh\nSerene\nTexture\nEthereal\nMacro\nD"
},
{
"path": "caffe-fpn/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt",
"chars": 330,
"preview": "net: \"examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt\"\ntest_iter: 100\ntest_interval: 1000\nbase"
},
{
"path": "caffe-fpn/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt",
"chars": 5614,
"preview": "name: \"CaffeNet\"\nlayer {\n name: \"data\"\n type: \"WindowData\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe-fpn/examples/hdf5_classification/nonlinear_auto_test.prototxt",
"chars": 782,
"preview": "layer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n hdf5_data_param {\n source: \"examples/hdf5_cl"
},
{
"path": "caffe-fpn/examples/hdf5_classification/nonlinear_auto_train.prototxt",
"chars": 783,
"preview": "layer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n hdf5_data_param {\n source: \"examples/hdf5_cl"
},
{
"path": "caffe-fpn/examples/hdf5_classification/nonlinear_solver.prototxt",
"chars": 392,
"preview": "train_net: \"examples/hdf5_classification/nonlinear_auto_train.prototxt\"\ntest_net: \"examples/hdf5_classification/nonlinea"
},
{
"path": "caffe-fpn/examples/hdf5_classification/nonlinear_train_val.prototxt",
"chars": 1395,
"preview": "name: \"LogisticRegressionNet\"\nlayer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n include {\n pha"
},
{
"path": "caffe-fpn/examples/hdf5_classification/solver.prototxt",
"chars": 386,
"preview": "train_net: \"examples/hdf5_classification/logreg_auto_train.prototxt\"\ntest_net: \"examples/hdf5_classification/logreg_auto"
},
{
"path": "caffe-fpn/examples/hdf5_classification/train_val.prototxt",
"chars": 999,
"preview": "name: \"LogisticRegressionNet\"\nlayer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n include {\n pha"
},
{
"path": "caffe-fpn/examples/imagenet/create_imagenet.sh",
"chars": 1496,
"preview": "#!/usr/bin/env sh\n# Create the imagenet lmdb inputs\n# N.B. set the path to the imagenet train + val data dirs\n\nEXAMPLE=e"
},
{
"path": "caffe-fpn/examples/imagenet/make_imagenet_mean.sh",
"chars": 287,
"preview": "#!/usr/bin/env sh\n# Compute the mean image from the imagenet training lmdb\n# N.B. this is available in data/ilsvrc12\n\nEX"
},
{
"path": "caffe-fpn/examples/imagenet/readme.md",
"chars": 7634,
"preview": "---\ntitle: ImageNet tutorial\ndescription: Train and test \"CaffeNet\" on ImageNet data.\ncategory: example\ninclude_in_docs:"
},
{
"path": "caffe-fpn/examples/imagenet/resume_training.sh",
"chars": 191,
"preview": "#!/usr/bin/env sh\n\n./build/tools/caffe train \\\n --solver=models/bvlc_reference_caffenet/solver.prototxt \\\n --snaps"
},
{
"path": "caffe-fpn/examples/imagenet/train_caffenet.sh",
"chars": 107,
"preview": "#!/usr/bin/env sh\n\n./build/tools/caffe train \\\n --solver=models/bvlc_reference_caffenet/solver.prototxt\n"
},
{
"path": "caffe-fpn/examples/mnist/convert_mnist_data.cpp",
"chars": 4520,
"preview": "// This script converts the MNIST dataset to a lmdb (default) or\n// leveldb (--backend=leveldb) format used by caffe to "
},
{
"path": "caffe-fpn/examples/mnist/create_mnist.sh",
"chars": 634,
"preview": "#!/usr/bin/env sh\n# This script converts the mnist data into lmdb/leveldb format,\n# depending on the value assigned to $"
},
{
"path": "caffe-fpn/examples/mnist/lenet.prototxt",
"chars": 1738,
"preview": "name: \"LeNet\"\nlayer {\n name: \"data\"\n type: \"Input\"\n top: \"data\"\n input_param { shape: { dim: 64 dim: 1 dim: 28 dim: "
},
{
"path": "caffe-fpn/examples/mnist/lenet_adadelta_solver.prototxt",
"chars": 777,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe-fpn/examples/mnist/lenet_auto_solver.prototxt",
"chars": 778,
"preview": "# The train/test net protocol buffer definition\ntrain_net: \"mnist/lenet_auto_train.prototxt\"\ntest_net: \"mnist/lenet_auto"
},
{
"path": "caffe-fpn/examples/mnist/lenet_consolidated_solver.prototxt",
"chars": 6003,
"preview": "# lenet_consolidated_solver.prototxt consolidates the lenet_solver, lenet_train,\n# and lenet_test prototxts into a singl"
},
{
"path": "caffe-fpn/examples/mnist/lenet_multistep_solver.prototxt",
"chars": 871,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe-fpn/examples/mnist/lenet_solver.prototxt",
"chars": 790,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe-fpn/examples/mnist/lenet_solver_adam.prototxt",
"chars": 886,
"preview": "# The train/test net protocol buffer definition\n# this follows \"ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION\"\nnet: \"exampl"
},
{
"path": "caffe-fpn/examples/mnist/lenet_solver_rmsprop.prototxt",
"chars": 830,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe-fpn/examples/mnist/lenet_train_test.prototxt",
"chars": 3019,
"preview": "name: \"LeNet\"\nlayer {\n name: \"mnist\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n }\n tra"
},
{
"path": "caffe-fpn/examples/mnist/mnist_autoencoder.prototxt",
"chars": 4814,
"preview": "name: \"MNISTAutoencoder\"\nlayer {\n name: \"data\"\n type: \"Data\"\n top: \"data\"\n include {\n phase: TRAIN\n }\n transfor"
},
{
"path": "caffe-fpn/examples/mnist/mnist_autoencoder_solver.prototxt",
"chars": 433,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe-fpn/examples/mnist/mnist_autoencoder_solver_adadelta.prototxt",
"chars": 451,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe-fpn/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt",
"chars": 423,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe-fpn/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt",
"chars": 466,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe-fpn/examples/mnist/readme.md",
"chars": 11948,
"preview": "---\ntitle: LeNet MNIST Tutorial\ndescription: Train and test \"LeNet\" on the MNIST handwritten digit data.\ncategory: examp"
},
{
"path": "caffe-fpn/examples/mnist/train_lenet.sh",
"chars": 101,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_lenet_adam.sh",
"chars": 106,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_lenet_consolidated.sh",
"chars": 118,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/lenet_consolidated_solver.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_lenet_docker.sh",
"chars": 4517,
"preview": "#!/usr/bin/env sh\nset -e\n# The following example allows for the MNIST example (using LeNet) to be\n# trained using the ca"
},
{
"path": "caffe-fpn/examples/mnist/train_lenet_rmsprop.sh",
"chars": 115,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/lenet_solver_rmsprop.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_mnist_autoencoder.sh",
"chars": 117,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/mnist_autoencoder_solver.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_mnist_autoencoder_adadelta.sh",
"chars": 120,
"preview": "#!/bin/bash\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_mnist_autoencoder_adagrad.sh",
"chars": 119,
"preview": "#!/bin/bash\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/mnist/train_mnist_autoencoder_nesterov.sh",
"chars": 120,
"preview": "#!/bin/bash\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt $@\n"
},
{
"path": "caffe-fpn/examples/net_surgery/bvlc_caffenet_full_conv.prototxt",
"chars": 3019,
"preview": "# Fully convolutional network version of CaffeNet.\nname: \"CaffeNetConv\"\ninput: \"data\"\ninput_shape {\n dim: 1\n dim: 3\n "
},
{
"path": "caffe-fpn/examples/net_surgery/conv.prototxt",
"chars": 440,
"preview": "# Simple single-layer network to showcase editing model parameters.\nname: \"convolution\"\ninput: \"data\"\ninput_shape {\n di"
},
{
"path": "caffe-fpn/examples/net_surgery.ipynb",
"chars": 583379,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Net Surgery\\n\",\n \"\\n\",\n \"Ca"
},
{
"path": "caffe-fpn/examples/pycaffe/caffenet.py",
"chars": 2112,
"preview": "from __future__ import print_function\nfrom caffe import layers as L, params as P, to_proto\nfrom caffe.proto import caffe"
},
{
"path": "caffe-fpn/examples/pycaffe/layers/pyloss.py",
"chars": 1223,
"preview": "import caffe\nimport numpy as np\n\n\nclass EuclideanLossLayer(caffe.Layer):\n \"\"\"\n Compute the Euclidean Loss in the s"
},
{
"path": "caffe-fpn/examples/pycaffe/linreg.prototxt",
"chars": 1302,
"preview": "name: 'LinearRegressionExample'\n# define a simple network for linear regression on dummy data\n# that computes the loss b"
},
{
"path": "caffe-fpn/examples/siamese/convert_mnist_siamese_data.cpp",
"chars": 4368,
"preview": "//\n// This script converts the MNIST dataset to the leveldb format used\n// by caffe to train siamese network.\n// Usage:\n"
},
{
"path": "caffe-fpn/examples/siamese/create_mnist_siamese.sh",
"chars": 610,
"preview": "#!/usr/bin/env sh\n# This script converts the mnist data into leveldb format.\n\nEXAMPLES=./build/examples/siamese\nDATA=./d"
},
{
"path": "caffe-fpn/examples/siamese/mnist_siamese.ipynb",
"chars": 158921,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Setup\\n\",\n \"\\n\",\n \"Import"
},
{
"path": "caffe-fpn/examples/siamese/mnist_siamese.prototxt",
"chars": 1436,
"preview": "name: \"mnist_siamese\"\ninput: \"data\"\ninput_shape {\n dim: 10000\n dim: 1\n dim: 28\n dim: 28\n}\nlayer {\n name: \"conv1\"\n "
},
{
"path": "caffe-fpn/examples/siamese/mnist_siamese_solver.prototxt",
"chars": 810,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/siamese/mnist_siamese_train_test.prototxt\"\n# test_iter sp"
},
{
"path": "caffe-fpn/examples/siamese/mnist_siamese_train_test.prototxt",
"chars": 4907,
"preview": "name: \"mnist_siamese_train_test\"\nlayer {\n name: \"pair_data\"\n type: \"Data\"\n top: \"pair_data\"\n top: \"sim\"\n include {\n"
},
{
"path": "caffe-fpn/examples/siamese/readme.md",
"chars": 5949,
"preview": "---\ntitle: Siamese Network Tutorial\ndescription: Train and test a siamese network on MNIST data.\ncategory: example\ninclu"
},
{
"path": "caffe-fpn/examples/siamese/train_mnist_siamese.sh",
"chars": 115,
"preview": "#!/usr/bin/env sh\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt\n"
},
{
"path": "caffe-fpn/examples/web_demo/app.py",
"chars": 7793,
"preview": "import os\nimport time\nimport cPickle\nimport datetime\nimport logging\nimport flask\nimport werkzeug\nimport optparse\nimport "
},
{
"path": "caffe-fpn/examples/web_demo/exifutil.py",
"chars": 1046,
"preview": "\"\"\"\nThis script handles the skimage exif problem.\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\n\nORIENTATIONS = { # us"
},
{
"path": "caffe-fpn/examples/web_demo/readme.md",
"chars": 1876,
"preview": "---\ntitle: Web demo\ndescription: Image classification demo running as a Flask web server.\ncategory: example\ninclude_in_d"
},
{
"path": "caffe-fpn/examples/web_demo/requirements.txt",
"chars": 50,
"preview": "werkzeug\nflask\ntornado\nnumpy\npandas\npillow\npyyaml\n"
},
{
"path": "caffe-fpn/examples/web_demo/templates/index.html",
"chars": 4981,
"preview": "<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-wid"
},
{
"path": "caffe-fpn/include/caffe/blob.hpp",
"chars": 9378,
"preview": "#ifndef CAFFE_BLOB_HPP_\n#define CAFFE_BLOB_HPP_\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n#include \"caf"
},
{
"path": "caffe-fpn/include/caffe/caffe.hpp",
"chars": 634,
"preview": "// caffe.hpp is the header file that you need to include in your code. It wraps\n// all the internal caffe header files i"
},
{
"path": "caffe-fpn/include/caffe/common.hpp",
"chars": 5917,
"preview": "#ifndef CAFFE_COMMON_HPP_\n#define CAFFE_COMMON_HPP_\n\n#include <boost/shared_ptr.hpp>\n#include <gflags/gflags.h>\n#include"
},
{
"path": "caffe-fpn/include/caffe/data_reader.hpp",
"chars": 2167,
"preview": "#ifndef CAFFE_DATA_READER_HPP_\n#define CAFFE_DATA_READER_HPP_\n\n#include <map>\n#include <string>\n#include <vector>\n\n#incl"
},
{
"path": "caffe-fpn/include/caffe/data_transformer.hpp",
"chars": 4904,
"preview": "#ifndef CAFFE_DATA_TRANSFORMER_HPP\n#define CAFFE_DATA_TRANSFORMER_HPP\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inc"
},
{
"path": "caffe-fpn/include/caffe/fast_rcnn_layers.hpp",
"chars": 3486,
"preview": "// ------------------------------------------------------------------\n// Fast R-CNN\n// Copyright (c) 2015 Microsoft\n// L"
},
{
"path": "caffe-fpn/include/caffe/filler.hpp",
"chars": 10937,
"preview": "// Fillers are random number generators that fills a blob using the specified\n// algorithm. The expectation is that they"
},
{
"path": "caffe-fpn/include/caffe/internal_thread.hpp",
"chars": 1382,
"preview": "#ifndef CAFFE_INTERNAL_THREAD_HPP_\n#define CAFFE_INTERNAL_THREAD_HPP_\n\n#include \"caffe/common.hpp\"\n\n/**\n Forward declare"
},
{
"path": "caffe-fpn/include/caffe/layer.hpp",
"chars": 18583,
"preview": "#ifndef CAFFE_LAYER_H_\n#define CAFFE_LAYER_H_\n\n#include <algorithm>\n#include <string>\n#include <vector>\n#include <iostre"
},
{
"path": "caffe-fpn/include/caffe/layer_factory.hpp",
"chars": 4517,
"preview": "/**\n * @brief A layer factory that allows one to register layers.\n * During runtime, registered layers could be called b"
},
{
"path": "caffe-fpn/include/caffe/layers/absval_layer.hpp",
"chars": 2343,
"preview": "#ifndef CAFFE_ABSVAL_LAYER_HPP_\n#define CAFFE_ABSVAL_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \""
},
{
"path": "caffe-fpn/include/caffe/layers/accuracy_layer.hpp",
"chars": 3351,
"preview": "#ifndef CAFFE_ACCURACY_LAYER_HPP_\n#define CAFFE_ACCURACY_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inclu"
},
{
"path": "caffe-fpn/include/caffe/layers/argmax_layer.hpp",
"chars": 2727,
"preview": "#ifndef CAFFE_ARGMAX_LAYER_HPP_\n#define CAFFE_ARGMAX_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \""
},
{
"path": "caffe-fpn/include/caffe/layers/base_conv_layer.hpp",
"chars": 6692,
"preview": "#ifndef CAFFE_BASE_CONVOLUTION_LAYER_HPP_\n#define CAFFE_BASE_CONVOLUTION_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/"
},
{
"path": "caffe-fpn/include/caffe/layers/base_data_layer.hpp",
"chars": 2948,
"preview": "#ifndef CAFFE_DATA_LAYERS_HPP_\n#define CAFFE_DATA_LAYERS_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"ca"
},
{
"path": "caffe-fpn/include/caffe/layers/batch_norm_layer.hpp",
"chars": 3228,
"preview": "#ifndef CAFFE_BATCHNORM_LAYER_HPP_\n#define CAFFE_BATCHNORM_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inc"
},
{
"path": "caffe-fpn/include/caffe/layers/batch_reindex_layer.hpp",
"chars": 2825,
"preview": "#ifndef CAFFE_BATCHREINDEX_LAYER_HPP_\n#define CAFFE_BATCHREINDEX_LAYER_HPP_\n\n#include <utility>\n#include <vector>\n\n#incl"
},
{
"path": "caffe-fpn/include/caffe/layers/bias_layer.hpp",
"chars": 1756,
"preview": "#ifndef CAFFE_BIAS_LAYER_HPP_\n#define CAFFE_BIAS_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caff"
},
{
"path": "caffe-fpn/include/caffe/layers/bnll_layer.hpp",
"chars": 2307,
"preview": "#ifndef CAFFE_BNLL_LAYER_HPP_\n#define CAFFE_BNLL_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caff"
},
{
"path": "caffe-fpn/include/caffe/layers/conadd_layer.hpp",
"chars": 3048,
"preview": "#ifndef CAFFE_CONADD_LAYER_HPP_\n#define CAFFE_CONADD_LAYER_HPP_\n\n#include <vector>\n#include \"caffe/blob.hpp\"\n#include \"c"
},
{
"path": "caffe-fpn/include/caffe/layers/concat_layer.hpp",
"chars": 3130,
"preview": "#ifndef CAFFE_CONCAT_LAYER_HPP_\n#define CAFFE_CONCAT_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \""
},
{
"path": "caffe-fpn/include/caffe/layers/contrastive_loss_layer.hpp",
"chars": 4076,
"preview": "#ifndef CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_\n#define CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/"
},
{
"path": "caffe-fpn/include/caffe/layers/conv_layer.hpp",
"chars": 3938,
"preview": "#ifndef CAFFE_CONV_LAYER_HPP_\n#define CAFFE_CONV_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caff"
},
{
"path": "caffe-fpn/include/caffe/layers/crop_layer.hpp",
"chars": 2845,
"preview": "#ifndef CAFFE_CROP_LAYER_HPP_\n#define CAFFE_CROP_LAYER_HPP_\n\n#include <utility>\n#include <vector>\n\n#include \"caffe/blob."
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_conv_layer.hpp",
"chars": 2532,
"preview": "#ifndef CAFFE_CUDNN_CONV_LAYER_HPP_\n#define CAFFE_CUDNN_CONV_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#i"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_lcn_layer.hpp",
"chars": 1355,
"preview": "#ifndef CAFFE_CUDNN_LCN_LAYER_HPP_\n#define CAFFE_CUDNN_LCN_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inc"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_lrn_layer.hpp",
"chars": 1191,
"preview": "#ifndef CAFFE_CUDNN_LRN_LAYER_HPP_\n#define CAFFE_CUDNN_LRN_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inc"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_pooling_layer.hpp",
"chars": 1507,
"preview": "#ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_\n#define CAFFE_CUDNN_POOLING_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.h"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_relu_layer.hpp",
"chars": 1285,
"preview": "#ifndef CAFFE_CUDNN_RELU_LAYER_HPP_\n#define CAFFE_CUDNN_RELU_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#i"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_sigmoid_layer.hpp",
"chars": 1315,
"preview": "#ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_\n#define CAFFE_CUDNN_SIGMOID_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.h"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_softmax_layer.hpp",
"chars": 1281,
"preview": "#ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_\n#define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.h"
},
{
"path": "caffe-fpn/include/caffe/layers/cudnn_tanh_layer.hpp",
"chars": 1285,
"preview": "#ifndef CAFFE_CUDNN_TANH_LAYER_HPP_\n#define CAFFE_CUDNN_TANH_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#i"
},
{
"path": "caffe-fpn/include/caffe/layers/data_layer.hpp",
"chars": 1152,
"preview": "#ifndef CAFFE_DATA_LAYER_HPP_\n#define CAFFE_DATA_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caff"
},
{
"path": "caffe-fpn/include/caffe/layers/deconv_layer.hpp",
"chars": 1977,
"preview": "#ifndef CAFFE_DECONV_LAYER_HPP_\n#define CAFFE_DECONV_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \""
},
{
"path": "caffe-fpn/include/caffe/layers/deformable_conv_layer.hpp",
"chars": 4887,
"preview": "#ifndef CAFFE_DEFORMABEL_CONV_LAYER_HPP_\r\n#define CAFFE_DEFORMABLE_CONV_LAYER_HPP_\r\n\r\n#include <vector>\r\n\r\n#include \"caf"
},
{
"path": "caffe-fpn/include/caffe/layers/dropout_layer.hpp",
"chars": 2899,
"preview": "#ifndef CAFFE_DROPOUT_LAYER_HPP_\n#define CAFFE_DROPOUT_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include"
},
{
"path": "caffe-fpn/include/caffe/layers/dummy_data_layer.hpp",
"chars": 1644,
"preview": "#ifndef CAFFE_DUMMY_DATA_LAYER_HPP_\n#define CAFFE_DUMMY_DATA_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#i"
},
{
"path": "caffe-fpn/include/caffe/layers/eltwise_layer.hpp",
"chars": 1604,
"preview": "#ifndef CAFFE_ELTWISE_LAYER_HPP_\n#define CAFFE_ELTWISE_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include"
},
{
"path": "caffe-fpn/include/caffe/layers/elu_layer.hpp",
"chars": 2860,
"preview": "#ifndef CAFFE_ELU_LAYER_HPP_\n#define CAFFE_ELU_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/"
},
{
"path": "caffe-fpn/include/caffe/layers/embed_layer.hpp",
"chars": 1691,
"preview": "#ifndef CAFFE_EMBED_LAYER_HPP_\n#define CAFFE_EMBED_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"ca"
},
{
"path": "caffe-fpn/include/caffe/layers/euclidean_loss_layer.hpp",
"chars": 4570,
"preview": "#ifndef CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_\n#define CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_\n\n#include <vector>\n\n#include \"caffe/blob"
}
]
// ... and 504 more files (download for full content)
About this extraction
This page contains the full source code of the unsky/FPN GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 704 files (11.7 MB), approximately 3.1M tokens, and a symbol index with 3402 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.