Copy disabled (too large)
Download .txt
Showing preview only (10,841K chars total). Download the full file to get everything.
Repository: tonghe90/textspotter
Branch: master
Commit: aa36bbc11229
Files: 721
Total size: 10.2 MB
Directory structure:
gitextract_8dm_v1kj/
├── .idea/
│ ├── misc.xml
│ ├── modules.xml
│ ├── textspotter.iml
│ ├── vcs.xml
│ └── workspace.xml
├── README.md
├── caffe/
│ ├── .Doxyfile
│ ├── .github/
│ │ └── ISSUE_TEMPLATE.md
│ ├── .gitignore
│ ├── .travis.yml
│ ├── CMakeLists.txt
│ ├── CONTRIBUTING.md
│ ├── CONTRIBUTORS.md
│ ├── INSTALL.md
│ ├── LICENSE
│ ├── Makefile
│ ├── Makefile.config.example
│ ├── README.md
│ ├── caffe.cloc
│ ├── cmake/
│ │ ├── ConfigGen.cmake
│ │ ├── Cuda.cmake
│ │ ├── Dependencies.cmake
│ │ ├── External/
│ │ │ ├── gflags.cmake
│ │ │ └── glog.cmake
│ │ ├── Misc.cmake
│ │ ├── Modules/
│ │ │ ├── FindAtlas.cmake
│ │ │ ├── FindGFlags.cmake
│ │ │ ├── FindGlog.cmake
│ │ │ ├── FindLAPACK.cmake
│ │ │ ├── FindLMDB.cmake
│ │ │ ├── FindLevelDB.cmake
│ │ │ ├── FindMKL.cmake
│ │ │ ├── FindMatlabMex.cmake
│ │ │ ├── FindNCCL.cmake
│ │ │ ├── FindNumPy.cmake
│ │ │ ├── FindOpenBLAS.cmake
│ │ │ ├── FindSnappy.cmake
│ │ │ └── FindvecLib.cmake
│ │ ├── ProtoBuf.cmake
│ │ ├── Summary.cmake
│ │ ├── Targets.cmake
│ │ ├── Templates/
│ │ │ ├── CaffeConfig.cmake.in
│ │ │ ├── CaffeConfigVersion.cmake.in
│ │ │ └── caffe_config.h.in
│ │ ├── Uninstall.cmake.in
│ │ ├── Utils.cmake
│ │ └── lint.cmake
│ ├── docker/
│ │ ├── README.md
│ │ ├── cpu/
│ │ │ └── Dockerfile
│ │ └── gpu/
│ │ └── Dockerfile
│ ├── docs/
│ │ ├── CMakeLists.txt
│ │ ├── CNAME
│ │ ├── README.md
│ │ ├── _config.yml
│ │ ├── _layouts/
│ │ │ └── default.html
│ │ ├── development.md
│ │ ├── index.md
│ │ ├── install_apt.md
│ │ ├── install_apt_debian.md
│ │ ├── install_osx.md
│ │ ├── install_yum.md
│ │ ├── installation.md
│ │ ├── model_zoo.md
│ │ ├── multigpu.md
│ │ ├── stylesheets/
│ │ │ ├── pygment_trac.css
│ │ │ ├── reset.css
│ │ │ └── styles.css
│ │ └── tutorial/
│ │ ├── convolution.md
│ │ ├── data.md
│ │ ├── fig/
│ │ │ └── .gitignore
│ │ ├── forward_backward.md
│ │ ├── index.md
│ │ ├── interfaces.md
│ │ ├── layers/
│ │ │ ├── absval.md
│ │ │ ├── accuracy.md
│ │ │ ├── argmax.md
│ │ │ ├── batchnorm.md
│ │ │ ├── batchreindex.md
│ │ │ ├── bias.md
│ │ │ ├── bnll.md
│ │ │ ├── concat.md
│ │ │ ├── contrastiveloss.md
│ │ │ ├── convolution.md
│ │ │ ├── crop.md
│ │ │ ├── data.md
│ │ │ ├── deconvolution.md
│ │ │ ├── dropout.md
│ │ │ ├── dummydata.md
│ │ │ ├── eltwise.md
│ │ │ ├── elu.md
│ │ │ ├── embed.md
│ │ │ ├── euclideanloss.md
│ │ │ ├── exp.md
│ │ │ ├── filter.md
│ │ │ ├── flatten.md
│ │ │ ├── hdf5data.md
│ │ │ ├── hdf5output.md
│ │ │ ├── hingeloss.md
│ │ │ ├── im2col.md
│ │ │ ├── imagedata.md
│ │ │ ├── infogainloss.md
│ │ │ ├── innerproduct.md
│ │ │ ├── input.md
│ │ │ ├── log.md
│ │ │ ├── lrn.md
│ │ │ ├── lstm.md
│ │ │ ├── memorydata.md
│ │ │ ├── multinomiallogisticloss.md
│ │ │ ├── mvn.md
│ │ │ ├── parameter.md
│ │ │ ├── pooling.md
│ │ │ ├── power.md
│ │ │ ├── prelu.md
│ │ │ ├── python.md
│ │ │ ├── recurrent.md
│ │ │ ├── reduction.md
│ │ │ ├── relu.md
│ │ │ ├── reshape.md
│ │ │ ├── rnn.md
│ │ │ ├── scale.md
│ │ │ ├── sigmoid.md
│ │ │ ├── sigmoidcrossentropyloss.md
│ │ │ ├── silence.md
│ │ │ ├── slice.md
│ │ │ ├── softmax.md
│ │ │ ├── softmaxwithloss.md
│ │ │ ├── split.md
│ │ │ ├── spp.md
│ │ │ ├── tanh.md
│ │ │ ├── threshold.md
│ │ │ ├── tile.md
│ │ │ └── windowdata.md
│ │ ├── layers.md
│ │ ├── loss.md
│ │ ├── net_layer_blob.md
│ │ └── solver.md
│ ├── examples/
│ │ ├── 00-classification.ipynb
│ │ ├── 01-learning-lenet.ipynb
│ │ ├── 02-fine-tuning.ipynb
│ │ ├── CMakeLists.txt
│ │ ├── brewing-logreg.ipynb
│ │ ├── cifar10/
│ │ │ ├── cifar10_full.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver.prototxt
│ │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test.prototxt
│ │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt
│ │ │ ├── cifar10_full_solver.prototxt
│ │ │ ├── cifar10_full_solver_lr1.prototxt
│ │ │ ├── cifar10_full_solver_lr2.prototxt
│ │ │ ├── cifar10_full_train_test.prototxt
│ │ │ ├── cifar10_quick.prototxt
│ │ │ ├── cifar10_quick_solver.prototxt
│ │ │ ├── cifar10_quick_solver_lr1.prototxt
│ │ │ ├── cifar10_quick_train_test.prototxt
│ │ │ ├── convert_cifar_data.cpp
│ │ │ ├── create_cifar10.sh
│ │ │ ├── readme.md
│ │ │ ├── train_full.sh
│ │ │ ├── train_full_sigmoid.sh
│ │ │ ├── train_full_sigmoid_bn.sh
│ │ │ └── train_quick.sh
│ │ ├── cpp_classification/
│ │ │ ├── classification.cpp
│ │ │ └── readme.md
│ │ ├── detection.ipynb
│ │ ├── feature_extraction/
│ │ │ ├── imagenet_val.prototxt
│ │ │ └── readme.md
│ │ ├── finetune_flickr_style/
│ │ │ ├── assemble_data.py
│ │ │ ├── readme.md
│ │ │ └── style_names.txt
│ │ ├── finetune_pascal_detection/
│ │ │ ├── pascal_finetune_solver.prototxt
│ │ │ └── pascal_finetune_trainval_test.prototxt
│ │ ├── hdf5_classification/
│ │ │ ├── nonlinear_auto_test.prototxt
│ │ │ ├── nonlinear_auto_train.prototxt
│ │ │ ├── nonlinear_train_val.prototxt
│ │ │ └── train_val.prototxt
│ │ ├── imagenet/
│ │ │ ├── create_imagenet.sh
│ │ │ ├── make_imagenet_mean.sh
│ │ │ ├── readme.md
│ │ │ ├── resume_training.sh
│ │ │ └── train_caffenet.sh
│ │ ├── mnist/
│ │ │ ├── convert_mnist_data.cpp
│ │ │ ├── create_mnist.sh
│ │ │ ├── lenet.prototxt
│ │ │ ├── lenet_adadelta_solver.prototxt
│ │ │ ├── lenet_auto_solver.prototxt
│ │ │ ├── lenet_consolidated_solver.prototxt
│ │ │ ├── lenet_multistep_solver.prototxt
│ │ │ ├── lenet_solver.prototxt
│ │ │ ├── lenet_solver_adam.prototxt
│ │ │ ├── lenet_solver_rmsprop.prototxt
│ │ │ ├── lenet_train_test.prototxt
│ │ │ ├── mnist_autoencoder.prototxt
│ │ │ ├── mnist_autoencoder_solver.prototxt
│ │ │ ├── mnist_autoencoder_solver_adadelta.prototxt
│ │ │ ├── mnist_autoencoder_solver_adagrad.prototxt
│ │ │ ├── mnist_autoencoder_solver_nesterov.prototxt
│ │ │ ├── readme.md
│ │ │ ├── train_lenet.sh
│ │ │ ├── train_lenet_adam.sh
│ │ │ ├── train_lenet_consolidated.sh
│ │ │ ├── train_lenet_docker.sh
│ │ │ ├── train_lenet_rmsprop.sh
│ │ │ ├── train_mnist_autoencoder.sh
│ │ │ ├── train_mnist_autoencoder_adadelta.sh
│ │ │ ├── train_mnist_autoencoder_adagrad.sh
│ │ │ └── train_mnist_autoencoder_nesterov.sh
│ │ ├── net_surgery/
│ │ │ ├── bvlc_caffenet_full_conv.prototxt
│ │ │ └── conv.prototxt
│ │ ├── net_surgery.ipynb
│ │ ├── pascal-multilabel-with-datalayer.ipynb
│ │ ├── pycaffe/
│ │ │ ├── caffenet.py
│ │ │ ├── layers/
│ │ │ │ ├── pascal_multilabel_datalayers.py
│ │ │ │ └── pyloss.py
│ │ │ ├── linreg.prototxt
│ │ │ └── tools.py
│ │ ├── siamese/
│ │ │ ├── convert_mnist_siamese_data.cpp
│ │ │ ├── create_mnist_siamese.sh
│ │ │ ├── mnist_siamese.ipynb
│ │ │ ├── mnist_siamese.prototxt
│ │ │ ├── mnist_siamese_solver.prototxt
│ │ │ ├── mnist_siamese_train_test.prototxt
│ │ │ ├── readme.md
│ │ │ └── train_mnist_siamese.sh
│ │ └── web_demo/
│ │ ├── app.py
│ │ ├── exifutil.py
│ │ ├── readme.md
│ │ ├── requirements.txt
│ │ └── templates/
│ │ └── index.html
│ ├── include/
│ │ └── caffe/
│ │ ├── blob.hpp
│ │ ├── caffe.hpp
│ │ ├── common.hpp
│ │ ├── data_transformer.hpp
│ │ ├── filler.hpp
│ │ ├── internal_thread.hpp
│ │ ├── layer.hpp
│ │ ├── layer_factory.hpp
│ │ ├── layers/
│ │ │ ├── absval_layer.hpp
│ │ │ ├── accuracy_layer.hpp
│ │ │ ├── argmax_layer.hpp
│ │ │ ├── at_layer.hpp
│ │ │ ├── attention_lstm_layer.hpp
│ │ │ ├── base_conv_layer.hpp
│ │ │ ├── base_data_layer.hpp
│ │ │ ├── batch_norm_layer.hpp
│ │ │ ├── batch_reindex_layer.hpp
│ │ │ ├── bias_layer.hpp
│ │ │ ├── bnll_layer.hpp
│ │ │ ├── concat_layer.hpp
│ │ │ ├── contrastive_loss_layer.hpp
│ │ │ ├── conv_layer.hpp
│ │ │ ├── cosinangle_loss_layer.hpp
│ │ │ ├── crop_layer.hpp
│ │ │ ├── cudnn_conv_layer.hpp
│ │ │ ├── cudnn_lcn_layer.hpp
│ │ │ ├── cudnn_lrn_layer.hpp
│ │ │ ├── cudnn_pooling_layer.hpp
│ │ │ ├── cudnn_relu_layer.hpp
│ │ │ ├── cudnn_sigmoid_layer.hpp
│ │ │ ├── cudnn_softmax_layer.hpp
│ │ │ ├── cudnn_tanh_layer.hpp
│ │ │ ├── data_layer.hpp
│ │ │ ├── deconv_layer.hpp
│ │ │ ├── dropout_layer.hpp
│ │ │ ├── dummy_data_layer.hpp
│ │ │ ├── eltwise_layer.hpp
│ │ │ ├── elu_layer.hpp
│ │ │ ├── embed_layer.hpp
│ │ │ ├── euclidean_loss_layer.hpp
│ │ │ ├── exp_layer.hpp
│ │ │ ├── filter_layer.hpp
│ │ │ ├── flatten_layer.hpp
│ │ │ ├── hdf5_data_layer.hpp
│ │ │ ├── hdf5_output_layer.hpp
│ │ │ ├── hinge_loss_layer.hpp
│ │ │ ├── im2col_layer.hpp
│ │ │ ├── image_data_layer.hpp
│ │ │ ├── infogain_loss_layer.hpp
│ │ │ ├── inner_product_layer.hpp
│ │ │ ├── input_layer.hpp
│ │ │ ├── log_layer.hpp
│ │ │ ├── loss_layer.hpp
│ │ │ ├── lrn_layer.hpp
│ │ │ ├── lstm_layer.hpp
│ │ │ ├── lstm_new_layer.hpp
│ │ │ ├── memory_data_layer.hpp
│ │ │ ├── multinomial_logistic_loss_layer.hpp
│ │ │ ├── mvn_layer.hpp
│ │ │ ├── neuron_layer.hpp
│ │ │ ├── parameter_layer.hpp
│ │ │ ├── point_bilinear_layer.hpp
│ │ │ ├── pooling_layer.hpp
│ │ │ ├── power_layer.hpp
│ │ │ ├── prelu_layer.hpp
│ │ │ ├── python_layer.hpp
│ │ │ ├── recurrent_layer.hpp
│ │ │ ├── reduction_layer.hpp
│ │ │ ├── relu_layer.hpp
│ │ │ ├── reshape_layer.hpp
│ │ │ ├── reverse_axis_layer.hpp
│ │ │ ├── reverse_layer.hpp
│ │ │ ├── rnn_layer.hpp
│ │ │ ├── roi_pooling_layer.hpp
│ │ │ ├── scale_layer.hpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.hpp
│ │ │ ├── sigmoid_layer.hpp
│ │ │ ├── silence_layer.hpp
│ │ │ ├── slice_layer.hpp
│ │ │ ├── smooth_L1_loss_layer.hpp
│ │ │ ├── softmax_layer.hpp
│ │ │ ├── softmax_loss_layer.hpp
│ │ │ ├── split_layer.hpp
│ │ │ ├── spp_layer.hpp
│ │ │ ├── sum_layer.hpp
│ │ │ ├── tanh_layer.hpp
│ │ │ ├── threshold_layer.hpp
│ │ │ ├── tile_layer.hpp
│ │ │ ├── transpose_layer.hpp
│ │ │ ├── unitbox_data_layer.hpp
│ │ │ ├── unitbox_loss_layer.hpp
│ │ │ └── window_data_layer.hpp
│ │ ├── net.hpp
│ │ ├── parallel.hpp
│ │ ├── sgd_solvers.hpp
│ │ ├── solver.hpp
│ │ ├── solver_factory.hpp
│ │ ├── syncedmem.hpp
│ │ ├── test/
│ │ │ ├── test_caffe_main.hpp
│ │ │ └── test_gradient_check_util.hpp
│ │ └── util/
│ │ ├── benchmark.hpp
│ │ ├── blocking_queue.hpp
│ │ ├── cudnn.hpp
│ │ ├── db.hpp
│ │ ├── db_leveldb.hpp
│ │ ├── db_lmdb.hpp
│ │ ├── device_alternate.hpp
│ │ ├── format.hpp
│ │ ├── gpu_util.cuh
│ │ ├── hdf5.hpp
│ │ ├── im2col.hpp
│ │ ├── insert_splits.hpp
│ │ ├── io.hpp
│ │ ├── math_functions.hpp
│ │ ├── mkl_alternate.hpp
│ │ ├── nccl.hpp
│ │ ├── rng.hpp
│ │ ├── signal_handler.h
│ │ └── upgrade_proto.hpp
│ ├── matlab/
│ │ ├── +caffe/
│ │ │ ├── +test/
│ │ │ │ ├── test_io.m
│ │ │ │ ├── test_net.m
│ │ │ │ └── test_solver.m
│ │ │ ├── Blob.m
│ │ │ ├── Layer.m
│ │ │ ├── Net.m
│ │ │ ├── Solver.m
│ │ │ ├── get_net.m
│ │ │ ├── get_solver.m
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.mat
│ │ │ ├── io.m
│ │ │ ├── private/
│ │ │ │ ├── CHECK.m
│ │ │ │ ├── CHECK_FILE_EXIST.m
│ │ │ │ ├── caffe_.cpp
│ │ │ │ └── is_valid_handle.m
│ │ │ ├── reset_all.m
│ │ │ ├── run_tests.m
│ │ │ ├── set_device.m
│ │ │ ├── set_mode_cpu.m
│ │ │ ├── set_mode_gpu.m
│ │ │ └── version.m
│ │ ├── CMakeLists.txt
│ │ ├── demo/
│ │ │ └── classification_demo.m
│ │ └── hdf5creation/
│ │ ├── .gitignore
│ │ ├── demo.m
│ │ └── store2hdf5.m
│ ├── python/
│ │ ├── CMakeLists.txt
│ │ ├── caffe/
│ │ │ ├── __init__.py
│ │ │ ├── _caffe.cpp
│ │ │ ├── classifier.py
│ │ │ ├── coord_map.py
│ │ │ ├── detector.py
│ │ │ ├── draw.py
│ │ │ ├── imagenet/
│ │ │ │ └── ilsvrc_2012_mean.npy
│ │ │ ├── io.py
│ │ │ ├── net_spec.py
│ │ │ ├── pycaffe.py
│ │ │ └── test/
│ │ │ ├── test_coord_map.py
│ │ │ ├── test_draw.py
│ │ │ ├── test_gradient_for_python_layer.py
│ │ │ ├── test_io.py
│ │ │ ├── test_layer_type_list.py
│ │ │ ├── test_nccl.py
│ │ │ ├── test_net.py
│ │ │ ├── test_net_spec.py
│ │ │ ├── test_proposal.py
│ │ │ ├── test_python_layer.py
│ │ │ ├── test_python_layer_with_param_str.py
│ │ │ ├── test_solver.py
│ │ │ └── test_w_pooling.py
│ │ ├── classify.py
│ │ ├── detect.py
│ │ ├── draw_net.py
│ │ ├── requirements.txt
│ │ └── train.py
│ ├── scripts/
│ │ ├── build_docs.sh
│ │ ├── caffe
│ │ ├── copy_notebook.py
│ │ ├── cpp_lint.py
│ │ ├── deploy_docs.sh
│ │ ├── download_model_binary.py
│ │ ├── download_model_from_gist.sh
│ │ ├── gather_examples.sh
│ │ ├── split_caffe_proto.py
│ │ ├── travis/
│ │ │ ├── build.sh
│ │ │ ├── configure-cmake.sh
│ │ │ ├── configure-make.sh
│ │ │ ├── configure.sh
│ │ │ ├── defaults.sh
│ │ │ ├── install-deps.sh
│ │ │ ├── install-python-deps.sh
│ │ │ ├── setup-venv.sh
│ │ │ └── test.sh
│ │ └── upload_model_to_gist.sh
│ ├── src/
│ │ ├── caffe/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── blob.cpp
│ │ │ ├── common.cpp
│ │ │ ├── data_transformer.cpp
│ │ │ ├── internal_thread.cpp
│ │ │ ├── layer.cpp
│ │ │ ├── layer_factory.cpp
│ │ │ ├── layers/
│ │ │ │ ├── absval_layer.cpp
│ │ │ │ ├── absval_layer.cu
│ │ │ │ ├── accuracy_layer.cpp
│ │ │ │ ├── argmax_layer.cpp
│ │ │ │ ├── at_layer.cpp
│ │ │ │ ├── at_layer.cu
│ │ │ │ ├── attention_lstm_layer.cpp
│ │ │ │ ├── base_conv_layer.cpp
│ │ │ │ ├── base_data_layer.cpp
│ │ │ │ ├── base_data_layer.cu
│ │ │ │ ├── batch_norm_layer.cpp
│ │ │ │ ├── batch_norm_layer.cu
│ │ │ │ ├── batch_reindex_layer.cpp
│ │ │ │ ├── batch_reindex_layer.cu
│ │ │ │ ├── bias_layer.cpp
│ │ │ │ ├── bias_layer.cu
│ │ │ │ ├── bnll_layer.cpp
│ │ │ │ ├── bnll_layer.cu
│ │ │ │ ├── concat_layer.cpp
│ │ │ │ ├── concat_layer.cu
│ │ │ │ ├── contrastive_loss_layer.cpp
│ │ │ │ ├── contrastive_loss_layer.cu
│ │ │ │ ├── conv_layer.cpp
│ │ │ │ ├── conv_layer.cu
│ │ │ │ ├── cosinangle_loss_layer.cpp
│ │ │ │ ├── cosinangle_loss_layer.cu
│ │ │ │ ├── crop_layer.cpp
│ │ │ │ ├── crop_layer.cu
│ │ │ │ ├── cudnn_conv_layer.cpp
│ │ │ │ ├── cudnn_conv_layer.cu
│ │ │ │ ├── cudnn_lcn_layer.cpp
│ │ │ │ ├── cudnn_lcn_layer.cu
│ │ │ │ ├── cudnn_lrn_layer.cpp
│ │ │ │ ├── cudnn_lrn_layer.cu
│ │ │ │ ├── cudnn_pooling_layer.cpp
│ │ │ │ ├── cudnn_pooling_layer.cu
│ │ │ │ ├── cudnn_relu_layer.cpp
│ │ │ │ ├── cudnn_relu_layer.cu
│ │ │ │ ├── cudnn_sigmoid_layer.cpp
│ │ │ │ ├── cudnn_sigmoid_layer.cu
│ │ │ │ ├── cudnn_softmax_layer.cpp
│ │ │ │ ├── cudnn_softmax_layer.cu
│ │ │ │ ├── cudnn_tanh_layer.cpp
│ │ │ │ ├── cudnn_tanh_layer.cu
│ │ │ │ ├── data_layer.cpp
│ │ │ │ ├── deconv_layer.cpp
│ │ │ │ ├── deconv_layer.cu
│ │ │ │ ├── dropout_layer.cpp
│ │ │ │ ├── dropout_layer.cu
│ │ │ │ ├── dummy_data_layer.cpp
│ │ │ │ ├── eltwise_layer.cpp
│ │ │ │ ├── eltwise_layer.cu
│ │ │ │ ├── elu_layer.cpp
│ │ │ │ ├── elu_layer.cu
│ │ │ │ ├── embed_layer.cpp
│ │ │ │ ├── embed_layer.cu
│ │ │ │ ├── euclidean_loss_layer.cpp
│ │ │ │ ├── euclidean_loss_layer.cu
│ │ │ │ ├── exp_layer.cpp
│ │ │ │ ├── exp_layer.cu
│ │ │ │ ├── filter_layer.cpp
│ │ │ │ ├── filter_layer.cu
│ │ │ │ ├── flatten_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cpp
│ │ │ │ ├── hdf5_data_layer.cu
│ │ │ │ ├── hdf5_output_layer.cpp
│ │ │ │ ├── hdf5_output_layer.cu
│ │ │ │ ├── hinge_loss_layer.cpp
│ │ │ │ ├── im2col_layer.cpp
│ │ │ │ ├── im2col_layer.cu
│ │ │ │ ├── image_data_layer.cpp
│ │ │ │ ├── infogain_loss_layer.cpp
│ │ │ │ ├── inner_product_layer.cpp
│ │ │ │ ├── inner_product_layer.cu
│ │ │ │ ├── input_layer.cpp
│ │ │ │ ├── log_layer.cpp
│ │ │ │ ├── log_layer.cu
│ │ │ │ ├── loss_layer.cpp
│ │ │ │ ├── lrn_layer.cpp
│ │ │ │ ├── lrn_layer.cu
│ │ │ │ ├── lstm_layer.cpp
│ │ │ │ ├── lstm_layer.cu
│ │ │ │ ├── lstm_new_layer.cpp
│ │ │ │ ├── lstm_unit_layer.cpp
│ │ │ │ ├── lstm_unit_layer.cu
│ │ │ │ ├── memory_data_layer.cpp
│ │ │ │ ├── multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── mvn_layer.cpp
│ │ │ │ ├── mvn_layer.cu
│ │ │ │ ├── neuron_layer.cpp
│ │ │ │ ├── parameter_layer.cpp
│ │ │ │ ├── point_bilinear_layer.cpp
│ │ │ │ ├── pooling_layer.cpp
│ │ │ │ ├── pooling_layer.cu
│ │ │ │ ├── power_layer.cpp
│ │ │ │ ├── power_layer.cu
│ │ │ │ ├── prelu_layer.cpp
│ │ │ │ ├── prelu_layer.cu
│ │ │ │ ├── recurrent_layer.cpp
│ │ │ │ ├── recurrent_layer.cu
│ │ │ │ ├── reduction_layer.cpp
│ │ │ │ ├── reduction_layer.cu
│ │ │ │ ├── relu_layer.cpp
│ │ │ │ ├── relu_layer.cu
│ │ │ │ ├── reshape_layer.cpp
│ │ │ │ ├── reverse_axis_layer.cpp
│ │ │ │ ├── reverse_axis_layer.cu
│ │ │ │ ├── rnn_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cpp
│ │ │ │ ├── roi_pooling_layer.cu
│ │ │ │ ├── scale_layer.cpp
│ │ │ │ ├── scale_layer.cu
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu
│ │ │ │ ├── sigmoid_layer.cpp
│ │ │ │ ├── sigmoid_layer.cu
│ │ │ │ ├── silence_layer.cpp
│ │ │ │ ├── silence_layer.cu
│ │ │ │ ├── slice_layer.cpp
│ │ │ │ ├── slice_layer.cu
│ │ │ │ ├── smooth_L1_loss_layer.cpp
│ │ │ │ ├── smooth_L1_loss_layer.cu
│ │ │ │ ├── softmax_layer.cpp
│ │ │ │ ├── softmax_layer.cu
│ │ │ │ ├── softmax_loss_layer.cpp
│ │ │ │ ├── softmax_loss_layer.cu
│ │ │ │ ├── split_layer.cpp
│ │ │ │ ├── split_layer.cu
│ │ │ │ ├── spp_layer.cpp
│ │ │ │ ├── sum_layer.cpp
│ │ │ │ ├── tanh_layer.cpp
│ │ │ │ ├── tanh_layer.cu
│ │ │ │ ├── threshold_layer.cpp
│ │ │ │ ├── threshold_layer.cu
│ │ │ │ ├── tile_layer.cpp
│ │ │ │ ├── tile_layer.cu
│ │ │ │ ├── transpose_layer.cpp
│ │ │ │ ├── transpose_layer.cu
│ │ │ │ ├── unitbox_loss_layer.cpp
│ │ │ │ └── window_data_layer.cpp
│ │ │ ├── net.cpp
│ │ │ ├── parallel.cpp
│ │ │ ├── proto/
│ │ │ │ └── caffe.proto
│ │ │ ├── solver.cpp
│ │ │ ├── solvers/
│ │ │ │ ├── adadelta_solver.cpp
│ │ │ │ ├── adadelta_solver.cu
│ │ │ │ ├── adagrad_solver.cpp
│ │ │ │ ├── adagrad_solver.cu
│ │ │ │ ├── adam_solver.cpp
│ │ │ │ ├── adam_solver.cu
│ │ │ │ ├── nesterov_solver.cpp
│ │ │ │ ├── nesterov_solver.cu
│ │ │ │ ├── rmsprop_solver.cpp
│ │ │ │ ├── rmsprop_solver.cu
│ │ │ │ ├── sgd_solver.cpp
│ │ │ │ └── sgd_solver.cu
│ │ │ ├── syncedmem.cpp
│ │ │ ├── test/
│ │ │ │ ├── CMakeLists.txt
│ │ │ │ ├── test_accuracy_layer.cpp
│ │ │ │ ├── test_argmax_layer.cpp
│ │ │ │ ├── test_attlstm_layer.cpp
│ │ │ │ ├── test_batch_norm_layer.cpp
│ │ │ │ ├── test_batch_reindex_layer.cpp
│ │ │ │ ├── test_benchmark.cpp
│ │ │ │ ├── test_bias_layer.cpp
│ │ │ │ ├── test_blob.cpp
│ │ │ │ ├── test_bn_layer.cpp
│ │ │ │ ├── test_caffe_main.cpp
│ │ │ │ ├── test_common.cpp
│ │ │ │ ├── test_concat_layer.cpp
│ │ │ │ ├── test_contrastive_loss_layer.cpp
│ │ │ │ ├── test_convolution_layer.cpp
│ │ │ │ ├── test_crop_layer.cpp
│ │ │ │ ├── test_data/
│ │ │ │ │ ├── generate_sample_data.py
│ │ │ │ │ ├── sample_data.h5
│ │ │ │ │ ├── sample_data_2_gzip.h5
│ │ │ │ │ ├── sample_data_list.txt
│ │ │ │ │ ├── solver_data.h5
│ │ │ │ │ └── solver_data_list.txt
│ │ │ │ ├── test_data_layer.cpp
│ │ │ │ ├── test_data_transformer.cpp
│ │ │ │ ├── test_db.cpp
│ │ │ │ ├── test_deconvolution_layer.cpp
│ │ │ │ ├── test_deformconv_layer.cpp
│ │ │ │ ├── test_dummy_data_layer.cpp
│ │ │ │ ├── test_eltwise_layer.cpp
│ │ │ │ ├── test_embed_layer.cpp
│ │ │ │ ├── test_euclidean_loss_layer.cpp
│ │ │ │ ├── test_filler.cpp
│ │ │ │ ├── test_filter_layer.cpp
│ │ │ │ ├── test_flatten_layer.cpp
│ │ │ │ ├── test_gradient_based_solver.cpp
│ │ │ │ ├── test_hdf5_output_layer.cpp
│ │ │ │ ├── test_hdf5data_layer.cpp
│ │ │ │ ├── test_hinge_loss_layer.cpp
│ │ │ │ ├── test_im2col_kernel.cu
│ │ │ │ ├── test_im2col_layer.cpp
│ │ │ │ ├── test_image_data_layer.cpp
│ │ │ │ ├── test_infogain_loss_layer.cpp
│ │ │ │ ├── test_inner_product_layer.cpp
│ │ │ │ ├── test_internal_thread.cpp
│ │ │ │ ├── test_io.cpp
│ │ │ │ ├── test_layer_factory.cpp
│ │ │ │ ├── test_lrn_layer.cpp
│ │ │ │ ├── test_lstm_new_layer.cpp
│ │ │ │ ├── test_math_functions.cpp
│ │ │ │ ├── test_maxpool_dropout_layers.cpp
│ │ │ │ ├── test_memory_data_layer.cpp
│ │ │ │ ├── test_multinomial_logistic_loss_layer.cpp
│ │ │ │ ├── test_mvn_layer.cpp
│ │ │ │ ├── test_net.cpp
│ │ │ │ ├── test_neuron_layer.cpp
│ │ │ │ ├── test_platform.cpp
│ │ │ │ ├── test_point_bilinear_layer.cpp
│ │ │ │ ├── test_pooling_layer.cpp
│ │ │ │ ├── test_power_layer.cpp
│ │ │ │ ├── test_protobuf.cpp
│ │ │ │ ├── test_random_number_generator.cpp
│ │ │ │ ├── test_reduction_layer.cpp
│ │ │ │ ├── test_reshape_layer.cpp
│ │ │ │ ├── test_rnn_layer.cpp
│ │ │ │ ├── test_scale_layer.cpp
│ │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp
│ │ │ │ ├── test_slice_layer.cpp
│ │ │ │ ├── test_softmax_layer.cpp
│ │ │ │ ├── test_softmax_with_loss_layer.cpp
│ │ │ │ ├── test_solver.cpp
│ │ │ │ ├── test_solver_factory.cpp
│ │ │ │ ├── test_split_layer.cpp
│ │ │ │ ├── test_spp_layer.cpp
│ │ │ │ ├── test_stochastic_pooling.cpp
│ │ │ │ ├── test_sum_layer.cpp
│ │ │ │ ├── test_syncedmem.cpp
│ │ │ │ ├── test_tanh_layer.cpp
│ │ │ │ ├── test_threshold_layer.cpp
│ │ │ │ ├── test_tile_layer.cpp
│ │ │ │ ├── test_upgrade_proto.cpp
│ │ │ │ └── test_util_blas.cpp
│ │ │ └── util/
│ │ │ ├── benchmark.cpp
│ │ │ ├── blocking_queue.cpp
│ │ │ ├── cudnn.cpp
│ │ │ ├── db.cpp
│ │ │ ├── db_leveldb.cpp
│ │ │ ├── db_lmdb.cpp
│ │ │ ├── hdf5.cpp
│ │ │ ├── im2col.cpp
│ │ │ ├── im2col.cu
│ │ │ ├── insert_splits.cpp
│ │ │ ├── io.cpp
│ │ │ ├── math_functions.cpp
│ │ │ ├── math_functions.cu
│ │ │ ├── signal_handler.cpp
│ │ │ └── upgrade_proto.cpp
│ │ └── gtest/
│ │ ├── CMakeLists.txt
│ │ ├── gtest-all.cpp
│ │ ├── gtest.h
│ │ └── gtest_main.cc
│ └── tools/
│ ├── CMakeLists.txt
│ ├── binary_to_text.cpp
│ ├── caffe.cpp
│ ├── compute_image_mean.cpp
│ ├── convert_imageset.cpp
│ ├── convert_model.cpp
│ ├── copy_layers.cpp
│ ├── device_query.cpp
│ ├── extra/
│ │ ├── convert_deform_conv.py
│ │ ├── extract_seconds.py
│ │ ├── launch_resize_and_crop_images.sh
│ │ ├── parse_log.py
│ │ ├── parse_log.sh
│ │ ├── plot_log.gnuplot.example
│ │ ├── plot_training_log.py.example
│ │ ├── resize_and_crop_images.py
│ │ └── summarize.py
│ ├── extract_features.cpp
│ ├── finetune_net.cpp
│ ├── net_speed_benchmark.cpp
│ ├── test_net.cpp
│ ├── train_net.cpp
│ ├── upgrade_net_proto_binary.cpp
│ ├── upgrade_net_proto_text.cpp
│ └── upgrade_solver_proto_text.cpp
├── cfg.py
├── dicts/
│ ├── dict.txt
│ ├── generic_lex.txt
│ └── weak_voc.txt
├── models/
│ ├── test_iou.pt
│ ├── test_lstm.pt
│ └── train.pt
├── pylayer/
│ ├── tool.py
│ └── tool_layers.py
├── results/
│ └── res_img_105.txt
└── test.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .idea/misc.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectLevelVcsManager" settingsEditedManually="false">
<OptionsSetting value="true" id="Add" />
<OptionsSetting value="true" id="Remove" />
<OptionsSetting value="true" id="Checkout" />
<OptionsSetting value="true" id="Update" />
<OptionsSetting value="true" id="Status" />
<OptionsSetting value="true" id="Edit" />
<ConfirmationsSetting value="0" id="Add" />
<ConfirmationsSetting value="0" id="Remove" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7.13 virtualenv at ~/pytorch" project-jdk-type="Python SDK" />
</project>
================================================
FILE: .idea/modules.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/textspotter.iml" filepath="$PROJECT_DIR$/.idea/textspotter.iml" />
</modules>
</component>
</project>
================================================
FILE: .idea/textspotter.iml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="myDocStringFormat" value="NumPy" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
</component>
</module>
================================================
FILE: .idea/vcs.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
================================================
FILE: .idea/workspace.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="9488109c-8437-49b2-8097-a88758cd6c8b" name="Default" comment="">
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/README.md" afterPath="$PROJECT_DIR$/README.md" />
</list>
<ignored path="textspotter.iws" />
<ignored path=".idea/workspace.xml" />
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="TRACKING_ENABLED" value="true" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="CreatePatchCommitExecutor">
<option name="PATCH_PATH" value="" />
</component>
<component name="ExecutionTargetManager" SELECTED_TARGET="default_target" />
<component name="FavoritesManager">
<favorites_list name="textspotter" />
</component>
<component name="FileEditorManager">
<leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
<file leaf-file-name="test.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/test.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="450">
<caret line="70" column="19" selection-start-line="70" selection-start-column="9" selection-end-line="70" selection-end-column="19" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="cfg.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/cfg.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="54">
<caret line="3" column="10" selection-start-line="3" selection-start-column="0" selection-end-line="3" selection-end-column="10" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="tool_layers.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/pylayer/tool_layers.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-180">
<caret line="8" column="0" selection-start-line="8" selection-start-column="0" selection-end-line="8" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="README.md" pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/README.md">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="252">
<caret line="30" column="0" selection-start-line="30" selection-start-column="0" selection-end-line="30" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="test_iou.pt" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/models/test_iou.pt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="212">
<caret line="7008" column="23" selection-start-line="7008" selection-start-column="22" selection-end-line="7008" selection-end-column="23" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="test_lstm.pt" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/models/test_lstm.pt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="450">
<caret line="25" column="0" selection-start-line="25" selection-start-column="0" selection-end-line="25" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="tool.py" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/pylayer/tool.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="378">
<caret line="39" column="12" selection-start-line="39" selection-start-column="4" selection-end-line="39" selection-end-column="12" />
<folding>
<element signature="e#1#26#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</file>
</leaf>
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="IdeDocumentHistory">
<option name="CHANGED_PATHS">
<list>
<option value="$PROJECT_DIR$/test_icdar_e2e_lex_15.py" />
<option value="$PROJECT_DIR$/cfg.py" />
<option value="$PROJECT_DIR$/pylayer/tool.py" />
<option value="$PROJECT_DIR$/test.py" />
<option value="$PROJECT_DIR$/pylayer/tool_layers.py" />
<option value="$PROJECT_DIR$/README.md" />
</list>
</option>
</component>
<component name="ProjectFrameBounds">
<option name="x" value="519" />
<option name="y" value="82" />
<option name="width" value="1918" />
<option name="height" value="1028" />
</component>
<component name="ProjectLevelVcsManager" settingsEditedManually="false">
<OptionsSetting value="true" id="Add" />
<OptionsSetting value="true" id="Remove" />
<OptionsSetting value="true" id="Checkout" />
<OptionsSetting value="true" id="Update" />
<OptionsSetting value="true" id="Status" />
<OptionsSetting value="true" id="Edit" />
<ConfirmationsSetting value="0" id="Add" />
<ConfirmationsSetting value="0" id="Remove" />
</component>
<component name="ProjectView">
<navigator currentView="ProjectPane" proportions="" version="1">
<flattenPackages />
<showMembers />
<showModules />
<showLibraryContents />
<hideEmptyPackages />
<abbreviatePackageNames />
<autoscrollToSource />
<autoscrollFromSource />
<sortByType />
<manualOrder />
<foldersAlwaysOnTop value="true" />
</navigator>
<panes>
<pane id="Scope" />
<pane id="Scratches" />
<pane id="ProjectPane">
<subPane>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
</PATH>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="pylayer" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="models" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="imgs" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="textspotter" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="dicts" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
</subPane>
</pane>
</panes>
</component>
<component name="PropertiesComponent">
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" />
<property name="settings.editor.splitter.proportion" value="0.2" />
</component>
<component name="RunManager" selected="Python.test">
<configuration default="false" name="test" type="PythonConfigurationType" factoryName="Python" temporary="true">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<method />
</configuration>
<configuration default="true" type="BashConfigurationType" factoryName="Bash">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="INTERPRETER_PATH" value="/bin/bash" />
<option name="WORKING_DIRECTORY" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SCRIPT_NAME" value="" />
<option name="PARAMETERS" value="" />
<module name="" />
<envs />
<method />
</configuration>
<configuration default="true" type="PythonConfigurationType" factoryName="Python">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<method />
</configuration>
<configuration default="true" type="Tox" factoryName="Tox">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Attests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Doctests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Nosetests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="PARAMS" value="" />
<option name="USE_PARAM" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Unittests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="PUREUNITTEST" value="true" />
<option name="PARAMS" value="" />
<option name="USE_PARAM" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="py.test">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="textspotter" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="testToRun" value="" />
<option name="keywords" value="" />
<option name="params" value="" />
<option name="USE_PARAM" value="false" />
<option name="USE_KEYWORD" value="false" />
<method />
</configuration>
<list size="1">
<item index="0" class="java.lang.String" itemvalue="Python.test" />
</list>
<recent_temporary>
<list size="1">
<item index="0" class="java.lang.String" itemvalue="Python.test" />
</list>
</recent_temporary>
</component>
<component name="ShelveChangesManager" show_recycled="false">
<option name="remove_strategy" value="false" />
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="9488109c-8437-49b2-8097-a88758cd6c8b" name="Default" comment="" />
<created>1520985496603</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1520985496603</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="519" y="82" width="1918" height="1028" extended-state="0" />
<editor active="true" />
<layout>
<window_info id="Project" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.20490094" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
<window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="6" side_tool="false" content_ui="tabs" />
<window_info id="Event Log" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="true" content_ui="tabs" />
<window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
<window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
<window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.26757607" sideWeight="0.5" order="2" side_tool="false" content_ui="tabs" />
<window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
<window_info id="Favorites" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="2" side_tool="true" content_ui="tabs" />
<window_info id="Debug" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="3" side_tool="false" content_ui="tabs" />
<window_info id="Cvs" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="4" side_tool="false" content_ui="tabs" />
<window_info id="Message" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Commander" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Inspection" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="5" side_tool="false" content_ui="tabs" />
<window_info id="Hierarchy" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="2" side_tool="false" content_ui="combo" />
<window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="Ant Build" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
</layout>
</component>
<component name="Vcs.Log.UiProperties">
<option name="RECENTLY_FILTERED_USER_GROUPS">
<collection />
</option>
<option name="RECENTLY_FILTERED_BRANCH_GROUPS">
<collection />
</option>
</component>
<component name="VcsContentAnnotationSettings">
<option name="myLimit" value="2678400000" />
</component>
<component name="XDebuggerManager">
<breakpoint-manager />
<watches-manager />
</component>
<component name="editorHistoryManager">
<entry file="file://$PROJECT_DIR$/test_icdar_e2e_lex_15.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="270">
<caret line="15" column="0" selection-start-line="15" selection-start-column="0" selection-end-line="16" selection-end-column="19" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0">
<caret line="0" column="0" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/pylayer/tool.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="594">
<caret line="42" column="12" selection-start-line="42" selection-start-column="4" selection-end-line="42" selection-end-column="12" />
<folding>
<element signature="e#1#26#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test_icdar_e2e_lex_15.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="270">
<caret line="15" column="0" selection-start-line="15" selection-start-column="0" selection-end-line="16" selection-end-column="19" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0">
<caret line="0" column="0" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/pylayer/tool.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="594">
<caret line="42" column="12" selection-start-line="42" selection-start-column="4" selection-end-line="42" selection-end-column="12" />
<folding>
<element signature="e#1#26#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/pylayer/cfg.py" />
<entry file="file://$USER_HOME$/code_e2e/text_test_code/pylayer/tool_layers.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-4442">
<caret line="0" column="0" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test_icdar_e2e_lex_15.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="540">
<caret line="150" column="0" selection-start-line="117" selection-start-column="4" selection-end-line="150" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/models/test_iou.pt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="212">
<caret line="7008" column="23" selection-start-line="7008" selection-start-column="22" selection-end-line="7008" selection-end-column="23" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/models/test_lstm.pt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="450">
<caret line="25" column="0" selection-start-line="25" selection-start-column="0" selection-end-line="25" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/pylayer/tool.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="378">
<caret line="39" column="12" selection-start-line="39" selection-start-column="4" selection-end-line="39" selection-end-column="12" />
<folding>
<element signature="e#1#26#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="450">
<caret line="70" column="19" selection-start-line="70" selection-start-column="9" selection-end-line="70" selection-end-column="19" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/cfg.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="54">
<caret line="3" column="10" selection-start-line="3" selection-start-column="0" selection-end-line="3" selection-end-column="10" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/pylayer/tool_layers.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="-180">
<caret line="8" column="0" selection-start-line="8" selection-start-column="0" selection-end-line="8" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/README.md">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="252">
<caret line="30" column="0" selection-start-line="30" selection-start-column="0" selection-end-line="30" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</component>
</project>
================================================
FILE: README.md
================================================
# An End-to-End TextSpotter with Explicit Alignment and Attention
This is initially described in our [CVPR 2018 paper](https://arxiv.org/abs/1803.03474).
<img src='imgs/screenshot.png' height="350px">
## Getting Started
### Installation
- Clone the code
```bash
git clone https://github.com/tonghe90/textspotter
cd textspotter
```
- Install caffe. You can follow [this tutorial](http://caffe.berkeleyvision.org/installation.html).
If you have a build problem about std::allocator, please refer to [issue #3](https://github.com/tonghe90/textspotter/issues/3)
```bash
# make sure you set WITH_PYTHON_LAYER := 1
# change Makefile.config according to your library path
cp Makefile.config.example Makefile.config
make clean
make -j8
make pycaffe
```
### Training
```
We provide part of the training code, but you cannot run it directly.
We have added comments in [train.pt](https://github.com/tonghe90/textspotter/models/train.pt).
You have to write your own IoU loss layer. We cannot publish it for IP reasons.
To be noticed:
[L6902](https://github.com/tonghe90/textspotter/models/train.pt#L6902)
[L6947](https://github.com/tonghe90/textspotter/models/train.pt#L6947)
```
### Testing
- install editdistance and pyclipper: `pip install editdistance` and `pip install pyclipper`
- After Caffe is set up, you need to download a trained model (about 40M) from [Google Drive](https://drive.google.com/open?id=1lzM-V1Ec8KHr8fKxeO_d1x3zFaj3bmnU). This model
is trained with [VGG800k](http://www.robots.ox.ac.uk/~vgg/data/scenetext/) and finetuned on [ICDAR2015](http://rrc.cvc.uab.es/?ch=4&com=introduction).
- Run `python test.py --img=./imgs/img_105.jpg`
- hyperparameters:
```
cfg.py --mean_val ==> mean value during the testing.
--max_len ==> maximum length of the text string (here we take 25, meaning a word can contain 25 characters at most.)
       --recog_th ==> the threshold used during the recognition process. The score for a word is the average score of its characters.
       --word_score ==> the threshold for words that contain numbers or symbols, since they are not contained in the dictionary.
test.py --weight ==> weights file of caffemodel
--prototxt-iou ==> the prototxt file for detection.
--prototxt-lstm ==> the prototxt file for recognition.
        --img ==> the folder or image file for testing. Supported formats can be extended in the is_image function in ./pylayer/tool.py.
        --scales-ms ==> multi-scale inputs used during the testing process.
--thresholds-ms ==> corresponding thresholds of text region for multiscale inputs.
--nms ==> nms threshold for testing
        --save-dir ==> the directory for saving results in the format of the ICDAR2015 submission.
```
```
One thing should be noted: the recognition results are obtained by comparing the direct output with words in a dictionary, which contains about 90K words.
These words do not include any numbers or symbols. You can delete the dictionary-lookup part and directly output the raw recognition results.
```
## Citation
If you use this code for your research, please cite our paper.
```
@inproceedings{tong2018,
title={An End-to-End TextSpotter with Explicit Alignment and Attention},
author={T. He and Z. Tian and W. Huang and C. Shen and Y. Qiao and C. Sun},
booktitle={Computer Vision and Pattern Recognition (CVPR), 2018 IEEE Conference on},
year={2018}
}
```
## License
This code is for NON-COMMERCIAL purposes only. For commercial purposes, please contact Chunhua Shen <chhshen@gmail.com>.
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 3. Please refer to <http://www.gnu.org/licenses/> for more details.
================================================
FILE: caffe/.Doxyfile
================================================
# Doxyfile 1.8.8
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Caffe"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
# the documentation. The maximum height of the logo should not exceed 55 pixels
# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
# to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY = ./doxygen/
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise causes
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
# new page for each member. If set to NO, the documentation of a member will be
# part of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
# Fortran. In the latter case the parser tries to guess whether the code is fixed
# or free formatted code, this is the default for Fortran type files), VHDL. For
# instance to make doxygen treat .inc files as Fortran files (default is PHP),
# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
# Note For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word
# or globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen to replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = NO
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. When set to YES local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO these classes will be included in the various overviews. This option has
# no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# (class|struct|union) declarations. If set to NO these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
# todo list. This list is created by putting \todo commands in the
# documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
# test list. This list is created by putting \test commands in the
# documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES the list
# will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = NO
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO doxygen will only warn about wrong or incomplete parameter
# documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = ./include/caffe \
./src/caffe
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE = ./src/caffe/test/ \
./include/caffe/test/
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER ) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, the header file must include any scripts and style sheets
# that doxygen needs, which depend on the configuration options used (e.g.
# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra stylesheet files is of importance (e.g. the last
# stylesheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the stylesheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries to 1 will produce a fully collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: http://developer.apple.com/tools/xcode/), introduced with
# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler ( hhc.exe). If non-empty
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated (
# YES) or that it should be included in the master .chm file ( NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated (
# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using prerendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer ( doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer ( doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
# to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = YES
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when enabling USE_PDFLATEX this option is only used for generating
# bitmaps for formulas in the HTML output, but not in the Makefile that is
# written to the output directory.
# The default file is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. To get the times font for
# instance you can specify
# EXTRA_PACKAGES=times
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES = amsmath \
amsfonts \
xr
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
# for the replacement values of the other commands the user is referred to
# HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
# the PDF file directly from the LaTeX files. Set this option to YES to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's config
# file, i.e. a series of assignments. You only have to provide replacements,
# missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
# Definitions (see http://autogen.sf.net) file that captures the structure of
# the code including all documentation. Note that this feature is still
# experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
# in the source code. If set to NO only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES the includes files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
# class index. If set to NO only the inherited external classes will be listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
# the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
# The default file (with absolute path) is: /usr/bin/perl.
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see:
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES, the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
# graphical hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot.
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif and svg.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
# This tag requires that the tag HAVE_DOT is set to YES.
PLANTUML_JAR_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
================================================
FILE: caffe/.github/ISSUE_TEMPLATE.md
================================================
Please use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) for usage, installation, or modeling questions, or other requests for help.
_Do not post such requests to Issues._ Doing so interferes with the development of Caffe.
Please read the [guidelines for contributing](https://github.com/BVLC/caffe/blob/master/CONTRIBUTING.md) before submitting this issue.
### Issue summary
### Steps to reproduce
If you are having difficulty building Caffe or training a model, please ask the caffe-users mailing list. If you are reporting a build error that seems to be due to a bug in Caffe, please attach your build configuration (either Makefile.config or CMakeCache.txt) and the output of the make (or cmake) command.
### Your system configuration
Operating system:
Compiler:
CUDA version (if applicable):
CUDNN version (if applicable):
BLAS:
Python or MATLAB version (for pycaffe and matcaffe respectively):
================================================
FILE: caffe/.gitignore
================================================
## General
# [local]
*mypvanet*
# Compiled Object files
*.slo
*.lo
*.o
*.cuo
# Compiled Dynamic libraries
*.so
*.dylib
# Compiled Static libraries
*.lai
*.la
*.a
# Compiled protocol buffers
*.pb.h
*.pb.cc
*_pb2.py
# Compiled python
*.pyc
# Compiled MATLAB
*.mex*
# IPython notebook checkpoints
.ipynb_checkpoints
# Editor temporaries
*.swp
*~
# Sublime Text settings
*.sublime-workspace
*.sublime-project
# Eclipse Project settings
*.*project
.settings
# QtCreator files
*.user
# PyCharm files
.idea
# Visual Studio Code files
.vscode
# OSX dir files
.DS_Store
## Caffe
# User's build configuration
# Makefile.config
# Data and models are either
# 1. reference, and not casually committed
# 2. custom, and live on their own unless they're deliberately contributed
*.log
data/*
models/*
*.caffemodel
*.caffemodel.h5
*.solverstate
*.solverstate.h5
*.binaryproto
*leveldb
*lmdb
# build, distribute, and bins (+ python proto bindings)
build
.build_debug/*
.build_release/*
distribute/*
*.testbin
*.bin
python/caffe/proto/
cmake_build
.cmake_build
# Generated documentation
docs/_site
docs/_includes
docs/gathered
_site
doxygen
docs/dev
# LevelDB files
*.sst
*.ldb
LOCK
LOG*
CURRENT
MANIFEST-*
*.bk
================================================
FILE: caffe/.travis.yml
================================================
dist: trusty
sudo: required
language: cpp
compiler: gcc
env:
global:
- NUM_THREADS=4
matrix:
# Use a build matrix to test many builds in parallel
# envvar defaults:
# WITH_CMAKE: false
# WITH_PYTHON3: false
# WITH_IO: true
# WITH_CUDA: false
# WITH_CUDNN: false
- BUILD_NAME="default-make"
# - BUILD_NAME="python3-make" WITH_PYTHON3=true
- BUILD_NAME="no-io-make" WITH_IO=false
- BUILD_NAME="cuda-make" WITH_CUDA=true
- BUILD_NAME="cudnn-make" WITH_CUDA=true WITH_CUDNN=true
- BUILD_NAME="default-cmake" WITH_CMAKE=true
- BUILD_NAME="python3-cmake" WITH_CMAKE=true WITH_PYTHON3=true
- BUILD_NAME="no-io-cmake" WITH_CMAKE=true WITH_IO=false
- BUILD_NAME="cuda-cmake" WITH_CMAKE=true WITH_CUDA=true
- BUILD_NAME="cudnn-cmake" WITH_CMAKE=true WITH_CUDA=true WITH_CUDNN=true
cache:
apt: true
directories:
- ~/protobuf3
before_install:
- source ./scripts/travis/defaults.sh
install:
- sudo -E ./scripts/travis/install-deps.sh
- ./scripts/travis/setup-venv.sh ~/venv
- source ~/venv/bin/activate
- ./scripts/travis/install-python-deps.sh
before_script:
- ./scripts/travis/configure.sh
script:
- ./scripts/travis/build.sh
- ./scripts/travis/test.sh
notifications:
# Emails are sent to the committer's git-configured email address by default,
# but only if they have access to the repository. To enable Travis on your
# public fork of Caffe, just go to travis-ci.org and flip the switch on for
# your Caffe fork. To configure your git email address, use:
# git config --global user.email me@example.com
email:
on_success: always
on_failure: always
# IRC notifications disabled by default.
# Uncomment next 5 lines to send notifications to chat.freenode.net#caffe
# irc:
# channels:
# - "chat.freenode.net#caffe"
# template:
# - "%{repository}/%{branch} (%{commit} - %{author}): %{message}"
================================================
FILE: caffe/CMakeLists.txt
================================================
# Top-level CMake build script for the bundled Caffe fork.
# Sets policies, declares build options, resolves dependencies, configures
# compile flags, and wires up subdirectories plus utility targets.
cmake_minimum_required(VERSION 2.8.7)
# CMP0046 (NEW): treat missing dependencies in add_dependencies() as errors
# instead of silently ignoring them.
if(POLICY CMP0046)
cmake_policy(SET CMP0046 NEW)
endif()
# CMP0054 (NEW): only dereference unquoted if() arguments as variables,
# preventing accidental expansion of quoted strings.
if(POLICY CMP0054)
cmake_policy(SET CMP0054 NEW)
endif()
# ---[ Caffe project
project(Caffe C CXX)
# ---[ Caffe version
# CACHE STRING entries so packagers can override the version/soname from the
# command line (-DCAFFE_TARGET_VERSION=...).
set(CAFFE_TARGET_VERSION "1.0.0" CACHE STRING "Caffe logical version")
set(CAFFE_TARGET_SOVERSION "1.0.0" CACHE STRING "Caffe soname version")
add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION})
# ---[ Using cmake scripts and modules
# Project-local Find*.cmake modules (see cmake/Modules/) take part in
# find_package() resolution below.
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
include(ExternalProject)
include(GNUInstallDirs)
include(cmake/Utils.cmake)
include(cmake/Targets.cmake)
include(cmake/Misc.cmake)
include(cmake/Summary.cmake)
include(cmake/ConfigGen.cmake)
# ---[ Options
# caffe_option() is defined in cmake/Utils.cmake; the optional "IF <cond>"
# clause makes the option conditional on the given expression.
caffe_option(CPU_ONLY  "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
caffe_option(USE_NCCL "Build Caffe with NCCL library support" ON)
caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
caffe_option(BUILD_python "Build Python wrapper" ON)
set(python_version "2" CACHE STRING "Specify which Python version to use")
caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE)
caffe_option(BUILD_docs   "Build documentation" ON IF UNIX OR APPLE)
caffe_option(BUILD_python_layer "Build the Caffe Python layer" ON)
caffe_option(USE_OPENCV "Build with OpenCV support" ON)
caffe_option(USE_LEVELDB "Build with levelDB" ON)
caffe_option(USE_LMDB "Build with lmdb" ON)
caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
caffe_option(USE_OPENMP "Link with OpenMP (when your BLAS wants OpenMP and you get linker errors)" OFF)
# ---[ Dependencies
# Populates Caffe_INCLUDE_DIRS, Caffe_DEFINITIONS, HAVE_CUDA, etc. — must run
# before the flag/config-generation steps below that consume those variables.
include(cmake/Dependencies.cmake)
# ---[ Flags
if(UNIX OR APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
endif()
caffe_set_caffe_link()
if(USE_libstdcpp)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)")
endif()
# ---[ Warnings
caffe_warnings_disable(CMAKE_CXX_FLAGS -Wno-sign-compare -Wno-uninitialized)
# ---[ Config generation
# Writes caffe_config.h into the build tree from the template, baking in the
# option values chosen above.
configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_config.h")
# ---[ Includes
set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include)
set(Caffe_SRC_DIR ${PROJECT_SOURCE_DIR}/src)
# Build dir is on the include path so the generated caffe_config.h is found.
include_directories(${PROJECT_BINARY_DIR})
# ---[ Includes & defines for CUDA
# cuda_compile() does not have per-call dependencies or include pathes
# (cuda_compile() has per-call flags, but we set them here too for clarity)
#
# list(REMOVE_ITEM ...) invocations remove PRIVATE and PUBLIC keywords from collected definitions and include pathes
if(HAVE_CUDA)
# pass include pathes to cuda_include_directories()
set(Caffe_ALL_INCLUDE_DIRS ${Caffe_INCLUDE_DIRS})
# Caffe_INCLUDE_DIRS mixes in PRIVATE/PUBLIC scope keywords meant for
# target_include_directories(); strip them before handing to nvcc.
list(REMOVE_ITEM Caffe_ALL_INCLUDE_DIRS PRIVATE PUBLIC)
cuda_include_directories(${Caffe_INCLUDE_DIR} ${Caffe_SRC_DIR} ${Caffe_ALL_INCLUDE_DIRS})
# add definitions to nvcc flags directly
set(Caffe_ALL_DEFINITIONS ${Caffe_DEFINITIONS})
list(REMOVE_ITEM Caffe_ALL_DEFINITIONS PRIVATE PUBLIC)
list(APPEND CUDA_NVCC_FLAGS ${Caffe_ALL_DEFINITIONS})
endif()
# ---[ Subdirectories
add_subdirectory(src/gtest)
add_subdirectory(src/caffe)
add_subdirectory(tools)
add_subdirectory(examples)
add_subdirectory(python)
add_subdirectory(matlab)
add_subdirectory(docs)
# ---[ Linter target
# "make lint" runs the cpplint-based checker script.
add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake)
# ---[ pytest target
# "make pytest" discovers and runs the Python unit tests; depends on pycaffe
# so the bindings are built first.
if(BUILD_python)
add_custom_target(pytest COMMAND python${python_version} -m unittest discover -s caffe/test WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/python )
add_dependencies(pytest pycaffe)
endif()
# ---[ uninstall target
# Generates an Uninstall.cmake in the build tree (from the .in template) and
# exposes it as "make uninstall".
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/Uninstall.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/cmake/Uninstall.cmake
IMMEDIATE @ONLY)
add_custom_target(uninstall
COMMAND ${CMAKE_COMMAND} -P
${CMAKE_CURRENT_BINARY_DIR}/cmake/Uninstall.cmake)
# ---[ Configuration summary
caffe_print_configuration_summary()
# ---[ Export configs generation
caffe_generate_export_configs()
================================================
FILE: caffe/CONTRIBUTING.md
================================================
# Contributing
## Issues
Specific Caffe design and development issues, bugs, and feature requests are maintained by GitHub Issues.
_Please do not post usage, installation, or modeling questions, or other requests for help to Issues._
Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead. This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
When reporting a bug, it's most helpful to provide the following information, where applicable:
* What steps reproduce the bug?
* Can you reproduce the bug using the latest [master](https://github.com/BVLC/caffe/tree/master), compiled with the `DEBUG` make option?
* What hardware and operating system/distribution are you running?
* If the bug is a crash, provide the backtrace (usually printed by Caffe; always obtainable with `gdb`).
Try to give your issue a title that is succinct and specific. The devs will rename issues as needed to keep track of them.
## Pull Requests
Caffe welcomes all contributions.
See the [contributing guide](http://caffe.berkeleyvision.org/development.html) for details.
Briefly: read commit by commit, a PR should tell a clean, compelling story of _one_ improvement to Caffe. In particular:
* A PR should do one clear thing that obviously improves Caffe, and nothing more. Making many smaller PRs is better than making one large PR; review effort is superlinear in the amount of code involved.
* Similarly, each commit should be a small, atomic change representing one step in development. PRs should be made of many commits where appropriate.
* Please do rewrite PR history to be clean rather than chronological. Within-PR bugfixes, style cleanups, reversions, etc. should be squashed and should not appear in merged PR history.
* Anything nonobvious from the code should be explained in comments, commit messages, or the PR description, as appropriate.
================================================
FILE: caffe/CONTRIBUTORS.md
================================================
# Contributors
Caffe is developed by a core set of BAIR members and the open-source community.
We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)!
**For the detailed history of contributions** of a given file, try
git blame file
to see line-by-line credits and
git log --follow file
to see the change log even across renames and rewrites.
Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details.
**Copyright** is held by the original contributor according to the versioning history; see LICENSE.
================================================
FILE: caffe/INSTALL.md
================================================
# Installation
See http://caffe.berkeleyvision.org/installation.html for the latest
installation instructions.
Check the users group in case you need help:
https://groups.google.com/forum/#!forum/caffe-users
================================================
FILE: caffe/LICENSE
================================================
COPYRIGHT
All contributions by the University of California:
Copyright (c) 2014-2017 The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014-2017, the respective contributors
All rights reserved.
Caffe uses a shared copyright model: each contributor holds copyright over
their contributions to Caffe. The project versioning records all such
contribution and copyright details. If a contributor wants to further mark
their specific copyright on a particular contribution, they should indicate
their copyright solely in the commit message of the change when it is
committed.
LICENSE
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CONTRIBUTION AGREEMENT
By contributing to the BVLC/caffe repository through pull-request, comment,
or otherwise, the contributor releases their content to the
license and copyright terms herein.
================================================
FILE: caffe/Makefile
================================================
# Project name; used to derive library names, source paths and wrapper targets.
PROJECT := caffe
CONFIG_FILE := Makefile.config
# Explicitly check for the config file, otherwise make -k will proceed anyway.
ifeq ($(wildcard $(CONFIG_FILE)),)
$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.)
endif
include $(CONFIG_FILE)

# "build" is kept as a symlink to the active (release or debug) build tree so
# that toggling DEBUG does not clobber the other configuration's objects.
BUILD_DIR_LINK := $(BUILD_DIR)
ifeq ($(RELEASE_BUILD_DIR),)
	RELEASE_BUILD_DIR := .$(BUILD_DIR)_release
endif
ifeq ($(DEBUG_BUILD_DIR),)
	DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug
endif

DEBUG ?= 0
ifeq ($(DEBUG), 1)
	BUILD_DIR := $(DEBUG_BUILD_DIR)
	OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)
else
	BUILD_DIR := $(RELEASE_BUILD_DIR)
	OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)
endif

# All of the directories containing code.
SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \
	\( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)

# The target shared library name
LIBRARY_NAME := $(PROJECT)
LIB_BUILD_DIR := $(BUILD_DIR)/lib
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a
DYNAMIC_VERSION_MAJOR := 1
DYNAMIC_VERSION_MINOR := 0
DYNAMIC_VERSION_REVISION := 0
DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
#DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR)
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
DYNAMIC_NAME := $(LIB_BUILD_DIR)/$(DYNAMIC_VERSIONED_NAME_SHORT)
COMMON_FLAGS += -DCAFFE_VERSION=$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)

##############################
# Get all source files
##############################
# CXX_SRCS are the source files excluding the test ones.
CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")
# CU_SRCS are the cuda source files
CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")
# TEST_SRCS are the test source files
TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp
TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")
TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))
TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")
GTEST_SRC := src/gtest/gtest-all.cpp
# TOOL_SRCS are the source files for the tool binaries
TOOL_SRCS := $(shell find tools -name "*.cpp")
# EXAMPLE_SRCS are the source files for the example binaries
EXAMPLE_SRCS := $(shell find examples -name "*.cpp")
# BUILD_INCLUDE_DIR contains any generated header files we want to include.
BUILD_INCLUDE_DIR := $(BUILD_DIR)/src
# PROTO_SRCS are the protocol buffer definitions
PROTO_SRC_DIR := src/$(PROJECT)/proto
PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)
# PROTO_BUILD_DIR will contain the .cc and obj files generated from
# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files
PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)
PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto
# NONGEN_CXX_SRCS includes all source/header files except those generated
# automatically (e.g., by proto). These are the files fed to the linter.
NONGEN_CXX_SRCS := $(shell find \
	src/$(PROJECT) \
	include/$(PROJECT) \
	python/$(PROJECT) \
	matlab/+$(PROJECT)/private \
	examples \
	tools \
	-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")
LINT_SCRIPT := scripts/cpp_lint.py
LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint
LINT_EXT := lint.txt
LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))
EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)
NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)
# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)
PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp
PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so
PY$(PROJECT)_HXX := include/$(PROJECT)/layers/python_layer.hpp
# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT)
MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp
ifneq ($(MATLAB_DIR),)
	# mexext reports the platform-specific MEX file extension (e.g. mexa64).
	MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)
endif
MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT)
##############################
# Derive generated files
##############################
# The generated files for protocol buffers
PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \
	$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \
	$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})
PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto
PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py
PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \
	$(PY_PROTO_BUILD_DIR)/$(notdir $(file)))
# The objects corresponding to the source files
# These objects will be linked into the final shared library, so we
# exclude the tool, example, and test objects.
CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})
CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})
PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}
OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)
# tool, example, and test objects
TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})
TOOL_BUILD_DIR := $(BUILD_DIR)/tools
TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test
TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test
TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})
TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})
TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)
GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})
EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})
# Output files for automatic dependency generation (compiler -MMD .d files)
DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \
	${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}
# tool, example, and test bins
TOOL_BINS := ${TOOL_OBJS:.o=.bin}
EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}
# symlinks to tool bins without the ".bin" extension
TOOL_BIN_LINKS := ${TOOL_BINS:.bin=}
# Put the test binaries in build/test for convenience.
TEST_BIN_DIR := $(BUILD_DIR)/test
TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
	$(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))
TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
	$(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))
TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)
# TEST_ALL_BIN is the test binary that links caffe dynamically.
TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin

##############################
# Derive compiler warning dump locations
##############################
# Each object's compiler stderr is captured next to it in a *.warnings.txt
# file; the "warn" target later concatenates them into a single report.
WARNS_EXT := warnings.txt
CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})
CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})
TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})
EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})
ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)
ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)
ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)
EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)
NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)
##############################
# Derive include and lib directories
##############################
CUDA_INCLUDE_DIR := $(CUDA_DIR)/include
CUDA_LIB_DIR :=
# add <cuda>/lib64 only if it exists
ifneq ("$(wildcard $(CUDA_DIR)/lib64)","")
	CUDA_LIB_DIR += $(CUDA_DIR)/lib64
endif
CUDA_LIB_DIR += $(CUDA_DIR)/lib

INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include
ifneq ($(CPU_ONLY), 1)
	INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)
	LIBRARY_DIRS += $(CUDA_LIB_DIR)
	LIBRARIES := cudart cublas curand
endif

LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5

# handle IO dependencies (each defaults to enabled; Makefile.config may set 0)
USE_LEVELDB ?= 1
USE_LMDB ?= 1
USE_OPENCV ?= 1

ifeq ($(USE_LEVELDB), 1)
	LIBRARIES += leveldb snappy
endif
ifeq ($(USE_LMDB), 1)
	LIBRARIES += lmdb
endif
ifeq ($(USE_OPENCV), 1)
	LIBRARIES += opencv_core opencv_highgui opencv_imgproc

	# OpenCV 3 moved image codecs into their own library.
	ifeq ($(OPENCV_VERSION), 3)
		LIBRARIES += opencv_imgcodecs
	endif

endif
PYTHON_LIBRARIES ?= boost_python python2.7
WARNINGS := -Wall -Wno-sign-compare

##############################
# Set build directories
##############################
DISTRIBUTE_DIR ?= distribute
DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib
DIST_ALIASES := dist
ifneq ($(strip $(DISTRIBUTE_DIR)),distribute)
	DIST_ALIASES += distribute
endif

# Every directory that must exist before objects/binaries can be written.
ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \
	$(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \
	$(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \
	$(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))

##############################
# Set directory for Doxygen-generated documentation
##############################
DOXYGEN_CONFIG_FILE ?= ./.Doxyfile
# should be the same as OUTPUT_DIRECTORY in the .Doxyfile
DOXYGEN_OUTPUT_DIR ?= ./doxygen
DOXYGEN_COMMAND ?= doxygen
# All the files that might have Doxygen documentation.
DOXYGEN_SOURCES := $(shell find \
	src/$(PROJECT) \
	include/$(PROJECT) \
	python/ \
	matlab/ \
	examples \
	tools \
	-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \
	-name "*.py" -or -name "*.m")
DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)
##############################
# Configure build
##############################
# Determine platform
UNAME := $(shell uname -s)
ifeq ($(UNAME), Linux)
	LINUX := 1
else ifeq ($(UNAME), Darwin)
	OSX := 1
	OSX_MAJOR_VERSION := $(shell sw_vers -productVersion | cut -f 1 -d .)
	OSX_MINOR_VERSION := $(shell sw_vers -productVersion | cut -f 2 -d .)
endif

# Linux
ifeq ($(LINUX), 1)
	CXX ?= /usr/bin/g++
	GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)
	# older versions of gcc are too dumb to build boost with -Wuninitialized
	ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1)
		WARNINGS += -Wno-uninitialized
	endif
	# boost::thread is reasonably called boost_thread (compare OS X)
	# We will also explicitly add stdc++ to the link target.
	LIBRARIES += boost_thread stdc++
	VERSIONFLAGS += -Wl,-soname,$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../lib
endif

# OS X:
# clang++ instead of g++
# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0
ifeq ($(OSX), 1)
	CXX := /usr/bin/clang++
	ifneq ($(CPU_ONLY), 1)
		CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release [0-9.]*' | tr -d '[a-z ]')
		ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1)
			CXXFLAGS += -stdlib=libstdc++
			LINKFLAGS += -stdlib=libstdc++
		endif
		# clang throws this warning for cuda headers
		WARNINGS += -Wno-unneeded-internal-declaration
		# 10.11 strips DYLD_* env vars so link CUDA (rpath is available on 10.5+)
		OSX_10_OR_LATER := $(shell [ $(OSX_MAJOR_VERSION) -ge 10 ] && echo true)
		OSX_10_5_OR_LATER := $(shell [ $(OSX_MINOR_VERSION) -ge 5 ] && echo true)
		ifeq ($(OSX_10_OR_LATER),true)
			ifeq ($(OSX_10_5_OR_LATER),true)
				LDFLAGS += -Wl,-rpath,$(CUDA_LIB_DIR)
			endif
		endif
	endif
	# gtest needs to use its own tuple to not conflict with clang
	COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1
	# boost::thread is called boost_thread-mt to mark multithreading on OS X
	LIBRARIES += boost_thread-mt
	# we need to explicitly ask for the rpath to be obeyed
	ORIGIN := @loader_path
	VERSIONFLAGS += -Wl,-install_name,@rpath/$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../../build/lib
else
	ORIGIN := \$$ORIGIN
endif

# Custom compiler
ifdef CUSTOM_CXX
	CXX := $(CUSTOM_CXX)
endif

# Static linking: select the linker incantation that forces the whole static
# archive into the MEX binary, depending on which compiler family is in use.
ifneq (,$(findstring clang++,$(CXX)))
	STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)
else ifneq (,$(findstring g++,$(CXX)))
	STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive
else
  # The following line must not be indented with a tab, since we are not inside a target
  $(error Cannot static link with the $(CXX) compiler)
endif

# Debugging
ifeq ($(DEBUG), 1)
	COMMON_FLAGS += -DDEBUG -g -O0
	NVCCFLAGS += -G
else
	COMMON_FLAGS += -DNDEBUG -O2
endif

# cuDNN acceleration configuration.
ifeq ($(USE_CUDNN), 1)
	LIBRARIES += cudnn
	COMMON_FLAGS += -DUSE_CUDNN
endif

# NCCL acceleration configuration
ifeq ($(USE_NCCL), 1)
	LIBRARIES += nccl
	COMMON_FLAGS += -DUSE_NCCL
endif

# configure IO libraries
ifeq ($(USE_OPENCV), 1)
	COMMON_FLAGS += -DUSE_OPENCV
endif
ifeq ($(USE_LEVELDB), 1)
	COMMON_FLAGS += -DUSE_LEVELDB
endif
ifeq ($(USE_LMDB), 1)
	COMMON_FLAGS += -DUSE_LMDB
	ifeq ($(ALLOW_LMDB_NOLOCK), 1)
		COMMON_FLAGS += -DALLOW_LMDB_NOLOCK
	endif
endif

# CPU-only configuration: drop all CUDA objects/tests and filter GPU gtests.
ifeq ($(CPU_ONLY), 1)
	OBJS := $(PROTO_OBJS) $(CXX_OBJS)
	TEST_OBJS := $(TEST_CXX_OBJS)
	TEST_BINS := $(TEST_CXX_BINS)
	ALL_WARNS := $(ALL_CXX_WARNS)
	TEST_FILTER := --gtest_filter="-*GPU*"
	COMMON_FLAGS += -DCPU_ONLY
endif

# Python layer support
ifeq ($(WITH_PYTHON_LAYER), 1)
	COMMON_FLAGS += -DWITH_PYTHON_LAYER
	LIBRARIES += $(PYTHON_LIBRARIES)
endif

# BLAS configuration (default = ATLAS)
BLAS ?= atlas
ifeq ($(BLAS), mkl)
	# MKL
	LIBRARIES += mkl_rt
	COMMON_FLAGS += -DUSE_MKL
	MKLROOT ?= /opt/intel/mkl
	BLAS_INCLUDE ?= $(MKLROOT)/include
	BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64
else ifeq ($(BLAS), open)
	# OpenBLAS
	LIBRARIES += openblas
else
	# ATLAS
	ifeq ($(LINUX), 1)
		ifeq ($(BLAS), atlas)
			# Linux simply has cblas and atlas
			LIBRARIES += cblas atlas
		endif
	else ifeq ($(OSX), 1)
		# OS X packages atlas as the vecLib framework
		LIBRARIES += cblas
		# 10.10 has accelerate while 10.9 has veclib
		XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep 'version' | sed 's/[^0-9]*\([0-9]\).*/\1/')
		XCODE_CLT_GEQ_7 := $(shell [ $(XCODE_CLT_VER) -gt 6 ] && echo 1)
		XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1)
		ifeq ($(XCODE_CLT_GEQ_7), 1)
			BLAS_INCLUDE ?= /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/$(shell ls /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ | sort | tail -1)/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers
		else ifeq ($(XCODE_CLT_GEQ_6), 1)
			BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
			LDFLAGS += -framework Accelerate
		else
			BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/
			LDFLAGS += -framework vecLib
		endif
	endif
endif
INCLUDE_DIRS += $(BLAS_INCLUDE)
LIBRARY_DIRS += $(BLAS_LIB)

LIBRARY_DIRS += $(LIB_BUILD_DIR)

# Automatic dependency generation (nvcc is handled separately)
CXXFLAGS += -MMD -MP

# Complete build flags.
COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
# mex may invoke an older gcc that is too liberal with -Wuninitialized
MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized
LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)

USE_PKG_CONFIG ?= 0
ifeq ($(USE_PKG_CONFIG), 1)
	PKG_CONFIG := $(shell pkg-config opencv --libs)
else
	PKG_CONFIG :=
endif
LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \
	$(foreach library,$(LIBRARIES),-l$(library))
PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))

# 'superclean' target recursively* deletes all files ending with an extension
# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older
# versions of Caffe that do not place all generated files in a location known
# to the 'clean' target.
#
# 'supercleanlist' will list the files to be deleted by make superclean.
#
# * Recursive with the exception that symbolic links are never followed, per the
# default behavior of 'find'.
SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo

# Set the sub-targets of the 'everything' target.
EVERYTHING_TARGETS := all py$(PROJECT) test warn lint
# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.
ifneq ($(MATLAB_DIR),)
	EVERYTHING_TARGETS += mat$(PROJECT)
endif
##############################
# Define build targets
##############################
.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \
	py mat py$(PROJECT) mat$(PROJECT) proto runtest \
	superclean supercleanlist supercleanfiles warn everything

# Default target: libraries plus command-line tools and example binaries.
all: lib tools examples

lib: $(STATIC_NAME) $(DYNAMIC_NAME)

everything: $(EVERYTHING_TARGETS)

linecount:
	cloc --read-lang-def=$(PROJECT).cloc \
		src/$(PROJECT) include/$(PROJECT) tools examples \
		python matlab

lint: $(EMPTY_LINT_REPORT)

lintclean:
	@ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)

docs: $(DOXYGEN_OUTPUT_DIR)
	@ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen

$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)
	$(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)

# Concatenate the per-file lint outputs; fail (exit 1) if any is non-empty.
$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)
	@ cat $(LINT_OUTPUTS) > $@
	@ if [ -s "$@" ]; then \
		cat $@; \
		mv $@ $(NONEMPTY_LINT_REPORT); \
		echo "Found one or more lint errors."; \
		exit 1; \
	  fi; \
	  $(RM) $(NONEMPTY_LINT_REPORT); \
	  echo "No lint errors!";

# Run the linter on a single source file, keeping only real error lines.
$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)
	@ mkdir -p $(dir $@)
	@ python $(LINT_SCRIPT) $< 2>&1 \
		| grep -v "^Done processing " \
		| grep -v "^Total errors found: 0" \
		> $@ \
		|| true

test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)

tools: $(TOOL_BINS) $(TOOL_BIN_LINKS)

examples: $(EXAMPLE_BINS)

py$(PROJECT): py

py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)

# Build the pycaffe extension module against the shared libcaffe.
# NOTE(review): the original recipe passed '-o $@' twice on the link line;
# the redundant second occurrence has been removed (same output, same flags).
$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)
	@ echo CXX/LD -o $@ $<
	$(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \
		$(LINKFLAGS) -l$(LIBRARY_NAME) $(PYTHON_LDFLAGS) \
		-Wl,-rpath,$(ORIGIN)/../../build/lib

mat$(PROJECT): mat

mat: $(MAT$(PROJECT)_SO)

# Build the matcaffe MEX binary; requires MATLAB_DIR in Makefile.config.
$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
	@ if [ -z "$(MATLAB_DIR)" ]; then \
		echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \
			"to build mat$(PROJECT)."; \
		exit 1; \
	fi
	@ echo MEX $<
	$(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \
		CXX="$(CXX)" \
		CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \
		CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@
	@ if [ -f "$(PROJECT)_.d" ]; then \
		mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \
	fi

runtest: $(TEST_ALL_BIN)
	$(TOOL_BUILD_DIR)/caffe
	$(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)

pytest: py
	cd python; python -m unittest discover -s caffe/test

mattest: mat
	cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()'

warn: $(EMPTY_WARN_REPORT)

# Concatenate the per-object warning dumps; fail if the compiler warned.
$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)
	@ cat $(ALL_WARNS) > $@
	@ if [ -s "$@" ]; then \
		cat $@; \
		mv $@ $(NONEMPTY_WARN_REPORT); \
		echo "Compiler produced one or more warnings."; \
		exit 1; \
	  fi; \
	  $(RM) $(NONEMPTY_WARN_REPORT); \
	  echo "No compiler warnings!";

$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o

$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked

# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link
# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it
# exists and $(DEBUG) is toggled later.
$(BUILD_DIR)/.linked:
	@ mkdir -p $(BUILD_DIR)
	@ $(RM) $(OTHER_BUILD_DIR)/.linked
	@ $(RM) -r $(BUILD_DIR_LINK)
	@ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)
	@ touch $@

$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)
	@ mkdir -p $@

# Link the versioned shared library and refresh the unversioned symlink.
$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
	@ echo LD -o $@
	$(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS)
	@ cd $(BUILD_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)

$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
	@ echo AR -o $@
	$(Q)ar rcs $@ $(OBJS)

# Compile a C++ source; stderr is captured to a .warnings.txt file and echoed.
$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)
	@ echo CXX $<
	$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
		|| (cat $@.$(WARNS_EXT); exit 1)
	@ cat $@.$(WARNS_EXT)

$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \
		| $(PROTO_BUILD_DIR)
	@ echo CXX $<
	$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
		|| (cat $@.$(WARNS_EXT); exit 1)
	@ cat $@.$(WARNS_EXT)

# Compile a CUDA source: first generate the .d dependency file, then the object.
$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)
	@ echo NVCC $<
	$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \
		-odir $(@D)
	$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \
		|| (cat $@.$(WARNS_EXT); exit 1)
	@ cat $@.$(WARNS_EXT)

$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
		| $(DYNAMIC_NAME) $(TEST_BIN_DIR)
	@ echo CXX/LD -o $@ $<
	$(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
		-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \
		$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
	@ echo LD $<
	$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
		-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \
		$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
	@ echo LD $<
	$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
		-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

# Target for extension-less symlinks to tool binaries with extension '*.bin'.
$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)
	@ $(RM) $@
	@ ln -s $(notdir $<) $@

$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)
	@ echo CXX/LD -o $@
	$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \
		-Wl,-rpath,$(ORIGIN)/../lib

$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)
	@ echo CXX/LD -o $@
	$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \
		-Wl,-rpath,$(ORIGIN)/../../lib

proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)

$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \
		$(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)
	@ echo PROTOC $<
	$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<

$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \
		$(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)
	@ echo PROTOC \(python\) $<
	$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<

$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
	touch $(PY_PROTO_INIT)

clean:
	@- $(RM) -rf $(ALL_BUILD_DIRS)
	@- $(RM) -rf $(OTHER_BUILD_DIR)
	@- $(RM) -rf $(BUILD_DIR_LINK)
	@- $(RM) -rf $(DISTRIBUTE_DIR)
	@- $(RM) $(PY$(PROJECT)_SO)
	@- $(RM) $(MAT$(PROJECT)_SO)

# Collect (but do not delete) every file matching $(SUPERCLEAN_EXTS).
supercleanfiles:
	$(eval SUPERCLEAN_FILES := $(strip \
		$(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \
		-not -path './data/*'))))

supercleanlist: supercleanfiles
	@ \
	if [ -z "$(SUPERCLEAN_FILES)" ]; then \
		echo "No generated files found."; \
	else \
		echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
	fi

superclean: clean supercleanfiles
	@ \
	if [ -z "$(SUPERCLEAN_FILES)" ]; then \
		echo "No generated files found."; \
	else \
		echo "Deleting the following generated files:"; \
		echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
		$(RM) $(SUPERCLEAN_FILES); \
	fi

$(DIST_ALIASES): $(DISTRIBUTE_DIR)

# Stage headers, binaries, libraries and python bindings into $(DISTRIBUTE_DIR).
$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)
	# add proto
	cp -r src/caffe/proto $(DISTRIBUTE_DIR)/
	# add include
	cp -r include $(DISTRIBUTE_DIR)/
	mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto
	cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto
	# add tool and example binaries
	cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin
	cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin
	# add libraries
	cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib
	install -m 644 $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib
	cd $(DISTRIBUTE_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)
	# add python - it's not the standard way, indeed...
	cp -r python $(DISTRIBUTE_DIR)/python

# Pull in the compiler-generated .d dependency files (silently skipped if absent).
-include $(DEPS)
================================================
FILE: caffe/Makefile.config.example
================================================
## Refer to http://caffe.berkeleyvision.org/installation.html
# Contributions simplifying and improving our build system are welcome!
# cuDNN acceleration switch (uncomment to build with cuDNN).
# USE_CUDNN := 1
# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1
# uncomment to disable IO dependencies and corresponding data layers
# USE_OPENCV := 0
# USE_LEVELDB := 0
# USE_LMDB := 0
# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
# possibility of simultaneous read and write
# ALLOW_LMDB_NOLOCK := 1
# Uncomment if you're using OpenCV 3
# OPENCV_VERSION := 3
# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++
# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr
# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 through *_61 lines for compatibility.
# For CUDA < 8.0, comment the *_60 and *_61 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
-gencode arch=compute_30,code=sm_30 \
-gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_50,code=sm_50 \
-gencode arch=compute_52,code=sm_52 \
-gencode arch=compute_60,code=sm_60 \
-gencode arch=compute_61,code=sm_61 \
-gencode arch=compute_61,code=compute_61
# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBLAS
BLAS := atlas
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
# BLAS_INCLUDE := /path/to/your/blas
# BLAS_LIB := /path/to/your/blas
# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib
# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app
# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
PYTHON_INCLUDE := /usr/include/python2.7 \
/usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
# ANACONDA_HOME := $(HOME)/anaconda
# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
# $(ANACONDA_HOME)/include/python2.7 \
# $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include
# Uncomment to use Python 3 (default is Python 2)
# PYTHON_LIBRARIES := boost_python3 python3.5m
# PYTHON_INCLUDE := /usr/include/python3.5m \
# /usr/lib/python3.5/dist-packages/numpy/core/include
# We need to be able to find libpythonX.X.so or .dylib.
PYTHON_LIB := /usr/lib
# PYTHON_LIB := $(ANACONDA_HOME)/lib
# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib
# Uncomment to support layers written in Python (will link against Python libs)
# WITH_PYTHON_LAYER := 1
# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib
# NCCL acceleration switch (uncomment to build with NCCL)
# https://github.com/NVIDIA/nccl (last tested version: v1.2.3-1+cuda8.0)
# USE_NCCL := 1
# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1
# N.B. both build and distribute dirs are cleared on `make clean`
BUILD_DIR := build
DISTRIBUTE_DIR := distribute
# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171
# DEBUG := 1
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0
# enable pretty build (comment to see full commands)
Q ?= @
================================================
FILE: caffe/README.md
================================================
# caffe
================================================
FILE: caffe/caffe.cloc
================================================
Bourne Shell
filter remove_matches ^\s*#
filter remove_inline #.*$
extension sh
script_exe sh
C
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension c
extension ec
extension pgc
C++
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension C
extension cc
extension cpp
extension cxx
extension pcc
C/C++ Header
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension H
extension h
extension hh
extension hpp
CUDA
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension cu
Python
filter remove_matches ^\s*#
filter docstring_to_C
filter call_regexp_common C
filter remove_inline #.*$
extension py
make
filter remove_matches ^\s*#
filter remove_inline #.*$
extension Gnumakefile
extension Makefile
extension am
extension gnumakefile
extension makefile
filename Gnumakefile
filename Makefile
filename gnumakefile
filename makefile
script_exe make
================================================
FILE: caffe/cmake/ConfigGen.cmake
================================================
################################################################################################
# Helper function to get all list items that begin with given prefix
# Usage:
# caffe_get_items_with_prefix(<prefix> <list_variable> <output_variable>)
# Collect into <output_variable> (set in the caller's scope) every entry of the
# list named by <list_variable> whose text starts with <prefix>; the relative
# order of the input list is preserved.
function(caffe_get_items_with_prefix prefix list_variable output_variable)
  set(__collected "")
  foreach(__candidate ${${list_variable}})
    # A trailing ".*" would be redundant here: MATCHES succeeds as soon as the
    # prefix is found anchored at the start of the item.
    if(__candidate MATCHES "^${prefix}")
      list(APPEND __collected ${__candidate})
    endif()
  endforeach()
  set(${output_variable} ${__collected} PARENT_SCOPE)
endfunction()
################################################################################################
# Function for generation Caffe build- and install- tree export config files
# Usage:
# caffe_generate_export_configs()
# Generates CaffeConfig.cmake for both the build tree and the install tree, and
# registers the export sets so downstream projects can find_package(Caffe).
function(caffe_generate_export_configs)
set(install_cmake_suffix "share/Caffe")
# Normalize the optional feature flags so the configured template always sees
# an explicit TRUE/FALSE value instead of an undefined variable.
if(NOT HAVE_CUDA)
set(HAVE_CUDA FALSE)
endif()
if(NOT HAVE_CUDNN)
set(HAVE_CUDNN FALSE)
endif()
# ---[ Configure build-tree CaffeConfig.cmake file ]---
configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/CaffeConfig.cmake" @ONLY)
# Add targets to the build-tree export set
export(TARGETS caffe proto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake")
export(PACKAGE Caffe)
# ---[ Configure install-tree CaffeConfig.cmake file ]---
configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" @ONLY)
# Install the CaffeConfig.cmake and export set to use with install-tree
install(FILES "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" DESTINATION ${install_cmake_suffix})
install(EXPORT CaffeTargets DESTINATION ${install_cmake_suffix})
# ---[ Configure and install version file ]---
# TODO: Lines below are commented because Caffe doesn't declare its version in headers.
# When the declarations are added, modify `caffe_extract_caffe_version()` macro and uncomment
# configure_file(cmake/Templates/CaffeConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" @ONLY)
# install(FILES "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" DESTINATION ${install_cmake_suffix})
endfunction()
================================================
FILE: caffe/cmake/Cuda.cmake
================================================
# In CPU-only builds this whole module is a no-op.
if(CPU_ONLY)
return()
endif()
# Known NVIDIA GPU architectures Caffe can be compiled for.
# This list will be used for CUDA_ARCH_NAME = All option.
# "21(20)" uses the BIN(PTX) notation parsed in caffe_select_nvcc_arch_flags:
# build sm_21 binary code from the compute_20 virtual architecture.
set(Caffe_known_gpu_archs "20 21(20) 30 35 50 60 61")
################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
# caffe_detect_installed_gpus(out_variable)
# Sets <out_variable> (parent scope) to the space-separated compute capabilities
# of the GPUs in this machine (e.g. "3.5 6.1"), or to Caffe_known_gpu_archs
# when detection fails.
function(caffe_detect_installed_gpus out_variable)
# Detection compiles and runs a CUDA program, so the result is cached in
# CUDA_gpu_detect_output and reused on subsequent configure runs.
if(NOT CUDA_gpu_detect_output)
set(__cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
# Emit a minimal CUDA program that prints "major.minor " for every device.
file(WRITE ${__cufile} ""
"#include <cstdio>\n"
"int main()\n"
"{\n"
" int count = 0;\n"
" if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
" if (count == 0) return -1;\n"
" for (int device = 0; device < count; ++device)\n"
" {\n"
" cudaDeviceProp prop;\n"
" if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
" std::printf(\"%d.%d \", prop.major, prop.minor);\n"
" }\n"
" return 0;\n"
"}\n")
# Compile and execute the probe in one step; a non-zero result (no nvcc, no
# device, probe failed) simply leaves the cache variable unset.
execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${__cufile}"
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(__nvcc_res EQUAL 0)
# Rewrite "2.1" as the BIN(PTX) pair "2.1(2.0)" so sm_21 binaries are built
# from compute_20 PTX (see the pattern handling in caffe_select_nvcc_arch_flags).
string(REPLACE "2.1" "2.1(2.0)" __nvcc_out "${__nvcc_out}")
set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architetures from caffe_detect_gpus tool" FORCE)
endif()
endif()
# Fall back to every known architecture when nothing was detected.
if(NOT CUDA_gpu_detect_output)
message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
set(${out_variable} ${Caffe_known_gpu_archs} PARENT_SCOPE)
else()
set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
endif()
endfunction()
################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
# caffe_select_nvcc_arch_flags(out_variable)
# Computes nvcc architecture flags from the user-selectable CUDA_ARCH_NAME.
# Sets two parent-scope results:
#   ${out_variable}          - list of -gencode arch=...,code=... flags for nvcc
#   ${out_variable}_readable - human-readable summary for status messages
function(caffe_select_nvcc_arch_flags out_variable)
# List of arch names
set(__archs_names "Fermi" "Kepler" "Maxwell" "Pascal" "All" "Manual")
set(__archs_name_default "All")
# "Auto" runs a detection probe on the build host, so it is only offered (and
# made the default) when not cross-compiling.
if(NOT CMAKE_CROSSCOMPILING)
list(APPEND __archs_names "Auto")
set(__archs_name_default "Auto")
endif()
# set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui)
set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.")
set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${__archs_names} )
mark_as_advanced(CUDA_ARCH_NAME)
# verify CUDA_ARCH_NAME value
if(NOT ";${__archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
string(REPLACE ";" ", " __archs_names "${__archs_names}")
message(FATAL_ERROR "Only ${__archs_names} architeture names are supported.")
endif()
# "Manual" exposes CUDA_ARCH_BIN / CUDA_ARCH_PTX as cache options; every other
# choice removes them so stale manual values cannot leak into the computation.
if(${CUDA_ARCH_NAME} STREQUAL "Manual")
set(CUDA_ARCH_BIN ${Caffe_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
else()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)
endif()
# Map the selected generation name to its compute-capability list.
if(${CUDA_ARCH_NAME} STREQUAL "Fermi")
set(__cuda_arch_bin "20 21(20)")
elseif(${CUDA_ARCH_NAME} STREQUAL "Kepler")
set(__cuda_arch_bin "30 35")
elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
set(__cuda_arch_bin "50")
elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
set(__cuda_arch_bin "60 61")
elseif(${CUDA_ARCH_NAME} STREQUAL "All")
set(__cuda_arch_bin ${Caffe_known_gpu_archs})
elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
caffe_detect_installed_gpus(__cuda_arch_bin)
else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
set(__cuda_arch_bin ${CUDA_ARCH_BIN})
endif()
# remove dots and convert to lists ("3.5" -> "35"; tokens like "21(20)" survive)
string(REGEX REPLACE "\\." "" __cuda_arch_bin "${__cuda_arch_bin}")
string(REGEX REPLACE "\\." "" __cuda_arch_ptx "${CUDA_ARCH_PTX}")
string(REGEX MATCHALL "[0-9()]+" __cuda_arch_bin "${__cuda_arch_bin}")
string(REGEX MATCHALL "[0-9]+" __cuda_arch_ptx "${__cuda_arch_ptx}")
caffe_list_unique(__cuda_arch_bin __cuda_arch_ptx)
set(__nvcc_flags "")
set(__nvcc_archs_readable "")
# Tell NVCC to add binaries for the specified GPUs
foreach(__arch ${__cuda_arch_bin})
if(__arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
# User explicitly specified PTX for the concrete BIN, e.g. "21(20)"
list(APPEND __nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
list(APPEND __nvcc_archs_readable sm_${CMAKE_MATCH_1})
else()
# User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=sm_${__arch})
list(APPEND __nvcc_archs_readable sm_${__arch})
endif()
endforeach()
# Tell NVCC to add PTX intermediate code for the specified architectures
foreach(__arch ${__cuda_arch_ptx})
list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=compute_${__arch})
list(APPEND __nvcc_archs_readable compute_${__arch})
endforeach()
string(REPLACE ";" " " __nvcc_archs_readable "${__nvcc_archs_readable}")
set(${out_variable} ${__nvcc_flags} PARENT_SCOPE)
set(${out_variable}_readable ${__nvcc_archs_readable} PARENT_SCOPE)
endfunction()
################################################################################################
# Short command for cuda compilation
# Usage:
# caffe_cuda_compile(<objlist_variable> <cuda_files>)
# Compiles the given .cu files via cuda_compile() and stores the resulting
# object list in <objlist_variable>. Host C++ flags are temporarily sanitized
# around the call and restored afterwards.
macro(caffe_cuda_compile objlist_variable)
# Back up each flags variable before stripping options nvcc cannot forward.
foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
set(${var}_backup_in_cuda_compile_ "${${var}}")
# we remove /EHa as it generates warnings under windows
string(REPLACE "/EHa" "" ${var} "${${var}}")
endforeach()
if(UNIX OR APPLE)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
endif()
if(APPLE)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -Wno-unused-function)
endif()
cuda_compile(cuda_objcs ${ARGN})
# Restore the original flag values and drop the temporary backups.
foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
set(${var} "${${var}_backup_in_cuda_compile_}")
unset(${var}_backup_in_cuda_compile_)
endforeach()
set(${objlist_variable} ${cuda_objcs})
endmacro()
################################################################################################
# Short command for cuDNN detection. Believe it soon will be a part of CUDA toolkit distribution.
# That's why not FindcuDNN.cmake file, but just the macro
# Usage:
# detect_cuDNN()
# Locates the cuDNN header and library, parses the version out of cudnn.h, and
# on success sets HAVE_CUDNN, CUDNN_FOUND and CUDNN_VERSION in the parent scope.
# Fails the configure step for cuDNN versions older than v3.
function(detect_cuDNN)
set(CUDNN_ROOT "" CACHE PATH "CUDNN root folder")
# Prefer an explicit CUDNN_ROOT (cache or environment) over the CUDA toolkit's
# own include directory.
find_path(CUDNN_INCLUDE cudnn.h
PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDA_TOOLKIT_INCLUDE}
DOC "Path to cuDNN include directory." )
# dynamic libs have different suffix in mac and linux
if(APPLE)
set(CUDNN_LIB_NAME "libcudnn.dylib")
else()
set(CUDNN_LIB_NAME "libcudnn.so")
endif()
# Also search next to libcudart, since cuDNN is often dropped into the CUDA tree.
get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
find_library(CUDNN_LIBRARY NAMES ${CUDNN_LIB_NAME}
PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist} ${__libpath_hist}/../lib
DOC "Path to cuDNN library.")
if(CUDNN_INCLUDE AND CUDNN_LIBRARY)
set(HAVE_CUDNN TRUE PARENT_SCOPE)
set(CUDNN_FOUND TRUE PARENT_SCOPE)
# Extract the version from the CUDNN_MAJOR/MINOR/PATCHLEVEL defines in cudnn.h.
file(READ ${CUDNN_INCLUDE}/cudnn.h CUDNN_VERSION_FILE_CONTENTS)
# cuDNN v3 and beyond
string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)"
CUDNN_VERSION_MAJOR "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1"
CUDNN_VERSION_MAJOR "${CUDNN_VERSION_MAJOR}")
string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)"
CUDNN_VERSION_MINOR "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1"
CUDNN_VERSION_MINOR "${CUDNN_VERSION_MINOR}")
string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)"
CUDNN_VERSION_PATCH "${CUDNN_VERSION_FILE_CONTENTS}")
string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1"
CUDNN_VERSION_PATCH "${CUDNN_VERSION_PATCH}")
# Headers lacking the version defines (pre-v3, presumably) get the "???" marker.
if(NOT CUDNN_VERSION_MAJOR)
set(CUDNN_VERSION "???")
else()
set(CUDNN_VERSION "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}")
endif()
message(STATUS "Found cuDNN: ver. ${CUDNN_VERSION} found (include: ${CUDNN_INCLUDE}, library: ${CUDNN_LIBRARY})")
# Reject any major version below 3.
string(COMPARE LESS "${CUDNN_VERSION_MAJOR}" 3 cuDNNVersionIncompatible)
if(cuDNNVersionIncompatible)
message(FATAL_ERROR "cuDNN version >3 is required.")
endif()
set(CUDNN_VERSION "${CUDNN_VERSION}" PARENT_SCOPE)
mark_as_advanced(CUDNN_INCLUDE CUDNN_LIBRARY CUDNN_ROOT)
endif()
endfunction()
################################################################################################
### Non macro section
################################################################################################
find_package(CUDA 5.5 QUIET)
find_cuda_helper_libs(curand) # cmake 2.8.7 compatibility: it doesn't search for curand
if(NOT CUDA_FOUND)
return()
endif()
set(HAVE_CUDA TRUE)
message(STATUS "CUDA detected: " ${CUDA_VERSION})
# Export CUDA headers and the cudart/curand/cublas runtime libraries to users.
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${CUDA_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${CUDA_CUDART_LIBRARY}
${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES})
# cudnn detection (opt-in; silently skipped when detect_cuDNN finds nothing)
if(USE_CUDNN)
detect_cuDNN()
if(HAVE_CUDNN)
list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_CUDNN)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${CUDNN_INCLUDE})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${CUDNN_LIBRARY})
endif()
endif()
# setting nvcc arch flags
caffe_select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
# Boost 1.55 workaround, see https://svn.boost.org/trac/boost/ticket/9392 or
# https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
if(Boost_VERSION EQUAL 105500)
message(STATUS "Cuda + Boost 1.55: Applying noinline work around")
# avoid warning for CMake >= 2.8.12
# NOTE(review): this set() flattens the CUDA_NVCC_FLAGS *list* into a single
# space-joined string; the list(APPEND ...) below still attaches new items,
# but quoting of the existing flags becomes fragile — confirm before changing.
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ")
endif()
# disable some nvcc diagnostics that appear in boost, glog, gflags, opencv, etc.
foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration set_but_not_used)
list(APPEND CUDA_NVCC_FLAGS -Xcudafe --diag_suppress=${diag})
endforeach()
# setting default testing device
if(NOT CUDA_TEST_DEVICE)
set(CUDA_TEST_DEVICE -1)
endif()
# Hide rarely-used CUDA knobs from the default CMake GUI view.
mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
# Handle clang/libc++ issue
if(APPLE)
caffe_detect_darwin_version(OSX_VERSION)
# OSX 10.9 and higher uses clang/libc++ by default which is incompatible with old CUDA toolkits
if(OSX_VERSION VERSION_GREATER 10.8)
# enabled by default if and only if CUDA version is less than 7.0
caffe_option(USE_libstdcpp "Use libstdc++ instead of libc++" (CUDA_VERSION VERSION_LESS 7.0))
endif()
endif()
================================================
FILE: caffe/cmake/Dependencies.cmake
================================================
# These lists are later turned into target properties on main caffe library target.
# The PUBLIC/PRIVATE keywords are embedded inline in the lists and consumed at
# that point: PUBLIC entries propagate to dependents, PRIVATE ones are build-only.
set(Caffe_LINKER_LIBS "")
set(Caffe_INCLUDE_DIRS "")
set(Caffe_DEFINITIONS "")
set(Caffe_COMPILE_OPTIONS "")
# ---[ Boost
find_package(Boost 1.55 REQUIRED COMPONENTS system thread filesystem)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${Boost_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${Boost_LIBRARIES})
# ---[ Threads
find_package(Threads REQUIRED)
list(APPEND Caffe_LINKER_LIBS PRIVATE ${CMAKE_THREAD_LIBS_INIT})
# ---[ OpenMP
if(USE_OPENMP)
  # Ideally, this should be provided by the BLAS library IMPORTED target. However,
  # nobody does this, so we need to link to OpenMP explicitly and have the maintainer
  # to flick the switch manually as needed.
  #
  # Moreover, OpenMP package does not provide an IMPORTED target as well, and the
  # suggested way of linking to OpenMP is to append to CMAKE_{C,CXX}_FLAGS.
  # However, this naïve method will force any user of Caffe to add the same kludge
  # into their buildsystem again, so we put these options into per-target PUBLIC
  # compile options and link flags, so that they will be exported properly.
find_package(OpenMP REQUIRED)
list(APPEND Caffe_LINKER_LIBS PRIVATE ${OpenMP_CXX_FLAGS})
list(APPEND Caffe_COMPILE_OPTIONS PRIVATE ${OpenMP_CXX_FLAGS})
endif()
# ---[ Google-glog
include("cmake/External/glog.cmake")
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${GLOG_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${GLOG_LIBRARIES})
# ---[ Google-gflags
include("cmake/External/gflags.cmake")
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${GFLAGS_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${GFLAGS_LIBRARIES})
# ---[ Google-protobuf
include(cmake/ProtoBuf.cmake)
# ---[ HDF5
find_package(HDF5 COMPONENTS HL REQUIRED)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${HDF5_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES})
# ---[ LMDB
if(USE_LMDB)
find_package(LMDB REQUIRED)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${LMDB_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${LMDB_LIBRARIES})
list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_LMDB)
if(ALLOW_LMDB_NOLOCK)
list(APPEND Caffe_DEFINITIONS PRIVATE -DALLOW_LMDB_NOLOCK)
endif()
endif()
# ---[ LevelDB
if(USE_LEVELDB)
find_package(LevelDB REQUIRED)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${LevelDB_INCLUDES})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${LevelDB_LIBRARIES})
list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_LEVELDB)
endif()
# ---[ Snappy
# Deliberately gated on USE_LEVELDB: Snappy is only pulled in alongside LevelDB.
if(USE_LEVELDB)
find_package(Snappy REQUIRED)
list(APPEND Caffe_INCLUDE_DIRS PRIVATE ${Snappy_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS PRIVATE ${Snappy_LIBRARIES})
endif()
# ---[ CUDA
include(cmake/Cuda.cmake)
if(NOT HAVE_CUDA)
if(CPU_ONLY)
message(STATUS "-- CUDA is disabled. Building without it...")
else()
# CUDA was requested but not found; warn and fall back to a CPU-only build.
message(WARNING "-- CUDA is not detected by cmake. Building without it...")
endif()
list(APPEND Caffe_DEFINITIONS PUBLIC -DCPU_ONLY)
endif()
# NOTE(review): unlike the sections above, NCCL uses directory-scope
# include_directories()/add_definitions() instead of the Caffe_* lists —
# confirm whether this inconsistency is intentional before unifying.
if(USE_NCCL)
find_package(NCCL REQUIRED)
include_directories(SYSTEM ${NCCL_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${NCCL_LIBRARIES})
add_definitions(-DUSE_NCCL)
endif()
# ---[ OpenCV
if(USE_OPENCV)
# Try OpenCV 3.x first (which has imgcodecs); fall back to the 2.x component set.
find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs)
if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)
endif()
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${OpenCV_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${OpenCV_LIBS})
message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})")
list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_OPENCV)
endif()
# ---[ BLAS
if(NOT APPLE)
set(BLAS "Atlas" CACHE STRING "Selected BLAS library")
se
gitextract_8dm_v1kj/ ├── .idea/ │ ├── misc.xml │ ├── modules.xml │ ├── textspotter.iml │ ├── vcs.xml │ └── workspace.xml ├── README.md ├── caffe/ │ ├── .Doxyfile │ ├── .github/ │ │ └── ISSUE_TEMPLATE.md │ ├── .gitignore │ ├── .travis.yml │ ├── CMakeLists.txt │ ├── CONTRIBUTING.md │ ├── CONTRIBUTORS.md │ ├── INSTALL.md │ ├── LICENSE │ ├── Makefile │ ├── Makefile.config.example │ ├── README.md │ ├── caffe.cloc │ ├── cmake/ │ │ ├── ConfigGen.cmake │ │ ├── Cuda.cmake │ │ ├── Dependencies.cmake │ │ ├── External/ │ │ │ ├── gflags.cmake │ │ │ └── glog.cmake │ │ ├── Misc.cmake │ │ ├── Modules/ │ │ │ ├── FindAtlas.cmake │ │ │ ├── FindGFlags.cmake │ │ │ ├── FindGlog.cmake │ │ │ ├── FindLAPACK.cmake │ │ │ ├── FindLMDB.cmake │ │ │ ├── FindLevelDB.cmake │ │ │ ├── FindMKL.cmake │ │ │ ├── FindMatlabMex.cmake │ │ │ ├── FindNCCL.cmake │ │ │ ├── FindNumPy.cmake │ │ │ ├── FindOpenBLAS.cmake │ │ │ ├── FindSnappy.cmake │ │ │ └── FindvecLib.cmake │ │ ├── ProtoBuf.cmake │ │ ├── Summary.cmake │ │ ├── Targets.cmake │ │ ├── Templates/ │ │ │ ├── CaffeConfig.cmake.in │ │ │ ├── CaffeConfigVersion.cmake.in │ │ │ └── caffe_config.h.in │ │ ├── Uninstall.cmake.in │ │ ├── Utils.cmake │ │ └── lint.cmake │ ├── docker/ │ │ ├── README.md │ │ ├── cpu/ │ │ │ └── Dockerfile │ │ └── gpu/ │ │ └── Dockerfile │ ├── docs/ │ │ ├── CMakeLists.txt │ │ ├── CNAME │ │ ├── README.md │ │ ├── _config.yml │ │ ├── _layouts/ │ │ │ └── default.html │ │ ├── development.md │ │ ├── index.md │ │ ├── install_apt.md │ │ ├── install_apt_debian.md │ │ ├── install_osx.md │ │ ├── install_yum.md │ │ ├── installation.md │ │ ├── model_zoo.md │ │ ├── multigpu.md │ │ ├── stylesheets/ │ │ │ ├── pygment_trac.css │ │ │ ├── reset.css │ │ │ └── styles.css │ │ └── tutorial/ │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig/ │ │ │ └── .gitignore │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers/ │ │ │ ├── absval.md │ │ │ ├── accuracy.md │ │ │ ├── argmax.md │ │ │ ├── batchnorm.md │ │ │ ├── batchreindex.md │ │ │ 
├── bias.md │ │ │ ├── bnll.md │ │ │ ├── concat.md │ │ │ ├── contrastiveloss.md │ │ │ ├── convolution.md │ │ │ ├── crop.md │ │ │ ├── data.md │ │ │ ├── deconvolution.md │ │ │ ├── dropout.md │ │ │ ├── dummydata.md │ │ │ ├── eltwise.md │ │ │ ├── elu.md │ │ │ ├── embed.md │ │ │ ├── euclideanloss.md │ │ │ ├── exp.md │ │ │ ├── filter.md │ │ │ ├── flatten.md │ │ │ ├── hdf5data.md │ │ │ ├── hdf5output.md │ │ │ ├── hingeloss.md │ │ │ ├── im2col.md │ │ │ ├── imagedata.md │ │ │ ├── infogainloss.md │ │ │ ├── innerproduct.md │ │ │ ├── input.md │ │ │ ├── log.md │ │ │ ├── lrn.md │ │ │ ├── lstm.md │ │ │ ├── memorydata.md │ │ │ ├── multinomiallogisticloss.md │ │ │ ├── mvn.md │ │ │ ├── parameter.md │ │ │ ├── pooling.md │ │ │ ├── power.md │ │ │ ├── prelu.md │ │ │ ├── python.md │ │ │ ├── recurrent.md │ │ │ ├── reduction.md │ │ │ ├── relu.md │ │ │ ├── reshape.md │ │ │ ├── rnn.md │ │ │ ├── scale.md │ │ │ ├── sigmoid.md │ │ │ ├── sigmoidcrossentropyloss.md │ │ │ ├── silence.md │ │ │ ├── slice.md │ │ │ ├── softmax.md │ │ │ ├── softmaxwithloss.md │ │ │ ├── split.md │ │ │ ├── spp.md │ │ │ ├── tanh.md │ │ │ ├── threshold.md │ │ │ ├── tile.md │ │ │ └── windowdata.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md │ ├── examples/ │ │ ├── 00-classification.ipynb │ │ ├── 01-learning-lenet.ipynb │ │ ├── 02-fine-tuning.ipynb │ │ ├── CMakeLists.txt │ │ ├── brewing-logreg.ipynb │ │ ├── cifar10/ │ │ │ ├── cifar10_full.prototxt │ │ │ ├── cifar10_full_sigmoid_solver.prototxt │ │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt │ │ │ ├── cifar10_full_sigmoid_train_test.prototxt │ │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt │ │ │ ├── cifar10_full_solver.prototxt │ │ │ ├── cifar10_full_solver_lr1.prototxt │ │ │ ├── cifar10_full_solver_lr2.prototxt │ │ │ ├── cifar10_full_train_test.prototxt │ │ │ ├── cifar10_quick.prototxt │ │ │ ├── cifar10_quick_solver.prototxt │ │ │ ├── cifar10_quick_solver_lr1.prototxt │ │ │ ├── cifar10_quick_train_test.prototxt │ │ │ ├── 
convert_cifar_data.cpp │ │ │ ├── create_cifar10.sh │ │ │ ├── readme.md │ │ │ ├── train_full.sh │ │ │ ├── train_full_sigmoid.sh │ │ │ ├── train_full_sigmoid_bn.sh │ │ │ └── train_quick.sh │ │ ├── cpp_classification/ │ │ │ ├── classification.cpp │ │ │ └── readme.md │ │ ├── detection.ipynb │ │ ├── feature_extraction/ │ │ │ ├── imagenet_val.prototxt │ │ │ └── readme.md │ │ ├── finetune_flickr_style/ │ │ │ ├── assemble_data.py │ │ │ ├── readme.md │ │ │ └── style_names.txt │ │ ├── finetune_pascal_detection/ │ │ │ ├── pascal_finetune_solver.prototxt │ │ │ └── pascal_finetune_trainval_test.prototxt │ │ ├── hdf5_classification/ │ │ │ ├── nonlinear_auto_test.prototxt │ │ │ ├── nonlinear_auto_train.prototxt │ │ │ ├── nonlinear_train_val.prototxt │ │ │ └── train_val.prototxt │ │ ├── imagenet/ │ │ │ ├── create_imagenet.sh │ │ │ ├── make_imagenet_mean.sh │ │ │ ├── readme.md │ │ │ ├── resume_training.sh │ │ │ └── train_caffenet.sh │ │ ├── mnist/ │ │ │ ├── convert_mnist_data.cpp │ │ │ ├── create_mnist.sh │ │ │ ├── lenet.prototxt │ │ │ ├── lenet_adadelta_solver.prototxt │ │ │ ├── lenet_auto_solver.prototxt │ │ │ ├── lenet_consolidated_solver.prototxt │ │ │ ├── lenet_multistep_solver.prototxt │ │ │ ├── lenet_solver.prototxt │ │ │ ├── lenet_solver_adam.prototxt │ │ │ ├── lenet_solver_rmsprop.prototxt │ │ │ ├── lenet_train_test.prototxt │ │ │ ├── mnist_autoencoder.prototxt │ │ │ ├── mnist_autoencoder_solver.prototxt │ │ │ ├── mnist_autoencoder_solver_adadelta.prototxt │ │ │ ├── mnist_autoencoder_solver_adagrad.prototxt │ │ │ ├── mnist_autoencoder_solver_nesterov.prototxt │ │ │ ├── readme.md │ │ │ ├── train_lenet.sh │ │ │ ├── train_lenet_adam.sh │ │ │ ├── train_lenet_consolidated.sh │ │ │ ├── train_lenet_docker.sh │ │ │ ├── train_lenet_rmsprop.sh │ │ │ ├── train_mnist_autoencoder.sh │ │ │ ├── train_mnist_autoencoder_adadelta.sh │ │ │ ├── train_mnist_autoencoder_adagrad.sh │ │ │ └── train_mnist_autoencoder_nesterov.sh │ │ ├── net_surgery/ │ │ │ ├── bvlc_caffenet_full_conv.prototxt │ │ │ 
└── conv.prototxt │ │ ├── net_surgery.ipynb │ │ ├── pascal-multilabel-with-datalayer.ipynb │ │ ├── pycaffe/ │ │ │ ├── caffenet.py │ │ │ ├── layers/ │ │ │ │ ├── pascal_multilabel_datalayers.py │ │ │ │ └── pyloss.py │ │ │ ├── linreg.prototxt │ │ │ └── tools.py │ │ ├── siamese/ │ │ │ ├── convert_mnist_siamese_data.cpp │ │ │ ├── create_mnist_siamese.sh │ │ │ ├── mnist_siamese.ipynb │ │ │ ├── mnist_siamese.prototxt │ │ │ ├── mnist_siamese_solver.prototxt │ │ │ ├── mnist_siamese_train_test.prototxt │ │ │ ├── readme.md │ │ │ └── train_mnist_siamese.sh │ │ └── web_demo/ │ │ ├── app.py │ │ ├── exifutil.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ └── templates/ │ │ └── index.html │ ├── include/ │ │ └── caffe/ │ │ ├── blob.hpp │ │ ├── caffe.hpp │ │ ├── common.hpp │ │ ├── data_transformer.hpp │ │ ├── filler.hpp │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── layer_factory.hpp │ │ ├── layers/ │ │ │ ├── absval_layer.hpp │ │ │ ├── accuracy_layer.hpp │ │ │ ├── argmax_layer.hpp │ │ │ ├── at_layer.hpp │ │ │ ├── attention_lstm_layer.hpp │ │ │ ├── base_conv_layer.hpp │ │ │ ├── base_data_layer.hpp │ │ │ ├── batch_norm_layer.hpp │ │ │ ├── batch_reindex_layer.hpp │ │ │ ├── bias_layer.hpp │ │ │ ├── bnll_layer.hpp │ │ │ ├── concat_layer.hpp │ │ │ ├── contrastive_loss_layer.hpp │ │ │ ├── conv_layer.hpp │ │ │ ├── cosinangle_loss_layer.hpp │ │ │ ├── crop_layer.hpp │ │ │ ├── cudnn_conv_layer.hpp │ │ │ ├── cudnn_lcn_layer.hpp │ │ │ ├── cudnn_lrn_layer.hpp │ │ │ ├── cudnn_pooling_layer.hpp │ │ │ ├── cudnn_relu_layer.hpp │ │ │ ├── cudnn_sigmoid_layer.hpp │ │ │ ├── cudnn_softmax_layer.hpp │ │ │ ├── cudnn_tanh_layer.hpp │ │ │ ├── data_layer.hpp │ │ │ ├── deconv_layer.hpp │ │ │ ├── dropout_layer.hpp │ │ │ ├── dummy_data_layer.hpp │ │ │ ├── eltwise_layer.hpp │ │ │ ├── elu_layer.hpp │ │ │ ├── embed_layer.hpp │ │ │ ├── euclidean_loss_layer.hpp │ │ │ ├── exp_layer.hpp │ │ │ ├── filter_layer.hpp │ │ │ ├── flatten_layer.hpp │ │ │ ├── hdf5_data_layer.hpp │ │ │ ├── hdf5_output_layer.hpp │ │ │ 
├── hinge_loss_layer.hpp │ │ │ ├── im2col_layer.hpp │ │ │ ├── image_data_layer.hpp │ │ │ ├── infogain_loss_layer.hpp │ │ │ ├── inner_product_layer.hpp │ │ │ ├── input_layer.hpp │ │ │ ├── log_layer.hpp │ │ │ ├── loss_layer.hpp │ │ │ ├── lrn_layer.hpp │ │ │ ├── lstm_layer.hpp │ │ │ ├── lstm_new_layer.hpp │ │ │ ├── memory_data_layer.hpp │ │ │ ├── multinomial_logistic_loss_layer.hpp │ │ │ ├── mvn_layer.hpp │ │ │ ├── neuron_layer.hpp │ │ │ ├── parameter_layer.hpp │ │ │ ├── point_bilinear_layer.hpp │ │ │ ├── pooling_layer.hpp │ │ │ ├── power_layer.hpp │ │ │ ├── prelu_layer.hpp │ │ │ ├── python_layer.hpp │ │ │ ├── recurrent_layer.hpp │ │ │ ├── reduction_layer.hpp │ │ │ ├── relu_layer.hpp │ │ │ ├── reshape_layer.hpp │ │ │ ├── reverse_axis_layer.hpp │ │ │ ├── reverse_layer.hpp │ │ │ ├── rnn_layer.hpp │ │ │ ├── roi_pooling_layer.hpp │ │ │ ├── scale_layer.hpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.hpp │ │ │ ├── sigmoid_layer.hpp │ │ │ ├── silence_layer.hpp │ │ │ ├── slice_layer.hpp │ │ │ ├── smooth_L1_loss_layer.hpp │ │ │ ├── softmax_layer.hpp │ │ │ ├── softmax_loss_layer.hpp │ │ │ ├── split_layer.hpp │ │ │ ├── spp_layer.hpp │ │ │ ├── sum_layer.hpp │ │ │ ├── tanh_layer.hpp │ │ │ ├── threshold_layer.hpp │ │ │ ├── tile_layer.hpp │ │ │ ├── transpose_layer.hpp │ │ │ ├── unitbox_data_layer.hpp │ │ │ ├── unitbox_loss_layer.hpp │ │ │ └── window_data_layer.hpp │ │ ├── net.hpp │ │ ├── parallel.hpp │ │ ├── sgd_solvers.hpp │ │ ├── solver.hpp │ │ ├── solver_factory.hpp │ │ ├── syncedmem.hpp │ │ ├── test/ │ │ │ ├── test_caffe_main.hpp │ │ │ └── test_gradient_check_util.hpp │ │ └── util/ │ │ ├── benchmark.hpp │ │ ├── blocking_queue.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── db_leveldb.hpp │ │ ├── db_lmdb.hpp │ │ ├── device_alternate.hpp │ │ ├── format.hpp │ │ ├── gpu_util.cuh │ │ ├── hdf5.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── nccl.hpp │ │ ├── rng.hpp │ │ ├── signal_handler.h │ │ └── 
upgrade_proto.hpp │ ├── matlab/ │ │ ├── +caffe/ │ │ │ ├── +test/ │ │ │ │ ├── test_io.m │ │ │ │ ├── test_net.m │ │ │ │ └── test_solver.m │ │ │ ├── Blob.m │ │ │ ├── Layer.m │ │ │ ├── Net.m │ │ │ ├── Solver.m │ │ │ ├── get_net.m │ │ │ ├── get_solver.m │ │ │ ├── imagenet/ │ │ │ │ └── ilsvrc_2012_mean.mat │ │ │ ├── io.m │ │ │ ├── private/ │ │ │ │ ├── CHECK.m │ │ │ │ ├── CHECK_FILE_EXIST.m │ │ │ │ ├── caffe_.cpp │ │ │ │ └── is_valid_handle.m │ │ │ ├── reset_all.m │ │ │ ├── run_tests.m │ │ │ ├── set_device.m │ │ │ ├── set_mode_cpu.m │ │ │ ├── set_mode_gpu.m │ │ │ └── version.m │ │ ├── CMakeLists.txt │ │ ├── demo/ │ │ │ └── classification_demo.m │ │ └── hdf5creation/ │ │ ├── .gitignore │ │ ├── demo.m │ │ └── store2hdf5.m │ ├── python/ │ │ ├── CMakeLists.txt │ │ ├── caffe/ │ │ │ ├── __init__.py │ │ │ ├── _caffe.cpp │ │ │ ├── classifier.py │ │ │ ├── coord_map.py │ │ │ ├── detector.py │ │ │ ├── draw.py │ │ │ ├── imagenet/ │ │ │ │ └── ilsvrc_2012_mean.npy │ │ │ ├── io.py │ │ │ ├── net_spec.py │ │ │ ├── pycaffe.py │ │ │ └── test/ │ │ │ ├── test_coord_map.py │ │ │ ├── test_draw.py │ │ │ ├── test_gradient_for_python_layer.py │ │ │ ├── test_io.py │ │ │ ├── test_layer_type_list.py │ │ │ ├── test_nccl.py │ │ │ ├── test_net.py │ │ │ ├── test_net_spec.py │ │ │ ├── test_proposal.py │ │ │ ├── test_python_layer.py │ │ │ ├── test_python_layer_with_param_str.py │ │ │ ├── test_solver.py │ │ │ └── test_w_pooling.py │ │ ├── classify.py │ │ ├── detect.py │ │ ├── draw_net.py │ │ ├── requirements.txt │ │ └── train.py │ ├── scripts/ │ │ ├── build_docs.sh │ │ ├── caffe │ │ ├── copy_notebook.py │ │ ├── cpp_lint.py │ │ ├── deploy_docs.sh │ │ ├── download_model_binary.py │ │ ├── download_model_from_gist.sh │ │ ├── gather_examples.sh │ │ ├── split_caffe_proto.py │ │ ├── travis/ │ │ │ ├── build.sh │ │ │ ├── configure-cmake.sh │ │ │ ├── configure-make.sh │ │ │ ├── configure.sh │ │ │ ├── defaults.sh │ │ │ ├── install-deps.sh │ │ │ ├── install-python-deps.sh │ │ │ ├── setup-venv.sh │ │ │ └── test.sh │ │ 
└── upload_model_to_gist.sh │ ├── src/ │ │ ├── caffe/ │ │ │ ├── CMakeLists.txt │ │ │ ├── blob.cpp │ │ │ ├── common.cpp │ │ │ ├── data_transformer.cpp │ │ │ ├── internal_thread.cpp │ │ │ ├── layer.cpp │ │ │ ├── layer_factory.cpp │ │ │ ├── layers/ │ │ │ │ ├── absval_layer.cpp │ │ │ │ ├── absval_layer.cu │ │ │ │ ├── accuracy_layer.cpp │ │ │ │ ├── argmax_layer.cpp │ │ │ │ ├── at_layer.cpp │ │ │ │ ├── at_layer.cu │ │ │ │ ├── attention_lstm_layer.cpp │ │ │ │ ├── base_conv_layer.cpp │ │ │ │ ├── base_data_layer.cpp │ │ │ │ ├── base_data_layer.cu │ │ │ │ ├── batch_norm_layer.cpp │ │ │ │ ├── batch_norm_layer.cu │ │ │ │ ├── batch_reindex_layer.cpp │ │ │ │ ├── batch_reindex_layer.cu │ │ │ │ ├── bias_layer.cpp │ │ │ │ ├── bias_layer.cu │ │ │ │ ├── bnll_layer.cpp │ │ │ │ ├── bnll_layer.cu │ │ │ │ ├── concat_layer.cpp │ │ │ │ ├── concat_layer.cu │ │ │ │ ├── contrastive_loss_layer.cpp │ │ │ │ ├── contrastive_loss_layer.cu │ │ │ │ ├── conv_layer.cpp │ │ │ │ ├── conv_layer.cu │ │ │ │ ├── cosinangle_loss_layer.cpp │ │ │ │ ├── cosinangle_loss_layer.cu │ │ │ │ ├── crop_layer.cpp │ │ │ │ ├── crop_layer.cu │ │ │ │ ├── cudnn_conv_layer.cpp │ │ │ │ ├── cudnn_conv_layer.cu │ │ │ │ ├── cudnn_lcn_layer.cpp │ │ │ │ ├── cudnn_lcn_layer.cu │ │ │ │ ├── cudnn_lrn_layer.cpp │ │ │ │ ├── cudnn_lrn_layer.cu │ │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ │ ├── cudnn_pooling_layer.cu │ │ │ │ ├── cudnn_relu_layer.cpp │ │ │ │ ├── cudnn_relu_layer.cu │ │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ │ ├── cudnn_softmax_layer.cu │ │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ │ ├── cudnn_tanh_layer.cu │ │ │ │ ├── data_layer.cpp │ │ │ │ ├── deconv_layer.cpp │ │ │ │ ├── deconv_layer.cu │ │ │ │ ├── dropout_layer.cpp │ │ │ │ ├── dropout_layer.cu │ │ │ │ ├── dummy_data_layer.cpp │ │ │ │ ├── eltwise_layer.cpp │ │ │ │ ├── eltwise_layer.cu │ │ │ │ ├── elu_layer.cpp │ │ │ │ ├── elu_layer.cu │ │ │ │ ├── embed_layer.cpp │ │ │ │ ├── embed_layer.cu │ │ │ │ ├── 
euclidean_loss_layer.cpp │ │ │ │ ├── euclidean_loss_layer.cu │ │ │ │ ├── exp_layer.cpp │ │ │ │ ├── exp_layer.cu │ │ │ │ ├── filter_layer.cpp │ │ │ │ ├── filter_layer.cu │ │ │ │ ├── flatten_layer.cpp │ │ │ │ ├── hdf5_data_layer.cpp │ │ │ │ ├── hdf5_data_layer.cu │ │ │ │ ├── hdf5_output_layer.cpp │ │ │ │ ├── hdf5_output_layer.cu │ │ │ │ ├── hinge_loss_layer.cpp │ │ │ │ ├── im2col_layer.cpp │ │ │ │ ├── im2col_layer.cu │ │ │ │ ├── image_data_layer.cpp │ │ │ │ ├── infogain_loss_layer.cpp │ │ │ │ ├── inner_product_layer.cpp │ │ │ │ ├── inner_product_layer.cu │ │ │ │ ├── input_layer.cpp │ │ │ │ ├── log_layer.cpp │ │ │ │ ├── log_layer.cu │ │ │ │ ├── loss_layer.cpp │ │ │ │ ├── lrn_layer.cpp │ │ │ │ ├── lrn_layer.cu │ │ │ │ ├── lstm_layer.cpp │ │ │ │ ├── lstm_layer.cu │ │ │ │ ├── lstm_new_layer.cpp │ │ │ │ ├── lstm_unit_layer.cpp │ │ │ │ ├── lstm_unit_layer.cu │ │ │ │ ├── memory_data_layer.cpp │ │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ │ ├── mvn_layer.cpp │ │ │ │ ├── mvn_layer.cu │ │ │ │ ├── neuron_layer.cpp │ │ │ │ ├── parameter_layer.cpp │ │ │ │ ├── point_bilinear_layer.cpp │ │ │ │ ├── pooling_layer.cpp │ │ │ │ ├── pooling_layer.cu │ │ │ │ ├── power_layer.cpp │ │ │ │ ├── power_layer.cu │ │ │ │ ├── prelu_layer.cpp │ │ │ │ ├── prelu_layer.cu │ │ │ │ ├── recurrent_layer.cpp │ │ │ │ ├── recurrent_layer.cu │ │ │ │ ├── reduction_layer.cpp │ │ │ │ ├── reduction_layer.cu │ │ │ │ ├── relu_layer.cpp │ │ │ │ ├── relu_layer.cu │ │ │ │ ├── reshape_layer.cpp │ │ │ │ ├── reverse_axis_layer.cpp │ │ │ │ ├── reverse_axis_layer.cu │ │ │ │ ├── rnn_layer.cpp │ │ │ │ ├── roi_pooling_layer.cpp │ │ │ │ ├── roi_pooling_layer.cu │ │ │ │ ├── scale_layer.cpp │ │ │ │ ├── scale_layer.cu │ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ │ │ ├── sigmoid_layer.cpp │ │ │ │ ├── sigmoid_layer.cu │ │ │ │ ├── silence_layer.cpp │ │ │ │ ├── silence_layer.cu │ │ │ │ ├── slice_layer.cpp │ │ │ │ ├── slice_layer.cu │ │ │ │ ├── smooth_L1_loss_layer.cpp │ │ 
│ │ ├── smooth_L1_loss_layer.cu │ │ │ │ ├── softmax_layer.cpp │ │ │ │ ├── softmax_layer.cu │ │ │ │ ├── softmax_loss_layer.cpp │ │ │ │ ├── softmax_loss_layer.cu │ │ │ │ ├── split_layer.cpp │ │ │ │ ├── split_layer.cu │ │ │ │ ├── spp_layer.cpp │ │ │ │ ├── sum_layer.cpp │ │ │ │ ├── tanh_layer.cpp │ │ │ │ ├── tanh_layer.cu │ │ │ │ ├── threshold_layer.cpp │ │ │ │ ├── threshold_layer.cu │ │ │ │ ├── tile_layer.cpp │ │ │ │ ├── tile_layer.cu │ │ │ │ ├── transpose_layer.cpp │ │ │ │ ├── transpose_layer.cu │ │ │ │ ├── unitbox_loss_layer.cpp │ │ │ │ └── window_data_layer.cpp │ │ │ ├── net.cpp │ │ │ ├── parallel.cpp │ │ │ ├── proto/ │ │ │ │ └── caffe.proto │ │ │ ├── solver.cpp │ │ │ ├── solvers/ │ │ │ │ ├── adadelta_solver.cpp │ │ │ │ ├── adadelta_solver.cu │ │ │ │ ├── adagrad_solver.cpp │ │ │ │ ├── adagrad_solver.cu │ │ │ │ ├── adam_solver.cpp │ │ │ │ ├── adam_solver.cu │ │ │ │ ├── nesterov_solver.cpp │ │ │ │ ├── nesterov_solver.cu │ │ │ │ ├── rmsprop_solver.cpp │ │ │ │ ├── rmsprop_solver.cu │ │ │ │ ├── sgd_solver.cpp │ │ │ │ └── sgd_solver.cu │ │ │ ├── syncedmem.cpp │ │ │ ├── test/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── test_accuracy_layer.cpp │ │ │ │ ├── test_argmax_layer.cpp │ │ │ │ ├── test_attlstm_layer.cpp │ │ │ │ ├── test_batch_norm_layer.cpp │ │ │ │ ├── test_batch_reindex_layer.cpp │ │ │ │ ├── test_benchmark.cpp │ │ │ │ ├── test_bias_layer.cpp │ │ │ │ ├── test_blob.cpp │ │ │ │ ├── test_bn_layer.cpp │ │ │ │ ├── test_caffe_main.cpp │ │ │ │ ├── test_common.cpp │ │ │ │ ├── test_concat_layer.cpp │ │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ │ ├── test_convolution_layer.cpp │ │ │ │ ├── test_crop_layer.cpp │ │ │ │ ├── test_data/ │ │ │ │ │ ├── generate_sample_data.py │ │ │ │ │ ├── sample_data.h5 │ │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ │ ├── sample_data_list.txt │ │ │ │ │ ├── solver_data.h5 │ │ │ │ │ └── solver_data_list.txt │ │ │ │ ├── test_data_layer.cpp │ │ │ │ ├── test_data_transformer.cpp │ │ │ │ ├── test_db.cpp │ │ │ │ ├── test_deconvolution_layer.cpp │ │ │ │ ├── 
test_deformconv_layer.cpp │ │ │ │ ├── test_dummy_data_layer.cpp │ │ │ │ ├── test_eltwise_layer.cpp │ │ │ │ ├── test_embed_layer.cpp │ │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ │ ├── test_filler.cpp │ │ │ │ ├── test_filter_layer.cpp │ │ │ │ ├── test_flatten_layer.cpp │ │ │ │ ├── test_gradient_based_solver.cpp │ │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ │ ├── test_hdf5data_layer.cpp │ │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ │ ├── test_im2col_kernel.cu │ │ │ │ ├── test_im2col_layer.cpp │ │ │ │ ├── test_image_data_layer.cpp │ │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ │ ├── test_inner_product_layer.cpp │ │ │ │ ├── test_internal_thread.cpp │ │ │ │ ├── test_io.cpp │ │ │ │ ├── test_layer_factory.cpp │ │ │ │ ├── test_lrn_layer.cpp │ │ │ │ ├── test_lstm_new_layer.cpp │ │ │ │ ├── test_math_functions.cpp │ │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ │ ├── test_memory_data_layer.cpp │ │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ │ ├── test_mvn_layer.cpp │ │ │ │ ├── test_net.cpp │ │ │ │ ├── test_neuron_layer.cpp │ │ │ │ ├── test_platform.cpp │ │ │ │ ├── test_point_bilinear_layer.cpp │ │ │ │ ├── test_pooling_layer.cpp │ │ │ │ ├── test_power_layer.cpp │ │ │ │ ├── test_protobuf.cpp │ │ │ │ ├── test_random_number_generator.cpp │ │ │ │ ├── test_reduction_layer.cpp │ │ │ │ ├── test_reshape_layer.cpp │ │ │ │ ├── test_rnn_layer.cpp │ │ │ │ ├── test_scale_layer.cpp │ │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ │ ├── test_slice_layer.cpp │ │ │ │ ├── test_softmax_layer.cpp │ │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ │ ├── test_solver.cpp │ │ │ │ ├── test_solver_factory.cpp │ │ │ │ ├── test_split_layer.cpp │ │ │ │ ├── test_spp_layer.cpp │ │ │ │ ├── test_stochastic_pooling.cpp │ │ │ │ ├── test_sum_layer.cpp │ │ │ │ ├── test_syncedmem.cpp │ │ │ │ ├── test_tanh_layer.cpp │ │ │ │ ├── test_threshold_layer.cpp │ │ │ │ ├── test_tile_layer.cpp │ │ │ │ ├── test_upgrade_proto.cpp │ │ │ │ └── test_util_blas.cpp │ │ │ └── util/ │ │ │ ├── benchmark.cpp │ │ 
│ ├── blocking_queue.cpp │ │ │ ├── cudnn.cpp │ │ │ ├── db.cpp │ │ │ ├── db_leveldb.cpp │ │ │ ├── db_lmdb.cpp │ │ │ ├── hdf5.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── insert_splits.cpp │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ ├── signal_handler.cpp │ │ │ └── upgrade_proto.cpp │ │ └── gtest/ │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc │ └── tools/ │ ├── CMakeLists.txt │ ├── binary_to_text.cpp │ ├── caffe.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── convert_model.cpp │ ├── copy_layers.cpp │ ├── device_query.cpp │ ├── extra/ │ │ ├── convert_deform_conv.py │ │ ├── extract_seconds.py │ │ ├── launch_resize_and_crop_images.sh │ │ ├── parse_log.py │ │ ├── parse_log.sh │ │ ├── plot_log.gnuplot.example │ │ ├── plot_training_log.py.example │ │ ├── resize_and_crop_images.py │ │ └── summarize.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── net_speed_benchmark.cpp │ ├── test_net.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ ├── upgrade_net_proto_text.cpp │ └── upgrade_solver_proto_text.cpp ├── cfg.py ├── dicts/ │ ├── dict.txt │ ├── generic_lex.txt │ └── weak_voc.txt ├── models/ │ ├── test_iou.pt │ ├── test_lstm.pt │ └── train.pt ├── pylayer/ │ ├── tool.py │ └── tool_layers.py ├── results/ │ └── res_img_105.txt └── test.py
Showing preview only (767K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (3195 symbols across 363 files)
FILE: caffe/examples/cifar10/convert_cifar_data.cpp
function read_image (line 31) | void read_image(std::ifstream* file, int* label, char* buffer) {
function convert_dataset (line 39) | void convert_dataset(const string& input_folder, const string& output_fo...
function main (line 93) | int main(int argc, char** argv) {
FILE: caffe/examples/cpp_classification/classification.cpp
class Classifier (line 21) | class Classifier {
function PairCompare (line 86) | static bool PairCompare(const std::pair<float, int>& lhs,
function Argmax (line 92) | static std::vector<int> Argmax(const std::vector<float>& v, int N) {
function main (line 229) | int main(int argc, char** argv) {
function main (line 262) | int main(int argc, char** argv) {
FILE: caffe/examples/finetune_flickr_style/assemble_data.py
function download_image (line 23) | def download_image(args_tuple):
FILE: caffe/examples/mnist/convert_mnist_data.cpp
function swap_endian (line 38) | uint32_t swap_endian(uint32_t val) {
function convert_dataset (line 43) | void convert_dataset(const char* image_filename, const char* label_filen...
function main (line 113) | int main(int argc, char** argv) {
function main (line 143) | int main(int argc, char** argv) {
FILE: caffe/examples/pycaffe/caffenet.py
function conv_relu (line 7) | def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
function fc_relu (line 12) | def fc_relu(bottom, nout):
function max_pool (line 16) | def max_pool(bottom, ks, stride=1):
function caffenet (line 19) | def caffenet(lmdb, batch_size=256, include_acc=False):
function make_net (line 47) | def make_net():
FILE: caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py
class PascalMultilabelDataLayerSync (line 20) | class PascalMultilabelDataLayerSync(caffe.Layer):
method setup (line 27) | def setup(self, bottom, top):
method forward (line 55) | def forward(self, bottom, top):
method reshape (line 67) | def reshape(self, bottom, top):
method backward (line 74) | def backward(self, top, propagate_down, bottom):
class BatchLoader (line 81) | class BatchLoader(object):
method __init__ (line 90) | def __init__(self, params, result):
method load_next_image (line 106) | def load_next_image(self):
function load_pascal_annotation (line 140) | def load_pascal_annotation(index, pascal_root):
function check_params (line 196) | def check_params(params):
function print_info (line 208) | def print_info(name, params):
FILE: caffe/examples/pycaffe/layers/pyloss.py
class EuclideanLossLayer (line 5) | class EuclideanLossLayer(caffe.Layer):
method setup (line 11) | def setup(self, bottom, top):
method reshape (line 16) | def reshape(self, bottom, top):
method forward (line 25) | def forward(self, bottom, top):
method backward (line 29) | def backward(self, top, propagate_down, bottom):
FILE: caffe/examples/pycaffe/tools.py
class SimpleTransformer (line 4) | class SimpleTransformer:
method __init__ (line 11) | def __init__(self, mean=[128, 128, 128]):
method set_mean (line 15) | def set_mean(self, mean):
method set_scale (line 21) | def set_scale(self, scale):
method preprocess (line 27) | def preprocess(self, im):
method deprocess (line 41) | def deprocess(self, im):
class CaffeSolver (line 53) | class CaffeSolver:
method __init__ (line 62) | def __init__(self, testnet_prototxt_path="testnet.prototxt",
method add_from_file (line 101) | def add_from_file(self, filepath):
method write (line 113) | def write(self, filepath):
FILE: caffe/examples/siamese/convert_mnist_siamese_data.cpp
function swap_endian (line 22) | uint32_t swap_endian(uint32_t val) {
function read_image (line 27) | void read_image(std::ifstream* image_file, std::ifstream* label_file,
function convert_dataset (line 36) | void convert_dataset(const char* image_filename, const char* label_filen...
function main (line 109) | int main(int argc, char** argv) {
function main (line 126) | int main(int argc, char** argv) {
FILE: caffe/examples/web_demo/app.py
function index (line 29) | def index():
function classify_url (line 34) | def classify_url():
function classify_upload (line 57) | def classify_upload():
function embed_image_html (line 82) | def embed_image_html(image):
function allowed_file (line 92) | def allowed_file(filename):
class ImagenetClassifier (line 99) | class ImagenetClassifier(object):
method __init__ (line 119) | def __init__(self, model_def_file, pretrained_model_file, mean_file,
method classify_image (line 148) | def classify_image(self, image):
function start_tornado (line 184) | def start_tornado(app, port=5000):
function start_from_terminal (line 192) | def start_from_terminal(app):
FILE: caffe/examples/web_demo/exifutil.py
function open_oriented_im (line 19) | def open_oriented_im(im_path):
function apply_orientation (line 35) | def apply_orientation(im, orientation):
FILE: caffe/include/caffe/blob.hpp
type caffe (line 14) | namespace caffe {
class Blob (line 24) | class Blob {
method Blob (line 26) | Blob()
method string (line 60) | inline string shape_string() const {
method shape (line 77) | inline int shape(int index) const {
method num_axes (line 80) | inline int num_axes() const { return shape_.size(); }
method count (line 81) | inline int count() const { return count_; }
method count (line 91) | inline int count(int start_axis, int end_axis) const {
method count (line 109) | inline int count(int start_axis) const {
method CanonicalAxisIndex (line 124) | inline int CanonicalAxisIndex(int axis_index) const {
method num (line 138) | inline int num() const { return LegacyShape(0); }
method channels (line 140) | inline int channels() const { return LegacyShape(1); }
method height (line 142) | inline int height() const { return LegacyShape(2); }
method width (line 144) | inline int width() const { return LegacyShape(3); }
method LegacyShape (line 145) | inline int LegacyShape(int index) const {
method offset (line 159) | inline int offset(const int n, const int c = 0, const int h = 0,
method offset (line 172) | inline int offset(const vector<int>& indices) const {
method Dtype (line 197) | inline Dtype data_at(const int n, const int c, const int h,
method Dtype (line 202) | inline Dtype diff_at(const int n, const int c, const int h,
method Dtype (line 207) | inline Dtype data_at(const vector<int>& index) const {
method Dtype (line 211) | inline Dtype diff_at(const vector<int>& index) const {
FILE: caffe/include/caffe/common.hpp
type cv (line 78) | namespace cv { class Mat; }
class Mat (line 78) | class Mat
type caffe (line 80) | namespace caffe {
class Caffe (line 107) | class Caffe {
type Brew (line 116) | enum Brew { CPU, GPU }
class RNG (line 120) | class RNG {
class Generator (line 128) | class Generator
method RNG (line 133) | inline static RNG& rng_stream() {
class Generator (line 128) | class Generator
method cublasHandle_t (line 140) | inline static cublasHandle_t cublas_handle() { return Get().cublas_h...
method curandGenerator_t (line 141) | inline static curandGenerator_t curand_generator() {
method Brew (line 147) | inline static Brew mode() { return Get().mode_; }
method set_mode (line 153) | inline static void set_mode(Brew mode) { Get().mode_ = mode; }
method solver_count (line 167) | inline static int solver_count() { return Get().solver_count_; }
method set_solver_count (line 168) | inline static void set_solver_count(int val) { Get().solver_count_ =...
method solver_rank (line 169) | inline static int solver_rank() { return Get().solver_rank_; }
method set_solver_rank (line 170) | inline static void set_solver_rank(int val) { Get().solver_rank_ = v...
method multiprocess (line 171) | inline static bool multiprocess() { return Get().multiprocess_; }
method set_multiprocess (line 172) | inline static void set_multiprocess(bool val) { Get().multiprocess_ ...
method root_solver (line 173) | inline static bool root_solver() { return Get().solver_rank_ == 0; }
method setIterSize (line 174) | inline static void setIterSize(int iter_size){Get().iter_size_ = ite...
method getIterSize (line 175) | inline static int getIterSize(){return Get().iter_size_;}
method setIter (line 176) | inline static void setIter(int iter){Get().iter_ = iter;}
method getIter (line 177) | inline static int getIter(){return Get().iter_;}
FILE: caffe/include/caffe/data_transformer.hpp
type caffe (line 10) | namespace caffe {
class DataTransformer (line 17) | class DataTransformer {
FILE: caffe/include/caffe/filler.hpp
type caffe (line 15) | namespace caffe {
class Filler (line 19) | class Filler {
method Filler (line 21) | explicit Filler(const FillerParameter& param) : filler_param_(param) {}
class ConstantFiller (line 31) | class ConstantFiller : public Filler<Dtype> {
method ConstantFiller (line 33) | explicit ConstantFiller(const FillerParameter& param)
method Fill (line 35) | virtual void Fill(Blob<Dtype>* blob) {
class UniformFiller (line 50) | class UniformFiller : public Filler<Dtype> {
method UniformFiller (line 52) | explicit UniformFiller(const FillerParameter& param)
method Fill (line 54) | virtual void Fill(Blob<Dtype>* blob) {
class GaussianFiller (line 65) | class GaussianFiller : public Filler<Dtype> {
method GaussianFiller (line 67) | explicit GaussianFiller(const FillerParameter& param)
method Fill (line 69) | virtual void Fill(Blob<Dtype>* blob) {
class PositiveUnitballFiller (line 101) | class PositiveUnitballFiller : public Filler<Dtype> {
method PositiveUnitballFiller (line 103) | explicit PositiveUnitballFiller(const FillerParameter& param)
method Fill (line 105) | virtual void Fill(Blob<Dtype>* blob) {
class XavierFiller (line 144) | class XavierFiller : public Filler<Dtype> {
method XavierFiller (line 146) | explicit XavierFiller(const FillerParameter& param)
method Fill (line 148) | virtual void Fill(Blob<Dtype>* blob) {
class MSRAFiller (line 186) | class MSRAFiller : public Filler<Dtype> {
method MSRAFiller (line 188) | explicit MSRAFiller(const FillerParameter& param)
method Fill (line 190) | virtual void Fill(Blob<Dtype>* blob) {
class BilinearFiller (line 244) | class BilinearFiller : public Filler<Dtype> {
method BilinearFiller (line 246) | explicit BilinearFiller(const FillerParameter& param)
method Fill (line 248) | virtual void Fill(Blob<Dtype>* blob) {
FILE: caffe/include/caffe/internal_thread.hpp
type boost (line 10) | namespace boost { class thread; }
class thread (line 10) | class thread
type caffe (line 12) | namespace caffe {
class InternalThread (line 19) | class InternalThread {
method InternalThread (line 21) | InternalThread() : thread_() {}
method InternalThreadEntry (line 39) | virtual void InternalThreadEntry() {}
FILE: caffe/include/caffe/layer.hpp
type boost (line 18) | namespace boost { class mutex; }
class mutex (line 18) | class mutex
type caffe (line 20) | namespace caffe {
class Layer (line 33) | class Layer {
method Layer (line 40) | explicit Layer(const LayerParameter& param)
method SetUp (line 67) | void SetUp(const vector<Blob<Dtype>*>& bottom,
method LayerSetUp (line 91) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method LayerParameter (line 164) | const LayerParameter& layer_param() const { return layer_param_; }
method Dtype (line 174) | inline Dtype loss(const int top_index) const {
method set_loss (line 181) | inline void set_loss(const int top_index, const Dtype value) {
method ExactNumBottomBlobs (line 200) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 208) | virtual inline int MinBottomBlobs() const { return -1; }
method MaxBottomBlobs (line 216) | virtual inline int MaxBottomBlobs() const { return -1; }
method ExactNumTopBlobs (line 224) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 232) | virtual inline int MinTopBlobs() const { return -1; }
method MaxTopBlobs (line 240) | virtual inline int MaxTopBlobs() const { return -1; }
method EqualNumBottomTopBlobs (line 248) | virtual inline bool EqualNumBottomTopBlobs() const { return false; }
method AutoTopBlobs (line 258) | virtual inline bool AutoTopBlobs() const { return false; }
method AllowForceBackward (line 268) | virtual inline bool AllowForceBackward(const int bottom_index) const {
method param_propagate_down (line 279) | inline bool param_propagate_down(const int param_id) {
method set_param_propagate_down (line 287) | inline void set_param_propagate_down(const int param_id, const bool ...
method Forward_gpu (line 316) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 334) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
method CheckBlobCounts (line 346) | virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
method SetLossWeights (line 389) | inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
function Dtype (line 413) | inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
FILE: caffe/include/caffe/layer_factory.hpp
type caffe (line 50) | namespace caffe {
class Layer (line 53) | class Layer
class LayerRegistry (line 56) | class LayerRegistry {
method CreatorRegistry (line 61) | static CreatorRegistry& Registry() {
method AddCreator (line 67) | static void AddCreator(const string& type, Creator creator) {
method CreateLayer (line 75) | static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& p...
method LayerTypeList (line 86) | static vector<string> LayerTypeList() {
method LayerRegistry (line 99) | LayerRegistry() {}
method string (line 101) | static string LayerTypeListString() {
class LayerRegisterer (line 117) | class LayerRegisterer {
method LayerRegisterer (line 119) | LayerRegisterer(const string& type,
FILE: caffe/include/caffe/layers/absval_layer.hpp
type caffe (line 12) | namespace caffe {
class AbsValLayer (line 25) | class AbsValLayer : public NeuronLayer<Dtype> {
method AbsValLayer (line 27) | explicit AbsValLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 33) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/accuracy_layer.hpp
type caffe (line 12) | namespace caffe {
class AccuracyLayer (line 19) | class AccuracyLayer : public Layer<Dtype> {
method AccuracyLayer (line 29) | explicit AccuracyLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 37) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method MinTopBlobs (line 41) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 42) | virtual inline int MaxTopBlobs() const { return 2; }
method Backward_cpu (line 74) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/argmax_layer.hpp
type caffe (line 10) | namespace caffe {
class ArgMaxLayer (line 24) | class ArgMaxLayer : public Layer<Dtype> {
method ArgMaxLayer (line 38) | explicit ArgMaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 46) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 47) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Backward_cpu (line 65) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/at_layer.hpp
type caffe (line 9) | namespace caffe {
class AffineTransformerLayer (line 12) | class AffineTransformerLayer : public Layer<Dtype> {
method AffineTransformerLayer (line 15) | explicit AffineTransformerLayer(const LayerParameter& param)
method MinBottomBlobs (line 23) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 24) | virtual inline int MaxBottomBlobs() const { return 3; }
method MinTopBlobs (line 25) | virtual inline int MinTopBlobs() const { return 1; }
method Forward_cpu (line 28) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 32) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method AllowForceBackward (line 38) | virtual inline bool AllowForceBackward(int bottom_index) const {
FILE: caffe/include/caffe/layers/attention_lstm_layer.hpp
type caffe (line 17) | namespace caffe {
class RecurrentLayer (line 19) | class RecurrentLayer
class AttLstmLayer (line 50) | class AttLstmLayer : public RecurrentLayer<Dtype> {
method AttLstmLayer (line 52) | explicit AttLstmLayer(const LayerParameter& param)
method MinBottomBlobs (line 69) | virtual inline int MinBottomBlobs() const {
method MaxBottomBlobs (line 72) | virtual inline int MaxBottomBlobs() const { return MinBottomBlobs() ...
method ExactNumTopBlobs (line 73) | virtual inline int ExactNumTopBlobs() const {
FILE: caffe/include/caffe/layers/base_conv_layer.hpp
type caffe (line 11) | namespace caffe {
class BaseConvolutionLayer (line 18) | class BaseConvolutionLayer : public Layer<Dtype> {
method BaseConvolutionLayer (line 20) | explicit BaseConvolutionLayer(const LayerParameter& param)
method MinBottomBlobs (line 27) | virtual inline int MinBottomBlobs() const { return 1; }
method MinTopBlobs (line 28) | virtual inline int MinTopBlobs() const { return 1; }
method EqualNumBottomTopBlobs (line 29) | virtual inline bool EqualNumBottomTopBlobs() const { return true; }
method input_shape (line 56) | inline int input_shape(int i) {
method conv_im2col_cpu (line 98) | inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_cpu (line 112) | inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
method conv_im2col_gpu (line 127) | inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_gpu (line 142) | inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
FILE: caffe/include/caffe/layers/base_data_layer.hpp
type caffe (line 13) | namespace caffe {
class BaseDataLayer (line 21) | class BaseDataLayer : public Layer<Dtype> {
method DataLayerSetUp (line 29) | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 32) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 35) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 37) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class Batch (line 47) | class Batch {
class BasePrefetchingDataLayer (line 53) | class BasePrefetchingDataLayer :
FILE: caffe/include/caffe/layers/batch_norm_layer.hpp
type caffe (line 10) | namespace caffe {
class BatchNormLayer (line 40) | class BatchNormLayer : public Layer<Dtype> {
method BatchNormLayer (line 42) | explicit BatchNormLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 50) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 51) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/batch_reindex_layer.hpp
type caffe (line 11) | namespace caffe {
class BatchReindexLayer (line 21) | class BatchReindexLayer : public Layer<Dtype> {
method BatchReindexLayer (line 23) | explicit BatchReindexLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
type pair_sort_first (line 71) | struct pair_sort_first {
FILE: caffe/include/caffe/layers/bias_layer.hpp
type caffe (line 10) | namespace caffe {
class BiasLayer (line 22) | class BiasLayer : public Layer<Dtype> {
method BiasLayer (line 24) | explicit BiasLayer(const LayerParameter& param)
method MinBottomBlobs (line 32) | virtual inline int MinBottomBlobs() const { return 1; }
method MaxBottomBlobs (line 33) | virtual inline int MaxBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/bnll_layer.hpp
type caffe (line 12) | namespace caffe {
class BNLLLayer (line 32) | class BNLLLayer : public NeuronLayer<Dtype> {
method BNLLLayer (line 34) | explicit BNLLLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/concat_layer.hpp
type caffe (line 10) | namespace caffe {
class ConcatLayer (line 17) | class ConcatLayer : public Layer<Dtype> {
method ConcatLayer (line 19) | explicit ConcatLayer(const LayerParameter& param)
method MinBottomBlobs (line 27) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 28) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/contrastive_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class ContrastiveLossLayer (line 39) | class ContrastiveLossLayer : public LossLayer<Dtype> {
method ContrastiveLossLayer (line 41) | explicit ContrastiveLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 46) | virtual inline int ExactNumBottomBlobs() const { return 3; }
method AllowForceBackward (line 52) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/conv_layer.hpp
type caffe (line 12) | namespace caffe {
class ConvolutionLayer (line 31) | class ConvolutionLayer : public BaseConvolutionLayer<Dtype> {
method ConvolutionLayer (line 64) | explicit ConvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 78) | virtual inline bool reverse_dimensions() { return false; }
FILE: caffe/include/caffe/layers/cosinangle_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class CosinangleLossLayer (line 15) | class CosinangleLossLayer : public LossLayer<Dtype> {
method CosinangleLossLayer (line 17) | explicit CosinangleLossLayer(const LayerParameter& param)
method ExactNumTopBlobs (line 25) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 26) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 27) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/crop_layer.hpp
type caffe (line 11) | namespace caffe {
class CropLayer (line 21) | class CropLayer : public Layer<Dtype> {
method CropLayer (line 23) | explicit CropLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 31) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 32) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/cudnn_conv_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNConvolutionLayer (line 30) | class CuDNNConvolutionLayer : public ConvolutionLayer<Dtype> {
method CuDNNConvolutionLayer (line 32) | explicit CuDNNConvolutionLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_lcn_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNLCNLayer (line 17) | class CuDNNLCNLayer : public LRNLayer<Dtype> {
method CuDNNLCNLayer (line 19) | explicit CuDNNLCNLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_lrn_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNLRNLayer (line 16) | class CuDNNLRNLayer : public LRNLayer<Dtype> {
method CuDNNLRNLayer (line 18) | explicit CuDNNLRNLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_pooling_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNPoolingLayer (line 20) | class CuDNNPoolingLayer : public PoolingLayer<Dtype> {
method CuDNNPoolingLayer (line 22) | explicit CuDNNPoolingLayer(const LayerParameter& param)
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return -1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/cudnn_relu_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNReLULayer (line 20) | class CuDNNReLULayer : public ReLULayer<Dtype> {
method CuDNNReLULayer (line 22) | explicit CuDNNReLULayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_sigmoid_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNSigmoidLayer (line 20) | class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
method CuDNNSigmoidLayer (line 22) | explicit CuDNNSigmoidLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_softmax_layer.hpp
type caffe (line 12) | namespace caffe {
class CuDNNSoftmaxLayer (line 20) | class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> {
method CuDNNSoftmaxLayer (line 22) | explicit CuDNNSoftmaxLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/cudnn_tanh_layer.hpp
type caffe (line 13) | namespace caffe {
class CuDNNTanHLayer (line 20) | class CuDNNTanHLayer : public TanHLayer<Dtype> {
method CuDNNTanHLayer (line 22) | explicit CuDNNTanHLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/data_layer.hpp
type caffe (line 14) | namespace caffe {
class DataLayer (line 17) | class DataLayer : public BasePrefetchingDataLayer<Dtype> {
method ExactNumBottomBlobs (line 24) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 25) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 26) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/deconv_layer.hpp
type caffe (line 12) | namespace caffe {
class DeconvolutionLayer (line 29) | class DeconvolutionLayer : public BaseConvolutionLayer<Dtype> {
method DeconvolutionLayer (line 31) | explicit DeconvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 45) | virtual inline bool reverse_dimensions() { return true; }
FILE: caffe/include/caffe/layers/dropout_layer.hpp
type caffe (line 12) | namespace caffe {
class DropoutLayer (line 26) | class DropoutLayer : public NeuronLayer<Dtype> {
method DropoutLayer (line 34) | explicit DropoutLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/dummy_data_layer.hpp
type caffe (line 11) | namespace caffe {
class DummyDataLayer (line 19) | class DummyDataLayer : public Layer<Dtype> {
method DummyDataLayer (line 21) | explicit DummyDataLayer(const LayerParameter& param)
method Reshape (line 26) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 31) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 36) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 38) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/eltwise_layer.hpp
type caffe (line 10) | namespace caffe {
class EltwiseLayer (line 19) | class EltwiseLayer : public Layer<Dtype> {
method EltwiseLayer (line 21) | explicit EltwiseLayer(const LayerParameter& param)
method MinBottomBlobs (line 29) | virtual inline int MinBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/elu_layer.hpp
type caffe (line 12) | namespace caffe {
class ELULayer (line 24) | class ELULayer : public NeuronLayer<Dtype> {
method ELULayer (line 32) | explicit ELULayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/embed_layer.hpp
type caffe (line 10) | namespace caffe {
class EmbedLayer (line 20) | class EmbedLayer : public Layer<Dtype> {
method EmbedLayer (line 22) | explicit EmbedLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/euclidean_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class EuclideanLossLayer (line 41) | class EuclideanLossLayer : public LossLayer<Dtype> {
method EuclideanLossLayer (line 43) | explicit EuclideanLossLayer(const LayerParameter& param)
method AllowForceBackward (line 53) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/exp_layer.hpp
type caffe (line 12) | namespace caffe {
class ExpLayer (line 20) | class ExpLayer : public NeuronLayer<Dtype> {
method ExpLayer (line 30) | explicit ExpLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/filter_layer.hpp
type caffe (line 10) | namespace caffe {
class FilterLayer (line 19) | class FilterLayer : public Layer<Dtype> {
method FilterLayer (line 21) | explicit FilterLayer(const LayerParameter& param)
method MinBottomBlobs (line 29) | virtual inline int MinBottomBlobs() const { return 2; }
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/flatten_layer.hpp
type caffe (line 10) | namespace caffe {
class FlattenLayer (line 23) | class FlattenLayer : public Layer<Dtype> {
method FlattenLayer (line 25) | explicit FlattenLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 31) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 32) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/hdf5_data_layer.hpp
type caffe (line 15) | namespace caffe {
class HDF5DataLayer (line 23) | class HDF5DataLayer : public Layer<Dtype> {
method HDF5DataLayer (line 25) | explicit HDF5DataLayer(const LayerParameter& param)
method Reshape (line 31) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 35) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 36) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 46) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 48) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/hdf5_output_layer.hpp
type caffe (line 13) | namespace caffe {
class HDF5OutputLayer (line 24) | class HDF5OutputLayer : public Layer<Dtype> {
method HDF5OutputLayer (line 26) | explicit HDF5OutputLayer(const LayerParameter& param)
method Reshape (line 32) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 37) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 38) | virtual inline int ExactNumTopBlobs() const { return 0; }
method file_name (line 40) | inline std::string file_name() const { return file_name_; }
FILE: caffe/include/caffe/layers/hinge_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class HingeLossLayer (line 58) | class HingeLossLayer : public LossLayer<Dtype> {
method HingeLossLayer (line 60) | explicit HingeLossLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/im2col_layer.hpp
type caffe (line 10) | namespace caffe {
class Im2colLayer (line 20) | class Im2colLayer : public Layer<Dtype> {
method Im2colLayer (line 22) | explicit Im2colLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/image_data_layer.hpp
type caffe (line 15) | namespace caffe {
class ImageDataLayer (line 23) | class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> {
method ImageDataLayer (line 25) | explicit ImageDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 33) | virtual inline int ExactNumTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/infogain_loss_layer.hpp
type caffe (line 13) | namespace caffe {
class InfogainLossLayer (line 48) | class InfogainLossLayer : public LossLayer<Dtype> {
method InfogainLossLayer (line 50) | explicit InfogainLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 60) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 61) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 62) | virtual inline int MaxBottomBlobs() const { return 3; }
method ExactNumTopBlobs (line 66) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 67) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 68) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/inner_product_layer.hpp
type caffe (line 10) | namespace caffe {
class InnerProductLayer (line 19) | class InnerProductLayer : public Layer<Dtype> {
method InnerProductLayer (line 21) | explicit InnerProductLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/input_layer.hpp
type caffe (line 10) | namespace caffe {
class InputLayer (line 19) | class InputLayer : public Layer<Dtype> {
method InputLayer (line 21) | explicit InputLayer(const LayerParameter& param)
method Reshape (line 26) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 31) | virtual inline int MinTopBlobs() const { return 1; }
method Forward_cpu (line 34) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 36) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/log_layer.hpp
type caffe (line 12) | namespace caffe {
class LogLayer (line 20) | class LogLayer : public NeuronLayer<Dtype> {
method LogLayer (line 30) | explicit LogLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/loss_layer.hpp
type caffe (line 10) | namespace caffe {
class LossLayer (line 23) | class LossLayer : public Layer<Dtype> {
method LossLayer (line 25) | explicit LossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method AutoTopBlobs (line 40) | virtual inline bool AutoTopBlobs() const { return true; }
method ExactNumTopBlobs (line 41) | virtual inline int ExactNumTopBlobs() const { return 1; }
method AllowForceBackward (line 46) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/lrn_layer.hpp
type caffe (line 15) | namespace caffe {
class LRNLayer (line 23) | class LRNLayer : public Layer<Dtype> {
method LRNLayer (line 25) | explicit LRNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 33) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 34) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/lstm_layer.hpp
type caffe (line 11) | namespace caffe {
class LstmLayer (line 14) | class LstmLayer : public Layer<Dtype> {
method LstmLayer (line 16) | explicit LstmLayer(const LayerParameter& param)
method IsRecurrent (line 24) | virtual bool IsRecurrent() const { return true; }
FILE: caffe/include/caffe/layers/lstm_new_layer.hpp
type caffe (line 15) | namespace caffe {
class RecurrentLayer (line 17) | class RecurrentLayer
class LSTMNewLayer (line 48) | class LSTMNewLayer : public RecurrentLayer<Dtype> {
method LSTMNewLayer (line 50) | explicit LSTMNewLayer(const LayerParameter& param)
class LSTMUnitLayer (line 69) | class LSTMUnitLayer : public Layer<Dtype> {
method LSTMUnitLayer (line 71) | explicit LSTMUnitLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 77) | virtual inline int ExactNumBottomBlobs() const { return 3; }
method ExactNumTopBlobs (line 78) | virtual inline int ExactNumTopBlobs() const { return 2; }
method AllowForceBackward (line 80) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/memory_data_layer.hpp
type caffe (line 12) | namespace caffe {
class MemoryDataLayer (line 20) | class MemoryDataLayer : public BaseDataLayer<Dtype> {
method MemoryDataLayer (line 22) | explicit MemoryDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 28) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 29) | virtual inline int ExactNumTopBlobs() const { return 2; }
method batch_size (line 42) | int batch_size() { return batch_size_; }
method channels (line 43) | int channels() { return channels_; }
method height (line 44) | int height() { return height_; }
method width (line 45) | int width() { return width_; }
FILE: caffe/include/caffe/layers/multinomial_logistic_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class MultinomialLogisticLossLayer (line 44) | class MultinomialLogisticLossLayer : public LossLayer<Dtype> {
method MultinomialLogisticLossLayer (line 46) | explicit MultinomialLogisticLossLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/mvn_layer.hpp
type caffe (line 10) | namespace caffe {
class MVNLayer (line 18) | class MVNLayer : public Layer<Dtype> {
method MVNLayer (line 20) | explicit MVNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/neuron_layer.hpp
type caffe (line 10) | namespace caffe {
class NeuronLayer (line 19) | class NeuronLayer : public Layer<Dtype> {
method NeuronLayer (line 21) | explicit NeuronLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/parameter_layer.hpp
type caffe (line 8) | namespace caffe {
class ParameterLayer (line 11) | class ParameterLayer : public Layer<Dtype> {
method ParameterLayer (line 13) | explicit ParameterLayer(const LayerParameter& param)
method LayerSetUp (line 15) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 26) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Forward_cpu (line 33) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 38) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/point_bilinear_layer.hpp
type caffe (line 12) | namespace caffe {
class PointBilinearLayer (line 15) | class PointBilinearLayer : public Layer<Dtype> {
method PointBilinearLayer (line 17) | explicit PointBilinearLayer(const LayerParameter& param)
method MinBottomBlobs (line 25) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 26) | virtual inline int MaxBottomBlobs() const { return 3; }
method MinTopBlobs (line 27) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 28) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/pooling_layer.hpp
type caffe (line 10) | namespace caffe {
class PoolingLayer (line 18) | class PoolingLayer : public Layer<Dtype> {
method PoolingLayer (line 20) | explicit PoolingLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 28) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 29) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 32) | virtual inline int MaxTopBlobs() const {
FILE: caffe/include/caffe/layers/power_layer.hpp
type caffe (line 12) | namespace caffe {
class PowerLayer (line 20) | class PowerLayer : public NeuronLayer<Dtype> {
method PowerLayer (line 29) | explicit PowerLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/prelu_layer.hpp
type caffe (line 12) | namespace caffe {
class PReLULayer (line 23) | class PReLULayer : public NeuronLayer<Dtype> {
method PReLULayer (line 33) | explicit PReLULayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/python_layer.hpp
type caffe (line 11) | namespace caffe {
class PythonLayer (line 14) | class PythonLayer : public Layer<Dtype> {
method PythonLayer (line 16) | PythonLayer(PyObject* self, const LayerParameter& param)
method LayerSetUp (line 19) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 32) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method Forward_cpu (line 40) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 44) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/recurrent_layer.hpp
type caffe (line 15) | namespace caffe {
class RecurrentLayer (line 17) | class RecurrentLayer
method RecurrentLayer (line 28) | explicit RecurrentLayer(const LayerParameter& param)
method MinBottomBlobs (line 37) | virtual inline int MinBottomBlobs() const {
method MaxBottomBlobs (line 46) | virtual inline int MaxBottomBlobs() const { return MinBottomBlobs() ...
method ExactNumTopBlobs (line 47) | virtual inline int ExactNumTopBlobs() const {
method AllowForceBackward (line 57) | virtual inline bool AllowForceBackward(const int bottom_index) const {
class RecurrentLayer (line 26) | class RecurrentLayer : public Layer<Dtype> {
method RecurrentLayer (line 28) | explicit RecurrentLayer(const LayerParameter& param)
method MinBottomBlobs (line 37) | virtual inline int MinBottomBlobs() const {
method MaxBottomBlobs (line 46) | virtual inline int MaxBottomBlobs() const { return MinBottomBlobs() ...
method ExactNumTopBlobs (line 47) | virtual inline int ExactNumTopBlobs() const {
method AllowForceBackward (line 57) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/reduction_layer.hpp
type caffe (line 10) | namespace caffe {
class ReductionLayer (line 20) | class ReductionLayer : public Layer<Dtype> {
method ReductionLayer (line 22) | explicit ReductionLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 30) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 31) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/relu_layer.hpp
type caffe (line 12) | namespace caffe {
class ReLULayer (line 19) | class ReLULayer : public NeuronLayer<Dtype> {
method ReLULayer (line 27) | explicit ReLULayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/reshape_layer.hpp
type caffe (line 10) | namespace caffe {
class ReshapeLayer (line 19) | class ReshapeLayer : public Layer<Dtype> {
method ReshapeLayer (line 21) | explicit ReshapeLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Forward_cpu (line 33) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 35) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Forward_gpu (line 37) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 39) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/reverse_axis_layer.hpp
type caffe (line 10) | namespace caffe {
class ReverseAxisLayer (line 12) | class ReverseAxisLayer : public Layer<Dtype> {
method ReverseAxisLayer (line 14) | explicit ReverseAxisLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 22) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 23) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/reverse_layer.hpp
type caffe (line 9) | namespace caffe {
class ReverseLayer (line 11) | class ReverseLayer : public Layer<Dtype> {
method ReverseLayer (line 13) | explicit ReverseLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 21) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 22) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/rnn_layer.hpp
type caffe (line 15) | namespace caffe {
class RecurrentLayer (line 17) | class RecurrentLayer
class RNNLayer (line 30) | class RNNLayer : public RecurrentLayer<Dtype> {
method RNNLayer (line 32) | explicit RNNLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/roi_pooling_layer.hpp
type caffe (line 10) | namespace caffe {
class ROIPoolingLayer (line 14) | class ROIPoolingLayer : public Layer<Dtype> {
method ROIPoolingLayer (line 16) | explicit ROIPoolingLayer(const LayerParameter& param)
method MinBottomBlobs (line 25) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 26) | virtual inline int MaxBottomBlobs() const { return 2; }
method MinTopBlobs (line 27) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 28) | virtual inline int MaxTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/scale_layer.hpp
type caffe (line 12) | namespace caffe {
class ScaleLayer (line 26) | class ScaleLayer: public Layer<Dtype> {
method ScaleLayer (line 28) | explicit ScaleLayer(const LayerParameter& param)
method MinBottomBlobs (line 37) | virtual inline int MinBottomBlobs() const { return 1; }
method MaxBottomBlobs (line 38) | virtual inline int MaxBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 39) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp
type caffe (line 13) | namespace caffe {
class SigmoidCrossEntropyLossLayer (line 45) | class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
method SigmoidCrossEntropyLossLayer (line 47) | explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/sigmoid_layer.hpp
type caffe (line 12) | namespace caffe {
class SigmoidLayer (line 23) | class SigmoidLayer : public NeuronLayer<Dtype> {
method SigmoidLayer (line 25) | explicit SigmoidLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/silence_layer.hpp
type caffe (line 10) | namespace caffe {
class SilenceLayer (line 17) | class SilenceLayer : public Layer<Dtype> {
method SilenceLayer (line 19) | explicit SilenceLayer(const LayerParameter& param)
method Reshape (line 21) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method MinBottomBlobs (line 25) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 26) | virtual inline int ExactNumTopBlobs() const { return 0; }
method Forward_cpu (line 29) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
FILE: caffe/include/caffe/layers/slice_layer.hpp
type caffe (line 10) | namespace caffe {
class SliceLayer (line 19) | class SliceLayer : public Layer<Dtype> {
method SliceLayer (line 21) | explicit SliceLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 30) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/smooth_L1_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class SmoothL1LossLayer (line 14) | class SmoothL1LossLayer : public LossLayer<Dtype> {
method SmoothL1LossLayer (line 16) | explicit SmoothL1LossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 25) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 26) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 27) | virtual inline int MaxBottomBlobs() const { return 4; }
method AllowForceBackward (line 33) | virtual inline bool AllowForceBackward(const int bottom_index) const {
FILE: caffe/include/caffe/layers/softmax_layer.hpp
type caffe (line 10) | namespace caffe {
class SoftmaxLayer (line 18) | class SoftmaxLayer : public Layer<Dtype> {
method SoftmaxLayer (line 20) | explicit SoftmaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 26) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 27) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/softmax_loss_layer.hpp
type caffe (line 13) | namespace caffe {
class SoftmaxWithLossLayer (line 44) | class SoftmaxWithLossLayer : public LossLayer<Dtype> {
method SoftmaxWithLossLayer (line 54) | explicit SoftmaxWithLossLayer(const LayerParameter& param)
method ExactNumTopBlobs (line 62) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 63) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 64) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/split_layer.hpp
type caffe (line 10) | namespace caffe {
class SplitLayer (line 19) | class SplitLayer : public Layer<Dtype> {
method SplitLayer (line 21) | explicit SplitLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 27) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 28) | virtual inline int MinTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/spp_layer.hpp
type caffe (line 10) | namespace caffe {
class SPPLayer (line 19) | class SPPLayer : public Layer<Dtype> {
method SPPLayer (line 21) | explicit SPPLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 29) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 30) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/sum_layer.hpp
type caffe (line 10) | namespace caffe {
class SumLayer (line 13) | class SumLayer : public Layer<Dtype> {
method SumLayer (line 15) | explicit SumLayer(const LayerParameter& param)
method MinBottomBlobs (line 23) | virtual inline int MinBottomBlobs() const { return 1; }
method MaxBottomBlobs (line 24) | virtual inline int MaxBottomBlobs() const { return 1; }
method MinTopBlobs (line 25) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 26) | virtual inline int MaxTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/tanh_layer.hpp
type caffe (line 12) | namespace caffe {
class TanHLayer (line 23) | class TanHLayer : public NeuronLayer<Dtype> {
method TanHLayer (line 25) | explicit TanHLayer(const LayerParameter& param)
FILE: caffe/include/caffe/layers/threshold_layer.hpp
type caffe (line 12) | namespace caffe {
class ThresholdLayer (line 19) | class ThresholdLayer : public NeuronLayer<Dtype> {
method ThresholdLayer (line 27) | explicit ThresholdLayer(const LayerParameter& param)
method Backward_cpu (line 54) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: caffe/include/caffe/layers/tile_layer.hpp
type caffe (line 10) | namespace caffe {
class TileLayer (line 16) | class TileLayer : public Layer<Dtype> {
method TileLayer (line 18) | explicit TileLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 24) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 25) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/transpose_layer.hpp
type caffe (line 9) | namespace caffe {
class TransposeLayer (line 13) | class TransposeLayer : public Layer<Dtype> {
method TransposeLayer (line 15) | explicit TransposeLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 23) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 24) | virtual inline int ExactNumTopBlobs() const { return 1; }
FILE: caffe/include/caffe/layers/unitbox_data_layer.hpp
type caffe (line 16) | namespace caffe {
class UnitBoxDataLayer (line 20) | class UnitBoxDataLayer : public BasePrefetchingDataLayer<Dtype> {
method UnitBoxDataLayer (line 22) | explicit UnitBoxDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 28) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 29) | virtual inline int ExactNumTopBlobs() const { return 2; }
method AutoTopBlobs (line 30) | virtual inline bool AutoTopBlobs() const { return true; }
class Line (line 32) | class Line {
method Line (line 34) | Line (std::string x, vector<float> y, vector<float> z, vector<stri...
FILE: caffe/include/caffe/layers/unitbox_loss_layer.hpp
type caffe (line 12) | namespace caffe {
class UnitboxLossLayer (line 15) | class UnitboxLossLayer : public LossLayer<Dtype> {
method UnitboxLossLayer (line 17) | explicit UnitboxLossLayer(const LayerParameter& param)
method ExactNumTopBlobs (line 25) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 26) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 27) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: caffe/include/caffe/layers/window_data_layer.hpp
type caffe (line 15) | namespace caffe {
class WindowDataLayer (line 25) | class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
method WindowDataLayer (line 27) | explicit WindowDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 34) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 35) | virtual inline int ExactNumTopBlobs() const { return 2; }
type WindowField (line 43) | enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }
FILE: caffe/include/caffe/net.hpp
type caffe (line 15) | namespace caffe {
class Net (line 24) | class Net {
method Dtype (line 85) | Dtype ForwardBackward() {
method string (line 123) | inline const string& name() const { return name_; }
method Phase (line 137) | inline Phase phase() const { return phase_; }
method num_inputs (line 198) | inline int num_inputs() const { return net_input_blobs_.size(); }
method num_outputs (line 199) | inline int num_outputs() const { return net_output_blobs_.size(); }
method set_debug_info (line 217) | void set_debug_info(const bool value) { debug_info_ = value; }
class Callback (line 231) | class Callback {
method add_before_forward (line 239) | void add_before_forward(Callback* value) {
method add_after_forward (line 243) | void add_after_forward(Callback* value) {
method add_before_backward (line 247) | void add_before_backward(Callback* value) {
method add_after_backward (line 251) | void add_after_backward(Callback* value) {
FILE: caffe/include/caffe/parallel.hpp
type caffe (line 21) | namespace caffe {
class Params (line 27) | class Params {
method size (line 33) | inline size_t size() const {
method Dtype (line 36) | inline Dtype* data() const {
method Dtype (line 39) | inline Dtype* diff() const {
class GPUParams (line 53) | class GPUParams : public Params<Dtype> {
class NCCL (line 67) | class NCCL : public GPUParams<Dtype>,
method on_start (line 105) | void on_start() {}
FILE: caffe/include/caffe/sgd_solvers.hpp
type caffe (line 9) | namespace caffe {
class SGDSolver (line 16) | class SGDSolver : public Solver<Dtype> {
method SGDSolver (line 18) | explicit SGDSolver(const SolverParameter& param)
method SGDSolver (line 20) | explicit SGDSolver(const string& param_file)
class NesterovSolver (line 49) | class NesterovSolver : public SGDSolver<Dtype> {
method NesterovSolver (line 51) | explicit NesterovSolver(const SolverParameter& param)
method NesterovSolver (line 53) | explicit NesterovSolver(const string& param_file)
class AdaGradSolver (line 64) | class AdaGradSolver : public SGDSolver<Dtype> {
method AdaGradSolver (line 66) | explicit AdaGradSolver(const SolverParameter& param)
method AdaGradSolver (line 68) | explicit AdaGradSolver(const string& param_file)
method constructor_sanity_check (line 74) | void constructor_sanity_check() {
class RMSPropSolver (line 84) | class RMSPropSolver : public SGDSolver<Dtype> {
method RMSPropSolver (line 86) | explicit RMSPropSolver(const SolverParameter& param)
method RMSPropSolver (line 88) | explicit RMSPropSolver(const string& param_file)
method constructor_sanity_check (line 94) | void constructor_sanity_check() {
class AdaDeltaSolver (line 107) | class AdaDeltaSolver : public SGDSolver<Dtype> {
method AdaDeltaSolver (line 109) | explicit AdaDeltaSolver(const SolverParameter& param)
method AdaDeltaSolver (line 111) | explicit AdaDeltaSolver(const string& param_file)
class AdamSolver (line 131) | class AdamSolver : public SGDSolver<Dtype> {
method AdamSolver (line 133) | explicit AdamSolver(const SolverParameter& param)
method AdamSolver (line 135) | explicit AdamSolver(const string& param_file)
FILE: caffe/include/caffe/solver.hpp
type caffe (line 11) | namespace caffe {
type SolverAction (line 21) | namespace SolverAction {
type Enum (line 22) | enum Enum {
class Solver (line 42) | class Solver {
method Solve (line 58) | inline void Solve(const string resume_file) { Solve(resume_file.c_st...
method SolverParameter (line 70) | inline const SolverParameter& param() const { return param_; }
method net (line 71) | inline shared_ptr<Net<Dtype> > net() { return net_; }
method iter (line 75) | int iter() const { return iter_; }
class Callback (line 78) | class Callback {
method add_callback (line 87) | void add_callback(Callback* value) {
FILE: caffe/include/caffe/solver_factory.hpp
type caffe (line 48) | namespace caffe {
class Solver (line 51) | class Solver
class SolverRegistry (line 54) | class SolverRegistry {
method CreatorRegistry (line 59) | static CreatorRegistry& Registry() {
method AddCreator (line 65) | static void AddCreator(const string& type, Creator creator) {
method SolverTypeList (line 81) | static vector<string> SolverTypeList() {
method SolverRegistry (line 94) | SolverRegistry() {}
method string (line 96) | static string SolverTypeListString() {
class SolverRegisterer (line 112) | class SolverRegisterer {
method SolverRegisterer (line 114) | SolverRegisterer(const string& type,
FILE: caffe/include/caffe/syncedmem.hpp
type caffe (line 12) | namespace caffe {
function CaffeMallocHost (line 19) | inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) {
function CaffeFreeHost (line 36) | inline void CaffeFreeHost(void* ptr, bool use_cuda) {
class SyncedMemory (line 57) | class SyncedMemory {
type SyncedHead (line 68) | enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }
method SyncedHead (line 69) | SyncedHead head() { return head_; }
method size (line 70) | size_t size() { return size_; }
FILE: caffe/include/caffe/test/test_caffe_main.hpp
type caffe (line 27) | namespace caffe {
class MultiDeviceTest (line 30) | class MultiDeviceTest : public ::testing::Test {
method MultiDeviceTest (line 34) | MultiDeviceTest() {
type CPUDevice (line 43) | struct CPUDevice {
class CPUDeviceTest (line 49) | class CPUDeviceTest : public MultiDeviceTest<CPUDevice<Dtype> > {
type GPUDevice (line 60) | struct GPUDevice {
class GPUDeviceTest (line 66) | class GPUDeviceTest : public MultiDeviceTest<GPUDevice<Dtype> > {
FILE: caffe/include/caffe/test/test_gradient_check_util.hpp
type caffe (line 14) | namespace caffe {
class GradientChecker (line 19) | class GradientChecker {
method GradientChecker (line 24) | GradientChecker(const Dtype stepsize, const Dtype threshold,
method CheckGradient (line 33) | void CheckGradient(Layer<Dtype>* layer, const vector<Blob<Dtype>*>& ...
function Dtype (line 233) | Dtype GradientChecker<Dtype>::GetObjAndGradient(const Layer<Dtype>& la...
FILE: caffe/include/caffe/util/benchmark.hpp
type caffe (line 8) | namespace caffe {
class Timer (line 10) | class Timer {
method initted (line 20) | inline bool initted() { return initted_; }
method running (line 21) | inline bool running() { return running_; }
method has_run_at_least_once (line 22) | inline bool has_run_at_least_once() { return has_run_at_least_once_; }
class CPUTimer (line 40) | class CPUTimer : public Timer {
FILE: caffe/include/caffe/util/blocking_queue.hpp
type caffe (line 7) | namespace caffe {
class BlockingQueue (line 10) | class BlockingQueue {
class sync (line 35) | class sync
FILE: caffe/include/caffe/util/cudnn.hpp
type caffe (line 52) | namespace caffe {
type cudnn (line 54) | namespace cudnn {
class dataType (line 56) | class dataType
class dataType<float> (line 57) | class dataType<float> {
class dataType<double> (line 63) | class dataType<double> {
function createTensor4dDesc (line 71) | inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) {
function setTensor4dDesc (line 76) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function setTensor4dDesc (line 84) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function createFilterDesc (line 95) | inline void createFilterDesc(cudnnFilterDescriptor_t* desc,
function createConvolutionDesc (line 108) | inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) {
function setConvolutionDesc (line 113) | inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv,
function createPoolingDesc (line 127) | inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc,
function createActivationDescriptor (line 151) | inline void createActivationDescriptor(cudnnActivationDescriptor_t* ...
FILE: caffe/include/caffe/util/db.hpp
type caffe (line 9) | namespace caffe { namespace db {
type db (line 9) | namespace db {
type Mode (line 11) | enum Mode { READ, WRITE, NEW }
class Cursor (line 13) | class Cursor {
method Cursor (line 15) | Cursor() { }
class Transaction (line 26) | class Transaction {
method Transaction (line 28) | Transaction() { }
class DB (line 36) | class DB {
method DB (line 38) | DB() { }
FILE: caffe/include/caffe/util/db_leveldb.hpp
type caffe (line 12) | namespace caffe { namespace db {
type db (line 12) | namespace db {
class LevelDBCursor (line 14) | class LevelDBCursor : public Cursor {
method LevelDBCursor (line 16) | explicit LevelDBCursor(leveldb::Iterator* iter)
method SeekToFirst (line 22) | virtual void SeekToFirst() { iter_->SeekToFirst(); }
method Next (line 23) | virtual void Next() { iter_->Next(); }
method string (line 24) | virtual string key() { return iter_->key().ToString(); }
method string (line 25) | virtual string value() { return iter_->value().ToString(); }
method valid (line 26) | virtual bool valid() { return iter_->Valid(); }
class LevelDBTransaction (line 32) | class LevelDBTransaction : public Transaction {
method LevelDBTransaction (line 34) | explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOT...
method Put (line 35) | virtual void Put(const string& key, const string& value) {
method Commit (line 38) | virtual void Commit() {
class LevelDB (line 51) | class LevelDB : public DB {
method LevelDB (line 53) | LevelDB() : db_(NULL) { }
method Close (line 56) | virtual void Close() {
method LevelDBCursor (line 62) | virtual LevelDBCursor* NewCursor() {
method LevelDBTransaction (line 65) | virtual LevelDBTransaction* NewTransaction() {
FILE: caffe/include/caffe/util/db_lmdb.hpp
type caffe (line 12) | namespace caffe { namespace db {
type db (line 12) | namespace db {
function MDB_CHECK (line 14) | inline void MDB_CHECK(int mdb_status) {
class LMDBCursor (line 18) | class LMDBCursor : public Cursor {
method LMDBCursor (line 20) | explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor)
method SeekToFirst (line 28) | virtual void SeekToFirst() { Seek(MDB_FIRST); }
method Next (line 29) | virtual void Next() { Seek(MDB_NEXT); }
method string (line 30) | virtual string key() {
method string (line 33) | virtual string value() {
method valid (line 37) | virtual bool valid() { return valid_; }
method Seek (line 40) | void Seek(MDB_cursor_op op) {
class LMDBTransaction (line 56) | class LMDBTransaction : public Transaction {
method LMDBTransaction (line 58) | explicit LMDBTransaction(MDB_env* mdb_env)
class LMDB (line 72) | class LMDB : public DB {
method LMDB (line 74) | LMDB() : mdb_env_(NULL) { }
method Close (line 77) | virtual void Close() {
FILE: caffe/include/caffe/util/device_alternate.hpp
type caffe (line 78) | namespace caffe {
function CAFFE_GET_BLOCKS (line 88) | inline int CAFFE_GET_BLOCKS(const int N) {
FILE: caffe/include/caffe/util/format.hpp
type caffe (line 8) | namespace caffe {
function format_int (line 10) | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) {
FILE: caffe/include/caffe/util/hdf5.hpp
type caffe (line 11) | namespace caffe {
FILE: caffe/include/caffe/util/im2col.hpp
type caffe (line 4) | namespace caffe {
FILE: caffe/include/caffe/util/insert_splits.hpp
type caffe (line 8) | namespace caffe {
FILE: caffe/include/caffe/util/io.hpp
type caffe (line 19) | namespace caffe {
function MakeTempDir (line 24) | inline void MakeTempDir(string* temp_dirname) {
function MakeTempFilename (line 39) | inline void MakeTempFilename(string* temp_filename) {
function ReadProtoFromTextFile (line 54) | inline bool ReadProtoFromTextFile(const string& filename, Message* pro...
function ReadProtoFromTextFileOrDie (line 58) | inline void ReadProtoFromTextFileOrDie(const char* filename, Message* ...
function ReadProtoFromTextFileOrDie (line 62) | inline void ReadProtoFromTextFileOrDie(const string& filename, Message...
function WriteProtoToTextFile (line 67) | inline void WriteProtoToTextFile(const Message& proto, const string& f...
function ReadProtoFromBinaryFile (line 73) | inline bool ReadProtoFromBinaryFile(const string& filename, Message* p...
function ReadProtoFromBinaryFileOrDie (line 77) | inline void ReadProtoFromBinaryFileOrDie(const char* filename, Message...
function ReadProtoFromBinaryFileOrDie (line 81) | inline void ReadProtoFromBinaryFileOrDie(const string& filename,
function WriteProtoToBinaryFile (line 88) | inline void WriteProtoToBinaryFile(
function ReadFileToDatum (line 95) | inline bool ReadFileToDatum(const string& filename, Datum* datum) {
function ReadImageToDatum (line 103) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 109) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 114) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 119) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 124) | inline bool ReadImageToDatum(const string& filename, const int label,
FILE: caffe/include/caffe/util/math_functions.hpp
type caffe (line 13) | namespace caffe {
function caffe_memset (line 45) | inline void caffe_memset(const size_t N, const int alpha, void* X) {
function caffe_sign (line 117) | inline int8_t caffe_sign(Dtype val) {
function caffe_gpu_memset (line 184) | inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) {
FILE: caffe/include/caffe/util/mkl_alternate.hpp
function cblas_saxpby (line 89) | inline void cblas_saxpby(const int N, const float alpha, const float* X,
function cblas_daxpby (line 95) | inline void cblas_daxpby(const int N, const double alpha, const double* X,
FILE: caffe/include/caffe/util/nccl.hpp
type caffe (line 16) | namespace caffe {
type nccl (line 18) | namespace nccl {
class dataType (line 20) | class dataType
class dataType<float> (line 22) | class dataType<float> {
class dataType<double> (line 26) | class dataType<double> {
FILE: caffe/include/caffe/util/rng.hpp
type caffe (line 12) | namespace caffe {
function rng_t (line 16) | inline rng_t* caffe_rng() {
function shuffle (line 22) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
function shuffle (line 38) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator e...
FILE: caffe/include/caffe/util/signal_handler.h
function namespace (line 7) | namespace caffe {
FILE: caffe/include/caffe/util/upgrade_proto.hpp
type caffe (line 8) | namespace caffe {
FILE: caffe/matlab/+caffe/private/caffe_.cpp
function mxCHECK (line 25) | inline void mxCHECK(bool expr, const char* msg) {
function mxERROR (line 30) | inline void mxERROR(const char* msg) { mexErrMsgTxt(msg); }
function mxCHECK_FILE_EXIST (line 33) | void mxCHECK_FILE_EXIST(const char* file) {
type WhichMemory (line 54) | enum WhichMemory { DATA, DIFF }
function mx_mat_to_blob (line 57) | static void mx_mat_to_blob(const mxArray* mx_mat, Blob<float>* blob,
function mxArray (line 79) | static mxArray* blob_to_mx_mat(const Blob<float>* blob,
function mxArray (line 110) | static mxArray* int_vec_to_mx_vec(const vector<int>& int_vec) {
function mxArray (line 120) | static mxArray* str_vec_to_mx_strcell(const vector<std::string>& str_vec) {
function T (line 136) | static T* handle_to_ptr(const mxArray* mx_handle) {
function mxArray (line 148) | static mxArray* create_handle_vec(int ptr_num) {
function setup_handle (line 156) | static void setup_handle(const T* ptr, int index, mxArray* mx_handle_vec) {
function mxArray (line 166) | static mxArray* ptr_to_handle(const T* ptr) {
function mxArray (line 174) | static mxArray* ptr_vec_to_handle_vec(const vector<shared_ptr<T> >& ptr_...
function get_solver (line 186) | static void get_solver(MEX_ARGS) {
function delete_solver (line 201) | static void delete_solver(MEX_ARGS) {
function solver_get_attr (line 212) | static void solver_get_attr(MEX_ARGS) {
function solver_get_iter (line 228) | static void solver_get_iter(MEX_ARGS) {
function solver_restore (line 236) | static void solver_restore(MEX_ARGS) {
function solver_solve (line 247) | static void solver_solve(MEX_ARGS) {
function solver_step (line 255) | static void solver_step(MEX_ARGS) {
function get_net (line 264) | static void get_net(MEX_ARGS) {
function delete_net (line 286) | static void delete_net(MEX_ARGS) {
function net_get_attr (line 297) | static void net_get_attr(MEX_ARGS) {
function net_forward (line 322) | static void net_forward(MEX_ARGS) {
function net_backward (line 330) | static void net_backward(MEX_ARGS) {
function net_copy_from (line 338) | static void net_copy_from(MEX_ARGS) {
function net_reshape (line 349) | static void net_reshape(MEX_ARGS) {
function net_save (line 357) | static void net_save(MEX_ARGS) {
function layer_get_attr (line 369) | static void layer_get_attr(MEX_ARGS) {
function layer_get_type (line 383) | static void layer_get_type(MEX_ARGS) {
function blob_get_shape (line 391) | static void blob_get_shape(MEX_ARGS) {
function blob_reshape (line 406) | static void blob_reshape(MEX_ARGS) {
function blob_get_data (line 422) | static void blob_get_data(MEX_ARGS) {
function blob_set_data (line 430) | static void blob_set_data(MEX_ARGS) {
function blob_get_diff (line 438) | static void blob_get_diff(MEX_ARGS) {
function blob_set_diff (line 446) | static void blob_set_diff(MEX_ARGS) {
function set_mode_cpu (line 454) | static void set_mode_cpu(MEX_ARGS) {
function set_mode_gpu (line 460) | static void set_mode_gpu(MEX_ARGS) {
function set_device (line 466) | static void set_device(MEX_ARGS) {
function get_init_key (line 474) | static void get_init_key(MEX_ARGS) {
function reset (line 480) | static void reset(MEX_ARGS) {
function read_mean (line 492) | static void read_mean(MEX_ARGS) {
function write_mean (line 507) | static void write_mean(MEX_ARGS) {
function version (line 530) | static void version(MEX_ARGS) {
type handler_registry (line 539) | struct handler_registry {
function mexFunction (line 585) | void mexFunction(MEX_ARGS) {
FILE: caffe/python/caffe/_caffe.cpp
type caffe (line 44) | namespace caffe {
function set_mode_cpu (line 51) | void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); }
function set_mode_gpu (line 52) | void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); }
function InitLog (line 54) | void InitLog(int level) {
function InitLogInfo (line 60) | void InitLogInfo() {
function Log (line 63) | void Log(const string& s) {
function set_random_seed (line 67) | void set_random_seed(unsigned int seed) { Caffe::set_random_seed(seed); }
function CheckFile (line 73) | static void CheckFile(const string& filename) {
function CheckContiguousArray (line 82) | void CheckContiguousArray(PyArrayObject* arr, string name,
function Net_Init (line 105) | shared_ptr<Net<Dtype> > Net_Init(string network_file, int phase,
function Net_Init_Load (line 133) | shared_ptr<Net<Dtype> > Net_Init_Load(
function Net_Save (line 149) | void Net_Save(const Net<Dtype>& net, string filename) {
function Net_SaveHDF5 (line 155) | void Net_SaveHDF5(const Net<Dtype>& net, string filename) {
function Net_LoadHDF5 (line 159) | void Net_LoadHDF5(Net<Dtype>* net, string filename) {
function Net_SetInputArrays (line 163) | void Net_SetInputArrays(Net<Dtype>* net, bp::object data_obj,
type NdarrayConverterGenerator (line 201) | struct NdarrayConverterGenerator {
type apply (line 202) | struct apply
type NdarrayConverterGenerator::apply<Dtype*> (line 206) | struct NdarrayConverterGenerator::apply<Dtype*> {
type type (line 207) | struct type {
method PyObject (line 208) | PyObject* operator() (Dtype* data) const {
method PyTypeObject (line 212) | const PyTypeObject* get_pytype() {
type NdarrayCallPolicies (line 218) | struct NdarrayCallPolicies : public bp::default_call_policies {
method PyObject (line 220) | PyObject* postcall(PyObject* pyargs, PyObject* result) {
function Blob_Reshape (line 240) | bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) {
function BlobVec_add_blob (line 254) | bp::object BlobVec_add_blob(bp::tuple args, bp::dict kwargs) {
class SolverCallback (line 270) | class SolverCallback: public Solver<Dtype>::Callback {
method SolverCallback (line 275) | SolverCallback(bp::object on_start, bp::object on_gradients_ready)
method on_gradients_ready (line 277) | virtual void on_gradients_ready() {
method on_start (line 280) | virtual void on_start() {
function Solver_add_callback (line 285) | void Solver_add_callback(Solver<Dtype> * solver, bp::object on_start,
class NetCallback (line 302) | class NetCallback: public Net<Dtype>::Callback {
method NetCallback (line 304) | explicit NetCallback(bp::object run) : run_(run) {}
method run (line 307) | virtual void run(int layer) {
function Net_before_forward (line 312) | void Net_before_forward(Net<Dtype>* net, bp::object run) {
function Net_after_forward (line 315) | void Net_after_forward(Net<Dtype>* net, bp::object run) {
function Net_before_backward (line 318) | void Net_before_backward(Net<Dtype>* net, bp::object run) {
function Net_after_backward (line 321) | void Net_after_backward(Net<Dtype>* net, bp::object run) {
class NCCL (line 336) | class NCCL {
method NCCL (line 338) | NCCL(shared_ptr<Solver<Dtype> > solver, const string& uid) {}
function BOOST_PYTHON_MODULE (line 344) | BOOST_PYTHON_MODULE(_caffe) {
FILE: caffe/python/caffe/classifier.py
class Classifier (line 11) | class Classifier(caffe.Net):
method __init__ (line 23) | def __init__(self, model_file, pretrained_file, image_dims=None,
method predict (line 47) | def predict(self, inputs, oversample=True):
FILE: caffe/python/caffe/coord_map.py
function conv_params (line 18) | def conv_params(fn):
function crop_params (line 40) | def crop_params(fn):
class UndefinedMapException (line 50) | class UndefinedMapException(Exception):
function coord_map (line 57) | def coord_map(fn):
class AxisMismatchException (line 82) | class AxisMismatchException(Exception):
function compose (line 89) | def compose(base_map, next_map):
function inverse (line 106) | def inverse(coord_map):
function coord_map_from_to (line 115) | def coord_map_from_to(top_from, top_to):
function crop (line 172) | def crop(top_from, top_to):
FILE: caffe/python/caffe/detector.py
class Detector (line 22) | class Detector(caffe.Net):
method __init__ (line 35) | def __init__(self, model_file, pretrained_file, mean=None,
method detect_windows (line 56) | def detect_windows(self, images_windows):
method detect_selective_search (line 101) | def detect_selective_search(self, image_fnames):
method crop (line 125) | def crop(self, im, window):
method configure_crop (line 181) | def configure_crop(self, context_pad):
FILE: caffe/python/caffe/draw.py
function get_pooling_types_dict (line 36) | def get_pooling_types_dict():
function get_edge_label (line 46) | def get_edge_label(layer):
function get_layer_label (line 62) | def get_layer_label(layer, rankdir):
function choose_color_by_layertype (line 117) | def choose_color_by_layertype(layertype):
function get_pydot_graph (line 130) | def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
function draw_net (line 205) | def draw_net(caffe_net, rankdir, ext='png', phase=None):
function draw_net_to_file (line 226) | def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
FILE: caffe/python/caffe/io.py
function blobproto_to_array (line 18) | def blobproto_to_array(blob, return_diff=False):
function array_to_blobproto (line 36) | def array_to_blobproto(arr, diff=None):
function arraylist_to_blobprotovector_str (line 49) | def arraylist_to_blobprotovector_str(arraylist):
function blobprotovector_str_to_arraylist (line 58) | def blobprotovector_str_to_arraylist(str):
function array_to_datum (line 66) | def array_to_datum(arr, label=None):
function datum_to_array (line 84) | def datum_to_array(datum):
class Transformer (line 98) | class Transformer:
method __init__ (line 109) | def __init__(self, inputs):
method __check_input (line 117) | def __check_input(self, in_):
method preprocess (line 122) | def preprocess(self, in_, data):
method deprocess (line 164) | def deprocess(self, in_, data):
method set_transpose (line 187) | def set_transpose(self, in_, order):
method set_channel_swap (line 203) | def set_channel_swap(self, in_, order):
method set_raw_scale (line 221) | def set_raw_scale(self, in_, scale):
method set_mean (line 236) | def set_mean(self, in_, mean):
method set_input_scale (line 262) | def set_input_scale(self, in_, scale):
function load_image (line 279) | def load_image(filename, color=True):
function resize_image (line 306) | def resize_image(im, new_dims, interp_order=1):
function oversample (line 341) | def oversample(images, crop_dims):
FILE: caffe/python/caffe/net_spec.py
function param_name_dict (line 28) | def param_name_dict():
function to_proto (line 43) | def to_proto(*tops):
function assign_proto (line 56) | def assign_proto(proto, name, val):
class Top (line 82) | class Top(object):
method __init__ (line 86) | def __init__(self, fn, n):
method to_proto (line 90) | def to_proto(self):
method _to_proto (line 96) | def _to_proto(self, layers, names, autonames):
class Function (line 100) | class Function(object):
method __init__ (line 104) | def __init__(self, type_name, inputs, params):
method _get_name (line 121) | def _get_name(self, names, autonames):
method _get_top_name (line 129) | def _get_top_name(self, top, names, autonames):
method _to_proto (line 135) | def _to_proto(self, layers, names, autonames):
class NetSpec (line 167) | class NetSpec(object):
method __init__ (line 173) | def __init__(self):
method __setattr__ (line 176) | def __setattr__(self, name, value):
method __getattr__ (line 179) | def __getattr__(self, name):
method __setitem__ (line 182) | def __setitem__(self, key, value):
method __getitem__ (line 185) | def __getitem__(self, item):
method to_proto (line 188) | def to_proto(self):
class Layers (line 199) | class Layers(object):
method __getattr__ (line 204) | def __getattr__(self, name):
class Parameters (line 216) | class Parameters(object):
method __getattr__ (line 221) | def __getattr__(self, name):
FILE: caffe/python/caffe/pycaffe.py
function _Net_blobs (line 25) | def _Net_blobs(self):
function _Net_blob_loss_weights (line 36) | def _Net_blob_loss_weights(self):
function _Net_layer_dict (line 47) | def _Net_layer_dict(self):
function _Net_params (line 58) | def _Net_params(self):
function _Net_inputs (line 73) | def _Net_inputs(self):
function _Net_outputs (line 81) | def _Net_outputs(self):
function _Net_forward (line 88) | def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
function _Net_backward (line 137) | def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
function _Net_forward_all (line 185) | def _Net_forward_all(self, blobs=None, **kwargs):
function _Net_forward_backward_all (line 216) | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
function _Net_set_input_arrays (line 261) | def _Net_set_input_arrays(self, data, labels):
function _Net_batch (line 272) | def _Net_batch(self, blobs):
function _Net_get_id_name (line 305) | def _Net_get_id_name(func, field):
FILE: caffe/python/caffe/test/test_coord_map.py
function coord_net_spec (line 12) | def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
class TestCoordMap (line 34) | class TestCoordMap(unittest.TestCase):
method setUp (line 35) | def setUp(self):
method test_conv_pool_deconv (line 38) | def test_conv_pool_deconv(self):
method test_pass (line 55) | def test_pass(self):
method test_padding (line 71) | def test_padding(self):
method test_multi_conv (line 94) | def test_multi_conv(self):
method test_rect (line 109) | def test_rect(self):
method test_nd_conv (line 125) | def test_nd_conv(self):
method test_crop_of_crop (line 146) | def test_crop_of_crop(self):
method test_crop_helper (line 160) | def test_crop_helper(self):
method test_catch_unconnected (line 167) | def test_catch_unconnected(self):
method test_catch_scale_mismatch (line 176) | def test_catch_scale_mismatch(self):
method test_catch_negative_crop (line 185) | def test_catch_negative_crop(self):
FILE: caffe/python/caffe/test/test_draw.py
function getFilenames (line 9) | def getFilenames():
class TestDraw (line 27) | class TestDraw(unittest.TestCase):
method test_draw_net (line 28) | def test_draw_net(self):
FILE: caffe/python/caffe/test/test_gradient_for_python_layer.py
function test_gradient_for_python_layer (line 58) | def test_gradient_for_python_layer(input_names_and_values, output_names,...
function make_net_from_python_layer (line 85) | def make_net_from_python_layer(input_names_and_values, output_names, py_...
function gradient_test_for_net (line 131) | def gradient_test_for_net(net, input_names_and_values, propagate_down, o...
function test_gradient_for_specific_output_of_net (line 156) | def test_gradient_for_specific_output_of_net(net, input_names, propagate...
function get_obj_and_gradient (line 216) | def get_obj_and_gradient(net, output_names, out_name, out_i, loss_weight...
FILE: caffe/python/caffe/test/test_io.py
class TestBlobProtoToArray (line 6) | class TestBlobProtoToArray(unittest.TestCase):
method test_old_format (line 8) | def test_old_format(self):
method test_new_format (line 18) | def test_new_format(self):
method test_no_shape (line 27) | def test_no_shape(self):
method test_scalar (line 35) | def test_scalar(self):
class TestArrayToDatum (line 44) | class TestArrayToDatum(unittest.TestCase):
method test_label_none_size (line 46) | def test_label_none_size(self):
FILE: caffe/python/caffe/test/test_layer_type_list.py
class TestLayerTypeList (line 5) | class TestLayerTypeList(unittest.TestCase):
method test_standard_types (line 7) | def test_standard_types(self):
FILE: caffe/python/caffe/test/test_nccl.py
class TestNCCL (line 7) | class TestNCCL(unittest.TestCase):
method test_newuid (line 9) | def test_newuid(self):
FILE: caffe/python/caffe/test/test_net.py
function simple_net_file (line 11) | def simple_net_file(num_output):
class TestNet (line 38) | class TestNet(unittest.TestCase):
method setUp (line 39) | def setUp(self):
method test_memory (line 49) | def test_memory(self):
method test_layer_dict (line 63) | def test_layer_dict(self):
method test_forward_backward (line 70) | def test_forward_backward(self):
method test_forward_start_end (line 74) | def test_forward_start_end(self):
method test_backward_start_end (line 92) | def test_backward_start_end(self):
method test_clear_param_diffs (line 111) | def test_clear_param_diffs(self):
method test_inputs_outputs (line 122) | def test_inputs_outputs(self):
method test_top_bottom_names (line 126) | def test_top_bottom_names(self):
method test_save_and_read (line 138) | def test_save_and_read(self):
method test_save_hdf5 (line 155) | def test_save_hdf5(self):
class TestLevels (line 169) | class TestLevels(unittest.TestCase):
method setUp (line 219) | def setUp(self):
method tearDown (line 224) | def tearDown(self):
method check_net (line 227) | def check_net(self, net, blobs):
method test_0 (line 231) | def test_0(self):
method test_1 (line 235) | def test_1(self):
class TestStages (line 240) | class TestStages(unittest.TestCase):
method setUp (line 284) | def setUp(self):
method tearDown (line 289) | def tearDown(self):
method check_net (line 292) | def check_net(self, net, blobs):
method test_A (line 296) | def test_A(self):
method test_B (line 300) | def test_B(self):
method test_AandB (line 304) | def test_AandB(self):
class TestAllInOne (line 309) | class TestAllInOne(unittest.TestCase):
method setUp (line 366) | def setUp(self):
method tearDown (line 371) | def tearDown(self):
method check_net (line 374) | def check_net(self, net, outputs):
method test_train (line 378) | def test_train(self):
method test_val (line 382) | def test_val(self):
method test_deploy (line 386) | def test_deploy(self):
FILE: caffe/python/caffe/test/test_net_spec.py
function lenet (line 7) | def lenet(batch_size):
function anon_lenet (line 26) | def anon_lenet(batch_size):
function silent_net (line 44) | def silent_net():
class TestNetSpec (line 51) | class TestNetSpec(unittest.TestCase):
method load_net (line 52) | def load_net(self, net_proto):
method test_lenet (line 58) | def test_lenet(self):
method test_zero_tops (line 76) | def test_zero_tops(self):
method test_type_error (line 83) | def test_type_error(self):
FILE: caffe/python/caffe/test/test_python_layer.py
class SimpleLayer (line 9) | class SimpleLayer(caffe.Layer):
method setup (line 12) | def setup(self, bottom, top):
method reshape (line 15) | def reshape(self, bottom, top):
method forward (line 18) | def forward(self, bottom, top):
method backward (line 21) | def backward(self, top, propagate_down, bottom):
class ExceptionLayer (line 25) | class ExceptionLayer(caffe.Layer):
method setup (line 28) | def setup(self, bottom, top):
class ParameterLayer (line 31) | class ParameterLayer(caffe.Layer):
method setup (line 34) | def setup(self, bottom, top):
method reshape (line 38) | def reshape(self, bottom, top):
method forward (line 41) | def forward(self, bottom, top):
method backward (line 44) | def backward(self, top, propagate_down, bottom):
class PhaseLayer (line 47) | class PhaseLayer(caffe.Layer):
method setup (line 50) | def setup(self, bottom, top):
method reshape (line 53) | def reshape(self, bootom, top):
method forward (line 56) | def forward(self, bottom, top):
function python_net_file (line 59) | def python_net_file():
function exception_net_file (line 72) | def exception_net_file():
function parameter_net_file (line 82) | def parameter_net_file():
function phase_net_file (line 91) | def phase_net_file():
class TestPythonLayer (line 102) | class TestPythonLayer(unittest.TestCase):
method setUp (line 103) | def setUp(self):
method test_forward (line 108) | def test_forward(self):
method test_backward (line 115) | def test_backward(self):
method test_reshape (line 122) | def test_reshape(self):
method test_exception (line 130) | def test_exception(self):
method test_parameter (line 135) | def test_parameter(self):
method test_phase (line 164) | def test_phase(self):
FILE: caffe/python/caffe/test/test_python_layer_with_param_str.py
class SimpleParamLayer (line 9) | class SimpleParamLayer(caffe.Layer):
method setup (line 12) | def setup(self, bottom, top):
method reshape (line 18) | def reshape(self, bottom, top):
method forward (line 21) | def forward(self, bottom, top):
method backward (line 24) | def backward(self, top, propagate_down, bottom):
function python_param_net_file (line 28) | def python_param_net_file():
class TestLayerWithParam (line 43) | class TestLayerWithParam(unittest.TestCase):
method setUp (line 44) | def setUp(self):
method test_forward (line 49) | def test_forward(self):
method test_backward (line 56) | def test_backward(self):
FILE: caffe/python/caffe/test/test_solver.py
class TestSolver (line 11) | class TestSolver(unittest.TestCase):
method setUp (line 12) | def setUp(self):
method test_solve (line 36) | def test_solve(self):
method test_net_memory (line 41) | def test_net_memory(self):
method test_snapshot (line 56) | def test_snapshot(self):
FILE: caffe/python/classify.py
function main (line 17) | def main(argv):
FILE: caffe/python/detect.py
function main (line 30) | def main(argv):
FILE: caffe/python/draw_net.py
function parse_args (line 13) | def parse_args():
function main (line 41) | def main():
FILE: caffe/python/train.py
function train (line 10) | def train(
function time (line 33) | def time(solver, nccl):
function solve (line 65) | def solve(proto, snapshot, gpus, timing, uid, rank):
FILE: caffe/scripts/cpp_lint.py
function ParseNolintSuppressions (line 468) | def ParseNolintSuppressions(filename, raw_line, linenum, error):
function ResetNolintSuppressions (line 499) | def ResetNolintSuppressions():
function IsErrorSuppressedByNolint (line 504) | def IsErrorSuppressedByNolint(category, linenum):
function Match (line 519) | def Match(pattern, s):
function ReplaceAll (line 529) | def ReplaceAll(pattern, rep, s):
function Search (line 547) | def Search(pattern, s):
class _IncludeState (line 554) | class _IncludeState(dict):
method __init__ (line 588) | def __init__(self):
method ResetSection (line 592) | def ResetSection(self):
method SetLastHeader (line 598) | def SetLastHeader(self, header_path):
method CanonicalizeAlphabeticalOrder (line 601) | def CanonicalizeAlphabeticalOrder(self, header_path):
method IsInAlphabeticalOrder (line 616) | def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
method CheckNextIncludeOrder (line 637) | def CheckNextIncludeOrder(self, header_type):
class _CppLintState (line 691) | class _CppLintState(object):
method __init__ (line 694) | def __init__(self):
method SetOutputFormat (line 707) | def SetOutputFormat(self, output_format):
method SetVerboseLevel (line 711) | def SetVerboseLevel(self, level):
method SetCountingStyle (line 717) | def SetCountingStyle(self, counting_style):
method SetFilters (line 721) | def SetFilters(self, filters):
method ResetErrorCounts (line 746) | def ResetErrorCounts(self):
method IncrementErrorCount (line 751) | def IncrementErrorCount(self, category):
method PrintErrorCounts (line 761) | def PrintErrorCounts(self):
function _OutputFormat (line 771) | def _OutputFormat():
function _SetOutputFormat (line 776) | def _SetOutputFormat(output_format):
function _VerboseLevel (line 781) | def _VerboseLevel():
function _SetVerboseLevel (line 786) | def _SetVerboseLevel(level):
function _SetCountingStyle (line 791) | def _SetCountingStyle(level):
function _Filters (line 796) | def _Filters():
function _SetFilters (line 801) | def _SetFilters(filters):
class _FunctionState (line 814) | class _FunctionState(object):
method __init__ (line 820) | def __init__(self):
method Begin (line 825) | def Begin(self, function_name):
method Count (line 835) | def Count(self):
method Check (line 840) | def Check(self, error, filename, linenum):
method End (line 865) | def End(self):
class _IncludeError (line 870) | class _IncludeError(Exception):
class FileInfo (line 875) | class FileInfo:
method __init__ (line 882) | def __init__(self, filename):
method FullName (line 885) | def FullName(self):
method RepositoryName (line 889) | def RepositoryName(self):
method Split (line 934) | def Split(self):
method BaseName (line 948) | def BaseName(self):
method Extension (line 952) | def Extension(self):
method NoExtension (line 956) | def NoExtension(self):
method IsSource (line 960) | def IsSource(self):
function _ShouldPrintError (line 965) | def _ShouldPrintError(category, confidence, linenum):
function Error (line 992) | def Error(filename, linenum, category, confidence, message):
function IsCppString (line 1049) | def IsCppString(line):
function CleanseRawStrings (line 1066) | def CleanseRawStrings(raw_lines):
function FindNextMultiLineCommentStart (line 1127) | def FindNextMultiLineCommentStart(lines, lineix):
function FindNextMultiLineCommentEnd (line 1138) | def FindNextMultiLineCommentEnd(lines, lineix):
function RemoveMultiLineCommentsFromRange (line 1147) | def RemoveMultiLineCommentsFromRange(lines, begin, end):
function RemoveMultiLineComments (line 1155) | def RemoveMultiLineComments(filename, lines, error):
function CleanseComments (line 1171) | def CleanseComments(line):
class CleansedLines (line 1187) | class CleansedLines(object):
method __init__ (line 1196) | def __init__(self, lines):
method NumLines (line 1208) | def NumLines(self):
method _CollapseStrings (line 1213) | def _CollapseStrings(elided):
function FindEndOfExpressionInLine (line 1234) | def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
function CloseExpression (line 1258) | def CloseExpression(clean_lines, linenum, pos):
function FindStartOfExpressionInLine (line 1304) | def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
function ReverseCloseExpression (line 1331) | def ReverseCloseExpression(clean_lines, linenum, pos):
function CheckForCopyright (line 1376) | def CheckForCopyright(filename, lines, error):
function GetHeaderGuardCPPVariable (line 1388) | def GetHeaderGuardCPPVariable(filename):
function CheckForHeaderGuard (line 1412) | def CheckForHeaderGuard(filename, lines, error):
function CheckForBadCharacters (line 1487) | def CheckForBadCharacters(filename, lines, error):
function CheckForNewlineAtEOF (line 1512) | def CheckForNewlineAtEOF(filename, lines, error):
function CheckForMultilineCommentsAndStrings (line 1530) | def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, ...
function CheckCaffeAlternatives (line 1576) | def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
function CheckCaffeDataLayerSetUp (line 1599) | def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
function CheckCaffeRandom (line 1644) | def CheckCaffeRandom(filename, clean_lines, linenum, error):
function CheckPosixThreading (line 1685) | def CheckPosixThreading(filename, clean_lines, linenum, error):
function CheckVlogArguments (line 1712) | def CheckVlogArguments(filename, clean_lines, linenum, error):
function CheckInvalidIncrement (line 1737) | def CheckInvalidIncrement(filename, clean_lines, linenum, error):
class _BlockInfo (line 1759) | class _BlockInfo(object):
method __init__ (line 1762) | def __init__(self, seen_open_brace):
method CheckBegin (line 1767) | def CheckBegin(self, filename, clean_lines, linenum, error):
method CheckEnd (line 1782) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _ClassInfo (line 1796) | class _ClassInfo(_BlockInfo):
method __init__ (line 1799) | def __init__(self, name, class_or_struct, clean_lines, linenum):
method CheckBegin (line 1833) | def CheckBegin(self, filename, clean_lines, linenum, error):
method CheckEnd (line 1838) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _NamespaceInfo (line 1852) | class _NamespaceInfo(_BlockInfo):
method __init__ (line 1855) | def __init__(self, name, linenum):
method CheckEnd (line 1860) | def CheckEnd(self, filename, clean_lines, linenum, error):
class _PreprocessorInfo (line 1906) | class _PreprocessorInfo(object):
method __init__ (line 1909) | def __init__(self, stack_before_if):
class _NestingState (line 1920) | class _NestingState(object):
method __init__ (line 1923) | def __init__(self):
method SeenOpenBrace (line 1935) | def SeenOpenBrace(self):
method InNamespaceBody (line 1944) | def InNamespaceBody(self):
method UpdatePreprocessor (line 1952) | def UpdatePreprocessor(self, line):
method Update (line 2008) | def Update(self, filename, clean_lines, linenum, error):
method InnermostClass (line 2164) | def InnermostClass(self):
method CheckCompletedBlocks (line 2176) | def CheckCompletedBlocks(self, filename, error):
function CheckForNonStandardConstructs (line 2198) | def CheckForNonStandardConstructs(filename, clean_lines, linenum,
function CheckSpacingForFunctionCall (line 2305) | def CheckSpacingForFunctionCall(filename, line, linenum, error):
function IsBlankLine (line 2373) | def IsBlankLine(line):
function CheckForFunctionLengths (line 2388) | def CheckForFunctionLengths(filename, clean_lines, linenum,
function CheckComment (line 2461) | def CheckComment(comment, filename, linenum, error):
function CheckAccess (line 2490) | def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
function FindNextMatchingAngleBracket (line 2521) | def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
function FindPreviousMatchingAngleBracket (line 2590) | def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
function CheckSpacing (line 2647) | def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
function CheckSectionSpacing (line 2995) | def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
function GetPreviousNonBlankLine (line 3050) | def GetPreviousNonBlankLine(clean_lines, linenum):
function CheckBraces (line 3073) | def CheckBraces(filename, clean_lines, linenum, error):
function CheckEmptyBlockBody (line 3247) | def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
function CheckCheck (line 3282) | def CheckCheck(filename, clean_lines, linenum, error):
function CheckAltTokens (line 3409) | def CheckAltTokens(filename, clean_lines, linenum, error):
function GetLineWidth (line 3441) | def GetLineWidth(line):
function CheckStyle (line 3463) | def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_s...
function _DropCommonSuffixes (line 3580) | def _DropCommonSuffixes(filename):
function _IsTestFilename (line 3607) | def _IsTestFilename(filename):
function _ClassifyInclude (line 3624) | def _ClassifyInclude(fileinfo, include, is_system):
function CheckIncludeLine (line 3684) | def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
function _GetTextInside (line 3756) | def _GetTextInside(text, start_pattern):
function CheckLanguage (line 3838) | def CheckLanguage(filename, clean_lines, linenum, file_extension,
function CheckForNonConstReference (line 4138) | def CheckForNonConstReference(filename, clean_lines, linenum,
function CheckCStyleCast (line 4251) | def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
function FilesBelongToSameModule (line 4403) | def FilesBelongToSameModule(filename_cc, filename_h):
function UpdateIncludeState (line 4458) | def UpdateIncludeState(filename, include_state, io=codecs):
function CheckForIncludeWhatYouUse (line 4487) | def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
function CheckMakePairUsesDeduction (line 4583) | def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
function ProcessLine (line 4604) | def ProcessLine(filename, file_extension, clean_lines, line,
function ProcessFileData (line 4648) | def ProcessFileData(filename, file_extension, lines, error,
function ProcessFile (line 4693) | def ProcessFile(filename, vlevel, extra_check_functions=[]):
function PrintUsage (line 4761) | def PrintUsage(message):
function PrintCategories (line 4774) | def PrintCategories():
function ParseArguments (line 4783) | def ParseArguments(args):
function main (line 4853) | def main():
FILE: caffe/scripts/download_model_binary.py
function reporthook (line 14) | def reporthook(count, block_size, total_size):
function parse_readme_frontmatter (line 31) | def parse_readme_frontmatter(dirname):
function valid_dirname (line 42) | def valid_dirname(dirname):
function model_checks_out (line 63) | def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
FILE: caffe/src/caffe/blob.cpp
type caffe (line 9) | namespace caffe {
function Dtype (line 130) | const Dtype* Blob<Dtype>::cpu_data() const {
function Dtype (line 148) | const Dtype* Blob<Dtype>::gpu_data() const {
function Dtype (line 166) | const Dtype* Blob<Dtype>::cpu_diff() const {
function Dtype (line 172) | const Dtype* Blob<Dtype>::gpu_diff() const {
function Dtype (line 178) | Dtype* Blob<Dtype>::mutable_cpu_data() {
function Dtype (line 184) | Dtype* Blob<Dtype>::mutable_gpu_data() {
function Dtype (line 190) | Dtype* Blob<Dtype>::mutable_cpu_diff() {
function Dtype (line 196) | Dtype* Blob<Dtype>::mutable_gpu_diff() {
function Dtype (line 256) | Dtype Blob<Dtype>::asum_data() const {
function Dtype (line 291) | Dtype Blob<Dtype>::asum_diff() const {
function Dtype (line 326) | Dtype Blob<Dtype>::sumsq_data() const {
function Dtype (line 363) | Dtype Blob<Dtype>::sumsq_diff() const {
class Blob<int> (line 603) | class Blob<int>
class Blob<unsigned int> (line 604) | class Blob<unsigned int>
FILE: caffe/src/caffe/common.cpp
type caffe (line 10) | namespace caffe {
function Caffe (line 15) | Caffe& Caffe::Get() {
function cluster_seedgen (line 23) | int64_t cluster_seedgen(void) {
function GlobalInit (line 43) | void GlobalInit(int* pargc, char*** pargv) {
class Caffe::RNG::Generator (line 83) | class Caffe::RNG::Generator {
method Generator (line 85) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 86) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 241) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 242) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
class Caffe::RNG::Generator (line 239) | class Caffe::RNG::Generator {
method Generator (line 85) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 86) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 241) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 242) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
FILE: caffe/src/caffe/data_transformer.cpp
type caffe (line 13) | namespace caffe {
FILE: caffe/src/caffe/internal_thread.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layer.cpp
type caffe (line 3) | namespace caffe {
FILE: caffe/src/caffe/layer_factory.cpp
type caffe (line 34) | namespace caffe {
function GetConvolutionLayer (line 38) | shared_ptr<Layer<Dtype> > GetConvolutionLayer(
function GetPoolingLayer (line 78) | shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
function GetLRNLayer (line 116) | shared_ptr<Layer<Dtype> > GetLRNLayer(const LayerParameter& param) {
function GetReLULayer (line 154) | shared_ptr<Layer<Dtype> > GetReLULayer(const LayerParameter& param) {
function GetSigmoidLayer (line 178) | shared_ptr<Layer<Dtype> > GetSigmoidLayer(const LayerParameter& param) {
function GetSoftmaxLayer (line 202) | shared_ptr<Layer<Dtype> > GetSoftmaxLayer(const LayerParameter& param) {
function GetTanHLayer (line 226) | shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
function GetPythonLayer (line 250) | shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
FILE: caffe/src/caffe/layers/absval_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/accuracy_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/argmax_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/at_layer.cpp
type caffe (line 3) | namespace caffe {
FILE: caffe/src/caffe/layers/attention_lstm_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: caffe/src/caffe/layers/base_conv_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe/src/caffe/layers/base_data_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: caffe/src/caffe/layers/batch_norm_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/batch_reindex_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/bias_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/bnll_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/concat_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/contrastive_loss_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/conv_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/cosinangle_loss_layer.cpp
type caffe (line 14) | namespace caffe {
FILE: caffe/src/caffe/layers/crop_layer.cpp
type caffe (line 13) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_conv_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_lcn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_lrn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_pooling_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_relu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_sigmoid_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_softmax_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/cudnn_tanh_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/data_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: caffe/src/caffe/layers/deconv_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/dropout_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/dummy_data_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/eltwise_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/elu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/embed_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/euclidean_loss_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/exp_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/filter_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/flatten_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/hdf5_data_layer.cpp
type caffe (line 20) | namespace caffe {
FILE: caffe/src/caffe/layers/hdf5_output_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe/src/caffe/layers/hinge_loss_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/im2col_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/image_data_layer.cpp
type caffe (line 18) | namespace caffe {
FILE: caffe/src/caffe/layers/infogain_loss_layer.cpp
type caffe (line 9) | namespace caffe {
function Dtype (line 85) | Dtype InfogainLossLayer<Dtype>::get_normalizer(
FILE: caffe/src/caffe/layers/inner_product_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/input_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/log_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/loss_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/lrn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/lstm_layer.cpp
type caffe (line 3) | namespace caffe {
function Dtype (line 6) | inline Dtype sigmoid(Dtype x) {
FILE: caffe/src/caffe/layers/lstm_new_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: caffe/src/caffe/layers/lstm_unit_layer.cpp
type caffe (line 8) | namespace caffe {
function Dtype (line 11) | inline Dtype sigmoid(Dtype x) {
function Dtype (line 16) | inline Dtype tanh(Dtype x) {
FILE: caffe/src/caffe/layers/memory_data_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe/src/caffe/layers/multinomial_logistic_loss_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/mvn_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/neuron_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/parameter_layer.cpp
type caffe (line 3) | namespace caffe {
FILE: caffe/src/caffe/layers/point_bilinear_layer.cpp
type caffe (line 11) | namespace caffe {
function bilinear_interpolate (line 14) | void bilinear_interpolate(const Dtype* bottom_data, const int height, ...
function get_coord_gradient (line 134) | void get_coord_gradient(Dtype top_diff, Dtype scale, Dtype ih, Dtype i...
function get_feature_gradient (line 175) | void get_feature_gradient(Dtype top_diff, Dtype h, Dtype w, const int ...
FILE: caffe/src/caffe/layers/pooling_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/power_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/prelu_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe/src/caffe/layers/recurrent_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: caffe/src/caffe/layers/reduction_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/relu_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/reshape_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/reverse_axis_layer.cpp
type caffe (line 4) | namespace caffe {
function reverse_cpu (line 6) | void reverse_cpu(const int count, const Dtype* from_data, Dtype* to_data,
FILE: caffe/src/caffe/layers/rnn_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: caffe/src/caffe/layers/roi_pooling_layer.cpp
type caffe (line 17) | namespace caffe {
FILE: caffe/src/caffe/layers/scale_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 7) | namespace caffe {
function Dtype (line 49) | Dtype SigmoidCrossEntropyLossLayer<Dtype>::get_normalizer(
FILE: caffe/src/caffe/layers/sigmoid_layer.cpp
type caffe (line 6) | namespace caffe {
function Dtype (line 9) | inline Dtype sigmoid(Dtype x) {
FILE: caffe/src/caffe/layers/silence_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/slice_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/smooth_L1_loss_layer.cpp
type caffe (line 18) | namespace caffe {
FILE: caffe/src/caffe/layers/softmax_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: caffe/src/caffe/layers/softmax_loss_layer.cpp
type caffe (line 8) | namespace caffe {
function Dtype (line 59) | Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
FILE: caffe/src/caffe/layers/split_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/spp_layer.cpp
type caffe (line 11) | namespace caffe {
function LayerParameter (line 17) | LayerParameter SPPLayer<Dtype>::GetPoolingParam(const int pyramid_level,
FILE: caffe/src/caffe/layers/sum_layer.cpp
type caffe (line 13) | namespace caffe {
function sumation_forward (line 16) | void sumation_forward(const int count, const Dtype* from_data, Dtype* ...
function sumation_backward (line 36) | void sumation_backward(const int count, Dtype* bottom_diff, const Dtyp...
FILE: caffe/src/caffe/layers/tanh_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: caffe/src/caffe/layers/threshold_layer.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/layers/tile_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: caffe/src/caffe/layers/transpose_layer.cpp
type caffe (line 3) | namespace caffe {
function transpose_cpu (line 6) | void transpose_cpu(const int count, const Dtype* from_data, Dtype* to_...
FILE: caffe/src/caffe/layers/unitbox_loss_layer.cpp
type caffe (line 14) | namespace caffe {
FILE: caffe/src/caffe/layers/window_data_layer.cpp
type caffe (line 28) | namespace caffe {
FILE: caffe/src/caffe/net.cpp
type caffe (line 20) | namespace caffe {
function Dtype (line 516) | Dtype Net<Dtype>::ForwardFromTo(int start, int end) {
function Dtype (line 535) | Dtype Net<Dtype>::ForwardFrom(int start) {
function Dtype (line 540) | Dtype Net<Dtype>::ForwardTo(int end) {
FILE: caffe/src/caffe/parallel.cpp
type caffe (line 14) | namespace caffe {
type Op (line 16) | enum Op {
function apply_buffers (line 25) | static void apply_buffers(const vector<Blob<Dtype>*>& blobs,
function total_size (line 59) | static size_t total_size(const vector<Blob<Dtype>*>& params) {
function getDevice (line 110) | static int getDevice() {
function string (line 179) | string NCCL<Dtype>::new_uid() {
class Worker (line 259) | class Worker : public InternalThread {
method Worker (line 261) | explicit Worker(shared_ptr<Solver<Dtype> > rank0, int device,
method InternalThreadEntry (line 270) | void InternalThreadEntry() {
FILE: caffe/src/caffe/solver.cpp
type caffe (line 12) | namespace caffe {
function string (line 440) | string Solver<Dtype>::SnapshotFilename(const string extension) {
function string (line 446) | string Solver<Dtype>::SnapshotToBinaryProto() {
function string (line 456) | string Solver<Dtype>::SnapshotToHDF5() {
FILE: caffe/src/caffe/solvers/adadelta_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/solvers/adagrad_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/solvers/adam_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/solvers/nesterov_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/solvers/rmsprop_solver.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/solvers/sgd_solver.cpp
type caffe (line 9) | namespace caffe {
function Dtype (line 27) | Dtype SGDSolver<Dtype>::GetLearningRate() {
FILE: caffe/src/caffe/syncedmem.cpp
type caffe (line 5) | namespace caffe {
FILE: caffe/src/caffe/test/test_accuracy_layer.cpp
type caffe (line 14) | namespace caffe {
class AccuracyLayerTest (line 17) | class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
method AccuracyLayerTest (line 19) | AccuracyLayerTest()
method FillBottoms (line 40) | virtual void FillBottoms() {
function TYPED_TEST (line 74) | TYPED_TEST(AccuracyLayerTest, TestSetup) {
function TYPED_TEST (line 84) | TYPED_TEST(AccuracyLayerTest, TestSetupTopK) {
function TYPED_TEST (line 97) | TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) {
function TYPED_TEST (line 111) | TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
function TYPED_TEST (line 137) | TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) {
function TYPED_TEST (line 180) | TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) {
function TYPED_TEST (line 218) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) {
function TYPED_TEST (line 249) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) {
function TYPED_TEST (line 287) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) {
FILE: caffe/src/caffe/test/test_argmax_layer.cpp
type caffe (line 13) | namespace caffe {
class ArgMaxLayerTest (line 16) | class ArgMaxLayerTest : public CPUDeviceTest<Dtype> {
method ArgMaxLayerTest (line 18) | ArgMaxLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(ArgMaxLayerTest, TestSetup) {
function TYPED_TEST (line 48) | TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) {
function TYPED_TEST (line 58) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxis) {
function TYPED_TEST (line 70) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxisNegativeIndexing) {
function TYPED_TEST (line 82) | TYPED_TEST(ArgMaxLayerTest, TestSetupAxisMaxVal) {
function TYPED_TEST (line 95) | TYPED_TEST(ArgMaxLayerTest, TestCPU) {
function TYPED_TEST (line 118) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) {
function TYPED_TEST (line 144) | TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) {
function TYPED_TEST (line 174) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) {
function TYPED_TEST (line 206) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxis) {
function TYPED_TEST (line 232) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxisTopK) {
function TYPED_TEST (line 265) | TYPED_TEST(ArgMaxLayerTest, TestCPUAxisMaxValTopK) {
FILE: caffe/src/caffe/test/test_attlstm_layer.cpp
type caffe (line 14) | namespace caffe {
class AttLstmLayerTest (line 17) | class AttLstmLayerTest : public MultiDeviceTest<TypeParam> {
method AttLstmLayerTest (line 21) | AttLstmLayerTest() : num_output_(7) {
method ReshapeBlobs (line 47) | void ReshapeBlobs(int num_timesteps, int num_instances) {
function TYPED_TEST (line 94) | TYPED_TEST(AttLstmLayerTest, TestSetUp) {
function TYPED_TEST (line 205) | TYPED_TEST(AttLstmLayerTest, TestGradient) {
function TYPED_TEST (line 215) | TYPED_TEST(AttLstmLayerTest, TestGradientNonZeroCont) {
function TYPED_TEST (line 226) | TYPED_TEST(AttLstmLayerTest, TestGradientNonZeroContBufferSize2) {
FILE: caffe/src/caffe/test/test_batch_norm_layer.cpp
type caffe (line 18) | namespace caffe {
class BatchNormLayerTest (line 21) | class BatchNormLayerTest : public MultiDeviceTest<TypeParam> {
method BatchNormLayerTest (line 24) | BatchNormLayerTest()
function TYPED_TEST (line 43) | TYPED_TEST(BatchNormLayerTest, TestForward) {
function TYPED_TEST (line 79) | TYPED_TEST(BatchNormLayerTest, TestForwardInplace) {
function TYPED_TEST (line 123) | TYPED_TEST(BatchNormLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_batch_reindex_layer.cpp
type caffe (line 13) | namespace caffe {
class BatchReindexLayerTest (line 16) | class BatchReindexLayerTest : public MultiDeviceTest<TypeParam> {
method BatchReindexLayerTest (line 20) | BatchReindexLayerTest()
method SetUp (line 25) | virtual void SetUp() {
method TestForward (line 61) | void TestForward() {
function TYPED_TEST (line 105) | TYPED_TEST(BatchReindexLayerTest, TestForward) {
function TYPED_TEST (line 109) | TYPED_TEST(BatchReindexLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_benchmark.cpp
type caffe (line 10) | namespace caffe {
class BenchmarkTest (line 15) | class BenchmarkTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 19) | TYPED_TEST(BenchmarkTest, TestTimerConstructor) {
function TYPED_TEST (line 26) | TYPED_TEST(BenchmarkTest, TestTimerStart) {
function TYPED_TEST (line 43) | TYPED_TEST(BenchmarkTest, TestTimerStop) {
function TYPED_TEST (line 60) | TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) {
function TYPED_TEST (line 75) | TYPED_TEST(BenchmarkTest, TestTimerSeconds) {
FILE: caffe/src/caffe/test/test_bias_layer.cpp
type caffe (line 14) | namespace caffe {
class BiasLayerTest (line 17) | class BiasLayerTest : public MultiDeviceTest<TypeParam> {
method BiasLayerTest (line 21) | BiasLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(BiasLayerTest, TestForwardEltwise) {
function TYPED_TEST (line 90) | TYPED_TEST(BiasLayerTest, TestForwardEltwiseInPlace) {
function TYPED_TEST (line 110) | TYPED_TEST(BiasLayerTest, TestBackwardEltwiseInPlace) {
function TYPED_TEST (line 156) | TYPED_TEST(BiasLayerTest, TestForwardEltwiseWithParam) {
function TYPED_TEST (line 176) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastBegin) {
function TYPED_TEST (line 199) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 247) | TYPED_TEST(BiasLayerTest, TestBackwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 293) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastMiddleWithParam) {
function TYPED_TEST (line 317) | TYPED_TEST(BiasLayerTest, TestForwardBroadcastEnd) {
function TYPED_TEST (line 340) | TYPED_TEST(BiasLayerTest, TestForwardBias) {
function TYPED_TEST (line 357) | TYPED_TEST(BiasLayerTest, TestForwardBiasAxis2) {
function TYPED_TEST (line 375) | TYPED_TEST(BiasLayerTest, TestGradientEltwise) {
function TYPED_TEST (line 386) | TYPED_TEST(BiasLayerTest, TestGradientEltwiseWithParam) {
function TYPED_TEST (line 399) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastBegin) {
function TYPED_TEST (line 410) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastMiddle) {
function TYPED_TEST (line 421) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastMiddleWithParam) {
function TYPED_TEST (line 435) | TYPED_TEST(BiasLayerTest, TestGradientBroadcastEnd) {
function TYPED_TEST (line 446) | TYPED_TEST(BiasLayerTest, TestGradientBias) {
function TYPED_TEST (line 456) | TYPED_TEST(BiasLayerTest, TestGradientBiasAxis2) {
FILE: caffe/src/caffe/test/test_blob.cpp
type caffe (line 11) | namespace caffe {
class BlobSimpleTest (line 14) | class BlobSimpleTest : public ::testing::Test {
method BlobSimpleTest (line 16) | BlobSimpleTest()
function TYPED_TEST (line 26) | TYPED_TEST(BlobSimpleTest, TestInitialization) {
function TYPED_TEST (line 38) | TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
function TYPED_TEST (line 45) | TYPED_TEST(BlobSimpleTest, TestReshape) {
function TYPED_TEST (line 54) | TYPED_TEST(BlobSimpleTest, TestReshapeZero) {
function TYPED_TEST (line 62) | TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) {
class BlobMathTest (line 116) | class BlobMathTest : public MultiDeviceTest<TypeParam> {
method BlobMathTest (line 119) | BlobMathTest()
function TYPED_TEST (line 130) | TYPED_TEST(BlobMathTest, TestSumOfSquares) {
function TYPED_TEST (line 185) | TYPED_TEST(BlobMathTest, TestAsum) {
function TYPED_TEST (line 239) | TYPED_TEST(BlobMathTest, TestScaleData) {
FILE: caffe/src/caffe/test/test_bn_layer.cpp
type caffe (line 17) | namespace caffe {
class BNLayerTest (line 20) | class BNLayerTest : public MultiDeviceTest<TypeParam> {
method BNLayerTest (line 23) | BNLayerTest()
function TYPED_TEST (line 42) | TYPED_TEST(BNLayerTest, TestForward) {
function TYPED_TEST (line 84) | TYPED_TEST(BNLayerTest, TestForwardInplace) {
function TYPED_TEST (line 135) | TYPED_TEST(BNLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_caffe_main.cpp
type caffe (line 4) | namespace caffe {
function main (line 14) | int main(int argc, char** argv) {
FILE: caffe/src/caffe/test/test_common.cpp
type caffe (line 9) | namespace caffe {
class CommonTest (line 11) | class CommonTest : public ::testing::Test {}
function TEST_F (line 15) | TEST_F(CommonTest, TestCublasHandlerGPU) {
function TEST_F (line 23) | TEST_F(CommonTest, TestBrewMode) {
function TEST_F (line 30) | TEST_F(CommonTest, TestRandSeedCPU) {
function TEST_F (line 47) | TEST_F(CommonTest, TestRandSeedGPU) {
FILE: caffe/src/caffe/test/test_concat_layer.cpp
type caffe (line 13) | namespace caffe {
class ConcatLayerTest (line 16) | class ConcatLayerTest : public MultiDeviceTest<TypeParam> {
method ConcatLayerTest (line 20) | ConcatLayerTest()
method SetUp (line 25) | virtual void SetUp() {
function TYPED_TEST (line 60) | TYPED_TEST(ConcatLayerTest, TestSetupNum) {
function TYPED_TEST (line 73) | TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
function TYPED_TEST (line 85) | TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) {
function TYPED_TEST (line 101) | TYPED_TEST(ConcatLayerTest, TestForwardTrivial) {
function TYPED_TEST (line 114) | TYPED_TEST(ConcatLayerTest, TestForwardNum) {
function TYPED_TEST (line 143) | TYPED_TEST(ConcatLayerTest, TestForwardChannels) {
function TYPED_TEST (line 169) | TYPED_TEST(ConcatLayerTest, TestGradientTrivial) {
function TYPED_TEST (line 179) | TYPED_TEST(ConcatLayerTest, TestGradientNum) {
function TYPED_TEST (line 189) | TYPED_TEST(ConcatLayerTest, TestGradientChannels) {
function TYPED_TEST (line 198) | TYPED_TEST(ConcatLayerTest, TestGradientChannelsBottomOneOnly) {
FILE: caffe/src/caffe/test/test_contrastive_loss_layer.cpp
type caffe (line 15) | namespace caffe {
class ContrastiveLossLayerTest (line 18) | class ContrastiveLossLayerTest : public MultiDeviceTest<TypeParam> {
method ContrastiveLossLayerTest (line 22) | ContrastiveLossLayerTest()
function TYPED_TEST (line 59) | TYPED_TEST(ContrastiveLossLayerTest, TestForward) {
function TYPED_TEST (line 88) | TYPED_TEST(ContrastiveLossLayerTest, TestGradient) {
function TYPED_TEST (line 101) | TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) {
function TYPED_TEST (line 130) | TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) {
FILE: caffe/src/caffe/test/test_convolution_layer.cpp
type caffe (line 17) | namespace caffe {
function caffe_conv (line 22) | void caffe_conv(const Blob<Dtype>* in, ConvolutionParameter* conv_param,
class ConvolutionLayerTest (line 151) | class ConvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method ConvolutionLayerTest (line 155) | ConvolutionLayerTest()
method SetUp (line 160) | virtual void SetUp() {
function TYPED_TEST (line 195) | TYPED_TEST(ConvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 231) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
function TYPED_TEST (line 267) | TYPED_TEST(ConvolutionLayerTest, TestDilatedConvolution) {
function TYPED_TEST (line 311) | TYPED_TEST(ConvolutionLayerTest, Test0DConvolution) {
function TYPED_TEST (line 349) | TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) {
function TYPED_TEST (line 396) | TYPED_TEST(ConvolutionLayerTest, TestDilated3DConvolution) {
function TYPED_TEST (line 443) | TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) {
function TYPED_TEST (line 470) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
function TYPED_TEST (line 498) | TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) {
function TYPED_TEST (line 591) | TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
function TYPED_TEST (line 709) | TYPED_TEST(ConvolutionLayerTest, TestGradient) {
function TYPED_TEST (line 727) | TYPED_TEST(ConvolutionLayerTest, TestDilatedGradient) {
function TYPED_TEST (line 751) | TYPED_TEST(ConvolutionLayerTest, TestGradient3D) {
function TYPED_TEST (line 779) | TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) {
function TYPED_TEST (line 797) | TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
class CuDNNConvolutionLayerTest (line 817) | class CuDNNConvolutionLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNConvolutionLayerTest (line 819) | CuDNNConvolutionLayerTest()
method SetUp (line 824) | virtual void SetUp() {
function TYPED_TEST (line 859) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 896) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) {
function TYPED_TEST (line 931) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) {
function TYPED_TEST (line 958) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) {
function TYPED_TEST (line 1051) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) {
function TYPED_TEST (line 1068) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) {
FILE: caffe/src/caffe/test/test_crop_layer.cpp
type caffe (line 13) | namespace caffe {
class CropLayerTest (line 16) | class CropLayerTest : public MultiDeviceTest<TypeParam> {
method CropLayerTest (line 20) | CropLayerTest()
method SetUp (line 24) | virtual void SetUp() {
function TYPED_TEST (line 51) | TYPED_TEST(CropLayerTest, TestSetupShapeAll) {
function TYPED_TEST (line 63) | TYPED_TEST(CropLayerTest, TestSetupShapeDefault) {
function TYPED_TEST (line 78) | TYPED_TEST(CropLayerTest, TestSetupShapeNegativeIndexing) {
function TYPED_TEST (line 94) | TYPED_TEST(CropLayerTest, TestDimensionsCheck) {
function TYPED_TEST (line 112) | TYPED_TEST(CropLayerTest, TestCropAll) {
function TYPED_TEST (line 136) | TYPED_TEST(CropLayerTest, TestCropAllOffset) {
function TYPED_TEST (line 164) | TYPED_TEST(CropLayerTest, TestCropHW) {
function TYPED_TEST (line 190) | TYPED_TEST(CropLayerTest, TestCrop5D) {
function TYPED_TEST (line 241) | TYPED_TEST(CropLayerTest, TestCropAllGradient) {
function TYPED_TEST (line 251) | TYPED_TEST(CropLayerTest, TestCropHWGradient) {
function TYPED_TEST (line 263) | TYPED_TEST(CropLayerTest, TestCrop5DGradient) {
FILE: caffe/src/caffe/test/test_data_layer.cpp
type caffe (line 18) | namespace caffe {
class DataLayerTest (line 23) | class DataLayerTest : public MultiDeviceTest<TypeParam> {
method DataLayerTest (line 27) | DataLayerTest()
method SetUp (line 32) | virtual void SetUp() {
method Fill (line 43) | void Fill(const bool unique_pixels, DataParameter_DB backend) {
method TestRead (line 70) | void TestRead() {
method TestSkip (line 108) | void TestSkip() {
method TestReshape (line 134) | void TestReshape(DataParameter_DB backend) {
method TestReadCrop (line 199) | void TestReadCrop(Phase phase) {
method TestReadCropTrainSequenceSeeded (line 253) | void TestReadCropTrainSequenceSeeded() {
method TestReadCropTrainSequenceUnseeded (line 308) | void TestReadCropTrainSequenceUnseeded() {
function TYPED_TEST (line 379) | TYPED_TEST(DataLayerTest, TestReadLevelDB) {
function TYPED_TEST (line 385) | TYPED_TEST(DataLayerTest, TestSkipLevelDB) {
function TYPED_TEST (line 390) | TYPED_TEST(DataLayerTest, TestReshapeLevelDB) {
function TYPED_TEST (line 394) | TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) {
function TYPED_TEST (line 402) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) {
function TYPED_TEST (line 410) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) {
function TYPED_TEST (line 416) | TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) {
function TYPED_TEST (line 424) | TYPED_TEST(DataLayerTest, TestReadLMDB) {
function TYPED_TEST (line 430) | TYPED_TEST(DataLayerTest, TestSkipLMDB) {
function TYPED_TEST (line 435) | TYPED_TEST(DataLayerTest, TestReshapeLMDB) {
function TYPED_TEST (line 439) | TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) {
function TYPED_TEST (line 447) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) {
function TYPED_TEST (line 455) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) {
function TYPED_TEST (line 461) | TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) {
FILE: caffe/src/caffe/test/test_data_transformer.cpp
type caffe (line 16) | namespace caffe {
function FillDatum (line 18) | void FillDatum(const int label, const int channels, const int height,
class DataTransformTest (line 33) | class DataTransformTest : public ::testing::Test {
method DataTransformTest (line 35) | DataTransformTest()
method NumSequenceMatches (line 39) | int NumSequenceMatches(const TransformationParameter transform_param,
function TYPED_TEST (line 78) | TYPED_TEST(DataTransformTest, TestEmptyTransform) {
function TYPED_TEST (line 101) | TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) {
function TYPED_TEST (line 124) | TYPED_TEST(DataTransformTest, TestCropSize) {
function TYPED_TEST (line 151) | TYPED_TEST(DataTransformTest, TestCropTrain) {
function TYPED_TEST (line 168) | TYPED_TEST(DataTransformTest, TestCropTest) {
function TYPED_TEST (line 185) | TYPED_TEST(DataTransformTest, TestMirrorTrain) {
function TYPED_TEST (line 201) | TYPED_TEST(DataTransformTest, TestMirrorTest) {
function TYPED_TEST (line 217) | TYPED_TEST(DataTransformTest, TestCropMirrorTrain) {
function TYPED_TEST (line 239) | TYPED_TEST(DataTransformTest, TestCropMirrorTest) {
function TYPED_TEST (line 261) | TYPED_TEST(DataTransformTest, TestMeanValue) {
function TYPED_TEST (line 282) | TYPED_TEST(DataTransformTest, TestMeanValues) {
function TYPED_TEST (line 306) | TYPED_TEST(DataTransformTest, TestMeanFile) {
FILE: caffe/src/caffe/test/test_db.cpp
type caffe (line 14) | namespace caffe {
class DBTest (line 19) | class DBTest : public ::testing::Test {
method DBTest (line 21) | DBTest()
method SetUp (line 25) | virtual void SetUp() {
type TypeLevelDB (line 50) | struct TypeLevelDB {
type TypeLMDB (line 55) | struct TypeLMDB {
function TYPED_TEST (line 65) | TYPED_TEST(DBTest, TestGetDB) {
function TYPED_TEST (line 69) | TYPED_TEST(DBTest, TestNext) {
function TYPED_TEST (line 80) | TYPED_TEST(DBTest, TestSeekToFirst) {
function TYPED_TEST (line 96) | TYPED_TEST(DBTest, TestKeyValue) {
function TYPED_TEST (line 120) | TYPED_TEST(DBTest, TestWrite) {
FILE: caffe/src/caffe/test/test_deconvolution_layer.cpp
type caffe (line 13) | namespace caffe {
class DeconvolutionLayerTest (line 18) | class DeconvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method DeconvolutionLayerTest (line 22) | DeconvolutionLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 55) | TYPED_TEST(DeconvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 91) | TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) {
function TYPED_TEST (line 139) | TYPED_TEST(DeconvolutionLayerTest, TestGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) {
function TYPED_TEST (line 275) | TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) {
FILE: caffe/src/caffe/test/test_deformconv_layer.cpp
type caffe (line 17) | namespace caffe {
class DeformConvLayerTest (line 20) | class DeformConvLayerTest : public MultiDeviceTest<TypeParam> {
method DeformConvLayerTest (line 24) | DeformConvLayerTest()
function TYPED_TEST (line 57) | TYPED_TEST(DeformConvLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_dummy_data_layer.cpp
type caffe (line 13) | namespace caffe {
class DummyDataLayerTest (line 16) | class DummyDataLayerTest : public CPUDeviceTest<Dtype> {
method DummyDataLayerTest (line 18) | DummyDataLayerTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 46) | TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
function TYPED_TEST (line 75) | TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) {
function TYPED_TEST (line 113) | TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) {
FILE: caffe/src/caffe/test/test_eltwise_layer.cpp
type caffe (line 14) | namespace caffe {
class EltwiseLayerTest (line 17) | class EltwiseLayerTest : public MultiDeviceTest<TypeParam> {
method EltwiseLayerTest (line 21) | EltwiseLayerTest()
function TYPED_TEST (line 54) | TYPED_TEST(EltwiseLayerTest, TestSetUp) {
function TYPED_TEST (line 68) | TYPED_TEST(EltwiseLayerTest, TestProd) {
function TYPED_TEST (line 87) | TYPED_TEST(EltwiseLayerTest, TestSum) {
function TYPED_TEST (line 106) | TYPED_TEST(EltwiseLayerTest, TestSumCoeff) {
function TYPED_TEST (line 129) | TYPED_TEST(EltwiseLayerTest, TestStableProdGradient) {
function TYPED_TEST (line 141) | TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) {
function TYPED_TEST (line 153) | TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
function TYPED_TEST (line 164) | TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 178) | TYPED_TEST(EltwiseLayerTest, TestMax) {
function TYPED_TEST (line 198) | TYPED_TEST(EltwiseLayerTest, TestMaxGradient) {
FILE: caffe/src/caffe/test/test_embed_layer.cpp
type caffe (line 13) | namespace caffe {
class EmbedLayerTest (line 16) | class EmbedLayerTest : public MultiDeviceTest<TypeParam> {
method EmbedLayerTest (line 19) | EmbedLayerTest()
function TYPED_TEST (line 38) | TYPED_TEST(EmbedLayerTest, TestSetUp) {
function TYPED_TEST (line 54) | TYPED_TEST(EmbedLayerTest, TestForward) {
function TYPED_TEST (line 93) | TYPED_TEST(EmbedLayerTest, TestForwardWithBias) {
function TYPED_TEST (line 137) | TYPED_TEST(EmbedLayerTest, TestGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(EmbedLayerTest, TestGradientWithBias) {
FILE: caffe/src/caffe/test/test_euclidean_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class EuclideanLossLayerTest (line 17) | class EuclideanLossLayerTest : public MultiDeviceTest<TypeParam> {
method EuclideanLossLayerTest (line 21) | EuclideanLossLayerTest()
method TestForward (line 40) | void TestForward() {
function TYPED_TEST (line 73) | TYPED_TEST(EuclideanLossLayerTest, TestForward) {
function TYPED_TEST (line 77) | TYPED_TEST(EuclideanLossLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_filler.cpp
type caffe (line 7) | namespace caffe {
class ConstantFillerTest (line 10) | class ConstantFillerTest : public ::testing::Test {
method ConstantFillerTest (line 12) | ConstantFillerTest()
function TYPED_TEST (line 27) | TYPED_TEST(ConstantFillerTest, TestFill) {
class UniformFillerTest (line 38) | class UniformFillerTest : public ::testing::Test {
method UniformFillerTest (line 40) | UniformFillerTest()
function TYPED_TEST (line 56) | TYPED_TEST(UniformFillerTest, TestFill) {
class PositiveUnitballFillerTest (line 67) | class PositiveUnitballFillerTest : public ::testing::Test {
method PositiveUnitballFillerTest (line 69) | PositiveUnitballFillerTest()
function TYPED_TEST (line 83) | TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
class GaussianFillerTest (line 104) | class GaussianFillerTest : public ::testing::Test {
method GaussianFillerTest (line 106) | GaussianFillerTest()
function TYPED_TEST (line 122) | TYPED_TEST(GaussianFillerTest, TestFill) {
class XavierFillerTest (line 144) | class XavierFillerTest : public ::testing::Test {
method XavierFillerTest (line 146) | XavierFillerTest()
method test_params (line 150) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 179) | TYPED_TEST(XavierFillerTest, TestFillFanIn) {
function TYPED_TEST (line 183) | TYPED_TEST(XavierFillerTest, TestFillFanOut) {
function TYPED_TEST (line 187) | TYPED_TEST(XavierFillerTest, TestFillAverage) {
class MSRAFillerTest (line 193) | class MSRAFillerTest : public ::testing::Test {
method MSRAFillerTest (line 195) | MSRAFillerTest()
method test_params (line 199) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 228) | TYPED_TEST(MSRAFillerTest, TestFillFanIn) {
function TYPED_TEST (line 232) | TYPED_TEST(MSRAFillerTest, TestFillFanOut) {
function TYPED_TEST (line 236) | TYPED_TEST(MSRAFillerTest, TestFillAverage) {
FILE: caffe/src/caffe/test/test_filter_layer.cpp
type caffe (line 13) | namespace caffe {
class FilterLayerTest (line 16) | class FilterLayerTest : public MultiDeviceTest<TypeParam> {
method FilterLayerTest (line 20) | FilterLayerTest()
method SetUp (line 26) | virtual void SetUp() {
function TYPED_TEST (line 67) | TYPED_TEST(FilterLayerTest, TestReshape) {
function TYPED_TEST (line 87) | TYPED_TEST(FilterLayerTest, TestForward) {
function TYPED_TEST (line 115) | TYPED_TEST(FilterLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_flatten_layer.cpp
type caffe (line 13) | namespace caffe {
class FlattenLayerTest (line 16) | class FlattenLayerTest : public MultiDeviceTest<TypeParam> {
method FlattenLayerTest (line 19) | FlattenLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(FlattenLayerTest, TestSetup) {
function TYPED_TEST (line 49) | TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) {
function TYPED_TEST (line 61) | TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) {
function TYPED_TEST (line 73) | TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) {
function TYPED_TEST (line 85) | TYPED_TEST(FlattenLayerTest, TestForward) {
function TYPED_TEST (line 99) | TYPED_TEST(FlattenLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_gradient_based_solver.cpp
type caffe (line 20) | namespace caffe {
class GradientBasedSolverTest (line 23) | class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
method GradientBasedSolverTest (line 27) | GradientBasedSolverTest() :
method InitSolverFromProtoString (line 54) | virtual void InitSolverFromProtoString(const string& proto) {
method string (line 72) | string RunLeastSquaresSolver(const Dtype learning_rate,
method ComputeLeastSquaresUpdate (line 228) | void ComputeLeastSquaresUpdate(const Dtype learning_rate,
method CheckLeastSquaresUpdate (line 353) | void CheckLeastSquaresUpdate(
method CheckAccumulation (line 403) | void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeigh...
method TestLeastSquaresUpdate (line 457) | void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0,
method TestSnapshot (line 510) | void TestSnapshot(const Dtype learning_rate = 1.0,
class SGDSolverTest (line 587) | class SGDSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 591) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 598) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) {
function TYPED_TEST (line 602) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) {
function TYPED_TEST (line 608) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 619) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIt...
function TYPED_TEST (line 630) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) {
function TYPED_TEST (line 641) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) {
function TYPED_TEST (line 652) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 663) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) {
function TYPED_TEST (line 675) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 686) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumSha...
function TYPED_TEST (line 698) | TYPED_TEST(SGDSolverTest, TestSnapshot) {
function TYPED_TEST (line 709) | TYPED_TEST(SGDSolverTest, TestSnapshotShare) {
class AdaGradSolverTest (line 723) | class AdaGradSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 727) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 734) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) {
function TYPED_TEST (line 738) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundre...
function TYPED_TEST (line 744) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 751) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 762) | TYPED_TEST(AdaGradSolverTest,
function TYPED_TEST (line 775) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 786) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 798) | TYPED_TEST(AdaGradSolverTest, TestSnapshot) {
function TYPED_TEST (line 809) | TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) {
class NesterovSolverTest (line 823) | class NesterovSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 827) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 834) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) {
function TYPED_TEST (line 838) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHund...
function TYPED_TEST (line 844) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 851) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 863) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 874) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 885) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 896) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 909) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 920) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 932) | TYPED_TEST(NesterovSolverTest, TestSnapshot) {
function TYPED_TEST (line 943) | TYPED_TEST(NesterovSolverTest, TestSnapshotShare) {
class AdaDeltaSolverTest (line 956) | class AdaDeltaSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 960) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 967) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) {
function TYPED_TEST (line 973) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 981) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfM...
function TYPED_TEST (line 992) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 1003) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 1014) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 1025) | TYPED_TEST(AdaDeltaSolverTest,
function TYPED_TEST (line 1038) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1049) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1061) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) {
function TYPED_TEST (line 1072) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) {
class AdamSolverTest (line 1085) | class AdamSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1089) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 1101) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) {
function TYPED_TEST (line 1109) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 1117) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 1128) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingSha...
function TYPED_TEST (line 1140) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 1151) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumSh...
function TYPED_TEST (line 1163) | TYPED_TEST(AdamSolverTest, TestSnapshot) {
function TYPED_TEST (line 1174) | TYPED_TEST(AdamSolverTest, TestSnapshotShare) {
class RMSPropSolverTest (line 1187) | class RMSPropSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1191) | virtual void InitSolver(const SolverParameter& param) {
function TYPED_TEST (line 1201) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 1208) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDeca...
function TYPED_TEST (line 1219) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 1230) | TYPED_TEST(RMSPropSolverTest,
function TYPED_TEST (line 1243) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1254) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1266) | TYPED_TEST(RMSPropSolverTest, TestSnapshot) {
function TYPED_TEST (line 1277) | TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) {
FILE: caffe/src/caffe/test/test_hdf5_output_layer.cpp
type caffe (line 15) | namespace caffe {
class HDF5OutputLayerTest (line 18) | class HDF5OutputLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5OutputLayerTest (line 22) | HDF5OutputLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(HDF5OutputLayerTest, TestForward) {
FILE: caffe/src/caffe/test/test_hdf5data_layer.cpp
type caffe (line 15) | namespace caffe {
class HDF5DataLayerTest (line 18) | class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5DataLayerTest (line 22) | HDF5DataLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 54) | TYPED_TEST(HDF5DataLayerTest, TestRead) {
function TYPED_TEST (line 135) | TYPED_TEST(HDF5DataLayerTest, TestSkip) {
FILE: caffe/src/caffe/test/test_hinge_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class HingeLossLayerTest (line 17) | class HingeLossLayerTest : public MultiDeviceTest<TypeParam> {
method HingeLossLayerTest (line 21) | HingeLossLayerTest()
function TYPED_TEST (line 53) | TYPED_TEST(HingeLossLayerTest, TestGradientL1) {
function TYPED_TEST (line 62) | TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
FILE: caffe/src/caffe/test/test_im2col_layer.cpp
type caffe (line 13) | namespace caffe {
class Im2colLayerTest (line 16) | class Im2colLayerTest : public MultiDeviceTest<TypeParam> {
method Im2colLayerTest (line 19) | Im2colLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(Im2colLayerTest, TestSetup) {
function TYPED_TEST (line 61) | TYPED_TEST(Im2colLayerTest, TestForward) {
function TYPED_TEST (line 78) | TYPED_TEST(Im2colLayerTest, TestGradient) {
function TYPED_TEST (line 91) | TYPED_TEST(Im2colLayerTest, TestDilatedGradient) {
function TYPED_TEST (line 111) | TYPED_TEST(Im2colLayerTest, TestGradientForceND) {
function TYPED_TEST (line 125) | TYPED_TEST(Im2colLayerTest, TestDilatedGradientForceND) {
function TYPED_TEST (line 146) | TYPED_TEST(Im2colLayerTest, TestRect) {
function TYPED_TEST (line 164) | TYPED_TEST(Im2colLayerTest, TestRectGradient) {
FILE: caffe/src/caffe/test/test_image_data_layer.cpp
type caffe (line 17) | namespace caffe {
class ImageDataLayerTest (line 20) | class ImageDataLayerTest : public MultiDeviceTest<TypeParam> {
method ImageDataLayerTest (line 24) | ImageDataLayerTest()
method SetUp (line 28) | virtual void SetUp() {
function TYPED_TEST (line 74) | TYPED_TEST(ImageDataLayerTest, TestRead) {
function TYPED_TEST (line 100) | TYPED_TEST(ImageDataLayerTest, TestResize) {
function TYPED_TEST (line 128) | TYPED_TEST(ImageDataLayerTest, TestReshape) {
function TYPED_TEST (line 155) | TYPED_TEST(ImageDataLayerTest, TestShuffle) {
function TYPED_TEST (line 189) | TYPED_TEST(ImageDataLayerTest, TestSpace) {
FILE: caffe/src/caffe/test/test_infogain_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class InfogainLossLayerTest (line 17) | class InfogainLossLayerTest : public MultiDeviceTest<TypeParam> {
method InfogainLossLayerTest (line 21) | InfogainLossLayerTest()
function TYPED_TEST (line 67) | TYPED_TEST(InfogainLossLayerTest, TestInfogainLoss) {
function TYPED_TEST (line 126) | TYPED_TEST(InfogainLossLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_inner_product_layer.cpp
type caffe (line 13) | namespace caffe {
class InnerProductLayerTest (line 20) | class InnerProductLayerTest : public MultiDeviceTest<TypeParam> {
method InnerProductLayerTest (line 23) | InnerProductLayerTest()
function TYPED_TEST (line 47) | TYPED_TEST(InnerProductLayerTest, TestSetUp) {
function TYPED_TEST (line 65) | TYPED_TEST(InnerProductLayerTest, TestSetUpTransposeFalse) {
function TYPED_TEST (line 87) | TYPED_TEST(InnerProductLayerTest, TestSetUpTransposeTrue) {
function TYPED_TEST (line 107) | TYPED_TEST(InnerProductLayerTest, TestForward) {
function TYPED_TEST (line 145) | TYPED_TEST(InnerProductLayerTest, TestForwardTranspose) {
function TYPED_TEST (line 211) | TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) {
function TYPED_TEST (line 242) | TYPED_TEST(InnerProductLayerTest, TestGradient) {
function TYPED_TEST (line 268) | TYPED_TEST(InnerProductLayerTest, TestGradientTranspose) {
function TYPED_TEST (line 295) | TYPED_TEST(InnerProductLayerTest, TestBackwardTranspose) {
FILE: caffe/src/caffe/test/test_internal_thread.cpp
type caffe (line 9) | namespace caffe {
class InternalThreadTest (line 12) | class InternalThreadTest : public ::testing::Test {}
function TEST_F (line 14) | TEST_F(InternalThreadTest, TestStartAndExit) {
class TestThreadA (line 23) | class TestThreadA : public InternalThread {
method InternalThreadEntry (line 24) | void InternalThreadEntry() {
class TestThreadB (line 29) | class TestThreadB : public InternalThread {
method InternalThreadEntry (line 30) | void InternalThreadEntry() {
function TEST_F (line 35) | TEST_F(InternalThreadTest, TestRandomSeed) {
FILE: caffe/src/caffe/test/test_io.cpp
type caffe (line 16) | namespace caffe {
class IOTest (line 18) | class IOTest : public ::testing::Test {}
function ReadImageToDatumReference (line 20) | bool ReadImageToDatumReference(const string& filename, const int label,
function TEST_F (line 65) | TEST_F(IOTest, TestReadImageToDatum) {
function TEST_F (line 74) | TEST_F(IOTest, TestReadImageToDatumReference) {
function TEST_F (line 93) | TEST_F(IOTest, TestReadImageToDatumReferenceResized) {
function TEST_F (line 111) | TEST_F(IOTest, TestReadImageToDatumContent) {
function TEST_F (line 132) | TEST_F(IOTest, TestReadImageToDatumContentGray) {
function TEST_F (line 151) | TEST_F(IOTest, TestReadImageToDatumResized) {
function TEST_F (line 161) | TEST_F(IOTest, TestReadImageToDatumResizedSquare) {
function TEST_F (line 170) | TEST_F(IOTest, TestReadImageToDatumGray) {
function TEST_F (line 180) | TEST_F(IOTest, TestReadImageToDatumResizedGray) {
function TEST_F (line 190) | TEST_F(IOTest, TestReadImageToCVMat) {
function TEST_F (line 198) | TEST_F(IOTest, TestReadImageToCVMatResized) {
function TEST_F (line 206) | TEST_F(IOTest, TestReadImageToCVMatResizedSquare) {
function TEST_F (line 214) | TEST_F(IOTest, TestReadImageToCVMatGray) {
function TEST_F (line 223) | TEST_F(IOTest, TestReadImageToCVMatResizedGray) {
function TEST_F (line 232) | TEST_F(IOTest, TestCVMatToDatum) {
function TEST_F (line 242) | TEST_F(IOTest, TestCVMatToDatumContent) {
function TEST_F (line 261) | TEST_F(IOTest, TestCVMatToDatumReference) {
function TEST_F (line 280) | TEST_F(IOTest, TestReadFileToDatum) {
function TEST_F (line 289) | TEST_F(IOTest, TestDecodeDatum) {
function TEST_F (line 309) | TEST_F(IOTest, TestDecodeDatumToCVMat) {
function TEST_F (line 323) | TEST_F(IOTest, TestDecodeDatumToCVMatContent) {
function TEST_F (line 343) | TEST_F(IOTest, TestDecodeDatumNative) {
function TEST_F (line 363) | TEST_F(IOTest, TestDecodeDatumToCVMatNative) {
function TEST_F (line 373) | TEST_F(IOTest, TestDecodeDatumNativeGray) {
function TEST_F (line 393) | TEST_F(IOTest, TestDecodeDatumToCVMatNativeGray) {
function TEST_F (line 403) | TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) {
FILE: caffe/src/caffe/test/test_layer_factory.cpp
type caffe (line 15) | namespace caffe {
class LayerFactoryTest (line 18) | class LayerFactoryTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 22) | TYPED_TEST(LayerFactoryTest, TestCreateLayer) {
FILE: caffe/src/caffe/test/test_lrn_layer.cpp
type caffe (line 22) | namespace caffe {
class LRNLayerTest (line 25) | class LRNLayerTest : public MultiDeviceTest<TypeParam> {
method LRNLayerTest (line 29) | LRNLayerTest()
method SetUp (line 33) | virtual void SetUp() {
function TYPED_TEST (line 119) | TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
function TYPED_TEST (line 130) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 145) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) {
function TYPED_TEST (line 161) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) {
function TYPED_TEST (line 182) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) {
function TYPED_TEST (line 204) | TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
function TYPED_TEST (line 218) | TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) {
function TYPED_TEST (line 236) | TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) {
class CuDNNLRNLayerTest (line 255) | class CuDNNLRNLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNLRNLayerTest (line 257) | CuDNNLRNLayerTest()
method SetUp (line 261) | virtual void SetUp() {
function TYPED_TEST (line 347) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsCuDNN) {
function TYPED_TEST (line 362) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardAcrossChannelsLargeRegionCuDN...
function TYPED_TEST (line 378) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsCuDNN) {
function TYPED_TEST (line 395) | TYPED_TEST(CuDNNLRNLayerTest, TestForwardWithinChannel) {
function TYPED_TEST (line 413) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientWithinChannel) {
function TYPED_TEST (line 430) | TYPED_TEST(CuDNNLRNLayerTest, TestGradientAcrossChannelsLargeRegionCuD...
FILE: caffe/src/caffe/test/test_lstm_new_layer.cpp
type caffe (line 14) | namespace caffe {
class LSTMNewLayerTest (line 17) | class LSTMNewLayerTest : public MultiDeviceTest<TypeParam> {
method LSTMNewLayerTest (line 21) | LSTMNewLayerTest() : num_output_(7) {
method ReshapeBlobs (line 46) | void ReshapeBlobs(int num_timesteps, int num_instances) {
function TYPED_TEST (line 92) | TYPED_TEST(LSTMNewLayerTest, TestSetUp) {
function TYPED_TEST (line 102) | TYPED_TEST(LSTMNewLayerTest, TestForward) {
function TYPED_TEST (line 188) | TYPED_TEST(LSTMNewLayerTest, TestLSTMUnitSetUp) {
function TYPED_TEST (line 204) | TYPED_TEST(LSTMNewLayerTest, TestLSTMUnitGradient) {
function TYPED_TEST (line 219) | TYPED_TEST(LSTMNewLayerTest, TestLSTMUnitGradientNonZeroCont) {
function TYPED_TEST (line 234) | TYPED_TEST(LSTMNewLayerTest, TestGradient) {
function TYPED_TEST (line 242) | TYPED_TEST(LSTMNewLayerTest, TestGradientNonZeroCont) {
function TYPED_TEST (line 253) | TYPED_TEST(LSTMNewLayerTest, TestGradientNonZeroContBufferSize2) {
function TYPED_TEST (line 268) | TYPED_TEST(LSTMNewLayerTest, TestGradientNonZeroContBufferSize2WithSta...
FILE: caffe/src/caffe/test/test_math_functions.cpp
type caffe (line 14) | namespace caffe {
class MathFunctionsTest (line 17) | class MathFunctionsTest : public MultiDeviceTest<TypeParam> {
method MathFunctionsTest (line 21) | MathFunctionsTest()
method SetUp (line 26) | virtual void SetUp() {
class CPUMathFunctionsTest (line 47) | class CPUMathFunctionsTest
function TYPED_TEST (line 53) | TYPED_TEST(CPUMathFunctionsTest, TestNothing) {
function TYPED_TEST (line 58) | TYPED_TEST(CPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 69) | TYPED_TEST(CPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 79) | TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 89) | TYPED_TEST(CPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 99) | TYPED_TEST(CPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 112) | TYPED_TEST(CPUMathFunctionsTest, TestCopy) {
class GPUMathFunctionsTest (line 125) | class GPUMathFunctionsTest : public MathFunctionsTest<GPUDevice<Dtype>...
function TYPED_TEST (line 130) | TYPED_TEST(GPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 142) | TYPED_TEST(GPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 153) | TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 164) | TYPED_TEST(GPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 175) | TYPED_TEST(GPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 188) | TYPED_TEST(GPUMathFunctionsTest, TestCopy) {
FILE: caffe/src/caffe/test/test_maxpool_dropout_layers.cpp
type caffe (line 14) | namespace caffe {
class MaxPoolingDropoutTest (line 17) | class MaxPoolingDropoutTest : public MultiDeviceTest<TypeParam> {
method MaxPoolingDropoutTest (line 20) | MaxPoolingDropoutTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 43) | TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
function TYPED_TEST (line 60) | TYPED_TEST(MaxPoolingDropoutTest, TestForward) {
function TYPED_TEST (line 89) | TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
FILE: caffe/src/caffe/test/test_memory_data_layer.cpp
type caffe (line 13) | namespace caffe {
class MemoryDataLayerTest (line 16) | class MemoryDataLayerTest : public MultiDeviceTest<TypeParam> {
method MemoryDataLayerTest (line 20) | MemoryDataLayerTest()
method SetUp (line 25) | virtual void SetUp() {
function TYPED_TEST (line 66) | TYPED_TEST(MemoryDataLayerTest, TestSetup) {
function TYPED_TEST (line 89) | TYPED_TEST(MemoryDataLayerTest, TestForward) {
function TYPED_TEST (line 119) | TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) {
function TYPED_TEST (line 173) | TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) {
function TYPED_TEST (line 219) | TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) {
FILE: caffe/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
type caffe (line 13) | namespace caffe {
class MultinomialLogisticLossLayerTest (line 16) | class MultinomialLogisticLossLayerTest : public CPUDeviceTest<Dtype> {
method MultinomialLogisticLossLayerTest (line 18) | MultinomialLogisticLossLayerTest()
function TYPED_TEST (line 49) | TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
FILE: caffe/src/caffe/test/test_mvn_layer.cpp
type caffe (line 13) | namespace caffe {
class MVNLayerTest (line 16) | class MVNLayerTest : public MultiDeviceTest<TypeParam> {
method MVNLayerTest (line 19) | MVNLayerTest()
function TYPED_TEST (line 38) | TYPED_TEST(MVNLayerTest, TestForward) {
function TYPED_TEST (line 72) | TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) {
function TYPED_TEST (line 105) | TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 141) | TYPED_TEST(MVNLayerTest, TestGradient) {
function TYPED_TEST (line 150) | TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) {
function TYPED_TEST (line 161) | TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) {
FILE: caffe/src/caffe/test/test_net.cpp
type caffe (line 18) | namespace caffe {
class NetTest (line 21) | class NetTest : public MultiDeviceTest<TypeParam> {
method NetTest (line 25) | NetTest() : seed_(1701) {}
method InitNetFromProtoString (line 27) | virtual void InitNetFromProtoString(const string& proto) {
method InitNetFromProtoFileWithState (line 33) | virtual void InitNetFromProtoFileWithState(const string& proto,
method CopyNetBlobs (line 44) | virtual void CopyNetBlobs(const bool copy_diff,
method CopyNetParams (line 57) | virtual void CopyNetParams(const bool copy_diff,
method InitTinyNet (line 70) | virtual void InitTinyNet(const bool force_backward = false,
method InitTinyNetEuclidean (line 147) | virtual void InitTinyNetEuclidean(const bool force_backward = false) {
method InitTrickyNet (line 207) | virtual void InitTrickyNet(Dtype* loss_weight = NULL) {
method InitUnsharedWeightsNet (line 298) | virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL,
method InitSharedWeightsNet (line 386) | virtual void InitSharedWeightsNet() {
method InitDiffDataUnsharedWeightsNet (line 443) | virtual void InitDiffDataUnsharedWeightsNet() {
method InitDiffDataSharedWeightsNet (line 505) | virtual void InitDiffDataSharedWeightsNet() {
method InitReshapableNet (line 567) | virtual void InitReshapableNet() {
method InitSkipPropNet (line 632) | virtual void InitSkipPropNet(bool test_skip_true) {
method InitForcePropNet (line 731) | virtual void InitForcePropNet(bool test_force_true) {
method InitAllInOneNet (line 786) | virtual void InitAllInOneNet(Phase phase = caffe::TRAIN,
function TYPED_TEST (line 848) | TYPED_TEST(NetTest, TestHasBlob) {
function TYPED_TEST (line 857) | TYPED_TEST(NetTest, TestGetBlob) {
function TYPED_TEST (line 866) | TYPED_TEST(NetTest, TestHasLayer) {
function TYPED_TEST (line 874) | TYPED_TEST(NetTest, TestGetLayerByName) {
function TYPED_TEST (line 882) | TYPED_TEST(NetTest, TestBottomNeedBackward) {
function TYPED_TEST (line 895) | TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
function TYPED_TEST (line 909) | TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) {
function TYPED_TEST (line 923) | TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) {
function TYPED_TEST (line 941) | TYPED_TEST(NetTest, TestLossWeight) {
function TYPED_TEST (line 992) | TYPED_TEST(NetTest, TestLossWeightMidNet) {
function TYPED_TEST (line 1029) | TYPED_TEST(NetTest, TestComboLossWeight) {
function TYPED_TEST (line 1158) | TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) {
function TYPED_TEST (line 1167) | TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
function TYPED_TEST (line 1175) | TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
function TYPED_TEST (line 1183) | TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
function TYPED_TEST (line 1200) | TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
function TYPED_TEST (line 1219) | TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
function TYPED_TEST (line 1300) | TYPED_TEST(NetTest, TestSharedWeightsResume) {
function TYPED_TEST (line 1345) | TYPED_TEST(NetTest, TestParamPropagateDown) {
function TYPED_TEST (line 1426) | TYPED_TEST(NetTest, TestFromTo) {
class FilterNetTest (line 1460) | class FilterNetTest : public ::testing::Test {
method RunFilterNetTest (line 1462) | void RunFilterNetTest(
function TEST_F (line 1482) | TEST_F(FilterNetTest, TestNoFilter) {
function TEST_F (line 1506) | TEST_F(FilterNetTest, TestFilterLeNetTrainTest) {
function TEST_F (line 1748) | TEST_F(FilterNetTest, TestFilterOutByStage) {
function TEST_F (line 1787) | TEST_F(FilterNetTest, TestFilterOutByStage2) {
function TEST_F (line 1826) | TEST_F(FilterNetTest, TestFilterInByStage) {
function TEST_F (line 1852) | TEST_F(FilterNetTest, TestFilterInByStage2) {
function TEST_F (line 1877) | TEST_F(FilterNetTest, TestFilterOutByMultipleStage) {
function TEST_F (line 1920) | TEST_F(FilterNetTest, TestFilterInByMultipleStage) {
function TEST_F (line 1948) | TEST_F(FilterNetTest, TestFilterInByMultipleStage2) {
function TEST_F (line 1975) | TEST_F(FilterNetTest, TestFilterInByNotStage) {
function TEST_F (line 2002) | TEST_F(FilterNetTest, TestFilterOutByNotStage) {
function TEST_F (line 2038) | TEST_F(FilterNetTest, TestFilterOutByMinLevel) {
function TEST_F (line 2077) | TEST_F(FilterNetTest, TestFilterOutByMaxLevel) {
function TEST_F (line 2116) | TEST_F(FilterNetTest, TestFilterInByMinLevel) {
function TEST_F (line 2141) | TEST_F(FilterNetTest, TestFilterInByMinLevel2) {
function TEST_F (line 2167) | TEST_F(FilterNetTest, TestFilterInByMaxLevel) {
function TEST_F (line 2192) | TEST_F(FilterNetTest, TestFilterInByMaxLevel2) {
function TEST_F (line 2218) | TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) {
function TEST_F (line 2281) | TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) {
function TEST_F (line 2314) | TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) {
function TYPED_TEST (line 2377) | TYPED_TEST(NetTest, TestReshape) {
function TYPED_TEST (line 2449) | TYPED_TEST(NetTest, TestSkipPropagateDown) {
function TYPED_TEST (line 2497) | TYPED_TEST(NetTest, TestForcePropagateDown) {
function TYPED_TEST (line 2544) | TYPED_TEST(NetTest, TestAllInOneNetTrain) {
function TYPED_TEST (line 2565) | TYPED_TEST(NetTest, TestAllInOneNetVal) {
function TYPED_TEST (line 2586) | TYPED_TEST(NetTest, TestAllInOneNetDeploy) {
FILE: caffe/src/caffe/test/test_neuron_layer.cpp
type caffe (line 34) | namespace caffe {
class NeuronLayerTest (line 37) | class NeuronLayerTest : public MultiDeviceTest<TypeParam> {
method NeuronLayerTest (line 41) | NeuronLayerTest()
method TestDropoutForward (line 58) | void TestDropoutForward(const float dropout_ratio) {
method TestExpForward (line 90) | void TestExpForward(const float base, const float scale, const float...
method TestExpGradient (line 112) | void TestExpGradient(const float base, const float scale, const floa...
method TestPReLU (line 122) | void TestPReLU(PReLULayer<Dtype> *layer) {
method LogBottomInit (line 139) | void LogBottomInit() {
method TestLogForward (line 147) | void TestLogForward(const float base, const float scale, const float...
method TestLogGradient (line 171) | void TestLogGradient(const float base, const float scale, const floa...
function TYPED_TEST (line 185) | TYPED_TEST(NeuronLayerTest, TestAbsVal) {
function TYPED_TEST (line 199) | TYPED_TEST(NeuronLayerTest, TestAbsGradient) {
function TYPED_TEST (line 208) | TYPED_TEST(NeuronLayerTest, TestReLU) {
function TYPED_TEST (line 223) | TYPED_TEST(NeuronLayerTest, TestReLUGradient) {
function TYPED_TEST (line 232) | TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) {
function TYPED_TEST (line 252) | TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) {
function TYPED_TEST (line 263) | TYPED_TEST(NeuronLayerTest, TestELU) {
function TYPED_TEST (line 284) | TYPED_TEST(NeuronLayerTest, TestELUasReLU) {
function TYPED_TEST (line 301) | TYPED_TEST(NeuronLayerTest, TestELUGradient) {
function TYPED_TEST (line 310) | TYPED_TEST(NeuronLayerTest, TestELUasReLUGradient) {
function TYPED_TEST (line 321) | TYPED_TEST(NeuronLayerTest, TestSigmoid) {
function TYPED_TEST (line 338) | TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
function TYPED_TEST (line 347) | TYPED_TEST(NeuronLayerTest, TestTanH) {
function TYPED_TEST (line 370) | TYPED_TEST(NeuronLayerTest, TestTanHGradient) {
function TYPED_TEST (line 379) | TYPED_TEST(NeuronLayerTest, TestExpLayer) {
function TYPED_TEST (line 388) | TYPED_TEST(NeuronLayerTest, TestExpGradient) {
function TYPED_TEST (line 397) | TYPED_TEST(NeuronLayerTest, TestExpLayerWithShift) {
function TYPED_TEST (line 407) | TYPED_TEST(NeuronLayerTest, TestExpGradientWithShift) {
function TYPED_TEST (line 417) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2) {
function TYPED_TEST (line 425) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2) {
function TYPED_TEST (line 433) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1) {
function TYPED_TEST (line 441) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1) {
function TYPED_TEST (line 449) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Scale3) {
function TYPED_TEST (line 457) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Scale3) {
function TYPED_TEST (line 465) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1Scale3) {
function TYPED_TEST (line 473) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) {
function TYPED_TEST (line 481) | TYPED_TEST(NeuronLayerTest, TestLogLayer) {
function TYPED_TEST (line 490) | TYPED_TEST(NeuronLayerTest, TestLogGradient) {
function TYPED_TEST (line 499) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) {
function TYPED_TEST (line 507) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) {
function TYPED_TEST (line 515) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) {
function TYPED_TEST (line 523) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) {
function TYPED_TEST (line 531) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) {
function TYPED_TEST (line 539) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) {
function TYPED_TEST (line 547) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) {
function TYPED_TEST (line 555) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) {
function TYPED_TEST (line 563) | TYPED_TEST(NeuronLayerTest, TestDropoutHalf) {
function TYPED_TEST (line 568) | TYPED_TEST(NeuronLayerTest, TestDropoutThreeQuarters) {
function TYPED_TEST (line 573) | TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) {
function TYPED_TEST (line 590) | TYPED_TEST(NeuronLayerTest, TestDropoutGradient) {
function TYPED_TEST (line 600) | TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
function TYPED_TEST (line 610) | TYPED_TEST(NeuronLayerTest, TestBNLL) {
function TYPED_TEST (line 625) | TYPED_TEST(NeuronLayerTest, TestBNLLGradient) {
function TYPED_TEST (line 634) | TYPED_TEST(NeuronLayerTest, TestPReLUParam) {
function TYPED_TEST (line 646) | TYPED_TEST(NeuronLayerTest, TestPReLUForward) {
function TYPED_TEST (line 657) | TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) {
function TYPED_TEST (line 666) | TYPED_TEST(NeuronLayerTest, TestPReLUGradient) {
function TYPED_TEST (line 679) | TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) {
function TYPED_TEST (line 690) | TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) {
function TYPED_TEST (line 733) | TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) {
class CuDNNNeuronLayerTest (line 813) | class CuDNNNeuronLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNNeuronLayerTest (line 815) | CuDNNNeuronLayerTest()
function TYPED_TEST (line 835) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) {
function TYPED_TEST (line 849) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) {
function TYPED_TEST (line 857) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) {
function TYPED_TEST (line 876) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDN...
function TYPED_TEST (line 886) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) {
function TYPED_TEST (line 902) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) {
function TYPED_TEST (line 910) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) {
function TYPED_TEST (line 932) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) {
FILE: caffe/src/caffe/test/test_platform.cpp
type caffe (line 11) | namespace caffe {
class PlatformTest (line 15) | class PlatformTest : public ::testing::Test {}
function TEST_F (line 17) | TEST_F(PlatformTest, TestInitialization) {
FILE: caffe/src/caffe/test/test_point_bilinear_layer.cpp
type caffe (line 17) | namespace caffe {
class PointBilinearLayerTest (line 20) | class PointBilinearLayerTest : public MultiDeviceTest<TypeParam> {
method PointBilinearLayerTest (line 24) | PointBilinearLayerTest()
function TYPED_TEST (line 64) | TYPED_TEST(PointBilinearLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_pooling_layer.cpp
type caffe (line 17) | namespace caffe {
class PoolingLayerTest (line 20) | class PoolingLayerTest : public MultiDeviceTest<TypeParam> {
method PoolingLayerTest (line 24) | PoolingLayerTest()
method SetUp (line 28) | virtual void SetUp() {
method TestForwardSquare (line 49) | void TestForwardSquare() {
method TestForwardRectHigh (line 121) | void TestForwardRectHigh() {
method TestForwardRectWide (line 246) | void TestForwardRectWide() {
function TYPED_TEST (line 376) | TYPED_TEST(PoolingLayerTest, TestSetup) {
function TYPED_TEST (line 390) | TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
function TYPED_TEST (line 406) | TYPED_TEST(PoolingLayerTest, TestSetupGlobalPooling) {
function TYPED_TEST (line 446) | TYPED_TEST(PoolingLayerTest, TestForwardMax) {
function TYPED_TEST (line 452) | TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) {
function TYPED_TEST (line 459) | TYPED_TEST(PoolingLayerTest, TestGradientMax) {
function TYPED_TEST (line 478) | TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) {
function TYPED_TEST (line 523) | TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) {
function TYPED_TEST (line 543) | TYPED_TEST(PoolingLayerTest, TestForwardAve) {
function TYPED_TEST (line 575) | TYPED_TEST(PoolingLayerTest, TestGradientAve) {
function TYPED_TEST (line 593) | TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) {
class CuDNNPoolingLayerTest (line 614) | class CuDNNPoolingLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNPoolingLayerTest (line 616) | CuDNNPoolingLayerTest()
method SetUp (line 620) | virtual void SetUp() {
method TestForwardSquare (line 641) | void TestForwardSquare() {
method TestForwardRectHigh (line 713) | void TestForwardRectHigh() {
method TestForwardRectWide (line 838) | void TestForwardRectWide() {
function TYPED_TEST (line 968) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 981) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) {
function TYPED_TEST (line 1022) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) {
function TYPED_TEST (line 1039) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) {
function TYPED_TEST (line 1058) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) {
function TYPED_TEST (line 1123) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) {
function TYPED_TEST (line 1148) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) {
function TYPED_TEST (line 1165) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) {
FILE: caffe/src/caffe/test/test_power_layer.cpp
type caffe (line 14) | namespace caffe {
class PowerLayerTest (line 17) | class PowerLayerTest : public MultiDeviceTest<TypeParam> {
method PowerLayerTest (line 21) | PowerLayerTest()
method TestForward (line 34) | void TestForward(Dtype power, Dtype scale, Dtype shift) {
method TestBackward (line 61) | void TestBackward(Dtype power, Dtype scale, Dtype shift) {
function TYPED_TEST (line 90) | TYPED_TEST(PowerLayerTest, TestPower) {
function TYPED_TEST (line 98) | TYPED_TEST(PowerLayerTest, TestPowerGradient) {
function TYPED_TEST (line 106) | TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) {
function TYPED_TEST (line 114) | TYPED_TEST(PowerLayerTest, TestPowerZero) {
function TYPED_TEST (line 122) | TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) {
function TYPED_TEST (line 130) | TYPED_TEST(PowerLayerTest, TestPowerOne) {
function TYPED_TEST (line 138) | TYPED_TEST(PowerLayerTest, TestPowerOneGradient) {
function TYPED_TEST (line 146) | TYPED_TEST(PowerLayerTest, TestPowerTwo) {
function TYPED_TEST (line 154) | TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) {
function TYPED_TEST (line 162) | TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) {
FILE: caffe/src/caffe/test/test_protobuf.cpp
type caffe (line 12) | namespace caffe {
class ProtoTest (line 14) | class ProtoTest : public ::testing::Test {}
function TEST_F (line 16) | TEST_F(ProtoTest, TestSerialization) {
FILE: caffe/src/caffe/test/test_random_number_generator.cpp
type caffe (line 11) | namespace caffe {
class RandomNumberGeneratorTest (line 14) | class RandomNumberGeneratorTest : public ::testing::Test {
method RandomNumberGeneratorTest (line 16) | RandomNumberGeneratorTest()
method SetUp (line 25) | virtual void SetUp() {
method Dtype (line 29) | Dtype sample_mean(const Dtype* const seqs, const int sample_size) {
method Dtype (line 37) | Dtype sample_mean(const Dtype* const seqs) {
method Dtype (line 41) | Dtype sample_mean(const int* const seqs, const int sample_size) {
method Dtype (line 49) | Dtype sample_mean(const int* const seqs) {
method Dtype (line 53) | Dtype mean_bound(const Dtype std, const int sample_size) {
method Dtype (line 57) | Dtype mean_bound(const Dtype std) {
method RngGaussianFill (line 61) | void RngGaussianFill(const Dtype mu, const Dtype sigma, void* cpu_da...
method RngGaussianChecks (line 66) | void RngGaussianChecks(const Dtype mu, const Dtype sigma,
method RngUniformFill (line 104) | void RngUniformFill(const Dtype lower, const Dtype upper, void* cpu_...
method RngUniformChecks (line 110) | void RngUniformChecks(const Dtype lower, const Dtype upper,
method RngBernoulliFill (line 157) | void RngBernoulliFill(const Dtype p, void* cpu_data) {
method RngBernoulliChecks (line 162) | void RngBernoulliChecks(const Dtype p, const void* cpu_data) {
method RngGaussianFillGPU (line 173) | void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu...
method RngUniformFillGPU (line 178) | void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* g...
method RngUniformIntFillGPU (line 186) | void RngUniformIntFillGPU(void* gpu_data) {
function TYPED_TEST (line 209) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) {
function TYPED_TEST (line 218) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2) {
function TYPED_TEST (line 227) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform) {
function TYPED_TEST (line 236) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2) {
function TYPED_TEST (line 245) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli) {
function TYPED_TEST (line 253) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli2) {
function TYPED_TEST (line 261) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) {
function TYPED_TEST (line 287) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) {
function TYPED_TEST (line 315) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) {
function TYPED_TEST (line 340) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) {
function TYPED_TEST (line 365) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) {
function TYPED_TEST (line 400) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianGPU) {
function TYPED_TEST (line 410) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2GPU) {
function TYPED_TEST (line 420) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformGPU) {
function TYPED_TEST (line 430) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) {
function TYPED_TEST (line 440) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) {
function TYPED_TEST (line 457) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) {
function TYPED_TEST (line 488) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) {
FILE: caffe/src/caffe/test/test_reduction_layer.cpp
type caffe (line 13) | namespace caffe {
class ReductionLayerTest (line 16) | class ReductionLayerTest : public MultiDeviceTest<TypeParam> {
method ReductionLayerTest (line 20) | ReductionLayerTest()
method TestForward (line 36) | void TestForward(ReductionParameter_ReductionOp op,
method TestGradient (line 80) | void TestGradient(ReductionParameter_ReductionOp op,
function TYPED_TEST (line 102) | TYPED_TEST(ReductionLayerTest, TestSetUp) {
function TYPED_TEST (line 111) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis1) {
function TYPED_TEST (line 122) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis2) {
function TYPED_TEST (line 134) | TYPED_TEST(ReductionLayerTest, TestSum) {
function TYPED_TEST (line 139) | TYPED_TEST(ReductionLayerTest, TestSumCoeff) {
function TYPED_TEST (line 145) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) {
function TYPED_TEST (line 152) | TYPED_TEST(ReductionLayerTest, TestSumGradient) {
function TYPED_TEST (line 157) | TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 163) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) {
function TYPED_TEST (line 170) | TYPED_TEST(ReductionLayerTest, TestMean) {
function TYPED_TEST (line 176) | TYPED_TEST(ReductionLayerTest, TestMeanCoeff) {
function TYPED_TEST (line 183) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) {
function TYPED_TEST (line 191) | TYPED_TEST(ReductionLayerTest, TestMeanGradient) {
function TYPED_TEST (line 197) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradient) {
function TYPED_TEST (line 204) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) {
function TYPED_TEST (line 212) | TYPED_TEST(ReductionLayerTest, TestAbsSum) {
function TYPED_TEST (line 218) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeff) {
function TYPED_TEST (line 225) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) {
function TYPED_TEST (line 233) | TYPED_TEST(ReductionLayerTest, TestAbsSumGradient) {
function TYPED_TEST (line 239) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffGradient) {
function TYPED_TEST (line 246) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) {
function TYPED_TEST (line 254) | TYPED_TEST(ReductionLayerTest, TestSumOfSquares) {
function TYPED_TEST (line 260) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeff) {
function TYPED_TEST (line 267) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) {
function TYPED_TEST (line 275) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresGradient) {
function TYPED_TEST (line 281) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffGradient) {
function TYPED_TEST (line 288) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) {
FILE: caffe/src/caffe/test/test_reshape_layer.cpp
type caffe (line 13) | namespace caffe {
class ReshapeLayerTest (line 16) | class ReshapeLayerTest : public MultiDeviceTest<TypeParam> {
method ReshapeLayerTest (line 19) | ReshapeLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(ReshapeLayerTest, TestFlattenOutputSizes) {
function TYPED_TEST (line 57) | TYPED_TEST(ReshapeLayerTest, TestFlattenValues) {
function TYPED_TEST (line 78) | TYPED_TEST(ReshapeLayerTest, TestCopyDimensions) {
function TYPED_TEST (line 97) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecified) {
function TYPED_TEST (line 117) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecifiedWithStartAxis) {
function TYPED_TEST (line 136) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesStart) {
function TYPED_TEST (line 159) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesMiddle) {
function TYPED_TEST (line 182) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesEnd) {
function TYPED_TEST (line 205) | TYPED_TEST(ReshapeLayerTest, TestFlattenMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(ReshapeLayerTest, TestForward) {
function TYPED_TEST (line 239) | TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) {
function TYPED_TEST (line 265) | TYPED_TEST(ReshapeLayerTest, TestGradient) {
FILE: caffe/src/caffe/test/test_rnn_layer.cpp
type caffe (line 14) | namespace caffe {
class RNNLayerTest (line 17) | class RNNLayerTest : public MultiDeviceTest<TypeParam> {
method RNNLayerTest (line 21) | RNNLayerTest() : num_output_(7) {
method ReshapeBlobs (line 41) | void ReshapeBlobs(int num_timesteps, int num_instances) {
function TYPED_TEST (line 68) | TYPED_TEST(RNNLayerTest, TestSetUp) {
function TYPED_TEST (line 78) | TYPED_TEST(RNNLayerTest, TestForward) {
function TYPED_TEST (line 163) | TYPED_TEST(RNNLayerTest, TestGradient) {
function TYPED_TEST (line 171) | TYPED_TEST(RNNLayerTest, TestGradientNonZeroCont) {
function TYPED_TEST (line 182) | TYPED_TEST(RNNLayerTest, TestGradientNonZeroContBufferSize2) {
function TYPED_TEST (line 198) | TYPED_TEST(RNNLayerTest, TestGradientNonZeroContBufferSize2WithStaticI...
FILE: caffe/src/caffe/test/test_scale_layer.cpp
type caffe (line 14) | namespace caffe {
class ScaleLayerTest (line 17) | class ScaleLayerTest : public MultiDeviceTest<TypeParam> {
method ScaleLayerTest (line 21) | ScaleLayerTest()
function TYPED_TEST (line 72) | TYPED_TEST(ScaleLayerTest, TestForwardEltwise) {
function TYPED_TEST (line 90) | TYPED_TEST(ScaleLayerTest, TestForwardEltwiseInPlace) {
function TYPED_TEST (line 110) | TYPED_TEST(ScaleLayerTest, TestBackwardEltwiseInPlace) {
function TYPED_TEST (line 156) | TYPED_TEST(ScaleLayerTest, TestForwardEltwiseWithParam) {
function TYPED_TEST (line 176) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastBegin) {
function TYPED_TEST (line 199) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddle) {
function TYPED_TEST (line 222) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 247) | TYPED_TEST(ScaleLayerTest, TestBackwardBroadcastMiddleInPlace) {
function TYPED_TEST (line 293) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParam) {
function TYPED_TEST (line 317) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastMiddleWithParamAndBias) {
function TYPED_TEST (line 344) | TYPED_TEST(ScaleLayerTest, TestForwardBroadcastEnd) {
function TYPED_TEST (line 367) | TYPED_TEST(ScaleLayerTest, TestForwardScale) {
function TYPED_TEST (line 384) | TYPED_TEST(ScaleLayerTest, TestForwardScaleAxis2) {
function TYPED_TEST (line 402) | TYPED_TEST(ScaleLayerTest, TestGradientEltwise) {
function TYPED_TEST (line 413) | TYPED_TEST(ScaleLayerTest, TestGradientEltwiseWithParam) {
function TYPED_TEST (line 426) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastBegin) {
function TYPED_TEST (line 437) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastMiddle) {
function TYPED_TEST (line 448) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastMiddleWithParam) {
function TYPED_TEST (line 462) | TYPED_TEST(ScaleLayerTest, TestGradientBroadcastEnd) {
function TYPED_TEST (line 473) | TYPED_TEST(ScaleLayerTest, TestGradientScale) {
function TYPED_TEST (line 483) | TYPED_TEST(ScaleLayerTest, TestGradientScaleAndBias) {
function TYPED_TEST (line 496) | TYPED_TEST(ScaleLayerTest, TestGradientScaleAxis2) {
FILE: caffe/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 14) | namespace caffe {
class SigmoidCrossEntropyLossLayerTest (line 17) | class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest<TypePa...
method SigmoidCrossEntropyLossLayerTest (line 21) | SigmoidCrossEntropyLossLayerTest()
method Dtype (line 46) | Dtype SigmoidCrossEntropyLossReference(const int count, const int num,
method TestForward (line 62) | void TestForward() {
function TYPED_TEST (line 103) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLo...
function TYPED_TEST (line 107) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) {
function TYPED_TEST (line 119) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestIgnoreGradient) {
FILE: caffe/src/caffe/test/test_slice_layer.cpp
type caffe (line 13) | namespace caffe {
class SliceLayerTest (line 16) | class SliceLayerTest : public MultiDeviceTest<TypeParam> {
method SliceLayerTest (line 20) | SliceLayerTest()
method SetUp (line 25) | virtual void SetUp() {
method ReduceBottomBlobSize (line 39) | virtual void ReduceBottomBlobSize() {
function TYPED_TEST (line 61) | TYPED_TEST(SliceLayerTest, TestSetupNum) {
function TYPED_TEST (line 75) | TYPED_TEST(SliceLayerTest, TestSetupChannels) {
function TYPED_TEST (line 90) | TYPED_TEST(SliceLayerTest, TestTrivialSlice) {
function TYPED_TEST (line 105) | TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) {
function TYPED_TEST (line 135) | TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) {
function TYPED_TEST (line 178) | TYPED_TEST(SliceLayerTest, TestGradientTrivial) {
function TYPED_TEST (line 190) | TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) {
function TYPED_TEST (line 202) | TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
FILE: caffe/src/caffe/test/test_softmax_layer.cpp
type caffe (line 18) | namespace caffe {
class SoftmaxLayerTest (line 21) | class SoftmaxLayerTest : public MultiDeviceTest<TypeParam> {
method SoftmaxLayerTest (line 24) | SoftmaxLayerTest()
function TYPED_TEST (line 43) | TYPED_TEST(
Copy disabled (too large)
Download .json
Condensed preview — 721 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (11,161K chars).
[
{
"path": ".idea/misc.xml",
"chars": 686,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n <component name=\"ProjectLevelVcsManager\" settingsEditedMa"
},
{
"path": ".idea/modules.xml",
"chars": 274,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n <component name=\"ProjectModuleManager\">\n <modules>\n "
},
{
"path": ".idea/textspotter.iml",
"chars": 512,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n <component name=\"NewModuleRootManager"
},
{
"path": ".idea/vcs.xml",
"chars": 180,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n <component name=\"VcsDirectoryMappings\">\n <mapping dire"
},
{
"path": ".idea/workspace.xml",
"chars": 29584,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n <component name=\"ChangeListManager\">\n <list default=\"t"
},
{
"path": "README.md",
"chars": 3769,
"preview": "# An End-to-End TextSpotter with Explicit Alignment and Attention\n\nThis is initially described in our [CVPR 2018 paper]("
},
{
"path": "caffe/.Doxyfile",
"chars": 101863,
"preview": "# Doxyfile 1.8.8\n\n# This file describes the settings to be used by the documentation system\n# doxygen (www.doxygen.org) "
},
{
"path": "caffe/.github/ISSUE_TEMPLATE.md",
"chars": 936,
"preview": "Please use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) for usage, installation, or model"
},
{
"path": "caffe/.gitignore",
"chars": 1214,
"preview": "## General\n# [local]\n*mypvanet*\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.cuo\n\n# Compiled Dynamic libraries\n*.so\n*.dylib"
},
{
"path": "caffe/.travis.yml",
"chars": 1938,
"preview": "dist: trusty\nsudo: required\n\nlanguage: cpp\ncompiler: gcc\n\nenv:\n global:\n - NUM_THREADS=4\n matrix:\n # Use a build"
},
{
"path": "caffe/CMakeLists.txt",
"chars": 4196,
"preview": "cmake_minimum_required(VERSION 2.8.7)\nif(POLICY CMP0046)\n cmake_policy(SET CMP0046 NEW)\nendif()\nif(POLICY CMP0054)\n cm"
},
{
"path": "caffe/CONTRIBUTING.md",
"chars": 1917,
"preview": "# Contributing\n\n## Issues\n\nSpecific Caffe design and development issues, bugs, and feature requests are maintained by Gi"
},
{
"path": "caffe/CONTRIBUTORS.md",
"chars": 620,
"preview": "# Contributors\n\nCaffe is developed by a core set of BAIR members and the open-source community.\n\nWe thank all of our [co"
},
{
"path": "caffe/INSTALL.md",
"chars": 210,
"preview": "# Installation\n\nSee http://caffe.berkeleyvision.org/installation.html for the latest\ninstallation instructions.\n\nCheck t"
},
{
"path": "caffe/LICENSE",
"chars": 2092,
"preview": "COPYRIGHT\n\nAll contributions by the University of California:\nCopyright (c) 2014-2017 The Regents of the University of C"
},
{
"path": "caffe/Makefile",
"chars": 24041,
"preview": "PROJECT := caffe\n\nCONFIG_FILE := Makefile.config\n# Explicitly check for the config file, otherwise make -k will proceed "
},
{
"path": "caffe/Makefile.config.example",
"chars": 4631,
"preview": "## Refer to http://caffe.berkeleyvision.org/installation.html\n# Contributions simplifying and improving our build system"
},
{
"path": "caffe/README.md",
"chars": 8,
"preview": "# caffe\n"
},
{
"path": "caffe/caffe.cloc",
"chars": 1180,
"preview": "Bourne Shell\n filter remove_matches ^\\s*#\n filter remove_inline #.*$\n extension sh\n script_exe sh\nC\n filt"
},
{
"path": "caffe/cmake/ConfigGen.cmake",
"chars": 2164,
"preview": "\n################################################################################################\n# Helper function to g"
},
{
"path": "caffe/cmake/Cuda.cmake",
"chars": 11469,
"preview": "if(CPU_ONLY)\n return()\nendif()\n\n# Known NVIDIA GPU achitectures Caffe can be compiled for.\n# This list will be used for"
},
{
"path": "caffe/cmake/Dependencies.cmake",
"chars": 7140,
"preview": "# These lists are later turned into target properties on main caffe library target\nset(Caffe_LINKER_LIBS \"\")\nset(Caffe_I"
},
{
"path": "caffe/cmake/External/gflags.cmake",
"chars": 1939,
"preview": "if (NOT __GFLAGS_INCLUDED) # guard against multiple includes\n set(__GFLAGS_INCLUDED TRUE)\n\n # use the system-wide gfla"
},
{
"path": "caffe/cmake/External/glog.cmake",
"chars": 1777,
"preview": "# glog depends on gflags\ninclude(\"cmake/External/gflags.cmake\")\n\nif (NOT __GLOG_INCLUDED)\n set(__GLOG_INCLUDED TRUE)\n\n "
},
{
"path": "caffe/cmake/Misc.cmake",
"chars": 1764,
"preview": "# ---[ Configuration types\nset(CMAKE_CONFIGURATION_TYPES \"Debug;Release\" CACHE STRING \"Possible configurations\" FORCE)\nm"
},
{
"path": "caffe/cmake/Modules/FindAtlas.cmake",
"chars": 1724,
"preview": "# Find the Atlas (and Lapack) libraries\n#\n# The following variables are optionally searched for defaults\n# Atlas_ROOT_D"
},
{
"path": "caffe/cmake/Modules/FindGFlags.cmake",
"chars": 1545,
"preview": "# - Try to find GFLAGS\n#\n# The following variables are optionally searched for defaults\n# GFLAGS_ROOT_DIR: B"
},
{
"path": "caffe/cmake/Modules/FindGlog.cmake",
"chars": 1451,
"preview": "# - Try to find Glog\n#\n# The following variables are optionally searched for defaults\n# GLOG_ROOT_DIR: Base "
},
{
"path": "caffe/cmake/Modules/FindLAPACK.cmake",
"chars": 6723,
"preview": "# - Find LAPACK library\n# This module finds an installed fortran library that implements the LAPACK\n# linear-algebra int"
},
{
"path": "caffe/cmake/Modules/FindLMDB.cmake",
"chars": 1119,
"preview": "# Try to find the LMBD libraries and headers\n# LMDB_FOUND - system has LMDB lib\n# LMDB_INCLUDE_DIR - the LMDB include "
},
{
"path": "caffe/cmake/Modules/FindLevelDB.cmake",
"chars": 1728,
"preview": "# - Find LevelDB\n#\n# LevelDB_INCLUDES - List of LevelDB includes\n# LevelDB_LIBRARIES - List of libraries when using L"
},
{
"path": "caffe/cmake/Modules/FindMKL.cmake",
"chars": 3250,
"preview": "# Find the MKL libraries\n#\n# Options:\n#\n# MKL_USE_SINGLE_DYNAMIC_LIBRARY : use single dynamic library interface\n# M"
},
{
"path": "caffe/cmake/Modules/FindMatlabMex.cmake",
"chars": 1749,
"preview": "# This module looks for MatlabMex compiler\n# Defines variables:\n# Matlab_DIR - Matlab root dir\n# Matlab_mex "
},
{
"path": "caffe/cmake/Modules/FindNCCL.cmake",
"chars": 654,
"preview": "set(NCCL_INC_PATHS\n /usr/include\n /usr/local/include\n $ENV{NCCL_DIR}/include\n )\n\nset(NCCL_LIB_PATHS\n /lib"
},
{
"path": "caffe/cmake/Modules/FindNumPy.cmake",
"chars": 2333,
"preview": "# - Find the NumPy libraries\n# This module finds if NumPy is installed, and sets the following variables\n# indicating wh"
},
{
"path": "caffe/cmake/Modules/FindOpenBLAS.cmake",
"chars": 1593,
"preview": "\n\nSET(Open_BLAS_INCLUDE_SEARCH_PATHS\n /usr/include\n /usr/include/openblas\n /usr/include/openblas-base\n /usr/local/in"
},
{
"path": "caffe/cmake/Modules/FindSnappy.cmake",
"chars": 1071,
"preview": "# Find the Snappy libraries\n#\n# The following variables are optionally searched for defaults\n# Snappy_ROOT_DIR: Base"
},
{
"path": "caffe/cmake/Modules/FindvecLib.cmake",
"chars": 1326,
"preview": "# Find the vecLib libraries as part of Accelerate.framework or as standalon framework\n#\n# The following are set after co"
},
{
"path": "caffe/cmake/ProtoBuf.cmake",
"chars": 3744,
"preview": "# Finds Google Protocol Buffers library and compilers and extends\n# the standard cmake script with version and python ge"
},
{
"path": "caffe/cmake/Summary.cmake",
"chars": 7651,
"preview": "################################################################################################\n# Caffe status report f"
},
{
"path": "caffe/cmake/Targets.cmake",
"chars": 7191,
"preview": "################################################################################################\n# Defines global Caffe_"
},
{
"path": "caffe/cmake/Templates/CaffeConfig.cmake.in",
"chars": 1769,
"preview": "# Config file for the Caffe package.\n#\n# Note:\n# Caffe and this config file depends on opencv,\n# so put `find_packag"
},
{
"path": "caffe/cmake/Templates/CaffeConfigVersion.cmake.in",
"chars": 377,
"preview": "set(PACKAGE_VERSION \"@Caffe_VERSION@\")\n\n# Check whether the requested PACKAGE_FIND_VERSION is compatible\nif(\"${PACKAGE_V"
},
{
"path": "caffe/cmake/Templates/caffe_config.h.in",
"chars": 366,
"preview": "/* Sources directory */\n#define SOURCE_FOLDER \"${PROJECT_SOURCE_DIR}\"\n\n/* Binaries directory */\n#define BINARY_FOLDER \"$"
},
{
"path": "caffe/cmake/Uninstall.cmake.in",
"chars": 1170,
"preview": "if(NOT EXISTS \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\")\n message(FATAL_ERROR \"Cannot find install manifest: @C"
},
{
"path": "caffe/cmake/Utils.cmake",
"chars": 13393,
"preview": "################################################################################################\n# Command alias for deb"
},
{
"path": "caffe/cmake/lint.cmake",
"chars": 1505,
"preview": "\nset(CMAKE_SOURCE_DIR ..)\nset(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)\nset(SRC_FILE_EXTENSIONS h hpp hu c c"
},
{
"path": "caffe/docker/README.md",
"chars": 1509,
"preview": "### Running an official image\n\nYou can run one of the automatic [builds](https://hub.docker.com/r/bvlc/caffe). E.g. for "
},
{
"path": "caffe/docker/cpu/Dockerfile",
"chars": 1349,
"preview": "FROM ubuntu:16.04\nLABEL maintainer caffe-maint@googlegroups.com\n\nRUN apt-get update && apt-get install -y --no-install-r"
},
{
"path": "caffe/docker/gpu/Dockerfile",
"chars": 1499,
"preview": "FROM nvidia/cuda:8.0-cudnn6-devel-ubuntu16.04\nLABEL maintainer caffe-maint@googlegroups.com\n\nRUN apt-get update && apt-g"
},
{
"path": "caffe/docs/CMakeLists.txt",
"chars": 4532,
"preview": "# Building docs script\n# Requirements:\n# sudo apt-get install doxygen texlive ruby-dev\n# sudo gem install jekyll exe"
},
{
"path": "caffe/docs/CNAME",
"chars": 25,
"preview": "caffe.berkeleyvision.org\n"
},
{
"path": "caffe/docs/README.md",
"chars": 241,
"preview": "# Caffe Documentation\n\nTo generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`.\n\nTo push your changes to "
},
{
"path": "caffe/docs/_config.yml",
"chars": 131,
"preview": "defaults:\n -\n scope:\n path: \"\" # an empty string here means all files in the project\n values:\n layout: "
},
{
"path": "caffe/docs/_layouts/default.html",
"chars": 2058,
"preview": "<!doctype html>\n<html>\n <head>\n <!-- MathJax -->\n <script type=\"text/javascript\"\n src=\"http://cdn.mathjax.or"
},
{
"path": "caffe/docs/development.md",
"chars": 6631,
"preview": "---\ntitle: Developing and Contributing\n---\n# Development and Contributing\n\nCaffe is developed with active participation "
},
{
"path": "caffe/docs/index.md",
"chars": 6337,
"preview": "---\ntitle: Deep Learning Framework\n---\n\n# Caffe\n\nCaffe is a deep learning framework made with expression, speed, and mod"
},
{
"path": "caffe/docs/install_apt.md",
"chars": 1987,
"preview": "---\ntitle: \"Installation: Ubuntu\"\n---\n\n# Ubuntu Installation\n\n**General dependencies**\n\n sudo apt-get install libprot"
},
{
"path": "caffe/docs/install_apt_debian.md",
"chars": 5827,
"preview": "---\ntitle: \"Installation: Debian\"\n---\n\n# Debian Installation\n\nCaffe packages are available for several Debian versions, "
},
{
"path": "caffe/docs/install_osx.md",
"chars": 6318,
"preview": "---\ntitle: \"Installation: OS X\"\n---\n\n# OS X Installation\n\nWe highly recommend using the [Homebrew](http://brew.sh/) pack"
},
{
"path": "caffe/docs/install_yum.md",
"chars": 1783,
"preview": "---\ntitle: \"Installation: RHEL / Fedora / CentOS\"\n---\n\n# RHEL / Fedora / CentOS Installation\n\n**General dependencies**\n\n"
},
{
"path": "caffe/docs/installation.md",
"chars": 8398,
"preview": "---\ntitle: Installation\n---\n\n# Installation\n\nPrior to installing, have a glance through this guide and take note of the "
},
{
"path": "caffe/docs/model_zoo.md",
"chars": 5044,
"preview": "---\ntitle: Model Zoo\n---\n# Caffe Model Zoo\n\nLots of researchers and engineers have made Caffe models for different tasks"
},
{
"path": "caffe/docs/multigpu.md",
"chars": 2811,
"preview": "---\ntitle: Multi-GPU Usage, Hardware Configuration Assumptions, and Performance\n---\n\n# Multi-GPU Usage\n\nCurrently Multi-"
},
{
"path": "caffe/docs/stylesheets/pygment_trac.css",
"chars": 4168,
"preview": ".highlight { background: #ffffff; }\n.highlight .c { color: #999988; font-style: italic } /* Comment */\n.highlight .err "
},
{
"path": "caffe/docs/stylesheets/reset.css",
"chars": 602,
"preview": "/* MeyerWeb Reset */\n\nhtml, body, div, span, applet, object, iframe,\nh1, h2, h3, h4, h5, h6, p, blockquote, pre,\na, abbr"
},
{
"path": "caffe/docs/stylesheets/styles.css",
"chars": 4385,
"preview": "@import url(http://fonts.googleapis.com/css?family=PT+Serif|Open+Sans:600,400);\n\nbody {\n padding:10px 50px 0 0;\n font-"
},
{
"path": "caffe/docs/tutorial/convolution.md",
"chars": 683,
"preview": "---\ntitle: Convolution\n---\n# Caffeinated Convolution\n\nThe Caffe strategy for convolution is to reduce the problem to mat"
},
{
"path": "caffe/docs/tutorial/data.md",
"chars": 3496,
"preview": "---\ntitle: Data\n---\n# Data: Ins and Outs\n\nData flows through Caffe as [Blobs](net_layer_blob.html#blob-storage-and-commu"
},
{
"path": "caffe/docs/tutorial/fig/.gitignore",
"chars": 0,
"preview": ""
},
{
"path": "caffe/docs/tutorial/forward_backward.md",
"chars": 2463,
"preview": "---\ntitle: Forward and Backward for Inference and Learning\n---\n# Forward and Backward\n\nThe forward and backward passes a"
},
{
"path": "caffe/docs/tutorial/index.md",
"chars": 3219,
"preview": "---\ntitle: Caffe Tutorial\n---\n# Caffe Tutorial\n\nCaffe is a deep learning framework and this tutorial explains its philos"
},
{
"path": "caffe/docs/tutorial/interfaces.md",
"chars": 14301,
"preview": "---\ntitle: Interfaces\n---\n# Interfaces\n\nCaffe has command line, Python, and MATLAB interfaces for day-to-day usage, inte"
},
{
"path": "caffe/docs/tutorial/layers/absval.md",
"chars": 792,
"preview": "---\ntitle: Absolute Value Layer\n---\n\n# Absolute Value Layer\n\n* Layer type: `AbsVal`\n* [Doxygen Documentation](http://caf"
},
{
"path": "caffe/docs/tutorial/layers/accuracy.md",
"chars": 864,
"preview": "---\ntitle: Accuracy and Top-k\n---\n\n# Accuracy and Top-k\n\n`Accuracy` scores the output as the accuracy of output with res"
},
{
"path": "caffe/docs/tutorial/layers/argmax.md",
"chars": 700,
"preview": "---\ntitle: ArgMax Layer\n---\n\n# ArgMax Layer\n\n* Layer type: `ArgMax`\n* [Doxygen Documentation](http://caffe.berkeleyvisio"
},
{
"path": "caffe/docs/tutorial/layers/batchnorm.md",
"chars": 890,
"preview": "---\ntitle: Batch Norm Layer\n---\n\n# Batch Norm Layer\n\n* Layer type: `BatchNorm`\n* [Doxygen Documentation](http://caffe.be"
},
{
"path": "caffe/docs/tutorial/layers/batchreindex.md",
"chars": 682,
"preview": "---\ntitle: Batch Reindex Layer\n---\n\n# Batch Reindex Layer\n\n* Layer type: `BatchReindex`\n* [Doxygen Documentation](http:/"
},
{
"path": "caffe/docs/tutorial/layers/bias.md",
"chars": 816,
"preview": "---\ntitle: Bias Layer\n---\n\n# Bias Layer\n\n* Layer type: `Bias`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/"
},
{
"path": "caffe/docs/tutorial/layers/bnll.md",
"chars": 823,
"preview": "---\ntitle: BNLL Layer\n---\n\n# BNLL Layer\n\n* Layer type: `BNLL`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/"
},
{
"path": "caffe/docs/tutorial/layers/concat.md",
"chars": 1520,
"preview": "---\ntitle: Concat Layer\n---\n\n# Concat Layer\n\n* Layer type: `Concat`\n* [Doxygen Documentation](http://caffe.berkeleyvisio"
},
{
"path": "caffe/docs/tutorial/layers/contrastiveloss.md",
"chars": 969,
"preview": "---\ntitle: Contrastive Loss Layer\n---\n\n# Contrastive Loss Layer\n\n* Layer type: `ContrastiveLoss`\n* [Doxygen Documentatio"
},
{
"path": "caffe/docs/tutorial/layers/convolution.md",
"chars": 3128,
"preview": "---\ntitle: Convolution Layer\n---\n\n# Convolution Layer\n\n* Layer type: `Convolution`\n* [Doxygen Documentation](http://caff"
},
{
"path": "caffe/docs/tutorial/layers/crop.md",
"chars": 817,
"preview": "---\ntitle: Crop Layer\n---\n\n# Crop Layer\n\n* Layer type: `Crop`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/"
},
{
"path": "caffe/docs/tutorial/layers/data.md",
"chars": 1058,
"preview": "---\ntitle: Database Layer\n---\n\n# Database Layer\n\n* Layer type: `Data`\n* [Doxygen Documentation](http://caffe.berkeleyvis"
},
{
"path": "caffe/docs/tutorial/layers/deconvolution.md",
"chars": 938,
"preview": "---\ntitle: Deconvolution Layer\n---\n\n# Deconvolution Layer\n\n* Layer type: `Deconvolution`\n* [Doxygen Documentation](http:"
},
{
"path": "caffe/docs/tutorial/layers/dropout.md",
"chars": 856,
"preview": "---\ntitle: Dropout Layer\n---\n\n# Dropout Layer\n\n* Layer type: `Dropout`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/dummydata.md",
"chars": 742,
"preview": "---\ntitle: Dummy Data Layer\n---\n\n# Dummy Data Layer\n\n* Layer type: `DummyData`\n* [Doxygen Documentation](http://caffe.be"
},
{
"path": "caffe/docs/tutorial/layers/eltwise.md",
"chars": 856,
"preview": "---\ntitle: Eltwise Layer\n---\n\n# Eltwise Layer\n\n* Layer type: `Eltwise`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/elu.md",
"chars": 1024,
"preview": "---\ntitle: ELU Layer\n---\n\n# ELU Layer\n\n* Layer type: `ELU`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/dox"
},
{
"path": "caffe/docs/tutorial/layers/embed.md",
"chars": 829,
"preview": "---\ntitle: Embed Layer\n---\n\n# Embed Layer\n\n* Layer type: `Embed`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/euclideanloss.md",
"chars": 865,
"preview": "---\ntitle: Euclidean Loss Layer\n---\n# Sum-of-Squares / Euclidean Loss Layer\n\n* Layer type: `EuclideanLoss`\n* [Doxygen Do"
},
{
"path": "caffe/docs/tutorial/layers/exp.md",
"chars": 858,
"preview": "---\ntitle: Exponential Layer\n---\n\n# Exponential Layer\n\n* Layer type: `Exp`\n* [Doxygen Documentation](http://caffe.berkel"
},
{
"path": "caffe/docs/tutorial/layers/filter.md",
"chars": 628,
"preview": "---\ntitle: Filter Layer\n---\n\n# Filter Layer\n\n* Layer type: `Filter`\n* [Doxygen Documentation](http://caffe.berkeleyvisio"
},
{
"path": "caffe/docs/tutorial/layers/flatten.md",
"chars": 849,
"preview": "---\ntitle: Flatten Layer\n---\n\n# Flatten Layer\n\n* Layer type: `Flatten`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/hdf5data.md",
"chars": 877,
"preview": "---\ntitle: HDF5 Data Layer\n---\n\n# HDF5 Data Layer\n\n* Layer type: `HDF5Data`\n* [Doxygen Documentation](http://caffe.berke"
},
{
"path": "caffe/docs/tutorial/layers/hdf5output.md",
"chars": 1093,
"preview": "---\ntitle: HDF5 Output Layer\n---\n\n# HDF5 Output Layer\n\n* Layer type: `HDF5Output`\n* [Doxygen Documentation](http://caffe"
},
{
"path": "caffe/docs/tutorial/layers/hingeloss.md",
"chars": 749,
"preview": "---\ntitle: Hinge Loss Layer\n---\n\n# Hinge (L1, L2) Loss Layer\n\n* Layer type: `HingeLoss`\n* [Doxygen Documentation](http:/"
},
{
"path": "caffe/docs/tutorial/layers/im2col.md",
"chars": 708,
"preview": "---\ntitle: Im2col Layer\n---\n\n# im2col\n\n* File type: `Im2col`\n* Header: [`./include/caffe/layers/im2col_layer.hpp`](https"
},
{
"path": "caffe/docs/tutorial/layers/imagedata.md",
"chars": 1062,
"preview": "---\ntitle: ImageData Layer\n---\n\n# ImageData Layer\n\n* Layer type: `ImageData`\n* [Doxygen Documentation](http://caffe.berk"
},
{
"path": "caffe/docs/tutorial/layers/infogainloss.md",
"chars": 1052,
"preview": "---\ntitle: Infogain Loss Layer\n---\n\n# Infogain Loss Layer\n\n* Layer type: `InfogainLoss`\n* [Doxygen Documentation](http:/"
},
{
"path": "caffe/docs/tutorial/layers/innerproduct.md",
"chars": 2189,
"preview": "---\ntitle: Inner Product / Fully Connected Layer\n---\n\n# Inner Product / Fully Connected Layer\n\n* Layer type: `InnerProdu"
},
{
"path": "caffe/docs/tutorial/layers/input.md",
"chars": 690,
"preview": "---\ntitle: Input Layer\n---\n\n# Input Layer\n\n* Layer type: `Input`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/log.md",
"chars": 800,
"preview": "---\ntitle: Log Layer\n---\n\n# Log Layer\n\n* Layer type: `Log`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/dox"
},
{
"path": "caffe/docs/tutorial/layers/lrn.md",
"chars": 1993,
"preview": "---\ntitle: Local Response Normalization (LRN)\n---\n\n# Local Response Normalization (LRN)\n\n* Layer type: `LRN`\n* [Doxygen "
},
{
"path": "caffe/docs/tutorial/layers/lstm.md",
"chars": 995,
"preview": "---\ntitle: LSTM Layer\n---\n\n# LSTM Layer\n\n* Layer type: `LSTM`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/"
},
{
"path": "caffe/docs/tutorial/layers/memorydata.md",
"chars": 1193,
"preview": "---\ntitle: Memory Data Layer\n---\n\n# Memory Data Layer\n\n* Layer type: `MemoryData`\n* [Doxygen Documentation](http://caffe"
},
{
"path": "caffe/docs/tutorial/layers/multinomiallogisticloss.md",
"chars": 842,
"preview": "---\ntitle: Multinomial Logistic Loss Layer\n---\n\n# Multinomial Logistic Loss Layer\n\n* Layer type: `MultinomialLogisticLos"
},
{
"path": "caffe/docs/tutorial/layers/mvn.md",
"chars": 863,
"preview": "---\ntitle: Mean-Variance Normalization (MVN) Layer\n---\n\n# Mean-Variance Normalization (MVN) Layer\n\n* Layer type: `MVN`\n*"
},
{
"path": "caffe/docs/tutorial/layers/parameter.md",
"chars": 822,
"preview": "---\ntitle: Parameter Layer\n---\n\n# Parameter Layer\n\n* Layer type: `Parameter`\n* [Doxygen Documentation](http://caffe.berk"
},
{
"path": "caffe/docs/tutorial/layers/pooling.md",
"chars": 1947,
"preview": "---\ntitle: Pooling Layer\n---\n# Pooling\n\n* Layer type: `Pooling`\n* [Doxygen Documentation](http://caffe.berkeleyvision.or"
},
{
"path": "caffe/docs/tutorial/layers/power.md",
"chars": 1288,
"preview": "---\ntitle: Power Layer\n---\n\n# Power Layer\n\n* Layer type: `Power`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/prelu.md",
"chars": 829,
"preview": "---\ntitle: PReLU Layer\n---\n\n# PReLU Layer\n\n* Layer type: `PReLU`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/python.md",
"chars": 1019,
"preview": "---\ntitle: Python Layer\n---\n\n# Python Layer\n\n* Layer type: `Python`\n* [Doxygen Documentation](http://caffe.berkeleyvisio"
},
{
"path": "caffe/docs/tutorial/layers/recurrent.md",
"chars": 881,
"preview": "---\ntitle: Recurrent Layer\n---\n\n# Recurrent Layer\n\n* Layer type: `Recurrent`\n* [Doxygen Documentation](http://caffe.berk"
},
{
"path": "caffe/docs/tutorial/layers/reduction.md",
"chars": 881,
"preview": "---\ntitle: Reduction Layer\n---\n\n# Reduction Layer\n\n* Layer type: `Reduction`\n* [Doxygen Documentation](http://caffe.berk"
},
{
"path": "caffe/docs/tutorial/layers/relu.md",
"chars": 1687,
"preview": "---\ntitle: ReLU / Rectified-Linear and Leaky-ReLU Layer\n---\n\n# ReLU / Rectified-Linear and Leaky-ReLU Layer\n\n* Layer typ"
},
{
"path": "caffe/docs/tutorial/layers/reshape.md",
"chars": 2376,
"preview": "---\ntitle: Reshape Layer\n---\n\n# Reshape Layer\n* Layer type: `Reshape`\n* [Doxygen Documentation](http://caffe.berkeleyvis"
},
{
"path": "caffe/docs/tutorial/layers/rnn.md",
"chars": 685,
"preview": "---\ntitle: RNN Layer\n---\n\n# RNN Layer\n\n* Layer type: `RNN`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/dox"
},
{
"path": "caffe/docs/tutorial/layers/scale.md",
"chars": 829,
"preview": "---\ntitle: Scale Layer\n---\n\n# Scale Layer\n\n* Layer type: `Scale`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/sigmoid.md",
"chars": 1222,
"preview": "---\ntitle: Sigmoid Layer\n---\n\n# Sigmoid Layer\n\n* Layer type: `Sigmoid`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/sigmoidcrossentropyloss.md",
"chars": 784,
"preview": "---\ntitle: Sigmoid Cross-Entropy Loss Layer\n---\n\n# Sigmoid Cross-Entropy Loss Layer\n\n* Layer type: `SigmoidCrossEntropyL"
},
{
"path": "caffe/docs/tutorial/layers/silence.md",
"chars": 668,
"preview": "---\ntitle: Silence Layer\n---\n\n# Silence Layer\n\n* Layer type: `Silence`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/slice.md",
"chars": 1501,
"preview": "---\ntitle: Slice Layer\n---\n\n# Slice Layer\n\n* Layer type: `Slice`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/softmax.md",
"chars": 914,
"preview": "---\ntitle: Softmax Layer\n---\n\n# Softmax Layer\n\n* Layer type: `Softmax`\n* [Doxygen Documentation](http://caffe.berkeleyvi"
},
{
"path": "caffe/docs/tutorial/layers/softmaxwithloss.md",
"chars": 1442,
"preview": "---\ntitle: Softmax with Loss Layer\n---\n\n# Softmax with Loss Layer\n\n* Layer type: `SoftmaxWithLoss`\n* [Doxygen Documentat"
},
{
"path": "caffe/docs/tutorial/layers/split.md",
"chars": 769,
"preview": "---\ntitle: Split Layer\n---\n\n# Split Layer\n\n* Layer type: `Split`\n* [Doxygen Documentation](http://caffe.berkeleyvision.o"
},
{
"path": "caffe/docs/tutorial/layers/spp.md",
"chars": 708,
"preview": "---\ntitle: Spatial Pyramid Pooling Layer\n---\n\n# Spatial Pyramid Pooling Layer\n\n* Layer type: `SPP`\n* [Doxygen Documentat"
},
{
"path": "caffe/docs/tutorial/layers/tanh.md",
"chars": 699,
"preview": "---\ntitle: TanH Layer\n---\n\n# TanH Layer\n\n* Header: [`./include/caffe/layers/tanh_layer.hpp`](https://github.com/BVLC/caf"
},
{
"path": "caffe/docs/tutorial/layers/threshold.md",
"chars": 754,
"preview": "---\ntitle: Threshold Layer\n---\n\n# Threshold Layer\n\n* Header: [`./include/caffe/layers/threshold_layer.hpp`](https://gith"
},
{
"path": "caffe/docs/tutorial/layers/tile.md",
"chars": 816,
"preview": "---\ntitle: Tile Layer\n---\n\n# Tile Layer\n\n* Layer type: `Tile`\n* [Doxygen Documentation](http://caffe.berkeleyvision.org/"
},
{
"path": "caffe/docs/tutorial/layers/windowdata.md",
"chars": 731,
"preview": "---\ntitle: WindowData Layer\n---\n\n# WindowData Layer\n\n* Layer type: `WindowData`\n* [Doxygen Documentation](http://caffe.b"
},
{
"path": "caffe/docs/tutorial/layers.md",
"chars": 7237,
"preview": "---\ntitle: Layer Catalogue\n---\n\n# Layers\n\nTo create a Caffe model you need to define the model architecture in a protoco"
},
{
"path": "caffe/docs/tutorial/loss.md",
"chars": 2783,
"preview": "---\ntitle: Loss\n---\n# Loss\n\nIn Caffe, as in most of machine learning, learning is driven by a **loss** function (also kn"
},
{
"path": "caffe/docs/tutorial/net_layer_blob.md",
"chars": 13260,
"preview": "---\ntitle: Blobs, Layers, and Nets\n---\n# Blobs, Layers, and Nets: anatomy of a Caffe model\n\nDeep networks are compositio"
},
{
"path": "caffe/docs/tutorial/solver.md",
"chars": 18227,
"preview": "---\ntitle: Solver / Model Optimization\n---\n# Solver\n\nThe solver orchestrates model optimization by coordinating the netw"
},
{
"path": "caffe/examples/00-classification.ipynb",
"chars": 813348,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Classification: Instant Recogniti"
},
{
"path": "caffe/examples/01-learning-lenet.ipynb",
"chars": 376291,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Solving in Python with LeNet\\n\",\n"
},
{
"path": "caffe/examples/02-fine-tuning.ipynb",
"chars": 480504,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Fine-tuning a Pretrained Network "
},
{
"path": "caffe/examples/CMakeLists.txt",
"chars": 1083,
"preview": "file(GLOB_RECURSE examples_srcs \"${PROJECT_SOURCE_DIR}/examples/*.cpp\")\n\nforeach(source_file ${examples_srcs})\n # get f"
},
{
"path": "caffe/examples/brewing-logreg.ipynb",
"chars": 452886,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Brewing Logistic Regression then "
},
{
"path": "caffe/examples/cifar10/cifar10_full.prototxt",
"chars": 2174,
"preview": "name: \"CIFAR10_full_deploy\"\n# N.B. input image must be in CIFAR-10 format\n# as described at http://www.cs.toronto.edu/~k"
},
{
"path": "caffe/examples/cifar10/cifar10_full_sigmoid_solver.prototxt",
"chars": 953,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt",
"chars": 959,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe/examples/cifar10/cifar10_full_sigmoid_train_test.prototxt",
"chars": 2879,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe/examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt",
"chars": 3192,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe/examples/cifar10/cifar10_full_solver.prototxt",
"chars": 944,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe/examples/cifar10/cifar10_full_solver_lr1.prototxt",
"chars": 944,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe/examples/cifar10/cifar10_full_solver_lr2.prototxt",
"chars": 945,
"preview": "# reduce learning rate after 120 epochs (60000 iters) by factor 0f 10\n# then another factor of 10 after 10 more epochs ("
},
{
"path": "caffe/examples/cifar10/cifar10_full_train_test.prototxt",
"chars": 3122,
"preview": "name: \"CIFAR10_full\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe/examples/cifar10/cifar10_quick.prototxt",
"chars": 1921,
"preview": "name: \"CIFAR10_quick_test\"\nlayer {\n name: \"data\"\n type: \"Input\"\n top: \"data\"\n input_param { shape: { dim: 1 dim: 3 d"
},
{
"path": "caffe/examples/cifar10/cifar10_quick_solver.prototxt",
"chars": 859,
"preview": "# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10\n\n# The train/test net protocol buffer definitio"
},
{
"path": "caffe/examples/cifar10/cifar10_quick_solver_lr1.prototxt",
"chars": 882,
"preview": "# reduce the learning rate after 8 epochs (4000 iters) by a factor of 10\n\n# The train/test net protocol buffer definitio"
},
{
"path": "caffe/examples/cifar10/cifar10_quick_train_test.prototxt",
"chars": 3088,
"preview": "name: \"CIFAR10_quick\"\nlayer {\n name: \"cifar\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe/examples/cifar10/convert_cifar_data.cpp",
"chars": 3677,
"preview": "//\n// This script converts the CIFAR dataset to the leveldb format used\n// by caffe to perform classification.\n// Usage:"
},
{
"path": "caffe/examples/cifar10/create_cifar10.sh",
"chars": 467,
"preview": "#!/usr/bin/env sh\n# This script converts the cifar data into leveldb format.\nset -e\n\nEXAMPLE=examples/cifar10\nDATA=data/"
},
{
"path": "caffe/examples/cifar10/readme.md",
"chars": 5243,
"preview": "---\ntitle: CIFAR-10 tutorial\ncategory: example\ndescription: Train and test Caffe on CIFAR-10 data.\ninclude_in_docs: true"
},
{
"path": "caffe/examples/cifar10/train_full.sh",
"chars": 524,
"preview": "#!/usr/bin/env sh\nset -e\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_solver.pr"
},
{
"path": "caffe/examples/cifar10/train_full_sigmoid.sh",
"chars": 139,
"preview": "#!/usr/bin/env sh\nset -e\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_sigmoid_s"
},
{
"path": "caffe/examples/cifar10/train_full_sigmoid_bn.sh",
"chars": 142,
"preview": "#!/usr/bin/env sh\nset -e\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_full_sigmoid_s"
},
{
"path": "caffe/examples/cifar10/train_quick.sh",
"chars": 338,
"preview": "#!/usr/bin/env sh\nset -e\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train \\\n --solver=examples/cifar10/cifar10_quick_solver.pro"
},
{
"path": "caffe/examples/cpp_classification/classification.cpp",
"chars": 8661,
"preview": "#include <caffe/caffe.hpp>\n#ifdef USE_OPENCV\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#in"
},
{
"path": "caffe/examples/cpp_classification/readme.md",
"chars": 2837,
"preview": "---\ntitle: CaffeNet C++ Classification example\ndescription: A simple example performing image classification using the l"
},
{
"path": "caffe/examples/detection.ipynb",
"chars": 702457,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"[R-CNN](https://github.com/rbgirshi"
},
{
"path": "caffe/examples/feature_extraction/imagenet_val.prototxt",
"chars": 3279,
"preview": "name: \"CaffeNet\"\nlayer {\n name: \"data\"\n type: \"ImageData\"\n top: \"data\"\n top: \"label\"\n transform_param {\n mirror:"
},
{
"path": "caffe/examples/feature_extraction/readme.md",
"chars": 3087,
"preview": "---\ntitle: Feature extraction with Caffe C++ code.\ndescription: Extract CaffeNet / AlexNet features using the Caffe util"
},
{
"path": "caffe/examples/finetune_flickr_style/assemble_data.py",
"chars": 3636,
"preview": "#!/usr/bin/env python\n\"\"\"\nForm a subset of the Flickr Style data, download images to dirname, and write\nCaffe ImagesData"
},
{
"path": "caffe/examples/finetune_flickr_style/readme.md",
"chars": 10483,
"preview": "---\ntitle: Fine-tuning for style recognition\ndescription: Fine-tune the ImageNet-trained CaffeNet on the \"Flickr Style\" "
},
{
"path": "caffe/examples/finetune_flickr_style/style_names.txt",
"chars": 173,
"preview": "Detailed\nPastel\nMelancholy\nNoir\nHDR\nVintage\nLong Exposure\nHorror\nSunny\nBright\nHazy\nBokeh\nSerene\nTexture\nEthereal\nMacro\nD"
},
{
"path": "caffe/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt",
"chars": 330,
"preview": "net: \"examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt\"\ntest_iter: 100\ntest_interval: 1000\nbase"
},
{
"path": "caffe/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt",
"chars": 5614,
"preview": "name: \"CaffeNet\"\nlayer {\n name: \"data\"\n type: \"WindowData\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n "
},
{
"path": "caffe/examples/hdf5_classification/nonlinear_auto_test.prototxt",
"chars": 782,
"preview": "layer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n hdf5_data_param {\n source: \"examples/hdf5_cl"
},
{
"path": "caffe/examples/hdf5_classification/nonlinear_auto_train.prototxt",
"chars": 783,
"preview": "layer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n hdf5_data_param {\n source: \"examples/hdf5_cl"
},
{
"path": "caffe/examples/hdf5_classification/nonlinear_train_val.prototxt",
"chars": 1395,
"preview": "name: \"LogisticRegressionNet\"\nlayer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n include {\n pha"
},
{
"path": "caffe/examples/hdf5_classification/train_val.prototxt",
"chars": 999,
"preview": "name: \"LogisticRegressionNet\"\nlayer {\n name: \"data\"\n type: \"HDF5Data\"\n top: \"data\"\n top: \"label\"\n include {\n pha"
},
{
"path": "caffe/examples/imagenet/create_imagenet.sh",
"chars": 1503,
"preview": "#!/usr/bin/env sh\n# Create the imagenet lmdb inputs\n# N.B. set the path to the imagenet train + val data dirs\nset -e\n\nEX"
},
{
"path": "caffe/examples/imagenet/make_imagenet_mean.sh",
"chars": 287,
"preview": "#!/usr/bin/env sh\n# Compute the mean image from the imagenet training lmdb\n# N.B. this is available in data/ilsvrc12\n\nEX"
},
{
"path": "caffe/examples/imagenet/readme.md",
"chars": 7634,
"preview": "---\ntitle: ImageNet tutorial\ndescription: Train and test \"CaffeNet\" on ImageNet data.\ncategory: example\ninclude_in_docs:"
},
{
"path": "caffe/examples/imagenet/resume_training.sh",
"chars": 207,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=models/bvlc_reference_caffenet/solver.prototxt \\\n "
},
{
"path": "caffe/examples/imagenet/train_caffenet.sh",
"chars": 117,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=models/bvlc_reference_caffenet/solver.prototxt $@\n"
},
{
"path": "caffe/examples/mnist/convert_mnist_data.cpp",
"chars": 4520,
"preview": "// This script converts the MNIST dataset to a lmdb (default) or\n// leveldb (--backend=leveldb) format used by caffe to "
},
{
"path": "caffe/examples/mnist/create_mnist.sh",
"chars": 634,
"preview": "#!/usr/bin/env sh\n# This script converts the mnist data into lmdb/leveldb format,\n# depending on the value assigned to $"
},
{
"path": "caffe/examples/mnist/lenet.prototxt",
"chars": 1738,
"preview": "name: \"LeNet\"\nlayer {\n name: \"data\"\n type: \"Input\"\n top: \"data\"\n input_param { shape: { dim: 64 dim: 1 dim: 28 dim: "
},
{
"path": "caffe/examples/mnist/lenet_adadelta_solver.prototxt",
"chars": 777,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe/examples/mnist/lenet_auto_solver.prototxt",
"chars": 778,
"preview": "# The train/test net protocol buffer definition\ntrain_net: \"mnist/lenet_auto_train.prototxt\"\ntest_net: \"mnist/lenet_auto"
},
{
"path": "caffe/examples/mnist/lenet_consolidated_solver.prototxt",
"chars": 6003,
"preview": "# lenet_consolidated_solver.prototxt consolidates the lenet_solver, lenet_train,\n# and lenet_test prototxts into a singl"
},
{
"path": "caffe/examples/mnist/lenet_multistep_solver.prototxt",
"chars": 871,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe/examples/mnist/lenet_solver.prototxt",
"chars": 790,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe/examples/mnist/lenet_solver_adam.prototxt",
"chars": 886,
"preview": "# The train/test net protocol buffer definition\n# this follows \"ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION\"\nnet: \"exampl"
},
{
"path": "caffe/examples/mnist/lenet_solver_rmsprop.prototxt",
"chars": 830,
"preview": "# The train/test net protocol buffer definition\nnet: \"examples/mnist/lenet_train_test.prototxt\"\n# test_iter specifies ho"
},
{
"path": "caffe/examples/mnist/lenet_train_test.prototxt",
"chars": 2282,
"preview": "name: \"LeNet\"\nlayer {\n name: \"mnist\"\n type: \"Data\"\n top: \"data\"\n top: \"label\"\n include {\n phase: TRAIN\n }\n tra"
},
{
"path": "caffe/examples/mnist/mnist_autoencoder.prototxt",
"chars": 4814,
"preview": "name: \"MNISTAutoencoder\"\nlayer {\n name: \"data\"\n type: \"Data\"\n top: \"data\"\n include {\n phase: TRAIN\n }\n transfor"
},
{
"path": "caffe/examples/mnist/mnist_autoencoder_solver.prototxt",
"chars": 433,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe/examples/mnist/mnist_autoencoder_solver_adadelta.prototxt",
"chars": 451,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt",
"chars": 423,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt",
"chars": 466,
"preview": "net: \"examples/mnist/mnist_autoencoder.prototxt\"\ntest_state: { stage: 'test-on-train' }\ntest_iter: 500\ntest_state: { sta"
},
{
"path": "caffe/examples/mnist/readme.md",
"chars": 11948,
"preview": "---\ntitle: LeNet MNIST Tutorial\ndescription: Train and test \"LeNet\" on the MNIST handwritten digit data.\ncategory: examp"
},
{
"path": "caffe/examples/mnist/train_lenet.sh",
"chars": 101,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@\n"
},
{
"path": "caffe/examples/mnist/train_lenet_adam.sh",
"chars": 106,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@\n"
},
{
"path": "caffe/examples/mnist/train_lenet_consolidated.sh",
"chars": 118,
"preview": "#!/usr/bin/env sh\nset -e\n\n./build/tools/caffe train \\\n --solver=examples/mnist/lenet_consolidated_solver.prototxt $@\n"
}
]
// ... and 521 more files (download for full content)
About this extraction
This page contains the full source code of the tonghe90/textspotter GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 721 files (10.2 MB), approximately 2.7M tokens, and a symbol index with 3195 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.