Showing preview only (2,867K chars total). Download the full file or copy to clipboard to get everything.
Repository: yiwenguo/Dynamic-Network-Surgery
Branch: master
Commit: 27d4926f0f6d
Files: 271
Total size: 2.7 MB
Directory structure:
gitextract_g_i19mmv/
├── CMakeLists.txt
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── INSTALL.md
├── LICENSE
├── Makefile
├── Makefile.config.example
├── README.md
├── caffe.cloc
├── cmake/
│ ├── ConfigGen.cmake
│ ├── Cuda.cmake
│ ├── Dependencies.cmake
│ ├── External/
│ │ ├── gflags.cmake
│ │ └── glog.cmake
│ ├── Misc.cmake
│ ├── Modules/
│ │ ├── FindAtlas.cmake
│ │ ├── FindGFlags.cmake
│ │ ├── FindGlog.cmake
│ │ ├── FindLAPACK.cmake
│ │ ├── FindLMDB.cmake
│ │ ├── FindLevelDB.cmake
│ │ ├── FindMKL.cmake
│ │ ├── FindMatlabMex.cmake
│ │ ├── FindNumPy.cmake
│ │ ├── FindOpenBLAS.cmake
│ │ ├── FindSnappy.cmake
│ │ └── FindvecLib.cmake
│ ├── ProtoBuf.cmake
│ ├── Summary.cmake
│ ├── Targets.cmake
│ ├── Templates/
│ │ ├── CaffeConfig.cmake.in
│ │ ├── CaffeConfigVersion.cmake.in
│ │ └── caffe_config.h.in
│ ├── Utils.cmake
│ └── lint.cmake
├── include/
│ └── caffe/
│ ├── blob.hpp
│ ├── caffe.hpp
│ ├── common.hpp
│ ├── common_layers.hpp
│ ├── data_layers.hpp
│ ├── data_reader.hpp
│ ├── data_transformer.hpp
│ ├── filler.hpp
│ ├── internal_thread.hpp
│ ├── layer.hpp
│ ├── layer_factory.hpp
│ ├── loss_layers.hpp
│ ├── net.hpp
│ ├── neuron_layers.hpp
│ ├── parallel.hpp
│ ├── python_layer.hpp
│ ├── solver.hpp
│ ├── syncedmem.hpp
│ ├── test/
│ │ ├── test_caffe_main.hpp
│ │ └── test_gradient_check_util.hpp
│ ├── util/
│ │ ├── benchmark.hpp
│ │ ├── blocking_queue.hpp
│ │ ├── cudnn.hpp
│ │ ├── db.hpp
│ │ ├── db_leveldb.hpp
│ │ ├── db_lmdb.hpp
│ │ ├── device_alternate.hpp
│ │ ├── hdf5.hpp
│ │ ├── im2col.hpp
│ │ ├── insert_splits.hpp
│ │ ├── io.hpp
│ │ ├── math_functions.hpp
│ │ ├── mkl_alternate.hpp
│ │ ├── rng.hpp
│ │ └── upgrade_proto.hpp
│ └── vision_layers.hpp
├── models/
│ ├── lenet300100/
│ │ ├── caffe_lenet300100_original.caffemodel
│ │ ├── caffe_lenet300100_sparse.caffemodel
│ │ └── lenet300100.prototxt
│ └── lenet5/
│ ├── caffe_lenet5_original.caffemodel
│ ├── caffe_lenet5_sparse.caffemodel
│ └── lenet5.prototxt
├── src/
│ ├── caffe/
│ │ ├── CMakeLists.txt
│ │ ├── blob.cpp
│ │ ├── common.cpp
│ │ ├── data_reader.cpp
│ │ ├── data_transformer.cpp
│ │ ├── internal_thread.cpp
│ │ ├── layer.cpp
│ │ ├── layer_factory.cpp
│ │ ├── layers/
│ │ │ ├── absval_layer.cpp
│ │ │ ├── absval_layer.cu
│ │ │ ├── accuracy_layer.cpp
│ │ │ ├── argmax_layer.cpp
│ │ │ ├── base_conv_layer.cpp
│ │ │ ├── base_data_layer.cpp
│ │ │ ├── base_data_layer.cu
│ │ │ ├── bnll_layer.cpp
│ │ │ ├── bnll_layer.cu
│ │ │ ├── compress_conv_layer.cpp
│ │ │ ├── compress_conv_layer.cu
│ │ │ ├── compress_inner_product_layer.cpp
│ │ │ ├── compress_inner_product_layer.cu
│ │ │ ├── concat_layer.cpp
│ │ │ ├── concat_layer.cu
│ │ │ ├── contrastive_loss_layer.cpp
│ │ │ ├── contrastive_loss_layer.cu
│ │ │ ├── conv_layer.cpp
│ │ │ ├── conv_layer.cu
│ │ │ ├── cudnn_conv_layer.cpp
│ │ │ ├── cudnn_conv_layer.cu
│ │ │ ├── cudnn_pooling_layer.cpp
│ │ │ ├── cudnn_pooling_layer.cu
│ │ │ ├── cudnn_relu_layer.cpp
│ │ │ ├── cudnn_relu_layer.cu
│ │ │ ├── cudnn_sigmoid_layer.cpp
│ │ │ ├── cudnn_sigmoid_layer.cu
│ │ │ ├── cudnn_softmax_layer.cpp
│ │ │ ├── cudnn_softmax_layer.cu
│ │ │ ├── cudnn_tanh_layer.cpp
│ │ │ ├── cudnn_tanh_layer.cu
│ │ │ ├── data_layer.cpp
│ │ │ ├── deconv_layer.cpp
│ │ │ ├── deconv_layer.cu
│ │ │ ├── dropout_layer.cpp
│ │ │ ├── dropout_layer.cu
│ │ │ ├── dummy_data_layer.cpp
│ │ │ ├── eltwise_layer.cpp
│ │ │ ├── eltwise_layer.cu
│ │ │ ├── euclidean_loss_layer.cpp
│ │ │ ├── euclidean_loss_layer.cu
│ │ │ ├── exp_layer.cpp
│ │ │ ├── exp_layer.cu
│ │ │ ├── filter_layer.cpp
│ │ │ ├── filter_layer.cu
│ │ │ ├── flatten_layer.cpp
│ │ │ ├── hdf5_data_layer.cpp
│ │ │ ├── hdf5_data_layer.cu
│ │ │ ├── hdf5_output_layer.cpp
│ │ │ ├── hdf5_output_layer.cu
│ │ │ ├── hinge_loss_layer.cpp
│ │ │ ├── im2col_layer.cpp
│ │ │ ├── im2col_layer.cu
│ │ │ ├── image_data_layer.cpp
│ │ │ ├── infogain_loss_layer.cpp
│ │ │ ├── inner_product_layer.cpp
│ │ │ ├── inner_product_layer.cu
│ │ │ ├── loss_layer.cpp
│ │ │ ├── lrn_layer.cpp
│ │ │ ├── lrn_layer.cu
│ │ │ ├── memory_data_layer.cpp
│ │ │ ├── multinomial_logistic_loss_layer.cpp
│ │ │ ├── mvn_layer.cpp
│ │ │ ├── mvn_layer.cu
│ │ │ ├── neuron_layer.cpp
│ │ │ ├── pooling_layer.cpp
│ │ │ ├── pooling_layer.cu
│ │ │ ├── power_layer.cpp
│ │ │ ├── power_layer.cu
│ │ │ ├── prelu_layer.cpp
│ │ │ ├── prelu_layer.cu
│ │ │ ├── reduction_layer.cpp
│ │ │ ├── reduction_layer.cu
│ │ │ ├── relu_layer.cpp
│ │ │ ├── relu_layer.cu
│ │ │ ├── reshape_layer.cpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.cu
│ │ │ ├── sigmoid_layer.cpp
│ │ │ ├── sigmoid_layer.cu
│ │ │ ├── silence_layer.cpp
│ │ │ ├── silence_layer.cu
│ │ │ ├── slice_layer.cpp
│ │ │ ├── slice_layer.cu
│ │ │ ├── softmax_layer.cpp
│ │ │ ├── softmax_layer.cu
│ │ │ ├── softmax_loss_layer.cpp
│ │ │ ├── softmax_loss_layer.cu
│ │ │ ├── split_layer.cpp
│ │ │ ├── split_layer.cu
│ │ │ ├── spp_layer.cpp
│ │ │ ├── tanh_layer.cpp
│ │ │ ├── tanh_layer.cu
│ │ │ ├── threshold_layer.cpp
│ │ │ ├── threshold_layer.cu
│ │ │ └── window_data_layer.cpp
│ │ ├── net.cpp
│ │ ├── parallel.cpp
│ │ ├── proto/
│ │ │ └── caffe.proto
│ │ ├── solver.cpp
│ │ ├── syncedmem.cpp
│ │ ├── test/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── test_accuracy_layer.cpp
│ │ │ ├── test_argmax_layer.cpp
│ │ │ ├── test_benchmark.cpp
│ │ │ ├── test_blob.cpp
│ │ │ ├── test_caffe_main.cpp
│ │ │ ├── test_common.cpp
│ │ │ ├── test_concat_layer.cpp
│ │ │ ├── test_contrastive_loss_layer.cpp
│ │ │ ├── test_convolution_layer.cpp
│ │ │ ├── test_data/
│ │ │ │ ├── generate_sample_data.py
│ │ │ │ ├── sample_data.h5
│ │ │ │ ├── sample_data_2_gzip.h5
│ │ │ │ ├── sample_data_list.txt
│ │ │ │ ├── solver_data.h5
│ │ │ │ └── solver_data_list.txt
│ │ │ ├── test_data_layer.cpp
│ │ │ ├── test_data_transformer.cpp
│ │ │ ├── test_db.cpp
│ │ │ ├── test_deconvolution_layer.cpp
│ │ │ ├── test_dummy_data_layer.cpp
│ │ │ ├── test_eltwise_layer.cpp
│ │ │ ├── test_euclidean_loss_layer.cpp
│ │ │ ├── test_filler.cpp
│ │ │ ├── test_filter_layer.cpp
│ │ │ ├── test_flatten_layer.cpp
│ │ │ ├── test_gradient_based_solver.cpp
│ │ │ ├── test_hdf5_output_layer.cpp
│ │ │ ├── test_hdf5data_layer.cpp
│ │ │ ├── test_hinge_loss_layer.cpp
│ │ │ ├── test_im2col_kernel.cu
│ │ │ ├── test_im2col_layer.cpp
│ │ │ ├── test_image_data_layer.cpp
│ │ │ ├── test_infogain_loss_layer.cpp
│ │ │ ├── test_inner_product_layer.cpp
│ │ │ ├── test_internal_thread.cpp
│ │ │ ├── test_io.cpp
│ │ │ ├── test_layer_factory.cpp
│ │ │ ├── test_lrn_layer.cpp
│ │ │ ├── test_math_functions.cpp
│ │ │ ├── test_maxpool_dropout_layers.cpp
│ │ │ ├── test_memory_data_layer.cpp
│ │ │ ├── test_multinomial_logistic_loss_layer.cpp
│ │ │ ├── test_mvn_layer.cpp
│ │ │ ├── test_net.cpp
│ │ │ ├── test_neuron_layer.cpp
│ │ │ ├── test_platform.cpp
│ │ │ ├── test_pooling_layer.cpp
│ │ │ ├── test_power_layer.cpp
│ │ │ ├── test_protobuf.cpp
│ │ │ ├── test_random_number_generator.cpp
│ │ │ ├── test_reduction_layer.cpp
│ │ │ ├── test_reshape_layer.cpp
│ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp
│ │ │ ├── test_slice_layer.cpp
│ │ │ ├── test_softmax_layer.cpp
│ │ │ ├── test_softmax_with_loss_layer.cpp
│ │ │ ├── test_solver.cpp
│ │ │ ├── test_split_layer.cpp
│ │ │ ├── test_spp_layer.cpp
│ │ │ ├── test_stochastic_pooling.cpp
│ │ │ ├── test_syncedmem.cpp
│ │ │ ├── test_tanh_layer.cpp
│ │ │ ├── test_threshold_layer.cpp
│ │ │ ├── test_upgrade_proto.cpp
│ │ │ └── test_util_blas.cpp
│ │ └── util/
│ │ ├── benchmark.cpp
│ │ ├── blocking_queue.cpp
│ │ ├── cudnn.cpp
│ │ ├── db.cpp
│ │ ├── db_leveldb.cpp
│ │ ├── db_lmdb.cpp
│ │ ├── hdf5.cpp
│ │ ├── im2col.cpp
│ │ ├── im2col.cu
│ │ ├── insert_splits.cpp
│ │ ├── io.cpp
│ │ ├── math_functions.cpp
│ │ ├── math_functions.cu
│ │ └── upgrade_proto.cpp
│ └── gtest/
│ ├── CMakeLists.txt
│ ├── gtest-all.cpp
│ ├── gtest.h
│ └── gtest_main.cc
└── tools/
└── caffe.cpp
================================================
FILE CONTENTS
================================================
================================================
FILE: CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.8.7)

# ---[ Caffe project
project(Caffe C CXX)

# ---[ Using cmake scripts and modules
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)

include(ExternalProject)

include(cmake/Utils.cmake)
include(cmake/Targets.cmake)
include(cmake/Misc.cmake)
include(cmake/Summary.cmake)
include(cmake/ConfigGen.cmake)

# ---[ Options
caffe_option(CPU_ONLY  "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
caffe_option(BUILD_python "Build Python wrapper" ON)
set(python_version "2" CACHE STRING "Specify which python version to use")
caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE)
caffe_option(BUILD_docs   "Build documentation" ON IF UNIX OR APPLE)
caffe_option(BUILD_python_layer "Build the Caffe python layer" ON)

# ---[ Dependencies
include(cmake/Dependencies.cmake)

# ---[ Flags
if(UNIX OR APPLE)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
endif()

if(USE_libstdcpp)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
  message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)")
endif()

add_definitions(-DGTEST_USE_OWN_TR1_TUPLE)

# ---[ Warnings
caffe_warnings_disable(CMAKE_CXX_FLAGS -Wno-sign-compare -Wno-uninitialized)

# ---[ Config generation
configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_config.h")

# ---[ Includes
set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include)
include_directories(${Caffe_INCLUDE_DIR} ${PROJECT_BINARY_DIR})
include_directories(BEFORE src) # This is needed for gtest.

# ---[ Subdirectories
add_subdirectory(src/gtest)
add_subdirectory(src/caffe)
add_subdirectory(tools)
add_subdirectory(examples)
add_subdirectory(python)
add_subdirectory(matlab)
add_subdirectory(docs)

# ---[ Linter target
add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake)

# ---[ pytest target
add_custom_target(pytest COMMAND python${python_version} -m unittest discover -s caffe/test WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/python )
add_dependencies(pytest pycaffe)

# ---[ Configuration summary
caffe_print_configuration_summary()

# ---[ Export configs generation
caffe_generate_export_configs()
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing
## Issues
Specific Caffe design and development issues, bugs, and feature requests are maintained by GitHub Issues.
_Please do not post usage, installation, or modeling questions, or other requests for help to Issues._
Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead. This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
When reporting a bug, it's most helpful to provide the following information, where applicable:
* What steps reproduce the bug?
* Can you reproduce the bug using the latest [master](https://github.com/BVLC/caffe/tree/master), compiled with the `DEBUG` make option?
* What hardware and operating system/distribution are you running?
* If the bug is a crash, provide the backtrace (usually printed by Caffe; always obtainable with `gdb`).
Try to give your issue a title that is succinct and specific. The devs will rename issues as needed to keep track of them.
## Pull Requests
Caffe welcomes all contributions.
See the [contributing guide](http://caffe.berkeleyvision.org/development.html) for details.
Briefly: read commit by commit, a PR should tell a clean, compelling story of _one_ improvement to Caffe. In particular:
* A PR should do one clear thing that obviously improves Caffe, and nothing more. Making many smaller PRs is better than making one large PR; review effort is superlinear in the amount of code involved.
* Similarly, each commit should be a small, atomic change representing one step in development. PRs should be made of many commits where appropriate.
* Please do rewrite PR history to be clean rather than chronological. Within-PR bugfixes, style cleanups, reversions, etc. should be squashed and should not appear in merged PR history.
* Anything nonobvious from the code should be explained in comments, commit messages, or the PR description, as appropriate.
================================================
FILE: CONTRIBUTORS.md
================================================
# Contributors
Caffe is developed by a core set of BVLC members and the open-source community.
We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)!
**For the detailed history of contributions** of a given file, try
git blame file
to see line-by-line credits and
git log --follow file
to see the change log even across renames and rewrites.
Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details.
**Copyright** is held by the original contributor according to the versioning history; see LICENSE.
================================================
FILE: INSTALL.md
================================================
# Installation
See http://caffe.berkeleyvision.org/installation.html for the latest
installation instructions.
Check the issue tracker in case you need help:
https://github.com/BVLC/caffe/issues
================================================
FILE: LICENSE
================================================
COPYRIGHT
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
Caffe uses a shared copyright model: each contributor holds copyright over
their contributions to Caffe. The project versioning records all such
contribution and copyright details. If a contributor wants to further mark
their specific copyright on a particular contribution, they should indicate
their copyright solely in the commit message of the change when it is
committed.
LICENSE
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CONTRIBUTION AGREEMENT
By contributing to the BVLC/caffe repository through pull-request, comment,
or otherwise, the contributor releases their content to the
license and copyright terms herein.
INTEL LICENSE ON THE SURGERY PARTS
Intel Corporation holds the license for the surgery-related code.
================================================
FILE: Makefile
================================================
PROJECT := caffe
CONFIG_FILE := Makefile.config
# Explicitly check for the config file, otherwise make -k will proceed anyway.
ifeq ($(wildcard $(CONFIG_FILE)),)
$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.)
endif
include $(CONFIG_FILE)
BUILD_DIR_LINK := $(BUILD_DIR)
ifeq ($(RELEASE_BUILD_DIR),)
RELEASE_BUILD_DIR := .$(BUILD_DIR)_release
endif
ifeq ($(DEBUG_BUILD_DIR),)
DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug
endif
DEBUG ?= 0
ifeq ($(DEBUG), 1)
BUILD_DIR := $(DEBUG_BUILD_DIR)
OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)
else
BUILD_DIR := $(RELEASE_BUILD_DIR)
OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)
endif
# All of the directories containing code.
SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \
\( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)
# The target shared library name
LIB_BUILD_DIR := $(BUILD_DIR)/lib
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a
DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so
##############################
# Get all source files
##############################
# CXX_SRCS are the source files excluding the test ones.
CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")
# CU_SRCS are the cuda source files
CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")
# TEST_SRCS are the test source files
TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp
TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")
TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))
TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")
GTEST_SRC := src/gtest/gtest-all.cpp
# TOOL_SRCS are the source files for the tool binaries
TOOL_SRCS := $(shell find tools -name "*.cpp")
# EXAMPLE_SRCS are the source files for the example binaries
EXAMPLE_SRCS := $(shell find examples -name "*.cpp")
# BUILD_INCLUDE_DIR contains any generated header files we want to include.
BUILD_INCLUDE_DIR := $(BUILD_DIR)/src
# PROTO_SRCS are the protocol buffer definitions
PROTO_SRC_DIR := src/$(PROJECT)/proto
PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)
# PROTO_BUILD_DIR will contain the .cc and obj files generated from
# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files
PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)
PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto
# NONGEN_CXX_SRCS includes all source/header files except those generated
# automatically (e.g., by proto).
NONGEN_CXX_SRCS := $(shell find \
src/$(PROJECT) \
include/$(PROJECT) \
python/$(PROJECT) \
matlab/+$(PROJECT)/private \
examples \
tools \
-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")
LINT_SCRIPT := scripts/cpp_lint.py
LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint
LINT_EXT := lint.txt
LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))
EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)
NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)
# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)
PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp
PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so
PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp
# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT)
MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp
ifneq ($(MATLAB_DIR),)
MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)
endif
MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT)
##############################
# Derive generated files
##############################
# The generated files for protocol buffers
PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \
$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \
$(notdir ${PROTO_SRCS:.proto=.pb.h}))
PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})
PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto
PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py
PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \
$(PY_PROTO_BUILD_DIR)/$(notdir $(file)))
# The objects corresponding to the source files
# These objects will be linked into the final shared library, so we
# exclude the tool, example, and test objects.
CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})
CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})
PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}
OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)
# tool, example, and test objects
TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})
TOOL_BUILD_DIR := $(BUILD_DIR)/tools
TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test
TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test
TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})
TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})
TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)
GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})
EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})
# Output files for automatic dependency generation
DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \
${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}
# tool, example, and test bins
TOOL_BINS := ${TOOL_OBJS:.o=.bin}
EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}
# symlinks to tool bins without the ".bin" extension
TOOL_BIN_LINKS := ${TOOL_BINS:.bin=}
# Put the test binaries in build/test for convenience.
TEST_BIN_DIR := $(BUILD_DIR)/test
TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
$(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))
TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \
$(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))
TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)
# TEST_ALL_BIN is the test binary that links caffe dynamically.
TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin
##############################
# Derive compiler warning dump locations
##############################
WARNS_EXT := warnings.txt
CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})
CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})
TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})
EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})
TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})
ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)
ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)
ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)
EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)
NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)
##############################
# Derive include and lib directories
##############################
CUDA_INCLUDE_DIR := $(CUDA_DIR)/include
CUDA_LIB_DIR :=
# add <cuda>/lib64 only if it exists
ifneq ("$(wildcard $(CUDA_DIR)/lib64)","")
CUDA_LIB_DIR += $(CUDA_DIR)/lib64
endif
CUDA_LIB_DIR += $(CUDA_DIR)/lib
INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include
ifneq ($(CPU_ONLY), 1)
INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)
LIBRARY_DIRS += $(CUDA_LIB_DIR)
LIBRARIES := cudart cublas curand
endif
LIBRARIES += glog gflags protobuf leveldb snappy \
lmdb boost_system hdf5_hl hdf5 m \
opencv_core opencv_highgui opencv_imgproc
PYTHON_LIBRARIES := boost_python python2.7
WARNINGS := -Wall -Wno-sign-compare
##############################
# Set build directories
##############################
DISTRIBUTE_DIR ?= distribute
DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib
DIST_ALIASES := dist
ifneq ($(strip $(DISTRIBUTE_DIR)),distribute)
DIST_ALIASES += distribute
endif
ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \
$(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \
$(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \
$(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))
##############################
# Set directory for Doxygen-generated documentation
##############################
DOXYGEN_CONFIG_FILE ?= ./.Doxyfile
# should be the same as OUTPUT_DIRECTORY in the .Doxyfile
DOXYGEN_OUTPUT_DIR ?= ./doxygen
DOXYGEN_COMMAND ?= doxygen
# All the files that might have Doxygen documentation.
DOXYGEN_SOURCES := $(shell find \
src/$(PROJECT) \
include/$(PROJECT) \
python/ \
matlab/ \
examples \
tools \
-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \
-name "*.py" -or -name "*.m")
DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)
##############################
# Configure build
##############################
# Determine platform
UNAME := $(shell uname -s)
ifeq ($(UNAME), Linux)
LINUX := 1
else ifeq ($(UNAME), Darwin)
OSX := 1
endif
# Linux
ifeq ($(LINUX), 1)
CXX ?= /usr/bin/g++
GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)
# older versions of gcc are too dumb to build boost with -Wuninitialized
ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1)
WARNINGS += -Wno-uninitialized
endif
# boost::thread is reasonably called boost_thread (compare OS X)
# We will also explicitly add stdc++ to the link target.
LIBRARIES += boost_thread stdc++
endif
# OS X:
# clang++ instead of g++
# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0
ifeq ($(OSX), 1)
CXX := /usr/bin/clang++
ifneq ($(CPU_ONLY), 1)
CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d')
ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1)
CXXFLAGS += -stdlib=libstdc++
LINKFLAGS += -stdlib=libstdc++
endif
# clang throws this warning for cuda headers
WARNINGS += -Wno-unneeded-internal-declaration
endif
# gtest needs to use its own tuple to not conflict with clang
COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1
# boost::thread is called boost_thread-mt to mark multithreading on OS X
LIBRARIES += boost_thread-mt
# we need to explicitly ask for the rpath to be obeyed
DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so
ORIGIN := @loader_path
else
ORIGIN := \$$ORIGIN
endif
# Custom compiler
ifdef CUSTOM_CXX
CXX := $(CUSTOM_CXX)
endif
# Static linking
ifneq (,$(findstring clang++,$(CXX)))
STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)
else ifneq (,$(findstring g++,$(CXX)))
STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive
else
# The following line must not be indented with a tab, since we are not inside a target
$(error Cannot static link with the $(CXX) compiler)
endif
# Debugging
ifeq ($(DEBUG), 1)
COMMON_FLAGS += -DDEBUG -g -O0
NVCCFLAGS += -G
else
COMMON_FLAGS += -DNDEBUG -O2
endif
# cuDNN acceleration configuration.
ifeq ($(USE_CUDNN), 1)
LIBRARIES += cudnn
COMMON_FLAGS += -DUSE_CUDNN
endif
# CPU-only configuration
ifeq ($(CPU_ONLY), 1)
OBJS := $(PROTO_OBJS) $(CXX_OBJS)
TEST_OBJS := $(TEST_CXX_OBJS)
TEST_BINS := $(TEST_CXX_BINS)
ALL_WARNS := $(ALL_CXX_WARNS)
TEST_FILTER := --gtest_filter="-*GPU*"
COMMON_FLAGS += -DCPU_ONLY
endif
# Python layer support
ifeq ($(WITH_PYTHON_LAYER), 1)
COMMON_FLAGS += -DWITH_PYTHON_LAYER
LIBRARIES += $(PYTHON_LIBRARIES)
endif
# BLAS configuration (default = ATLAS)
BLAS ?= atlas
ifeq ($(BLAS), mkl)
# MKL
LIBRARIES += mkl_rt
COMMON_FLAGS += -DUSE_MKL
MKL_DIR ?= /opt/intel/mkl
BLAS_INCLUDE ?= $(MKL_DIR)/include
BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64
else ifeq ($(BLAS), open)
# OpenBLAS
LIBRARIES += openblas
else
# ATLAS
ifeq ($(LINUX), 1)
ifeq ($(BLAS), atlas)
# Linux simply has cblas and atlas
LIBRARIES += cblas atlas
endif
else ifeq ($(OSX), 1)
# OS X packages atlas as the vecLib framework
LIBRARIES += cblas
# 10.10 has accelerate while 10.9 has veclib
XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6')
ifneq (,$(findstring version: 6,$(XCODE_CLT_VER)))
BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
LDFLAGS += -framework Accelerate
else
BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/
LDFLAGS += -framework vecLib
endif
endif
endif
INCLUDE_DIRS += $(BLAS_INCLUDE)
LIBRARY_DIRS += $(BLAS_LIB)
LIBRARY_DIRS += $(LIB_BUILD_DIR)
# Automatic dependency generation (nvcc is handled separately)
CXXFLAGS += -MMD -MP
# Complete build flags.
COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
# mex may invoke an older gcc that is too liberal with -Wuninitialized
MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized
LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
USE_PKG_CONFIG ?= 0
ifeq ($(USE_PKG_CONFIG), 1)
PKG_CONFIG := $(shell pkg-config opencv --libs)
else
PKG_CONFIG :=
endif
LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \
$(foreach library,$(LIBRARIES),-l$(library))
PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))
# 'superclean' target recursively* deletes all files ending with an extension
# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older
# versions of Caffe that do not place all generated files in a location known
# to the 'clean' target.
#
# 'supercleanlist' will list the files to be deleted by make superclean.
#
# * Recursive with the exception that symbolic links are never followed, per the
# default behavior of 'find'.
SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo
# Set the sub-targets of the 'everything' target.
EVERYTHING_TARGETS := all py$(PROJECT) test warn lint
# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.
ifneq ($(MATLAB_DIR),)
EVERYTHING_TARGETS += mat$(PROJECT)
endif
##############################
# Define build targets
##############################
.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \
py mat py$(PROJECT) mat$(PROJECT) proto runtest \
superclean supercleanlist supercleanfiles warn everything
all: lib tools examples
lib: $(STATIC_NAME) $(DYNAMIC_NAME)
everything: $(EVERYTHING_TARGETS)
linecount:
cloc --read-lang-def=$(PROJECT).cloc \
src/$(PROJECT) include/$(PROJECT) tools examples \
python matlab
lint: $(EMPTY_LINT_REPORT)
lintclean:
@ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)
docs: $(DOXYGEN_OUTPUT_DIR)
@ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen
$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)
$(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)
$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)
@ cat $(LINT_OUTPUTS) > $@
@ if [ -s "$@" ]; then \
cat $@; \
mv $@ $(NONEMPTY_LINT_REPORT); \
echo "Found one or more lint errors."; \
exit 1; \
fi; \
$(RM) $(NONEMPTY_LINT_REPORT); \
echo "No lint errors!";
$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)
@ mkdir -p $(dir $@)
@ python $(LINT_SCRIPT) $< 2>&1 \
| grep -v "^Done processing " \
| grep -v "^Total errors found: 0" \
> $@ \
|| true
test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)
tools: $(TOOL_BINS) $(TOOL_BIN_LINKS)
examples: $(EXAMPLE_BINS)
py$(PROJECT): py
py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)
$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@ $<
$(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \
-o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../../build/lib
mat$(PROJECT): mat
mat: $(MAT$(PROJECT)_SO)
$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
@ if [ -z "$(MATLAB_DIR)" ]; then \
echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \
"to build mat$(PROJECT)."; \
exit 1; \
fi
@ echo MEX $<
$(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \
CXX="$(CXX)" \
CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \
CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@
@ if [ -f "$(PROJECT)_.d" ]; then \
mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \
fi
# Run the full gtest suite on the GPU selected by TEST_GPUID.
# NOTE(review): the bare "$(TOOL_BUILD_DIR)/caffe" invocation passes no
# sub-command; it presumably serves as a smoke test that the tool binary
# links and starts -- confirm it exits 0, otherwise runtest aborts here
# before the tests run.
runtest: $(TEST_ALL_BIN)
	$(TOOL_BUILD_DIR)/caffe
	$(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)
pytest: py
cd python; python -m unittest discover -s caffe/test
mattest: mat
cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()'
warn: $(EMPTY_WARN_REPORT)
$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)
@ cat $(ALL_WARNS) > $@
@ if [ -s "$@" ]; then \
cat $@; \
mv $@ $(NONEMPTY_WARN_REPORT); \
echo "Compiler produced one or more warnings."; \
exit 1; \
fi; \
$(RM) $(NONEMPTY_WARN_REPORT); \
echo "No compiler warnings!";
$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o
$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked
# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link
# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it
# exists and $(DEBUG) is toggled later.
$(BUILD_DIR)/.linked:
@ mkdir -p $(BUILD_DIR)
@ $(RM) $(OTHER_BUILD_DIR)/.linked
@ $(RM) -r $(BUILD_DIR_LINK)
@ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)
@ touch $@
$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)
@ mkdir -p $@
$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@ echo LD -o $@
$(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS)
$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)
@ echo AR -o $@
$(Q)ar rcs $@ $(OBJS)
$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)
@ echo CXX $<
$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \
| $(PROTO_BUILD_DIR)
@ echo CXX $<
$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)
@ echo NVCC $<
$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \
-odir $(@D)
$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \
|| (cat $@.$(WARNS_EXT); exit 1)
@ cat $@.$(WARNS_EXT)
$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
| $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo CXX/LD -o $@ $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib
$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \
$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo LD $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib
$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \
$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)
@ echo LD $<
$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \
-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib
# Target for extension-less symlinks to tool binaries with extension '*.bin'.
$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)
@ $(RM) $@
@ ln -s $(abspath $<) $@
$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@
$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../lib
$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@
$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \
-Wl,-rpath,$(ORIGIN)/../../lib
proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)
$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \
$(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)
@ echo PROTOC $<
$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<
$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \
$(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)
@ echo PROTOC \(python\) $<
$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<
$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
touch $(PY_PROTO_INIT)
clean:
@- $(RM) -rf $(ALL_BUILD_DIRS)
@- $(RM) -rf $(OTHER_BUILD_DIR)
@- $(RM) -rf $(BUILD_DIR_LINK)
@- $(RM) -rf $(DISTRIBUTE_DIR)
@- $(RM) $(PY$(PROJECT)_SO)
@- $(RM) $(MAT$(PROJECT)_SO)
supercleanfiles:
$(eval SUPERCLEAN_FILES := $(strip \
$(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \
-not -path './data/*'))))
supercleanlist: supercleanfiles
@ \
if [ -z "$(SUPERCLEAN_FILES)" ]; then \
echo "No generated files found."; \
else \
echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
fi
superclean: clean supercleanfiles
@ \
if [ -z "$(SUPERCLEAN_FILES)" ]; then \
echo "No generated files found."; \
else \
echo "Deleting the following generated files:"; \
echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \
$(RM) $(SUPERCLEAN_FILES); \
fi
$(DIST_ALIASES): $(DISTRIBUTE_DIR)
$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)
# add include
cp -r include $(DISTRIBUTE_DIR)/
mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto
cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto
# add tool and example binaries
cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin
cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin
# add libraries
cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib
cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib
# add python - it's not the standard way, indeed...
cp -r python $(DISTRIBUTE_DIR)/python
-include $(DEPS)
================================================
FILE: Makefile.config.example
================================================
## Refer to http://caffe.berkeleyvision.org/installation.html
# Contributions simplifying and improving our build system are welcome!
# cuDNN acceleration switch (uncomment to build with cuDNN).
# USE_CUDNN := 1
# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1
# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++
# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr
# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
-gencode arch=compute_30,code=sm_30 \
-gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_50,code=sm_50 \
-gencode arch=compute_50,code=compute_50
# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBlas
BLAS := atlas
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
# BLAS_INCLUDE := /path/to/your/blas
# BLAS_LIB := /path/to/your/blas
# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib
# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app
# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
PYTHON_INCLUDE := /usr/include/python2.7 \
/usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
# ANACONDA_HOME := $(HOME)/anaconda
# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
# $(ANACONDA_HOME)/include/python2.7 \
# $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \
# We need to be able to find libpythonX.X.so or .dylib.
PYTHON_LIB := /usr/lib
# PYTHON_LIB := $(ANACONDA_HOME)/lib
# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib
# Uncomment to support layers written in Python (will link against Python libs)
# WITH_PYTHON_LAYER := 1
# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib
# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1
BUILD_DIR := build
DISTRIBUTE_DIR := distribute
# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171
# DEBUG := 1
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0
# enable pretty build (comment to see full commands)
Q ?= @
================================================
FILE: README.md
================================================
# Dynamic network surgery
Dynamic network surgery is a very effective method for DNN pruning. To better use it with python and matlab, you may also need a [classic version](https://github.com/BVLC/caffe/tree/aa2a6f55b9e50b29d607aaee0fae19bd085d6565) of the [Caffe framework](http://caffe.berkeleyvision.org).
For the convolutional and fully-connected layers to be pruned, change their layer types to "CConvolution" and "CInnerProduct" respectively. Then, pass "cconvolution_param" and "cinner_product_param" messages to these modified layers for better pruning performance.
# Example for usage
Below is an example for pruning the "ip1" layer in LeNet5:
layer {
name: "ip1"
type: "CInnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
cinner_product_param {
gamma: 0.0001
power: 1
c_rate: 4
iter_stop: 14000
weight_mask_filler {
type: "constant"
value: 1
}
bias_mask_filler {
type: "constant"
value: 1
}
}
}
# Citation
Please cite our work in your publications if it helps your research:
@inproceedings{guo2016dynamic,
title = {Dynamic Network Surgery for Efficient DNNs},
author = {Guo, Yiwen and Yao, Anbang and Chen, Yurong},
booktitle = {Advances in neural information processing systems (NIPS)},
year = {2016}
}
and do not forget about Caffe:
@article{jia2014caffe,
Author = {Jia, Yangqing and Shelhamer, Evan and Donahue, Jeff and Karayev, Sergey and Long, Jonathan and Girshick, Ross and Guadarrama, Sergio and Darrell, Trevor},
Journal = {arXiv preprint arXiv:1408.5093},
Title = {Caffe: Convolutional Architecture for Fast Feature Embedding},
Year = {2014}
}
Enjoy your own surgeries!
================================================
FILE: caffe.cloc
================================================
Bourne Shell
filter remove_matches ^\s*#
filter remove_inline #.*$
extension sh
script_exe sh
C
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension c
extension ec
extension pgc
C++
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension C
extension cc
extension cpp
extension cxx
extension pcc
C/C++ Header
filter remove_matches ^\s*//
filter call_regexp_common C
filter remove_inline //.*$
extension H
extension h
extension hh
extension hpp
CUDA
filter remove_matches ^\s*//
filter remove_inline //.*$
filter call_regexp_common C
extension cu
Python
filter remove_matches ^\s*#
filter docstring_to_C
filter call_regexp_common C
filter remove_inline #.*$
extension py
make
filter remove_matches ^\s*#
filter remove_inline #.*$
extension Gnumakefile
extension Makefile
extension am
extension gnumakefile
extension makefile
filename Gnumakefile
filename Makefile
filename gnumakefile
filename makefile
script_exe make
================================================
FILE: cmake/ConfigGen.cmake
================================================
################################################################################################
# Helper function to fetch caffe includes which will be passed to dependent projects
# Usage:
# caffe_get_current_includes(<includes_list_variable>)
function(caffe_get_current_includes includes_variable)
  get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES)
  caffe_convert_absolute_paths(current_includes)

  # remove at most one ${PROJECT_BINARY_DIR} include added for caffe_config.h
  list(FIND current_includes ${PROJECT_BINARY_DIR} __index)
  # BUGFIX: guard against a -1 result. list(REMOVE_AT) interprets a negative
  # index as counting from the end, so passing -1 would silently remove the
  # *last* include directory whenever ${PROJECT_BINARY_DIR} is not in the list.
  if(NOT __index EQUAL -1)
    list(REMOVE_AT current_includes ${__index})
  endif()

  # removing numpy includes (since not required for client libs)
  set(__toremove "")
  foreach(__i ${current_includes})
    if(${__i} MATCHES "python")
      list(APPEND __toremove ${__i})
    endif()
  endforeach()
  if(__toremove)
    list(REMOVE_ITEM current_includes ${__toremove})
  endif()

  caffe_list_unique(current_includes)
  set(${includes_variable} ${current_includes} PARENT_SCOPE)
endfunction()
################################################################################################
# Helper function to get all list items that begin with given prefix
# Usage:
# caffe_get_items_with_prefix(<prefix> <list_variable> <output_variable>)
# Copy into ${output_variable} (in the caller's scope) every element of
# ${list_variable} whose value begins with the given prefix.
function(caffe_get_items_with_prefix prefix list_variable output_variable)
  set(__matched "")
  foreach(__item ${${list_variable}})
    if(__item MATCHES "^${prefix}.*")
      list(APPEND __matched ${__item})
    endif()
  endforeach()
  set(${output_variable} ${__matched} PARENT_SCOPE)
endfunction()
################################################################################################
# Function for generation Caffe build- and install- tree export config files
# Usage:
# caffe_generate_export_configs()
function(caffe_generate_export_configs)
  set(install_cmake_suffix "share/Caffe")

  # ---[ Configure build-tree CaffeConfig.cmake file ]---
  caffe_get_current_includes(Caffe_INCLUDE_DIRS)

  # Compile definitions that dependent projects must replicate to use Caffe.
  set(Caffe_DEFINITIONS "")
  if(NOT HAVE_CUDA)
    set(HAVE_CUDA FALSE)
    list(APPEND Caffe_DEFINITIONS -DCPU_ONLY)
  endif()

  if(NOT HAVE_CUDNN)
    set(HAVE_CUDNN FALSE)
  else()
    # BUGFIX: this previously appended to the undefined variable DEFINITIONS,
    # so client projects configured via CaffeConfig.cmake never received
    # -DUSE_CUDNN even when Caffe itself was built with cuDNN.
    list(APPEND Caffe_DEFINITIONS -DUSE_CUDNN)
  endif()

  if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl")
    list(APPEND Caffe_DEFINITIONS -DUSE_MKL)
  endif()

  configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/CaffeConfig.cmake" @ONLY)

  # Add targets to the build-tree export set
  export(TARGETS caffe proto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake")
  export(PACKAGE Caffe)

  # ---[ Configure install-tree CaffeConfig.cmake file ]---
  # remove source and build dir includes
  caffe_get_items_with_prefix(${PROJECT_SOURCE_DIR} Caffe_INCLUDE_DIRS __insource)
  caffe_get_items_with_prefix(${PROJECT_BINARY_DIR} Caffe_INCLUDE_DIRS __inbinary)
  list(REMOVE_ITEM Caffe_INCLUDE_DIRS ${__insource} ${__inbinary})

  # add `install` include folder
  set(lines
     "get_filename_component(__caffe_include \"\${Caffe_CMAKE_DIR}/../../include\" ABSOLUTE)\n"
     "list(APPEND Caffe_INCLUDE_DIRS \${__caffe_include})\n"
     "unset(__caffe_include)\n")
  string(REPLACE ";" "" Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND ${lines})

  configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" @ONLY)

  # Install the CaffeConfig.cmake and export set to use with install-tree
  install(FILES "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" DESTINATION ${install_cmake_suffix})
  install(EXPORT CaffeTargets DESTINATION ${install_cmake_suffix})

  # ---[ Configure and install version file ]---
  # TODO: Lines below are commented because Caffe doesn't declare its version in headers.
  # When the declarations are added, modify `caffe_extract_caffe_version()` macro and uncomment
  # configure_file(cmake/Templates/CaffeConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" @ONLY)
  # install(FILES "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" DESTINATION ${install_cmake_suffix})
endfunction()
================================================
FILE: cmake/Cuda.cmake
================================================
if(CPU_ONLY)
return()
endif()
# Known NVIDIA GPU architectures Caffe can be compiled for.
# This list will be used for CUDA_ARCH_NAME = All option
set(Caffe_known_gpu_archs "20 21(20) 30 35 50")
################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
# caffe_detect_installed_gpus(out_variable)
function(caffe_detect_installed_gpus out_variable)
  # Cache the probe result (CUDA_gpu_detect_output) so the slow nvcc
  # compile-and-run happens at most once per build tree.
  if(NOT CUDA_gpu_detect_output)
    set(__cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
    # Probe program: prints "major.minor " for each visible CUDA device and
    # returns non-zero when no device is usable.
    file(WRITE ${__cufile} ""
      "#include <cstdio>\n"
      "int main()\n"
      "{\n"
      " int count = 0;\n"
      " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
      " if (count == 0) return -1;\n"
      " for (int device = 0; device < count; ++device)\n"
      " {\n"
      " cudaDeviceProp prop;\n"
      " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
      " std::printf(\"%d.%d \", prop.major, prop.minor);\n"
      " }\n"
      " return 0;\n"
      "}\n")

    # "--run" makes nvcc compile AND execute the probe; stdout holds the list.
    execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${__cufile}"
                    WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
                    RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
                    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

    if(__nvcc_res EQUAL 0)
      # Rewrite "2.1" as "2.1(2.0)": caffe_select_nvcc_arch_flags parses
      # BIN(PTX) pairs, so sm_21 binaries get generated from compute_20 PTX.
      string(REPLACE "2.1" "2.1(2.0)" __nvcc_out "${__nvcc_out}")
      set(CUDA_gpu_detect_output ${__nvcc_out} CACHE INTERNAL "Returned GPU architetures from caffe_detect_gpus tool" FORCE)
    endif()
  endif()

  # Fall back to every architecture Caffe knows when detection failed
  # (e.g. no GPU present, driver missing, or nvcc could not run the probe).
  if(NOT CUDA_gpu_detect_output)
    message(STATUS "Automatic GPU detection failed. Building for all known architectures.")
    set(${out_variable} ${Caffe_known_gpu_archs} PARENT_SCOPE)
  else()
    set(${out_variable} ${CUDA_gpu_detect_output} PARENT_SCOPE)
  endif()
endfunction()
################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
# caffe_select_nvcc_arch_flags(out_variable)
function(caffe_select_nvcc_arch_flags out_variable)
  # List of arch names
  set(__archs_names "Fermi" "Kepler" "Maxwell" "All" "Manual")
  set(__archs_name_default "All")
  if(NOT CMAKE_CROSSCOMPILING)
    # GPU auto-detection only makes sense when building on the target machine.
    list(APPEND __archs_names "Auto")
    set(__archs_name_default "Auto")
  endif()

  # set CUDA_ARCH_NAME strings (so it will be seen as a drop-down list in CMake-Gui)
  set(CUDA_ARCH_NAME ${__archs_name_default} CACHE STRING "Select target NVIDIA GPU achitecture.")
  set_property( CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${__archs_names} )
  mark_as_advanced(CUDA_ARCH_NAME)

  # verify CUDA_ARCH_NAME value
  if(NOT ";${__archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
    string(REPLACE ";" ", " __archs_names "${__archs_names}")
    message(FATAL_ERROR "Only ${__archs_names} architeture names are supported.")
  endif()

  # "Manual" exposes explicit BIN/PTX cache variables; any other choice
  # removes them so stale values cannot leak into the flag computation below.
  if(${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(CUDA_ARCH_BIN ${Caffe_known_gpu_archs} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
    set(CUDA_ARCH_PTX "50" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
    mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
  else()
    unset(CUDA_ARCH_BIN CACHE)
    unset(CUDA_ARCH_PTX CACHE)
  endif()

  # Map the selected architecture name to a list of compute capabilities.
  if(${CUDA_ARCH_NAME} STREQUAL "Fermi")
    set(__cuda_arch_bin "20 21(20)")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Kepler")
    set(__cuda_arch_bin "30 35")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
    set(__cuda_arch_bin "50")
  elseif(${CUDA_ARCH_NAME} STREQUAL "All")
    set(__cuda_arch_bin ${Caffe_known_gpu_archs})
  elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
    caffe_detect_installed_gpus(__cuda_arch_bin)
  else()  # (${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(__cuda_arch_bin ${CUDA_ARCH_BIN})
  endif()

  # remove dots and convert to lists
  string(REGEX REPLACE "\\." "" __cuda_arch_bin "${__cuda_arch_bin}")
  string(REGEX REPLACE "\\." "" __cuda_arch_ptx "${CUDA_ARCH_PTX}")
  # Entries may look like "21(20)": binary arch 21 built from virtual arch 20.
  string(REGEX MATCHALL "[0-9()]+" __cuda_arch_bin "${__cuda_arch_bin}")
  string(REGEX MATCHALL "[0-9]+" __cuda_arch_ptx "${__cuda_arch_ptx}")
  caffe_list_unique(__cuda_arch_bin __cuda_arch_ptx)

  set(__nvcc_flags "")
  set(__nvcc_archs_readable "")

  # Tell NVCC to add binaries for the specified GPUs
  foreach(__arch ${__cuda_arch_bin})
    if(__arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified PTX for the concrete BIN
      list(APPEND __nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
      list(APPEND __nvcc_archs_readable sm_${CMAKE_MATCH_1})
    else()
      # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
      list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=sm_${__arch})
      list(APPEND __nvcc_archs_readable sm_${__arch})
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  foreach(__arch ${__cuda_arch_ptx})
    list(APPEND __nvcc_flags -gencode arch=compute_${__arch},code=compute_${__arch})
    list(APPEND __nvcc_archs_readable compute_${__arch})
  endforeach()

  # Return the nvcc flag list plus a human-readable summary in
  # ${out_variable}_readable for the status message at configure time.
  string(REPLACE ";" " " __nvcc_archs_readable "${__nvcc_archs_readable}")
  set(${out_variable} ${__nvcc_flags} PARENT_SCOPE)
  set(${out_variable}_readable ${__nvcc_archs_readable} PARENT_SCOPE)
endfunction()
################################################################################################
# Short command for CUDA compilation
# Usage:
# caffe_cuda_compile(<objlist_variable> <cuda_files>)
macro(caffe_cuda_compile objlist_variable)
  # Back up the host C++ flag variables before mutating them;
  # cuda_compile() presumably propagates host flags to nvcc (via
  # CUDA_PROPAGATE_HOST_FLAGS) and not every flag is accepted there.
  foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
    set(${var}_backup_in_cuda_compile_ "${${var}}")

    # we remove /EHa as it generates warnings under windows
    string(REPLACE "/EHa" "" ${var} "${${var}}")
  endforeach()

  if(UNIX OR APPLE)
    # Position-independent code so the objects can be linked into the
    # caffe shared library.
    list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
  endif()

  if(APPLE)
    list(APPEND CUDA_NVCC_FLAGS -Xcompiler -Wno-unused-function)
  endif()

  cuda_compile(cuda_objcs ${ARGN})

  # Restore the original host flag variables and drop the backups so the
  # macro leaves the caller's scope unchanged.
  foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
    set(${var} "${${var}_backup_in_cuda_compile_}")
    unset(${var}_backup_in_cuda_compile_)
  endforeach()

  # Hand the generated object file list back under the caller-chosen name.
  set(${objlist_variable} ${cuda_objcs})
endmacro()
################################################################################################
# Short command for cuDNN detection. Believe it soon will be a part of CUDA toolkit distribution.
# That's why not FindcuDNN.cmake file, but just the macro
# Usage:
# detect_cuDNN()
function(detect_cuDNN)
  set(CUDNN_ROOT "" CACHE PATH "CUDNN root folder")

  # Look for the header under the user-supplied root (cache variable or
  # environment) and the CUDA toolkit's own include directory.
  find_path(CUDNN_INCLUDE cudnn.h
            PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDA_TOOLKIT_INCLUDE}
            DOC "Path to cuDNN include directory." )

  # cuDNN is commonly installed next to libcudart, so use that library's
  # directory as an extra search hint.
  get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
  find_library(CUDNN_LIBRARY NAMES libcudnn.so # libcudnn_static.a
               PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist}
               DOC "Path to cuDNN library.")

  # NOTE: HAVE_CUDNN / CUDNN_FOUND are only set on success; callers must
  # treat an unset value as "not found" (no message is printed on failure).
  if(CUDNN_INCLUDE AND CUDNN_LIBRARY)
    set(HAVE_CUDNN TRUE PARENT_SCOPE)
    set(CUDNN_FOUND TRUE PARENT_SCOPE)

    mark_as_advanced(CUDNN_INCLUDE CUDNN_LIBRARY CUDNN_ROOT)
    message(STATUS "Found cuDNN (include: ${CUDNN_INCLUDE}, library: ${CUDNN_LIBRARY})")
  endif()
endfunction()
################################################################################################
### Non macro section
################################################################################################
find_package(CUDA 5.5 QUIET)
find_cuda_helper_libs(curand) # cmake 2.8.7 compatibility: it doesn't search for curand
if(NOT CUDA_FOUND)
return()
endif()
set(HAVE_CUDA TRUE)
message(STATUS "CUDA detected: " ${CUDA_VERSION})
include_directories(SYSTEM ${CUDA_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${CUDA_CUDART_LIBRARY}
${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES})
# cudnn detection
if(USE_CUDNN)
detect_cuDNN()
if(HAVE_CUDNN)
add_definitions(-DUSE_CUDNN)
include_directories(SYSTEM ${CUDNN_INCLUDE})
list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY})
endif()
endif()
# setting nvcc arch flags
caffe_select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA_readable}")
# Boost 1.55 workaround, see https://svn.boost.org/trac/boost/ticket/9392 or
# https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
if(Boost_VERSION EQUAL 105500)
message(STATUS "Cuda + Boost 1.55: Applying noinline work around")
# avoid warning for CMake >= 2.8.12
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ")
endif()
# disable some nvcc diagnostics that appear in boost, glog, gflags, opencv, etc.
foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration set_but_not_used)
list(APPEND CUDA_NVCC_FLAGS -Xcudafe --diag_suppress=${diag})
endforeach()
# setting default testing device
if(NOT CUDA_TEST_DEVICE)
set(CUDA_TEST_DEVICE -1)
endif()
mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
# Handle clang/libc++ issue
if(APPLE)
caffe_detect_darwin_version(OSX_VERSION)
# OSX 10.9 and higher uses clang/libc++ by default, which is incompatible with old CUDA toolkits
if(OSX_VERSION VERSION_GREATER 10.8)
# enabled by default if and only if CUDA version is less than 7.0
caffe_option(USE_libstdcpp "Use libstdc++ instead of libc++" (CUDA_VERSION VERSION_LESS 7.0))
endif()
endif()
================================================
FILE: cmake/Dependencies.cmake
================================================
# This list is required for static linking and exported to CaffeConfig.cmake
set(Caffe_LINKER_LIBS "")
# ---[ Boost
find_package(Boost 1.46 REQUIRED COMPONENTS system thread)
include_directories(SYSTEM ${Boost_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES})
# ---[ Threads
find_package(Threads REQUIRED)
list(APPEND Caffe_LINKER_LIBS ${CMAKE_THREAD_LIBS_INIT})
# ---[ Google-glog
include("cmake/External/glog.cmake")
include_directories(SYSTEM ${GLOG_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${GLOG_LIBRARIES})
# ---[ Google-gflags
include("cmake/External/gflags.cmake")
include_directories(SYSTEM ${GFLAGS_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${GFLAGS_LIBRARIES})
# ---[ Google-protobuf
include(cmake/ProtoBuf.cmake)
# ---[ HDF5
find_package(HDF5 COMPONENTS HL REQUIRED)
include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES})
# ---[ LMDB
find_package(LMDB REQUIRED)
include_directories(SYSTEM ${LMDB_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES})
# ---[ LevelDB
find_package(LevelDB REQUIRED)
include_directories(SYSTEM ${LevelDB_INCLUDE})
list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES})
# ---[ Snappy
find_package(Snappy REQUIRED)
include_directories(SYSTEM ${Snappy_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES})
# ---[ CUDA
include(cmake/Cuda.cmake)
if(NOT HAVE_CUDA)
if(CPU_ONLY)
message("-- CUDA is disabled. Building without it...")
else()
message("-- CUDA is not detected by cmake. Building without it...")
endif()
# TODO: remove this not cross platform define in future. Use caffe_config.h instead.
add_definitions(-DCPU_ONLY)
endif()
# ---[ OpenCV
find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs)
if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)
endif()
include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS})
message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})")
# ---[ BLAS
if(NOT APPLE)
set(BLAS "Atlas" CACHE STRING "Selected BLAS library")
set_property(CACHE BLAS PROPERTY STRINGS "Atlas;Open;MKL")
if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas")
find_package(Atlas REQUIRED)
include_directories(SYSTEM ${Atlas_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${Atlas_LIBRARIES})
elseif(BLAS STREQUAL "Open" OR BLAS STREQUAL "open")
find_package(OpenBLAS REQUIRED)
include_directories(SYSTEM ${OpenBLAS_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${OpenBLAS_LIB})
elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl")
find_package(MKL REQUIRED)
include_directories(SYSTEM ${MKL_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${MKL_LIBRARIES})
add_definitions(-DUSE_MKL)
endif()
elseif(APPLE)
find_package(vecLib REQUIRED)
include_directories(SYSTEM ${vecLib_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${vecLib_LINKER_LIBS})
endif()
# ---[ Python
if(BUILD_python)
if(NOT "${python_version}" VERSION_LESS "3.0.0")
# use python3
find_package(PythonInterp 3.0)
find_package(PythonLibs 3.0)
find_package(NumPy 1.7.1)
# Find the matching boost python implementation
set(version ${PYTHONLIBS_VERSION_STRING})
STRING( REPLACE "." "" boost_py_version ${version} )
find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}")
set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND})
while(NOT "${version}" STREQUAL "" AND NOT Boost_PYTHON_FOUND)
STRING( REGEX REPLACE "([0-9.]+).[0-9]+" "\\1" version ${version} )
STRING( REPLACE "." "" boost_py_version ${version} )
find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}")
set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND})
STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} )
if("${has_more_version}" STREQUAL "")
break()
endif()
endwhile()
if(NOT Boost_PYTHON_FOUND)
find_package(Boost 1.46 COMPONENTS python)
endif()
else()
# disable Python 3 search
find_package(PythonInterp 2.7)
find_package(PythonLibs 2.7)
find_package(NumPy 1.7.1)
find_package(Boost 1.46 COMPONENTS python)
endif()
if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND)
set(HAVE_PYTHON TRUE)
if(BUILD_python_layer)
add_definitions(-DWITH_PYTHON_LAYER)
include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES})
endif()
endif()
endif()
# ---[ Matlab
if(BUILD_matlab)
find_package(MatlabMex)
if(MATLABMEX_FOUND)
set(HAVE_MATLAB TRUE)
endif()
# sudo apt-get install liboctave-dev
find_program(Octave_compiler NAMES mkoctfile DOC "Octave C++ compiler")
if(HAVE_MATLAB AND Octave_compiler)
set(Matlab_build_mex_using "Matlab" CACHE STRING "Select Matlab or Octave if both detected")
set_property(CACHE Matlab_build_mex_using PROPERTY STRINGS "Matlab;Octave")
endif()
endif()
# ---[ Doxygen
if(BUILD_docs)
find_package(Doxygen)
endif()
================================================
FILE: cmake/External/gflags.cmake
================================================
if (NOT __GFLAGS_INCLUDED) # guard against multiple includes
  set(__GFLAGS_INCLUDED TRUE)

  # use the system-wide gflags if present
  find_package(GFlags)
  if (GFLAGS_FOUND)
    set(GFLAGS_EXTERNAL FALSE)
  else()
    # gflags will use pthreads if it's available in the system, so we must link with it
    find_package(Threads)

    # build directory
    set(gflags_PREFIX ${CMAKE_BINARY_DIR}/external/gflags-prefix)
    # install directory
    set(gflags_INSTALL ${CMAKE_BINARY_DIR}/external/gflags-install)

    # we build gflags statically, but want to link it into the caffe shared library
    # this requires position-independent code
    if (UNIX)
      set(GFLAGS_EXTRA_COMPILER_FLAGS "-fPIC")
    endif()

    set(GFLAGS_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})
    set(GFLAGS_C_FLAGS ${CMAKE_C_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})

    ExternalProject_Add(gflags
      PREFIX ${gflags_PREFIX}
      GIT_REPOSITORY "https://github.com/gflags/gflags.git"
      GIT_TAG "v2.1.2"
      UPDATE_COMMAND ""
      INSTALL_DIR ${gflags_INSTALL}
      CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
                 -DCMAKE_INSTALL_PREFIX=${gflags_INSTALL}
                 -DBUILD_SHARED_LIBS=OFF
                 -DBUILD_STATIC_LIBS=ON
                 -DBUILD_PACKAGING=OFF
                 -DBUILD_TESTING=OFF
                 -DBUILD_NC_TESTS=OFF
                 # BUGFIX: was "-BUILD_CONFIG_TESTS=OFF" (missing -D), which the
                 # sub-CMake does not interpret as a cache definition.
                 -DBUILD_CONFIG_TESTS=OFF
                 -DINSTALL_HEADERS=ON
                 -DCMAKE_C_FLAGS=${GFLAGS_C_FLAGS}
                 -DCMAKE_CXX_FLAGS=${GFLAGS_CXX_FLAGS}
      LOG_DOWNLOAD 1
      LOG_INSTALL 1
      )

    # Export the same variables FindGFlags would have provided.
    set(GFLAGS_FOUND TRUE)
    set(GFLAGS_INCLUDE_DIRS ${gflags_INSTALL}/include)
    set(GFLAGS_LIBRARIES ${gflags_INSTALL}/lib/libgflags.a ${CMAKE_THREAD_LIBS_INIT})
    set(GFLAGS_LIBRARY_DIRS ${gflags_INSTALL}/lib)
    set(GFLAGS_EXTERNAL TRUE)

    list(APPEND external_project_dependencies gflags)
  endif()
endif()
================================================
FILE: cmake/External/glog.cmake
================================================
# glog depends on gflags
include("cmake/External/gflags.cmake")
if (NOT __GLOG_INCLUDED)
  set(__GLOG_INCLUDED TRUE)

  # try the system-wide glog first
  find_package(Glog)
  if (GLOG_FOUND)
    set(GLOG_EXTERNAL FALSE)
  else()
    # fetch and build glog from github

    # build directory
    set(glog_PREFIX ${CMAKE_BINARY_DIR}/external/glog-prefix)
    # install directory
    set(glog_INSTALL ${CMAKE_BINARY_DIR}/external/glog-install)

    # we build glog statically, but want to link it into the caffe shared library
    # this requires position-independent code
    if (UNIX)
      set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC")
    endif()

    set(GLOG_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})
    set(GLOG_C_FLAGS ${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})

    # depend on gflags if we're also building it
    if (GFLAGS_EXTERNAL)
      set(GLOG_DEPENDS gflags)
    endif()

    ExternalProject_Add(glog
      DEPENDS ${GLOG_DEPENDS}
      PREFIX ${glog_PREFIX}
      GIT_REPOSITORY "https://github.com/google/glog"
      GIT_TAG "v0.3.4"
      UPDATE_COMMAND ""
      # BUGFIX: INSTALL_DIR previously pointed at ${gflags_INSTALL} (copy-paste
      # from gflags.cmake); glog installs under its own prefix, matching the
      # --prefix passed to configure below.
      INSTALL_DIR ${glog_INSTALL}
      CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/..
      LOG_DOWNLOAD 1
      LOG_CONFIGURE 1
      LOG_INSTALL 1
      )

    # Export the same variables FindGlog would have provided. Note that the
    # gflags static archive must precede libglog's dependency on it at link
    # time, hence GFLAGS_LIBRARIES is included here.
    set(GLOG_FOUND TRUE)
    set(GLOG_INCLUDE_DIRS ${glog_INSTALL}/include)
    set(GLOG_LIBRARIES ${GFLAGS_LIBRARIES} ${glog_INSTALL}/lib/libglog.a)
    set(GLOG_LIBRARY_DIRS ${glog_INSTALL}/lib)
    set(GLOG_EXTERNAL TRUE)

    list(APPEND external_project_dependencies glog)
  endif()
endif()
================================================
FILE: cmake/Misc.cmake
================================================
# ---[ Configuration types
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE)
mark_as_advanced(CMAKE_CONFIGURATION_TYPES)

if(DEFINED CMAKE_BUILD_TYPE)
  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES})
endif()

# --[ If user doesn't specify build type then assume release
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
  set(CMAKE_BUILD_TYPE Release)
endif()

if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  set(CMAKE_COMPILER_IS_CLANGXX TRUE)
endif()

# ---[ Solution folders
caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) )

if(USE_PROJECT_FOLDERS)
  set_property(GLOBAL PROPERTY USE_FOLDERS ON)
  set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets")
endif()

# ---[ Install options
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE)
endif()

# ---[ RPATH settings
# "BOOL" is the valid cache entry type (was the non-standard type "BOOLEAN")
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOL "Use link paths for shared library rpath")
set(CMAKE_MACOSX_RPATH TRUE)

# Only embed an install rpath when the install lib dir is not already an
# implicit system linker directory (local var typo "__is_systtem_dir" fixed).
list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_system_dir)
if(${__is_system_dir} STREQUAL -1)
  set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib)
endif()

# ---[ Funny target
if(UNIX OR APPLE)
  add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build"
                                     COMMENT "Adding symlink: <caffe_root>/build -> ${PROJECT_BINARY_DIR}" )
endif()

# ---[ Set debug postfix
# NOTE(review): the "CAffe_POSTFIX" capitalization is kept as-is because other
# project scripts may reference the variable under this exact name.
set(Caffe_DEBUG_POSTFIX "-d")
set(CAffe_POSTFIX "")
if(CMAKE_BUILD_TYPE MATCHES "Debug")
  set(CAffe_POSTFIX ${Caffe_DEBUG_POSTFIX})
endif()
================================================
FILE: cmake/Modules/FindAtlas.cmake
================================================
# Find the Atlas (and Lapack) libraries
#
# The following variables are optionally searched for defaults
#  Atlas_ROOT_DIR (environment variable): Base directory where all Atlas
#                                         components are found
#
# The following are set after configuration is done:
#  ATLAS_FOUND (upper-case, as set by find_package_handle_standard_args)
#  Atlas_INCLUDE_DIR (NOTE(review): the header originally advertised the
#                     plural Atlas_INCLUDE_DIRS, but the code below sets the
#                     singular name -- confirm which one consumers expect)
#  Atlas_LIBRARIES
#  Atlas_LIBRARY_DIRS (NOTE(review): not actually set by this module)

# Candidate include directories; the Atlas_ROOT_DIR env var is honored here.
set(Atlas_INCLUDE_SEARCH_PATHS
/usr/include/atlas
/usr/include/atlas-base
$ENV{Atlas_ROOT_DIR}
$ENV{Atlas_ROOT_DIR}/include
)
# Candidate library directories.
set(Atlas_LIB_SEARCH_PATHS
/usr/lib/atlas
/usr/lib/atlas-base
$ENV{Atlas_ROOT_DIR}
$ENV{Atlas_ROOT_DIR}/lib
)
# Headers: cblas.h (C BLAS interface) and clapack.h (C LAPACK interface).
find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
# Libraries: threaded variants (pt*/*_r) are listed before serial ones.
find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
# Every entry in this list must be found for ATLAS_FOUND to be set.
set(LOOKED_FOR
Atlas_CBLAS_INCLUDE_DIR
Atlas_CLAPACK_INCLUDE_DIR
Atlas_CBLAS_LIBRARY
Atlas_BLAS_LIBRARY
Atlas_LAPACK_LIBRARY
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR})
if(ATLAS_FOUND)
# Aggregate the individual results for consumers.
set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR})
set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY})
mark_as_advanced(${LOOKED_FOR})
message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})")
endif(ATLAS_FOUND)
================================================
FILE: cmake/Modules/FindGFlags.cmake
================================================
# - Try to find GFLAGS
#
# The following variables are optionally searched for defaults
#  GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found
#
# The following are set after configuration is done:
#  GFLAGS_FOUND
#  GFLAGS_INCLUDE_DIRS
#  GFLAGS_LIBRARIES
#  GFLAGS_LIBRARY_DIRS (NOTE(review): not actually set by this module)
include(FindPackageHandleStandardArgs)
set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags")
# We are testing only a couple of files in the include directories
# (on Windows the public headers live under src/windows in a source tree).
if(WIN32)
find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
PATHS ${GFLAGS_ROOT_DIR}/src/windows)
else()
find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
PATHS ${GFLAGS_ROOT_DIR})
endif()
if(MSVC)
# MSVC ships separate Release/Debug binaries; record both so the generator
# picks the right one per configuration via the optimized/debug keywords.
find_library(GFLAGS_LIBRARY_RELEASE
NAMES libgflags
PATHS ${GFLAGS_ROOT_DIR}
PATH_SUFFIXES Release)
find_library(GFLAGS_LIBRARY_DEBUG
NAMES libgflags-debug
PATHS ${GFLAGS_ROOT_DIR}
PATH_SUFFIXES Debug)
set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
else()
find_library(GFLAGS_LIBRARY gflags)
endif()
find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)
if(GFLAGS_FOUND)
set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})")
mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE
GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR)
endif()
================================================
FILE: cmake/Modules/FindGlog.cmake
================================================
# - Try to find Glog
#
# The following variables are optionally searched for defaults
#  GLOG_ROOT_DIR: Base directory where all GLOG components are found
#
# The following are set after configuration is done:
#  GLOG_FOUND
#  GLOG_INCLUDE_DIRS
#  GLOG_LIBRARIES
#  GLOG_LIBRARY_DIRS (NOTE(review): not actually set by this module)
include(FindPackageHandleStandardArgs)
set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog")
# In a Windows source tree the public headers live under src/windows.
if(WIN32)
find_path(GLOG_INCLUDE_DIR glog/logging.h
PATHS ${GLOG_ROOT_DIR}/src/windows)
else()
find_path(GLOG_INCLUDE_DIR glog/logging.h
PATHS ${GLOG_ROOT_DIR})
endif()
if(MSVC)
# Record Release and Debug variants via the optimized/debug keywords.
find_library(GLOG_LIBRARY_RELEASE libglog_static
PATHS ${GLOG_ROOT_DIR}
PATH_SUFFIXES Release)
find_library(GLOG_LIBRARY_DEBUG libglog_static
PATHS ${GLOG_ROOT_DIR}
PATH_SUFFIXES Debug)
set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG})
else()
find_library(GLOG_LIBRARY glog
PATHS ${GLOG_ROOT_DIR}
PATH_SUFFIXES lib lib64)
endif()
find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)
if(GLOG_FOUND)
set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR})
set(GLOG_LIBRARIES ${GLOG_LIBRARY})
message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})")
mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG
GLOG_LIBRARY GLOG_INCLUDE_DIR)
endif()
================================================
FILE: cmake/Modules/FindLAPACK.cmake
================================================
# - Find LAPACK library
# This module finds an installed fortran library that implements the LAPACK
# linear-algebra interface (see http://www.netlib.org/lapack/).
#
# The approach follows that taken for the autoconf macro file, acx_lapack.m4
# (distributed at http://ac-archive.sourceforge.net/ac-archive/acx_lapack.html).
#
# This module sets the following variables:
#  LAPACK_FOUND - set to true if a library implementing the LAPACK interface is found
#  LAPACK_LIBRARIES - list of libraries (using full path name) for LAPACK
# Note: I do not think it is a good idea to mixup different BLAS/LAPACK versions
# Hence, this script wants to find a Lapack library matching your Blas library
# Do nothing if LAPACK was found before
IF(NOT LAPACK_FOUND)
SET(LAPACK_LIBRARIES)
SET(LAPACK_INFO)
# LAPACK needs a matching BLAS; propagate the caller's QUIET/REQUIRED mode.
IF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
FIND_PACKAGE(BLAS)
ELSE(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
FIND_PACKAGE(BLAS REQUIRED)
ENDIF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
# Old search lapack script
include(CheckFortranFunctionExists)
macro(Check_Lapack_Libraries LIBRARIES _prefix _name _flags _list _blas)
# This macro checks for the existence of the combination of fortran libraries
# given by _list. If the combination is found, this macro checks (using the
# Check_Fortran_Function_Exists macro) whether can link against that library
# combination using the name of a routine given by _name using the linker
# flags given by _flags. If the combination of libraries is found and passes
# the link test, LIBRARIES is set to the list of complete library paths that
# have been found. Otherwise, LIBRARIES is set to FALSE.
# N.B. _prefix is the prefix applied to the names of all cached variables that
# are generated internally and marked advanced by this macro.
set(_libraries_work TRUE)
set(${LIBRARIES})
set(_combined_name)
foreach(_library ${_list})
set(_combined_name ${_combined_name}_${_library})
# Stop probing as soon as one library in the chain fails to resolve.
if(_libraries_work)
if (WIN32)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library} PATHS ENV LIB PATHS ENV PATH)
else (WIN32)
if(APPLE)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library}
PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
ENV DYLD_LIBRARY_PATH)
else(APPLE)
find_library(${_prefix}_${_library}_LIBRARY
NAMES ${_library}
PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
ENV LD_LIBRARY_PATH)
endif(APPLE)
endif(WIN32)
mark_as_advanced(${_prefix}_${_library}_LIBRARY)
set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
# A NOTFOUND result makes _libraries_work falsey, short-circuiting above.
set(_libraries_work ${${_prefix}_${_library}_LIBRARY})
endif(_libraries_work)
endforeach(_library ${_list})
if(_libraries_work)
# Test this combination of libraries.
set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}} ${_blas})
# Without a working Fortran compiler, probe the C-callable "<name>_" symbol.
if (CMAKE_Fortran_COMPILER_WORKS)
check_fortran_function_exists(${_name} ${_prefix}${_combined_name}_WORKS)
else (CMAKE_Fortran_COMPILER_WORKS)
check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS)
endif (CMAKE_Fortran_COMPILER_WORKS)
set(CMAKE_REQUIRED_LIBRARIES)
mark_as_advanced(${_prefix}${_combined_name}_WORKS)
set(_libraries_work ${${_prefix}${_combined_name}_WORKS})
endif(_libraries_work)
if(NOT _libraries_work)
set(${LIBRARIES} FALSE)
endif(NOT _libraries_work)
endmacro(Check_Lapack_Libraries)
# Dispatch on BLAS_INFO so the LAPACK implementation matches the BLAS found.
if(BLAS_FOUND)
# Intel MKL
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "mkl"))
IF(MKL_LAPACK_LIBRARIES)
SET(LAPACK_LIBRARIES ${MKL_LAPACK_LIBRARIES} ${MKL_LIBRARIES})
ELSE(MKL_LAPACK_LIBRARIES)
SET(LAPACK_LIBRARIES ${MKL_LIBRARIES})
ENDIF(MKL_LAPACK_LIBRARIES)
SET(LAPACK_INCLUDE_DIR ${MKL_INCLUDE_DIR})
SET(LAPACK_INFO "mkl")
ENDIF()
# OpenBlas
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "open"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
# cheev_ serves as a canary LAPACK symbol throughout this module.
check_function_exists("cheev_" OPEN_LAPACK_WORKS)
if(OPEN_LAPACK_WORKS)
SET(LAPACK_INFO "open")
else()
message(STATUS "It seems OpenBlas has not been compiled with Lapack support")
endif()
endif()
# GotoBlas
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "goto"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" GOTO_LAPACK_WORKS)
if(GOTO_LAPACK_WORKS)
SET(LAPACK_INFO "goto")
else()
message(STATUS "It seems GotoBlas has not been compiled with Lapack support")
endif()
endif()
# ACML
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "acml"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" ACML_LAPACK_WORKS)
if(ACML_LAPACK_WORKS)
SET(LAPACK_INFO "acml")
else()
message(STATUS "Strangely, this ACML library does not support Lapack?!")
endif()
endif()
# Accelerate
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "accelerate"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" ACCELERATE_LAPACK_WORKS)
if(ACCELERATE_LAPACK_WORKS)
SET(LAPACK_INFO "accelerate")
else()
message(STATUS "Strangely, this Accelerate library does not support Lapack?!")
endif()
endif()
# vecLib
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "veclib"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
check_function_exists("cheev_" VECLIB_LAPACK_WORKS)
if(VECLIB_LAPACK_WORKS)
SET(LAPACK_INFO "veclib")
else()
message(STATUS "Strangely, this vecLib library does not support Lapack?!")
endif()
endif()
# Generic LAPACK library?
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "generic"))
check_lapack_libraries(
LAPACK_LIBRARIES
LAPACK
cheev
""
"lapack"
"${BLAS_LIBRARIES}"
)
if(LAPACK_LIBRARIES)
SET(LAPACK_INFO "generic")
endif(LAPACK_LIBRARIES)
endif()
else(BLAS_FOUND)
message(STATUS "LAPACK requires BLAS")
endif(BLAS_FOUND)
# Report the outcome; LAPACK_INFO doubles as the "found" marker.
if(LAPACK_INFO)
set(LAPACK_FOUND TRUE)
else(LAPACK_INFO)
set(LAPACK_FOUND FALSE)
endif(LAPACK_INFO)
IF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED)
message(FATAL_ERROR "Cannot find a library with LAPACK API. Please specify library location.")
ENDIF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED)
IF(NOT LAPACK_FIND_QUIETLY)
IF(LAPACK_FOUND)
MESSAGE(STATUS "Found a library with LAPACK API. (${LAPACK_INFO})")
ELSE(LAPACK_FOUND)
MESSAGE(STATUS "Cannot find a library with LAPACK API. Not using LAPACK.")
ENDIF(LAPACK_FOUND)
ENDIF(NOT LAPACK_FIND_QUIETLY)
# Do nothing if LAPACK was found before
ENDIF(NOT LAPACK_FOUND)
================================================
FILE: cmake/Modules/FindLMDB.cmake
================================================
# Locate the LMDB key-value store headers and library.
#
#  LMDB_FOUND       - system has LMDB lib
#  LMDB_INCLUDE_DIR - the LMDB include directory
#  LMDB_LIBRARIES   - Libraries needed to use LMDB
#  LMDB_VERSION     - version string parsed from lmdb.h
#
# The LMDB_DIR environment variable may point at a custom install root.
#
# FindCWD based on FindGMP by:
# Copyright (c) 2006, Laurent Montel, <montel@kde.org>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# Adapted from FindCWD by:
# Copyright 2013 Conrad Steenberg <conrad.steenberg@gmail.com>
# Aug 31, 2013
include(FindPackageHandleStandardArgs)

find_path(LMDB_INCLUDE_DIR
          NAMES lmdb.h
          PATHS "$ENV{LMDB_DIR}/include")
find_library(LMDB_LIBRARIES
             NAMES lmdb
             PATHS "$ENV{LMDB_DIR}/lib")

find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES)

if(LMDB_FOUND)
  message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})")
  mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES)
  # Assemble the version triple from the MDB_VERSION_* macros in lmdb.h.
  caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h
                     LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH)
  set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}")
endif()
================================================
FILE: cmake/Modules/FindLevelDB.cmake
================================================
# - Find LevelDB
#
#  LevelDB_INCLUDES  - List of LevelDB includes
#  LevelDB_LIBRARIES - List of libraries when using LevelDB.
#  LevelDB_FOUND     - True if LevelDB found.
# Look for the header file.
find_path(LevelDB_INCLUDE NAMES leveldb/db.h
PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include
DOC "Path in which the file leveldb/db.h is located." )
# Look for the library.
find_library(LevelDB_LIBRARY NAMES leveldb
PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib
DOC "Path to leveldb library." )
include(FindPackageHandleStandardArgs)
# Sets the upper-case LEVELDB_FOUND tested below.
find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY)
if(LEVELDB_FOUND)
message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})")
set(LevelDB_INCLUDES ${LevelDB_INCLUDE})
set(LevelDB_LIBRARIES ${LevelDB_LIBRARY})
mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY)
# Extract the kMajorVersion/kMinorVersion constants from db.h to build
# LEVELDB_VERSION (e.g. "1.18").
if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h")
file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines
REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;")
foreach(__line ${__version_lines})
if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);")
set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1})
elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);")
set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1})
endif()
endforeach()
if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR)
set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}")
endif()
# Drop the scratch variables used during parsing.
caffe_clear_vars(__line __version_lines)
endif()
endif()
================================================
FILE: cmake/Modules/FindMKL.cmake
================================================
# Find the MKL libraries
#
# Options:
#
#   MKL_USE_SINGLE_DYNAMIC_LIBRARY  : use single dynamic library interface
#   MKL_USE_STATIC_LIBS             : use static libraries
#   MKL_MULTI_THREADED              : use multi-threading
#
# This module defines the following variables:
#
#   MKL_FOUND            : True mkl is found
#   MKL_INCLUDE_DIR      : include directory
#   MKL_LIBRARIES        : the libraries to link against.
# ---[ Options
caffe_option(MKL_USE_SINGLE_DYNAMIC_LIBRARY "Use single dynamic library interface" ON)
caffe_option(MKL_USE_STATIC_LIBS "Use static libraries" OFF IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
caffe_option(MKL_MULTI_THREADED "Use multi-threading" ON IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
# ---[ Root folders
set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs")
find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKL_ROOT} ${INTEL_ROOT}/mkl
DOC "Folder contains MKL")
# ---[ Find include dir
find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT} PATH_SUFFIXES include)
set(__looked_for MKL_INCLUDE_DIR)
# ---[ Find libraries
# Choose 32-bit or 64-bit library subdirectories based on pointer size.
if(CMAKE_SIZEOF_VOID_P EQUAL 4)
set(__path_suffixes lib lib/ia32)
else()
set(__path_suffixes lib lib/intel64)
endif()
# Build the list of mkl_* library name suffixes to search for.
set(__mkl_libs "")
if(MKL_USE_SINGLE_DYNAMIC_LIBRARY)
# Single dynamic library interface: only mkl_rt is needed.
list(APPEND __mkl_libs rt)
else()
if(CMAKE_SIZEOF_VOID_P EQUAL 4)
if(WIN32)
list(APPEND __mkl_libs intel_c)
else()
list(APPEND __mkl_libs intel gf)
endif()
else()
list(APPEND __mkl_libs intel_lp64 gf_lp64)
endif()
if(MKL_MULTI_THREADED)
list(APPEND __mkl_libs intel_thread)
else()
list(APPEND __mkl_libs sequential)
endif()
list(APPEND __mkl_libs core cdft_core)
endif()
# Resolve each mkl_<suffix> name to a full library path.
foreach (__lib ${__mkl_libs})
set(__mkl_lib "mkl_${__lib}")
string(TOUPPER ${__mkl_lib} __mkl_lib_upper)
if(MKL_USE_STATIC_LIBS)
# Search for the static archive name explicitly.
set(__mkl_lib "lib${__mkl_lib}.a")
endif()
find_library(${__mkl_lib_upper}_LIBRARY
NAMES ${__mkl_lib}
PATHS ${MKL_ROOT} "${MKL_INCLUDE_DIR}/.."
PATH_SUFFIXES ${__path_suffixes}
DOC "The path to Intel(R) MKL ${__mkl_lib} library")
mark_as_advanced(${__mkl_lib_upper}_LIBRARY)
list(APPEND __looked_for ${__mkl_lib_upper}_LIBRARY)
list(APPEND MKL_LIBRARIES ${${__mkl_lib_upper}_LIBRARY})
endforeach()
if(NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
# The layered interface additionally needs the iomp5 OpenMP runtime.
if (MKL_USE_STATIC_LIBS)
set(__iomp5_libs iomp5 libiomp5mt.lib)
else()
set(__iomp5_libs iomp5 libiomp5md.lib)
endif()
if(WIN32)
find_path(INTEL_INCLUDE_DIR omp.h PATHS ${INTEL_ROOT} PATH_SUFFIXES include)
list(APPEND __looked_for INTEL_INCLUDE_DIR)
endif()
find_library(MKL_RTL_LIBRARY ${__iomp5_libs}
PATHS ${INTEL_RTL_ROOT} ${INTEL_ROOT}/compiler ${MKL_ROOT}/.. ${MKL_ROOT}/../compiler
PATH_SUFFIXES ${__path_suffixes}
DOC "Path to Path to OpenMP runtime library")
list(APPEND __looked_for MKL_RTL_LIBRARY)
list(APPEND MKL_LIBRARIES ${MKL_RTL_LIBRARY})
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MKL DEFAULT_MSG ${__looked_for})
if(MKL_FOUND)
message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}")
endif()
# NOTE(review): __lib_suffix is cleared here but never set above.
caffe_clear_vars(__looked_for __mkl_libs __path_suffixes __lib_suffix __iomp5_libs)
================================================
FILE: cmake/Modules/FindMatlabMex.cmake
================================================
# This module looks for MatlabMex compiler
# Defines variables:
#   Matlab_DIR    - Matlab root dir
#   Matlab_mex    - path to mex compiler
#   Matlab_mexext - path to mexext
if(MSVC)
# On Windows, query the registry for installed MATLAB versions (newest first)
# and stop at the first hit.
foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7")
get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE)
if(__matlab_root)
break()
endif()
endforeach()
endif()
if(APPLE)
# On macOS, look for MATLAB application bundles, newest release first.
foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" "R2010a")
if(EXISTS /Applications/MATLAB_${__ver}.app)
set(__matlab_root /Applications/MATLAB_${__ver}.app)
break()
endif()
endforeach()
endif()
if(UNIX)
# On Unix, resolve the `matlab` launcher found on PATH back to its
# installation root (two dirname steps above the resolved symlink).
execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE
OUTPUT_VARIABLE __out RESULT_VARIABLE __res)
if(__res MATCHES 0) # Suppress `readlink` warning if `which` returned nothing
execute_process(COMMAND which matlab COMMAND xargs readlink
COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n
OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
endif()
# Validate the discovered root and locate the mex/mexext tools inside it.
find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root}
DOC "Matlab directory" NO_DEFAULT_PATH)
find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext)
if(MATLABMEX_FOUND)
mark_as_advanced(Matlab_mex Matlab_mexext)
endif()
================================================
FILE: cmake/Modules/FindNumPy.cmake
================================================
# - Find the NumPy libraries
# This module finds if NumPy is installed, and sets the following variables
# indicating where it is.
#
# TODO: Update to provide the libraries and paths for linking npymath lib.
#
#  NUMPY_FOUND            - was NumPy found
#  NUMPY_VERSION          - the version of NumPy found as a string
#  NUMPY_VERSION_MAJOR    - the major version number of NumPy
#  NUMPY_VERSION_MINOR    - the minor version number of NumPy
#  NUMPY_VERSION_PATCH    - the patch version number of NumPy
#  NUMPY_VERSION_DECIMAL  - e.g. version 1.6.1 is 10601
#  NUMPY_INCLUDE_DIR      - path to the NumPy include files
unset(NUMPY_VERSION)
unset(NUMPY_INCLUDE_DIR)

if(PYTHONINTERP_FOUND)
  # Ask the interpreter itself for the version string and include directory.
  execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
                  "import numpy as n; print(n.__version__); print(n.get_include());"
                  RESULT_VARIABLE __result
                  OUTPUT_VARIABLE __output
                  OUTPUT_STRIP_TRAILING_WHITESPACE)

  if(__result MATCHES 0)
    # Protect any semicolons in the output, then split on newlines.
    string(REGEX REPLACE ";" "\\\\;" __values ${__output})
    string(REGEX REPLACE "\r?\n" ";" __values ${__values})
    list(GET __values 0 NUMPY_VERSION)
    list(GET __values 1 NUMPY_INCLUDE_DIR)

    # Capture each full version component.  The previous pattern
    # "^([0-9])+\.([0-9])+\.([0-9])+" placed the quantifier outside the
    # groups, so multi-digit components (e.g. NumPy 1.10.4) were truncated
    # to a single digit in CMAKE_MATCH_<n>.
    string(REGEX MATCH "^([0-9]+)\\.([0-9]+)\\.([0-9]+)" __ver_check "${NUMPY_VERSION}")
    if(NOT "${__ver_check}" STREQUAL "")
      set(NUMPY_VERSION_MAJOR ${CMAKE_MATCH_1})
      set(NUMPY_VERSION_MINOR ${CMAKE_MATCH_2})
      set(NUMPY_VERSION_PATCH ${CMAKE_MATCH_3})
      math(EXPR NUMPY_VERSION_DECIMAL
        "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")
      # Normalize Windows backslashes for consumers of the include path.
      string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIR ${NUMPY_INCLUDE_DIR})
    else()
      unset(NUMPY_VERSION)
      unset(NUMPY_INCLUDE_DIR)
      message(STATUS "Requested NumPy version and include path, but got instead:\n${__output}\n")
    endif()
  endif()
else()
  message(STATUS "To find NumPy Python interpretator is required to be found.")
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NumPy REQUIRED_VARS NUMPY_INCLUDE_DIR NUMPY_VERSION
                                        VERSION_VAR NUMPY_VERSION)

if(NUMPY_FOUND)
  message(STATUS "NumPy ver. ${NUMPY_VERSION} found (include: ${NUMPY_INCLUDE_DIR})")
endif()

caffe_clear_vars(__result __output __error_value __values __ver_check __error_value)
================================================
FILE: cmake/Modules/FindOpenBLAS.cmake
================================================
# Locate OpenBLAS headers (cblas.h) and the openblas library.
# Honors the OpenBLAS and OpenBLAS_HOME environment variables.
SET(Open_BLAS_INCLUDE_SEARCH_PATHS
  /usr/include
  /usr/include/openblas-base
  /usr/local/include
  /usr/local/include/openblas-base
  /opt/OpenBLAS/include
  $ENV{OpenBLAS_HOME}
  $ENV{OpenBLAS_HOME}/include
)

SET(Open_BLAS_LIB_SEARCH_PATHS
  /lib/
  /lib/openblas-base
  /lib64/
  /usr/lib
  /usr/lib/openblas-base
  /usr/lib64
  /usr/local/lib
  /usr/local/lib64
  /opt/OpenBLAS/lib
  # was "$ENV{OpenBLAS}cd": a stray "cd" was appended to the variable
  # reference, yielding a bogus search path like "/opt/openblascd"
  $ENV{OpenBLAS}
  $ENV{OpenBLAS}/lib
  $ENV{OpenBLAS_HOME}
  $ENV{OpenBLAS_HOME}/lib
)

FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS})
FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})

# Assume success, then veto if either piece is missing.
SET(OpenBLAS_FOUND ON)

# Check include files
IF(NOT OpenBLAS_INCLUDE_DIR)
  SET(OpenBLAS_FOUND OFF)
  MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off")
ENDIF()

# Check libraries
IF(NOT OpenBLAS_LIB)
  SET(OpenBLAS_FOUND OFF)
  MESSAGE(STATUS "Could not find OpenBLAS lib. Turning OpenBLAS_FOUND off")
ENDIF()

IF (OpenBLAS_FOUND)
  IF (NOT OpenBLAS_FIND_QUIETLY)
    MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}")
    MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}")
  ENDIF (NOT OpenBLAS_FIND_QUIETLY)
ELSE (OpenBLAS_FOUND)
  IF (OpenBLAS_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find OpenBLAS")
  ENDIF (OpenBLAS_FIND_REQUIRED)
ENDIF (OpenBLAS_FOUND)

MARK_AS_ADVANCED(
  OpenBLAS_INCLUDE_DIR
  OpenBLAS_LIB
  OpenBLAS
)
================================================
FILE: cmake/Modules/FindSnappy.cmake
================================================
# Locate the Snappy compression library.
#
# Optional input:
#   SNAPPY_ROOT_DIR - base directory of a Snappy installation
#
# Variables set after configuration:
#   SNAPPY_FOUND
#   Snappy_INCLUDE_DIR
#   Snappy_LIBRARIES
#   Snappy_VERSION
include(FindPackageHandleStandardArgs)

find_path(Snappy_INCLUDE_DIR
          NAMES snappy.h
          PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include)

find_library(Snappy_LIBRARIES
             NAMES snappy
             PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib)

find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES)

if(SNAPPY_FOUND)
  message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})")
  mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES)
  # Read the version triple from the public stubs header.
  caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h
                     __snappy_version_lines SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL)
  set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}")
endif()
================================================
FILE: cmake/Modules/FindvecLib.cmake
================================================
# Find the vecLib libraries as part of Accelerate.framework or as standalone framework
#
# The following are set after configuration is done:
#   VECLIB_FOUND
#   vecLib_INCLUDE_DIR
#   vecLib_LINKER_LIBS

# vecLib is macOS-only; bail out silently elsewhere.
if(NOT APPLE)
  return()
endif()

set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers")

# Search the standalone framework first, then inside Accelerate.framework,
# then inside the active SDK (generalized via CMAKE_OSX_SYSROOT; the old
# hard-coded MacOSX10.9.sdk path is kept as a last-resort fallback).
find_path(vecLib_INCLUDE_DIR vecLib.h
          DOC "vecLib include directory"
          PATHS /System/Library/${__veclib_include_suffix}
                /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix}
                ${CMAKE_OSX_SYSROOT}/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
                /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR)

if(VECLIB_FOUND)
  # Standalone framework links differently from the Accelerate umbrella.
  if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*")
    set(vecLib_LINKER_LIBS -lcblas "-framework vecLib")
    message(STATUS "Found standalone vecLib.framework")
  else()
    set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate")
    message(STATUS "Found vecLib as part of Accelerate.framework")
  endif()

  mark_as_advanced(vecLib_INCLUDE_DIR)
endif()
================================================
FILE: cmake/ProtoBuf.cmake
================================================
# Finds Google Protocol Buffers library and compilers and extends
# the standard cmake script with version and python generation support
find_package( Protobuf REQUIRED )
include_directories(SYSTEM ${PROTOBUF_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${PROTOBUF_LIBRARIES})
# As of Ubuntu 14.04 protoc is no longer a part of libprotobuf-dev package
# and should be installed separately as in: sudo apt-get install protobuf-compiler
if(EXISTS ${PROTOBUF_PROTOC_EXECUTABLE})
message(STATUS "Found PROTOBUF Compiler: ${PROTOBUF_PROTOC_EXECUTABLE}")
else()
message(FATAL_ERROR "Could not find PROTOBUF Compiler")
endif()
if(PROTOBUF_FOUND)
# fetches protobuf version
# GOOGLE_PROTOBUF_VERSION is a single encoded integer (e.g. 2005000); the
# regex below splits it into single-digit major/minor/patch components.
caffe_parse_header(${PROTOBUF_INCLUDE_DIR}/google/protobuf/stubs/common.h VERION_LINE GOOGLE_PROTOBUF_VERSION)
string(REGEX MATCH "([0-9])00([0-9])00([0-9])" PROTOBUF_VERSION ${GOOGLE_PROTOBUF_VERSION})
set(PROTOBUF_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
unset(GOOGLE_PROTOBUF_VERSION)
endif()
# place where to generate protobuf sources
set(proto_gen_folder "${PROJECT_BINARY_DIR}/include/caffe/proto")
include_directories(SYSTEM "${PROJECT_BINARY_DIR}/include")
# When TRUE, each .proto file's own directory is added as a protoc -I path.
set(PROTOBUF_GENERATE_CPP_APPEND_PATH TRUE)
################################################################################################
# Modification of standard 'protobuf_generate_cpp()' with output dir parameter and python support
# Usage:
#   caffe_protobuf_generate_cpp_py(<output_dir> <srcs_var> <hdrs_var> <python_var> <proto_files>)
function(caffe_protobuf_generate_cpp_py output_dir srcs_var hdrs_var python_var)
if(NOT ARGN)
message(SEND_ERROR "Error: caffe_protobuf_generate_cpp_py() called without any proto files")
return()
endif()
if(PROTOBUF_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
foreach(fil ${ARGN})
get_filename_component(abs_fil ${fil} ABSOLUTE)
get_filename_component(abs_path ${abs_fil} PATH)
# Avoid duplicate -I entries.
list(FIND _protoc_include ${abs_path} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protoc_include -I ${abs_path})
endif()
endforeach()
else()
set(_protoc_include -I ${CMAKE_CURRENT_SOURCE_DIR})
endif()
if(DEFINED PROTOBUF_IMPORT_DIRS)
# Also expose user-provided import directories to protoc (deduplicated).
foreach(dir ${PROTOBUF_IMPORT_DIRS})
get_filename_component(abs_path ${dir} ABSOLUTE)
list(FIND _protoc_include ${abs_path} _contains_already)
if(${_contains_already} EQUAL -1)
list(APPEND _protoc_include -I ${abs_path})
endif()
endforeach()
endif()
# Accumulate generated file names and register one protoc command per input
# .proto producing the .pb.cc/.pb.h pair and the _pb2.py module.
set(${srcs_var})
set(${hdrs_var})
set(${python_var})
foreach(fil ${ARGN})
get_filename_component(abs_fil ${fil} ABSOLUTE)
get_filename_component(fil_we ${fil} NAME_WE)
list(APPEND ${srcs_var} "${output_dir}/${fil_we}.pb.cc")
list(APPEND ${hdrs_var} "${output_dir}/${fil_we}.pb.h")
list(APPEND ${python_var} "${output_dir}/${fil_we}_pb2.py")
add_custom_command(
OUTPUT "${output_dir}/${fil_we}.pb.cc"
"${output_dir}/${fil_we}.pb.h"
"${output_dir}/${fil_we}_pb2.py"
COMMAND ${CMAKE_COMMAND} -E make_directory "${output_dir}"
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${output_dir} ${_protoc_include} ${abs_fil}
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${output_dir} ${_protoc_include} ${abs_fil}
DEPENDS ${abs_fil}
COMMENT "Running C++/Python protocol buffer compiler on ${fil}" VERBATIM )
endforeach()
# Mark outputs as generated and export the lists to the caller's scope.
set_source_files_properties(${${srcs_var}} ${${hdrs_var}} ${${python_var}} PROPERTIES GENERATED TRUE)
set(${srcs_var} ${${srcs_var}} PARENT_SCOPE)
set(${hdrs_var} ${${hdrs_var}} PARENT_SCOPE)
set(${python_var} ${${python_var}} PARENT_SCOPE)
endfunction()
================================================
FILE: cmake/Summary.cmake
================================================
################################################################################################
# Caffe status report function.
# Automatically align right column and selects text based on condition.
# Usage:
# caffe_status(<text>)
# caffe_status(<heading> <value1> [<value2> ...])
# caffe_status(<heading> <condition> THEN <text for TRUE> ELSE <text for FALSE> )
function(caffe_status text)
# Split ARGN into three buckets: the condition tokens (before THEN/ELSE),
# the THEN text and the ELSE text.
set(status_cond)
set(status_then)
set(status_else)
set(status_current_name "cond")
foreach(arg ${ARGN})
if(arg STREQUAL "THEN")
set(status_current_name "then")
elseif(arg STREQUAL "ELSE")
set(status_current_name "else")
else()
list(APPEND status_${status_current_name} ${arg})
endif()
endforeach()
if(DEFINED status_cond)
# Right column starts at a fixed width; string(RANDOM ... ALPHABET " ")
# is a trick to build a padding string of 23 spaces.
set(status_placeholder_length 23)
string(RANDOM LENGTH ${status_placeholder_length} ALPHABET " " status_placeholder)
string(LENGTH "${text}" status_text_length)
if(status_text_length LESS status_placeholder_length)
# Pad the heading with spaces up to the placeholder width.
string(SUBSTRING "${text}${status_placeholder}" 0 ${status_placeholder_length} status_text)
elseif(DEFINED status_then OR DEFINED status_else)
# Heading too long for the column: print it alone, indent the value line.
message(STATUS "${text}")
set(status_text "${status_placeholder}")
else()
set(status_text "${text}")
endif()
if(DEFINED status_then OR DEFINED status_else)
# Conditional form: print THEN text when the condition holds, ELSE otherwise.
# Lists are joined with spaces and leading whitespace is trimmed.
if(${status_cond})
string(REPLACE ";" " " status_then "${status_then}")
string(REGEX REPLACE "^[ \t]+" "" status_then "${status_then}")
message(STATUS "${status_text} ${status_then}")
else()
string(REPLACE ";" " " status_else "${status_else}")
string(REGEX REPLACE "^[ \t]+" "" status_else "${status_else}")
message(STATUS "${status_text} ${status_else}")
endif()
else()
# Plain form: everything after the heading is a value list.
string(REPLACE ";" " " status_cond "${status_cond}")
string(REGEX REPLACE "^[ \t]+" "" status_cond "${status_cond}")
message(STATUS "${status_text} ${status_cond}")
endif()
else()
# Single-argument form: just echo the text.
message(STATUS "${text}")
endif()
endfunction()
################################################################################################
# Function for fetching Caffe version from git and headers
# Usage:
# caffe_extract_caffe_version()
function(caffe_extract_caffe_version)
# Default used when git is unavailable or the describe call fails.
set(Caffe_GIT_VERSION "unknown")
find_package(Git)
if(GIT_FOUND)
# `git describe --tags --always --dirty` yields a tag or commit id,
# marking uncommitted changes with a -dirty suffix.
execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --always --dirty
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}"
OUTPUT_VARIABLE Caffe_GIT_VERSION
RESULT_VARIABLE __git_result)
if(NOT ${__git_result} EQUAL 0)
# Non-zero exit status: discard whatever was captured.
set(Caffe_GIT_VERSION "unknown")
endif()
endif()
set(Caffe_GIT_VERSION ${Caffe_GIT_VERSION} PARENT_SCOPE)
# Caffe headers declare no version macros at this commit, hence the placeholder.
set(Caffe_VERSION "<TODO> (Caffe doesn't declare its version in headers)" PARENT_SCOPE)
# caffe_parse_header(${Caffe_INCLUDE_DIR}/caffe/version.hpp Caffe_VERSION_LINES CAFFE_MAJOR CAFFE_MINOR CAFFE_PATCH)
# set(Caffe_VERSION "${CAFFE_MAJOR}.${CAFFE_MINOR}.${CAFFE_PATCH}" PARENT_SCOPE)
# or for #define Caffe_VERSION "x.x.x"
# caffe_parse_header_single_define(Caffe ${Caffe_INCLUDE_DIR}/caffe/version.hpp Caffe_VERSION)
# set(Caffe_VERSION ${Caffe_VERSION_STRING} PARENT_SCOPE)
endfunction()
################################################################################################
# Prints accumulated caffe configuration summary
# Usage:
# caffe_print_configuration_summary()
# Prints the accumulated Caffe configuration summary via caffe_status.
# Also re-exports Caffe_VERSION (filled in by caffe_extract_caffe_version)
# to the caller's scope.
function(caffe_print_configuration_summary)
  caffe_extract_caffe_version()
  set(Caffe_VERSION ${Caffe_VERSION} PARENT_SCOPE)

  # Combined per-configuration compiler flags, for display only.
  caffe_merge_flag_lists(__flags_rel CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS)
  caffe_merge_flag_lists(__flags_deb CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS)

  caffe_status("")
  caffe_status("******************* Caffe Configuration Summary *******************")
  caffe_status("General:")
  caffe_status(" Version : ${Caffe_VERSION}")
  caffe_status(" Git : ${Caffe_GIT_VERSION}")
  caffe_status(" System : ${CMAKE_SYSTEM_NAME}")
  caffe_status(" C++ compiler : ${CMAKE_CXX_COMPILER}")
  caffe_status(" Release CXX flags : ${__flags_rel}")
  caffe_status(" Debug CXX flags : ${__flags_deb}")
  caffe_status(" Build type : ${CMAKE_BUILD_TYPE}")
  caffe_status("")
  caffe_status(" BUILD_SHARED_LIBS : ${BUILD_SHARED_LIBS}")
  caffe_status(" BUILD_python : ${BUILD_python}")
  caffe_status(" BUILD_matlab : ${BUILD_matlab}")
  caffe_status(" BUILD_docs : ${BUILD_docs}")
  caffe_status(" CPU_ONLY : ${CPU_ONLY}")
  caffe_status("")
  caffe_status("Dependencies:")
  caffe_status(" BLAS : " APPLE THEN "Yes (vecLib)" ELSE "Yes (${BLAS})")
  caffe_status(" Boost : Yes (ver. ${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION})")
  caffe_status(" glog : Yes")
  caffe_status(" gflags : Yes")
  caffe_status(" protobuf : " PROTOBUF_FOUND THEN "Yes (ver. ${PROTOBUF_VERSION})" ELSE "No" )
  caffe_status(" lmdb : " LMDB_FOUND THEN "Yes (ver. ${LMDB_VERSION})" ELSE "No")
  caffe_status(" Snappy : " SNAPPY_FOUND THEN "Yes (ver. ${Snappy_VERSION})" ELSE "No" )
  caffe_status(" LevelDB : " LEVELDB_FOUND THEN "Yes (ver. ${LEVELDB_VERSION})" ELSE "No")
  caffe_status(" OpenCV : Yes (ver. ${OpenCV_VERSION})")
  caffe_status(" CUDA : " HAVE_CUDA THEN "Yes (ver. ${CUDA_VERSION})" ELSE "No" )
  caffe_status("")
  if(HAVE_CUDA)
    caffe_status("NVIDIA CUDA:")
    caffe_status(" Target GPU(s) : ${CUDA_ARCH_NAME}" )
    caffe_status(" GPU arch(s) : ${NVCC_FLAGS_EXTRA_readable}")
    if(USE_CUDNN)
      caffe_status(" cuDNN : " HAVE_CUDNN THEN "Yes" ELSE "Not found")
    else()
      caffe_status(" cuDNN : Disabled")
    endif()
    caffe_status("")
  endif()
  if(HAVE_PYTHON)
    caffe_status("Python:")
    caffe_status(" Interpreter :" PYTHON_EXECUTABLE THEN "${PYTHON_EXECUTABLE} (ver. ${PYTHON_VERSION_STRING})" ELSE "No")
    caffe_status(" Libraries :" PYTHONLIBS_FOUND THEN "${PYTHON_LIBRARIES} (ver ${PYTHONLIBS_VERSION_STRING})" ELSE "No")
    caffe_status(" NumPy :" NUMPY_FOUND THEN "${NUMPY_INCLUDE_DIR} (ver ${NUMPY_VERSION})" ELSE "No")
    caffe_status("")
  endif()
  if(BUILD_matlab)
    caffe_status("Matlab:")
    # BUGFIX: the closing parenthesis after ${Matlab_mexext} was missing.
    caffe_status(" Matlab :" HAVE_MATLAB THEN "Yes (${Matlab_mex}, ${Matlab_mexext})" ELSE "No")
    caffe_status(" Octave :" Octave_compiler THEN "Yes (${Octave_compiler})" ELSE "No")
    if(HAVE_MATLAB AND Octave_compiler)
      caffe_status(" Build mex using : ${Matlab_build_mex_using}")
    endif()
    caffe_status("")
  endif()
  if(BUILD_docs)
    # BUGFIX: heading was misspelled "Documentaion:".
    caffe_status("Documentation:")
    caffe_status(" Doxygen :" DOXYGEN_FOUND THEN "${DOXYGEN_EXECUTABLE} (${DOXYGEN_VERSION})" ELSE "No")
    caffe_status(" config_file : ${DOXYGEN_config_file}")
    caffe_status("")
  endif()
  caffe_status("Install:")
  caffe_status(" Install path : ${CMAKE_INSTALL_PREFIX}")
  caffe_status("")
endfunction()
================================================
FILE: cmake/Targets.cmake
================================================
################################################################################################
# Defines global Caffe_LINK flag, This flag is required to prevent linker from excluding
# some objects which are not addressed directly but are registered via static constructors
if(BUILD_SHARED_LIBS)
# Shared build: plain linking suffices; the dynamic loader keeps all objects.
set(Caffe_LINK caffe)
else()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# Apple/Clang ld spelling of "pull every object out of the archive".
set(Caffe_LINK -Wl,-force_load caffe)
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
# GNU ld equivalent; --no-whole-archive restores default behavior for
# libraries that come later on the link line.
set(Caffe_LINK -Wl,--whole-archive caffe -Wl,--no-whole-archive)
endif()
endif()
################################################################################################
# Convenient command to setup source group for IDEs that support this feature (VS, XCode)
# Usage:
# caffe_source_group(<group> GLOB[_RECURSE] <globbing_expression>)
# Assigns files matched by GLOB / GLOB_RECURSE expressions to an IDE
# source group (Visual Studio / Xcode folder).
function(caffe_source_group group)
  cmake_parse_arguments(CAFFE_SOURCE_GROUP "" "" "GLOB;GLOB_RECURSE" ${ARGN})
  # Non-recursive globbing expression(s).
  if(CAFFE_SOURCE_GROUP_GLOB)
    file(GLOB __plain_matches ${CAFFE_SOURCE_GROUP_GLOB})
    source_group(${group} FILES ${__plain_matches})
  endif()
  # Recursive globbing expression(s).
  if(CAFFE_SOURCE_GROUP_GLOB_RECURSE)
    file(GLOB_RECURSE __recursive_matches ${CAFFE_SOURCE_GROUP_GLOB_RECURSE})
    source_group(${group} FILES ${__recursive_matches})
  endif()
endfunction()
################################################################################################
# Collecting sources from globbing and appending to output list variable
# Usage:
# caffe_collect_sources(<output_variable> GLOB[_RECURSE] <globbing_expression>)
# Collects files matched by GLOB / GLOB_RECURSE expressions and appends them
# to the list variable named by `variable` in the caller's scope.
function(caffe_collect_sources variable)
  cmake_parse_arguments(CAFFE_COLLECT_SOURCES "" "" "GLOB;GLOB_RECURSE" ${ARGN})
  # Start from the caller's current list contents (may be empty).
  set(__result ${${variable}})
  if(CAFFE_COLLECT_SOURCES_GLOB)
    file(GLOB srcs1 ${CAFFE_COLLECT_SOURCES_GLOB})
    list(APPEND __result ${srcs1})
  endif()
  if(CAFFE_COLLECT_SOURCES_GLOB_RECURSE)
    file(GLOB_RECURSE srcs2 ${CAFFE_COLLECT_SOURCES_GLOB_RECURSE})
    list(APPEND __result ${srcs2})
  endif()
  # BUGFIX: the original did `set(${variable} ${variable} ${srcs1})`, which
  # appended the literal variable *name* instead of its contents, and never
  # used PARENT_SCOPE — so callers observed no effect at all. Export the
  # accumulated list as the documented contract ("appending to output list
  # variable") requires.
  set(${variable} ${__result} PARENT_SCOPE)
endfunction()
################################################################################################
# Short command getting caffe sources (assuming standard Caffe code tree)
# Usage:
# caffe_pickup_caffe_sources(<root>)
function(caffe_pickup_caffe_sources root)
# put all files in source groups (visible as subfolder in many IDEs)
caffe_source_group("Include" GLOB "${root}/include/caffe/*.h*")
caffe_source_group("Include\\Util" GLOB "${root}/include/caffe/util/*.h*")
caffe_source_group("Include" GLOB "${PROJECT_BINARY_DIR}/caffe_config.h*")
caffe_source_group("Source" GLOB "${root}/src/caffe/*.cpp")
caffe_source_group("Source\\Util" GLOB "${root}/src/caffe/util/*.cpp")
caffe_source_group("Source\\Layers" GLOB "${root}/src/caffe/layers/*.cpp")
caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/layers/*.cu")
caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/util/*.cu")
caffe_source_group("Source\\Proto" GLOB "${root}/src/caffe/proto/*.proto")
# source groups for test target
caffe_source_group("Include" GLOB "${root}/include/caffe/test/test_*.h*")
caffe_source_group("Source" GLOB "${root}/src/caffe/test/test_*.cpp")
caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/test/test_*.cu")
# collect files; test sources are globbed first so they can be removed
# from the library source lists below
file(GLOB test_hdrs ${root}/include/caffe/test/test_*.h*)
file(GLOB test_srcs ${root}/src/caffe/test/test_*.cpp)
file(GLOB_RECURSE hdrs ${root}/include/caffe/*.h*)
file(GLOB_RECURSE srcs ${root}/src/caffe/*.cpp)
list(REMOVE_ITEM hdrs ${test_hdrs})
list(REMOVE_ITEM srcs ${test_srcs})
# adding headers to make them visible in some IDEs (Qt, VS, Xcode)
list(APPEND srcs ${hdrs} ${PROJECT_BINARY_DIR}/caffe_config.h)
list(APPEND test_srcs ${test_hdrs})
# collect cuda files
file(GLOB test_cuda ${root}/src/caffe/test/test_*.cu)
file(GLOB_RECURSE cuda ${root}/src/caffe/*.cu)
list(REMOVE_ITEM cuda ${test_cuda})
# add proto to make them editable in IDEs too
file(GLOB_RECURSE proto_files ${root}/src/caffe/*.proto)
list(APPEND srcs ${proto_files})
# convert to absolute paths
caffe_convert_absolute_paths(srcs)
caffe_convert_absolute_paths(cuda)
caffe_convert_absolute_paths(test_srcs)
caffe_convert_absolute_paths(test_cuda)
# propagate to parent scope: srcs/cuda feed the library target,
# test_srcs/test_cuda feed the test target
set(srcs ${srcs} PARENT_SCOPE)
set(cuda ${cuda} PARENT_SCOPE)
set(test_srcs ${test_srcs} PARENT_SCOPE)
set(test_cuda ${test_cuda} PARENT_SCOPE)
endfunction()
################################################################################################
# Short command for setting defeault target properties
# Usage:
# caffe_default_properties(<target>)
# Applies Caffe's standard target layout: archives and libraries under lib/,
# executables under bin/, debug artifacts suffixed with Caffe_DEBUG_POSTFIX.
function(caffe_default_properties target)
  set_target_properties(${target} PROPERTIES
                        ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
                        LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
                        RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin"
                        DEBUG_POSTFIX ${Caffe_DEBUG_POSTFIX})
  # Externally-built dependencies (if any were declared) must be ready first.
  if(DEFINED external_project_dependencies)
    add_dependencies(${target} ${external_project_dependencies})
  endif()
endfunction()
################################################################################################
# Short command for setting runtime directory for build target
# Usage:
# caffe_set_runtime_directory(<target> <dir>)
# Redirects the target's executable output into the requested directory.
function(caffe_set_runtime_directory target dir)
  set_target_properties(${target} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${dir}")
endfunction()
################################################################################################
# Short command for setting solution folder property for target
# Usage:
# caffe_set_solution_folder(<target> <folder>)
# Places the target into an IDE solution folder, but only when the
# USE_PROJECT_FOLDERS feature is enabled.
function(caffe_set_solution_folder target folder)
  if(USE_PROJECT_FOLDERS)
    set_target_properties(${target} PROPERTIES FOLDER "${folder}")
  endif()
endfunction()
################################################################################################
# Reads lines from input file, prepends source directory to each line and writes to output file
# Usage:
# caffe_configure_testdatafile(<testdatafile>)
# Reads a manifest file line by line, prefixes each entry with the source
# tree root, and writes the result next to the original as <file>.gen.cmake.
function(caffe_configure_testdatafile file)
  file(STRINGS ${file} __manifest_lines)
  set(__generated "")
  foreach(__entry ${__manifest_lines})
    set(__generated "${__generated}${PROJECT_SOURCE_DIR}/${__entry}\n")
  endforeach()
  file(WRITE ${file}.gen.cmake ${__generated})
endfunction()
################################################################################################
# Filter out all files that are not included in selected list
# Usage:
# caffe_leave_only_selected_tests(<filelist_variable> <selected_list>)
# Filters the list variable `file_list` down to the tests named in ARGN
# (comma-separated). An empty selection means "keep every test".
function(caffe_leave_only_selected_tests file_list)
  if(NOT ARGN)
    return()
  endif()
  # Selection arrives comma-separated; caffe_main must always stay in.
  string(REPLACE "," ";" __wanted ${ARGN})
  list(APPEND __wanted caffe_main)
  set(__kept "")
  foreach(__file ${${file_list}})
    # Compare against the bare test name: no path, no extension, no "test_".
    get_filename_component(__base ${__file} NAME_WE)
    string(REGEX REPLACE "^test_" "" __base ${__base})
    list(FIND __wanted ${__base} __pos)
    if(NOT __pos EQUAL -1)
      list(APPEND __kept ${__file})
    endif()
  endforeach()
  set(${file_list} ${__kept} PARENT_SCOPE)
endfunction()
================================================
FILE: cmake/Templates/CaffeConfig.cmake.in
================================================
# Config file for the Caffe package.
#
# Note:
# Caffe and this config file depends on opencv,
# so put `find_package(OpenCV)` before searching Caffe
# via `find_package(Caffe)`. All other lib/includes
# dependencies are hard coded in the file
#
# After successful configuration the following variables
# will be defined:
#
# Caffe_INCLUDE_DIRS - Caffe include directories
# Caffe_LIBRARIES - libraries to link against
# Caffe_DEFINITIONS - a list of definitions to pass to compiler
#
# Caffe_HAVE_CUDA - signals about CUDA support
# Caffe_HAVE_CUDNN - signals about cuDNN support
# OpenCV dependency: Caffe's imported targets reference OpenCV, so its
# targets must exist before CaffeTargets.cmake is included below.
if(NOT OpenCV_FOUND)
set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@")
if(Caffe_OpenCV_CONFIG_PATH)
get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE)
if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core)
# Reuse the exact OpenCV build Caffe was configured against.
message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}")
include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake)
endif()
else()
find_package(OpenCV REQUIRED)
endif()
unset(Caffe_OpenCV_CONFIG_PATH)
endif()
# Compute paths relative to this installed/exported config file
get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
set(Caffe_INCLUDE_DIRS "@Caffe_INCLUDE_DIRS@")
@Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND@
# Our library dependencies (skipped when building inside the Caffe tree itself)
if(NOT TARGET caffe AND NOT caffe_BINARY_DIR)
include("${Caffe_CMAKE_DIR}/CaffeTargets.cmake")
endif()
# List of IMPORTED libs created by CaffeTargets.cmake
set(Caffe_LIBRARIES caffe)
# Definitions
set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@")
# Cuda support variables
set(Caffe_CPU_ONLY @CPU_ONLY@)
set(Caffe_HAVE_CUDA @HAVE_CUDA@)
set(Caffe_HAVE_CUDNN @HAVE_CUDNN@)
================================================
FILE: cmake/Templates/CaffeConfigVersion.cmake.in
================================================
# Version file consumed by find_package(Caffe <version>): reports whether
# the installed version satisfies the requested one.
set(PACKAGE_VERSION "@Caffe_VERSION@")
# Check whether the requested PACKAGE_FIND_VERSION is compatible
if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
set(PACKAGE_VERSION_COMPATIBLE FALSE)
else()
# Any installed version >= the requested one is considered compatible.
set(PACKAGE_VERSION_COMPATIBLE TRUE)
if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
set(PACKAGE_VERSION_EXACT TRUE)
endif()
endif()
================================================
FILE: cmake/Templates/caffe_config.h.in
================================================
/* Sources directory */
#define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}"
/* Binaries directory */
#define BINARY_FOLDER "${PROJECT_BINARY_DIR}"
/* NVIDIA CUDA available at configure time */
#cmakedefine HAVE_CUDA
/* NVIDIA cuDNN: found / requested */
#cmakedefine HAVE_CUDNN
#cmakedefine USE_CUDNN
/* CPU-only build (GPU code paths compiled out) */
#cmakedefine CPU_ONLY
/* Test device */
#define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE}
/* Temporary (TODO: remove): test-data locations.  The active branch uses
   absolute source/binary paths plus the generated-manifest extension; the
   disabled branch would use paths relative to the working directory. */
#if 1
#define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/"
#define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/"
#define CMAKE_EXT ".gen.cmake"
#else
#define CMAKE_SOURCE_DIR "src/"
#define EXAMPLES_SOURCE_DIR "examples/"
#define CMAKE_EXT ""
#endif
/* Matlab */
#cmakedefine HAVE_MATLAB
================================================
FILE: cmake/Utils.cmake
================================================
################################################################################################
# Command alias for debugging messages
# Usage:
# dmsg(<message>)
# Debug helper: prints all given arguments as one STATUS message.
function(dmsg)
  message(STATUS ${ARGN})
endfunction()
################################################################################################
# Removes duplicates from list(s)
# Usage:
# caffe_list_unique(<list_variable> [<list_variable>] [...])
# Deduplicates each named list in place.  Empty/undefined lists are skipped —
# the guard avoids passing them to REMOVE_DUPLICATES.
macro(caffe_list_unique)
  foreach(__list_name ${ARGN})
    if(${__list_name})
      list(REMOVE_DUPLICATES ${__list_name})
    endif()
  endforeach()
endmacro()
################################################################################################
# Clears variables from list
# Usage:
# caffe_clear_vars(<variables_list>)
# Unsets every variable named in the argument list.
macro(caffe_clear_vars)
  foreach(__name ${ARGN})
    unset(${__name})
  endforeach()
endmacro()
################################################################################################
# Removes duplicates from string
# Usage:
# caffe_string_unique(<string_variable>)
# Deduplicates the whitespace-separated tokens of the named string variable,
# writing the rebuilt string back into the caller's scope.
function(caffe_string_unique __string)
  if(${__string})
    # Tokenize, deduplicate, then rebuild a space-separated string.
    set(__tokens ${${__string}})
    separate_arguments(__tokens)
    list(REMOVE_DUPLICATES __tokens)
    set(__rebuilt "")
    foreach(__tok ${__tokens})
      set(__rebuilt "${__rebuilt} ${__tok}")
    endforeach()
    set(${__string} ${__rebuilt} PARENT_SCOPE)
  endif()
endfunction()
################################################################################################
# Prints list element per line
# Usage:
# caffe_print_list(<list>)
# Prints each element of the argument list on its own STATUS line.
function(caffe_print_list)
  foreach(__item ${ARGN})
    message(STATUS ${__item})
  endforeach()
endfunction()
################################################################################################
# Function merging lists of compiler flags to single string.
# Usage:
# caffe_merge_flag_lists(out_variable <list1> [<list2>] [<list3>] ...)
# Merges the flag lists named in ARGN into a single space-separated string
# stored in out_var (caller's scope).
function(caffe_merge_flag_lists out_var)
  set(__result "")
  foreach(__list ${ARGN})
    foreach(__flag ${${__list}})
      # Drop stray whitespace around each flag before concatenating.
      string(STRIP "${__flag}" __flag)
      set(__result "${__result} ${__flag}")
    endforeach()
  endforeach()
  # BUGFIX: quote the argument — the original unquoted ${__result} expanded
  # to nothing when every input list was empty/undefined, making
  # string(STRIP ...) fail with a wrong-argument-count error.
  string(STRIP "${__result}" __result)
  set(${out_var} ${__result} PARENT_SCOPE)
endfunction()
################################################################################################
# Converts all paths in list to absolute
# Usage:
# caffe_convert_absolute_paths(<list_variable>)
# Converts every path in the named list variable to an absolute path,
# writing the result back into the caller's scope.
function(caffe_convert_absolute_paths variable)
  # BUGFIX: the original initialized the unused `__dlist` yet appended to the
  # never-initialized `__list`.  Since functions inherit the caller's
  # variables, a pre-existing `__list` in an enclosing scope would leak into
  # the result.  Initialize the accumulator that is actually used.
  set(__list "")
  foreach(__s ${${variable}})
    get_filename_component(__abspath ${__s} ABSOLUTE)
    list(APPEND __list ${__abspath})
  endforeach()
  set(${variable} ${__list} PARENT_SCOPE)
endfunction()
################################################################################################
# Reads set of version defines from the header file
# Usage:
# caffe_parse_header(<file> <define1> <define2> <define3> ..)
# Scans FILENAME for `#define <name> <number>` lines for each define named in
# ARGN; stores the matching file lines in FILE_VAR and each numeric value in a
# variable named after the define.  Special ARGN tokens: PARENT_SCOPE exports
# results one scope up, CACHE stores them as INTERNAL cache entries.
macro(caffe_parse_header FILENAME FILE_VAR)
set(vars_regex "")
# (`__parnet_scope` is a historical misspelling kept as-is; it is internal.)
set(__parnet_scope OFF)
set(__add_cache OFF)
# Build an alternation regex from the requested define names, peeling off
# the PARENT_SCOPE / CACHE control tokens.
foreach(name ${ARGN})
if("${name}" STREQUAL "PARENT_SCOPE")
set(__parnet_scope ON)
elseif("${name}" STREQUAL "CACHE")
set(__add_cache ON)
elseif(vars_regex)
set(vars_regex "${vars_regex}|${name}")
else()
set(vars_regex "${name}")
endif()
endforeach()
# Collect only the #define lines mentioning a requested name with a numeric value.
if(EXISTS "${FILENAME}")
file(STRINGS "${FILENAME}" ${FILE_VAR} REGEX "#define[ \t]+(${vars_regex})[ \t]+[0-9]+" )
else()
unset(${FILE_VAR})
endif()
# Extract each define's numeric value from the collected lines.
foreach(name ${ARGN})
if(NOT "${name}" STREQUAL "PARENT_SCOPE" AND NOT "${name}" STREQUAL "CACHE")
if(${FILE_VAR})
if(${FILE_VAR} MATCHES ".+[ \t]${name}[ \t]+([0-9]+).*")
string(REGEX REPLACE ".+[ \t]${name}[ \t]+([0-9]+).*" "\\1" ${name} "${${FILE_VAR}}")
else()
# Name not present in the collected lines: leave its value empty.
set(${name} "")
endif()
if(__add_cache)
set(${name} ${${name}} CACHE INTERNAL "${name} parsed from ${FILENAME}" FORCE)
elseif(__parnet_scope)
set(${name} "${${name}}" PARENT_SCOPE)
endif()
else()
# No lines matched at all: drop any stale cached value.
unset(${name} CACHE)
endif()
endif()
endforeach()
endmacro()
################################################################################################
# Reads single version define from the header file and parses it
# Usage:
# caffe_parse_header_single_define(<library_name> <file> <define_name>)
# Reads a single `#define <VARNAME> "x.y.z[.t]"` from HDR_PATH (first match
# only) and exports to the caller: <LIBNAME>_VERSION_MAJOR/MINOR/PATCH, an
# optional <LIBNAME>_VERSION_TWEAK, and the joined <LIBNAME>_VERSION_STRING.
function(caffe_parse_header_single_define LIBNAME HDR_PATH VARNAME)
set(${LIBNAME}_H "")
if(EXISTS "${HDR_PATH}")
file(STRINGS "${HDR_PATH}" ${LIBNAME}_H REGEX "^#define[ \t]+${VARNAME}[ \t]+\"[^\"]*\".*$" LIMIT_COUNT 1)
endif()
if(${LIBNAME}_H)
# Pull each numeric component out of the quoted version string.
string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${${LIBNAME}_H}")
string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${${LIBNAME}_H}")
string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${${LIBNAME}_H}")
# NOTE(review): ${ARGN} is spliced into each set() — presumably to forward
# extra set() options from the caller; confirm against call sites.
set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE)
set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE)
set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE)
set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE)
# append a TWEAK version if it exists:
set(${LIBNAME}_VERSION_TWEAK "")
if("${${LIBNAME}_H}" MATCHES "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$")
set(${LIBNAME}_VERSION_TWEAK "${CMAKE_MATCH_1}" ${ARGN} PARENT_SCOPE)
endif()
if(${LIBNAME}_VERSION_TWEAK)
set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}.${${LIBNAME}_VERSION_TWEAK}" ${ARGN} PARENT_SCOPE)
else()
set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}" ${ARGN} PARENT_SCOPE)
endif()
endif()
endfunction()
########################################################################################################
# An option that the user can select. Can accept condition to control when option is available for user.
# Usage:
# caffe_option(<option_variable> "doc string" <initial value or boolean expression> [IF <condition>])
function(caffe_option variable description value)
# __value collects the default (possibly a boolean expression in list form);
# everything after the IF keyword goes into __condition.
set(__value ${value})
set(__condition "")
set(__varname "__value")
foreach(arg ${ARGN})
if(arg STREQUAL "IF" OR arg STREQUAL "if")
set(__varname "__condition")
else()
list(APPEND ${__varname} ${arg})
endif()
endforeach()
unset(__varname)
# No IF clause given: substitute an always-true condition.
if("${__condition}" STREQUAL "")
set(__condition 2 GREATER 1)
endif()
if(${__condition})
if("${__value}" MATCHES ";")
# Default is a multi-token boolean expression: evaluate it.
if(${__value})
option(${variable} "${description}" ON)
else()
option(${variable} "${description}" OFF)
endif()
elseif(DEFINED ${__value})
# Default names another variable: use that variable's truth value.
if(${__value})
option(${variable} "${description}" ON)
else()
option(${variable} "${description}" OFF)
endif()
else()
# Plain literal default (e.g. ON/OFF).
option(${variable} "${description}" ${__value})
endif()
else()
# Condition not met: the option is not offered; drop any cached value.
unset(${variable} CACHE)
endif()
endfunction()
################################################################################################
# Utility macro for comparing two lists. Used for CMake debugging purposes
# Usage:
# caffe_compare_lists(<list_variable> <list2_variable> [description])
# Debug helper: asserts that two list variables contain the same elements
# (order-insensitive); raises FATAL_ERROR with `desc` appended on mismatch.
function(caffe_compare_lists list1 list2 desc)
  # Work on sorted copies so the comparison ignores ordering.
  set(__list1 ${${list1}})
  set(__list2 ${${list2}})
  list(SORT __list1)
  list(SORT __list2)
  list(LENGTH __list1 __len1)
  list(LENGTH __list2 __len2)
  if(NOT ${__len1} EQUAL ${__len2})
    message(FATAL_ERROR "Lists are not equal. ${__len1} != ${__len2}. ${desc}")
  endif()
  # BUGFIX: the original looped `foreach(RANGE 1 ${__len1})` with a -1 offset;
  # for empty lists that becomes RANGE 1 0, which CMake still iterates
  # (descending), then list(GET ... 1 ...) indexes past the end.  Guard the
  # empty case and use a straightforward 0-based range.
  if(__len1 GREATER 0)
    math(EXPR __last "${__len1} - 1")
    foreach(__index RANGE 0 ${__last})
      list(GET __list1 ${__index} __item1)
      list(GET __list2 ${__index} __item2)
      if(NOT ${__item1} STREQUAL ${__item2})
        message(FATAL_ERROR "Lists are not equal. Differ at element ${__index}. ${desc}")
      endif()
    endforeach()
  endif()
endfunction()
################################################################################################
# Command for disabling warnings for different platforms (see below for gcc and VisualStudio)
# Usage:
# caffe_warnings_disable(<CMAKE_[C|CXX]_FLAGS[_CONFIGURATION]> -Wshadow /wd4996 ..,)
macro(caffe_warnings_disable)
set(_flag_vars "")
set(_msvc_warnings "")
set(_gxx_warnings "")
# Sort arguments: CMAKE_* names are the flag variables to modify,
# /wdNNNN are MSVC suppressions, -W... are gcc/clang warning switches.
foreach(arg ${ARGN})
if(arg MATCHES "^CMAKE_")
list(APPEND _flag_vars ${arg})
elseif(arg MATCHES "^/wd")
list(APPEND _msvc_warnings ${arg})
elseif(arg MATCHES "^-W")
list(APPEND _gxx_warnings ${arg})
endif()
endforeach()
# Default to the generic C and C++ flag variables when none were named.
if(NOT _flag_vars)
set(_flag_vars CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
if(MSVC AND _msvc_warnings)
# MSVC: simply append each /wd suppression to every flag variable.
foreach(var ${_flag_vars})
foreach(warning ${_msvc_warnings})
set(${var} "${${var}} ${warning}")
endforeach()
endforeach()
elseif((CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX) AND _gxx_warnings)
foreach(var ${_flag_vars})
foreach(warning ${_gxx_warnings})
# Turn -Wfoo into -Wno-foo, first removing any existing -Wfoo
# occurrence from the flags so the disable takes effect.
if(NOT warning MATCHES "^-Wno-")
string(REPLACE "${warning}" "" ${var} "${${var}}")
string(REPLACE "-W" "-Wno-" warning "${warning}")
endif()
set(${var} "${${var}} ${warning}")
endforeach()
endforeach()
endif()
# Clean up macro-scope temporaries (macros share the caller's scope).
caffe_clear_vars(_flag_vars _msvc_warnings _gxx_warnings)
endmacro()
################################################################################################
# Helper function get current definitions
# Usage:
# caffe_get_current_definitions(<definitions_variable>)
# Collects the current directory's COMPILE_DEFINITIONS, prefixes each with
# -D, deduplicates, and stores the list in definitions_var (caller's scope).
function(caffe_get_current_definitions definitions_var)
  get_property(__defs DIRECTORY PROPERTY COMPILE_DEFINITIONS)
  set(__flags "")
  foreach(__def ${__defs})
    list(APPEND __flags -D${__def})
  endforeach()
  caffe_list_unique(__flags)
  set(${definitions_var} ${__flags} PARENT_SCOPE)
endfunction()
################################################################################################
# Helper function get current includes/definitions
# Usage:
# caffe_get_current_cflags(<cflagslist_variable>)
# Builds a compiler-flag list from the current -D definitions plus a -I flag
# for each (absolutized) include directory, deduplicated, into cflags_var.
function(caffe_get_current_cflags cflags_var)
  get_property(__includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES)
  caffe_convert_absolute_paths(__includes)
  caffe_get_current_definitions(__cflags)
  foreach(__inc ${__includes})
    list(APPEND __cflags "-I${__inc}")
  endforeach()
  caffe_list_unique(__cflags)
  set(${cflags_var} ${__cflags} PARENT_SCOPE)
endfunction()
################################################################################################
# Helper function to parse current linker libs into link directories, libflags and osx frameworks
# Usage:
# caffe_parse_linker_libs(<Caffe_LINKER_LIBS_var> <directories_var> <libflags_var> <frameworks_var>)
function(caffe_parse_linker_libs Caffe_LINKER_LIBS_variable folders_var flags_var frameworks_var)
set(__unspec "")
set(__debug "")
set(__optimized "")
set(__framework "")
set(__varname "__unspec")
# split libs into debug, optimized, unspecified and frameworks.  A "debug"
# or "optimized" keyword applies only to the single following entry, hence
# __varname is reset after every append.
foreach(list_elem ${${Caffe_LINKER_LIBS_variable}})
if(list_elem STREQUAL "debug")
set(__varname "__debug")
elseif(list_elem STREQUAL "optimized")
set(__varname "__optimized")
elseif(list_elem MATCHES "^-framework[ \t]+([^ \t].*)")
list(APPEND __framework -framework ${CMAKE_MATCH_1})
else()
list(APPEND ${__varname} ${list_elem})
set(__varname "__unspec")
endif()
endforeach()
# attach debug or optimized libs to unspecified according to current configuration
if(CMAKE_BUILD_TYPE MATCHES "Debug")
set(__libs ${__unspec} ${__debug})
else()
set(__libs ${__unspec} ${__optimized})
endif()
set(libflags "")
set(folders "")
# convert linker libraries list to link flags
foreach(lib ${__libs})
if(TARGET ${lib})
# CMake target: link by name; output folder resolved at generate time.
list(APPEND folders $<TARGET_LINKER_FILE_DIR:${lib}>)
list(APPEND libflags -l${lib})
elseif(lib MATCHES "^-l.*")
# Already a -l flag: pass through unchanged.
list(APPEND libflags ${lib})
elseif(IS_ABSOLUTE ${lib})
# Absolute path: split into search folder + -l name (libfoo.so -> -lfoo).
get_filename_component(name_we ${lib} NAME_WE)
get_filename_component(folder ${lib} PATH)
string(REGEX MATCH "^lib(.*)" __match ${name_we})
list(APPEND libflags -l${CMAKE_MATCH_1})
list(APPEND folders ${folder})
else()
message(FATAL_ERROR "Logic error. Need to update cmake script")
endif()
endforeach()
caffe_list_unique(libflags folders)
set(${folders_var} ${folders} PARENT_SCOPE)
set(${flags_var} ${libflags} PARENT_SCOPE)
set(${frameworks_var} ${__framework} PARENT_SCOPE)
endfunction()
################################################################################################
# Helper function to detect Darwin version, i.e. 10.8, 10.9, 10.10, ....
# Usage:
# caffe_detect_darwin_version(<version_variable>)
# Detects the macOS product version (e.g. "10.11") via sw_vers; yields an
# empty string on non-Apple platforms.
function(caffe_detect_darwin_version output_var)
  if(NOT APPLE)
    set(${output_var} "" PARENT_SCOPE)
    return()
  endif()
  execute_process(COMMAND /usr/bin/sw_vers -productVersion
                  RESULT_VARIABLE __result OUTPUT_VARIABLE __version
                  ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(${output_var} ${__version} PARENT_SCOPE)
endfunction()
================================================
FILE: cmake/lint.cmake
================================================
# Standalone lint driver (run via `cmake -P`, so CMAKE_SOURCE_DIR is set
# manually): feeds all C/C++/CUDA sources to scripts/cpp_lint.py and fails
# with FATAL_ERROR when any lint errors are reported.
set(CMAKE_SOURCE_DIR ..)
set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)
set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc)
set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc)
set(LINT_DIRS include src/caffe examples tools python matlab)
cmake_policy(SET CMP0009 NEW) # suppress cmake warning
# find all files of interest
foreach(ext ${SRC_FILE_EXTENSIONS})
foreach(dir ${LINT_DIRS})
file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext})
set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES})
endforeach()
endforeach()
# find all files that should be excluded
foreach(ext ${EXCLUDE_FILE_EXTENSTIONS})
file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext})
set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES})
endforeach()
# exclude generated pb files
list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES})
# Lint output is captured from stderr (ERROR_VARIABLE).
execute_process(
COMMAND ${LINT_COMMAND} ${LINT_SOURCES}
ERROR_VARIABLE LINT_OUTPUT
ERROR_STRIP_TRAILING_WHITESPACE
)
# Split the captured output into lines; the last line presumably ends with
# the total error count — its last whitespace-separated token becomes
# NUM_ERRORS (TODO confirm against cpp_lint.py's output format).
string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT})
list(GET LINT_OUTPUT -1 LINT_RESULT)
list(REMOVE_AT LINT_OUTPUT -1)
string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT})
list(GET LINT_RESULT -1 NUM_ERRORS)
if(NUM_ERRORS GREATER 0)
# Echo every lint message except lines containing "Done" (progress output).
foreach(msg ${LINT_OUTPUT})
string(FIND ${msg} "Done" result)
if(result LESS 0)
message(STATUS ${msg})
endif()
endforeach()
message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!")
else()
message(STATUS "Lint did not find any errors!")
endif()
================================================
FILE: include/caffe/blob.hpp
================================================
#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
const int kMaxBlobAxes = 32;
namespace caffe {
/**
* @brief A wrapper around SyncedMemory holders serving as the basic
* computational unit through which Layer%s, Net%s, and Solver%s
* interact.
*
* TODO(dox): more thorough description.
*/
template <typename Dtype>
class Blob {
 public:
  Blob()
       : data_(), diff_(), count_(0), capacity_(0) {}

  /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
  explicit Blob(const int num, const int channels, const int height,
      const int width);
  explicit Blob(const vector<int>& shape);

  /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
  void Reshape(const int num, const int channels, const int height,
      const int width);
  /**
   * @brief Change the dimensions of the blob, allocating new memory if
   *        necessary.
   *
   * This function can be called both to create an initial allocation
   * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
   * or Layer::Forward. When changing the size of blob, memory will only be
   * reallocated if sufficient memory does not already exist, and excess memory
   * will never be freed.
   *
   * Note that reshaping an input blob and immediately calling Net::Backward is
   * an error; either Net::Forward or Net::Reshape need to be called to
   * propagate the new input shape to higher layers.
   */
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);

  /// @brief Human-readable "d0 d1 ... (count)" description of the shape.
  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
  inline const vector<int>& shape() const { return shape_; }
  /**
   * @brief Returns the dimension of the index-th axis (or the negative index-th
   *        axis from the end, if index is negative).
   *
   * @param index the axis index, which may be negative as it will be
   *        "canonicalized" using CanonicalAxisIndex.
   *        Dies on out of range index.
   */
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];
  }
  inline int num_axes() const { return shape_.size(); }
  inline int count() const { return count_; }

  /**
   * @brief Compute the volume of a slice; i.e., the product of dimensions
   *        among a range of axes.
   *
   * @param start_axis The first axis to include in the slice.
   *
   * @param end_axis The first axis to exclude from the slice.
   */
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis);
    CHECK_GE(start_axis, 0);
    CHECK_GE(end_axis, 0);
    CHECK_LE(start_axis, num_axes());
    CHECK_LE(end_axis, num_axes());
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
  /**
   * @brief Compute the volume of a slice spanning from a particular first
   *        axis to the final axis.
   *
   * @param start_axis The first axis to include in the slice.
   */
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());
  }

  /**
   * @brief Returns the 'canonical' version of a (usually) user-specified axis,
   *        allowing for negative indexing (e.g., -1 for the last axis).
   *
   * @param index the axis index.
   *        If 0 <= index < num_axes(), return index.
   *        If -num_axes <= index <= -1, return (num_axes() - (-index)),
   *        e.g., the last axis index (num_axes() - 1) if index == -1,
   *        the second to last if index == -2, etc.
   *        Dies on out of range index.
   */
  inline int CanonicalAxisIndex(int axis_index) const {
    CHECK_GE(axis_index, -num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }

  /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
  inline int num() const { return LegacyShape(0); }
  /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
  inline int channels() const { return LegacyShape(1); }
  /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
  inline int height() const { return LegacyShape(2); }
  /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
  inline int width() const { return LegacyShape(3); }
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }

  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    // Validate the indices themselves, not the shape: shape dimensions are
    // always non-negative, so the former CHECK_GE(channels(), 0) (and its
    // height/width counterparts) were vacuous and allowed negative c/h/w to
    // produce a bogus offset.
    CHECK_GE(c, 0);
    CHECK_LE(c, channels());
    CHECK_GE(h, 0);
    CHECK_LE(h, height());
    CHECK_GE(w, 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }

  inline int offset(const vector<int>& indices) const {
    CHECK_LE(static_cast<int>(indices.size()), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      // Cast avoids a signed/unsigned comparison; trailing axes omitted from
      // `indices` contribute 0 (i.e., the first element along that axis).
      if (static_cast<int>(indices.size()) > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
  /**
   * @brief Copy from a source Blob.
   *
   * @param source the Blob to copy from
   * @param copy_diff if false, copy the data; if true, copy the diff
   * @param reshape if false, require this Blob to be pre-shaped to the shape
   *        of other (and die otherwise); if true, Reshape this Blob to other's
   *        shape if necessary
   */
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }

  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }

  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }

  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }

  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }

  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }

  const Dtype* cpu_data() const;
  void set_cpu_data(Dtype* data);
  const Dtype* gpu_data() const;
  const Dtype* cpu_diff() const;
  const Dtype* gpu_diff() const;
  Dtype* mutable_cpu_data();
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  void Update();
  void FromProto(const BlobProto& proto, bool reshape = true);
  void ToProto(BlobProto* proto, bool write_diff = false) const;

  /// @brief Compute the sum of absolute values (L1 norm) of the data.
  Dtype asum_data() const;
  /// @brief Compute the sum of absolute values (L1 norm) of the diff.
  Dtype asum_diff() const;
  /// @brief Compute the sum of squares (L2 norm squared) of the data.
  Dtype sumsq_data() const;
  /// @brief Compute the sum of squares (L2 norm squared) of the diff.
  Dtype sumsq_diff() const;

  /// @brief Scale the blob data by a constant factor.
  void scale_data(Dtype scale_factor);
  /// @brief Scale the blob diff by a constant factor.
  void scale_diff(Dtype scale_factor);

  /**
   * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
   *        data_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's data_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  void ShareData(const Blob& other);
  /**
   * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
   *        diff_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's diff_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  void ShareDiff(const Blob& other);

  bool ShapeEquals(const BlobProto& other);

 protected:
  shared_ptr<SyncedMemory> data_;   // forward values
  shared_ptr<SyncedMemory> diff_;   // gradients
  vector<int> shape_;               // dimension of each axis
  int count_;                       // product of shape_ entries
  int capacity_;                    // elements currently allocated (>= count_)

  DISABLE_COPY_AND_ASSIGN(Blob);
};  // class Blob
} // namespace caffe
#endif // CAFFE_BLOB_HPP_
================================================
FILE: include/caffe/caffe.hpp
================================================
// caffe.hpp is the header file that you need to include in your code. It wraps
// all the internal caffe header files into one for simpler inclusion.
#ifndef CAFFE_CAFFE_HPP_
#define CAFFE_CAFFE_HPP_
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/solver.hpp"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
#endif // CAFFE_CAFFE_HPP_
================================================
FILE: include/caffe/common.hpp
================================================
#ifndef CAFFE_COMMON_HPP_
#define CAFFE_COMMON_HPP_
#include <boost/shared_ptr.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <climits>
#include <cmath>
#include <fstream> // NOLINT(readability/streams)
#include <iostream> // NOLINT(readability/streams)
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility> // pair
#include <vector>
#include "caffe/util/device_alternate.hpp"
// gflags 2.1 issue: namespace google was changed to gflags without warning.
// Luckily we will be able to use GFLAGS_GFLAGS_H_ to detect if it is version
// 2.1. If yes, we will add a temporary solution to redirect the namespace.
// TODO(Yangqing): Once gflags solves the problem in a more elegant way, let's
// remove the following hack.
#ifndef GFLAGS_GFLAGS_H_
namespace gflags = google;
#endif // GFLAGS_GFLAGS_H_
// Disable the copy and assignment operator for a class.
// (Declared-but-undefined private members; pre-C++11 idiom for "= delete".)
#define DISABLE_COPY_AND_ASSIGN(classname) \
private:\
  classname(const classname&);\
  classname& operator=(const classname&)

// Instantiate a class with float and double specifications.
// The guard char forces a link error if the macro is used twice for the
// same class name.
#define INSTANTIATE_CLASS(classname) \
  char gInstantiationGuard##classname; \
  template class classname<float>; \
  template class classname<double>

// Explicitly instantiate the GPU forward pass for float and double.
#define INSTANTIATE_LAYER_GPU_FORWARD(classname) \
  template void classname<float>::Forward_gpu( \
      const std::vector<Blob<float>*>& bottom, \
      const std::vector<Blob<float>*>& top); \
  template void classname<double>::Forward_gpu( \
      const std::vector<Blob<double>*>& bottom, \
      const std::vector<Blob<double>*>& top);

// Explicitly instantiate the GPU backward pass for float and double.
#define INSTANTIATE_LAYER_GPU_BACKWARD(classname) \
  template void classname<float>::Backward_gpu( \
      const std::vector<Blob<float>*>& top, \
      const std::vector<bool>& propagate_down, \
      const std::vector<Blob<float>*>& bottom); \
  template void classname<double>::Backward_gpu( \
      const std::vector<Blob<double>*>& top, \
      const std::vector<bool>& propagate_down, \
      const std::vector<Blob<double>*>& bottom)

// Instantiate both GPU passes in one go.
#define INSTANTIATE_LAYER_GPU_FUNCS(classname) \
  INSTANTIATE_LAYER_GPU_FORWARD(classname); \
  INSTANTIATE_LAYER_GPU_BACKWARD(classname)

// A simple macro to mark codes that are not implemented, so that when the code
// is executed we will see a fatal log.
#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet"

// See PR #1236
// Forward-declare cv::Mat so headers need not pull in OpenCV.
namespace cv { class Mat; }
namespace caffe {

// We will use the boost shared_ptr instead of the new C++11 one mainly
// because cuda does not work (at least now) well with C++11 features.
using boost::shared_ptr;

// Common functions and classes from std that caffe often uses.
using std::fstream;
using std::ios;
using std::isnan;
using std::isinf;
using std::iterator;
using std::make_pair;
using std::map;
using std::ostringstream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;

// A global initialization function that you should call in your main function.
// Currently it initializes google flags and google logging.
void GlobalInit(int* pargc, char*** pargv);

// A singleton class to hold common caffe stuff, such as the handler that
// caffe is going to use for cublas, curand, etc.
class Caffe {
 public:
  ~Caffe();

  // Thread local context for Caffe. Moved to common.cpp instead of
  // including boost/thread.hpp to avoid a boost/NVCC issues (#1009, #1010)
  // on OSX. Also fails on Linux with CUDA 7.0.18.
  static Caffe& Get();

  // Execution mode: CPU or GPU computation.
  enum Brew { CPU, GPU };

  // This random number generator facade hides boost and CUDA rng
  // implementation from one another (for cross-platform compatibility).
  class RNG {
   public:
    RNG();
    explicit RNG(unsigned int seed);
    explicit RNG(const RNG&);
    RNG& operator=(const RNG&);
    void* generator();
   private:
    class Generator;
    shared_ptr<Generator> generator_;
  };

  // Getters for boost rng, curand, and cublas handles
  // (lazily constructs the RNG on first use).
  inline static RNG& rng_stream() {
    if (!Get().random_generator_) {
      Get().random_generator_.reset(new RNG());
    }
    return *(Get().random_generator_);
  }
#ifndef CPU_ONLY
  inline static cublasHandle_t cublas_handle() { return Get().cublas_handle_; }
  inline static curandGenerator_t curand_generator() {
    return Get().curand_generator_;
  }
#endif

  // Returns the mode: running on CPU or GPU.
  inline static Brew mode() { return Get().mode_; }
  // The setters for the variables
  // Sets the mode. It is recommended that you don't change the mode halfway
  // into the program since that may cause allocation of pinned memory being
  // freed in a non-pinned way, which may cause problems - I haven't verified
  // it personally but better to note it here in the header file.
  inline static void set_mode(Brew mode) { Get().mode_ = mode; }
  // Sets the random seed of both boost and curand
  static void set_random_seed(const unsigned int seed);
  // Sets the device. Since we have cublas and curand stuff, set device also
  // requires us to reset those values.
  static void SetDevice(const int device_id);
  // Prints the current GPU status.
  static void DeviceQuery();
  // Parallel training info
  inline static int solver_count() { return Get().solver_count_; }
  inline static void set_solver_count(int val) { Get().solver_count_ = val; }
  inline static bool root_solver() { return Get().root_solver_; }
  inline static void set_root_solver(bool val) { Get().root_solver_ = val; }

 protected:
#ifndef CPU_ONLY
  cublasHandle_t cublas_handle_;        // shared cuBLAS handle
  curandGenerator_t curand_generator_;  // shared cuRAND generator
#endif
  shared_ptr<RNG> random_generator_;    // lazily-created host RNG
  Brew mode_;                           // current CPU/GPU mode
  int solver_count_;                    // number of parallel solvers
  bool root_solver_;                    // whether this thread is the root solver

 private:
  // The private constructor to avoid duplicate instantiation.
  Caffe();

  DISABLE_COPY_AND_ASSIGN(Caffe);
};

}  // namespace caffe
#endif // CAFFE_COMMON_HPP_
================================================
FILE: include/caffe/common_layers.hpp
================================================
#ifndef CAFFE_COMMON_LAYERS_HPP_
#define CAFFE_COMMON_LAYERS_HPP_
#include <string>
#include <utility>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/data_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/loss_layers.hpp"
#include "caffe/neuron_layers.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
/**
* @brief Compute the index of the @f$ K @f$ max values for each datum across
* all dimensions @f$ (C \times H \times W) @f$.
*
* Intended for use after a classification layer to produce a prediction.
* If parameter out_max_val is set to true, output is a vector of pairs
* (max_ind, max_val) for each image.
*
* NOTE: does not implement Backwards operation.
*/
template <typename Dtype>
class ArgMaxLayer : public Layer<Dtype> {
 public:
  /**
   * @param param provides ArgMaxParameter argmax_param,
   *     with ArgMaxLayer options:
   *   - top_k (\b optional uint, default 1).
   *     the number @f$ K @f$ of maximal items to output.
   *   - out_max_val (\b optional bool, default false).
   *     if set, output a vector of pairs (max_ind, max_val) for each image.
   */
  explicit ArgMaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "ArgMax"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times 1 \times K \times 1) @f$ or, if out_max_val
   *      @f$ (N \times 2 \times K \times 1) @f$
   *      the computed outputs @f$
   *       y_n = \arg\max\limits_i x_{ni}
   *      @f$ (for @f$ K = 1 @f$).
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /// @brief Not implemented (non-differentiable function)
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    NOT_IMPLEMENTED;
  }

  /// @brief If true, output (index, value) pairs instead of indices only.
  bool out_max_val_;
  /// @brief Number @f$ K @f$ of maximal items output per datum.
  size_t top_k_;
};
/**
* @brief Takes at least two Blob%s and concatenates them along either the num
* or channel dimension, outputting the result.
*/
template <typename Dtype>
class ConcatLayer : public Layer<Dtype> {
 public:
  explicit ConcatLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Concat"; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  /**
   * @param bottom input Blob vector (length 2+)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x_1 @f$
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x_2 @f$
   *   -# ...
   *   -# K @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x_K @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or
   *      @f$ (N \times KC \times H \times W) @f$ if axis == 1:
   *      the concatenated output @f$
   *        y = [\begin{array}{cccc} x_1 & x_2 & ... & x_K \end{array}]
   *      @f$
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Computes the error gradient w.r.t. the concatenate inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *        respect to the outputs
   *   -# @f$ (KN \times C \times H \times W) @f$ if axis == 0, or
   *      @f$ (N \times KC \times H \times W) @f$ if axis == 1:
   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
   *      with respect to concatenated outputs @f$ y @f$
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length K), into which the top gradient
   *        @f$ \frac{\partial E}{\partial y} @f$ is deconcatenated back to the
   *        inputs @f$
   *        \left[ \begin{array}{cccc}
   *          \frac{\partial E}{\partial x_1} &
   *          \frac{\partial E}{\partial x_2} &
   *          ... &
   *          \frac{\partial E}{\partial x_K}
   *        \end{array} \right] =
   *        \frac{\partial E}{\partial y}
   *        @f$
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // NOTE(review): member semantics inferred from names -- confirm against
  // concat_layer.cpp.
  int count_;              // total number of elements in the top blob
  int num_concats_;        // number of concatenation groups
  int concat_input_size_;  // size of the trailing (post-axis) dimensions
  int concat_axis_;        // canonical axis along which inputs are joined
};
/**
* @brief Compute elementwise operations, such as product and sum,
* along multiple input Blobs.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class EltwiseLayer : public Layer<Dtype> {
 public:
  explicit EltwiseLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Eltwise"; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// @brief The elementwise operation selected via EltwiseParameter.
  EltwiseParameter_EltwiseOp op_;
  /// @brief Per-input coefficients applied during the operation.
  vector<Dtype> coeffs_;
  // NOTE(review): presumably records the winning bottom index per element for
  // the MAX op's backward pass -- confirm in eltwise_layer.cpp.
  Blob<int> max_idx_;
  /// @brief Whether to use the slower-but-stabler product gradient.
  bool stable_prod_grad_;
};
/**
* @brief Takes two+ Blobs, interprets last Blob as a selector and
* filter remaining Blobs accordingly with selector data (0 means that
* the corresponding item has to be filtered, non-zero means that corresponding
* item needs to stay).
*/
template <typename Dtype>
class FilterLayer : public Layer<Dtype> {
 public:
  explicit FilterLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Filter"; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  /**
   * @param bottom input Blob vector (length 2+)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs to be filtered @f$ x_1 @f$
   *   -# ...
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs to be filtered @f$ x_K @f$
   *   -# @f$ (N \times 1 \times 1 \times 1) @f$
   *      the selector blob
   * @param top output Blob vector (length 1+)
   *   -# @f$ (S \times C \times H \times W) @f$
   *      the filtered output @f$ x_1 @f$
   *      where S is the number of items
   *      that haven't been filtered
   *      @f$ (S \times C \times H \times W) @f$
   *      the filtered output @f$ x_K @f$
   *      where S is the number of items
   *      that haven't been filtered
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Computes the error gradient w.r.t. the forwarded inputs.
   *
   * @param top output Blob vector (length 1+), providing the error gradient with
   *        respect to the outputs
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 2+), into which the top error
   *        gradient is copied
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// @brief Whether the next Reshape call is the first one after setup.
  bool first_reshape_;
  /// @brief Item indices selected (non-zero in the selector) for forwarding.
  vector<int> indices_to_forward_;
};
/**
* @brief Reshapes the input Blob into flat vectors.
*
* Note: because this layer does not change the input values -- merely the
* dimensions -- it can simply copy the input. The copy happens "virtually"
* (thus taking effectively 0 real time) by setting, in Forward, the data
* pointer of the top Blob to that of the bottom Blob (see Blob::ShareData),
* and in Backward, the diff pointer of the bottom Blob to that of the top Blob
* (see Blob::ShareDiff).
*/
template <typename Dtype>
class FlattenLayer : public Layer<Dtype> {
 public:
  explicit FlattenLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Flatten"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times CHW \times 1 \times 1) @f$
   *      the outputs -- i.e., the (virtually) copied, flattened inputs
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Computes the error gradient w.r.t. the concatenate inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *        respect to the outputs
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 1), into which the top error
   *        gradient is (virtually) copied
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
* @brief Also known as a "fully-connected" layer, computes an inner product
* with a set of learned weights, and (optionally) adds biases.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class InnerProductLayer : public Layer<Dtype> {
 public:
  explicit InnerProductLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "InnerProduct"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // GEMM dimensions of the computation top = bottom * weight^T + bias:
  // NOTE(review): conventional Caffe meaning (M_ = batch size, K_ = input
  // dimension, N_ = number of outputs) -- confirm in inner_product_layer.cpp.
  int M_;
  int K_;
  int N_;
  /// @brief Whether a bias is added after the matrix product.
  bool bias_term_;
  /// @brief Vector of ones used to broadcast the bias via GEMM.
  Blob<Dtype> bias_multiplier_;
};
/**
* @brief The compressed InnerProduct layer, also known as a compressed
* "fully-connected" layer
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class CInnerProductLayer : public Layer<Dtype> {
 public:
  explicit CInnerProductLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "CInnerProduct"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // GEMM dimensions, mirroring InnerProductLayer.
  int M_;
  int K_;
  int N_;
  /// @brief Whether a bias is added after the matrix product.
  bool bias_term_;
  /// @brief Vector of ones used to broadcast the bias via GEMM.
  Blob<Dtype> bias_multiplier_;

 private:
  // Compression (network-surgery) state. NOTE(review): semantics inferred
  // from Dynamic Network Surgery usage and not visible in this header --
  // confirm against cinner_product_layer.cpp before relying on them.
  Blob<Dtype> weight_tmp_;     // masked copy of the weights used in Forward
  Blob<Dtype> bias_tmp_;       // masked copy of the bias used in Forward
  Blob<Dtype> rand_weight_m_;  // per-weight random draws for mask updates
  Blob<Dtype> rand_bias_m_;    // per-bias random draws for mask updates
  Dtype gamma, power;          // presumably pruning-probability schedule params
  Dtype crate;                 // presumably compression-rate threshold
  Dtype mu, std;               // presumably running weight statistics
  int iter_stop_;              // iteration after which surgery stops
};
/**
* @brief Normalizes the input to have 0-mean and/or unit (1) variance.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class MVNLayer : public Layer<Dtype> {
 public:
  explicit MVNLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "MVN"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// @brief Intermediate buffers for the computed mean, variance, and scratch.
  Blob<Dtype> mean_, variance_, temp_;
  /// sum_multiplier is used to carry out sum using BLAS
  Blob<Dtype> sum_multiplier_;
  /// @brief Small constant added for numerical stability.
  Dtype eps_;
};
/*
* @brief Reshapes the input Blob into an arbitrary-sized output Blob.
*
* Note: similarly to FlattenLayer, this layer does not change the input values
* (see FlattenLayer, Blob::ShareData and Blob::ShareDiff).
*/
template <typename Dtype>
class ReshapeLayer : public Layer<Dtype> {
 public:
  explicit ReshapeLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Reshape"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  // Forward/Backward are intentional no-ops: the top blob shares the bottom
  // blob's memory (see the class comment), so no data is moved at runtime.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}

  /// @brief vector of axes indices whose dimensions we'll copy from the bottom
  vector<int> copy_axes_;
  /// @brief the index of the axis whose dimension we infer, or -1 if none
  int inferred_axis_;
  /// @brief the product of the "constant" output dimensions
  int constant_count_;
};
/**
* @brief Compute "reductions" -- operations that return a scalar output Blob
* for an input Blob of arbitrary size, such as the sum, absolute sum,
* and sum of squares.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class ReductionLayer : public Layer<Dtype> {
 public:
  explicit ReductionLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Reduction"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// @brief the reduction operation performed by the layer
  ReductionParameter_ReductionOp op_;
  /// @brief a scalar coefficient applied to all outputs
  Dtype coeff_;
  /// @brief the index of the first input axis to reduce
  int axis_;
  /// @brief the number of reductions performed
  int num_;
  /// @brief the input size of each reduction
  int dim_;
  /// @brief a helper Blob used for summation (op_ == SUM)
  Blob<Dtype> sum_multiplier_;
};
/**
* @brief Ignores bottom blobs while producing no top blobs. (This is useful
* to suppress outputs during testing.)
*/
template <typename Dtype>
class SilenceLayer : public Layer<Dtype> {
 public:
  explicit SilenceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  // No shape work is needed since the layer produces no top blobs.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "Silence"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 0; }

 protected:
  // Forward is a no-op: the bottoms are consumed but nothing is produced.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  // We can't define Forward_gpu here, since STUB_GPU will provide
  // its own definition for CPU_ONLY mode.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
/**
* @brief Computes the softmax function.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class SoftmaxLayer : public Layer<Dtype> {
 public:
  explicit SoftmaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Softmax"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// @brief Number of independent softmaxes over axes before softmax_axis_.
  int outer_num_;
  /// @brief Size of the dimensions after softmax_axis_.
  int inner_num_;
  /// @brief Canonical axis along which the softmax is computed.
  int softmax_axis_;
  /// sum_multiplier is used to carry out sum using BLAS
  Blob<Dtype> sum_multiplier_;
  /// scale is an intermediate Blob to hold temporary results.
  Blob<Dtype> scale_;
};
#ifdef USE_CUDNN
/**
* @brief cuDNN implementation of SoftmaxLayer.
* Fallback to SoftmaxLayer for CPU mode.
*/
template <typename Dtype>
class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> {
 public:
  explicit CuDNNSoftmaxLayer(const LayerParameter& param)
      : SoftmaxLayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNSoftmaxLayer();
 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // True once the cuDNN handle/descriptors have been created
  // (presumably guards their destruction in ~CuDNNSoftmaxLayer;
  // see cudnn_softmax_layer.cpp).
  bool handles_setup_;
  cudnnHandle_t handle_;               // cuDNN library context
  cudnnTensorDescriptor_t bottom_desc_;  // descriptor for the input tensor
  cudnnTensorDescriptor_t top_desc_;     // descriptor for the output tensor
};
#endif
/**
* @brief Creates a "split" path in the network by copying the bottom Blob
* into multiple top Blob%s to be used by multiple consuming layers.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class SplitLayer : public Layer<Dtype> {
 public:
  explicit SplitLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "Split"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // Element count of the bottom blob (set in Reshape; see split_layer.cpp).
  int count_;
};
/**
* @brief Takes a Blob and slices it along either the num or channel dimension,
* outputting multiple sliced Blob results.
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
template <typename Dtype>
class SliceLayer : public Layer<Dtype> {
 public:
  explicit SliceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "Slice"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 2; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // All of the following are computed during Reshape (see slice_layer.cpp).
  int count_;        // total element count of the bottom blob
  int num_slices_;   // number of independent slice groups before the axis
  int slice_size_;   // element count after the slice axis, per slice unit
  int slice_axis_;   // canonicalized axis along which slicing occurs
  // Explicit split positions from the SliceParameter (empty = even split).
  vector<int> slice_point_;
};
} // namespace caffe
#endif // CAFFE_COMMON_LAYERS_HPP_
================================================
FILE: include/caffe/data_layers.hpp
================================================
#ifndef CAFFE_DATA_LAYERS_HPP_
#define CAFFE_DATA_LAYERS_HPP_
#include <string>
#include <utility>
#include <vector>
#include "hdf5.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/data_reader.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/filler.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/blocking_queue.hpp"
#include "caffe/util/db.hpp"
namespace caffe {
/**
* @brief Provides base for data layers that feed blobs to the Net.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class BaseDataLayer : public Layer<Dtype> {
 public:
  explicit BaseDataLayer(const LayerParameter& param);
  // LayerSetUp: implements common data layer setup functionality, and calls
  // DataLayerSetUp to do special data layer setup for individual layer types.
  // This method may not be overridden except by the BasePrefetchingDataLayer.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Hook for subclasses to do their own setup; default does nothing.
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  // Data layers have nothing to backpropagate, so Backward is a no-op.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
 protected:
  // Copy of the layer's transformation settings from the proto.
  TransformationParameter transform_param_;
  // Applies scaling/mirroring/mean subtraction to raw input data.
  shared_ptr<DataTransformer<Dtype> > data_transformer_;
  // Whether a second (label) top blob is produced.
  bool output_labels_;
};
// A data/label blob pair handled as one unit by the prefetching machinery.
template <typename Dtype>
class Batch {
 public:
  Blob<Dtype> data_, label_;
};
template <typename Dtype>
class BasePrefetchingDataLayer :
    public BaseDataLayer<Dtype>, public InternalThread {
 public:
  explicit BasePrefetchingDataLayer(const LayerParameter& param);
  // LayerSetUp: implements common data layer setup functionality, and calls
  // DataLayerSetUp to do special data layer setup for individual layer types.
  // This method may not be overridden.
  void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Prefetches batches (asynchronously if to GPU memory)
  static const int PREFETCH_COUNT = 3;
 protected:
  // Thread body: repeatedly fills batches (see base_data_layer.cpp).
  virtual void InternalThreadEntry();
  // Subclasses implement this to populate one batch of data (and labels).
  virtual void load_batch(Batch<Dtype>* batch) = 0;
  // Fixed pool of batches cycled between the free and full queues below.
  Batch<Dtype> prefetch_[PREFETCH_COUNT];
  BlockingQueue<Batch<Dtype>*> prefetch_free_;  // batches ready to be filled
  BlockingQueue<Batch<Dtype>*> prefetch_full_;  // batches ready to be consumed
  // Scratch blob used while transforming individual data items.
  Blob<Dtype> transformed_data_;
};
// Feeds the net from a database (LevelDB/LMDB) via a shared DataReader.
template <typename Dtype>
class DataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit DataLayer(const LayerParameter& param);
  virtual ~DataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // DataLayer uses DataReader instead for sharing for parallelism
  virtual inline bool ShareInParallel() const { return false; }
  virtual inline const char* type() const { return "Data"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 2; }
 protected:
  virtual void load_batch(Batch<Dtype>* batch);
  // Round-robin source of Datum records; one reading thread per source.
  DataReader reader_;
};
/**
* @brief Provides data to the Net generated by a Filler.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class DummyDataLayer : public Layer<Dtype> {
 public:
  explicit DummyDataLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual inline const char* type() const { return "DummyData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Nothing to backpropagate for generated data.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  // One filler per top blob.
  vector<shared_ptr<Filler<Dtype> > > fillers_;
  // Whether each top blob is refilled on every Forward call
  // (presumably false for constant fillers after the first pass;
  // see dummy_data_layer.cpp).
  vector<bool> refill_;
};
/**
* @brief Provides data to the Net from HDF5 files.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class HDF5DataLayer : public Layer<Dtype> {
 public:
  explicit HDF5DataLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual ~HDF5DataLayer();
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual inline const char* type() const { return "HDF5Data"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Nothing to backpropagate for file-sourced data.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  // Loads all datasets of one HDF5 file into hdf_blobs_.
  virtual void LoadHDF5FileData(const char* filename);
  std::vector<std::string> hdf_filenames_;  // files listed in the source file
  unsigned int num_files_;                  // hdf_filenames_.size()
  unsigned int current_file_;               // index of the file being served
  hsize_t current_row_;                     // next row to serve from that file
  // In-memory contents of the current file, one blob per top.
  std::vector<shared_ptr<Blob<Dtype> > > hdf_blobs_;
  // Shuffle orders for rows within a file and for the file list
  // (identity when shuffling is disabled; see hdf5_data_layer.cpp).
  std::vector<unsigned int> data_permutation_;
  std::vector<unsigned int> file_permutation_;
};
/**
* @brief Write blobs to disk as HDF5 files.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class HDF5OutputLayer : public Layer<Dtype> {
 public:
  explicit HDF5OutputLayer(const LayerParameter& param)
      : Layer<Dtype>(param), file_opened_(false) {}
  virtual ~HDF5OutputLayer();
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual inline const char* type() const { return "HDF5Output"; }
  // TODO: no limit on the number of blobs
  virtual inline int ExactNumBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 0; }
  inline std::string file_name() const { return file_name_; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // Writes data_blob_/label_blob_ into the open HDF5 file.
  virtual void SaveBlobs();
  bool file_opened_;       // guards against writing before the file is open
  std::string file_name_;  // destination path from the layer parameter
  hid_t file_id_;          // HDF5 handle of the open output file
  Blob<Dtype> data_blob_;  // staging copy of bottom[0] before saving
  Blob<Dtype> label_blob_; // staging copy of bottom[1] before saving
};
/**
* @brief Provides data to the Net from image files.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit ImageDataLayer(const LayerParameter& param)
      : BasePrefetchingDataLayer<Dtype>(param) {}
  virtual ~ImageDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "ImageData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }
 protected:
  // RNG used when shuffling the image list.
  shared_ptr<Caffe::RNG> prefetch_rng_;
  virtual void ShuffleImages();
  virtual void load_batch(Batch<Dtype>* batch);
  // (image path, label) pairs read from the listing file.
  vector<std::pair<std::string, int> > lines_;
  // Index of the next line to load from lines_.
  int lines_id_;
};
/**
* @brief Provides data to the Net from memory.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class MemoryDataLayer : public BaseDataLayer<Dtype> {
 public:
  explicit MemoryDataLayer(const LayerParameter& param)
      : BaseDataLayer<Dtype>(param), has_new_data_(false) {}
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "MemoryData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }
  // Queue data supplied as Datum records or OpenCV Mats.
  virtual void AddDatumVector(const vector<Datum>& datum_vector);
  virtual void AddMatVector(const vector<cv::Mat>& mat_vector,
      const vector<int>& labels);
  // Reset should accept const pointers, but can't, because the memory
  // will be given to Blob, which is mutable
  void Reset(Dtype* data, Dtype* label, int n);
  void set_batch_size(int new_size);
  // Simple accessors for the configured batch geometry.
  int batch_size() { return batch_size_; }
  int channels() { return channels_; }
  int height() { return height_; }
  int width() { return width_; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // size_ is the per-item element count (presumably
  // channels_ * height_ * width_; see memory_data_layer.cpp).
  int batch_size_, channels_, height_, width_, size_;
  Dtype* data_;    // caller-owned data buffer set via Reset()
  Dtype* labels_;  // caller-owned label buffer set via Reset()
  int n_;          // number of items in the caller-owned buffers
  size_t pos_;     // read cursor into the caller-owned buffers
  Blob<Dtype> added_data_;   // storage for data queued via Add*Vector
  Blob<Dtype> added_label_;  // storage for labels queued via Add*Vector
  bool has_new_data_;        // true while queued Add*Vector data is unconsumed
};
/**
* @brief Provides data to the Net from windows of images files, specified
* by a window data file.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
template <typename Dtype>
class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit WindowDataLayer(const LayerParameter& param)
      : BasePrefetchingDataLayer<Dtype>(param) {}
  virtual ~WindowDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "WindowData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }
 protected:
  virtual unsigned int PrefetchRand();
  virtual void load_batch(Batch<Dtype>* batch);
  shared_ptr<Caffe::RNG> prefetch_rng_;
  // (image path, image size info) entries from the window data file.
  vector<std::pair<std::string, vector<int> > > image_database_;
  // Column layout of a window record; NUM is the field count.
  enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM };
  vector<vector<float> > fg_windows_;  // foreground (object) windows
  vector<vector<float> > bg_windows_;  // background windows
  Blob<Dtype> data_mean_;       // mean image loaded from the mean file
  vector<Dtype> mean_values_;   // per-channel mean values, if given instead
  bool has_mean_file_;
  bool has_mean_values_;
  // If set, raw image bytes are kept in memory to avoid re-reading files.
  bool cache_images_;
  vector<std::pair<std::string, Datum > > image_database_cache_;
};
} // namespace caffe
#endif // CAFFE_DATA_LAYERS_HPP_
================================================
FILE: include/caffe/data_reader.hpp
================================================
#ifndef CAFFE_DATA_READER_HPP_
#define CAFFE_DATA_READER_HPP_
#include <map>
#include <string>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/util/blocking_queue.hpp"
#include "caffe/util/db.hpp"
namespace caffe {
/**
* @brief Reads data from a source to queues available to data layers.
* A single reading thread is created per source, even if multiple solvers
* are running in parallel, e.g. for multi-GPU training. This makes sure
* databases are read sequentially, and that each solver accesses a different
* subset of the database. Data is distributed to solvers in a round-robin
* way to keep parallel training deterministic.
*/
class DataReader {
 public:
  explicit DataReader(const LayerParameter& param);
  ~DataReader();
  // Queue of empty Datum slots for the reading thread to fill.
  inline BlockingQueue<Datum*>& free() const {
    return queue_pair_->free_;
  }
  // Queue of filled Datum records ready for the data layer to consume.
  inline BlockingQueue<Datum*>& full() const {
    return queue_pair_->full_;
  }
 protected:
  // Queue pairs are shared between a body and its readers
  class QueuePair {
   public:
    explicit QueuePair(int size);
    ~QueuePair();
    BlockingQueue<Datum*> free_;
    BlockingQueue<Datum*> full_;
    DISABLE_COPY_AND_ASSIGN(QueuePair);
  };
  // A single body is created per source
  class Body : public InternalThread {
   public:
    explicit Body(const LayerParameter& param);
    virtual ~Body();
   protected:
    void InternalThreadEntry();
    void read_one(db::Cursor* cursor, QueuePair* qp);
    const LayerParameter param_;
    // Queue pairs of solvers that attached after the body started.
    BlockingQueue<shared_ptr<QueuePair> > new_queue_pairs_;
    friend class DataReader;
    DISABLE_COPY_AND_ASSIGN(Body);
  };
  // A source is uniquely identified by its layer name + path, in case
  // the same database is read from two different locations in the net.
  static inline string source_key(const LayerParameter& param) {
    return param.name() + ":" + param.data_param().source();
  }
  const shared_ptr<QueuePair> queue_pair_;
  shared_ptr<Body> body_;
  // Registry of live reading bodies, keyed by source_key(); weak_ptr so a
  // body dies when its last reader goes away.
  static map<const string, boost::weak_ptr<DataReader::Body> > bodies_;
  DISABLE_COPY_AND_ASSIGN(DataReader);
};
} // namespace caffe
#endif // CAFFE_DATA_READER_HPP_
================================================
FILE: include/caffe/data_transformer.hpp
================================================
#ifndef CAFFE_DATA_TRANSFORMER_HPP
#define CAFFE_DATA_TRANSFORMER_HPP
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
/**
* @brief Applies common transformations to the input data, such as
 * scaling, mirroring, subtracting the image mean...
*/
template <typename Dtype>
class DataTransformer {
 public:
  explicit DataTransformer(const TransformationParameter& param, Phase phase);
  virtual ~DataTransformer() {}
  /**
   * @brief Initialize the Random number generations if needed by the
   *    transformation.
   */
  void InitRand();
  /**
   * @brief Applies the transformation defined in the data layer's
   * transform_param block to the data.
   *
   * @param datum
   *    Datum containing the data to be transformed.
   * @param transformed_blob
   *    This is destination blob. It can be part of top blob's data if
   *    set_cpu_data() is used. See data_layer.cpp for an example.
   */
  void Transform(const Datum& datum, Blob<Dtype>* transformed_blob);
  /**
   * @brief Applies the transformation defined in the data layer's
   * transform_param block to a vector of Datum.
   *
   * @param datum_vector
   *    A vector of Datum containing the data to be transformed.
   * @param transformed_blob
   *    This is destination blob. It can be part of top blob's data if
   *    set_cpu_data() is used. See memory_layer.cpp for an example.
   */
  void Transform(const vector<Datum> & datum_vector,
                Blob<Dtype>* transformed_blob);
  /**
   * @brief Applies the transformation defined in the data layer's
   * transform_param block to a vector of Mat.
   *
   * @param mat_vector
   *    A vector of Mat containing the data to be transformed.
   * @param transformed_blob
   *    This is destination blob. It can be part of top blob's data if
   *    set_cpu_data() is used. See memory_layer.cpp for an example.
   */
  void Transform(const vector<cv::Mat> & mat_vector,
                Blob<Dtype>* transformed_blob);
  /**
   * @brief Applies the transformation defined in the data layer's
   * transform_param block to a cv::Mat
   *
   * @param cv_img
   *    cv::Mat containing the data to be transformed.
   * @param transformed_blob
   *    This is destination blob. It can be part of top blob's data if
   *    set_cpu_data() is used. See image_data_layer.cpp for an example.
   */
  void Transform(const cv::Mat& cv_img, Blob<Dtype>* transformed_blob);
  /**
   * @brief Applies the same transformation defined in the data layer's
   * transform_param block to all the num images in a input_blob.
   *
   * @param input_blob
   *    A Blob containing the data to be transformed. It applies the same
   *    transformation to all the num images in the blob.
   * @param transformed_blob
   *    This is destination blob, it will contain as many images as the
   *    input blob. It can be part of top blob's data.
   */
  void Transform(Blob<Dtype>* input_blob, Blob<Dtype>* transformed_blob);
  /**
   * @brief Infers the shape of transformed_blob will have when
   *    the transformation is applied to the data.
   *
   * @param datum
   *    Datum containing the data to be transformed.
   */
  vector<int> InferBlobShape(const Datum& datum);
  /**
   * @brief Infers the shape of transformed_blob will have when
   *    the transformation is applied to the data.
   *    It uses the first element to infer the shape of the blob.
   *
   * @param datum_vector
   *    A vector of Datum containing the data to be transformed.
   */
  vector<int> InferBlobShape(const vector<Datum> & datum_vector);
  /**
   * @brief Infers the shape of transformed_blob will have when
   *    the transformation is applied to the data.
   *    It uses the first element to infer the shape of the blob.
   *
   * @param mat_vector
   *    A vector of Mat containing the data to be transformed.
   */
  vector<int> InferBlobShape(const vector<cv::Mat> & mat_vector);
  /**
   * @brief Infers the shape of transformed_blob will have when
   *    the transformation is applied to the data.
   *
   * @param cv_img
   *    cv::Mat containing the data to be transformed.
   */
  vector<int> InferBlobShape(const cv::Mat& cv_img);
 protected:
  /**
   * @brief Generates a random integer from Uniform({0, 1, ..., n-1}).
   *
   * @param n
   *    The upperbound (exclusive) value of the random number.
   * @return
   *    A uniformly random integer value from ({0, 1, ..., n-1}).
   */
  virtual int Rand(int n);
  void Transform(const Datum& datum, Dtype* transformed_data);
  // Transformation parameters
  TransformationParameter param_;
  shared_ptr<Caffe::RNG> rng_;  // RNG for random crops/mirroring
  Phase phase_;                 // TRAIN or TEST; selects transform behavior
  Blob<Dtype> data_mean_;       // mean image loaded from the mean file
  vector<Dtype> mean_values_;   // per-channel means, if given instead
};
} // namespace caffe
#endif  // CAFFE_DATA_TRANSFORMER_HPP
================================================
FILE: include/caffe/filler.hpp
================================================
// Fillers are random number generators that fills a blob using the specified
// algorithm. The expectation is that they are only going to be used during
// initialization time and will not involve any GPUs.
#ifndef CAFFE_FILLER_HPP
#define CAFFE_FILLER_HPP
#include <string>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
/// @brief Fills a Blob with constant or randomly-generated data.
template <typename Dtype>
class Filler {
 public:
  explicit Filler(const FillerParameter& param) : filler_param_(param) {}
  virtual ~Filler() {}
  // Populate the blob's CPU data according to filler_param_; implemented
  // by each concrete filler subclass.
  virtual void Fill(Blob<Dtype>* blob) = 0;
 protected:
  FillerParameter filler_param_;  // copy of the configuring proto message
};  // class Filler
/// @brief Fills a Blob with constant values @f$ x = 0 @f$.
template <typename Dtype>
class ConstantFiller : public Filler<Dtype> {
 public:
  explicit ConstantFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Writes filler_param_.value() into every element of the blob.
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* dst = blob->mutable_cpu_data();
    const int total = blob->count();
    const Dtype fill_value = this->filler_param_.value();
    CHECK(total);
    for (int idx = 0; idx < total; ++idx) {
      dst[idx] = fill_value;
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$.
template <typename Dtype>
class UniformFiller : public Filler<Dtype> {
 public:
  explicit UniformFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Draws each element i.i.d. from U(min, max) as configured in the proto.
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    const Dtype lower = Dtype(this->filler_param_.min());
    const Dtype upper = Dtype(this->filler_param_.max());
    caffe_rng_uniform<Dtype>(blob->count(), lower, upper,
        blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/// @brief Fills a Blob with Gaussian-distributed values @f$ x \sim N(\mu, \sigma^2) @f$.
template <typename Dtype>
class GaussianFiller : public Filler<Dtype> {
 public:
  explicit GaussianFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Draws each element i.i.d. from N(mean, std^2); optionally zeroes a
  // random subset of elements when sparse initialization is requested.
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* values = blob->mutable_cpu_data();
    CHECK(blob->count());
    caffe_rng_gaussian<Dtype>(blob->count(), Dtype(this->filler_param_.mean()),
        Dtype(this->filler_param_.std()), blob->mutable_cpu_data());
    const int sparse = this->filler_param_.sparse();
    CHECK_GE(sparse, -1);
    if (sparse >= 0) {
      // Sparse initialization is implemented for "weight" blobs; i.e.
      // matrices. 'sparse' gives the mean number of non-zero input weights
      // per output, so each weight survives independently with probability
      // sparse / num_outputs.
      CHECK_GE(blob->num_axes(), 1);
      const int num_outputs = blob->shape(0);
      const Dtype keep_probability = Dtype(sparse) / Dtype(num_outputs);
      rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int)));
      int* keep = reinterpret_cast<int*>(rand_vec_->mutable_cpu_data());
      caffe_rng_bernoulli(blob->count(), keep_probability, keep);
      for (int idx = 0; idx < blob->count(); ++idx) {
        values[idx] *= keep[idx];
      }
    }
  }
 protected:
  shared_ptr<SyncedMemory> rand_vec_;  // holds the Bernoulli keep/drop mask
};
/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$
* such that @f$ \forall i \sum_j x_{ij} = 1 @f$.
*/
template <typename Dtype>
class PositiveUnitballFiller : public Filler<Dtype> {
 public:
  explicit PositiveUnitballFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Draws U(0, 1) values and then rescales each row (length count/num)
  // so that its entries sum to one.
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* data = blob->mutable_cpu_data();
    DCHECK(blob->count());
    caffe_rng_uniform<Dtype>(blob->count(), 0, 1, blob->mutable_cpu_data());
    // Filling happens rarely, so a plain two-pass normalization is fine.
    const int row_len = blob->count() / blob->num();
    CHECK(row_len);
    for (int row = 0; row < blob->num(); ++row) {
      Dtype row_sum = 0;
      for (int col = 0; col < row_len; ++col) {
        row_sum += data[row * row_len + col];
      }
      for (int col = 0; col < row_len; ++col) {
        data[row * row_len + col] /= row_sum;
      }
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is
* set inversely proportional to number of incoming nodes, outgoing
* nodes, or their average.
*
* A Filler based on the paper [Bengio and Glorot 2010]: Understanding
 * the difficulty of training deep feedforward neural networks.
*
* It fills the incoming matrix by randomly sampling uniform data from [-scale,
* scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their
* average, depending on the variance_norm option. You should make sure the
* input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c
* = fan_out. Note that this is currently not the case for inner product layers.
*
* TODO(dox): make notation in above comment consistent with rest & use LaTeX.
*/
template <typename Dtype>
class XavierFiller : public Filler<Dtype> {
 public:
  explicit XavierFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Samples U(-scale, scale) with scale = sqrt(3 / n), where n is chosen
  // by the variance_norm option (fan_in by default).
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    const int fan_in = blob->count() / blob->num();
    const int fan_out = blob->count() / blob->channels();
    Dtype n;
    switch (this->filler_param_.variance_norm()) {
      case FillerParameter_VarianceNorm_AVERAGE:
        n = (fan_in + fan_out) / Dtype(2);
        break;
      case FillerParameter_VarianceNorm_FAN_OUT:
        n = fan_out;
        break;
      default:  // FAN_IN
        n = fan_in;
    }
    const Dtype scale = sqrt(Dtype(3) / n);
    caffe_rng_uniform<Dtype>(blob->count(), -scale, scale,
        blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where
* @f$ \sigma^2 @f$ is set inversely proportional to number of incoming
* nodes, outgoing nodes, or their average.
*
* A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically
* accounts for ReLU nonlinearities.
*
* Aside: for another perspective on the scaling factor, see the derivation of
* [Saxe, McClelland, and Ganguli 2013 (v3)].
*
* It fills the incoming matrix by randomly sampling Gaussian data with std =
* sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on
* the variance_norm option. You should make sure the input blob has shape (num,
* a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this
* is currently not the case for inner product layers.
*/
template <typename Dtype>
class MSRAFiller : public Filler<Dtype> {
 public:
  explicit MSRAFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Samples N(0, 2 / n), where n is chosen by the variance_norm option
  // (fan_in by default); see He et al. 2015.
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    const int fan_in = blob->count() / blob->num();
    const int fan_out = blob->count() / blob->channels();
    Dtype n;
    switch (this->filler_param_.variance_norm()) {
      case FillerParameter_VarianceNorm_AVERAGE:
        n = (fan_in + fan_out) / Dtype(2);
        break;
      case FillerParameter_VarianceNorm_FAN_OUT:
        n = fan_out;
        break;
      default:  // FAN_IN
        n = fan_in;
    }
    const Dtype std_dev = sqrt(Dtype(2) / n);
    caffe_rng_gaussian<Dtype>(blob->count(), Dtype(0), std_dev,
        blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/*!
@brief Fills a Blob with coefficients for bilinear interpolation.
A common use case is with the DeconvolutionLayer acting as upsampling.
You can upsample a feature map with shape of (B, C, H, W) by any integer factor
using the following proto.
\code
layer {
name: "upsample", type: "Deconvolution"
bottom: "{{bottom_name}}" top: "{{top_name}}"
convolution_param {
kernel_size: {{2 * factor - factor % 2}} stride: {{factor}}
num_output: {{C}} group: {{C}}
pad: {{ceil((factor - 1) / 2.)}}
weight_filler: { type: "bilinear" } bias_term: false
}
param { lr_mult: 0 decay_mult: 0 }
}
\endcode
Please use this by replacing `{{}}` with your values. By specifying
`num_output: {{C}} group: {{C}}`, it behaves as
channel-wise convolution. The filter shape of this deconvolution layer will be
(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K)
interpolation kernel for every channel of the filter identically. The resulting
shape of the top feature map will be (B, C, factor * H, factor * W).
Note that the learning rate and the
weight decay are set to 0 in order to keep coefficient values of bilinear
interpolation unchanged during training. If you apply this to an image, this
operation is equivalent to the following call in Python with Scikit.Image.
\code{.py}
out = skimage.transform.rescale(img, factor, mode='constant', cval=0)
\endcode
*/
template <typename Dtype>
class BilinearFiller : public Filler<Dtype> {
 public:
  explicit BilinearFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Fills a square KxK filter with bilinear interpolation coefficients,
  // identically for every channel of a 4-D blob.
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim.";
    CHECK_EQ(blob->width(), blob->height()) << "Filter must be square";
    Dtype* coeffs = blob->mutable_cpu_data();
    const int f = ceil(blob->width() / 2.);
    const float c = (2 * f - 1 - f % 2) / (2. * f);
    for (int i = 0; i < blob->count(); ++i) {
      const float x = i % blob->width();
      const float y = (i / blob->width()) % blob->height();
      coeffs[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
        << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Get a specific filler from the specification given in FillerParameter.
*
* Ideally this would be replaced by a factory pattern, but we will leave it
* this way for now.
*/
// Looks up the filler type string and constructs the matching Filler
// subclass; the caller owns the returned pointer. Aborts via CHECK on an
// unrecognized type.
template <typename Dtype>
Filler<Dtype>* GetFiller(const FillerParameter& param) {
  const std::string& requested = param.type();
  if (requested == "constant") {
    return new ConstantFiller<Dtype>(param);
  }
  if (requested == "gaussian") {
    return new GaussianFiller<Dtype>(param);
  }
  if (requested == "positive_unitball") {
    return new PositiveUnitballFiller<Dtype>(param);
  }
  if (requested == "uniform") {
    return new UniformFiller<Dtype>(param);
  }
  if (requested == "xavier") {
    return new XavierFiller<Dtype>(param);
  }
  if (requested == "msra") {
    return new MSRAFiller<Dtype>(param);
  }
  if (requested == "bilinear") {
    return new BilinearFiller<Dtype>(param);
  }
  CHECK(false) << "Unknown filler name: " << param.type();
  return static_cast<Filler<Dtype>*>(NULL);
}
} // namespace caffe
#endif // CAFFE_FILLER_HPP_
================================================
FILE: include/caffe/internal_thread.hpp
================================================
#ifndef CAFFE_INTERNAL_THREAD_HPP_
#define CAFFE_INTERNAL_THREAD_HPP_
#include "caffe/common.hpp"
/**
Forward declare boost::thread instead of including boost/thread.hpp
to avoid a boost/NVCC issues (#1009, #1010) on OSX.
*/
namespace boost { class thread; }
namespace caffe {
/**
 * Virtual class encapsulating a boost::thread for use as a base class.
 * A child class acquires the ability to run a single internal thread
 * by reimplementing the virtual function InternalThreadEntry.
*/
class InternalThread {
 public:
  InternalThread() : thread_() {}
  // NOTE(review): presumably stops/joins the internal thread before
  // destruction -- confirm against internal_thread.cpp.
  virtual ~InternalThread();

  /**
   * Caffe's thread local state will be initialized using the current
   * thread values, e.g. device id, solver index etc. The random seed
   * is initialized using caffe_rng_rand.
   */
  void StartInternalThread();

  /** Will not return until the internal thread has exited. */
  void StopInternalThread();

  // Whether the internal thread has been started and not yet stopped
  // (implementation in internal_thread.cpp).
  bool is_started() const;

 protected:
  /* Implement this method in your subclass
      with the code you want your thread to run. */
  virtual void InternalThreadEntry() {}

  /* Should be tested when running loops to exit when requested. */
  bool must_stop();

 private:
  // Trampoline executed on the new thread; given the captured Caffe state
  // (device, mode, RNG seed, solver count, root-solver flag), it presumably
  // restores that state before invoking InternalThreadEntry() -- see
  // internal_thread.cpp for the exact behavior.
  void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count,
      bool root_solver);

  // Owned worker thread; empty until StartInternalThread() is called.
  shared_ptr<boost::thread> thread_;
};
} // namespace caffe
#endif // CAFFE_INTERNAL_THREAD_HPP_
================================================
FILE: include/caffe/layer.hpp
================================================
#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/device_alternate.hpp"
/**
Forward declare boost::thread instead of including boost/thread.hpp
to avoid a boost/NVCC issues (#1009, #1010) on OSX.
*/
namespace boost { class mutex; }
namespace caffe {
/**
* @brief An interface for the units of computation which can be composed into a
* Net.
*
* Layer%s must implement a Forward function, in which they take their input
* (bottom) Blob%s (if any) and compute their output Blob%s (if any).
* They may also implement a Backward function, in which they compute the error
* gradients with respect to their input Blob%s, given the error gradients with
* their output Blob%s.
*/
te
gitextract_g_i19mmv/
├── CMakeLists.txt
├── CONTRIBUTING.md
├── CONTRIBUTORS.md
├── INSTALL.md
├── LICENSE
├── Makefile
├── Makefile.config.example
├── README.md
├── caffe.cloc
├── cmake/
│ ├── ConfigGen.cmake
│ ├── Cuda.cmake
│ ├── Dependencies.cmake
│ ├── External/
│ │ ├── gflags.cmake
│ │ └── glog.cmake
│ ├── Misc.cmake
│ ├── Modules/
│ │ ├── FindAtlas.cmake
│ │ ├── FindGFlags.cmake
│ │ ├── FindGlog.cmake
│ │ ├── FindLAPACK.cmake
│ │ ├── FindLMDB.cmake
│ │ ├── FindLevelDB.cmake
│ │ ├── FindMKL.cmake
│ │ ├── FindMatlabMex.cmake
│ │ ├── FindNumPy.cmake
│ │ ├── FindOpenBLAS.cmake
│ │ ├── FindSnappy.cmake
│ │ └── FindvecLib.cmake
│ ├── ProtoBuf.cmake
│ ├── Summary.cmake
│ ├── Targets.cmake
│ ├── Templates/
│ │ ├── CaffeConfig.cmake.in
│ │ ├── CaffeConfigVersion.cmake.in
│ │ └── caffe_config.h.in
│ ├── Utils.cmake
│ └── lint.cmake
├── include/
│ └── caffe/
│ ├── blob.hpp
│ ├── caffe.hpp
│ ├── common.hpp
│ ├── common_layers.hpp
│ ├── data_layers.hpp
│ ├── data_reader.hpp
│ ├── data_transformer.hpp
│ ├── filler.hpp
│ ├── internal_thread.hpp
│ ├── layer.hpp
│ ├── layer_factory.hpp
│ ├── loss_layers.hpp
│ ├── net.hpp
│ ├── neuron_layers.hpp
│ ├── parallel.hpp
│ ├── python_layer.hpp
│ ├── solver.hpp
│ ├── syncedmem.hpp
│ ├── test/
│ │ ├── test_caffe_main.hpp
│ │ └── test_gradient_check_util.hpp
│ ├── util/
│ │ ├── benchmark.hpp
│ │ ├── blocking_queue.hpp
│ │ ├── cudnn.hpp
│ │ ├── db.hpp
│ │ ├── db_leveldb.hpp
│ │ ├── db_lmdb.hpp
│ │ ├── device_alternate.hpp
│ │ ├── hdf5.hpp
│ │ ├── im2col.hpp
│ │ ├── insert_splits.hpp
│ │ ├── io.hpp
│ │ ├── math_functions.hpp
│ │ ├── mkl_alternate.hpp
│ │ ├── rng.hpp
│ │ └── upgrade_proto.hpp
│ └── vision_layers.hpp
├── models/
│ ├── lenet300100/
│ │ ├── caffe_lenet300100_original.caffemodel
│ │ ├── caffe_lenet300100_sparse.caffemodel
│ │ └── lenet300100.prototxt
│ └── lenet5/
│ ├── caffe_lenet5_original.caffemodel
│ ├── caffe_lenet5_sparse.caffemodel
│ └── lenet5.prototxt
├── src/
│ ├── caffe/
│ │ ├── CMakeLists.txt
│ │ ├── blob.cpp
│ │ ├── common.cpp
│ │ ├── data_reader.cpp
│ │ ├── data_transformer.cpp
│ │ ├── internal_thread.cpp
│ │ ├── layer.cpp
│ │ ├── layer_factory.cpp
│ │ ├── layers/
│ │ │ ├── absval_layer.cpp
│ │ │ ├── absval_layer.cu
│ │ │ ├── accuracy_layer.cpp
│ │ │ ├── argmax_layer.cpp
│ │ │ ├── base_conv_layer.cpp
│ │ │ ├── base_data_layer.cpp
│ │ │ ├── base_data_layer.cu
│ │ │ ├── bnll_layer.cpp
│ │ │ ├── bnll_layer.cu
│ │ │ ├── compress_conv_layer.cpp
│ │ │ ├── compress_conv_layer.cu
│ │ │ ├── compress_inner_product_layer.cpp
│ │ │ ├── compress_inner_product_layer.cu
│ │ │ ├── concat_layer.cpp
│ │ │ ├── concat_layer.cu
│ │ │ ├── contrastive_loss_layer.cpp
│ │ │ ├── contrastive_loss_layer.cu
│ │ │ ├── conv_layer.cpp
│ │ │ ├── conv_layer.cu
│ │ │ ├── cudnn_conv_layer.cpp
│ │ │ ├── cudnn_conv_layer.cu
│ │ │ ├── cudnn_pooling_layer.cpp
│ │ │ ├── cudnn_pooling_layer.cu
│ │ │ ├── cudnn_relu_layer.cpp
│ │ │ ├── cudnn_relu_layer.cu
│ │ │ ├── cudnn_sigmoid_layer.cpp
│ │ │ ├── cudnn_sigmoid_layer.cu
│ │ │ ├── cudnn_softmax_layer.cpp
│ │ │ ├── cudnn_softmax_layer.cu
│ │ │ ├── cudnn_tanh_layer.cpp
│ │ │ ├── cudnn_tanh_layer.cu
│ │ │ ├── data_layer.cpp
│ │ │ ├── deconv_layer.cpp
│ │ │ ├── deconv_layer.cu
│ │ │ ├── dropout_layer.cpp
│ │ │ ├── dropout_layer.cu
│ │ │ ├── dummy_data_layer.cpp
│ │ │ ├── eltwise_layer.cpp
│ │ │ ├── eltwise_layer.cu
│ │ │ ├── euclidean_loss_layer.cpp
│ │ │ ├── euclidean_loss_layer.cu
│ │ │ ├── exp_layer.cpp
│ │ │ ├── exp_layer.cu
│ │ │ ├── filter_layer.cpp
│ │ │ ├── filter_layer.cu
│ │ │ ├── flatten_layer.cpp
│ │ │ ├── hdf5_data_layer.cpp
│ │ │ ├── hdf5_data_layer.cu
│ │ │ ├── hdf5_output_layer.cpp
│ │ │ ├── hdf5_output_layer.cu
│ │ │ ├── hinge_loss_layer.cpp
│ │ │ ├── im2col_layer.cpp
│ │ │ ├── im2col_layer.cu
│ │ │ ├── image_data_layer.cpp
│ │ │ ├── infogain_loss_layer.cpp
│ │ │ ├── inner_product_layer.cpp
│ │ │ ├── inner_product_layer.cu
│ │ │ ├── loss_layer.cpp
│ │ │ ├── lrn_layer.cpp
│ │ │ ├── lrn_layer.cu
│ │ │ ├── memory_data_layer.cpp
│ │ │ ├── multinomial_logistic_loss_layer.cpp
│ │ │ ├── mvn_layer.cpp
│ │ │ ├── mvn_layer.cu
│ │ │ ├── neuron_layer.cpp
│ │ │ ├── pooling_layer.cpp
│ │ │ ├── pooling_layer.cu
│ │ │ ├── power_layer.cpp
│ │ │ ├── power_layer.cu
│ │ │ ├── prelu_layer.cpp
│ │ │ ├── prelu_layer.cu
│ │ │ ├── reduction_layer.cpp
│ │ │ ├── reduction_layer.cu
│ │ │ ├── relu_layer.cpp
│ │ │ ├── relu_layer.cu
│ │ │ ├── reshape_layer.cpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp
│ │ │ ├── sigmoid_cross_entropy_loss_layer.cu
│ │ │ ├── sigmoid_layer.cpp
│ │ │ ├── sigmoid_layer.cu
│ │ │ ├── silence_layer.cpp
│ │ │ ├── silence_layer.cu
│ │ │ ├── slice_layer.cpp
│ │ │ ├── slice_layer.cu
│ │ │ ├── softmax_layer.cpp
│ │ │ ├── softmax_layer.cu
│ │ │ ├── softmax_loss_layer.cpp
│ │ │ ├── softmax_loss_layer.cu
│ │ │ ├── split_layer.cpp
│ │ │ ├── split_layer.cu
│ │ │ ├── spp_layer.cpp
│ │ │ ├── tanh_layer.cpp
│ │ │ ├── tanh_layer.cu
│ │ │ ├── threshold_layer.cpp
│ │ │ ├── threshold_layer.cu
│ │ │ └── window_data_layer.cpp
│ │ ├── net.cpp
│ │ ├── parallel.cpp
│ │ ├── proto/
│ │ │ └── caffe.proto
│ │ ├── solver.cpp
│ │ ├── syncedmem.cpp
│ │ ├── test/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── test_accuracy_layer.cpp
│ │ │ ├── test_argmax_layer.cpp
│ │ │ ├── test_benchmark.cpp
│ │ │ ├── test_blob.cpp
│ │ │ ├── test_caffe_main.cpp
│ │ │ ├── test_common.cpp
│ │ │ ├── test_concat_layer.cpp
│ │ │ ├── test_contrastive_loss_layer.cpp
│ │ │ ├── test_convolution_layer.cpp
│ │ │ ├── test_data/
│ │ │ │ ├── generate_sample_data.py
│ │ │ │ ├── sample_data.h5
│ │ │ │ ├── sample_data_2_gzip.h5
│ │ │ │ ├── sample_data_list.txt
│ │ │ │ ├── solver_data.h5
│ │ │ │ └── solver_data_list.txt
│ │ │ ├── test_data_layer.cpp
│ │ │ ├── test_data_transformer.cpp
│ │ │ ├── test_db.cpp
│ │ │ ├── test_deconvolution_layer.cpp
│ │ │ ├── test_dummy_data_layer.cpp
│ │ │ ├── test_eltwise_layer.cpp
│ │ │ ├── test_euclidean_loss_layer.cpp
│ │ │ ├── test_filler.cpp
│ │ │ ├── test_filter_layer.cpp
│ │ │ ├── test_flatten_layer.cpp
│ │ │ ├── test_gradient_based_solver.cpp
│ │ │ ├── test_hdf5_output_layer.cpp
│ │ │ ├── test_hdf5data_layer.cpp
│ │ │ ├── test_hinge_loss_layer.cpp
│ │ │ ├── test_im2col_kernel.cu
│ │ │ ├── test_im2col_layer.cpp
│ │ │ ├── test_image_data_layer.cpp
│ │ │ ├── test_infogain_loss_layer.cpp
│ │ │ ├── test_inner_product_layer.cpp
│ │ │ ├── test_internal_thread.cpp
│ │ │ ├── test_io.cpp
│ │ │ ├── test_layer_factory.cpp
│ │ │ ├── test_lrn_layer.cpp
│ │ │ ├── test_math_functions.cpp
│ │ │ ├── test_maxpool_dropout_layers.cpp
│ │ │ ├── test_memory_data_layer.cpp
│ │ │ ├── test_multinomial_logistic_loss_layer.cpp
│ │ │ ├── test_mvn_layer.cpp
│ │ │ ├── test_net.cpp
│ │ │ ├── test_neuron_layer.cpp
│ │ │ ├── test_platform.cpp
│ │ │ ├── test_pooling_layer.cpp
│ │ │ ├── test_power_layer.cpp
│ │ │ ├── test_protobuf.cpp
│ │ │ ├── test_random_number_generator.cpp
│ │ │ ├── test_reduction_layer.cpp
│ │ │ ├── test_reshape_layer.cpp
│ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp
│ │ │ ├── test_slice_layer.cpp
│ │ │ ├── test_softmax_layer.cpp
│ │ │ ├── test_softmax_with_loss_layer.cpp
│ │ │ ├── test_solver.cpp
│ │ │ ├── test_split_layer.cpp
│ │ │ ├── test_spp_layer.cpp
│ │ │ ├── test_stochastic_pooling.cpp
│ │ │ ├── test_syncedmem.cpp
│ │ │ ├── test_tanh_layer.cpp
│ │ │ ├── test_threshold_layer.cpp
│ │ │ ├── test_upgrade_proto.cpp
│ │ │ └── test_util_blas.cpp
│ │ └── util/
│ │ ├── benchmark.cpp
│ │ ├── blocking_queue.cpp
│ │ ├── cudnn.cpp
│ │ ├── db.cpp
│ │ ├── db_leveldb.cpp
│ │ ├── db_lmdb.cpp
│ │ ├── hdf5.cpp
│ │ ├── im2col.cpp
│ │ ├── im2col.cu
│ │ ├── insert_splits.cpp
│ │ ├── io.cpp
│ │ ├── math_functions.cpp
│ │ ├── math_functions.cu
│ │ └── upgrade_proto.cpp
│ └── gtest/
│ ├── CMakeLists.txt
│ ├── gtest-all.cpp
│ ├── gtest.h
│ └── gtest_main.cc
└── tools/
└── caffe.cpp
Showing preview only (676K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2123 symbols across 176 files)
FILE: include/caffe/blob.hpp
type caffe (line 15) | namespace caffe {
class Blob (line 25) | class Blob {
method Blob (line 27) | Blob()
method string (line 55) | inline string shape_string() const {
method shape (line 72) | inline int shape(int index) const {
method num_axes (line 75) | inline int num_axes() const { return shape_.size(); }
method count (line 76) | inline int count() const { return count_; }
method count (line 86) | inline int count(int start_axis, int end_axis) const {
method count (line 104) | inline int count(int start_axis) const {
method CanonicalAxisIndex (line 119) | inline int CanonicalAxisIndex(int axis_index) const {
method num (line 133) | inline int num() const { return LegacyShape(0); }
method channels (line 135) | inline int channels() const { return LegacyShape(1); }
method height (line 137) | inline int height() const { return LegacyShape(2); }
method width (line 139) | inline int width() const { return LegacyShape(3); }
method LegacyShape (line 140) | inline int LegacyShape(int index) const {
method offset (line 154) | inline int offset(const int n, const int c = 0, const int h = 0,
method offset (line 167) | inline int offset(const vector<int>& indices) const {
method Dtype (line 192) | inline Dtype data_at(const int n, const int c, const int h,
method Dtype (line 197) | inline Dtype diff_at(const int n, const int c, const int h,
method Dtype (line 202) | inline Dtype data_at(const vector<int>& index) const {
method Dtype (line 206) | inline Dtype diff_at(const vector<int>& index) const {
FILE: include/caffe/common.hpp
type cv (line 69) | namespace cv { class Mat; }
class Mat (line 69) | class Mat
type caffe (line 71) | namespace caffe {
class Caffe (line 98) | class Caffe {
type Brew (line 107) | enum Brew { CPU, GPU }
class RNG (line 111) | class RNG {
class Generator (line 119) | class Generator
method RNG (line 124) | inline static RNG& rng_stream() {
class Generator (line 119) | class Generator
method cublasHandle_t (line 131) | inline static cublasHandle_t cublas_handle() { return Get().cublas_h...
method curandGenerator_t (line 132) | inline static curandGenerator_t curand_generator() {
method Brew (line 138) | inline static Brew mode() { return Get().mode_; }
method set_mode (line 144) | inline static void set_mode(Brew mode) { Get().mode_ = mode; }
method solver_count (line 153) | inline static int solver_count() { return Get().solver_count_; }
method set_solver_count (line 154) | inline static void set_solver_count(int val) { Get().solver_count_ =...
method root_solver (line 155) | inline static bool root_solver() { return Get().root_solver_; }
method set_root_solver (line 156) | inline static void set_root_solver(bool val) { Get().root_solver_ = ...
FILE: include/caffe/common_layers.hpp
type caffe (line 16) | namespace caffe {
class ArgMaxLayer (line 29) | class ArgMaxLayer : public Layer<Dtype> {
method ArgMaxLayer (line 39) | explicit ArgMaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 47) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 48) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Backward_cpu (line 65) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
class ConcatLayer (line 78) | class ConcatLayer : public Layer<Dtype> {
method ConcatLayer (line 80) | explicit ConcatLayer(const LayerParameter& param)
method MinBottomBlobs (line 88) | virtual inline int MinBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 89) | virtual inline int ExactNumTopBlobs() const { return 1; }
class EltwiseLayer (line 153) | class EltwiseLayer : public Layer<Dtype> {
method EltwiseLayer (line 155) | explicit EltwiseLayer(const LayerParameter& param)
method MinBottomBlobs (line 163) | virtual inline int MinBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 164) | virtual inline int ExactNumTopBlobs() const { return 1; }
class FilterLayer (line 190) | class FilterLayer : public Layer<Dtype> {
method FilterLayer (line 192) | explicit FilterLayer(const LayerParameter& param)
method MinBottomBlobs (line 200) | virtual inline int MinBottomBlobs() const { return 2; }
method MinTopBlobs (line 201) | virtual inline int MinTopBlobs() const { return 1; }
class FlattenLayer (line 257) | class FlattenLayer : public Layer<Dtype> {
method FlattenLayer (line 259) | explicit FlattenLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 265) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 266) | virtual inline int ExactNumTopBlobs() const { return 1; }
class InnerProductLayer (line 300) | class InnerProductLayer : public Layer<Dtype> {
method InnerProductLayer (line 302) | explicit InnerProductLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 310) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 311) | virtual inline int ExactNumTopBlobs() const { return 1; }
class CInnerProductLayer (line 338) | class CInnerProductLayer : public Layer<Dtype> {
method CInnerProductLayer (line 340) | explicit CInnerProductLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 348) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 349) | virtual inline int ExactNumTopBlobs() const { return 1; }
class MVNLayer (line 385) | class MVNLayer : public Layer<Dtype> {
method MVNLayer (line 387) | explicit MVNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 393) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 394) | virtual inline int ExactNumTopBlobs() const { return 1; }
class ReshapeLayer (line 420) | class ReshapeLayer : public Layer<Dtype> {
method ReshapeLayer (line 422) | explicit ReshapeLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 430) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 431) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Forward_cpu (line 434) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 436) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Forward_gpu (line 438) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 440) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class ReductionLayer (line 459) | class ReductionLayer : public Layer<Dtype> {
method ReductionLayer (line 461) | explicit ReductionLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 469) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 470) | virtual inline int ExactNumTopBlobs() const { return 1; }
class SilenceLayer (line 501) | class SilenceLayer : public Layer<Dtype> {
method SilenceLayer (line 503) | explicit SilenceLayer(const LayerParameter& param)
method Reshape (line 505) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method MinBottomBlobs (line 509) | virtual inline int MinBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 510) | virtual inline int ExactNumTopBlobs() const { return 0; }
method Forward_cpu (line 513) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
class SoftmaxLayer (line 531) | class SoftmaxLayer : public Layer<Dtype> {
method SoftmaxLayer (line 533) | explicit SoftmaxLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 539) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 540) | virtual inline int ExactNumTopBlobs() const { return 1; }
class CuDNNSoftmaxLayer (line 567) | class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> {
method CuDNNSoftmaxLayer (line 569) | explicit CuDNNSoftmaxLayer(const LayerParameter& param)
class SplitLayer (line 597) | class SplitLayer : public Layer<Dtype> {
method SplitLayer (line 599) | explicit SplitLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 605) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 606) | virtual inline int MinTopBlobs() const { return 1; }
class SliceLayer (line 628) | class SliceLayer : public Layer<Dtype> {
method SliceLayer (line 630) | explicit SliceLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 638) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 639) | virtual inline int MinTopBlobs() const { return 2; }
FILE: include/caffe/data_layers.hpp
type caffe (line 21) | namespace caffe {
class BaseDataLayer (line 29) | class BaseDataLayer : public Layer<Dtype> {
method ShareInParallel (line 38) | virtual inline bool ShareInParallel() const { return true; }
method DataLayerSetUp (line 39) | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 42) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 45) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 47) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class Batch (line 57) | class Batch {
class BasePrefetchingDataLayer (line 63) | class BasePrefetchingDataLayer :
class DataLayer (line 93) | class DataLayer : public BasePrefetchingDataLayer<Dtype> {
method ShareInParallel (line 100) | virtual inline bool ShareInParallel() const { return false; }
method ExactNumBottomBlobs (line 102) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 103) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 104) | virtual inline int MaxTopBlobs() const { return 2; }
class DummyDataLayer (line 118) | class DummyDataLayer : public Layer<Dtype> {
method DummyDataLayer (line 120) | explicit DummyDataLayer(const LayerParameter& param)
method ShareInParallel (line 125) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 127) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 131) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 132) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 137) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 139) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class HDF5DataLayer (line 152) | class HDF5DataLayer : public Layer<Dtype> {
method HDF5DataLayer (line 154) | explicit HDF5DataLayer(const LayerParameter& param)
method ShareInParallel (line 160) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 162) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 166) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method MinTopBlobs (line 167) | virtual inline int MinTopBlobs() const { return 1; }
method Backward_cpu (line 174) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
method Backward_gpu (line 176) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
class HDF5OutputLayer (line 195) | class HDF5OutputLayer : public Layer<Dtype> {
method HDF5OutputLayer (line 197) | explicit HDF5OutputLayer(const LayerParameter& param)
method ShareInParallel (line 203) | virtual inline bool ShareInParallel() const { return true; }
method Reshape (line 205) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ExactNumBottomBlobs (line 210) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 211) | virtual inline int ExactNumTopBlobs() const { return 0; }
method file_name (line 213) | inline std::string file_name() const { return file_name_; }
class ImageDataLayer (line 239) | class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> {
method ImageDataLayer (line 241) | explicit ImageDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 248) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 249) | virtual inline int ExactNumTopBlobs() const { return 2; }
class MemoryDataLayer (line 266) | class MemoryDataLayer : public BaseDataLayer<Dtype> {
method MemoryDataLayer (line 268) | explicit MemoryDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 274) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 275) | virtual inline int ExactNumTopBlobs() const { return 2; }
method batch_size (line 286) | int batch_size() { return batch_size_; }
method channels (line 287) | int channels() { return channels_; }
method height (line 288) | int height() { return height_; }
method width (line 289) | int width() { return width_; }
class WindowDataLayer (line 312) | class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
method WindowDataLayer (line 314) | explicit WindowDataLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 321) | virtual inline int ExactNumBottomBlobs() const { return 0; }
method ExactNumTopBlobs (line 322) | virtual inline int ExactNumTopBlobs() const { return 2; }
type WindowField (line 330) | enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }
FILE: include/caffe/data_reader.hpp
type caffe (line 13) | namespace caffe {
class DataReader (line 23) | class DataReader {
class QueuePair (line 37) | class QueuePair {
class Body (line 49) | class Body : public InternalThread {
method string (line 68) | static inline string source_key(const LayerParameter& param) {
FILE: include/caffe/data_transformer.hpp
type caffe (line 10) | namespace caffe {
class DataTransformer (line 17) | class DataTransformer {
FILE: include/caffe/filler.hpp
type caffe (line 16) | namespace caffe {
class Filler (line 20) | class Filler {
method Filler (line 22) | explicit Filler(const FillerParameter& param) : filler_param_(param) {}
class ConstantFiller (line 32) | class ConstantFiller : public Filler<Dtype> {
method ConstantFiller (line 34) | explicit ConstantFiller(const FillerParameter& param)
method Fill (line 36) | virtual void Fill(Blob<Dtype>* blob) {
class UniformFiller (line 51) | class UniformFiller : public Filler<Dtype> {
method UniformFiller (line 53) | explicit UniformFiller(const FillerParameter& param)
method Fill (line 55) | virtual void Fill(Blob<Dtype>* blob) {
class GaussianFiller (line 66) | class GaussianFiller : public Filler<Dtype> {
method GaussianFiller (line 68) | explicit GaussianFiller(const FillerParameter& param)
method Fill (line 70) | virtual void Fill(Blob<Dtype>* blob) {
class PositiveUnitballFiller (line 102) | class PositiveUnitballFiller : public Filler<Dtype> {
method PositiveUnitballFiller (line 104) | explicit PositiveUnitballFiller(const FillerParameter& param)
method Fill (line 106) | virtual void Fill(Blob<Dtype>* blob) {
class XavierFiller (line 145) | class XavierFiller : public Filler<Dtype> {
method XavierFiller (line 147) | explicit XavierFiller(const FillerParameter& param)
method Fill (line 149) | virtual void Fill(Blob<Dtype>* blob) {
class MSRAFiller (line 187) | class MSRAFiller : public Filler<Dtype> {
method MSRAFiller (line 189) | explicit MSRAFiller(const FillerParameter& param)
method Fill (line 191) | virtual void Fill(Blob<Dtype>* blob) {
class BilinearFiller (line 245) | class BilinearFiller : public Filler<Dtype> {
method BilinearFiller (line 247) | explicit BilinearFiller(const FillerParameter& param)
method Fill (line 249) | virtual void Fill(Blob<Dtype>* blob) {
FILE: include/caffe/internal_thread.hpp
type boost (line 10) | namespace boost { class thread; }
class thread (line 10) | class thread
type caffe (line 12) | namespace caffe {
class InternalThread (line 19) | class InternalThread {
method InternalThread (line 21) | InternalThread() : thread_() {}
method InternalThreadEntry (line 39) | virtual void InternalThreadEntry() {}
FILE: include/caffe/layer.hpp
type boost (line 18) | namespace boost { class mutex; }
class mutex (line 18) | class mutex
type caffe (line 20) | namespace caffe {
class Layer (line 33) | class Layer {
method Layer (line 40) | explicit Layer(const LayerParameter& param)
method SetUp (line 67) | void SetUp(const vector<Blob<Dtype>*>& bottom,
method LayerSetUp (line 92) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method ShareInParallel (line 101) | virtual inline bool ShareInParallel() const { return false; }
method IsShared (line 107) | inline bool IsShared() const { return is_shared_; }
method SetShared (line 113) | inline void SetShared(bool is_shared) {
method LayerParameter (line 189) | const LayerParameter& layer_param() const { return layer_param_; }
method Dtype (line 199) | inline Dtype loss(const int top_index) const {
method set_loss (line 206) | inline void set_loss(const int top_index, const Dtype value) {
method ExactNumBottomBlobs (line 225) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 233) | virtual inline int MinBottomBlobs() const { return -1; }
method MaxBottomBlobs (line 241) | virtual inline int MaxBottomBlobs() const { return -1; }
method ExactNumTopBlobs (line 249) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 257) | virtual inline int MinTopBlobs() const { return -1; }
method MaxTopBlobs (line 265) | virtual inline int MaxTopBlobs() const { return -1; }
method EqualNumBottomTopBlobs (line 273) | virtual inline bool EqualNumBottomTopBlobs() const { return false; }
method AutoTopBlobs (line 283) | virtual inline bool AutoTopBlobs() const { return false; }
method AllowForceBackward (line 293) | virtual inline bool AllowForceBackward(const int bottom_index) const {
method param_propagate_down (line 304) | inline bool param_propagate_down(const int param_id) {
method set_param_propagate_down (line 312) | inline void set_param_propagate_down(const int param_id, const bool ...
method set_current_iter_num (line 319) | inline void set_current_iter_num(const int iter_num) {
method Forward_gpu (line 346) | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
method Backward_gpu (line 364) | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
method CheckBlobCounts (line 376) | virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
method SetLossWeights (line 419) | inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
function Dtype (line 456) | inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
FILE: include/caffe/layer_factory.hpp
type caffe (line 49) | namespace caffe {
class Layer (line 52) | class Layer
class LayerRegistry (line 55) | class LayerRegistry {
method CreatorRegistry (line 60) | static CreatorRegistry& Registry() {
method AddCreator (line 66) | static void AddCreator(const string& type, Creator creator) {
method CreateLayer (line 74) | static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& p...
method LayerTypeList (line 85) | static vector<string> LayerTypeList() {
method LayerRegistry (line 98) | LayerRegistry() {}
method string (line 100) | static string LayerTypeListString() {
class LayerRegisterer (line 116) | class LayerRegisterer {
method LayerRegisterer (line 118) | LayerRegisterer(const string& type,
FILE: include/caffe/loss_layers.hpp
type caffe (line 14) | namespace caffe {
class AccuracyLayer (line 23) | class AccuracyLayer : public Layer<Dtype> {
method AccuracyLayer (line 33) | explicit AccuracyLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 41) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method ExactNumTopBlobs (line 42) | virtual inline int ExactNumTopBlobs() const { return 1; }
method Backward_cpu (line 74) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
class LossLayer (line 100) | class LossLayer : public Layer<Dtype> {
method LossLayer (line 102) | explicit LossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 109) | virtual inline int ExactNumBottomBlobs() const { return 2; }
method AutoTopBlobs (line 117) | virtual inline bool AutoTopBlobs() const { return true; }
method ExactNumTopBlobs (line 118) | virtual inline int ExactNumTopBlobs() const { return 1; }
method AllowForceBackward (line 123) | virtual inline bool AllowForceBackward(const int bottom_index) const {
class ContrastiveLossLayer (line 153) | class ContrastiveLossLayer : public LossLayer<Dtype> {
method ContrastiveLossLayer (line 155) | explicit ContrastiveLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 160) | virtual inline int ExactNumBottomBlobs() const { return 3; }
method AllowForceBackward (line 166) | virtual inline bool AllowForceBackward(const int bottom_index) const {
class EuclideanLossLayer (line 240) | class EuclideanLossLayer : public LossLayer<Dtype> {
method EuclideanLossLayer (line 242) | explicit EuclideanLossLayer(const LayerParameter& param)
method AllowForceBackward (line 252) | virtual inline bool AllowForceBackward(const int bottom_index) const {
class HingeLossLayer (line 348) | class HingeLossLayer : public LossLayer<Dtype> {
method HingeLossLayer (line 350) | explicit HingeLossLayer(const LayerParameter& param)
class InfogainLossLayer (line 424) | class InfogainLossLayer : public LossLayer<Dtype> {
method InfogainLossLayer (line 426) | explicit InfogainLossLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 436) | virtual inline int ExactNumBottomBlobs() const { return -1; }
method MinBottomBlobs (line 437) | virtual inline int MinBottomBlobs() const { return 2; }
method MaxBottomBlobs (line 438) | virtual inline int MaxBottomBlobs() const { return 3; }
class MultinomialLogisticLossLayer (line 515) | class MultinomialLogisticLossLayer : public LossLayer<Dtype> {
method MultinomialLogisticLossLayer (line 517) | explicit MultinomialLogisticLossLayer(const LayerParameter& param)
class SigmoidCrossEntropyLossLayer (line 591) | class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
method SigmoidCrossEntropyLossLayer (line 593) | explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param)
class SoftmaxLayer (line 655) | class SoftmaxLayer
class SoftmaxWithLossLayer (line 686) | class SoftmaxWithLossLayer : public LossLayer<Dtype> {
method SoftmaxWithLossLayer (line 696) | explicit SoftmaxWithLossLayer(const LayerParameter& param)
method ExactNumTopBlobs (line 704) | virtual inline int ExactNumTopBlobs() const { return -1; }
method MinTopBlobs (line 705) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 706) | virtual inline int MaxTopBlobs() const { return 2; }
FILE: include/caffe/net.hpp
type caffe (line 15) | namespace caffe {
class Net (line 24) | class Net {
method Dtype (line 85) | Dtype ForwardBackward(const vector<Blob<Dtype>* > & bottom) {
method string (line 123) | inline const string& name() const { return name_; }
method Phase (line 137) | inline Phase phase() const { return phase_; }
method num_inputs (line 183) | inline int num_inputs() const { return net_input_blobs_.size(); }
method num_outputs (line 184) | inline int num_outputs() const { return net_output_blobs_.size(); }
method set_current_iter_num (line 198) | inline void set_current_iter_num(const int iter_num) {
method set_debug_info (line 210) | void set_debug_info(const bool value) { debug_info_ = value; }
FILE: include/caffe/neuron_layers.hpp
type caffe (line 16) | namespace caffe {
class NeuronLayer (line 25) | class NeuronLayer : public Layer<Dtype> {
method NeuronLayer (line 27) | explicit NeuronLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 32) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 33) | virtual inline int ExactNumTopBlobs() const { return 1; }
class AbsValLayer (line 47) | class AbsValLayer : public NeuronLayer<Dtype> {
method AbsValLayer (line 49) | explicit AbsValLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 55) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 56) | virtual inline int ExactNumTopBlobs() const { return 1; }
class BNLLLayer (line 106) | class BNLLLayer : public NeuronLayer<Dtype> {
method BNLLLayer (line 108) | explicit BNLLLayer(const LayerParameter& param)
class DropoutLayer (line 154) | class DropoutLayer : public NeuronLayer<Dtype> {
method DropoutLayer (line 162) | explicit DropoutLayer(const LayerParameter& param)
class ExpLayer (line 212) | class ExpLayer : public NeuronLayer<Dtype> {
method ExpLayer (line 222) | explicit ExpLayer(const LayerParameter& param)
class LogLayer (line 276) | class LogLayer : public NeuronLayer<Dtype> {
method LogLayer (line 286) | explicit LogLayer(const LayerParameter& param)
class PowerLayer (line 342) | class PowerLayer : public NeuronLayer<Dtype> {
method PowerLayer (line 351) | explicit PowerLayer(const LayerParameter& param)
class ReLULayer (line 414) | class ReLULayer : public NeuronLayer<Dtype> {
method ReLULayer (line 422) | explicit ReLULayer(const LayerParameter& param)
class CuDNNReLULayer (line 483) | class CuDNNReLULayer : public ReLULayer<Dtype> {
method CuDNNReLULayer (line 485) | explicit CuDNNReLULayer(const LayerParameter& param)
class SigmoidLayer (line 515) | class SigmoidLayer : public NeuronLayer<Dtype> {
method SigmoidLayer (line 517) | explicit SigmoidLayer(const LayerParameter& param)
class CuDNNSigmoidLayer (line 566) | class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
method CuDNNSigmoidLayer (line 568) | explicit CuDNNSigmoidLayer(const LayerParameter& param)
class TanHLayer (line 598) | class TanHLayer : public NeuronLayer<Dtype> {
method TanHLayer (line 600) | explicit TanHLayer(const LayerParameter& param)
class CuDNNTanHLayer (line 651) | class CuDNNTanHLayer : public TanHLayer<Dtype> {
method CuDNNTanHLayer (line 653) | explicit CuDNNTanHLayer(const LayerParameter& param)
class ThresholdLayer (line 679) | class ThresholdLayer : public NeuronLayer<Dtype> {
method ThresholdLayer (line 687) | explicit ThresholdLayer(const LayerParameter& param)
method Backward_cpu (line 714) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
class PReLULayer (line 731) | class PReLULayer : public NeuronLayer<Dtype> {
method PReLULayer (line 741) | explicit PReLULayer(const LayerParameter& param)
FILE: include/caffe/parallel.hpp
type caffe (line 17) | namespace caffe {
class Params (line 23) | class Params {
method size (line 29) | inline size_t size() const {
method Dtype (line 32) | inline Dtype* data() const {
method Dtype (line 35) | inline Dtype* diff() const {
class GPUParams (line 49) | class GPUParams : public Params<Dtype> {
class DevicePair (line 62) | class DevicePair {
method DevicePair (line 64) | DevicePair(int parent, int device)
method parent (line 68) | inline int parent() {
method device (line 71) | inline int device() {
class P2PSync (line 85) | class P2PSync : public GPUParams<Dtype>, public Solver<Dtype>::Callback,
FILE: include/caffe/python_layer.hpp
type caffe (line 11) | namespace caffe {
class PythonLayer (line 14) | class PythonLayer : public Layer<Dtype> {
method PythonLayer (line 16) | PythonLayer(PyObject* self, const LayerParameter& param)
method LayerSetUp (line 19) | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
method Reshape (line 25) | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
method ShareInParallel (line 30) | virtual inline bool ShareInParallel() const {
method Forward_cpu (line 37) | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
method Backward_cpu (line 41) | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
FILE: include/caffe/solver.hpp
type caffe (line 9) | namespace caffe {
class Solver (line 18) | class Solver {
method Solve (line 29) | inline void Solve(const string resume_file) { Solve(resume_file.c_st...
method SolverParameter (line 36) | inline const SolverParameter& param() const { return param_; }
method net (line 37) | inline shared_ptr<Net<Dtype> > net() { return net_; }
method iter (line 41) | int iter() { return iter_; }
class Callback (line 44) | class Callback {
method add_callback (line 53) | void add_callback(Callback* value) {
class WorkerSolver (line 95) | class WorkerSolver : public Solver<Dtype> {
method WorkerSolver (line 97) | explicit WorkerSolver(const SolverParameter& param,
method ApplyUpdate (line 102) | void ApplyUpdate() {}
method SnapshotSolverState (line 103) | void SnapshotSolverState(const string& model_filename) {
method RestoreSolverStateFromBinaryProto (line 106) | void RestoreSolverStateFromBinaryProto(const string& state_file) {
method RestoreSolverStateFromHDF5 (line 109) | void RestoreSolverStateFromHDF5(const string& state_file) {
class SGDSolver (line 119) | class SGDSolver : public Solver<Dtype> {
method SGDSolver (line 121) | explicit SGDSolver(const SolverParameter& param)
method SGDSolver (line 123) | explicit SGDSolver(const string& param_file)
class NesterovSolver (line 151) | class NesterovSolver : public SGDSolver<Dtype> {
method NesterovSolver (line 153) | explicit NesterovSolver(const SolverParameter& param)
method NesterovSolver (line 155) | explicit NesterovSolver(const string& param_file)
class AdaGradSolver (line 165) | class AdaGradSolver : public SGDSolver<Dtype> {
method AdaGradSolver (line 167) | explicit AdaGradSolver(const SolverParameter& param)
method AdaGradSolver (line 169) | explicit AdaGradSolver(const string& param_file)
method constructor_sanity_check (line 174) | void constructor_sanity_check() {
class RMSPropSolver (line 184) | class RMSPropSolver : public SGDSolver<Dtype> {
method RMSPropSolver (line 186) | explicit RMSPropSolver(const SolverParameter& param)
method RMSPropSolver (line 188) | explicit RMSPropSolver(const string& param_file)
method constructor_sanity_check (line 193) | void constructor_sanity_check() {
class AdaDeltaSolver (line 206) | class AdaDeltaSolver : public SGDSolver<Dtype> {
method AdaDeltaSolver (line 208) | explicit AdaDeltaSolver(const SolverParameter& param)
method AdaDeltaSolver (line 210) | explicit AdaDeltaSolver(const string& param_file)
class AdamSolver (line 229) | class AdamSolver : public SGDSolver<Dtype> {
method AdamSolver (line 231) | explicit AdamSolver(const SolverParameter& param)
method AdamSolver (line 233) | explicit AdamSolver(const string& param_file)
FILE: include/caffe/syncedmem.hpp
type caffe (line 9) | namespace caffe {
function CaffeMallocHost (line 16) | inline void CaffeMallocHost(void** ptr, size_t size) {
function CaffeFreeHost (line 27) | inline void CaffeFreeHost(void* ptr) {
class SyncedMemory (line 44) | class SyncedMemory {
method SyncedMemory (line 46) | SyncedMemory()
method SyncedMemory (line 49) | explicit SyncedMemory(size_t size)
type SyncedHead (line 59) | enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }
method SyncedHead (line 60) | SyncedHead head() { return head_; }
method size (line 61) | size_t size() { return size_; }
FILE: include/caffe/test/test_caffe_main.hpp
type caffe (line 28) | namespace caffe {
class MultiDeviceTest (line 31) | class MultiDeviceTest : public ::testing::Test {
method MultiDeviceTest (line 35) | MultiDeviceTest() {
type CPUDevice (line 44) | struct CPUDevice {
class CPUDeviceTest (line 50) | class CPUDeviceTest : public MultiDeviceTest<CPUDevice<Dtype> > {
type GPUDevice (line 61) | struct GPUDevice {
class GPUDeviceTest (line 67) | class GPUDeviceTest : public MultiDeviceTest<GPUDevice<Dtype> > {
FILE: include/caffe/test/test_gradient_check_util.hpp
type caffe (line 14) | namespace caffe {
class GradientChecker (line 19) | class GradientChecker {
method GradientChecker (line 24) | GradientChecker(const Dtype stepsize, const Dtype threshold,
method CheckGradient (line 33) | void CheckGradient(Layer<Dtype>* layer, const vector<Blob<Dtype>*>& ...
function Dtype (line 227) | Dtype GradientChecker<Dtype>::GetObjAndGradient(const Layer<Dtype>& la...
FILE: include/caffe/util/benchmark.hpp
type caffe (line 8) | namespace caffe {
class Timer (line 10) | class Timer {
method initted (line 20) | inline bool initted() { return initted_; }
method running (line 21) | inline bool running() { return running_; }
method has_run_at_least_once (line 22) | inline bool has_run_at_least_once() { return has_run_at_least_once_; }
class CPUTimer (line 40) | class CPUTimer : public Timer {
FILE: include/caffe/util/blocking_queue.hpp
type caffe (line 9) | namespace caffe {
class BlockingQueue (line 12) | class BlockingQueue {
class sync (line 37) | class sync
FILE: include/caffe/util/cudnn.hpp
type caffe (line 45) | namespace caffe {
type cudnn (line 47) | namespace cudnn {
class dataType (line 49) | class dataType
class dataType<float> (line 50) | class dataType<float> {
class dataType<double> (line 56) | class dataType<double> {
function createTensor4dDesc (line 64) | inline void createTensor4dDesc(cudnnTensorDescriptor_t* desc) {
function setTensor4dDesc (line 69) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function setTensor4dDesc (line 77) | inline void setTensor4dDesc(cudnnTensorDescriptor_t* desc,
function createFilterDesc (line 88) | inline void createFilterDesc(cudnnFilterDescriptor_t* desc,
function createConvolutionDesc (line 96) | inline void createConvolutionDesc(cudnnConvolutionDescriptor_t* conv) {
function setConvolutionDesc (line 101) | inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv,
function createPoolingDesc (line 109) | inline void createPoolingDesc(cudnnPoolingDescriptor_t* pool_desc,
FILE: include/caffe/util/db.hpp
type caffe (line 9) | namespace caffe { namespace db {
type db (line 9) | namespace db {
type Mode (line 11) | enum Mode { READ, WRITE, NEW }
class Cursor (line 13) | class Cursor {
method Cursor (line 15) | Cursor() { }
class Transaction (line 26) | class Transaction {
method Transaction (line 28) | Transaction() { }
class DB (line 36) | class DB {
method DB (line 38) | DB() { }
FILE: include/caffe/util/db_leveldb.hpp
type caffe (line 11) | namespace caffe { namespace db {
type db (line 11) | namespace db {
class LevelDBCursor (line 13) | class LevelDBCursor : public Cursor {
method LevelDBCursor (line 15) | explicit LevelDBCursor(leveldb::Iterator* iter)
method SeekToFirst (line 18) | virtual void SeekToFirst() { iter_->SeekToFirst(); }
method Next (line 19) | virtual void Next() { iter_->Next(); }
method string (line 20) | virtual string key() { return iter_->key().ToString(); }
method string (line 21) | virtual string value() { return iter_->value().ToString(); }
method valid (line 22) | virtual bool valid() { return iter_->Valid(); }
class LevelDBTransaction (line 28) | class LevelDBTransaction : public Transaction {
method LevelDBTransaction (line 30) | explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOT...
method Put (line 31) | virtual void Put(const string& key, const string& value) {
method Commit (line 34) | virtual void Commit() {
class LevelDB (line 47) | class LevelDB : public DB {
method LevelDB (line 49) | LevelDB() : db_(NULL) { }
method Close (line 52) | virtual void Close() {
method LevelDBCursor (line 58) | virtual LevelDBCursor* NewCursor() {
method LevelDBTransaction (line 61) | virtual LevelDBTransaction* NewTransaction() {
FILE: include/caffe/util/db_lmdb.hpp
type caffe (line 10) | namespace caffe { namespace db {
type db (line 10) | namespace db {
function MDB_CHECK (line 12) | inline void MDB_CHECK(int mdb_status) {
class LMDBCursor (line 16) | class LMDBCursor : public Cursor {
method LMDBCursor (line 18) | explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor)
method SeekToFirst (line 26) | virtual void SeekToFirst() { Seek(MDB_FIRST); }
method Next (line 27) | virtual void Next() { Seek(MDB_NEXT); }
method string (line 28) | virtual string key() {
method string (line 31) | virtual string value() {
method valid (line 35) | virtual bool valid() { return valid_; }
method Seek (line 38) | void Seek(MDB_cursor_op op) {
class LMDBTransaction (line 54) | class LMDBTransaction : public Transaction {
method LMDBTransaction (line 56) | explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn)
method Commit (line 59) | virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); }
class LMDB (line 68) | class LMDB : public DB {
method LMDB (line 70) | LMDB() : mdb_env_(NULL) { }
method Close (line 73) | virtual void Close() {
FILE: include/caffe/util/device_alternate.hpp
type caffe (line 78) | namespace caffe {
function CAFFE_GET_BLOCKS (line 94) | inline int CAFFE_GET_BLOCKS(const int N) {
FILE: include/caffe/util/hdf5.hpp
type caffe (line 11) | namespace caffe {
FILE: include/caffe/util/im2col.hpp
type caffe (line 4) | namespace caffe {
FILE: include/caffe/util/insert_splits.hpp
type caffe (line 8) | namespace caffe {
FILE: include/caffe/util/io.hpp
type caffe (line 13) | namespace caffe {
function MakeTempFilename (line 17) | inline void MakeTempFilename(string* temp_filename) {
function MakeTempDir (line 30) | inline void MakeTempDir(string* temp_dirname) {
function ReadProtoFromTextFile (line 45) | inline bool ReadProtoFromTextFile(const string& filename, Message* pro...
function ReadProtoFromTextFileOrDie (line 49) | inline void ReadProtoFromTextFileOrDie(const char* filename, Message* ...
function ReadProtoFromTextFileOrDie (line 53) | inline void ReadProtoFromTextFileOrDie(const string& filename, Message...
function WriteProtoToTextFile (line 58) | inline void WriteProtoToTextFile(const Message& proto, const string& f...
function ReadProtoFromBinaryFile (line 64) | inline bool ReadProtoFromBinaryFile(const string& filename, Message* p...
function ReadProtoFromBinaryFileOrDie (line 68) | inline void ReadProtoFromBinaryFileOrDie(const char* filename, Message...
function ReadProtoFromBinaryFileOrDie (line 72) | inline void ReadProtoFromBinaryFileOrDie(const string& filename,
function WriteProtoToBinaryFile (line 79) | inline void WriteProtoToBinaryFile(
function ReadFileToDatum (line 86) | inline bool ReadFileToDatum(const string& filename, Datum* datum) {
function ReadImageToDatum (line 94) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 100) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 105) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 110) | inline bool ReadImageToDatum(const string& filename, const int label,
function ReadImageToDatum (line 115) | inline bool ReadImageToDatum(const string& filename, const int label,
FILE: include/caffe/util/math_functions.hpp
type caffe (line 13) | namespace caffe {
function caffe_memset (line 42) | inline void caffe_memset(const size_t N, const int alpha, void* X) {
function caffe_sign (line 114) | inline int8_t caffe_sign(Dtype val) {
function caffe_gpu_memset (line 177) | inline void caffe_gpu_memset(const size_t N, const int alpha, void* X) {
FILE: include/caffe/util/mkl_alternate.hpp
function cblas_saxpby (line 83) | inline void cblas_saxpby(const int N, const float alpha, const float* X,
function cblas_daxpby (line 89) | inline void cblas_daxpby(const int N, const double alpha, const double* X,
FILE: include/caffe/util/rng.hpp
type caffe (line 12) | namespace caffe {
function rng_t (line 16) | inline rng_t* caffe_rng() {
function shuffle (line 22) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
function shuffle (line 38) | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator e...
FILE: include/caffe/util/upgrade_proto.hpp
type caffe (line 8) | namespace caffe {
FILE: include/caffe/vision_layers.hpp
type caffe (line 17) | namespace caffe {
class BaseConvolutionLayer (line 24) | class BaseConvolutionLayer : public Layer<Dtype> {
method BaseConvolutionLayer (line 26) | explicit BaseConvolutionLayer(const LayerParameter& param)
method MinBottomBlobs (line 33) | virtual inline int MinBottomBlobs() const { return 1; }
method MinTopBlobs (line 34) | virtual inline int MinTopBlobs() const { return 1; }
method EqualNumBottomTopBlobs (line 35) | virtual inline bool EqualNumBottomTopBlobs() const { return true; }
method conv_im2col_cpu (line 81) | inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_cpu (line 85) | inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
method conv_im2col_gpu (line 90) | inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
method conv_col2im_gpu (line 94) | inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
class ConvolutionLayer (line 131) | class ConvolutionLayer : public BaseConvolutionLayer<Dtype> {
method ConvolutionLayer (line 161) | explicit ConvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 175) | virtual inline bool reverse_dimensions() { return false; }
class CConvolutionLayer (line 185) | class CConvolutionLayer : public BaseConvolutionLayer<Dtype> {
method CConvolutionLayer (line 188) | explicit CConvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 203) | virtual inline bool reverse_dimensions() { return false; }
class DeconvolutionLayer (line 234) | class DeconvolutionLayer : public BaseConvolutionLayer<Dtype> {
method DeconvolutionLayer (line 236) | explicit DeconvolutionLayer(const LayerParameter& param)
method reverse_dimensions (line 250) | virtual inline bool reverse_dimensions() { return true; }
class CuDNNConvolutionLayer (line 270) | class CuDNNConvolutionLayer : public ConvolutionLayer<Dtype> {
method CuDNNConvolutionLayer (line 272) | explicit CuDNNConvolutionLayer(const LayerParameter& param)
class Im2colLayer (line 307) | class Im2colLayer : public Layer<Dtype> {
method Im2colLayer (line 309) | explicit Im2colLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 317) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 318) | virtual inline int ExactNumTopBlobs() const { return 1; }
class PoolingLayer (line 338) | class PoolingLayer
method PoolingLayer (line 425) | explicit PoolingLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 433) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 434) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 437) | virtual inline int MaxTopBlobs() const {
class SplitLayer (line 339) | class SplitLayer
class LRNLayer (line 347) | class LRNLayer : public Layer<Dtype> {
method LRNLayer (line 349) | explicit LRNLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 357) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method ExactNumTopBlobs (line 358) | virtual inline int ExactNumTopBlobs() const { return 1; }
class PoolingLayer (line 423) | class PoolingLayer : public Layer<Dtype> {
method PoolingLayer (line 425) | explicit PoolingLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 433) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 434) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 437) | virtual inline int MaxTopBlobs() const {
class CuDNNPoolingLayer (line 469) | class CuDNNPoolingLayer : public PoolingLayer<Dtype> {
method CuDNNPoolingLayer (line 471) | explicit CuDNNPoolingLayer(const LayerParameter& param)
method MinTopBlobs (line 479) | virtual inline int MinTopBlobs() const { return -1; }
method ExactNumTopBlobs (line 480) | virtual inline int ExactNumTopBlobs() const { return 1; }
class SPPLayer (line 503) | class SPPLayer : public Layer<Dtype> {
method SPPLayer (line 505) | explicit SPPLayer(const LayerParameter& param)
method ExactNumBottomBlobs (line 513) | virtual inline int ExactNumBottomBlobs() const { return 1; }
method MinTopBlobs (line 514) | virtual inline int MinTopBlobs() const { return 1; }
method MaxTopBlobs (line 517) | virtual inline int MaxTopBlobs() const {
FILE: src/caffe/blob.cpp
type caffe (line 9) | namespace caffe {
function Dtype (line 71) | const Dtype* Blob<Dtype>::cpu_data() const {
function Dtype (line 83) | const Dtype* Blob<Dtype>::gpu_data() const {
function Dtype (line 89) | const Dtype* Blob<Dtype>::cpu_diff() const {
function Dtype (line 95) | const Dtype* Blob<Dtype>::gpu_diff() const {
function Dtype (line 101) | Dtype* Blob<Dtype>::mutable_cpu_data() {
function Dtype (line 107) | Dtype* Blob<Dtype>::mutable_gpu_data() {
function Dtype (line 113) | Dtype* Blob<Dtype>::mutable_cpu_diff() {
function Dtype (line 119) | Dtype* Blob<Dtype>::mutable_gpu_diff() {
function Dtype (line 179) | Dtype Blob<Dtype>::asum_data() const {
function Dtype (line 214) | Dtype Blob<Dtype>::asum_diff() const {
function Dtype (line 249) | Dtype Blob<Dtype>::sumsq_data() const {
function Dtype (line 286) | Dtype Blob<Dtype>::sumsq_diff() const {
class Blob<int> (line 526) | class Blob<int>
class Blob<unsigned int> (line 527) | class Blob<unsigned int>
FILE: src/caffe/common.cpp
type caffe (line 9) | namespace caffe {
function Caffe (line 14) | Caffe& Caffe::Get() {
function cluster_seedgen (line 22) | int64_t cluster_seedgen(void) {
function GlobalInit (line 42) | void GlobalInit(int* pargc, char*** pargv) {
class Caffe::RNG::Generator (line 73) | class Caffe::RNG::Generator {
method Generator (line 75) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 76) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 197) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 198) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
class Caffe::RNG::Generator (line 195) | class Caffe::RNG::Generator {
method Generator (line 75) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 76) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
method Generator (line 197) | Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {}
method Generator (line 198) | explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {}
FILE: src/caffe/data_reader.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/data_transformer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/internal_thread.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layer.cpp
type caffe (line 4) | namespace caffe {
FILE: src/caffe/layer_factory.cpp
type caffe (line 17) | namespace caffe {
function GetConvolutionLayer (line 21) | shared_ptr<Layer<Dtype> > GetConvolutionLayer(
function GetCConvolutionLayer (line 45) | shared_ptr<Layer<Dtype> > GetCConvolutionLayer(
function GetPoolingLayer (line 71) | shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
function GetReLULayer (line 101) | shared_ptr<Layer<Dtype> > GetReLULayer(const LayerParameter& param) {
function GetSigmoidLayer (line 124) | shared_ptr<Layer<Dtype> > GetSigmoidLayer(const LayerParameter& param) {
function GetSoftmaxLayer (line 147) | shared_ptr<Layer<Dtype> > GetSoftmaxLayer(const LayerParameter& param) {
function GetTanHLayer (line 170) | shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
function GetPythonLayer (line 193) | shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
FILE: src/caffe/layers/absval_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/accuracy_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/argmax_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/base_conv_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/base_data_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/bnll_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/compress_conv_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/compress_inner_product_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: src/caffe/layers/concat_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/contrastive_loss_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/conv_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/cudnn_conv_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: src/caffe/layers/cudnn_pooling_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: src/caffe/layers/cudnn_relu_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/cudnn_sigmoid_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/cudnn_softmax_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: src/caffe/layers/cudnn_tanh_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/data_layer.cpp
type caffe (line 15) | namespace caffe {
FILE: src/caffe/layers/deconv_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/dropout_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/dummy_data_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/eltwise_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/euclidean_loss_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/exp_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/filter_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/flatten_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/hdf5_data_layer.cpp
type caffe (line 21) | namespace caffe {
FILE: src/caffe/layers/hdf5_output_layer.cpp
type caffe (line 12) | namespace caffe {
FILE: src/caffe/layers/hinge_loss_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/im2col_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/image_data_layer.cpp
type caffe (line 16) | namespace caffe {
FILE: src/caffe/layers/infogain_loss_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/inner_product_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: src/caffe/layers/loss_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/lrn_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/memory_data_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/multinomial_logistic_loss_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/mvn_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/neuron_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: src/caffe/layers/pooling_layer.cpp
type caffe (line 11) | namespace caffe {
FILE: src/caffe/layers/power_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/prelu_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/reduction_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/relu_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/reshape_layer.cpp
type caffe (line 6) | namespace caffe {
FILE: src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 9) | namespace caffe {
FILE: src/caffe/layers/sigmoid_layer.cpp
type caffe (line 8) | namespace caffe {
function Dtype (line 11) | inline Dtype sigmoid(Dtype x) {
FILE: src/caffe/layers/silence_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/slice_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/softmax_layer.cpp
type caffe (line 8) | namespace caffe {
FILE: src/caffe/layers/softmax_loss_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: src/caffe/layers/split_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/spp_layer.cpp
type caffe (line 11) | namespace caffe {
function LayerParameter (line 17) | LayerParameter SPPLayer<Dtype>::GetPoolingParam(const int pyramid_level,
FILE: src/caffe/layers/tanh_layer.cpp
type caffe (line 10) | namespace caffe {
FILE: src/caffe/layers/threshold_layer.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/layers/window_data_layer.cpp
type caffe (line 26) | namespace caffe {
FILE: src/caffe/net.cpp
type caffe (line 22) | namespace caffe {
function Dtype (line 588) | Dtype Net<Dtype>::ForwardFromTo(int start, int end) {
function Dtype (line 607) | Dtype Net<Dtype>::ForwardFrom(int start) {
function Dtype (line 612) | Dtype Net<Dtype>::ForwardTo(int end) {
function string (line 637) | string Net<Dtype>::Forward(const string& input_blob_protos, Dtype* los...
FILE: src/caffe/parallel.cpp
type caffe (line 19) | namespace caffe {
type Op (line 21) | enum Op {
function apply_buffers (line 30) | static void apply_buffers(const vector<Blob<Dtype>*>& blobs,
function total_size (line 64) | static size_t total_size(const vector<Blob<Dtype>*>& params) {
FILE: src/caffe/solver.cpp
type caffe (line 18) | namespace caffe {
function string (line 388) | string Solver<Dtype>::SnapshotFilename(const string extension) {
function string (line 397) | string Solver<Dtype>::SnapshotToBinaryProto() {
function string (line 407) | string Solver<Dtype>::SnapshotToHDF5() {
function Dtype (line 442) | Dtype SGDSolver<Dtype>::GetLearningRate() {
FILE: src/caffe/syncedmem.cpp
type caffe (line 7) | namespace caffe {
FILE: src/caffe/test/test_accuracy_layer.cpp
type caffe (line 16) | namespace caffe {
class AccuracyLayerTest (line 19) | class AccuracyLayerTest : public CPUDeviceTest<Dtype> {
method AccuracyLayerTest (line 21) | AccuracyLayerTest()
method FillBottoms (line 39) | virtual void FillBottoms() {
function TYPED_TEST (line 70) | TYPED_TEST(AccuracyLayerTest, TestSetup) {
function TYPED_TEST (line 80) | TYPED_TEST(AccuracyLayerTest, TestSetupTopK) {
function TYPED_TEST (line 93) | TYPED_TEST(AccuracyLayerTest, TestForwardCPU) {
function TYPED_TEST (line 119) | TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) {
function TYPED_TEST (line 162) | TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) {
function TYPED_TEST (line 200) | TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) {
FILE: src/caffe/test/test_argmax_layer.cpp
type caffe (line 13) | namespace caffe {
class ArgMaxLayerTest (line 16) | class ArgMaxLayerTest : public CPUDeviceTest<Dtype> {
method ArgMaxLayerTest (line 18) | ArgMaxLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(ArgMaxLayerTest, TestSetup) {
function TYPED_TEST (line 48) | TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) {
function TYPED_TEST (line 58) | TYPED_TEST(ArgMaxLayerTest, TestCPU) {
function TYPED_TEST (line 81) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) {
function TYPED_TEST (line 107) | TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) {
function TYPED_TEST (line 136) | TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) {
FILE: src/caffe/test/test_benchmark.cpp
type caffe (line 10) | namespace caffe {
class BenchmarkTest (line 15) | class BenchmarkTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 19) | TYPED_TEST(BenchmarkTest, TestTimerConstructor) {
function TYPED_TEST (line 26) | TYPED_TEST(BenchmarkTest, TestTimerStart) {
function TYPED_TEST (line 43) | TYPED_TEST(BenchmarkTest, TestTimerStop) {
function TYPED_TEST (line 60) | TYPED_TEST(BenchmarkTest, TestTimerMilliSeconds) {
function TYPED_TEST (line 75) | TYPED_TEST(BenchmarkTest, TestTimerSeconds) {
FILE: src/caffe/test/test_blob.cpp
type caffe (line 12) | namespace caffe {
class BlobSimpleTest (line 15) | class BlobSimpleTest : public ::testing::Test {
method BlobSimpleTest (line 17) | BlobSimpleTest()
function TYPED_TEST (line 27) | TYPED_TEST(BlobSimpleTest, TestInitialization) {
function TYPED_TEST (line 39) | TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
function TYPED_TEST (line 46) | TYPED_TEST(BlobSimpleTest, TestReshape) {
function TYPED_TEST (line 55) | TYPED_TEST(BlobSimpleTest, TestLegacyBlobProtoShapeEquals) {
class BlobMathTest (line 109) | class BlobMathTest : public MultiDeviceTest<TypeParam> {
method BlobMathTest (line 112) | BlobMathTest()
function TYPED_TEST (line 123) | TYPED_TEST(BlobMathTest, TestSumOfSquares) {
function TYPED_TEST (line 178) | TYPED_TEST(BlobMathTest, TestAsum) {
function TYPED_TEST (line 232) | TYPED_TEST(BlobMathTest, TestScaleData) {
FILE: src/caffe/test/test_caffe_main.cpp
type caffe (line 7) | namespace caffe {
function main (line 17) | int main(int argc, char** argv) {
FILE: src/caffe/test/test_common.cpp
type caffe (line 11) | namespace caffe {
class CommonTest (line 13) | class CommonTest : public ::testing::Test {}
function TEST_F (line 17) | TEST_F(CommonTest, TestCublasHandlerGPU) {
function TEST_F (line 25) | TEST_F(CommonTest, TestBrewMode) {
function TEST_F (line 32) | TEST_F(CommonTest, TestRandSeedCPU) {
function TEST_F (line 49) | TEST_F(CommonTest, TestRandSeedGPU) {
FILE: src/caffe/test/test_concat_layer.cpp
type caffe (line 14) | namespace caffe {
class ConcatLayerTest (line 17) | class ConcatLayerTest : public MultiDeviceTest<TypeParam> {
method ConcatLayerTest (line 21) | ConcatLayerTest()
method SetUp (line 26) | virtual void SetUp() {
function TYPED_TEST (line 61) | TYPED_TEST(ConcatLayerTest, TestSetupNum) {
function TYPED_TEST (line 74) | TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
function TYPED_TEST (line 86) | TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) {
function TYPED_TEST (line 102) | TYPED_TEST(ConcatLayerTest, TestForwardNum) {
function TYPED_TEST (line 131) | TYPED_TEST(ConcatLayerTest, TestForwardChannels) {
function TYPED_TEST (line 157) | TYPED_TEST(ConcatLayerTest, TestGradientNum) {
function TYPED_TEST (line 167) | TYPED_TEST(ConcatLayerTest, TestGradientChannels) {
FILE: src/caffe/test/test_contrastive_loss_layer.cpp
type caffe (line 17) | namespace caffe {
class ContrastiveLossLayerTest (line 20) | class ContrastiveLossLayerTest : public MultiDeviceTest<TypeParam> {
method ContrastiveLossLayerTest (line 24) | ContrastiveLossLayerTest()
function TYPED_TEST (line 61) | TYPED_TEST(ContrastiveLossLayerTest, TestForward) {
function TYPED_TEST (line 90) | TYPED_TEST(ContrastiveLossLayerTest, TestGradient) {
function TYPED_TEST (line 103) | TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) {
function TYPED_TEST (line 132) | TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) {
FILE: src/caffe/test/test_convolution_layer.cpp
type caffe (line 14) | namespace caffe {
function caffe_conv (line 19) | void caffe_conv(const Blob<Dtype>* in, ConvolutionParameter* conv_param,
class ConvolutionLayerTest (line 104) | class ConvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method ConvolutionLayerTest (line 108) | ConvolutionLayerTest()
method SetUp (line 113) | virtual void SetUp() {
function TYPED_TEST (line 148) | TYPED_TEST(ConvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 184) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) {
function TYPED_TEST (line 220) | TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) {
function TYPED_TEST (line 247) | TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) {
function TYPED_TEST (line 275) | TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) {
function TYPED_TEST (line 371) | TYPED_TEST(ConvolutionLayerTest, TestGradient) {
function TYPED_TEST (line 389) | TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) {
function TYPED_TEST (line 407) | TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) {
class CuDNNConvolutionLayerTest (line 427) | class CuDNNConvolutionLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNConvolutionLayerTest (line 429) | CuDNNConvolutionLayerTest()
method SetUp (line 434) | virtual void SetUp() {
function TYPED_TEST (line 469) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 506) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) {
function TYPED_TEST (line 541) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) {
function TYPED_TEST (line 568) | TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) {
function TYPED_TEST (line 664) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) {
function TYPED_TEST (line 681) | TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) {
FILE: src/caffe/test/test_data_layer.cpp
type caffe (line 17) | namespace caffe {
class DataLayerTest (line 22) | class DataLayerTest : public MultiDeviceTest<TypeParam> {
method DataLayerTest (line 26) | DataLayerTest()
method SetUp (line 31) | virtual void SetUp() {
method Fill (line 42) | void Fill(const bool unique_pixels, DataParameter_DB backend) {
method TestRead (line 69) | void TestRead() {
method TestReshape (line 107) | void TestReshape(DataParameter_DB backend) {
method TestReadCrop (line 172) | void TestReadCrop(Phase phase) {
method TestReadCropTrainSequenceSeeded (line 226) | void TestReadCropTrainSequenceSeeded() {
method TestReadCropTrainSequenceUnseeded (line 281) | void TestReadCropTrainSequenceUnseeded() {
function TYPED_TEST (line 351) | TYPED_TEST(DataLayerTest, TestReadLevelDB) {
function TYPED_TEST (line 357) | TYPED_TEST(DataLayerTest, TestReshapeLevelDB) {
function TYPED_TEST (line 361) | TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDB) {
function TYPED_TEST (line 369) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDB) {
function TYPED_TEST (line 377) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) {
function TYPED_TEST (line 383) | TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) {
function TYPED_TEST (line 389) | TYPED_TEST(DataLayerTest, TestReadLMDB) {
function TYPED_TEST (line 395) | TYPED_TEST(DataLayerTest, TestReshapeLMDB) {
function TYPED_TEST (line 399) | TYPED_TEST(DataLayerTest, TestReadCropTrainLMDB) {
function TYPED_TEST (line 407) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDB) {
function TYPED_TEST (line 415) | TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDB) {
function TYPED_TEST (line 421) | TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) {
FILE: src/caffe/test/test_data_transformer.cpp
type caffe (line 16) | namespace caffe {
function FillDatum (line 18) | void FillDatum(const int label, const int channels, const int height,
class DataTransformTest (line 33) | class DataTransformTest : public ::testing::Test {
method DataTransformTest (line 35) | DataTransformTest()
method NumSequenceMatches (line 39) | int NumSequenceMatches(const TransformationParameter transform_param,
function TYPED_TEST (line 83) | TYPED_TEST(DataTransformTest, TestEmptyTransform) {
function TYPED_TEST (line 107) | TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) {
function TYPED_TEST (line 131) | TYPED_TEST(DataTransformTest, TestCropSize) {
function TYPED_TEST (line 160) | TYPED_TEST(DataTransformTest, TestCropTrain) {
function TYPED_TEST (line 177) | TYPED_TEST(DataTransformTest, TestCropTest) {
function TYPED_TEST (line 194) | TYPED_TEST(DataTransformTest, TestMirrorTrain) {
function TYPED_TEST (line 210) | TYPED_TEST(DataTransformTest, TestMirrorTest) {
function TYPED_TEST (line 226) | TYPED_TEST(DataTransformTest, TestCropMirrorTrain) {
function TYPED_TEST (line 248) | TYPED_TEST(DataTransformTest, TestCropMirrorTest) {
function TYPED_TEST (line 270) | TYPED_TEST(DataTransformTest, TestMeanValue) {
function TYPED_TEST (line 292) | TYPED_TEST(DataTransformTest, TestMeanValues) {
function TYPED_TEST (line 317) | TYPED_TEST(DataTransformTest, TestMeanFile) {
FILE: src/caffe/test/test_db.cpp
type caffe (line 13) | namespace caffe {
class DBTest (line 18) | class DBTest : public ::testing::Test {
method DBTest (line 20) | DBTest()
method SetUp (line 24) | virtual void SetUp() {
type TypeLevelDB (line 49) | struct TypeLevelDB {
type TypeLMDB (line 54) | struct TypeLMDB {
function TYPED_TEST (line 64) | TYPED_TEST(DBTest, TestGetDB) {
function TYPED_TEST (line 68) | TYPED_TEST(DBTest, TestNext) {
function TYPED_TEST (line 79) | TYPED_TEST(DBTest, TestSeekToFirst) {
function TYPED_TEST (line 95) | TYPED_TEST(DBTest, TestKeyValue) {
function TYPED_TEST (line 119) | TYPED_TEST(DBTest, TestWrite) {
FILE: src/caffe/test/test_deconvolution_layer.cpp
type caffe (line 14) | namespace caffe {
class DeconvolutionLayerTest (line 19) | class DeconvolutionLayerTest : public MultiDeviceTest<TypeParam> {
method DeconvolutionLayerTest (line 23) | DeconvolutionLayerTest()
method SetUp (line 28) | virtual void SetUp() {
function TYPED_TEST (line 56) | TYPED_TEST(DeconvolutionLayerTest, TestSetup) {
function TYPED_TEST (line 92) | TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) {
function TYPED_TEST (line 140) | TYPED_TEST(DeconvolutionLayerTest, TestGradient) {
FILE: src/caffe/test/test_dummy_data_layer.cpp
type caffe (line 13) | namespace caffe {
class DummyDataLayerTest (line 16) | class DummyDataLayerTest : public CPUDeviceTest<Dtype> {
method DummyDataLayerTest (line 18) | DummyDataLayerTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 46) | TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
function TYPED_TEST (line 75) | TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) {
function TYPED_TEST (line 113) | TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) {
FILE: src/caffe/test/test_eltwise_layer.cpp
type caffe (line 14) | namespace caffe {
class EltwiseLayerTest (line 17) | class EltwiseLayerTest : public MultiDeviceTest<TypeParam> {
method EltwiseLayerTest (line 21) | EltwiseLayerTest()
function TYPED_TEST (line 54) | TYPED_TEST(EltwiseLayerTest, TestSetUp) {
function TYPED_TEST (line 68) | TYPED_TEST(EltwiseLayerTest, TestProd) {
function TYPED_TEST (line 87) | TYPED_TEST(EltwiseLayerTest, TestSum) {
function TYPED_TEST (line 106) | TYPED_TEST(EltwiseLayerTest, TestSumCoeff) {
function TYPED_TEST (line 129) | TYPED_TEST(EltwiseLayerTest, TestStableProdGradient) {
function TYPED_TEST (line 141) | TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) {
function TYPED_TEST (line 153) | TYPED_TEST(EltwiseLayerTest, TestSumGradient) {
function TYPED_TEST (line 164) | TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 178) | TYPED_TEST(EltwiseLayerTest, TestMax) {
function TYPED_TEST (line 198) | TYPED_TEST(EltwiseLayerTest, TestMaxGradient) {
FILE: src/caffe/test/test_euclidean_loss_layer.cpp
type caffe (line 16) | namespace caffe {
class EuclideanLossLayerTest (line 19) | class EuclideanLossLayerTest : public MultiDeviceTest<TypeParam> {
method EuclideanLossLayerTest (line 23) | EuclideanLossLayerTest()
method TestForward (line 42) | void TestForward() {
function TYPED_TEST (line 75) | TYPED_TEST(EuclideanLossLayerTest, TestForward) {
function TYPED_TEST (line 79) | TYPED_TEST(EuclideanLossLayerTest, TestGradient) {
FILE: src/caffe/test/test_filler.cpp
type caffe (line 9) | namespace caffe {
class ConstantFillerTest (line 12) | class ConstantFillerTest : public ::testing::Test {
method ConstantFillerTest (line 14) | ConstantFillerTest()
function TYPED_TEST (line 29) | TYPED_TEST(ConstantFillerTest, TestFill) {
class UniformFillerTest (line 40) | class UniformFillerTest : public ::testing::Test {
method UniformFillerTest (line 42) | UniformFillerTest()
function TYPED_TEST (line 58) | TYPED_TEST(UniformFillerTest, TestFill) {
class PositiveUnitballFillerTest (line 69) | class PositiveUnitballFillerTest : public ::testing::Test {
method PositiveUnitballFillerTest (line 71) | PositiveUnitballFillerTest()
function TYPED_TEST (line 85) | TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
class GaussianFillerTest (line 106) | class GaussianFillerTest : public ::testing::Test {
method GaussianFillerTest (line 108) | GaussianFillerTest()
function TYPED_TEST (line 124) | TYPED_TEST(GaussianFillerTest, TestFill) {
class XavierFillerTest (line 146) | class XavierFillerTest : public ::testing::Test {
method XavierFillerTest (line 148) | XavierFillerTest()
method test_params (line 152) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 181) | TYPED_TEST(XavierFillerTest, TestFillFanIn) {
function TYPED_TEST (line 185) | TYPED_TEST(XavierFillerTest, TestFillFanOut) {
function TYPED_TEST (line 189) | TYPED_TEST(XavierFillerTest, TestFillAverage) {
class MSRAFillerTest (line 195) | class MSRAFillerTest : public ::testing::Test {
method MSRAFillerTest (line 197) | MSRAFillerTest()
method test_params (line 201) | virtual void test_params(FillerParameter_VarianceNorm variance_norm,
function TYPED_TEST (line 230) | TYPED_TEST(MSRAFillerTest, TestFillFanIn) {
function TYPED_TEST (line 234) | TYPED_TEST(MSRAFillerTest, TestFillFanOut) {
function TYPED_TEST (line 238) | TYPED_TEST(MSRAFillerTest, TestFillAverage) {
FILE: src/caffe/test/test_filter_layer.cpp
type caffe (line 15) | namespace caffe {
class FilterLayerTest (line 18) | class FilterLayerTest : public MultiDeviceTest<TypeParam> {
method FilterLayerTest (line 22) | FilterLayerTest()
method SetUp (line 28) | virtual void SetUp() {
function TYPED_TEST (line 69) | TYPED_TEST(FilterLayerTest, TestReshape) {
function TYPED_TEST (line 89) | TYPED_TEST(FilterLayerTest, TestForward) {
function TYPED_TEST (line 117) | TYPED_TEST(FilterLayerTest, TestGradient) {
FILE: src/caffe/test/test_flatten_layer.cpp
type caffe (line 14) | namespace caffe {
class FlattenLayerTest (line 17) | class FlattenLayerTest : public MultiDeviceTest<TypeParam> {
method FlattenLayerTest (line 20) | FlattenLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(FlattenLayerTest, TestSetup) {
function TYPED_TEST (line 50) | TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) {
function TYPED_TEST (line 62) | TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) {
function TYPED_TEST (line 74) | TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) {
function TYPED_TEST (line 86) | TYPED_TEST(FlattenLayerTest, TestForward) {
function TYPED_TEST (line 100) | TYPED_TEST(FlattenLayerTest, TestGradient) {
FILE: src/caffe/test/test_gradient_based_solver.cpp
type caffe (line 20) | namespace caffe {
class GradientBasedSolverTest (line 23) | class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
method GradientBasedSolverTest (line 27) | GradientBasedSolverTest() :
method InitSolverFromProtoString (line 53) | virtual void InitSolverFromProtoString(const string& proto) {
method string (line 71) | string RunLeastSquaresSolver(const Dtype learning_rate,
method ComputeLeastSquaresUpdate (line 226) | void ComputeLeastSquaresUpdate(const Dtype learning_rate,
method CheckLeastSquaresUpdate (line 363) | void CheckLeastSquaresUpdate(
method CheckAccumulation (line 413) | void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeigh...
method TestLeastSquaresUpdate (line 467) | void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0,
method TestSnapshot (line 504) | void TestSnapshot(const Dtype learning_rate = 1.0,
class SGDSolverTest (line 577) | class SGDSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 581) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 585) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 592) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) {
function TYPED_TEST (line 596) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) {
function TYPED_TEST (line 602) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 613) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIt...
function TYPED_TEST (line 624) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) {
function TYPED_TEST (line 635) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) {
function TYPED_TEST (line 646) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 657) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) {
function TYPED_TEST (line 669) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 680) | TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumSha...
function TYPED_TEST (line 692) | TYPED_TEST(SGDSolverTest, TestSnapshot) {
function TYPED_TEST (line 703) | TYPED_TEST(SGDSolverTest, TestSnapshotShare) {
class AdaGradSolverTest (line 717) | class AdaGradSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 721) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 724) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 731) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) {
function TYPED_TEST (line 735) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundre...
function TYPED_TEST (line 741) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 748) | TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 759) | TYPED_TEST(AdaGradSolverTest,
function TYPED_TEST (line 772) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 783) | TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 795) | TYPED_TEST(AdaGradSolverTest, TestSnapshot) {
function TYPED_TEST (line 806) | TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) {
class NesterovSolverTest (line 820) | class NesterovSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 824) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 827) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 834) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) {
function TYPED_TEST (line 838) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHund...
function TYPED_TEST (line 844) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 851) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 863) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 874) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 885) | TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 896) | TYPED_TEST(NesterovSolverTest,
function TYPED_TEST (line 909) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 920) | TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 932) | TYPED_TEST(NesterovSolverTest, TestSnapshot) {
function TYPED_TEST (line 943) | TYPED_TEST(NesterovSolverTest, TestSnapshotShare) {
class AdaDeltaSolverTest (line 956) | class AdaDeltaSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 960) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 964) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 971) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) {
function TYPED_TEST (line 977) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeigh...
function TYPED_TEST (line 985) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfM...
function TYPED_TEST (line 996) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomen...
function TYPED_TEST (line 1007) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMulti...
function TYPED_TEST (line 1018) | TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEvery...
function TYPED_TEST (line 1029) | TYPED_TEST(AdaDeltaSolverTest,
function TYPED_TEST (line 1042) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1053) | TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAcc...
function TYPED_TEST (line 1065) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) {
function TYPED_TEST (line 1076) | TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) {
class AdamSolverTest (line 1089) | class AdamSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1093) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 1101) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 1108) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) {
function TYPED_TEST (line 1116) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) {
function TYPED_TEST (line 1124) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) {
function TYPED_TEST (line 1135) | TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingSha...
function TYPED_TEST (line 1147) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) {
function TYPED_TEST (line 1158) | TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumSh...
function TYPED_TEST (line 1170) | TYPED_TEST(AdamSolverTest, TestSnapshot) {
function TYPED_TEST (line 1181) | TYPED_TEST(AdamSolverTest, TestSnapshotShare) {
class RMSPropSolverTest (line 1194) | class RMSPropSolverTest : public GradientBasedSolverTest<TypeParam> {
method InitSolver (line 1198) | virtual void InitSolver(const SolverParameter& param) {
method SolverParameter_SolverType (line 1204) | virtual SolverParameter_SolverType solver_type() {
function TYPED_TEST (line 1211) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightD...
function TYPED_TEST (line 1218) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDeca...
function TYPED_TEST (line 1229) | TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEveryth...
function TYPED_TEST (line 1240) | TYPED_TEST(RMSPropSolverTest,
function TYPED_TEST (line 1253) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1264) | TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccu...
function TYPED_TEST (line 1276) | TYPED_TEST(RMSPropSolverTest, TestSnapshot) {
function TYPED_TEST (line 1287) | TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) {
FILE: src/caffe/test/test_hdf5_output_layer.cpp
type caffe (line 15) | namespace caffe {
class HDF5OutputLayerTest (line 18) | class HDF5OutputLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5OutputLayerTest (line 22) | HDF5OutputLayerTest()
function TYPED_TEST (line 73) | TYPED_TEST(HDF5OutputLayerTest, TestForward) {
FILE: src/caffe/test/test_hdf5data_layer.cpp
type caffe (line 14) | namespace caffe {
class HDF5DataLayerTest (line 17) | class HDF5DataLayerTest : public MultiDeviceTest<TypeParam> {
method HDF5DataLayerTest (line 21) | HDF5DataLayerTest()
method SetUp (line 26) | virtual void SetUp() {
function TYPED_TEST (line 54) | TYPED_TEST(HDF5DataLayerTest, TestRead) {
FILE: src/caffe/test/test_hinge_loss_layer.cpp
type caffe (line 16) | namespace caffe {
class HingeLossLayerTest (line 19) | class HingeLossLayerTest : public MultiDeviceTest<TypeParam> {
method HingeLossLayerTest (line 23) | HingeLossLayerTest()
function TYPED_TEST (line 55) | TYPED_TEST(HingeLossLayerTest, TestGradientL1) {
function TYPED_TEST (line 64) | TYPED_TEST(HingeLossLayerTest, TestGradientL2) {
FILE: src/caffe/test/test_im2col_layer.cpp
type caffe (line 14) | namespace caffe {
class Im2colLayerTest (line 17) | class Im2colLayerTest : public MultiDeviceTest<TypeParam> {
method Im2colLayerTest (line 20) | Im2colLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(Im2colLayerTest, TestSetup) {
function TYPED_TEST (line 54) | TYPED_TEST(Im2colLayerTest, TestForward) {
function TYPED_TEST (line 71) | TYPED_TEST(Im2colLayerTest, TestGradient) {
function TYPED_TEST (line 85) | TYPED_TEST(Im2colLayerTest, TestRect) {
function TYPED_TEST (line 104) | TYPED_TEST(Im2colLayerTest, TestRectGradient) {
FILE: src/caffe/test/test_image_data_layer.cpp
type caffe (line 16) | namespace caffe {
class ImageDataLayerTest (line 19) | class ImageDataLayerTest : public MultiDeviceTest<TypeParam> {
method ImageDataLayerTest (line 23) | ImageDataLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 64) | TYPED_TEST(ImageDataLayerTest, TestRead) {
function TYPED_TEST (line 90) | TYPED_TEST(ImageDataLayerTest, TestResize) {
function TYPED_TEST (line 118) | TYPED_TEST(ImageDataLayerTest, TestReshape) {
function TYPED_TEST (line 145) | TYPED_TEST(ImageDataLayerTest, TestShuffle) {
FILE: src/caffe/test/test_infogain_loss_layer.cpp
type caffe (line 16) | namespace caffe {
class InfogainLossLayerTest (line 19) | class InfogainLossLayerTest : public MultiDeviceTest<TypeParam> {
method InfogainLossLayerTest (line 23) | InfogainLossLayerTest()
function TYPED_TEST (line 61) | TYPED_TEST(InfogainLossLayerTest, TestGradient) {
FILE: src/caffe/test/test_inner_product_layer.cpp
type caffe (line 14) | namespace caffe {
class InnerProductLayerTest (line 21) | class InnerProductLayerTest : public MultiDeviceTest<TypeParam> {
method InnerProductLayerTest (line 24) | InnerProductLayerTest()
function TYPED_TEST (line 48) | TYPED_TEST(InnerProductLayerTest, TestSetUp) {
function TYPED_TEST (line 64) | TYPED_TEST(InnerProductLayerTest, TestForward) {
function TYPED_TEST (line 95) | TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) {
function TYPED_TEST (line 126) | TYPED_TEST(InnerProductLayerTest, TestGradient) {
FILE: src/caffe/test/test_internal_thread.cpp
type caffe (line 9) | namespace caffe {
class InternalThreadTest (line 12) | class InternalThreadTest : public ::testing::Test {}
function TEST_F (line 14) | TEST_F(InternalThreadTest, TestStartAndExit) {
class TestThreadA (line 23) | class TestThreadA : public InternalThread {
method InternalThreadEntry (line 24) | void InternalThreadEntry() {
class TestThreadB (line 29) | class TestThreadB : public InternalThread {
method InternalThreadEntry (line 30) | void InternalThreadEntry() {
function TEST_F (line 35) | TEST_F(InternalThreadTest, TestRandomSeed) {
FILE: src/caffe/test/test_io.cpp
type caffe (line 15) | namespace caffe {
class IOTest (line 17) | class IOTest : public ::testing::Test {}
function ReadImageToDatumReference (line 19) | bool ReadImageToDatumReference(const string& filename, const int label,
function TEST_F (line 64) | TEST_F(IOTest, TestReadImageToDatum) {
function TEST_F (line 73) | TEST_F(IOTest, TestReadImageToDatumReference) {
function TEST_F (line 92) | TEST_F(IOTest, TestReadImageToDatumReferenceResized) {
function TEST_F (line 110) | TEST_F(IOTest, TestReadImageToDatumContent) {
function TEST_F (line 131) | TEST_F(IOTest, TestReadImageToDatumContentGray) {
function TEST_F (line 150) | TEST_F(IOTest, TestReadImageToDatumResized) {
function TEST_F (line 160) | TEST_F(IOTest, TestReadImageToDatumResizedSquare) {
function TEST_F (line 169) | TEST_F(IOTest, TestReadImageToDatumGray) {
function TEST_F (line 179) | TEST_F(IOTest, TestReadImageToDatumResizedGray) {
function TEST_F (line 189) | TEST_F(IOTest, TestReadImageToCVMat) {
function TEST_F (line 197) | TEST_F(IOTest, TestReadImageToCVMatResized) {
function TEST_F (line 205) | TEST_F(IOTest, TestReadImageToCVMatResizedSquare) {
function TEST_F (line 213) | TEST_F(IOTest, TestReadImageToCVMatGray) {
function TEST_F (line 222) | TEST_F(IOTest, TestReadImageToCVMatResizedGray) {
function TEST_F (line 231) | TEST_F(IOTest, TestCVMatToDatum) {
function TEST_F (line 241) | TEST_F(IOTest, TestCVMatToDatumContent) {
function TEST_F (line 260) | TEST_F(IOTest, TestCVMatToDatumReference) {
function TEST_F (line 279) | TEST_F(IOTest, TestReadFileToDatum) {
function TEST_F (line 288) | TEST_F(IOTest, TestDecodeDatum) {
function TEST_F (line 308) | TEST_F(IOTest, TestDecodeDatumToCVMat) {
function TEST_F (line 322) | TEST_F(IOTest, TestDecodeDatumToCVMatContent) {
function TEST_F (line 342) | TEST_F(IOTest, TestDecodeDatumNative) {
function TEST_F (line 362) | TEST_F(IOTest, TestDecodeDatumToCVMatNative) {
function TEST_F (line 372) | TEST_F(IOTest, TestDecodeDatumNativeGray) {
function TEST_F (line 392) | TEST_F(IOTest, TestDecodeDatumToCVMatNativeGray) {
function TEST_F (line 402) | TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) {
FILE: src/caffe/test/test_layer_factory.cpp
type caffe (line 15) | namespace caffe {
class LayerFactoryTest (line 18) | class LayerFactoryTest : public MultiDeviceTest<TypeParam> {}
function TYPED_TEST (line 22) | TYPED_TEST(LayerFactoryTest, TestCreateLayer) {
FILE: src/caffe/test/test_lrn_layer.cpp
type caffe (line 18) | namespace caffe {
class LRNLayerTest (line 21) | class LRNLayerTest : public MultiDeviceTest<TypeParam> {
method LRNLayerTest (line 25) | LRNLayerTest()
method SetUp (line 29) | virtual void SetUp() {
function TYPED_TEST (line 115) | TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) {
function TYPED_TEST (line 126) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 141) | TYPED_TEST(LRNLayerTest, TestForwardAcrossChannelsLargeRegion) {
function TYPED_TEST (line 157) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) {
function TYPED_TEST (line 178) | TYPED_TEST(LRNLayerTest, TestGradientAcrossChannelsLargeRegion) {
function TYPED_TEST (line 200) | TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) {
function TYPED_TEST (line 214) | TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) {
function TYPED_TEST (line 232) | TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) {
FILE: src/caffe/test/test_math_functions.cpp
type caffe (line 16) | namespace caffe {
class MathFunctionsTest (line 19) | class MathFunctionsTest : public MultiDeviceTest<TypeParam> {
method MathFunctionsTest (line 23) | MathFunctionsTest()
method SetUp (line 28) | virtual void SetUp() {
method ReferenceHammingDistance (line 45) | int ReferenceHammingDistance(const int n, const Dtype* x, const Dtyp...
class CPUMathFunctionsTest (line 70) | class CPUMathFunctionsTest
function TYPED_TEST (line 76) | TYPED_TEST(CPUMathFunctionsTest, TestNothing) {
function TYPED_TEST (line 81) | TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) {
function TYPED_TEST (line 89) | TYPED_TEST(CPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 100) | TYPED_TEST(CPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 110) | TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 120) | TYPED_TEST(CPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 130) | TYPED_TEST(CPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 143) | TYPED_TEST(CPUMathFunctionsTest, TestCopy) {
class GPUMathFunctionsTest (line 156) | class GPUMathFunctionsTest : public MathFunctionsTest<GPUDevice<Dtype>...
function TYPED_TEST (line 162) | TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) {
function TYPED_TEST (line 173) | TYPED_TEST(GPUMathFunctionsTest, TestAsum) {
function TYPED_TEST (line 185) | TYPED_TEST(GPUMathFunctionsTest, TestSign) {
function TYPED_TEST (line 196) | TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) {
function TYPED_TEST (line 207) | TYPED_TEST(GPUMathFunctionsTest, TestFabs) {
function TYPED_TEST (line 218) | TYPED_TEST(GPUMathFunctionsTest, TestScale) {
function TYPED_TEST (line 231) | TYPED_TEST(GPUMathFunctionsTest, TestCopy) {
FILE: src/caffe/test/test_maxpool_dropout_layers.cpp
type caffe (line 14) | namespace caffe {
class MaxPoolingDropoutTest (line 17) | class MaxPoolingDropoutTest : public MultiDeviceTest<TypeParam> {
method MaxPoolingDropoutTest (line 20) | MaxPoolingDropoutTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 43) | TYPED_TEST(MaxPoolingDropoutTest, TestSetup) {
function TYPED_TEST (line 60) | TYPED_TEST(MaxPoolingDropoutTest, TestForward) {
function TYPED_TEST (line 89) | TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
FILE: src/caffe/test/test_memory_data_layer.cpp
type caffe (line 11) | namespace caffe {
class MemoryDataLayerTest (line 14) | class MemoryDataLayerTest : public MultiDeviceTest<TypeParam> {
method MemoryDataLayerTest (line 18) | MemoryDataLayerTest()
method SetUp (line 23) | virtual void SetUp() {
function TYPED_TEST (line 64) | TYPED_TEST(MemoryDataLayerTest, TestSetup) {
function TYPED_TEST (line 87) | TYPED_TEST(MemoryDataLayerTest, TestForward) {
function TYPED_TEST (line 116) | TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) {
function TYPED_TEST (line 170) | TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) {
function TYPED_TEST (line 216) | TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) {
FILE: src/caffe/test/test_multinomial_logistic_loss_layer.cpp
type caffe (line 16) | namespace caffe {
class MultinomialLogisticLossLayerTest (line 19) | class MultinomialLogisticLossLayerTest : public CPUDeviceTest<Dtype> {
method MultinomialLogisticLossLayerTest (line 21) | MultinomialLogisticLossLayerTest()
function TYPED_TEST (line 52) | TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
FILE: src/caffe/test/test_mvn_layer.cpp
type caffe (line 14) | namespace caffe {
class MVNLayerTest (line 17) | class MVNLayerTest : public MultiDeviceTest<TypeParam> {
method MVNLayerTest (line 20) | MVNLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(MVNLayerTest, TestForward) {
function TYPED_TEST (line 73) | TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) {
function TYPED_TEST (line 105) | TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) {
function TYPED_TEST (line 140) | TYPED_TEST(MVNLayerTest, TestGradient) {
function TYPED_TEST (line 149) | TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) {
function TYPED_TEST (line 159) | TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) {
FILE: src/caffe/test/test_net.cpp
type caffe (line 17) | namespace caffe {
class NetTest (line 20) | class NetTest : public MultiDeviceTest<TypeParam> {
method NetTest (line 24) | NetTest() : seed_(1701) {}
method InitNetFromProtoString (line 26) | virtual void InitNetFromProtoString(const string& proto) {
method CopyNetBlobs (line 32) | virtual void CopyNetBlobs(const bool copy_diff,
method CopyNetParams (line 45) | virtual void CopyNetParams(const bool copy_diff,
method InitTinyNet (line 58) | virtual void InitTinyNet(const bool force_backward = false,
method InitTinyNetEuclidean (line 135) | virtual void InitTinyNetEuclidean(const bool force_backward = false) {
method InitTrickyNet (line 195) | virtual void InitTrickyNet(Dtype* loss_weight = NULL) {
method InitUnsharedWeightsNet (line 286) | virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL,
method InitSharedWeightsNet (line 374) | virtual void InitSharedWeightsNet() {
method InitDiffDataUnsharedWeightsNet (line 431) | virtual void InitDiffDataUnsharedWeightsNet() {
method InitDiffDataSharedWeightsNet (line 493) | virtual void InitDiffDataSharedWeightsNet() {
method InitReshapableNet (line 555) | virtual void InitReshapableNet() {
method InitSkipPropNet (line 617) | virtual void InitSkipPropNet(bool test_skip_true) {
function TYPED_TEST (line 722) | TYPED_TEST(NetTest, TestHasBlob) {
function TYPED_TEST (line 731) | TYPED_TEST(NetTest, TestGetBlob) {
function TYPED_TEST (line 740) | TYPED_TEST(NetTest, TestHasLayer) {
function TYPED_TEST (line 748) | TYPED_TEST(NetTest, TestGetLayerByName) {
function TYPED_TEST (line 756) | TYPED_TEST(NetTest, TestBottomNeedBackward) {
function TYPED_TEST (line 769) | TYPED_TEST(NetTest, TestBottomNeedBackwardForce) {
function TYPED_TEST (line 783) | TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) {
function TYPED_TEST (line 797) | TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) {
function TYPED_TEST (line 815) | TYPED_TEST(NetTest, TestLossWeight) {
function TYPED_TEST (line 866) | TYPED_TEST(NetTest, TestLossWeightMidNet) {
function TYPED_TEST (line 904) | TYPED_TEST(NetTest, TestComboLossWeight) {
function TYPED_TEST (line 1034) | TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) {
function TYPED_TEST (line 1045) | TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) {
function TYPED_TEST (line 1054) | TYPED_TEST(NetTest, TestSharedWeightsDataNet) {
function TYPED_TEST (line 1063) | TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) {
function TYPED_TEST (line 1081) | TYPED_TEST(NetTest, TestSharedWeightsDiffNet) {
function TYPED_TEST (line 1101) | TYPED_TEST(NetTest, TestSharedWeightsUpdate) {
function TYPED_TEST (line 1183) | TYPED_TEST(NetTest, TestSharedWeightsResume) {
function TYPED_TEST (line 1229) | TYPED_TEST(NetTest, TestParamPropagateDown) {
function TYPED_TEST (line 1311) | TYPED_TEST(NetTest, TestFromTo) {
class FilterNetTest (line 1345) | class FilterNetTest : public ::testing::Test {
method RunFilterNetTest (line 1347) | void RunFilterNetTest(
function TEST_F (line 1367) | TEST_F(FilterNetTest, TestNoFilter) {
function TEST_F (line 1391) | TEST_F(FilterNetTest, TestFilterLeNetTrainTest) {
function TEST_F (line 1633) | TEST_F(FilterNetTest, TestFilterOutByStage) {
function TEST_F (line 1672) | TEST_F(FilterNetTest, TestFilterOutByStage2) {
function TEST_F (line 1711) | TEST_F(FilterNetTest, TestFilterInByStage) {
function TEST_F (line 1737) | TEST_F(FilterNetTest, TestFilterInByStage2) {
function TEST_F (line 1762) | TEST_F(FilterNetTest, TestFilterOutByMultipleStage) {
function TEST_F (line 1805) | TEST_F(FilterNetTest, TestFilterInByMultipleStage) {
function TEST_F (line 1833) | TEST_F(FilterNetTest, TestFilterInByMultipleStage2) {
function TEST_F (line 1860) | TEST_F(FilterNetTest, TestFilterInByNotStage) {
function TEST_F (line 1887) | TEST_F(FilterNetTest, TestFilterOutByNotStage) {
function TEST_F (line 1923) | TEST_F(FilterNetTest, TestFilterOutByMinLevel) {
function TEST_F (line 1962) | TEST_F(FilterNetTest, TestFilterOutByMaxLevel) {
function TEST_F (line 2001) | TEST_F(FilterNetTest, TestFilterInByMinLevel) {
function TEST_F (line 2026) | TEST_F(FilterNetTest, TestFilterInByMinLevel2) {
function TEST_F (line 2052) | TEST_F(FilterNetTest, TestFilterInByMaxLevel) {
function TEST_F (line 2077) | TEST_F(FilterNetTest, TestFilterInByMaxLevel2) {
function TEST_F (line 2103) | TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) {
function TEST_F (line 2166) | TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) {
function TEST_F (line 2199) | TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) {
function TYPED_TEST (line 2262) | TYPED_TEST(NetTest, TestReshape) {
function TYPED_TEST (line 2320) | TYPED_TEST(NetTest, TestSkipPropagateDown) {
FILE: src/caffe/test/test_neuron_layer.cpp
type caffe (line 16) | namespace caffe {
class NeuronLayerTest (line 19) | class NeuronLayerTest : public MultiDeviceTest<TypeParam> {
method NeuronLayerTest (line 23) | NeuronLayerTest()
method TestDropoutForward (line 40) | void TestDropoutForward(const float dropout_ratio) {
method TestExpForward (line 72) | void TestExpForward(const float base, const float scale, const float...
method TestExpGradient (line 94) | void TestExpGradient(const float base, const float scale, const floa...
method TestPReLU (line 104) | void TestPReLU(PReLULayer<Dtype> *layer) {
method LogBottomInit (line 121) | void LogBottomInit() {
method TestLogForward (line 129) | void TestLogForward(const float base, const float scale, const float...
method TestLogGradient (line 153) | void TestLogGradient(const float base, const float scale, const floa...
function TYPED_TEST (line 167) | TYPED_TEST(NeuronLayerTest, TestAbsVal) {
function TYPED_TEST (line 181) | TYPED_TEST(NeuronLayerTest, TestAbsGradient) {
function TYPED_TEST (line 190) | TYPED_TEST(NeuronLayerTest, TestReLU) {
function TYPED_TEST (line 205) | TYPED_TEST(NeuronLayerTest, TestReLUGradient) {
function TYPED_TEST (line 214) | TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) {
function TYPED_TEST (line 234) | TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) {
function TYPED_TEST (line 245) | TYPED_TEST(NeuronLayerTest, TestSigmoid) {
function TYPED_TEST (line 262) | TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
function TYPED_TEST (line 271) | TYPED_TEST(NeuronLayerTest, TestTanH) {
function TYPED_TEST (line 294) | TYPED_TEST(NeuronLayerTest, TestTanHGradient) {
function TYPED_TEST (line 303) | TYPED_TEST(NeuronLayerTest, TestExpLayer) {
function TYPED_TEST (line 312) | TYPED_TEST(NeuronLayerTest, TestExpGradient) {
function TYPED_TEST (line 321) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2) {
function TYPED_TEST (line 329) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2) {
function TYPED_TEST (line 337) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1) {
function TYPED_TEST (line 345) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1) {
function TYPED_TEST (line 353) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Scale3) {
function TYPED_TEST (line 361) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Scale3) {
function TYPED_TEST (line 369) | TYPED_TEST(NeuronLayerTest, TestExpLayerBase2Shift1Scale3) {
function TYPED_TEST (line 377) | TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) {
function TYPED_TEST (line 385) | TYPED_TEST(NeuronLayerTest, TestLogLayer) {
function TYPED_TEST (line 394) | TYPED_TEST(NeuronLayerTest, TestLogGradient) {
function TYPED_TEST (line 403) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) {
function TYPED_TEST (line 411) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) {
function TYPED_TEST (line 419) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) {
function TYPED_TEST (line 427) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) {
function TYPED_TEST (line 435) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) {
function TYPED_TEST (line 443) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) {
function TYPED_TEST (line 451) | TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) {
function TYPED_TEST (line 459) | TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) {
function TYPED_TEST (line 467) | TYPED_TEST(NeuronLayerTest, TestDropoutHalf) {
function TYPED_TEST (line 472) | TYPED_TEST(NeuronLayerTest, TestDropoutThreeQuarters) {
function TYPED_TEST (line 477) | TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) {
function TYPED_TEST (line 494) | TYPED_TEST(NeuronLayerTest, TestDropoutGradient) {
function TYPED_TEST (line 504) | TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) {
function TYPED_TEST (line 514) | TYPED_TEST(NeuronLayerTest, TestBNLL) {
function TYPED_TEST (line 529) | TYPED_TEST(NeuronLayerTest, TestBNLLGradient) {
function TYPED_TEST (line 538) | TYPED_TEST(NeuronLayerTest, TestPReLUParam) {
function TYPED_TEST (line 550) | TYPED_TEST(NeuronLayerTest, TestPReLUForward) {
function TYPED_TEST (line 561) | TYPED_TEST(NeuronLayerTest, TestPReLUForwardChannelShared) {
function TYPED_TEST (line 570) | TYPED_TEST(NeuronLayerTest, TestPReLUGradient) {
function TYPED_TEST (line 583) | TYPED_TEST(NeuronLayerTest, TestPReLUGradientChannelShared) {
function TYPED_TEST (line 594) | TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) {
function TYPED_TEST (line 637) | TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) {
class CuDNNNeuronLayerTest (line 714) | class CuDNNNeuronLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNNeuronLayerTest (line 716) | CuDNNNeuronLayerTest()
function TYPED_TEST (line 736) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) {
function TYPED_TEST (line 750) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) {
function TYPED_TEST (line 758) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) {
function TYPED_TEST (line 777) | TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDN...
function TYPED_TEST (line 787) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) {
function TYPED_TEST (line 803) | TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) {
function TYPED_TEST (line 811) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) {
function TYPED_TEST (line 833) | TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) {
FILE: src/caffe/test/test_platform.cpp
type caffe (line 11) | namespace caffe {
class PlatformTest (line 15) | class PlatformTest : public ::testing::Test {}
function TEST_F (line 17) | TEST_F(PlatformTest, TestInitialization) {
FILE: src/caffe/test/test_pooling_layer.cpp
type caffe (line 14) | namespace caffe {
class PoolingLayerTest (line 17) | class PoolingLayerTest : public MultiDeviceTest<TypeParam> {
method PoolingLayerTest (line 21) | PoolingLayerTest()
method SetUp (line 25) | virtual void SetUp() {
method TestForwardSquare (line 46) | void TestForwardSquare() {
method TestForwardRectHigh (line 118) | void TestForwardRectHigh() {
method TestForwardRectWide (line 243) | void TestForwardRectWide() {
function TYPED_TEST (line 373) | TYPED_TEST(PoolingLayerTest, TestSetup) {
function TYPED_TEST (line 387) | TYPED_TEST(PoolingLayerTest, TestSetupPadded) {
function TYPED_TEST (line 403) | TYPED_TEST(PoolingLayerTest, TestSetupGlobalPooling) {
function TYPED_TEST (line 443) | TYPED_TEST(PoolingLayerTest, TestForwardMax) {
function TYPED_TEST (line 449) | TYPED_TEST(PoolingLayerTest, TestForwardMaxTopMask) {
function TYPED_TEST (line 456) | TYPED_TEST(PoolingLayerTest, TestGradientMax) {
function TYPED_TEST (line 475) | TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) {
function TYPED_TEST (line 520) | TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) {
function TYPED_TEST (line 540) | TYPED_TEST(PoolingLayerTest, TestForwardAve) {
function TYPED_TEST (line 572) | TYPED_TEST(PoolingLayerTest, TestGradientAve) {
function TYPED_TEST (line 590) | TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) {
class CuDNNPoolingLayerTest (line 611) | class CuDNNPoolingLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNPoolingLayerTest (line 613) | CuDNNPoolingLayerTest()
method SetUp (line 617) | virtual void SetUp() {
method TestForwardSquare (line 638) | void TestForwardSquare() {
method TestForwardRectHigh (line 710) | void TestForwardRectHigh() {
method TestForwardRectWide (line 835) | void TestForwardRectWide() {
function TYPED_TEST (line 965) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) {
function TYPED_TEST (line 978) | TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) {
function TYPED_TEST (line 1019) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) {
function TYPED_TEST (line 1036) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) {
function TYPED_TEST (line 1055) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) {
function TYPED_TEST (line 1120) | TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) {
function TYPED_TEST (line 1145) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) {
function TYPED_TEST (line 1162) | TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) {
FILE: src/caffe/test/test_power_layer.cpp
type caffe (line 14) | namespace caffe {
class PowerLayerTest (line 17) | class PowerLayerTest : public MultiDeviceTest<TypeParam> {
method PowerLayerTest (line 21) | PowerLayerTest()
method TestForward (line 34) | void TestForward(Dtype power, Dtype scale, Dtype shift) {
method TestBackward (line 61) | void TestBackward(Dtype power, Dtype scale, Dtype shift) {
function TYPED_TEST (line 90) | TYPED_TEST(PowerLayerTest, TestPower) {
function TYPED_TEST (line 98) | TYPED_TEST(PowerLayerTest, TestPowerGradient) {
function TYPED_TEST (line 106) | TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZero) {
function TYPED_TEST (line 114) | TYPED_TEST(PowerLayerTest, TestPowerZero) {
function TYPED_TEST (line 122) | TYPED_TEST(PowerLayerTest, TestPowerZeroGradient) {
function TYPED_TEST (line 130) | TYPED_TEST(PowerLayerTest, TestPowerOne) {
function TYPED_TEST (line 138) | TYPED_TEST(PowerLayerTest, TestPowerOneGradient) {
function TYPED_TEST (line 146) | TYPED_TEST(PowerLayerTest, TestPowerTwo) {
function TYPED_TEST (line 154) | TYPED_TEST(PowerLayerTest, TestPowerTwoGradient) {
function TYPED_TEST (line 162) | TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradient) {
FILE: src/caffe/test/test_protobuf.cpp
type caffe (line 12) | namespace caffe {
class ProtoTest (line 14) | class ProtoTest : public ::testing::Test {}
function TEST_F (line 16) | TEST_F(ProtoTest, TestSerialization) {
FILE: src/caffe/test/test_random_number_generator.cpp
type caffe (line 12) | namespace caffe {
class RandomNumberGeneratorTest (line 15) | class RandomNumberGeneratorTest : public ::testing::Test {
method RandomNumberGeneratorTest (line 17) | RandomNumberGeneratorTest()
method SetUp (line 26) | virtual void SetUp() {
method Dtype (line 30) | Dtype sample_mean(const Dtype* const seqs, const int sample_size) {
method Dtype (line 38) | Dtype sample_mean(const Dtype* const seqs) {
method Dtype (line 42) | Dtype sample_mean(const int* const seqs, const int sample_size) {
method Dtype (line 50) | Dtype sample_mean(const int* const seqs) {
method Dtype (line 54) | Dtype mean_bound(const Dtype std, const int sample_size) {
method Dtype (line 58) | Dtype mean_bound(const Dtype std) {
method RngGaussianFill (line 62) | void RngGaussianFill(const Dtype mu, const Dtype sigma, void* cpu_da...
method RngGaussianChecks (line 67) | void RngGaussianChecks(const Dtype mu, const Dtype sigma,
method RngUniformFill (line 105) | void RngUniformFill(const Dtype lower, const Dtype upper, void* cpu_...
method RngUniformChecks (line 111) | void RngUniformChecks(const Dtype lower, const Dtype upper,
method RngBernoulliFill (line 158) | void RngBernoulliFill(const Dtype p, void* cpu_data) {
method RngBernoulliChecks (line 163) | void RngBernoulliChecks(const Dtype p, const void* cpu_data) {
method RngGaussianFillGPU (line 174) | void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu...
method RngUniformFillGPU (line 179) | void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* g...
method RngUniformIntFillGPU (line 187) | void RngUniformIntFillGPU(void* gpu_data) {
function TYPED_TEST (line 210) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) {
function TYPED_TEST (line 219) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2) {
function TYPED_TEST (line 228) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform) {
function TYPED_TEST (line 237) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2) {
function TYPED_TEST (line 246) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli) {
function TYPED_TEST (line 254) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli2) {
function TYPED_TEST (line 262) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) {
function TYPED_TEST (line 288) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) {
function TYPED_TEST (line 316) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) {
function TYPED_TEST (line 341) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) {
function TYPED_TEST (line 366) | TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) {
function TYPED_TEST (line 401) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianGPU) {
function TYPED_TEST (line 411) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2GPU) {
function TYPED_TEST (line 421) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformGPU) {
function TYPED_TEST (line 431) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) {
function TYPED_TEST (line 441) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) {
function TYPED_TEST (line 458) | TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) {
function TYPED_TEST (line 489) | TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) {
FILE: src/caffe/test/test_reduction_layer.cpp
type caffe (line 14) | namespace caffe {
class ReductionLayerTest (line 17) | class ReductionLayerTest : public MultiDeviceTest<TypeParam> {
method ReductionLayerTest (line 21) | ReductionLayerTest()
method TestForward (line 37) | void TestForward(ReductionParameter_ReductionOp op,
method TestGradient (line 81) | void TestGradient(ReductionParameter_ReductionOp op,
function TYPED_TEST (line 103) | TYPED_TEST(ReductionLayerTest, TestSetUp) {
function TYPED_TEST (line 112) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis1) {
function TYPED_TEST (line 123) | TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis2) {
function TYPED_TEST (line 135) | TYPED_TEST(ReductionLayerTest, TestSum) {
function TYPED_TEST (line 140) | TYPED_TEST(ReductionLayerTest, TestSumCoeff) {
function TYPED_TEST (line 146) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) {
function TYPED_TEST (line 153) | TYPED_TEST(ReductionLayerTest, TestSumGradient) {
function TYPED_TEST (line 158) | TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) {
function TYPED_TEST (line 164) | TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) {
function TYPED_TEST (line 171) | TYPED_TEST(ReductionLayerTest, TestMean) {
function TYPED_TEST (line 177) | TYPED_TEST(ReductionLayerTest, TestMeanCoeff) {
function TYPED_TEST (line 184) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) {
function TYPED_TEST (line 192) | TYPED_TEST(ReductionLayerTest, TestMeanGradient) {
function TYPED_TEST (line 198) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradient) {
function TYPED_TEST (line 205) | TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) {
function TYPED_TEST (line 213) | TYPED_TEST(ReductionLayerTest, TestAbsSum) {
function TYPED_TEST (line 219) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeff) {
function TYPED_TEST (line 226) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) {
function TYPED_TEST (line 234) | TYPED_TEST(ReductionLayerTest, TestAbsSumGradient) {
function TYPED_TEST (line 240) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffGradient) {
function TYPED_TEST (line 247) | TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) {
function TYPED_TEST (line 255) | TYPED_TEST(ReductionLayerTest, TestSumOfSquares) {
function TYPED_TEST (line 261) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeff) {
function TYPED_TEST (line 268) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) {
function TYPED_TEST (line 276) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresGradient) {
function TYPED_TEST (line 282) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffGradient) {
function TYPED_TEST (line 289) | TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) {
FILE: src/caffe/test/test_reshape_layer.cpp
type caffe (line 14) | namespace caffe {
class ReshapeLayerTest (line 17) | class ReshapeLayerTest : public MultiDeviceTest<TypeParam> {
method ReshapeLayerTest (line 20) | ReshapeLayerTest()
function TYPED_TEST (line 41) | TYPED_TEST(ReshapeLayerTest, TestFlattenOutputSizes) {
function TYPED_TEST (line 58) | TYPED_TEST(ReshapeLayerTest, TestFlattenValues) {
function TYPED_TEST (line 79) | TYPED_TEST(ReshapeLayerTest, TestCopyDimensions) {
function TYPED_TEST (line 98) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecified) {
function TYPED_TEST (line 118) | TYPED_TEST(ReshapeLayerTest, TestInferenceOfUnspecifiedWithStartAxis) {
function TYPED_TEST (line 137) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesStart) {
function TYPED_TEST (line 160) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesMiddle) {
function TYPED_TEST (line 183) | TYPED_TEST(ReshapeLayerTest, TestInsertSingletonAxesEnd) {
function TYPED_TEST (line 206) | TYPED_TEST(ReshapeLayerTest, TestFlattenMiddle) {
function TYPED_TEST (line 223) | TYPED_TEST(ReshapeLayerTest, TestForward) {
function TYPED_TEST (line 240) | TYPED_TEST(ReshapeLayerTest, TestForwardAfterReshape) {
function TYPED_TEST (line 266) | TYPED_TEST(ReshapeLayerTest, TestGradient) {
FILE: src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
type caffe (line 16) | namespace caffe {
class SigmoidCrossEntropyLossLayerTest (line 19) | class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest<TypePa...
method SigmoidCrossEntropyLossLayerTest (line 23) | SigmoidCrossEntropyLossLayerTest()
method Dtype (line 48) | Dtype SigmoidCrossEntropyLossReference(const int count, const int num,
method TestForward (line 64) | void TestForward() {
function TYPED_TEST (line 105) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLo...
function TYPED_TEST (line 109) | TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) {
FILE: src/caffe/test/test_slice_layer.cpp
type caffe (line 14) | namespace caffe {
class SliceLayerTest (line 17) | class SliceLayerTest : public MultiDeviceTest<TypeParam> {
method SliceLayerTest (line 21) | SliceLayerTest()
method SetUp (line 26) | virtual void SetUp() {
method ReduceBottomBlobSize (line 40) | virtual void ReduceBottomBlobSize() {
function TYPED_TEST (line 62) | TYPED_TEST(SliceLayerTest, TestSetupNum) {
function TYPED_TEST (line 76) | TYPED_TEST(SliceLayerTest, TestSetupChannels) {
function TYPED_TEST (line 91) | TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) {
function TYPED_TEST (line 121) | TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) {
function TYPED_TEST (line 164) | TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) {
function TYPED_TEST (line 176) | TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
FILE: src/caffe/test/test_softmax_layer.cpp
type caffe (line 15) | namespace caffe {
class SoftmaxLayerTest (line 18) | class SoftmaxLayerTest : public MultiDeviceTest<TypeParam> {
method SoftmaxLayerTest (line 21) | SoftmaxLayerTest()
function TYPED_TEST (line 40) | TYPED_TEST(SoftmaxLayerTest, TestForward) {
function TYPED_TEST (line 74) | TYPED_TEST(SoftmaxLayerTest, TestGradient) {
class CuDNNSoftmaxLayerTest (line 85) | class CuDNNSoftmaxLayerTest : public GPUDeviceTest<Dtype> {
method CuDNNSoftmaxLayerTest (line 87) | CuDNNSoftmaxLayerTest()
function TYPED_TEST (line 106) | TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) {
function TYPED_TEST (line 139) | TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) {
FILE: src/caffe/test/test_softmax_with_loss_layer.cpp
type caffe (line 19) | namespace caffe {
class SoftmaxWithLossLayerTest (line 22) | class SoftmaxWithLossLayerTest : public MultiDeviceTest<TypeParam> {
method SoftmaxWithLossLayerTest (line 26) | SoftmaxWithLossLayerTest()
function TYPED_TEST (line 56) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
function TYPED_TEST (line 66) | TYPED_TEST(SoftmaxWithLossLayerTest, TestForwardIgnoreLabel) {
function TYPED_TEST (line 89) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientIgnoreLabel) {
function TYPED_TEST (line 100) | TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientUnnormalized) {
FILE: src/caffe/test/test_solver.cpp
type caffe (line 16) | namespace caffe {
class SolverTest (line 19) | class SolverTest : public MultiDeviceTest<TypeParam> {
method InitSolverFromProtoString (line 23) | virtual void InitSolverFromProtoString(const string& proto) {
function TYPED_TEST (line 45) | TYPED_TEST(SolverTest, TestInitTrainTestNets) {
FILE: src/caffe/test/test_split_layer.cpp
type caffe (line 18) | namespace caffe {
class SplitLayerTest (line 21) | class SplitLayerTest : public MultiDeviceTest<TypeParam> {
method SplitLayerTest (line 25) | SplitLayerTest()
function TYPED_TEST (line 51) | TYPED_TEST(SplitLayerTest, TestSetup) {
function TYPED_TEST (line 66) | TYPED_TEST(SplitLayerTest, Test) {
function TYPED_TEST (line 79) | TYPED_TEST(SplitLayerTest, TestGradient) {
class SplitLayerInsertionTest (line 89) | class SplitLayerInsertionTest : public ::testing::Test {
method RunInsertionTest (line 91) | void RunInsertionTest(
function TEST_F (line 114) | TEST_F(SplitLayerInsertionTest, TestNoInsertion1) {
function TEST_F (line 138) | TEST_F(SplitLayerInsertionTest, TestNoInsertion2) {
function TEST_F (line 175) | TEST_F(SplitLayerInsertionTest, TestNoInsertionImageNet) {
function TEST_F (line 530) | TEST_F(SplitLayerInsertionTest, TestNoInsertionWithInPlace) {
function TEST_F (line 560) | TEST_F(SplitLayerInsertionTest, TestLossInsertion) {
function TEST_F (line 689) | TEST_F(SplitLayerInsertionTest, TestInsertion) {
function TEST_F (line 784) | TEST_F(SplitLayerInsertionTest, TestInsertionTwoTop) {
function TEST_F (line 890) | TEST_F(SplitLayerInsertionTest, TestInputInsertion) {
function TEST_F (line 951) | TEST_F(SplitLayerInsertionTest, TestWithInPlace) {
FILE: src/caffe/test/test_spp_layer.cpp
type caffe (line 15) | namespace caffe {
class SPPLayerTest (line 18) | class SPPLayerTest : public MultiDeviceTest<TypeParam> {
method SPPLayerTest (line 22) | SPPLayerTest()
method SetUp (line 27) | virtual void SetUp() {
function TYPED_TEST (line 55) | TYPED_TEST(SPPLayerTest, TestSetup) {
function TYPED_TEST (line 72) | TYPED_TEST(SPPLayerTest, TestEqualOutputDims) {
function TYPED_TEST (line 89) | TYPED_TEST(SPPLayerTest, TestEqualOutputDims2) {
function TYPED_TEST (line 106) | TYPED_TEST(SPPLayerTest, TestForwardBackward) {
function TYPED_TEST (line 118) | TYPED_TEST(SPPLayerTest, TestGradient) {
FILE: src/caffe/test/test_stochastic_pooling.cpp
type caffe (line 17) | namespace caffe {
class StochasticPoolingLayerTest (line 20) | class StochasticPoolingLayerTest : public MultiDeviceTest<TypeParam> {
method StochasticPoolingLayerTest (line 24) | StochasticPoolingLayerTest()
method SetUp (line 27) | virtual void SetUp() {
class CPUStochasticPoolingLayerTest (line 51) | class CPUStochasticPoolingLayerTest
function TYPED_TEST (line 57) | TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) {
class GPUStochasticPoolingLayerTest (line 73) | class GPUStochasticPoolingLayerTest
function TYPED_TEST (line 79) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) {
function TYPED_TEST (line 122) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) {
function TYPED_TEST (line 159) | TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) {
FILE: src/caffe/test/test_syncedmem.cpp
type caffe (line 13) | namespace caffe {
class SyncedMemoryTest (line 15) | class SyncedMemoryTest : public ::testing::Test {}
function TEST_F (line 17) | TEST_F(SyncedMemoryTest, TestInitialization) {
function TEST_F (line 28) | TEST_F(SyncedMemoryTest, TestAllocationCPUGPU) {
function TEST_F (line 38) | TEST_F(SyncedMemoryTest, TestAllocationCPU) {
function TEST_F (line 46) | TEST_F(SyncedMemoryTest, TestAllocationGPU) {
function TEST_F (line 54) | TEST_F(SyncedMemoryTest, TestCPUWrite) {
function TEST_F (line 73) | TEST_F(SyncedMemoryTest, TestGPURead) {
function TEST_F (line 103) | TEST_F(SyncedMemoryTest, TestGPUWrite) {
FILE: src/caffe/test/test_tanh_layer.cpp
type caffe (line 14) | namespace caffe {
function tanh_naive (line 16) | double tanh_naive(double x) {
class TanHLayerTest (line 31) | class TanHLayerTest : public MultiDeviceTest<TypeParam> {
method TanHLayerTest (line 35) | TanHLayerTest()
method TestForward (line 45) | void TestForward(Dtype filler_std) {
method TestBackward (line 67) | void TestBackward(Dtype filler_std) {
function TYPED_TEST (line 88) | TYPED_TEST(TanHLayerTest, TestTanH) {
function TYPED_TEST (line 92) | TYPED_TEST(TanHLayerTest, TestTanHOverflow) {
function TYPED_TEST (line 97) | TYPED_TEST(TanHLayerTest, TestTanHGradient) {
FILE: src/caffe/test/test_threshold_layer.cpp
type caffe (line 12) | namespace caffe {
class ThresholdLayerTest (line 15) | class ThresholdLayerTest : public MultiDeviceTest<TypeParam> {
method ThresholdLayerTest (line 18) | ThresholdLayerTest()
function TYPED_TEST (line 39) | TYPED_TEST(ThresholdLayerTest, TestSetup) {
function TYPED_TEST (line 50) | TYPED_TEST(ThresholdLayerTest, Test) {
function TYPED_TEST (line 72) | TYPED_TEST(ThresholdLayerTest, Test2) {
FILE: src/caffe/test/test_upgrade_proto.cpp
type caffe (line 18) | namespace caffe {
class PaddingLayerUpgradeTest (line 20) | class PaddingLayerUpgradeTest : public ::testing::Test {
method RunPaddingUpgradeTest (line 22) | void RunPaddingUpgradeTest(
function TEST_F (line 45) | TEST_F(PaddingLayerUpgradeTest, TestSimple) {
function TEST_F (line 193) | TEST_F(PaddingLayerUpgradeTest, TestTwoTops) {
function TEST_F (line 388) | TEST_F(PaddingLayerUpgradeTest, TestImageNet) {
class NetUpgradeTest (line 1090) | class NetUpgradeTest : public ::testing::Test {
method RunV0UpgradeTest (line 1092) | void RunV0UpgradeTest(
method RunV1UpgradeTest (line 1109) | void RunV1UpgradeTest(
function TEST_F (line 1127) | TEST_F(NetUpgradeTest, TestSimple) {
function TEST_F (line 1354) | TEST_F(NetUpgradeTest, TestAllParams) {
function TEST_F (line 1854) | TEST_F(NetUpgradeTest, TestImageNet) {
function TEST_F (line 2895) | TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) {
FILE: src/caffe/test/test_util_blas.cpp
type caffe (line 13) | namespace caffe {
class GemmTest (line 18) | class GemmTest : public ::testing::Test {}
function TYPED_TEST (line 22) | TYPED_TEST(GemmTest, TestGemmCPUGPU) {
function TYPED_TEST (line 93) | TYPED_TEST(GemmTest, TestGemvCPUGPU) {
FILE: src/caffe/util/benchmark.cpp
type caffe (line 6) | namespace caffe {
FILE: src/caffe/util/blocking_queue.cpp
type caffe (line 9) | namespace caffe {
class BlockingQueue<T>::sync (line 12) | class BlockingQueue<T>::sync {
function T (line 45) | T BlockingQueue<T>::pop(const string& log_on_wait) {
function T (line 73) | T BlockingQueue<T>::peek() {
class BlockingQueue<Batch<float>*> (line 89) | class BlockingQueue<Batch<float>*>
class BlockingQueue<Batch<double>*> (line 90) | class BlockingQueue<Batch<double>*>
class BlockingQueue<Datum*> (line 91) | class BlockingQueue<Datum*>
class BlockingQueue<shared_ptr<DataReader::QueuePair> > (line 92) | class BlockingQueue<shared_ptr<DataReader::QueuePair> >
class BlockingQueue<P2PSync<float>*> (line 93) | class BlockingQueue<P2PSync<float>*>
class BlockingQueue<P2PSync<double>*> (line 94) | class BlockingQueue<P2PSync<double>*>
FILE: src/caffe/util/cudnn.cpp
type caffe (line 4) | namespace caffe {
type cudnn (line 5) | namespace cudnn {
FILE: src/caffe/util/db.cpp
type caffe (line 7) | namespace caffe { namespace db {
type db (line 7) | namespace db {
function DB (line 9) | DB* GetDB(DataParameter::DB backend) {
function DB (line 20) | DB* GetDB(const string& backend) {
FILE: src/caffe/util/db_leveldb.cpp
type caffe (line 5) | namespace caffe { namespace db {
type db (line 5) | namespace db {
FILE: src/caffe/util/db_lmdb.cpp
type caffe (line 7) | namespace caffe { namespace db {
type db (line 7) | namespace db {
function LMDBCursor (line 25) | LMDBCursor* LMDB::NewCursor() {
function LMDBTransaction (line 34) | LMDBTransaction* LMDB::NewTransaction() {
FILE: src/caffe/util/hdf5.cpp
type caffe (line 6) | namespace caffe {
function hdf5_load_nd_dataset_helper (line 10) | void hdf5_load_nd_dataset_helper(
function string (line 99) | string hdf5_load_string(hid_t loc_id, const string& dataset_name) {
function hdf5_save_string (line 115) | void hdf5_save_string(hid_t loc_id, const string& dataset_name,
function hdf5_load_int (line 123) | int hdf5_load_int(hid_t loc_id, const string& dataset_name) {
function hdf5_save_int (line 131) | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i) {
function hdf5_get_num_links (line 139) | int hdf5_get_num_links(hid_t loc_id) {
function string (line 146) | string hdf5_get_name_by_idx(hid_t loc_id, int idx) {
FILE: src/caffe/util/im2col.cpp
type caffe (line 8) | namespace caffe {
function im2col_cpu (line 11) | void im2col_cpu(const Dtype* data_im, const int channels,
function col2im_cpu (line 48) | void col2im_cpu(const Dtype* data_col, const int channels,
FILE: src/caffe/util/insert_splits.cpp
type caffe (line 10) | namespace caffe {
function InsertSplits (line 12) | void InsertSplits(const NetParameter& param, NetParameter* param_split) {
function ConfigureSplitLayer (line 108) | void ConfigureSplitLayer(const string& layer_name, const string& blob_...
function string (line 128) | string SplitLayerName(const string& layer_name, const string& blob_name,
function string (line 136) | string SplitBlobName(const string& layer_name, const string& blob_name,
FILE: src/caffe/util/io.cpp
type caffe (line 22) | namespace caffe {
function ReadProtoFromTextFile (line 32) | bool ReadProtoFromTextFile(const char* filename, Message* proto) {
function WriteProtoToTextFile (line 42) | void WriteProtoToTextFile(const Message& proto, const char* filename) {
function ReadProtoFromBinaryFile (line 50) | bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
function WriteProtoToBinaryFile (line 65) | void WriteProtoToBinaryFile(const Message& proto, const char* filename) {
function ReadImageToCVMat (line 70) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 88) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 93) | cv::Mat ReadImageToCVMat(const string& filename,
function ReadImageToCVMat (line 98) | cv::Mat ReadImageToCVMat(const string& filename) {
function matchExt (line 102) | static bool matchExt(const std::string & fn,
function ReadImageToDatum (line 114) | bool ReadImageToDatum(const string& filename, const int label,
function ReadFileToDatum (line 139) | bool ReadFileToDatum(const string& filename, const int label,
function DecodeDatumToCVMatNative (line 159) | cv::Mat DecodeDatumToCVMatNative(const Datum& datum) {
function DecodeDatumToCVMat (line 170) | cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color) {
function DecodeDatumNative (line 186) | bool DecodeDatumNative(Datum* datum) {
function DecodeDatum (line 195) | bool DecodeDatum(Datum* datum, bool is_color) {
function CVMatToDatum (line 205) | void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) {
FILE: src/caffe/util/math_functions.cpp
type caffe (line 10) | namespace caffe {
function caffe_set (line 57) | void caffe_set(const int N, const Dtype alpha, Dtype* Y) {
function caffe_add_scalar (line 72) | void caffe_add_scalar(const int N, const float alpha, float* Y) {
function caffe_add_scalar (line 79) | void caffe_add_scalar(const int N, const double alpha, double* Y) {
function caffe_copy (line 86) | void caffe_copy(const int N, const Dtype* X, Dtype* Y) {
function caffe_rng_rand (line 229) | unsigned int caffe_rng_rand() {
function Dtype (line 234) | Dtype caffe_nextafter(const Dtype b) {
function caffe_rng_uniform (line 246) | void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtyp...
function caffe_rng_gaussian (line 267) | void caffe_rng_gaussian(const int n, const Dtype a,
function caffe_rng_bernoulli (line 289) | void caffe_rng_bernoulli(const int n, const Dtype p, int* r) {
function caffe_rng_bernoulli (line 309) | void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) {
function Dtype (line 341) | Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) {
FILE: src/caffe/util/upgrade_proto.cpp
type caffe (line 13) | namespace caffe {
function NetNeedsUpgrade (line 15) | bool NetNeedsUpgrade(const NetParameter& net_param) {
function NetNeedsV0ToV1Upgrade (line 19) | bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) {
function NetNeedsV1ToV2Upgrade (line 28) | bool NetNeedsV1ToV2Upgrade(const NetParameter& net_param) {
function UpgradeV0Net (line 32) | bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers,
function UpgradeV0PaddingLayers (line 59) | void UpgradeV0PaddingLayers(const NetParameter& param,
function UpgradeV0LayerParameter (line 118) | bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
function V1LayerParameter_LayerType (line 470) | V1LayerParameter_LayerType UpgradeV0LayerType(const string& type) {
function NetNeedsDataUpgrade (line 525) | bool NetNeedsDataUpgrade(const NetParameter& net_param) {
function UpgradeNetDataTransformation (line 578) | void UpgradeNetDataTransformation(NetParameter* net_param) {
function UpgradeNetAsNeeded (line 586) | bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
function UpgradeV1Net (line 633) | bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_...
function UpgradeV1LayerParameter (line 653) | bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
function ReadNetParamsFromTextFileOrDie (line 926) | void ReadNetParamsFromTextFileOrDie(const string& param_file,
function ReadNetParamsFromBinaryFileOrDie (line 933) | void ReadNetParamsFromBinaryFileOrDie(const string& param_file,
FILE: src/gtest/gtest-all.cpp
type testing (line 113) | namespace testing {
function ScopedFakeTestPartResultReporter (line 124) | class GTEST_API_ ScopedFakeTestPartResultReporter
function AssertionResult (line 2510) | AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
type internal (line 6293) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
type internal (line 6334) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
function SplitString (line 7294) | static void SplitString(const ::std::string& str, char delimiter,
function GetStatusFileDescriptor (line 7315) | int GetStatusFileDescriptor(unsigned int parent_process_id,
function InternalRunDeathTestFlag (line 7380) | InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
type internal (line 7490) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
type internal (line 7870) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
function PrintByteSegmentInObjectTo (line 8612) | void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t...
function PrintBytesInObjectToImpl (line 8631) | void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t c...
type internal2 (line 8656) | namespace internal2 {
function PrintBytesInObjectTo (line 8663) | void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
type internal (line 8670) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
function TestPartResult (line 8972) | const TestPartResult& TestPartResultArray::GetTestPartResult(int index...
type internal (line 8986) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo (line 8888) | void PrintWideStringTo(const ::wstring& s, ostream* os) {
function PrintWideStringTo (line 8894) | void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
type internal (line 9043) | namespace internal {
function String (line 6340) | static String ExitSummary(int exit_code) {
function ExitedUnsuccessfully (line 6366) | bool ExitedUnsuccessfully(int exit_status) {
function String (line 6375) | static String DeathTestThreadWarning(size_t thread_count) {
type DeathTestOutcome (line 6402) | enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }
function DeathTestAbort (line 6409) | void DeathTestAbort(const String& message) {
function String (line 6460) | String GetLastErrnoDescription() {
function FailFromInternalError (line 6468) | static void FailFromInternalError(int fd) {
class DeathTestImpl (line 6518) | class DeathTestImpl : public DeathTest {
method DeathTestImpl (line 6520) | DeathTestImpl(const char* a_statement, const RE* a_regex)
method RE (line 6536) | const RE* regex() const { return regex_; }
method spawned (line 6537) | bool spawned() const { return spawned_; }
method set_spawned (line 6538) | void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
method status (line 6539) | int status() const { return status_; }
method set_status (line 6540) | void set_status(int a_status) { status_ = a_status; }
method DeathTestOutcome (line 6541) | DeathTestOutcome outcome() const { return outcome_; }
method set_outcome (line 6542) | void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outc...
method read_fd (line 6543) | int read_fd() const { return read_fd_; }
method set_read_fd (line 6544) | void set_read_fd(int fd) { read_fd_ = fd; }
method write_fd (line 6545) | int write_fd() const { return write_fd_; }
method set_write_fd (line 6546) | void set_write_fd(int fd) { write_fd_ = fd; }
function FormatDeathTestOutput (line 6649) | static ::std::string FormatDeathTestOutput(const ::std::string& outp...
class WindowsDeathTest (line 6764) | class WindowsDeathTest : public DeathTestImpl {
method WindowsDeathTest (line 6766) | WindowsDeathTest(const char* a_statement,
class ForkingDeathTest (line 6936) | class ForkingDeathTest : public DeathTestImpl {
method set_child_pid (line 6944) | void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
class NoExecDeathTest (line 6973) | class NoExecDeathTest : public ForkingDeathTest {
method NoExecDeathTest (line 6975) | NoExecDeathTest(const char* a_statement, const RE* a_regex) :
class ExecDeathTest (line 7027) | class ExecDeathTest : public ForkingDeathTest {
method ExecDeathTest (line 7029) | ExecDeathTest(const char* a_statement, const RE* a_regex,
class Arguments (line 7041) | class Arguments {
method Arguments (line 7043) | Arguments() {
method AddArgument (line 7053) | void AddArgument(const char* argument) {
method AddArguments (line 7058) | void AddArguments(const ::std::vector<Str>& arguments) {
type ExecDeathTestArgs (line 7074) | struct ExecDeathTestArgs {
function ExecDeathTestChildMain (line 7096) | static int ExecDeathTestChildMain(void* child_arg) {
function StackLowerThanAddress (line 7136) | bool StackLowerThanAddress(const void* ptr) {
function StackGrowsDown (line 7141) | bool StackGrowsDown() {
function pid_t (line 7149) | static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
function IsPathSeparator (line 7518) | static bool IsPathSeparator(char c) {
function FilePath (line 7527) | FilePath FilePath::GetCurrentDir() {
function FilePath (line 7545) | FilePath FilePath::RemoveExtension(const char* extension) const {
function FilePath (line 7575) | FilePath FilePath::RemoveDirectoryName() const {
function FilePath (line 7586) | FilePath FilePath::RemoveFileName() const {
function FilePath (line 7603) | FilePath FilePath::MakeFileName(const FilePath& directory,
function FilePath (line 7618) | FilePath FilePath::ConcatPaths(const FilePath& directory,
function FilePath (line 7706) | FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
function FilePath (line 7766) | FilePath FilePath::RemoveTrailingPathSeparator() const {
function GetThreadCount (line 7885) | size_t GetThreadCount() {
function GetThreadCount (line 7904) | size_t GetThreadCount() {
function IsInSet (line 7979) | bool IsInSet(char ch, const char* str) {
function IsAsciiDigit (line 7986) | bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
function IsAsciiPunct (line 7987) | bool IsAsciiPunct(char ch) {
function IsRepeat (line 7990) | bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
function IsAsciiWhiteSpace (line 7991) | bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
function IsAsciiWordChar (line 7992) | bool IsAsciiWordChar(char ch) {
function IsValidEscape (line 7998) | bool IsValidEscape(char c) {
function AtomMatchesChar (line 8004) | bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
function String (line 8026) | String FormatRegexSyntaxError(const char* regex, int index) {
function ValidateRegex (line 8033) | bool ValidateRegex(const char* regex) {
function MatchRepetitionAndRegexAtHead (line 8096) | bool MatchRepetitionAndRegexAtHead(
function MatchRegexAtHead (line 8123) | bool MatchRegexAtHead(const char* regex, const char* str) {
function MatchRegexAnywhere (line 8159) | bool MatchRegexAnywhere(const char* regex, const char* str) {
function FormatFileLocation (line 8232) | GTEST_API_ ::std::string FormatFileLocation(const char* file, int li...
function FormatCompilerIndependentFileLocation (line 8250) | GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
class CapturedStream (line 8289) | class CapturedStream {
method CapturedStream (line 8292) | CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
method String (line 8326) | String GetCapturedString() {
function String (line 8363) | String CapturedStream::ReadEntireFile(FILE* file) {
function CaptureStream (line 8393) | void CaptureStream(int fd, const char* stream_name, CapturedStream**...
function String (line 8402) | String GetCapturedStream(CapturedStream** captured_stream) {
function CaptureStdout (line 8412) | void CaptureStdout() {
function CaptureStderr (line 8417) | void CaptureStderr() {
function String (line 8422) | String GetCapturedStdout() { return GetCapturedStream(&g_captured_st...
function String (line 8425) | String GetCapturedStderr() { return GetCapturedStream(&g_captured_st...
type posix (line 8440) | namespace posix {
function Abort (line 8441) | void Abort() {
function String (line 8451) | static String FlagToEnvVar(const char* flag) {
function ParseInt32 (line 8466) | bool ParseInt32(const Message& src_text, const char* str, Int32* val...
function BoolFromGTestEnv (line 8508) | bool BoolFromGTestEnv(const char* flag, bool default_value) {
function Int32 (line 8518) | Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
type CharFormat (line 8677) | enum CharFormat {
function IsPrintableAscii (line 8686) | inline bool IsPrintableAscii(wchar_t c) {
function CharFormat (line 8695) | static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
function CharFormat (line 8741) | static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
function CharFormat (line 8756) | static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
function PrintCharAndCodeTo (line 8765) | void PrintCharAndCodeTo(Char c, ostream* os) {
function PrintTo (line 8790) | void PrintTo(unsigned char c, ::std::ostream* os) {
function PrintTo (line 8793) | void PrintTo(signed char c, ::std::ostream* os) {
function PrintTo (line 8799) | void PrintTo(wchar_t wc, ostream* os) {
function PrintCharsAsStringTo (line 8806) | static void PrintCharsAsStringTo(const char* begin, size_t len, ostr...
function UniversalPrintArray (line 8823) | void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
function PrintWideCharsAsStringTo (line 8830) | static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
function PrintTo (line 8848) | void PrintTo(const char* s, ostream* os) {
function PrintTo (line 8865) | void PrintTo(const wchar_t* s, ostream* os) {
function PrintStringTo (line 8877) | void PrintStringTo(const ::string& s, ostream* os) {
function PrintStringTo (line 8882) | void PrintStringTo(const ::std::string& s, ostream* os) {
function PrintWideStringTo
Condensed preview — 271 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,934K chars).
[
{
"path": "CMakeLists.txt",
"chars": 2361,
"preview": "cmake_minimum_required(VERSION 2.8.7)\n\n# ---[ Caffe project\nproject(Caffe C CXX)\n\n# ---[ Using cmake scripts and modules"
},
{
"path": "CONTRIBUTING.md",
"chars": 1917,
"preview": "# Contributing\n\n## Issues\n\nSpecific Caffe design and development issues, bugs, and feature requests are maintained by Gi"
},
{
"path": "CONTRIBUTORS.md",
"chars": 620,
"preview": "# Contributors\n\nCaffe is developed by a core set of BVLC members and the open-source community.\n\nWe thank all of our [co"
},
{
"path": "INSTALL.md",
"chars": 197,
"preview": "# Installation\n\nSee http://caffe.berkeleyvision.org/installation.html for the latest\ninstallation instructions.\n\nCheck t"
},
{
"path": "LICENSE",
"chars": 2195,
"preview": "COPYRIGHT\n\nAll contributions by the University of California:\nCopyright (c) 2014, 2015, The Regents of the University of"
},
{
"path": "Makefile",
"chars": 21386,
"preview": "PROJECT := caffe\n\nCONFIG_FILE := Makefile.config\n# Explicitly check for the config file, otherwise make -k will proceed "
},
{
"path": "Makefile.config.example",
"chars": 3604,
"preview": "## Refer to http://caffe.berkeleyvision.org/installation.html\n# Contributions simplifying and improving our build system"
},
{
"path": "README.md",
"chars": 2083,
"preview": "# Dynamic network surgery\n\nDynamic network surgery is a very effective method for DNN pruning. To better use it with pyt"
},
{
"path": "caffe.cloc",
"chars": 1180,
"preview": "Bourne Shell\n filter remove_matches ^\\s*#\n filter remove_inline #.*$\n extension sh\n script_exe sh\nC\n filt"
},
{
"path": "cmake/ConfigGen.cmake",
"chars": 4049,
"preview": "\n################################################################################################\n# Helper function to f"
},
{
"path": "cmake/Cuda.cmake",
"chars": 9903,
"preview": "if(CPU_ONLY)\n return()\nendif()\n\n# Known NVIDIA GPU achitectures Caffe can be compiled for.\n# This list will be used for"
},
{
"path": "cmake/Dependencies.cmake",
"chars": 5222,
"preview": "# This list is required for static linking and exported to CaffeConfig.cmake\nset(Caffe_LINKER_LIBS \"\")\n\n# ---[ Boost\nfin"
},
{
"path": "cmake/External/gflags.cmake",
"chars": 1939,
"preview": "if (NOT __GFLAGS_INCLUDED) # guard against multiple includes\n set(__GFLAGS_INCLUDED TRUE)\n\n # use the system-wide gfla"
},
{
"path": "cmake/External/glog.cmake",
"chars": 1719,
"preview": "# glog depends on gflags\ninclude(\"cmake/External/gflags.cmake\")\n\nif (NOT __GLOG_INCLUDED)\n set(__GLOG_INCLUDED TRUE)\n\n "
},
{
"path": "cmake/Misc.cmake",
"chars": 1764,
"preview": "# ---[ Configuration types\nset(CMAKE_CONFIGURATION_TYPES \"Debug;Release\" CACHE STRING \"Possible configurations\" FORCE)\nm"
},
{
"path": "cmake/Modules/FindAtlas.cmake",
"chars": 1666,
"preview": "# Find the Atlas (and Lapack) libraries\n#\n# The following variables are optionally searched for defaults\n# Atlas_ROOT_D"
},
{
"path": "cmake/Modules/FindGFlags.cmake",
"chars": 1545,
"preview": "# - Try to find GFLAGS\n#\n# The following variables are optionally searched for defaults\n# GFLAGS_ROOT_DIR: B"
},
{
"path": "cmake/Modules/FindGlog.cmake",
"chars": 1451,
"preview": "# - Try to find Glog\n#\n# The following variables are optionally searched for defaults\n# GLOG_ROOT_DIR: Base "
},
{
"path": "cmake/Modules/FindLAPACK.cmake",
"chars": 6723,
"preview": "# - Find LAPACK library\n# This module finds an installed fortran library that implements the LAPACK\n# linear-algebra int"
},
{
"path": "cmake/Modules/FindLMDB.cmake",
"chars": 1119,
"preview": "# Try to find the LMBD libraries and headers\n# LMDB_FOUND - system has LMDB lib\n# LMDB_INCLUDE_DIR - the LMDB include "
},
{
"path": "cmake/Modules/FindLevelDB.cmake",
"chars": 1728,
"preview": "# - Find LevelDB\n#\n# LevelDB_INCLUDES - List of LevelDB includes\n# LevelDB_LIBRARIES - List of libraries when using L"
},
{
"path": "cmake/Modules/FindMKL.cmake",
"chars": 3251,
"preview": "# Find the MKL libraries\n#\n# Options:\n#\n# MKL_USE_SINGLE_DYNAMIC_LIBRARY : use single dynamic library interface\n# M"
},
{
"path": "cmake/Modules/FindMatlabMex.cmake",
"chars": 1749,
"preview": "# This module looks for MatlabMex compiler\n# Defines variables:\n# Matlab_DIR - Matlab root dir\n# Matlab_mex "
},
{
"path": "cmake/Modules/FindNumPy.cmake",
"chars": 2333,
"preview": "# - Find the NumPy libraries\n# This module finds if NumPy is installed, and sets the following variables\n# indicating wh"
},
{
"path": "cmake/Modules/FindOpenBLAS.cmake",
"chars": 1539,
"preview": "\n\nSET(Open_BLAS_INCLUDE_SEARCH_PATHS\n /usr/include\n /usr/include/openblas-base\n /usr/local/include\n /usr/local/inclu"
},
{
"path": "cmake/Modules/FindSnappy.cmake",
"chars": 1071,
"preview": "# Find the Snappy libraries\n#\n# The following variables are optionally searched for defaults\n# Snappy_ROOT_DIR: Base"
},
{
"path": "cmake/Modules/FindvecLib.cmake",
"chars": 1304,
"preview": "# Find the vecLib libraries as part of Accelerate.framework or as standalon framework\n#\n# The following are set after co"
},
{
"path": "cmake/ProtoBuf.cmake",
"chars": 3733,
"preview": "# Finds Google Protocol Buffers library and compilers and extends\n# the standard cmake script with version and python ge"
},
{
"path": "cmake/Summary.cmake",
"chars": 7249,
"preview": "################################################################################################\n# Caffe status report f"
},
{
"path": "cmake/Targets.cmake",
"chars": 7135,
"preview": "################################################################################################\n# Defines global Caffe_"
},
{
"path": "cmake/Templates/CaffeConfig.cmake.in",
"chars": 1736,
"preview": "# Config file for the Caffe package.\n#\n# Note:\n# Caffe and this config file depends on opencv,\n# so put `find_packag"
},
{
"path": "cmake/Templates/CaffeConfigVersion.cmake.in",
"chars": 377,
"preview": "set(PACKAGE_VERSION \"@Caffe_VERSION@\")\n\n# Check whether the requested PACKAGE_FIND_VERSION is compatible\nif(\"${PACKAGE_V"
},
{
"path": "cmake/Templates/caffe_config.h.in",
"chars": 682,
"preview": "/* Sources directory */\n#define SOURCE_FOLDER \"${PROJECT_SOURCE_DIR}\"\n\n/* Binaries directory */\n#define BINARY_FOLDER \"$"
},
{
"path": "cmake/Utils.cmake",
"chars": 13288,
"preview": "################################################################################################\n# Command alias for deb"
},
{
"path": "cmake/lint.cmake",
"chars": 1505,
"preview": "\nset(CMAKE_SOURCE_DIR ..)\nset(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)\nset(SRC_FILE_EXTENSIONS h hpp hu c c"
},
{
"path": "include/caffe/blob.hpp",
"chars": 9342,
"preview": "#ifndef CAFFE_BLOB_HPP_\n#define CAFFE_BLOB_HPP_\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n#include \"caf"
},
{
"path": "include/caffe/caffe.hpp",
"chars": 593,
"preview": "// caffe.hpp is the header file that you need to include in your code. It wraps\n// all the internal caffe header files i"
},
{
"path": "include/caffe/common.hpp",
"chars": 5831,
"preview": "#ifndef CAFFE_COMMON_HPP_\n#define CAFFE_COMMON_HPP_\n\n#include <boost/shared_ptr.hpp>\n#include <gflags/gflags.h>\n#include"
},
{
"path": "include/caffe/common_layers.hpp",
"chars": 24589,
"preview": "#ifndef CAFFE_COMMON_LAYERS_HPP_\n#define CAFFE_COMMON_LAYERS_HPP_\n\n#include <string>\n#include <utility>\n#include <vector"
},
{
"path": "include/caffe/data_layers.hpp",
"chars": 12205,
"preview": "#ifndef CAFFE_DATA_LAYERS_HPP_\n#define CAFFE_DATA_LAYERS_HPP_\n\n#include <string>\n#include <utility>\n#include <vector>\n\n#"
},
{
"path": "include/caffe/data_reader.hpp",
"chars": 2167,
"preview": "#ifndef CAFFE_DATA_READER_HPP_\n#define CAFFE_DATA_READER_HPP_\n\n#include <map>\n#include <string>\n#include <vector>\n\n#incl"
},
{
"path": "include/caffe/data_transformer.hpp",
"chars": 4825,
"preview": "#ifndef CAFFE_DATA_TRANSFORMER_HPP\n#define CAFFE_DATA_TRANSFORMER_HPP\n\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#inc"
},
{
"path": "include/caffe/filler.hpp",
"chars": 10963,
"preview": "// Fillers are random number generators that fills a blob using the specified\n// algorithm. The expectation is that they"
},
{
"path": "include/caffe/internal_thread.hpp",
"chars": 1382,
"preview": "#ifndef CAFFE_INTERNAL_THREAD_HPP_\n#define CAFFE_INTERNAL_THREAD_HPP_\n\n#include \"caffe/common.hpp\"\n\n/**\n Forward declare"
},
{
"path": "include/caffe/layer.hpp",
"chars": 18753,
"preview": "#ifndef CAFFE_LAYER_H_\n#define CAFFE_LAYER_H_\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n#include \"caffe"
},
{
"path": "include/caffe/layer_factory.hpp",
"chars": 4490,
"preview": "/**\n * @brief A layer factory that allows one to register layers.\n * During runtime, registered layers could be called b"
},
{
"path": "include/caffe/loss_layers.hpp",
"chars": 34381,
"preview": "#ifndef CAFFE_LOSS_LAYERS_HPP_\n#define CAFFE_LOSS_LAYERS_HPP_\n\n#include <string>\n#include <utility>\n#include <vector>\n\n#"
},
{
"path": "include/caffe/net.hpp",
"chars": 11720,
"preview": "#ifndef CAFFE_NET_HPP_\n#define CAFFE_NET_HPP_\n\n#include <map>\n#include <set>\n#include <string>\n#include <utility>\n#inclu"
},
{
"path": "include/caffe/neuron_layers.hpp",
"chars": 30486,
"preview": "#ifndef CAFFE_NEURON_LAYERS_HPP_\n#define CAFFE_NEURON_LAYERS_HPP_\n\n#include <string>\n#include <utility>\n#include <vector"
},
{
"path": "include/caffe/parallel.hpp",
"chars": 2765,
"preview": "#ifndef CAFFE_PARALLEL_HPP_\n#define CAFFE_PARALLEL_HPP_\n\n#include <boost/date_time/posix_time/posix_time.hpp>\n\n#include "
},
{
"path": "include/caffe/python_layer.hpp",
"chars": 1395,
"preview": "#ifndef CAFFE_PYTHON_LAYER_HPP_\n#define CAFFE_PYTHON_LAYER_HPP_\n\n#include <boost/python.hpp>\n#include <vector>\n\n#include"
},
{
"path": "include/caffe/solver.hpp",
"chars": 9093,
"preview": "#ifndef CAFFE_OPTIMIZATION_SOLVER_HPP_\n#define CAFFE_OPTIMIZATION_SOLVER_HPP_\n\n#include <string>\n#include <vector>\n\n#inc"
},
{
"path": "include/caffe/syncedmem.hpp",
"chars": 2207,
"preview": "#ifndef CAFFE_SYNCEDMEM_HPP_\n#define CAFFE_SYNCEDMEM_HPP_\n\n#include <cstdlib>\n\n#include \"caffe/common.hpp\"\n#include \"caf"
},
{
"path": "include/caffe/test/test_caffe_main.hpp",
"chars": 1738,
"preview": "// The main caffe test code. Your test cpp code should include this hpp\n// to allow a main function to be compiled into "
},
{
"path": "include/caffe/test/test_gradient_check_util.hpp",
"chars": 11063,
"preview": "#ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_\n#define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_\n\n#include <glog/logging.h>\n#include "
},
{
"path": "include/caffe/util/benchmark.hpp",
"chars": 1113,
"preview": "#ifndef CAFFE_UTIL_BENCHMARK_H_\n#define CAFFE_UTIL_BENCHMARK_H_\n\n#include <boost/date_time/posix_time/posix_time.hpp>\n\n#"
},
{
"path": "include/caffe/util/blocking_queue.hpp",
"chars": 885,
"preview": "#ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_\n#define CAFFE_UTIL_BLOCKING_QUEUE_HPP_\n\n#include <queue>\n#include <string>\n\n#incl"
},
{
"path": "include/caffe/util/cudnn.hpp",
"chars": 4128,
"preview": "#ifndef CAFFE_UTIL_CUDNN_H_\n#define CAFFE_UTIL_CUDNN_H_\n#ifdef USE_CUDNN\n\n#include <cudnn.h>\n\n#include \"caffe/common.hpp"
},
{
"path": "include/caffe/util/db.hpp",
"chars": 1065,
"preview": "#ifndef CAFFE_UTIL_DB_HPP\n#define CAFFE_UTIL_DB_HPP\n\n#include <string>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/prot"
},
{
"path": "include/caffe/util/db_leveldb.hpp",
"chars": 1831,
"preview": "#ifndef CAFFE_UTIL_DB_LEVELDB_HPP\n#define CAFFE_UTIL_DB_LEVELDB_HPP\n\n#include <string>\n\n#include \"leveldb/db.h\"\n#include"
},
{
"path": "include/caffe/util/db_lmdb.hpp",
"chars": 2196,
"preview": "#ifndef CAFFE_UTIL_DB_LMDB_HPP\n#define CAFFE_UTIL_DB_LMDB_HPP\n\n#include <string>\n\n#include \"lmdb.h\"\n\n#include \"caffe/uti"
},
{
"path": "include/caffe/util/device_alternate.hpp",
"chars": 3069,
"preview": "#ifndef CAFFE_UTIL_DEVICE_ALTERNATE_H_\n#define CAFFE_UTIL_DEVICE_ALTERNATE_H_\n\n#ifdef CPU_ONLY // CPU-only Caffe.\n\n#inc"
},
{
"path": "include/caffe/util/hdf5.hpp",
"chars": 1076,
"preview": "#ifndef CAFFE_UTIL_HDF5_H_\n#define CAFFE_UTIL_HDF5_H_\n\n#include <string>\n\n#include \"hdf5.h\"\n#include \"hdf5_hl.h\"\n\n#inclu"
},
{
"path": "include/caffe/util/im2col.hpp",
"chars": 1193,
"preview": "#ifndef _CAFFE_UTIL_IM2COL_HPP_\n#define _CAFFE_UTIL_IM2COL_HPP_\n\nnamespace caffe {\n\ntemplate <typename Dtype>\nvoid im2co"
},
{
"path": "include/caffe/util/insert_splits.hpp",
"chars": 833,
"preview": "#ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_\n#define _CAFFE_UTIL_INSERT_SPLITS_HPP_\n\n#include <string>\n\n#include \"caffe/proto/"
},
{
"path": "include/caffe/util/io.hpp",
"chars": 4678,
"preview": "#ifndef CAFFE_UTIL_IO_H_\n#define CAFFE_UTIL_IO_H_\n\n#include <unistd.h>\n#include <string>\n\n#include \"google/protobuf/mess"
},
{
"path": "include/caffe/util/math_functions.hpp",
"chars": 9477,
"preview": "#ifndef CAFFE_UTIL_MATH_FUNCTIONS_H_\n#define CAFFE_UTIL_MATH_FUNCTIONS_H_\n\n#include <stdint.h>\n#include <cmath> // for "
},
{
"path": "include/caffe/util/mkl_alternate.hpp",
"chars": 3366,
"preview": "#ifndef CAFFE_UTIL_MKL_ALTERNATE_H_\n#define CAFFE_UTIL_MKL_ALTERNATE_H_\n\n#ifdef USE_MKL\n\n#include <mkl.h>\n\n#else // If "
},
{
"path": "include/caffe/util/rng.hpp",
"chars": 1157,
"preview": "#ifndef CAFFE_RNG_CPP_HPP_\n#define CAFFE_RNG_CPP_HPP_\n\n#include <algorithm>\n#include <iterator>\n\n#include \"boost/random/"
},
{
"path": "include/caffe/util/upgrade_proto.hpp",
"chars": 2622,
"preview": "#ifndef CAFFE_UTIL_UPGRADE_PROTO_H_\n#define CAFFE_UTIL_UPGRADE_PROTO_H_\n\n#include <string>\n\n#include \"caffe/proto/caffe."
},
{
"path": "include/caffe/vision_layers.hpp",
"chars": 22720,
"preview": "#ifndef CAFFE_VISION_LAYERS_HPP_\n#define CAFFE_VISION_LAYERS_HPP_\n\n#include <string>\n#include <utility>\n#include <vector"
},
{
"path": "models/lenet300100/lenet300100.prototxt",
"chars": 1124,
"preview": "name: \"LeNet\"\ninput: \"data\"\ninput_dim: 64\ninput_dim: 1\ninput_dim: 28\ninput_dim: 28\nlayer {\n name: \"ip1\"\n type: \"InnerP"
},
{
"path": "models/lenet5/lenet5.prototxt",
"chars": 1692,
"preview": "name: \"LeNet\"\ninput: \"data\"\ninput_dim: 64\ninput_dim: 1\ninput_dim: 28\ninput_dim: 28\nlayer {\n name: \"conv1\"\n type: \"Conv"
},
{
"path": "src/caffe/CMakeLists.txt",
"chars": 1167,
"preview": "# generate protobuf sources\nfile(GLOB proto_files proto/*.proto)\ncaffe_protobuf_generate_cpp_py(${proto_gen_folder} prot"
},
{
"path": "src/caffe/blob.cpp",
"chars": 13580,
"preview": "#include <climits>\n#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#include \"caffe/syncedmem.hp"
},
{
"path": "src/caffe/common.cpp",
"chars": 8985,
"preview": "#include <boost/thread.hpp>\n#include <glog/logging.h>\n#include <cstdio>\n#include <ctime>\n\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/data_reader.cpp",
"chars": 3252,
"preview": "#include <boost/thread.hpp>\n#include <map>\n#include <string>\n#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/data_transformer.cpp",
"chars": 17677,
"preview": "#include <opencv2/core/core.hpp>\n\n#include <string>\n#include <vector>\n\n#include \"caffe/data_transformer.hpp\"\n#include \"c"
},
{
"path": "src/caffe/internal_thread.cpp",
"chars": 1616,
"preview": "#include <boost/thread.hpp>\n#include <exception>\n\n#include \"caffe/internal_thread.hpp\"\n#include \"caffe/util/math_functio"
},
{
"path": "src/caffe/layer.cpp",
"chars": 452,
"preview": "#include <boost/thread.hpp>\n#include \"caffe/layer.hpp\"\n\nnamespace caffe {\n\ntemplate <typename Dtype>\nvoid Layer<Dtype>::"
},
{
"path": "src/caffe/layer_factory.cpp",
"chars": 7179,
"preview": "// Make sure we include Python.h before any system header\n// to avoid _POSIX_C_SOURCE redefinition\n#ifdef WITH_PYTHON_LA"
},
{
"path": "src/caffe/layers/absval_layer.cpp",
"chars": 1359,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/neuron_layers.hpp\"\n#include \"caffe/util/math_functions.hpp"
},
{
"path": "src/caffe/layers/absval_layer.cu",
"chars": 1009,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/accuracy_layer.cpp",
"chars": 3187,
"preview": "#include <algorithm>\n#include <functional>\n#include <utility>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/argmax_layer.cpp",
"chars": 2040,
"preview": "#include <algorithm>\n#include <functional>\n#include <utility>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/base_conv_layer.cpp",
"chars": 11464,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/base_data_layer.cpp",
"chars": 3906,
"preview": "#include <boost/thread.hpp>\n#include <string>\n#include <vector>\n\n#include \"caffe/data_layers.hpp\"\n#include \"caffe/net.hp"
},
{
"path": "src/caffe/layers/base_data_layer.cu",
"chars": 829,
"preview": "#include <vector>\n\n#include \"caffe/data_layers.hpp\"\n\nnamespace caffe {\n\ntemplate <typename Dtype>\nvoid BasePrefetchingDa"
},
{
"path": "src/caffe/layers/bnll_layer.cpp",
"chars": 1343,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/bnll_layer.cu",
"chars": 1854,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/compress_conv_layer.cpp",
"chars": 7799,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/u"
},
{
"path": "src/caffe/layers/compress_conv_layer.cu",
"chars": 10412,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/u"
},
{
"path": "src/caffe/layers/compress_inner_product_layer.cpp",
"chars": 9834,
"preview": "#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#include \"caffe/filler.hpp\"\n#include \"caffe/lay"
},
{
"path": "src/caffe/layers/compress_inner_product_layer.cu",
"chars": 10475,
"preview": "#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#include \"caffe/filler.hpp\"\n#include \"caffe/lay"
},
{
"path": "src/caffe/layers/concat_layer.cpp",
"chars": 3833,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/concat_layer.cu",
"chars": 2905,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/contrastive_loss_layer.cpp",
"chars": 4176,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/loss_layers.hpp\"\n#include \"caffe/util"
},
{
"path": "src/caffe/layers/contrastive_loss_layer.cu",
"chars": 3796,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/util/mat"
},
{
"path": "src/caffe/layers/conv_layer.cpp",
"chars": 2673,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/conv_layer.cu",
"chars": 2345,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/cudnn_conv_layer.cpp",
"chars": 4334,
"preview": "#ifdef USE_CUDNN\n#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.h"
},
{
"path": "src/caffe/layers/cudnn_conv_layer.cu",
"chars": 5935,
"preview": "#ifdef USE_CUDNN\n#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.h"
},
{
"path": "src/caffe/layers/cudnn_pooling_layer.cpp",
"chars": 1631,
"preview": "#ifdef USE_CUDNN\n#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.h"
},
{
"path": "src/caffe/layers/cudnn_pooling_layer.cu",
"chars": 1426,
"preview": "#ifdef USE_CUDNN\n#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.h"
},
{
"path": "src/caffe/layers/cudnn_relu_layer.cpp",
"chars": 1324,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/cudnn_relu_layer.cu",
"chars": 1810,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/cudnn_sigmoid_layer.cpp",
"chars": 1345,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/cudnn_sigmoid_layer.cu",
"chars": 1442,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/cudnn_softmax_layer.cpp",
"chars": 1394,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"thrust/device_vector.h\"\n\n#include \""
},
{
"path": "src/caffe/layers/cudnn_softmax_layer.cu",
"chars": 1502,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"thrust/device_vector.h\"\n\n#include \""
},
{
"path": "src/caffe/layers/cudnn_tanh_layer.cpp",
"chars": 1324,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/cudnn_tanh_layer.cu",
"chars": 1428,
"preview": "#ifdef USE_CUDNN\n#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/data_layer.cpp",
"chars": 3732,
"preview": "#include <opencv2/core/core.hpp>\n\n#include <stdint.h>\n\n#include <string>\n#include <vector>\n\n#include \"caffe/common.hpp\"\n"
},
{
"path": "src/caffe/layers/deconv_layer.cpp",
"chars": 2838,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/deconv_layer.cu",
"chars": 2347,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/dropout_layer.cpp",
"chars": 2424,
"preview": "// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy.\n\n#include <vector>\n\n#include \"caffe/common.hp"
},
{
"path": "src/caffe/layers/dropout_layer.cu",
"chars": 2456,
"preview": "#include <algorithm>\n#include <limits>\n#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/layer.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/dummy_data_layer.cpp",
"chars": 4843,
"preview": "#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace "
},
{
"path": "src/caffe/layers/eltwise_layer.cpp",
"chars": 5517,
"preview": "#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe"
},
{
"path": "src/caffe/layers/eltwise_layer.cu",
"chars": 4543,
"preview": "#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe"
},
{
"path": "src/caffe/layers/euclidean_loss_layer.cpp",
"chars": 1763,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#inc"
},
{
"path": "src/caffe/layers/euclidean_loss_layer.cu",
"chars": 1373,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#inc"
},
{
"path": "src/caffe/layers/exp_layer.cpp",
"chars": 2367,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/exp_layer.cu",
"chars": 1350,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/filter_layer.cpp",
"chars": 4705,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/filter_layer.cu",
"chars": 2734,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/flatten_layer.cpp",
"chars": 1392,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/hdf5_data_layer.cpp",
"chars": 5694,
"preview": "/*\nTODO:\n- load file in a separate thread (\"prefetch\")\n- can be smarter about the memcpy call instead of doing it row-by"
},
{
"path": "src/caffe/layers/hdf5_data_layer.cu",
"chars": 1648,
"preview": "/*\nTODO:\n- only load parts of the file, in accordance with a prototxt param \"max_mem\"\n*/\n\n#include <stdint.h>\n#include <"
},
{
"path": "src/caffe/layers/hdf5_output_layer.cpp",
"chars": 2637,
"preview": "#include <vector>\n\n#include \"hdf5.h\"\n#include \"hdf5_hl.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/hdf5_output_layer.cu",
"chars": 1414,
"preview": "#include <vector>\n\n#include \"hdf5.h\"\n#include \"hdf5_hl.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/hinge_loss_layer.cpp",
"chars": 2476,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/im2col_layer.cpp",
"chars": 3519,
"preview": "#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/im2col_layer.cu",
"chars": 1222,
"preview": "#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/im2col.hpp\"\n#include \"caf"
},
{
"path": "src/caffe/layers/image_data_layer.cpp",
"chars": 6468,
"preview": "#include <opencv2/core/core.hpp>\n\n#include <fstream> // NOLINT(readability/streams)\n#include <iostream> // NOLINT(read"
},
{
"path": "src/caffe/layers/infogain_loss_layer.cpp",
"chars": 3592,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/inner_product_layer.cpp",
"chars": 4919,
"preview": "#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#include \"caffe/filler.hpp\"\n#include \"caffe/lay"
},
{
"path": "src/caffe/layers/inner_product_layer.cu",
"chars": 2414,
"preview": "#include <vector>\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#include \"caffe/filler.hpp\"\n#include \"caffe/lay"
},
{
"path": "src/caffe/layers/loss_layer.cpp",
"chars": 922,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/lrn_layer.cpp",
"chars": 10883,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/lrn_layer.cu",
"chars": 7783,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/memory_data_layer.cpp",
"chars": 4376,
"preview": "#include <opencv2/core/core.hpp>\n\n#include <vector>\n\n#include \"caffe/data_layers.hpp\"\n#include \"caffe/layer.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/multinomial_logistic_loss_layer.cpp",
"chars": 2223,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/mvn_layer.cpp",
"chars": 5348,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/common_layers.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/mvn_layer.cu",
"chars": 4590,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/common_layers.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/ut"
},
{
"path": "src/caffe/layers/neuron_layer.cpp",
"chars": 330,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {\n\ntemplate <typename "
},
{
"path": "src/caffe/layers/pooling_layer.cpp",
"chars": 11611,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/layer.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/pooling_layer.cu",
"chars": 15456,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions"
},
{
"path": "src/caffe/layers/power_layer.cpp",
"chars": 3634,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/power_layer.cu",
"chars": 3157,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/prelu_layer.cpp",
"chars": 4752,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/filler.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_la"
},
{
"path": "src/caffe/layers/prelu_layer.cu",
"chars": 4401,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/reduction_layer.cpp",
"chars": 4364,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions"
},
{
"path": "src/caffe/layers/reduction_layer.cu",
"chars": 2952,
"preview": "#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe"
},
{
"path": "src/caffe/layers/relu_layer.cpp",
"chars": 1375,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/relu_layer.cu",
"chars": 2232,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/reshape_layer.cpp",
"chars": 3920,
"preview": "#include <vector>\n\n#include \"caffe/common_layers.hpp\"\n#include \"caffe/layer.hpp\"\n\nnamespace caffe {\n\ntemplate <typename "
},
{
"path": "src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp",
"chars": 2882,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions"
},
{
"path": "src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu",
"chars": 1165,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions"
},
{
"path": "src/caffe/layers/sigmoid_layer.cpp",
"chars": 1262,
"preview": "#include <algorithm>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/sigmoid_layer.cu",
"chars": 2003,
"preview": "#include <algorithm>\n#include <cmath>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n"
},
{
"path": "src/caffe/layers/silence_layer.cpp",
"chars": 635,
"preview": "#include <vector>\n\n#include \"caffe/common_layers.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp"
},
{
"path": "src/caffe/layers/silence_layer.cu",
"chars": 735,
"preview": "#include <vector>\n\n#include \"caffe/common_layers.hpp\"\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp"
},
{
"path": "src/caffe/layers/slice_layer.cpp",
"chars": 4460,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/slice_layer.cu",
"chars": 2799,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/softmax_layer.cpp",
"chars": 3550,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"ca"
},
{
"path": "src/caffe/layers/softmax_layer.cu",
"chars": 5291,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"thrust/device_vector.h\"\n\n#include \"caffe/layer.hpp\"\n"
},
{
"path": "src/caffe/layers/softmax_loss_layer.cpp",
"chars": 4514,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/layer_factory.hpp\"\n"
},
{
"path": "src/caffe/layers/softmax_loss_layer.cu",
"chars": 4718,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions"
},
{
"path": "src/caffe/layers/split_layer.cpp",
"chars": 1935,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/split_layer.cu",
"chars": 1120,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n#include \"caffe/vision_layers.hpp"
},
{
"path": "src/caffe/layers/spp_layer.cpp",
"chars": 6811,
"preview": "#include <algorithm>\n#include <cfloat>\n#include <vector>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/layer.hpp\"\n#includ"
},
{
"path": "src/caffe/layers/tanh_layer.cpp",
"chars": 1229,
"preview": "// TanH neuron activation function layer.\n// Adapted from ReLU layer code written by Yangqing Jia\n\n#include <algorithm>\n"
},
{
"path": "src/caffe/layers/tanh_layer.cu",
"chars": 1791,
"preview": "// TanH neuron activation function layer.\n// Adapted from ReLU layer code written by Yangqing Jia\n\n#include <algorithm>\n"
},
{
"path": "src/caffe/layers/threshold_layer.cpp",
"chars": 912,
"preview": "#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\n\nnamespace caffe {\n\ntemplate <typename"
},
{
"path": "src/caffe/layers/threshold_layer.cu",
"chars": 893,
"preview": "#include <algorithm>\n#include <vector>\n\n#include \"caffe/layer.hpp\"\n#include \"caffe/vision_layers.hpp\"\n\nnamespace caffe {"
},
{
"path": "src/caffe/layers/window_data_layer.cpp",
"chars": 17350,
"preview": "#include <opencv2/highgui/highgui_c.h>\n#include <stdint.h>\n\n#include <algorithm>\n#include <map>\n#include <string>\n#inclu"
},
{
"path": "src/caffe/net.cpp",
"chars": 43176,
"preview": "#include <algorithm>\n#include <map>\n#include <set>\n#include <string>\n#include <utility>\n#include <vector>\n\n#include \"hdf"
},
{
"path": "src/caffe/parallel.cpp",
"chars": 12793,
"preview": "#ifndef CPU_ONLY\n#include <cuda_runtime.h>\n#endif\n#include <glog/logging.h>\n#include <stdio.h>\n#include <sys/ioctl.h>\n#i"
},
{
"path": "src/caffe/proto/caffe.proto",
"chars": 47190,
"preview": "syntax = \"proto2\";\n\npackage caffe;\n\n// Specifies the shape (dimensions) of a Blob.\nmessage BlobShape {\n repeated int64 "
},
{
"path": "src/caffe/solver.cpp",
"chars": 45082,
"preview": "#include <cstdio>\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n#include \"hdf5.h\"\n#include \"hdf5_hl.h\"\n\n#in"
},
{
"path": "src/caffe/syncedmem.cpp",
"chars": 3210,
"preview": "#include <cstring>\n\n#include \"caffe/common.hpp\"\n#include \"caffe/syncedmem.hpp\"\n#include \"caffe/util/math_functions.hpp\"\n"
},
{
"path": "src/caffe/test/CMakeLists.txt",
"chars": 1477,
"preview": "# The option allows to include in build only selected test files and exclude all others\n# Usage example:\n# cmake -DBUIL"
},
{
"path": "src/caffe/test/test_accuracy_layer.cpp",
"chars": 7603,
"preview": "#include <cfloat>\n#include <cmath>\n#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob."
},
{
"path": "src/caffe/test/test_argmax_layer.cpp",
"chars": 5574,
"preview": "#include <utility>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/test/test_benchmark.cpp",
"chars": 2492,
"preview": "#include <unistd.h> // for usleep\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/common.hpp\"\n#include \"caffe/util/benchmark"
},
{
"path": "src/caffe/test/test_blob.cpp",
"chars": 9546,
"preview": "#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/test/test_caffe_main.cpp",
"chars": 1155,
"preview": "// The main caffe test code. Your test cpp code should include this hpp\n// to allow a main function to be compiled into "
},
{
"path": "src/caffe/test/test_common.cpp",
"chars": 1833,
"preview": "#include <cstring>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/common.hpp\"\n#include \"caffe/syncedmem.hpp\"\n#include \"caffe"
},
{
"path": "src/caffe/test/test_concat_layer.cpp",
"chars": 6639,
"preview": "#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/test/test_contrastive_loss_layer.cpp",
"chars": 5317,
"preview": "#include <algorithm>\n#include <cmath>\n#include <cstdlib>\n#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n"
},
{
"path": "src/caffe/test/test_convolution_layer.cpp",
"chars": 27169,
"preview": "#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/test/test_data/generate_sample_data.py",
"chars": 2023,
"preview": "\"\"\"\nGenerate data used in the HDF5DataLayer and GradientBasedSolver tests.\n\"\"\"\nimport os\nimport numpy as np\nimport h5py\n"
},
{
"path": "src/caffe/test/test_data/sample_data_list.txt",
"chars": 87,
"preview": "src/caffe/test/test_data/sample_data.h5\nsrc/caffe/test/test_data/sample_data_2_gzip.h5\n"
},
{
"path": "src/caffe/test/test_data/solver_data_list.txt",
"chars": 40,
"preview": "src/caffe/test/test_data/solver_data.h5\n"
},
{
"path": "src/caffe/test/test_data_layer.cpp",
"chars": 14857,
"preview": "#include <string>\n#include <vector>\n\n#include \"boost/scoped_ptr.hpp\"\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\""
},
{
"path": "src/caffe/test/test_data_transformer.cpp",
"chars": 11873,
"preview": "#include <string>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n#include \"leveldb/db.h\"\n\n#include \"caffe/blob.hpp\"\n#includ"
},
{
"path": "src/caffe/test/test_db.cpp",
"chars": 3676,
"preview": "#include <string>\n\n#include \"boost/scoped_ptr.hpp\"\n#include \"gtest/gtest.h\"\n\n#include \"caffe/common.hpp\"\n#include \"caffe"
},
{
"path": "src/caffe/test/test_deconvolution_layer.cpp",
"chars": 5869,
"preview": "#include <cstring>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#i"
},
{
"path": "src/caffe/test/test_dummy_data_layer.cpp",
"chars": 7400,
"preview": "#include <string>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n\n#include \"caffe/blob.hpp\"\n#include \"caffe/common.hpp\"\n#in"
}
]
// ... and 71 more files (download for full content)
About this extraction
This page contains the full source code of the yiwenguo/Dynamic-Network-Surgery GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 271 files (2.7 MB), approximately 717.3k tokens, and a symbol index with 2123 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.